column            dtype           range
repo_name         stringclasses   6 values
pr_number         int64           512 to 78.9k
pr_title          stringlengths   3 to 144
pr_description    stringlengths   0 to 30.3k
author            stringlengths   2 to 21
date_created      unknown
date_merged       unknown
previous_commit   stringlengths   40 to 40
pr_commit         stringlengths   40 to 40
query             stringlengths   17 to 30.4k
filepath          stringlengths   9 to 210
before_content    stringlengths   0 to 112M
after_content     stringlengths   0 to 112M
label             int64           -1 to 1
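A minimal sketch of how a split with this schema could be loaded and inspected using the `datasets` library. The storage format and file path are assumptions for illustration (this document does not name the dataset or its files); a local Parquet export is used as a stand-in:

```python
# Sketch: load rows matching the schema above and inspect one.
# "data/train.parquet" is a hypothetical path, not taken from this document.
from datasets import load_dataset

ds = load_dataset("parquet", data_files={"train": "data/train.parquet"})["train"]

row = ds[0]
print(row["repo_name"], row["pr_number"], row["pr_title"])
print(row["filepath"])
# Per the schema, label is an int64 in [-1, 1]; before_content and
# after_content hold the file text around the PR commit.
print(row["label"], len(row["before_content"]), len(row["after_content"]))
```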
repo_name:       dotnet/runtime
pr_number:       65,901
pr_title:        Remove usages of native bootstrapping
author:          hoyosjs
date_created:    "2022-02-25T17:42:53Z"
date_merged:     "2022-02-25T22:06:34Z"
previous_commit: 2ce0af0b8404cf051783516cff4b07abccd5de00
pr_commit:       8727ac772e1d8031691ee52e2d66e2520f78d01a
query:           Remove usages of native bootstrapping.
filepath:        ./src/native/corehost/corehost.proj
before_content (after_content identical):
<Project Sdk="Microsoft.Build.NoTargets"> <!-- Add basic project properties for NuGet restore, needed to import the SourceLink MSBuild tool package's targets into the build. --> <PropertyGroup> <IncrementalNativeBuild Condition="'$(IncrementalNativeBuild)' == ''">true</IncrementalNativeBuild> <BuildCoreHostDependsOn>GetProductVersions;GenerateNativeVersionFile</BuildCoreHostDependsOn> <BuildCoreHostDependsOn Condition="'$(DisableSourceLink)' != 'true'">$(BuildCoreHostDependsOn);InitializeSourceControlInformationFromSourceControlManager</BuildCoreHostDependsOn> <IntermediateOutputRootPath>$(ArtifactsObjDir)$(OutputRid).$(Configuration)\</IntermediateOutputRootPath> </PropertyGroup> <!-- Target that builds dotnet, hostfxr and hostpolicy with the same version as what NetCoreApp will be built for since the build produced artifacts should always version the same (even if they may not get used). --> <Target Name="BuildCoreHostUnix" Condition="'$(TargetOS)' != 'windows'" AfterTargets="Build" DependsOnTargets="$(BuildCoreHostDependsOn)"> <PropertyGroup> <CMakeBuildDir>$(IntermediateOutputRootPath)corehost\cmake\</CMakeBuildDir> <BuildScript>$([MSBuild]::NormalizePath('$(MSBuildThisFileDirectory)', 'build.sh'))</BuildScript> <BuildArgs>$(Configuration) $(TargetArchitecture) -apphostver "$(AppHostVersion)" -hostver "$(HostVersion)" -fxrver "$(HostResolverVersion)" -policyver "$(HostPolicyVersion)" -commithash "$([MSBuild]::ValueOrDefault('$(SourceRevisionId)', 'N/A'))" -os $(TargetOS)</BuildArgs> <BuildArgs>$(BuildArgs) -cmakeargs "-DVERSION_FILE_PATH=$(NativeVersionFile)"</BuildArgs> <BuildArgs Condition="'$(ConfigureOnly)' == 'true'">$(BuildArgs) -configureonly</BuildArgs> <BuildArgs Condition="'$(PortableBuild)' != 'true'">$(BuildArgs) -portablebuild=false</BuildArgs> <BuildArgs Condition="'$(KeepNativeSymbols)' != 'false'">$(BuildArgs) -keepnativesymbols</BuildArgs> <BuildArgs Condition="'$(CrossBuild)' == 'true'">$(BuildArgs) -cross</BuildArgs> <BuildArgs Condition="'$(Compiler)' != ''">$(BuildArgs) $(Compiler)</BuildArgs> <BuildArgs Condition="'$(CMakeArgs)' != ''">$(BuildArgs) $(CMakeArgs)</BuildArgs> <BuildArgs>$(BuildArgs) -coreclrartifacts $(CoreCLRArtifactsPath)</BuildArgs> <BuildArgs Condition="'$(Ninja)' == 'true'">$(BuildArgs) -ninja</BuildArgs> <BuildArgs>$(BuildArgs) -runtimeflavor $(RuntimeFlavor)</BuildArgs> <BuildArgs Condition="'$(OfficialBuildId)' != ''">$(BuildArgs) /p:OfficialBuildId="$(OfficialBuildId)"</BuildArgs> </PropertyGroup> <!-- Use IgnoreStandardErrorWarningFormat because Arcade sets WarnAsError and there's an existing warning in the macOS build when dsymutil tries to strip symbols. 
--> <Message Text="&quot;$(BuildScript)&quot; $(BuildArgs)" Importance="High"/> <Exec Command="&quot;$(BuildScript)&quot; $(BuildArgs)" IgnoreStandardErrorWarningFormat="true"/> </Target> <Target Name="BuildCoreHostWindows" Condition="'$(TargetOS)' == 'windows'" AfterTargets="Build" DependsOnTargets="$(BuildCoreHostDependsOn)"> <!-- Generate Version files --> <ItemGroup> <HostFiles Include="dotnet"> <FileDescription>.NET Host</FileDescription> </HostFiles> <HostFiles Include="hostfxr"> <FileDescription>.NET Host Resolver - $(HostResolverVersion)</FileDescription> </HostFiles> <HostFiles Include="hostpolicy"> <FileDescription>.NET Host Policy - $(HostPolicyVersion)</FileDescription> </HostFiles> <HostFiles Include="comhost"> <FileDescription>.NET COM Host</FileDescription> </HostFiles> <HostFiles Include="ijwhost"> <FileDescription>.NET IJW Host</FileDescription> </HostFiles> <HostFiles Include="nethost"> <FileDescription>.NET Component Host</FileDescription> </HostFiles> </ItemGroup> <MSBuild Projects="$(MSBuildProjectFullPath)" Properties=" GenerateNativeVersionInfo=true; AssemblyName=%(HostFiles.FileDescription); NativeVersionFile=$(IntermediateOutputRootPath)hostResourceFiles\%(HostFiles.Identity)\version_info.h" Targets="GenerateNativeVersionFile" Condition=" '$(IncrementalNativeBuild)' != 'true' or !Exists('$(IntermediateOutputRootPath)hostResourceFiles\%(HostFiles.Identity)\version_info.h')"/> <PropertyGroup> <BuildScript>$([MSBuild]::NormalizePath('$(MSBuildThisFileDirectory)', 'build.cmd'))</BuildScript> <BuildArgs>$(Configuration) $(TargetArchitecture) apphostver $(AppHostVersion) hostver $(HostVersion) fxrver $(HostResolverVersion) policyver $(HostPolicyVersion) commit $([MSBuild]::ValueOrDefault('$(SourceRevisionId)', 'N/A')) rid $(OutputRid)</BuildArgs> <BuildArgs Condition="'$(ConfigureOnly)' == 'true'">$(BuildArgs) configureonly</BuildArgs> <BuildArgs Condition="'$(PortableBuild)' == 'true'">$(BuildArgs) portable</BuildArgs> <BuildArgs Condition="'$(IncrementalNativeBuild)' == 'true'">$(BuildArgs) incremental-native-build</BuildArgs> <BuildArgs>$(BuildArgs) rootdir $(RepoRoot)</BuildArgs> <BuildArgs>$(BuildArgs) coreclrartifacts $(CoreCLRArtifactsPath)</BuildArgs> <BuildArgs Condition="'$(Ninja)' == 'false'">$(BuildArgs) msbuild</BuildArgs> <BuildArgs>$(BuildArgs) runtimeflavor $(RuntimeFlavor)</BuildArgs> <BuildArgs>$(BuildArgs) runtimeconfiguration $(RuntimeConfiguration)</BuildArgs> </PropertyGroup> <!-- Run script that invokes Cmake to create VS files, and then calls msbuild to compile them. Use IgnoreStandardErrorWarningFormat because Arcade sets WarnAsError and there's an existing warning in the native build. --> <Message Text="&quot;$(BuildScript)&quot; $(BuildArgs)" Importance="High"/> <Exec Command="&quot;$(BuildScript)&quot; $(BuildArgs)" IgnoreStandardErrorWarningFormat="true"/> </Target> <Target Name="PrependWindowsHeaderIncludeToVersionHeaderFile" Condition="'$(TargetOS)' == 'windows'" AfterTargets="GenerateNativeVersionFile"> <PropertyGroup> <IncludeStatementLine>#include &lt;Windows.h&gt;</IncludeStatementLine> <NativeVersionFileContents>$([System.IO.File]::ReadAllText('$(NativeVersionFile)'))</NativeVersionFileContents> </PropertyGroup> <WriteLinesToFile File="$(NativeVersionFile)" Lines="$(IncludeStatementLine);$(NativeVersionFileContents)" Overwrite="true" /> </Target> </Project>
<Project Sdk="Microsoft.Build.NoTargets"> <!-- Add basic project properties for NuGet restore, needed to import the SourceLink MSBuild tool package's targets into the build. --> <PropertyGroup> <IncrementalNativeBuild Condition="'$(IncrementalNativeBuild)' == ''">true</IncrementalNativeBuild> <BuildCoreHostDependsOn>GetProductVersions;GenerateNativeVersionFile</BuildCoreHostDependsOn> <BuildCoreHostDependsOn Condition="'$(DisableSourceLink)' != 'true'">$(BuildCoreHostDependsOn);InitializeSourceControlInformationFromSourceControlManager</BuildCoreHostDependsOn> <IntermediateOutputRootPath>$(ArtifactsObjDir)$(OutputRid).$(Configuration)\</IntermediateOutputRootPath> </PropertyGroup> <!-- Target that builds dotnet, hostfxr and hostpolicy with the same version as what NetCoreApp will be built for since the build produced artifacts should always version the same (even if they may not get used). --> <Target Name="BuildCoreHostUnix" Condition="'$(TargetOS)' != 'windows'" AfterTargets="Build" DependsOnTargets="$(BuildCoreHostDependsOn)"> <PropertyGroup> <CMakeBuildDir>$(IntermediateOutputRootPath)corehost\cmake\</CMakeBuildDir> <BuildScript>$([MSBuild]::NormalizePath('$(MSBuildThisFileDirectory)', 'build.sh'))</BuildScript> <BuildArgs>$(Configuration) $(TargetArchitecture) -apphostver "$(AppHostVersion)" -hostver "$(HostVersion)" -fxrver "$(HostResolverVersion)" -policyver "$(HostPolicyVersion)" -commithash "$([MSBuild]::ValueOrDefault('$(SourceRevisionId)', 'N/A'))" -os $(TargetOS)</BuildArgs> <BuildArgs>$(BuildArgs) -cmakeargs "-DVERSION_FILE_PATH=$(NativeVersionFile)"</BuildArgs> <BuildArgs Condition="'$(ConfigureOnly)' == 'true'">$(BuildArgs) -configureonly</BuildArgs> <BuildArgs Condition="'$(PortableBuild)' != 'true'">$(BuildArgs) -portablebuild=false</BuildArgs> <BuildArgs Condition="'$(KeepNativeSymbols)' != 'false'">$(BuildArgs) -keepnativesymbols</BuildArgs> <BuildArgs Condition="'$(CrossBuild)' == 'true'">$(BuildArgs) -cross</BuildArgs> <BuildArgs Condition="'$(Compiler)' != ''">$(BuildArgs) $(Compiler)</BuildArgs> <BuildArgs Condition="'$(CMakeArgs)' != ''">$(BuildArgs) $(CMakeArgs)</BuildArgs> <BuildArgs>$(BuildArgs) -coreclrartifacts $(CoreCLRArtifactsPath)</BuildArgs> <BuildArgs Condition="'$(Ninja)' == 'true'">$(BuildArgs) -ninja</BuildArgs> <BuildArgs>$(BuildArgs) -runtimeflavor $(RuntimeFlavor)</BuildArgs> <BuildArgs Condition="'$(OfficialBuildId)' != ''">$(BuildArgs) /p:OfficialBuildId="$(OfficialBuildId)"</BuildArgs> </PropertyGroup> <!-- Use IgnoreStandardErrorWarningFormat because Arcade sets WarnAsError and there's an existing warning in the macOS build when dsymutil tries to strip symbols. 
--> <Message Text="&quot;$(BuildScript)&quot; $(BuildArgs)" Importance="High"/> <Exec Command="&quot;$(BuildScript)&quot; $(BuildArgs)" IgnoreStandardErrorWarningFormat="true"/> </Target> <Target Name="BuildCoreHostWindows" Condition="'$(TargetOS)' == 'windows'" AfterTargets="Build" DependsOnTargets="$(BuildCoreHostDependsOn)"> <!-- Generate Version files --> <ItemGroup> <HostFiles Include="dotnet"> <FileDescription>.NET Host</FileDescription> </HostFiles> <HostFiles Include="hostfxr"> <FileDescription>.NET Host Resolver - $(HostResolverVersion)</FileDescription> </HostFiles> <HostFiles Include="hostpolicy"> <FileDescription>.NET Host Policy - $(HostPolicyVersion)</FileDescription> </HostFiles> <HostFiles Include="comhost"> <FileDescription>.NET COM Host</FileDescription> </HostFiles> <HostFiles Include="ijwhost"> <FileDescription>.NET IJW Host</FileDescription> </HostFiles> <HostFiles Include="nethost"> <FileDescription>.NET Component Host</FileDescription> </HostFiles> </ItemGroup> <MSBuild Projects="$(MSBuildProjectFullPath)" Properties=" GenerateNativeVersionInfo=true; AssemblyName=%(HostFiles.FileDescription); NativeVersionFile=$(IntermediateOutputRootPath)hostResourceFiles\%(HostFiles.Identity)\version_info.h" Targets="GenerateNativeVersionFile" Condition=" '$(IncrementalNativeBuild)' != 'true' or !Exists('$(IntermediateOutputRootPath)hostResourceFiles\%(HostFiles.Identity)\version_info.h')"/> <PropertyGroup> <BuildScript>$([MSBuild]::NormalizePath('$(MSBuildThisFileDirectory)', 'build.cmd'))</BuildScript> <BuildArgs>$(Configuration) $(TargetArchitecture) apphostver $(AppHostVersion) hostver $(HostVersion) fxrver $(HostResolverVersion) policyver $(HostPolicyVersion) commit $([MSBuild]::ValueOrDefault('$(SourceRevisionId)', 'N/A')) rid $(OutputRid)</BuildArgs> <BuildArgs Condition="'$(ConfigureOnly)' == 'true'">$(BuildArgs) configureonly</BuildArgs> <BuildArgs Condition="'$(PortableBuild)' == 'true'">$(BuildArgs) portable</BuildArgs> <BuildArgs Condition="'$(IncrementalNativeBuild)' == 'true'">$(BuildArgs) incremental-native-build</BuildArgs> <BuildArgs>$(BuildArgs) rootdir $(RepoRoot)</BuildArgs> <BuildArgs>$(BuildArgs) coreclrartifacts $(CoreCLRArtifactsPath)</BuildArgs> <BuildArgs Condition="'$(Ninja)' == 'false'">$(BuildArgs) msbuild</BuildArgs> <BuildArgs>$(BuildArgs) runtimeflavor $(RuntimeFlavor)</BuildArgs> <BuildArgs>$(BuildArgs) runtimeconfiguration $(RuntimeConfiguration)</BuildArgs> </PropertyGroup> <!-- Run script that invokes Cmake to create VS files, and then calls msbuild to compile them. Use IgnoreStandardErrorWarningFormat because Arcade sets WarnAsError and there's an existing warning in the native build. --> <Message Text="&quot;$(BuildScript)&quot; $(BuildArgs)" Importance="High"/> <Exec Command="&quot;$(BuildScript)&quot; $(BuildArgs)" IgnoreStandardErrorWarningFormat="true"/> </Target> <Target Name="PrependWindowsHeaderIncludeToVersionHeaderFile" Condition="'$(TargetOS)' == 'windows'" AfterTargets="GenerateNativeVersionFile"> <PropertyGroup> <IncludeStatementLine>#include &lt;Windows.h&gt;</IncludeStatementLine> <NativeVersionFileContents>$([System.IO.File]::ReadAllText('$(NativeVersionFile)'))</NativeVersionFileContents> </PropertyGroup> <WriteLinesToFile File="$(NativeVersionFile)" Lines="$(IncludeStatementLine);$(NativeVersionFileContents)" Overwrite="true" /> </Target> </Project>
label: -1
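Since each row carries a before_content/after_content pair keyed to the two commits, the natural first check on a row like the one above is whether the file actually changed. A small sketch using Python's difflib, reusing the `row` variable from the loading example:

```python
# Sketch: unified diff of a row's file content across the PR commit.
import difflib

def content_diff(row):
    return "".join(
        difflib.unified_diff(
            row["before_content"].splitlines(keepends=True),
            row["after_content"].splitlines(keepends=True),
            fromfile=row["previous_commit"][:8],
            tofile=row["pr_commit"][:8],
        )
    )

# An empty diff means the file was untouched by the PR. Reading label -1 as
# "file not relevant to the query" is an assumption; the data does not define
# the label semantics.
print(content_diff(row) or "<no changes>")
```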
repo_name:       dotnet/runtime
pr_number:       65,901
pr_title:        Remove usages of native bootstrapping
author:          hoyosjs
date_created:    "2022-02-25T17:42:53Z"
date_merged:     "2022-02-25T22:06:34Z"
previous_commit: 2ce0af0b8404cf051783516cff4b07abccd5de00
pr_commit:       8727ac772e1d8031691ee52e2d66e2520f78d01a
query:           Remove usages of native bootstrapping.
filepath:        ./src/libraries/System.Runtime.Loader/tests/ApplyUpdate/System.Reflection.Metadata.ApplyUpdate.Test.AddNestedClass/deltascript.json
before_content (after_content identical):
{ "changes": [ {"document": "AddNestedClass.cs", "update": "AddNestedClass_v1.cs"}, ] }
{ "changes": [ {"document": "AddNestedClass.cs", "update": "AddNestedClass_v1.cs"}, ] }
label: -1
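The deltascript.json payload above contains a trailing comma inside the "changes" array, so it is not strict JSON and `json.loads` rejects it. A hedged sketch of one way to parse it anyway, by stripping trailing commas first (an illustrative workaround, not how dotnet/runtime's own tooling reads these files):

```python
# Sketch: parse the deltascript payload despite its trailing comma.
import json
import re

raw = '{ "changes": [ {"document": "AddNestedClass.cs", "update": "AddNestedClass_v1.cs"}, ] }'

# Strict parsing fails on the ", ]" sequence:
try:
    json.loads(raw)
except json.JSONDecodeError:
    pass  # expected

# Drop commas that directly precede a closing bracket or brace, then parse.
lenient = re.sub(r",\s*([\]}])", r"\1", raw)
changes = json.loads(lenient)["changes"]
print(changes)  # [{'document': 'AddNestedClass.cs', 'update': 'AddNestedClass_v1.cs'}]
```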
repo_name:       dotnet/runtime
pr_number:       65,901
pr_title:        Remove usages of native bootstrapping
author:          hoyosjs
date_created:    "2022-02-25T17:42:53Z"
date_merged:     "2022-02-25T22:06:34Z"
previous_commit: 2ce0af0b8404cf051783516cff4b07abccd5de00
pr_commit:       8727ac772e1d8031691ee52e2d66e2520f78d01a
query:           Remove usages of native bootstrapping.
filepath:        ./src/libraries/native-binplace.proj
before_content (after_content identical):
<Project> <Import Sdk="Microsoft.NET.Sdk" Project="Sdk.props" /> <PropertyGroup> <TargetFramework>$(NetCoreAppCurrent)</TargetFramework> <BinPlaceRuntime>false</BinPlaceRuntime> <BinPlaceNative>true</BinPlaceNative> <DisableImplicitFrameworkReferences>true</DisableImplicitFrameworkReferences> </PropertyGroup> <Import Sdk="Microsoft.NET.Sdk" Project="Sdk.targets" /> <!-- Ordering matters! Overriding GetBinPlaceItems and Build targets after the Sdk import. --> <Target Name="GetBinPlaceItems"> <ItemGroup> <BinPlaceItem Include="$(NativeBinDir)*.dll" /> <BinPlaceItem Include="$(NativeBinDir)*.pdb" /> <BinPlaceItem Include="$(NativeBinDir)*.lib" /> <BinPlaceItem Include="$(NativeBinDir)*.a" /> <BinPlaceItem Include="$(NativeBinDir)*.bc" /> <BinPlaceItem Include="$(NativeBinDir)*.so" /> <BinPlaceItem Include="$(NativeBinDir)*.dbg" /> <BinPlaceItem Include="$(NativeBinDir)*.dylib" /> <BinPlaceItem Include="$(NativeBinDir)*.dwarf" /> <FileWrites Include="@(BinPlaceItem)" /> </ItemGroup> </Target> <Target Name="Build" DependsOnTargets="BinPlace" /> <Target Name="CreateManifestResourceNames" /> </Project>
<Project> <Import Sdk="Microsoft.NET.Sdk" Project="Sdk.props" /> <PropertyGroup> <TargetFramework>$(NetCoreAppCurrent)</TargetFramework> <BinPlaceRuntime>false</BinPlaceRuntime> <BinPlaceNative>true</BinPlaceNative> <DisableImplicitFrameworkReferences>true</DisableImplicitFrameworkReferences> </PropertyGroup> <Import Sdk="Microsoft.NET.Sdk" Project="Sdk.targets" /> <!-- Ordering matters! Overriding GetBinPlaceItems and Build targets after the Sdk import. --> <Target Name="GetBinPlaceItems"> <ItemGroup> <BinPlaceItem Include="$(NativeBinDir)*.dll" /> <BinPlaceItem Include="$(NativeBinDir)*.pdb" /> <BinPlaceItem Include="$(NativeBinDir)*.lib" /> <BinPlaceItem Include="$(NativeBinDir)*.a" /> <BinPlaceItem Include="$(NativeBinDir)*.bc" /> <BinPlaceItem Include="$(NativeBinDir)*.so" /> <BinPlaceItem Include="$(NativeBinDir)*.dbg" /> <BinPlaceItem Include="$(NativeBinDir)*.dylib" /> <BinPlaceItem Include="$(NativeBinDir)*.dwarf" /> <FileWrites Include="@(BinPlaceItem)" /> </ItemGroup> </Target> <Target Name="Build" DependsOnTargets="BinPlace" /> <Target Name="CreateManifestResourceNames" /> </Project>
label: -1
repo_name:       dotnet/runtime
pr_number:       65,901
pr_title:        Remove usages of native bootstrapping
author:          hoyosjs
date_created:    "2022-02-25T17:42:53Z"
date_merged:     "2022-02-25T22:06:34Z"
previous_commit: 2ce0af0b8404cf051783516cff4b07abccd5de00
pr_commit:       8727ac772e1d8031691ee52e2d66e2520f78d01a
query:           Remove usages of native bootstrapping.
filepath:        ./src/libraries/System.Net.Http/tests/FunctionalTests/package.json
before_content (after_content identical):
{ "name": "system.net.websockets.client.tests", "private": true, "dependencies": { "node-abort-controller": "3.0.1", "node-fetch": "2.6.7", "ws": "8.4.0" } }
{ "name": "system.net.websockets.client.tests", "private": true, "dependencies": { "node-abort-controller": "3.0.1", "node-fetch": "2.6.7", "ws": "8.4.0" } }
label: -1
repo_name:       dotnet/runtime
pr_number:       65,901
pr_title:        Remove usages of native bootstrapping
author:          hoyosjs
date_created:    "2022-02-25T17:42:53Z"
date_merged:     "2022-02-25T22:06:34Z"
previous_commit: 2ce0af0b8404cf051783516cff4b07abccd5de00
pr_commit:       8727ac772e1d8031691ee52e2d66e2520f78d01a
query:           Remove usages of native bootstrapping.
filepath:        ./src/libraries/Microsoft.NETCore.Platforms/src/runtime.compatibility.json
before_content:
{ "alpine": [ "alpine", "linux-musl", "linux", "unix", "any", "base" ], "alpine-arm": [ "alpine-arm", "alpine", "linux-musl-arm", "linux-musl", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "alpine-arm64": [ "alpine-arm64", "alpine", "linux-musl-arm64", "linux-musl", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "alpine-x64": [ "alpine-x64", "alpine", "linux-musl-x64", "linux-musl", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "alpine.3.10": [ "alpine.3.10", "alpine.3.9", "alpine.3.8", "alpine.3.7", "alpine.3.6", "alpine", "linux-musl", "linux", "unix", "any", "base" ], "alpine.3.10-arm": [ "alpine.3.10-arm", "alpine.3.10", "alpine.3.9-arm", "alpine.3.9", "alpine.3.8-arm", "alpine.3.8", "alpine.3.7-arm", "alpine.3.7", "alpine.3.6-arm", "alpine.3.6", "alpine-arm", "alpine", "linux-musl-arm", "linux-musl", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "alpine.3.10-arm64": [ "alpine.3.10-arm64", "alpine.3.10", "alpine.3.9-arm64", "alpine.3.9", "alpine.3.8-arm64", "alpine.3.8", "alpine.3.7-arm64", "alpine.3.7", "alpine.3.6-arm64", "alpine.3.6", "alpine-arm64", "alpine", "linux-musl-arm64", "linux-musl", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "alpine.3.10-x64": [ "alpine.3.10-x64", "alpine.3.10", "alpine.3.9-x64", "alpine.3.9", "alpine.3.8-x64", "alpine.3.8", "alpine.3.7-x64", "alpine.3.7", "alpine.3.6-x64", "alpine.3.6", "alpine-x64", "alpine", "linux-musl-x64", "linux-musl", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "alpine.3.11": [ "alpine.3.11", "alpine.3.10", "alpine.3.9", "alpine.3.8", "alpine.3.7", "alpine.3.6", "alpine", "linux-musl", "linux", "unix", "any", "base" ], "alpine.3.11-arm": [ "alpine.3.11-arm", "alpine.3.11", "alpine.3.10-arm", "alpine.3.10", "alpine.3.9-arm", "alpine.3.9", "alpine.3.8-arm", "alpine.3.8", "alpine.3.7-arm", "alpine.3.7", "alpine.3.6-arm", "alpine.3.6", "alpine-arm", "alpine", "linux-musl-arm", "linux-musl", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "alpine.3.11-arm64": [ "alpine.3.11-arm64", "alpine.3.11", "alpine.3.10-arm64", "alpine.3.10", "alpine.3.9-arm64", "alpine.3.9", "alpine.3.8-arm64", "alpine.3.8", "alpine.3.7-arm64", "alpine.3.7", "alpine.3.6-arm64", "alpine.3.6", "alpine-arm64", "alpine", "linux-musl-arm64", "linux-musl", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "alpine.3.11-x64": [ "alpine.3.11-x64", "alpine.3.11", "alpine.3.10-x64", "alpine.3.10", "alpine.3.9-x64", "alpine.3.9", "alpine.3.8-x64", "alpine.3.8", "alpine.3.7-x64", "alpine.3.7", "alpine.3.6-x64", "alpine.3.6", "alpine-x64", "alpine", "linux-musl-x64", "linux-musl", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "alpine.3.12": [ "alpine.3.12", "alpine.3.11", "alpine.3.10", "alpine.3.9", "alpine.3.8", "alpine.3.7", "alpine.3.6", "alpine", "linux-musl", "linux", "unix", "any", "base" ], "alpine.3.12-arm": [ "alpine.3.12-arm", "alpine.3.12", "alpine.3.11-arm", "alpine.3.11", "alpine.3.10-arm", "alpine.3.10", "alpine.3.9-arm", "alpine.3.9", "alpine.3.8-arm", "alpine.3.8", "alpine.3.7-arm", "alpine.3.7", "alpine.3.6-arm", "alpine.3.6", "alpine-arm", "alpine", "linux-musl-arm", "linux-musl", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "alpine.3.12-arm64": [ "alpine.3.12-arm64", "alpine.3.12", "alpine.3.11-arm64", "alpine.3.11", "alpine.3.10-arm64", "alpine.3.10", "alpine.3.9-arm64", "alpine.3.9", "alpine.3.8-arm64", "alpine.3.8", "alpine.3.7-arm64", "alpine.3.7", "alpine.3.6-arm64", "alpine.3.6", "alpine-arm64", "alpine", 
"linux-musl-arm64", "linux-musl", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "alpine.3.12-x64": [ "alpine.3.12-x64", "alpine.3.12", "alpine.3.11-x64", "alpine.3.11", "alpine.3.10-x64", "alpine.3.10", "alpine.3.9-x64", "alpine.3.9", "alpine.3.8-x64", "alpine.3.8", "alpine.3.7-x64", "alpine.3.7", "alpine.3.6-x64", "alpine.3.6", "alpine-x64", "alpine", "linux-musl-x64", "linux-musl", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "alpine.3.13": [ "alpine.3.13", "alpine.3.12", "alpine.3.11", "alpine.3.10", "alpine.3.9", "alpine.3.8", "alpine.3.7", "alpine.3.6", "alpine", "linux-musl", "linux", "unix", "any", "base" ], "alpine.3.13-arm": [ "alpine.3.13-arm", "alpine.3.13", "alpine.3.12-arm", "alpine.3.12", "alpine.3.11-arm", "alpine.3.11", "alpine.3.10-arm", "alpine.3.10", "alpine.3.9-arm", "alpine.3.9", "alpine.3.8-arm", "alpine.3.8", "alpine.3.7-arm", "alpine.3.7", "alpine.3.6-arm", "alpine.3.6", "alpine-arm", "alpine", "linux-musl-arm", "linux-musl", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "alpine.3.13-arm64": [ "alpine.3.13-arm64", "alpine.3.13", "alpine.3.12-arm64", "alpine.3.12", "alpine.3.11-arm64", "alpine.3.11", "alpine.3.10-arm64", "alpine.3.10", "alpine.3.9-arm64", "alpine.3.9", "alpine.3.8-arm64", "alpine.3.8", "alpine.3.7-arm64", "alpine.3.7", "alpine.3.6-arm64", "alpine.3.6", "alpine-arm64", "alpine", "linux-musl-arm64", "linux-musl", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "alpine.3.13-x64": [ "alpine.3.13-x64", "alpine.3.13", "alpine.3.12-x64", "alpine.3.12", "alpine.3.11-x64", "alpine.3.11", "alpine.3.10-x64", "alpine.3.10", "alpine.3.9-x64", "alpine.3.9", "alpine.3.8-x64", "alpine.3.8", "alpine.3.7-x64", "alpine.3.7", "alpine.3.6-x64", "alpine.3.6", "alpine-x64", "alpine", "linux-musl-x64", "linux-musl", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "alpine.3.14": [ "alpine.3.14", "alpine.3.13", "alpine.3.12", "alpine.3.11", "alpine.3.10", "alpine.3.9", "alpine.3.8", "alpine.3.7", "alpine.3.6", "alpine", "linux-musl", "linux", "unix", "any", "base" ], "alpine.3.14-arm": [ "alpine.3.14-arm", "alpine.3.14", "alpine.3.13-arm", "alpine.3.13", "alpine.3.12-arm", "alpine.3.12", "alpine.3.11-arm", "alpine.3.11", "alpine.3.10-arm", "alpine.3.10", "alpine.3.9-arm", "alpine.3.9", "alpine.3.8-arm", "alpine.3.8", "alpine.3.7-arm", "alpine.3.7", "alpine.3.6-arm", "alpine.3.6", "alpine-arm", "alpine", "linux-musl-arm", "linux-musl", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "alpine.3.14-arm64": [ "alpine.3.14-arm64", "alpine.3.14", "alpine.3.13-arm64", "alpine.3.13", "alpine.3.12-arm64", "alpine.3.12", "alpine.3.11-arm64", "alpine.3.11", "alpine.3.10-arm64", "alpine.3.10", "alpine.3.9-arm64", "alpine.3.9", "alpine.3.8-arm64", "alpine.3.8", "alpine.3.7-arm64", "alpine.3.7", "alpine.3.6-arm64", "alpine.3.6", "alpine-arm64", "alpine", "linux-musl-arm64", "linux-musl", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "alpine.3.14-x64": [ "alpine.3.14-x64", "alpine.3.14", "alpine.3.13-x64", "alpine.3.13", "alpine.3.12-x64", "alpine.3.12", "alpine.3.11-x64", "alpine.3.11", "alpine.3.10-x64", "alpine.3.10", "alpine.3.9-x64", "alpine.3.9", "alpine.3.8-x64", "alpine.3.8", "alpine.3.7-x64", "alpine.3.7", "alpine.3.6-x64", "alpine.3.6", "alpine-x64", "alpine", "linux-musl-x64", "linux-musl", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "alpine.3.15": [ "alpine.3.15", "alpine.3.14", "alpine.3.13", "alpine.3.12", "alpine.3.11", "alpine.3.10", "alpine.3.9", "alpine.3.8", 
"alpine.3.7", "alpine.3.6", "alpine", "linux-musl", "linux", "unix", "any", "base" ], "alpine.3.15-arm": [ "alpine.3.15-arm", "alpine.3.15", "alpine.3.14-arm", "alpine.3.14", "alpine.3.13-arm", "alpine.3.13", "alpine.3.12-arm", "alpine.3.12", "alpine.3.11-arm", "alpine.3.11", "alpine.3.10-arm", "alpine.3.10", "alpine.3.9-arm", "alpine.3.9", "alpine.3.8-arm", "alpine.3.8", "alpine.3.7-arm", "alpine.3.7", "alpine.3.6-arm", "alpine.3.6", "alpine-arm", "alpine", "linux-musl-arm", "linux-musl", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "alpine.3.15-arm64": [ "alpine.3.15-arm64", "alpine.3.15", "alpine.3.14-arm64", "alpine.3.14", "alpine.3.13-arm64", "alpine.3.13", "alpine.3.12-arm64", "alpine.3.12", "alpine.3.11-arm64", "alpine.3.11", "alpine.3.10-arm64", "alpine.3.10", "alpine.3.9-arm64", "alpine.3.9", "alpine.3.8-arm64", "alpine.3.8", "alpine.3.7-arm64", "alpine.3.7", "alpine.3.6-arm64", "alpine.3.6", "alpine-arm64", "alpine", "linux-musl-arm64", "linux-musl", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "alpine.3.15-x64": [ "alpine.3.15-x64", "alpine.3.15", "alpine.3.14-x64", "alpine.3.14", "alpine.3.13-x64", "alpine.3.13", "alpine.3.12-x64", "alpine.3.12", "alpine.3.11-x64", "alpine.3.11", "alpine.3.10-x64", "alpine.3.10", "alpine.3.9-x64", "alpine.3.9", "alpine.3.8-x64", "alpine.3.8", "alpine.3.7-x64", "alpine.3.7", "alpine.3.6-x64", "alpine.3.6", "alpine-x64", "alpine", "linux-musl-x64", "linux-musl", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "alpine.3.6": [ "alpine.3.6", "alpine", "linux-musl", "linux", "unix", "any", "base" ], "alpine.3.6-arm": [ "alpine.3.6-arm", "alpine.3.6", "alpine-arm", "alpine", "linux-musl-arm", "linux-musl", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "alpine.3.6-arm64": [ "alpine.3.6-arm64", "alpine.3.6", "alpine-arm64", "alpine", "linux-musl-arm64", "linux-musl", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "alpine.3.6-x64": [ "alpine.3.6-x64", "alpine.3.6", "alpine-x64", "alpine", "linux-musl-x64", "linux-musl", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "alpine.3.7": [ "alpine.3.7", "alpine.3.6", "alpine", "linux-musl", "linux", "unix", "any", "base" ], "alpine.3.7-arm": [ "alpine.3.7-arm", "alpine.3.7", "alpine.3.6-arm", "alpine.3.6", "alpine-arm", "alpine", "linux-musl-arm", "linux-musl", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "alpine.3.7-arm64": [ "alpine.3.7-arm64", "alpine.3.7", "alpine.3.6-arm64", "alpine.3.6", "alpine-arm64", "alpine", "linux-musl-arm64", "linux-musl", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "alpine.3.7-x64": [ "alpine.3.7-x64", "alpine.3.7", "alpine.3.6-x64", "alpine.3.6", "alpine-x64", "alpine", "linux-musl-x64", "linux-musl", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "alpine.3.8": [ "alpine.3.8", "alpine.3.7", "alpine.3.6", "alpine", "linux-musl", "linux", "unix", "any", "base" ], "alpine.3.8-arm": [ "alpine.3.8-arm", "alpine.3.8", "alpine.3.7-arm", "alpine.3.7", "alpine.3.6-arm", "alpine.3.6", "alpine-arm", "alpine", "linux-musl-arm", "linux-musl", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "alpine.3.8-arm64": [ "alpine.3.8-arm64", "alpine.3.8", "alpine.3.7-arm64", "alpine.3.7", "alpine.3.6-arm64", "alpine.3.6", "alpine-arm64", "alpine", "linux-musl-arm64", "linux-musl", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "alpine.3.8-x64": [ "alpine.3.8-x64", "alpine.3.8", "alpine.3.7-x64", "alpine.3.7", "alpine.3.6-x64", "alpine.3.6", 
"alpine-x64", "alpine", "linux-musl-x64", "linux-musl", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "alpine.3.9": [ "alpine.3.9", "alpine.3.8", "alpine.3.7", "alpine.3.6", "alpine", "linux-musl", "linux", "unix", "any", "base" ], "alpine.3.9-arm": [ "alpine.3.9-arm", "alpine.3.9", "alpine.3.8-arm", "alpine.3.8", "alpine.3.7-arm", "alpine.3.7", "alpine.3.6-arm", "alpine.3.6", "alpine-arm", "alpine", "linux-musl-arm", "linux-musl", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "alpine.3.9-arm64": [ "alpine.3.9-arm64", "alpine.3.9", "alpine.3.8-arm64", "alpine.3.8", "alpine.3.7-arm64", "alpine.3.7", "alpine.3.6-arm64", "alpine.3.6", "alpine-arm64", "alpine", "linux-musl-arm64", "linux-musl", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "alpine.3.9-x64": [ "alpine.3.9-x64", "alpine.3.9", "alpine.3.8-x64", "alpine.3.8", "alpine.3.7-x64", "alpine.3.7", "alpine.3.6-x64", "alpine.3.6", "alpine-x64", "alpine", "linux-musl-x64", "linux-musl", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "android": [ "android", "linux", "unix", "any", "base" ], "android-arm": [ "android-arm", "android", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "android-arm64": [ "android-arm64", "android", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "android-x64": [ "android-x64", "android", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "android-x86": [ "android-x86", "android", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "android.21": [ "android.21", "android", "linux", "unix", "any", "base" ], "android.21-arm": [ "android.21-arm", "android.21", "android-arm", "android", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "android.21-arm64": [ "android.21-arm64", "android.21", "android-arm64", "android", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "android.21-x64": [ "android.21-x64", "android.21", "android-x64", "android", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "android.21-x86": [ "android.21-x86", "android.21", "android-x86", "android", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "android.22": [ "android.22", "android.21", "android", "linux", "unix", "any", "base" ], "android.22-arm": [ "android.22-arm", "android.22", "android.21-arm", "android.21", "android-arm", "android", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "android.22-arm64": [ "android.22-arm64", "android.22", "android.21-arm64", "android.21", "android-arm64", "android", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "android.22-x64": [ "android.22-x64", "android.22", "android.21-x64", "android.21", "android-x64", "android", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "android.22-x86": [ "android.22-x86", "android.22", "android.21-x86", "android.21", "android-x86", "android", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "android.23": [ "android.23", "android.22", "android.21", "android", "linux", "unix", "any", "base" ], "android.23-arm": [ "android.23-arm", "android.23", "android.22-arm", "android.22", "android.21-arm", "android.21", "android-arm", "android", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "android.23-arm64": [ "android.23-arm64", "android.23", "android.22-arm64", "android.22", "android.21-arm64", "android.21", "android-arm64", "android", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "android.23-x64": [ "android.23-x64", "android.23", "android.22-x64", "android.22", 
"android.21-x64", "android.21", "android-x64", "android", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "android.23-x86": [ "android.23-x86", "android.23", "android.22-x86", "android.22", "android.21-x86", "android.21", "android-x86", "android", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "android.24": [ "android.24", "android.23", "android.22", "android.21", "android", "linux", "unix", "any", "base" ], "android.24-arm": [ "android.24-arm", "android.24", "android.23-arm", "android.23", "android.22-arm", "android.22", "android.21-arm", "android.21", "android-arm", "android", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "android.24-arm64": [ "android.24-arm64", "android.24", "android.23-arm64", "android.23", "android.22-arm64", "android.22", "android.21-arm64", "android.21", "android-arm64", "android", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "android.24-x64": [ "android.24-x64", "android.24", "android.23-x64", "android.23", "android.22-x64", "android.22", "android.21-x64", "android.21", "android-x64", "android", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "android.24-x86": [ "android.24-x86", "android.24", "android.23-x86", "android.23", "android.22-x86", "android.22", "android.21-x86", "android.21", "android-x86", "android", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "android.25": [ "android.25", "android.24", "android.23", "android.22", "android.21", "android", "linux", "unix", "any", "base" ], "android.25-arm": [ "android.25-arm", "android.25", "android.24-arm", "android.24", "android.23-arm", "android.23", "android.22-arm", "android.22", "android.21-arm", "android.21", "android-arm", "android", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "android.25-arm64": [ "android.25-arm64", "android.25", "android.24-arm64", "android.24", "android.23-arm64", "android.23", "android.22-arm64", "android.22", "android.21-arm64", "android.21", "android-arm64", "android", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "android.25-x64": [ "android.25-x64", "android.25", "android.24-x64", "android.24", "android.23-x64", "android.23", "android.22-x64", "android.22", "android.21-x64", "android.21", "android-x64", "android", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "android.25-x86": [ "android.25-x86", "android.25", "android.24-x86", "android.24", "android.23-x86", "android.23", "android.22-x86", "android.22", "android.21-x86", "android.21", "android-x86", "android", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "android.26": [ "android.26", "android.25", "android.24", "android.23", "android.22", "android.21", "android", "linux", "unix", "any", "base" ], "android.26-arm": [ "android.26-arm", "android.26", "android.25-arm", "android.25", "android.24-arm", "android.24", "android.23-arm", "android.23", "android.22-arm", "android.22", "android.21-arm", "android.21", "android-arm", "android", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "android.26-arm64": [ "android.26-arm64", "android.26", "android.25-arm64", "android.25", "android.24-arm64", "android.24", "android.23-arm64", "android.23", "android.22-arm64", "android.22", "android.21-arm64", "android.21", "android-arm64", "android", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "android.26-x64": [ "android.26-x64", "android.26", "android.25-x64", "android.25", "android.24-x64", "android.24", "android.23-x64", "android.23", "android.22-x64", "android.22", "android.21-x64", 
"android.21", "android-x64", "android", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "android.26-x86": [ "android.26-x86", "android.26", "android.25-x86", "android.25", "android.24-x86", "android.24", "android.23-x86", "android.23", "android.22-x86", "android.22", "android.21-x86", "android.21", "android-x86", "android", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "android.27": [ "android.27", "android.26", "android.25", "android.24", "android.23", "android.22", "android.21", "android", "linux", "unix", "any", "base" ], "android.27-arm": [ "android.27-arm", "android.27", "android.26-arm", "android.26", "android.25-arm", "android.25", "android.24-arm", "android.24", "android.23-arm", "android.23", "android.22-arm", "android.22", "android.21-arm", "android.21", "android-arm", "android", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "android.27-arm64": [ "android.27-arm64", "android.27", "android.26-arm64", "android.26", "android.25-arm64", "android.25", "android.24-arm64", "android.24", "android.23-arm64", "android.23", "android.22-arm64", "android.22", "android.21-arm64", "android.21", "android-arm64", "android", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "android.27-x64": [ "android.27-x64", "android.27", "android.26-x64", "android.26", "android.25-x64", "android.25", "android.24-x64", "android.24", "android.23-x64", "android.23", "android.22-x64", "android.22", "android.21-x64", "android.21", "android-x64", "android", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "android.27-x86": [ "android.27-x86", "android.27", "android.26-x86", "android.26", "android.25-x86", "android.25", "android.24-x86", "android.24", "android.23-x86", "android.23", "android.22-x86", "android.22", "android.21-x86", "android.21", "android-x86", "android", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "android.28": [ "android.28", "android.27", "android.26", "android.25", "android.24", "android.23", "android.22", "android.21", "android", "linux", "unix", "any", "base" ], "android.28-arm": [ "android.28-arm", "android.28", "android.27-arm", "android.27", "android.26-arm", "android.26", "android.25-arm", "android.25", "android.24-arm", "android.24", "android.23-arm", "android.23", "android.22-arm", "android.22", "android.21-arm", "android.21", "android-arm", "android", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "android.28-arm64": [ "android.28-arm64", "android.28", "android.27-arm64", "android.27", "android.26-arm64", "android.26", "android.25-arm64", "android.25", "android.24-arm64", "android.24", "android.23-arm64", "android.23", "android.22-arm64", "android.22", "android.21-arm64", "android.21", "android-arm64", "android", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "android.28-x64": [ "android.28-x64", "android.28", "android.27-x64", "android.27", "android.26-x64", "android.26", "android.25-x64", "android.25", "android.24-x64", "android.24", "android.23-x64", "android.23", "android.22-x64", "android.22", "android.21-x64", "android.21", "android-x64", "android", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "android.28-x86": [ "android.28-x86", "android.28", "android.27-x86", "android.27", "android.26-x86", "android.26", "android.25-x86", "android.25", "android.24-x86", "android.24", "android.23-x86", "android.23", "android.22-x86", "android.22", "android.21-x86", "android.21", "android-x86", "android", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "android.29": [ 
"android.29", "android.28", "android.27", "android.26", "android.25", "android.24", "android.23", "android.22", "android.21", "android", "linux", "unix", "any", "base" ], "android.29-arm": [ "android.29-arm", "android.29", "android.28-arm", "android.28", "android.27-arm", "android.27", "android.26-arm", "android.26", "android.25-arm", "android.25", "android.24-arm", "android.24", "android.23-arm", "android.23", "android.22-arm", "android.22", "android.21-arm", "android.21", "android-arm", "android", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "android.29-arm64": [ "android.29-arm64", "android.29", "android.28-arm64", "android.28", "android.27-arm64", "android.27", "android.26-arm64", "android.26", "android.25-arm64", "android.25", "android.24-arm64", "android.24", "android.23-arm64", "android.23", "android.22-arm64", "android.22", "android.21-arm64", "android.21", "android-arm64", "android", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "android.29-x64": [ "android.29-x64", "android.29", "android.28-x64", "android.28", "android.27-x64", "android.27", "android.26-x64", "android.26", "android.25-x64", "android.25", "android.24-x64", "android.24", "android.23-x64", "android.23", "android.22-x64", "android.22", "android.21-x64", "android.21", "android-x64", "android", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "android.29-x86": [ "android.29-x86", "android.29", "android.28-x86", "android.28", "android.27-x86", "android.27", "android.26-x86", "android.26", "android.25-x86", "android.25", "android.24-x86", "android.24", "android.23-x86", "android.23", "android.22-x86", "android.22", "android.21-x86", "android.21", "android-x86", "android", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "android.30": [ "android.30", "android.29", "android.28", "android.27", "android.26", "android.25", "android.24", "android.23", "android.22", "android.21", "android", "linux", "unix", "any", "base" ], "android.30-arm": [ "android.30-arm", "android.30", "android.29-arm", "android.29", "android.28-arm", "android.28", "android.27-arm", "android.27", "android.26-arm", "android.26", "android.25-arm", "android.25", "android.24-arm", "android.24", "android.23-arm", "android.23", "android.22-arm", "android.22", "android.21-arm", "android.21", "android-arm", "android", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "android.30-arm64": [ "android.30-arm64", "android.30", "android.29-arm64", "android.29", "android.28-arm64", "android.28", "android.27-arm64", "android.27", "android.26-arm64", "android.26", "android.25-arm64", "android.25", "android.24-arm64", "android.24", "android.23-arm64", "android.23", "android.22-arm64", "android.22", "android.21-arm64", "android.21", "android-arm64", "android", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "android.30-x64": [ "android.30-x64", "android.30", "android.29-x64", "android.29", "android.28-x64", "android.28", "android.27-x64", "android.27", "android.26-x64", "android.26", "android.25-x64", "android.25", "android.24-x64", "android.24", "android.23-x64", "android.23", "android.22-x64", "android.22", "android.21-x64", "android.21", "android-x64", "android", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "android.30-x86": [ "android.30-x86", "android.30", "android.29-x86", "android.29", "android.28-x86", "android.28", "android.27-x86", "android.27", "android.26-x86", "android.26", "android.25-x86", "android.25", "android.24-x86", "android.24", "android.23-x86", "android.23", 
"android.22-x86", "android.22", "android.21-x86", "android.21", "android-x86", "android", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "android.31": [ "android.31", "android.30", "android.29", "android.28", "android.27", "android.26", "android.25", "android.24", "android.23", "android.22", "android.21", "android", "linux", "unix", "any", "base" ], "android.31-arm": [ "android.31-arm", "android.31", "android.30-arm", "android.30", "android.29-arm", "android.29", "android.28-arm", "android.28", "android.27-arm", "android.27", "android.26-arm", "android.26", "android.25-arm", "android.25", "android.24-arm", "android.24", "android.23-arm", "android.23", "android.22-arm", "android.22", "android.21-arm", "android.21", "android-arm", "android", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "android.31-arm64": [ "android.31-arm64", "android.31", "android.30-arm64", "android.30", "android.29-arm64", "android.29", "android.28-arm64", "android.28", "android.27-arm64", "android.27", "android.26-arm64", "android.26", "android.25-arm64", "android.25", "android.24-arm64", "android.24", "android.23-arm64", "android.23", "android.22-arm64", "android.22", "android.21-arm64", "android.21", "android-arm64", "android", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "android.31-x64": [ "android.31-x64", "android.31", "android.30-x64", "android.30", "android.29-x64", "android.29", "android.28-x64", "android.28", "android.27-x64", "android.27", "android.26-x64", "android.26", "android.25-x64", "android.25", "android.24-x64", "android.24", "android.23-x64", "android.23", "android.22-x64", "android.22", "android.21-x64", "android.21", "android-x64", "android", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "android.31-x86": [ "android.31-x86", "android.31", "android.30-x86", "android.30", "android.29-x86", "android.29", "android.28-x86", "android.28", "android.27-x86", "android.27", "android.26-x86", "android.26", "android.25-x86", "android.25", "android.24-x86", "android.24", "android.23-x86", "android.23", "android.22-x86", "android.22", "android.21-x86", "android.21", "android-x86", "android", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "any": [ "any", "base" ], "aot": [ "aot", "any", "base" ], "arch": [ "arch", "linux", "unix", "any", "base" ], "arch-x64": [ "arch-x64", "arch", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "base": [ "base" ], "browser": [ "browser", "any", "base" ], "browser-wasm": [ "browser-wasm", "browser", "any", "base" ], "centos": [ "centos", "rhel", "linux", "unix", "any", "base" ], "centos-arm64": [ "centos-arm64", "centos", "rhel-arm64", "rhel", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "centos-x64": [ "centos-x64", "centos", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "centos.7": [ "centos.7", "centos", "rhel.7", "rhel", "linux", "unix", "any", "base" ], "centos.7-x64": [ "centos.7-x64", "centos.7", "centos-x64", "rhel.7-x64", "centos", "rhel.7", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "centos.8": [ "centos.8", "centos", "rhel.8", "rhel", "linux", "unix", "any", "base" ], "centos.8-arm64": [ "centos.8-arm64", "centos.8", "centos-arm64", "rhel.8-arm64", "centos", "rhel.8", "rhel-arm64", "rhel", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "centos.8-x64": [ "centos.8-x64", "centos.8", "centos-x64", "rhel.8-x64", "centos", "rhel.8", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", 
"any", "base" ], "centos.9": [ "centos.9", "centos", "rhel.9", "rhel", "linux", "unix", "any", "base" ], "centos.9-arm64": [ "centos.9-arm64", "centos.9", "centos-arm64", "rhel.9-arm64", "centos", "rhel.9", "rhel-arm64", "rhel", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "centos.9-x64": [ "centos.9-x64", "centos.9", "centos-x64", "rhel.9-x64", "centos", "rhel.9", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "debian": [ "debian", "linux", "unix", "any", "base" ], "debian-arm": [ "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "debian-arm64": [ "debian-arm64", "debian", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "debian-armel": [ "debian-armel", "debian", "linux-armel", "linux", "unix-armel", "unix", "any", "base" ], "debian-x64": [ "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "debian-x86": [ "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "debian.10": [ "debian.10", "debian", "linux", "unix", "any", "base" ], "debian.10-arm": [ "debian.10-arm", "debian.10", "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "debian.10-arm64": [ "debian.10-arm64", "debian.10", "debian-arm64", "debian", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "debian.10-armel": [ "debian.10-armel", "debian.10", "debian-armel", "debian", "linux-armel", "linux", "unix-armel", "unix", "any", "base" ], "debian.10-x64": [ "debian.10-x64", "debian.10", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "debian.10-x86": [ "debian.10-x86", "debian.10", "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "debian.11": [ "debian.11", "debian", "linux", "unix", "any", "base" ], "debian.11-arm": [ "debian.11-arm", "debian.11", "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "debian.11-arm64": [ "debian.11-arm64", "debian.11", "debian-arm64", "debian", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "debian.11-armel": [ "debian.11-armel", "debian.11", "debian-armel", "debian", "linux-armel", "linux", "unix-armel", "unix", "any", "base" ], "debian.11-x64": [ "debian.11-x64", "debian.11", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "debian.11-x86": [ "debian.11-x86", "debian.11", "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "debian.8": [ "debian.8", "debian", "linux", "unix", "any", "base" ], "debian.8-arm": [ "debian.8-arm", "debian.8", "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "debian.8-arm64": [ "debian.8-arm64", "debian.8", "debian-arm64", "debian", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "debian.8-armel": [ "debian.8-armel", "debian.8", "debian-armel", "debian", "linux-armel", "linux", "unix-armel", "unix", "any", "base" ], "debian.8-x64": [ "debian.8-x64", "debian.8", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "debian.8-x86": [ "debian.8-x86", "debian.8", "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "debian.9": [ "debian.9", "debian", "linux", "unix", "any", "base" ], "debian.9-arm": [ "debian.9-arm", "debian.9", "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "debian.9-arm64": [ "debian.9-arm64", "debian.9", "debian-arm64", "debian", 
"linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "debian.9-armel": [ "debian.9-armel", "debian.9", "debian-armel", "debian", "linux-armel", "linux", "unix-armel", "unix", "any", "base" ], "debian.9-x64": [ "debian.9-x64", "debian.9", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "debian.9-x86": [ "debian.9-x86", "debian.9", "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "exherbo": [ "exherbo", "linux", "unix", "any", "base" ], "exherbo-x64": [ "exherbo-x64", "exherbo", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "fedora": [ "fedora", "linux", "unix", "any", "base" ], "fedora-arm64": [ "fedora-arm64", "fedora", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "fedora-x64": [ "fedora-x64", "fedora", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "fedora.23": [ "fedora.23", "fedora", "linux", "unix", "any", "base" ], "fedora.23-arm64": [ "fedora.23-arm64", "fedora.23", "fedora-arm64", "fedora", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "fedora.23-x64": [ "fedora.23-x64", "fedora.23", "fedora-x64", "fedora", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "fedora.24": [ "fedora.24", "fedora", "linux", "unix", "any", "base" ], "fedora.24-arm64": [ "fedora.24-arm64", "fedora.24", "fedora-arm64", "fedora", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "fedora.24-x64": [ "fedora.24-x64", "fedora.24", "fedora-x64", "fedora", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "fedora.25": [ "fedora.25", "fedora", "linux", "unix", "any", "base" ], "fedora.25-arm64": [ "fedora.25-arm64", "fedora.25", "fedora-arm64", "fedora", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "fedora.25-x64": [ "fedora.25-x64", "fedora.25", "fedora-x64", "fedora", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "fedora.26": [ "fedora.26", "fedora", "linux", "unix", "any", "base" ], "fedora.26-arm64": [ "fedora.26-arm64", "fedora.26", "fedora-arm64", "fedora", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "fedora.26-x64": [ "fedora.26-x64", "fedora.26", "fedora-x64", "fedora", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "fedora.27": [ "fedora.27", "fedora", "linux", "unix", "any", "base" ], "fedora.27-arm64": [ "fedora.27-arm64", "fedora.27", "fedora-arm64", "fedora", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "fedora.27-x64": [ "fedora.27-x64", "fedora.27", "fedora-x64", "fedora", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "fedora.28": [ "fedora.28", "fedora", "linux", "unix", "any", "base" ], "fedora.28-arm64": [ "fedora.28-arm64", "fedora.28", "fedora-arm64", "fedora", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "fedora.28-x64": [ "fedora.28-x64", "fedora.28", "fedora-x64", "fedora", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "fedora.29": [ "fedora.29", "fedora", "linux", "unix", "any", "base" ], "fedora.29-arm64": [ "fedora.29-arm64", "fedora.29", "fedora-arm64", "fedora", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "fedora.29-x64": [ "fedora.29-x64", "fedora.29", "fedora-x64", "fedora", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "fedora.30": [ "fedora.30", "fedora", "linux", "unix", "any", "base" ], "fedora.30-arm64": [ "fedora.30-arm64", "fedora.30", "fedora-arm64", "fedora", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "fedora.30-x64": [ "fedora.30-x64", 
"fedora.30", "fedora-x64", "fedora", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "fedora.31": [ "fedora.31", "fedora", "linux", "unix", "any", "base" ], "fedora.31-arm64": [ "fedora.31-arm64", "fedora.31", "fedora-arm64", "fedora", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "fedora.31-x64": [ "fedora.31-x64", "fedora.31", "fedora-x64", "fedora", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "fedora.32": [ "fedora.32", "fedora", "linux", "unix", "any", "base" ], "fedora.32-arm64": [ "fedora.32-arm64", "fedora.32", "fedora-arm64", "fedora", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "fedora.32-x64": [ "fedora.32-x64", "fedora.32", "fedora-x64", "fedora", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "fedora.33": [ "fedora.33", "fedora", "linux", "unix", "any", "base" ], "fedora.33-arm64": [ "fedora.33-arm64", "fedora.33", "fedora-arm64", "fedora", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "fedora.33-x64": [ "fedora.33-x64", "fedora.33", "fedora-x64", "fedora", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "fedora.34": [ "fedora.34", "fedora", "linux", "unix", "any", "base" ], "fedora.34-arm64": [ "fedora.34-arm64", "fedora.34", "fedora-arm64", "fedora", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "fedora.34-x64": [ "fedora.34-x64", "fedora.34", "fedora-x64", "fedora", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "fedora.35": [ "fedora.35", "fedora", "linux", "unix", "any", "base" ], "fedora.35-arm64": [ "fedora.35-arm64", "fedora.35", "fedora-arm64", "fedora", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "fedora.35-x64": [ "fedora.35-x64", "fedora.35", "fedora-x64", "fedora", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "fedora.36": [ "fedora.36", "fedora", "linux", "unix", "any", "base" ], "fedora.36-arm64": [ "fedora.36-arm64", "fedora.36", "fedora-arm64", "fedora", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "fedora.36-x64": [ "fedora.36-x64", "fedora.36", "fedora-x64", "fedora", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "freebsd": [ "freebsd", "unix", "any", "base" ], "freebsd-x64": [ "freebsd-x64", "freebsd", "unix-x64", "unix", "any", "base" ], "freebsd.12": [ "freebsd.12", "freebsd", "unix", "any", "base" ], "freebsd.12-x64": [ "freebsd.12-x64", "freebsd.12", "freebsd-x64", "freebsd", "unix-x64", "unix", "any", "base" ], "freebsd.13": [ "freebsd.13", "freebsd.12", "freebsd", "unix", "any", "base" ], "freebsd.13-x64": [ "freebsd.13-x64", "freebsd.13", "freebsd.12-x64", "freebsd.12", "freebsd-x64", "freebsd", "unix-x64", "unix", "any", "base" ], "gentoo": [ "gentoo", "linux", "unix", "any", "base" ], "gentoo-x64": [ "gentoo-x64", "gentoo", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "illumos": [ "illumos", "unix", "any", "base" ], "illumos-x64": [ "illumos-x64", "illumos", "unix-x64", "unix", "any", "base" ], "ios": [ "ios", "unix", "any", "base" ], "ios-arm": [ "ios-arm", "ios", "unix-arm", "unix", "any", "base" ], "ios-arm64": [ "ios-arm64", "ios", "unix-arm64", "unix", "any", "base" ], "ios-x64": [ "ios-x64", "ios", "unix-x64", "unix", "any", "base" ], "ios-x86": [ "ios-x86", "ios", "unix-x86", "unix", "any", "base" ], "ios.10": [ "ios.10", "ios", "unix", "any", "base" ], "ios.10-arm": [ "ios.10-arm", "ios.10", "ios-arm", "ios", "unix-arm", "unix", "any", "base" ], "ios.10-arm64": [ "ios.10-arm64", "ios.10", "ios-arm64", "ios", "unix-arm64", "unix", "any", 
"base" ], "ios.10-x64": [ "ios.10-x64", "ios.10", "ios-x64", "ios", "unix-x64", "unix", "any", "base" ], "ios.10-x86": [ "ios.10-x86", "ios.10", "ios-x86", "ios", "unix-x86", "unix", "any", "base" ], "ios.11": [ "ios.11", "ios.10", "ios", "unix", "any", "base" ], "ios.11-arm64": [ "ios.11-arm64", "ios.11", "ios.10-arm64", "ios.10", "ios-arm64", "ios", "unix-arm64", "unix", "any", "base" ], "ios.11-x64": [ "ios.11-x64", "ios.11", "ios.10-x64", "ios.10", "ios-x64", "ios", "unix-x64", "unix", "any", "base" ], "ios.12": [ "ios.12", "ios.11", "ios.10", "ios", "unix", "any", "base" ], "ios.12-arm64": [ "ios.12-arm64", "ios.12", "ios.11-arm64", "ios.11", "ios.10-arm64", "ios.10", "ios-arm64", "ios", "unix-arm64", "unix", "any", "base" ], "ios.12-x64": [ "ios.12-x64", "ios.12", "ios.11-x64", "ios.11", "ios.10-x64", "ios.10", "ios-x64", "ios", "unix-x64", "unix", "any", "base" ], "ios.13": [ "ios.13", "ios.12", "ios.11", "ios.10", "ios", "unix", "any", "base" ], "ios.13-arm64": [ "ios.13-arm64", "ios.13", "ios.12-arm64", "ios.12", "ios.11-arm64", "ios.11", "ios.10-arm64", "ios.10", "ios-arm64", "ios", "unix-arm64", "unix", "any", "base" ], "ios.13-x64": [ "ios.13-x64", "ios.13", "ios.12-x64", "ios.12", "ios.11-x64", "ios.11", "ios.10-x64", "ios.10", "ios-x64", "ios", "unix-x64", "unix", "any", "base" ], "ios.14": [ "ios.14", "ios.13", "ios.12", "ios.11", "ios.10", "ios", "unix", "any", "base" ], "ios.14-arm64": [ "ios.14-arm64", "ios.14", "ios.13-arm64", "ios.13", "ios.12-arm64", "ios.12", "ios.11-arm64", "ios.11", "ios.10-arm64", "ios.10", "ios-arm64", "ios", "unix-arm64", "unix", "any", "base" ], "ios.14-x64": [ "ios.14-x64", "ios.14", "ios.13-x64", "ios.13", "ios.12-x64", "ios.12", "ios.11-x64", "ios.11", "ios.10-x64", "ios.10", "ios-x64", "ios", "unix-x64", "unix", "any", "base" ], "ios.15": [ "ios.15", "ios.14", "ios.13", "ios.12", "ios.11", "ios.10", "ios", "unix", "any", "base" ], "ios.15-arm64": [ "ios.15-arm64", "ios.15", "ios.14-arm64", "ios.14", "ios.13-arm64", "ios.13", "ios.12-arm64", "ios.12", "ios.11-arm64", "ios.11", "ios.10-arm64", "ios.10", "ios-arm64", "ios", "unix-arm64", "unix", "any", "base" ], "ios.15-x64": [ "ios.15-x64", "ios.15", "ios.14-x64", "ios.14", "ios.13-x64", "ios.13", "ios.12-x64", "ios.12", "ios.11-x64", "ios.11", "ios.10-x64", "ios.10", "ios-x64", "ios", "unix-x64", "unix", "any", "base" ], "iossimulator": [ "iossimulator", "ios", "unix", "any", "base" ], "iossimulator-arm64": [ "iossimulator-arm64", "iossimulator", "ios-arm64", "ios", "unix-arm64", "unix", "any", "base" ], "iossimulator-x64": [ "iossimulator-x64", "iossimulator", "ios-x64", "ios", "unix-x64", "unix", "any", "base" ], "iossimulator-x86": [ "iossimulator-x86", "iossimulator", "ios-x86", "ios", "unix-x86", "unix", "any", "base" ], "iossimulator.10": [ "iossimulator.10", "iossimulator", "ios", "unix", "any", "base" ], "iossimulator.10-arm64": [ "iossimulator.10-arm64", "iossimulator.10", "iossimulator-arm64", "iossimulator", "ios-arm64", "ios", "unix-arm64", "unix", "any", "base" ], "iossimulator.10-x64": [ "iossimulator.10-x64", "iossimulator.10", "iossimulator-x64", "iossimulator", "ios-x64", "ios", "unix-x64", "unix", "any", "base" ], "iossimulator.10-x86": [ "iossimulator.10-x86", "iossimulator.10", "iossimulator-x86", "iossimulator", "ios-x86", "ios", "unix-x86", "unix", "any", "base" ], "iossimulator.11": [ "iossimulator.11", "iossimulator.10", "iossimulator", "ios", "unix", "any", "base" ], "iossimulator.11-arm64": [ "iossimulator.11-arm64", "iossimulator.11", "iossimulator.10-arm64", 
"iossimulator.10", "iossimulator-arm64", "iossimulator", "ios-arm64", "ios", "unix-arm64", "unix", "any", "base" ], "iossimulator.11-x64": [ "iossimulator.11-x64", "iossimulator.11", "iossimulator.10-x64", "iossimulator.10", "iossimulator-x64", "iossimulator", "ios-x64", "ios", "unix-x64", "unix", "any", "base" ], "iossimulator.12": [ "iossimulator.12", "iossimulator.11", "iossimulator.10", "iossimulator", "ios", "unix", "any", "base" ], "iossimulator.12-arm64": [ "iossimulator.12-arm64", "iossimulator.12", "iossimulator.11-arm64", "iossimulator.11", "iossimulator.10-arm64", "iossimulator.10", "iossimulator-arm64", "iossimulator", "ios-arm64", "ios", "unix-arm64", "unix", "any", "base" ], "iossimulator.12-x64": [ "iossimulator.12-x64", "iossimulator.12", "iossimulator.11-x64", "iossimulator.11", "iossimulator.10-x64", "iossimulator.10", "iossimulator-x64", "iossimulator", "ios-x64", "ios", "unix-x64", "unix", "any", "base" ], "iossimulator.13": [ "iossimulator.13", "iossimulator.12", "iossimulator.11", "iossimulator.10", "iossimulator", "ios", "unix", "any", "base" ], "iossimulator.13-arm64": [ "iossimulator.13-arm64", "iossimulator.13", "iossimulator.12-arm64", "iossimulator.12", "iossimulator.11-arm64", "iossimulator.11", "iossimulator.10-arm64", "iossimulator.10", "iossimulator-arm64", "iossimulator", "ios-arm64", "ios", "unix-arm64", "unix", "any", "base" ], "iossimulator.13-x64": [ "iossimulator.13-x64", "iossimulator.13", "iossimulator.12-x64", "iossimulator.12", "iossimulator.11-x64", "iossimulator.11", "iossimulator.10-x64", "iossimulator.10", "iossimulator-x64", "iossimulator", "ios-x64", "ios", "unix-x64", "unix", "any", "base" ], "iossimulator.14": [ "iossimulator.14", "iossimulator.13", "iossimulator.12", "iossimulator.11", "iossimulator.10", "iossimulator", "ios", "unix", "any", "base" ], "iossimulator.14-arm64": [ "iossimulator.14-arm64", "iossimulator.14", "iossimulator.13-arm64", "iossimulator.13", "iossimulator.12-arm64", "iossimulator.12", "iossimulator.11-arm64", "iossimulator.11", "iossimulator.10-arm64", "iossimulator.10", "iossimulator-arm64", "iossimulator", "ios-arm64", "ios", "unix-arm64", "unix", "any", "base" ], "iossimulator.14-x64": [ "iossimulator.14-x64", "iossimulator.14", "iossimulator.13-x64", "iossimulator.13", "iossimulator.12-x64", "iossimulator.12", "iossimulator.11-x64", "iossimulator.11", "iossimulator.10-x64", "iossimulator.10", "iossimulator-x64", "iossimulator", "ios-x64", "ios", "unix-x64", "unix", "any", "base" ], "iossimulator.15": [ "iossimulator.15", "iossimulator.14", "iossimulator.13", "iossimulator.12", "iossimulator.11", "iossimulator.10", "iossimulator", "ios", "unix", "any", "base" ], "iossimulator.15-arm64": [ "iossimulator.15-arm64", "iossimulator.15", "iossimulator.14-arm64", "iossimulator.14", "iossimulator.13-arm64", "iossimulator.13", "iossimulator.12-arm64", "iossimulator.12", "iossimulator.11-arm64", "iossimulator.11", "iossimulator.10-arm64", "iossimulator.10", "iossimulator-arm64", "iossimulator", "ios-arm64", "ios", "unix-arm64", "unix", "any", "base" ], "iossimulator.15-x64": [ "iossimulator.15-x64", "iossimulator.15", "iossimulator.14-x64", "iossimulator.14", "iossimulator.13-x64", "iossimulator.13", "iossimulator.12-x64", "iossimulator.12", "iossimulator.11-x64", "iossimulator.11", "iossimulator.10-x64", "iossimulator.10", "iossimulator-x64", "iossimulator", "ios-x64", "ios", "unix-x64", "unix", "any", "base" ], "linux": [ "linux", "unix", "any", "base" ], "linux-arm": [ "linux-arm", "linux", "unix-arm", "unix", "any", 
"base" ], "linux-arm64": [ "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "linux-armel": [ "linux-armel", "linux", "unix-armel", "unix", "any", "base" ], "linux-loongarch64": [ "linux-loongarch64", "linux", "unix-loongarch64", "unix", "any", "base" ], "linux-armv6": [ "linux-armv6", "linux", "unix-armv6", "unix", "any", "base" ], "linux-mips64": [ "linux-mips64", "linux", "unix-mips64", "unix", "any", "base" ], "linux-musl": [ "linux-musl", "linux", "unix", "any", "base" ], "linux-musl-arm": [ "linux-musl-arm", "linux-musl", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "linux-musl-arm64": [ "linux-musl-arm64", "linux-musl", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "linux-musl-armel": [ "linux-musl-armel", "linux-musl", "linux-armel", "linux", "unix-armel", "unix", "any", "base" ], "linux-musl-s390x": [ "linux-musl-s390x", "linux-musl", "linux-s390x", "linux", "unix-s390x", "unix", "any", "base" ], "linux-musl-x64": [ "linux-musl-x64", "linux-musl", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "linux-musl-x86": [ "linux-musl-x86", "linux-musl", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "linux-s390x": [ "linux-s390x", "linux", "unix-s390x", "unix", "any", "base" ], "linux-x64": [ "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "linux-x86": [ "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "linuxmint.17": [ "linuxmint.17", "ubuntu.14.04", "ubuntu", "debian", "linux", "unix", "any", "base" ], "linuxmint.17-x64": [ "linuxmint.17-x64", "linuxmint.17", "ubuntu.14.04-x64", "ubuntu.14.04", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "linuxmint.17.1": [ "linuxmint.17.1", "linuxmint.17", "ubuntu.14.04", "ubuntu", "debian", "linux", "unix", "any", "base" ], "linuxmint.17.1-x64": [ "linuxmint.17.1-x64", "linuxmint.17.1", "linuxmint.17-x64", "linuxmint.17", "ubuntu.14.04-x64", "ubuntu.14.04", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "linuxmint.17.2": [ "linuxmint.17.2", "linuxmint.17.1", "linuxmint.17", "ubuntu.14.04", "ubuntu", "debian", "linux", "unix", "any", "base" ], "linuxmint.17.2-x64": [ "linuxmint.17.2-x64", "linuxmint.17.2", "linuxmint.17.1-x64", "linuxmint.17.1", "linuxmint.17-x64", "linuxmint.17", "ubuntu.14.04-x64", "ubuntu.14.04", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "linuxmint.17.3": [ "linuxmint.17.3", "linuxmint.17.2", "linuxmint.17.1", "linuxmint.17", "ubuntu.14.04", "ubuntu", "debian", "linux", "unix", "any", "base" ], "linuxmint.17.3-x64": [ "linuxmint.17.3-x64", "linuxmint.17.3", "linuxmint.17.2-x64", "linuxmint.17.2", "linuxmint.17.1-x64", "linuxmint.17.1", "linuxmint.17-x64", "linuxmint.17", "ubuntu.14.04-x64", "ubuntu.14.04", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "linuxmint.18": [ "linuxmint.18", "ubuntu.16.04", "ubuntu", "debian", "linux", "unix", "any", "base" ], "linuxmint.18-x64": [ "linuxmint.18-x64", "linuxmint.18", "ubuntu.16.04-x64", "ubuntu.16.04", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "linuxmint.18.1": [ "linuxmint.18.1", "linuxmint.18", "ubuntu.16.04", "ubuntu", "debian", "linux", "unix", "any", "base" ], "linuxmint.18.1-x64": [ "linuxmint.18.1-x64", "linuxmint.18.1", "linuxmint.18-x64", "linuxmint.18", "ubuntu.16.04-x64", 
"ubuntu.16.04", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "linuxmint.18.2": [ "linuxmint.18.2", "linuxmint.18.1", "linuxmint.18", "ubuntu.16.04", "ubuntu", "debian", "linux", "unix", "any", "base" ], "linuxmint.18.2-x64": [ "linuxmint.18.2-x64", "linuxmint.18.2", "linuxmint.18.1-x64", "linuxmint.18.1", "linuxmint.18-x64", "linuxmint.18", "ubuntu.16.04-x64", "ubuntu.16.04", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "linuxmint.18.3": [ "linuxmint.18.3", "linuxmint.18.2", "linuxmint.18.1", "linuxmint.18", "ubuntu.16.04", "ubuntu", "debian", "linux", "unix", "any", "base" ], "linuxmint.18.3-x64": [ "linuxmint.18.3-x64", "linuxmint.18.3", "linuxmint.18.2-x64", "linuxmint.18.2", "linuxmint.18.1-x64", "linuxmint.18.1", "linuxmint.18-x64", "linuxmint.18", "ubuntu.16.04-x64", "ubuntu.16.04", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "linuxmint.19": [ "linuxmint.19", "ubuntu.18.04", "ubuntu", "debian", "linux", "unix", "any", "base" ], "linuxmint.19-x64": [ "linuxmint.19-x64", "linuxmint.19", "ubuntu.18.04-x64", "ubuntu.18.04", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "linuxmint.19.1": [ "linuxmint.19.1", "linuxmint.19", "ubuntu.18.04", "ubuntu", "debian", "linux", "unix", "any", "base" ], "linuxmint.19.1-x64": [ "linuxmint.19.1-x64", "linuxmint.19.1", "linuxmint.19-x64", "linuxmint.19", "ubuntu.18.04-x64", "ubuntu.18.04", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "linuxmint.19.2": [ "linuxmint.19.2", "linuxmint.19.1", "linuxmint.19", "ubuntu.18.04", "ubuntu", "debian", "linux", "unix", "any", "base" ], "linuxmint.19.2-x64": [ "linuxmint.19.2-x64", "linuxmint.19.2", "linuxmint.19.1-x64", "linuxmint.19.1", "linuxmint.19-x64", "linuxmint.19", "ubuntu.18.04-x64", "ubuntu.18.04", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "maccatalyst": [ "maccatalyst", "ios", "unix", "any", "base" ], "maccatalyst-arm64": [ "maccatalyst-arm64", "maccatalyst", "ios-arm64", "ios", "unix-arm64", "unix", "any", "base" ], "maccatalyst-x64": [ "maccatalyst-x64", "maccatalyst", "ios-x64", "ios", "unix-x64", "unix", "any", "base" ], "maccatalyst.13": [ "maccatalyst.13", "maccatalyst", "ios", "unix", "any", "base" ], "maccatalyst.13-arm64": [ "maccatalyst.13-arm64", "maccatalyst.13", "maccatalyst-arm64", "maccatalyst", "ios-arm64", "ios", "unix-arm64", "unix", "any", "base" ], "maccatalyst.13-x64": [ "maccatalyst.13-x64", "maccatalyst.13", "maccatalyst-x64", "maccatalyst", "ios-x64", "ios", "unix-x64", "unix", "any", "base" ], "maccatalyst.14": [ "maccatalyst.14", "maccatalyst.13", "maccatalyst", "ios", "unix", "any", "base" ], "maccatalyst.14-arm64": [ "maccatalyst.14-arm64", "maccatalyst.14", "maccatalyst.13-arm64", "maccatalyst.13", "maccatalyst-arm64", "maccatalyst", "ios-arm64", "ios", "unix-arm64", "unix", "any", "base" ], "maccatalyst.14-x64": [ "maccatalyst.14-x64", "maccatalyst.14", "maccatalyst.13-x64", "maccatalyst.13", "maccatalyst-x64", "maccatalyst", "ios-x64", "ios", "unix-x64", "unix", "any", "base" ], "maccatalyst.15": [ "maccatalyst.15", "maccatalyst.14", "maccatalyst.13", "maccatalyst", "ios", "unix", "any", "base" ], "maccatalyst.15-arm64": [ "maccatalyst.15-arm64", "maccatalyst.15", "maccatalyst.14-arm64", "maccatalyst.14", 
"maccatalyst.13-arm64", "maccatalyst.13", "maccatalyst-arm64", "maccatalyst", "ios-arm64", "ios", "unix-arm64", "unix", "any", "base" ], "maccatalyst.15-x64": [ "maccatalyst.15-x64", "maccatalyst.15", "maccatalyst.14-x64", "maccatalyst.14", "maccatalyst.13-x64", "maccatalyst.13", "maccatalyst-x64", "maccatalyst", "ios-x64", "ios", "unix-x64", "unix", "any", "base" ], "manjaro": [ "manjaro", "arch", "linux", "unix", "any", "base" ], "manjaro-x64": [ "manjaro-x64", "manjaro", "arch-x64", "arch", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ol": [ "ol", "rhel", "linux", "unix", "any", "base" ], "ol-x64": [ "ol-x64", "ol", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ol.7": [ "ol.7", "ol", "rhel.7", "rhel", "linux", "unix", "any", "base" ], "ol.7-x64": [ "ol.7-x64", "ol.7", "ol-x64", "rhel.7-x64", "ol", "rhel.7", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ol.7.0": [ "ol.7.0", "ol.7", "rhel.7.0", "ol", "rhel.7", "rhel", "linux", "unix", "any", "base" ], "ol.7.0-x64": [ "ol.7.0-x64", "ol.7.0", "ol.7-x64", "rhel.7.0-x64", "ol.7", "rhel.7.0", "ol-x64", "rhel.7-x64", "ol", "rhel.7", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ol.7.1": [ "ol.7.1", "ol.7.0", "rhel.7.1", "ol.7", "rhel.7.0", "ol", "rhel.7", "rhel", "linux", "unix", "any", "base" ], "ol.7.1-x64": [ "ol.7.1-x64", "ol.7.1", "ol.7.0-x64", "rhel.7.1-x64", "ol.7.0", "rhel.7.1", "ol.7-x64", "rhel.7.0-x64", "ol.7", "rhel.7.0", "ol-x64", "rhel.7-x64", "ol", "rhel.7", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ol.7.2": [ "ol.7.2", "ol.7.1", "rhel.7.2", "ol.7.0", "rhel.7.1", "ol.7", "rhel.7.0", "ol", "rhel.7", "rhel", "linux", "unix", "any", "base" ], "ol.7.2-x64": [ "ol.7.2-x64", "ol.7.2", "ol.7.1-x64", "rhel.7.2-x64", "ol.7.1", "rhel.7.2", "ol.7.0-x64", "rhel.7.1-x64", "ol.7.0", "rhel.7.1", "ol.7-x64", "rhel.7.0-x64", "ol.7", "rhel.7.0", "ol-x64", "rhel.7-x64", "ol", "rhel.7", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ol.7.3": [ "ol.7.3", "ol.7.2", "rhel.7.3", "ol.7.1", "rhel.7.2", "ol.7.0", "rhel.7.1", "ol.7", "rhel.7.0", "ol", "rhel.7", "rhel", "linux", "unix", "any", "base" ], "ol.7.3-x64": [ "ol.7.3-x64", "ol.7.3", "ol.7.2-x64", "rhel.7.3-x64", "ol.7.2", "rhel.7.3", "ol.7.1-x64", "rhel.7.2-x64", "ol.7.1", "rhel.7.2", "ol.7.0-x64", "rhel.7.1-x64", "ol.7.0", "rhel.7.1", "ol.7-x64", "rhel.7.0-x64", "ol.7", "rhel.7.0", "ol-x64", "rhel.7-x64", "ol", "rhel.7", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ol.7.4": [ "ol.7.4", "ol.7.3", "rhel.7.4", "ol.7.2", "rhel.7.3", "ol.7.1", "rhel.7.2", "ol.7.0", "rhel.7.1", "ol.7", "rhel.7.0", "ol", "rhel.7", "rhel", "linux", "unix", "any", "base" ], "ol.7.4-x64": [ "ol.7.4-x64", "ol.7.4", "ol.7.3-x64", "rhel.7.4-x64", "ol.7.3", "rhel.7.4", "ol.7.2-x64", "rhel.7.3-x64", "ol.7.2", "rhel.7.3", "ol.7.1-x64", "rhel.7.2-x64", "ol.7.1", "rhel.7.2", "ol.7.0-x64", "rhel.7.1-x64", "ol.7.0", "rhel.7.1", "ol.7-x64", "rhel.7.0-x64", "ol.7", "rhel.7.0", "ol-x64", "rhel.7-x64", "ol", "rhel.7", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ol.7.5": [ "ol.7.5", "ol.7.4", "rhel.7.5", "ol.7.3", "rhel.7.4", "ol.7.2", "rhel.7.3", "ol.7.1", "rhel.7.2", "ol.7.0", "rhel.7.1", "ol.7", "rhel.7.0", "ol", "rhel.7", "rhel", "linux", "unix", "any", "base" ], "ol.7.5-x64": [ "ol.7.5-x64", "ol.7.5", "ol.7.4-x64", "rhel.7.5-x64", "ol.7.4", "rhel.7.5", "ol.7.3-x64", 
"rhel.7.4-x64", "ol.7.3", "rhel.7.4", "ol.7.2-x64", "rhel.7.3-x64", "ol.7.2", "rhel.7.3", "ol.7.1-x64", "rhel.7.2-x64", "ol.7.1", "rhel.7.2", "ol.7.0-x64", "rhel.7.1-x64", "ol.7.0", "rhel.7.1", "ol.7-x64", "rhel.7.0-x64", "ol.7", "rhel.7.0", "ol-x64", "rhel.7-x64", "ol", "rhel.7", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ol.7.6": [ "ol.7.6", "ol.7.5", "rhel.7.6", "ol.7.4", "rhel.7.5", "ol.7.3", "rhel.7.4", "ol.7.2", "rhel.7.3", "ol.7.1", "rhel.7.2", "ol.7.0", "rhel.7.1", "ol.7", "rhel.7.0", "ol", "rhel.7", "rhel", "linux", "unix", "any", "base" ], "ol.7.6-x64": [ "ol.7.6-x64", "ol.7.6", "ol.7.5-x64", "rhel.7.6-x64", "ol.7.5", "rhel.7.6", "ol.7.4-x64", "rhel.7.5-x64", "ol.7.4", "rhel.7.5", "ol.7.3-x64", "rhel.7.4-x64", "ol.7.3", "rhel.7.4", "ol.7.2-x64", "rhel.7.3-x64", "ol.7.2", "rhel.7.3", "ol.7.1-x64", "rhel.7.2-x64", "ol.7.1", "rhel.7.2", "ol.7.0-x64", "rhel.7.1-x64", "ol.7.0", "rhel.7.1", "ol.7-x64", "rhel.7.0-x64", "ol.7", "rhel.7.0", "ol-x64", "rhel.7-x64", "ol", "rhel.7", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ol.8": [ "ol.8", "ol", "rhel.8", "rhel", "linux", "unix", "any", "base" ], "ol.8-x64": [ "ol.8-x64", "ol.8", "ol-x64", "rhel.8-x64", "ol", "rhel.8", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ol.8.0": [ "ol.8.0", "ol.8", "rhel.8.0", "ol", "rhel.8", "rhel", "linux", "unix", "any", "base" ], "ol.8.0-x64": [ "ol.8.0-x64", "ol.8.0", "ol.8-x64", "rhel.8.0-x64", "ol.8", "rhel.8.0", "ol-x64", "rhel.8-x64", "ol", "rhel.8", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "omnios": [ "omnios", "illumos", "unix", "any", "base" ], "omnios-x64": [ "omnios-x64", "omnios", "illumos-x64", "illumos", "unix-x64", "unix", "any", "base" ], "omnios.15": [ "omnios.15", "omnios", "illumos", "unix", "any", "base" ], "omnios.15-x64": [ "omnios.15-x64", "omnios.15", "omnios-x64", "omnios", "illumos-x64", "illumos", "unix-x64", "unix", "any", "base" ], "openindiana": [ "openindiana", "illumos", "unix", "any", "base" ], "openindiana-x64": [ "openindiana-x64", "openindiana", "illumos-x64", "illumos", "unix-x64", "unix", "any", "base" ], "opensuse": [ "opensuse", "linux", "unix", "any", "base" ], "opensuse-x64": [ "opensuse-x64", "opensuse", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "opensuse.13.2": [ "opensuse.13.2", "opensuse", "linux", "unix", "any", "base" ], "opensuse.13.2-x64": [ "opensuse.13.2-x64", "opensuse.13.2", "opensuse-x64", "opensuse", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "opensuse.15.0": [ "opensuse.15.0", "opensuse", "linux", "unix", "any", "base" ], "opensuse.15.0-x64": [ "opensuse.15.0-x64", "opensuse.15.0", "opensuse-x64", "opensuse", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "opensuse.15.1": [ "opensuse.15.1", "opensuse", "linux", "unix", "any", "base" ], "opensuse.15.1-x64": [ "opensuse.15.1-x64", "opensuse.15.1", "opensuse-x64", "opensuse", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "opensuse.42.1": [ "opensuse.42.1", "opensuse", "linux", "unix", "any", "base" ], "opensuse.42.1-x64": [ "opensuse.42.1-x64", "opensuse.42.1", "opensuse-x64", "opensuse", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "opensuse.42.2": [ "opensuse.42.2", "opensuse", "linux", "unix", "any", "base" ], "opensuse.42.2-x64": [ "opensuse.42.2-x64", "opensuse.42.2", "opensuse-x64", "opensuse", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "opensuse.42.3": [ 
"opensuse.42.3", "opensuse", "linux", "unix", "any", "base" ], "opensuse.42.3-x64": [ "opensuse.42.3-x64", "opensuse.42.3", "opensuse-x64", "opensuse", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "osx": [ "osx", "unix", "any", "base" ], "osx-arm64": [ "osx-arm64", "osx", "unix-arm64", "unix", "any", "base" ], "osx-x64": [ "osx-x64", "osx", "unix-x64", "unix", "any", "base" ], "osx.10.10": [ "osx.10.10", "osx", "unix", "any", "base" ], "osx.10.10-arm64": [ "osx.10.10-arm64", "osx.10.10", "osx-arm64", "osx", "unix-arm64", "unix", "any", "base" ], "osx.10.10-x64": [ "osx.10.10-x64", "osx.10.10", "osx-x64", "osx", "unix-x64", "unix", "any", "base" ], "osx.10.11": [ "osx.10.11", "osx.10.10", "osx", "unix", "any", "base" ], "osx.10.11-arm64": [ "osx.10.11-arm64", "osx.10.11", "osx.10.10-arm64", "osx.10.10", "osx-arm64", "osx", "unix-arm64", "unix", "any", "base" ], "osx.10.11-x64": [ "osx.10.11-x64", "osx.10.11", "osx.10.10-x64", "osx.10.10", "osx-x64", "osx", "unix-x64", "unix", "any", "base" ], "osx.10.12": [ "osx.10.12", "osx.10.11", "osx.10.10", "osx", "unix", "any", "base" ], "osx.10.12-arm64": [ "osx.10.12-arm64", "osx.10.12", "osx.10.11-arm64", "osx.10.11", "osx.10.10-arm64", "osx.10.10", "osx-arm64", "osx", "unix-arm64", "unix", "any", "base" ], "osx.10.12-x64": [ "osx.10.12-x64", "osx.10.12", "osx.10.11-x64", "osx.10.11", "osx.10.10-x64", "osx.10.10", "osx-x64", "osx", "unix-x64", "unix", "any", "base" ], "osx.10.13": [ "osx.10.13", "osx.10.12", "osx.10.11", "osx.10.10", "osx", "unix", "any", "base" ], "osx.10.13-arm64": [ "osx.10.13-arm64", "osx.10.13", "osx.10.12-arm64", "osx.10.12", "osx.10.11-arm64", "osx.10.11", "osx.10.10-arm64", "osx.10.10", "osx-arm64", "osx", "unix-arm64", "unix", "any", "base" ], "osx.10.13-x64": [ "osx.10.13-x64", "osx.10.13", "osx.10.12-x64", "osx.10.12", "osx.10.11-x64", "osx.10.11", "osx.10.10-x64", "osx.10.10", "osx-x64", "osx", "unix-x64", "unix", "any", "base" ], "osx.10.14": [ "osx.10.14", "osx.10.13", "osx.10.12", "osx.10.11", "osx.10.10", "osx", "unix", "any", "base" ], "osx.10.14-arm64": [ "osx.10.14-arm64", "osx.10.14", "osx.10.13-arm64", "osx.10.13", "osx.10.12-arm64", "osx.10.12", "osx.10.11-arm64", "osx.10.11", "osx.10.10-arm64", "osx.10.10", "osx-arm64", "osx", "unix-arm64", "unix", "any", "base" ], "osx.10.14-x64": [ "osx.10.14-x64", "osx.10.14", "osx.10.13-x64", "osx.10.13", "osx.10.12-x64", "osx.10.12", "osx.10.11-x64", "osx.10.11", "osx.10.10-x64", "osx.10.10", "osx-x64", "osx", "unix-x64", "unix", "any", "base" ], "osx.10.15": [ "osx.10.15", "osx.10.14", "osx.10.13", "osx.10.12", "osx.10.11", "osx.10.10", "osx", "unix", "any", "base" ], "osx.10.15-arm64": [ "osx.10.15-arm64", "osx.10.15", "osx.10.14-arm64", "osx.10.14", "osx.10.13-arm64", "osx.10.13", "osx.10.12-arm64", "osx.10.12", "osx.10.11-arm64", "osx.10.11", "osx.10.10-arm64", "osx.10.10", "osx-arm64", "osx", "unix-arm64", "unix", "any", "base" ], "osx.10.15-x64": [ "osx.10.15-x64", "osx.10.15", "osx.10.14-x64", "osx.10.14", "osx.10.13-x64", "osx.10.13", "osx.10.12-x64", "osx.10.12", "osx.10.11-x64", "osx.10.11", "osx.10.10-x64", "osx.10.10", "osx-x64", "osx", "unix-x64", "unix", "any", "base" ], "osx.10.16": [ "osx.10.16", "osx.10.15", "osx.10.14", "osx.10.13", "osx.10.12", "osx.10.11", "osx.10.10", "osx", "unix", "any", "base" ], "osx.10.16-arm64": [ "osx.10.16-arm64", "osx.10.16", "osx.10.15-arm64", "osx.10.15", "osx.10.14-arm64", "osx.10.14", "osx.10.13-arm64", "osx.10.13", "osx.10.12-arm64", "osx.10.12", "osx.10.11-arm64", "osx.10.11", "osx.10.10-arm64", 
"osx.10.10", "osx-arm64", "osx", "unix-arm64", "unix", "any", "base" ], "osx.10.16-x64": [ "osx.10.16-x64", "osx.10.16", "osx.10.15-x64", "osx.10.15", "osx.10.14-x64", "osx.10.14", "osx.10.13-x64", "osx.10.13", "osx.10.12-x64", "osx.10.12", "osx.10.11-x64", "osx.10.11", "osx.10.10-x64", "osx.10.10", "osx-x64", "osx", "unix-x64", "unix", "any", "base" ], "osx.11.0": [ "osx.11.0", "osx.10.16", "osx.10.15", "osx.10.14", "osx.10.13", "osx.10.12", "osx.10.11", "osx.10.10", "osx", "unix", "any", "base" ], "osx.11.0-arm64": [ "osx.11.0-arm64", "osx.11.0", "osx.10.16-arm64", "osx.10.16", "osx.10.15-arm64", "osx.10.15", "osx.10.14-arm64", "osx.10.14", "osx.10.13-arm64", "osx.10.13", "osx.10.12-arm64", "osx.10.12", "osx.10.11-arm64", "osx.10.11", "osx.10.10-arm64", "osx.10.10", "osx-arm64", "osx", "unix-arm64", "unix", "any", "base" ], "osx.11.0-x64": [ "osx.11.0-x64", "osx.11.0", "osx.10.16-x64", "osx.10.16", "osx.10.15-x64", "osx.10.15", "osx.10.14-x64", "osx.10.14", "osx.10.13-x64", "osx.10.13", "osx.10.12-x64", "osx.10.12", "osx.10.11-x64", "osx.10.11", "osx.10.10-x64", "osx.10.10", "osx-x64", "osx", "unix-x64", "unix", "any", "base" ], "osx.12": [ "osx.12", "osx.11.0", "osx.10.16", "osx.10.15", "osx.10.14", "osx.10.13", "osx.10.12", "osx.10.11", "osx.10.10", "osx", "unix", "any", "base" ], "osx.12-arm64": [ "osx.12-arm64", "osx.12", "osx.11.0-arm64", "osx.11.0", "osx.10.16-arm64", "osx.10.16", "osx.10.15-arm64", "osx.10.15", "osx.10.14-arm64", "osx.10.14", "osx.10.13-arm64", "osx.10.13", "osx.10.12-arm64", "osx.10.12", "osx.10.11-arm64", "osx.10.11", "osx.10.10-arm64", "osx.10.10", "osx-arm64", "osx", "unix-arm64", "unix", "any", "base" ], "osx.12-x64": [ "osx.12-x64", "osx.12", "osx.11.0-x64", "osx.11.0", "osx.10.16-x64", "osx.10.16", "osx.10.15-x64", "osx.10.15", "osx.10.14-x64", "osx.10.14", "osx.10.13-x64", "osx.10.13", "osx.10.12-x64", "osx.10.12", "osx.10.11-x64", "osx.10.11", "osx.10.10-x64", "osx.10.10", "osx-x64", "osx", "unix-x64", "unix", "any", "base" ], "rhel": [ "rhel", "linux", "unix", "any", "base" ], "rhel-arm64": [ "rhel-arm64", "rhel", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "rhel-x64": [ "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "rhel.6": [ "rhel.6", "rhel", "linux", "unix", "any", "base" ], "rhel.6-x64": [ "rhel.6-x64", "rhel.6", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "rhel.7": [ "rhel.7", "rhel", "linux", "unix", "any", "base" ], "rhel.7-x64": [ "rhel.7-x64", "rhel.7", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "rhel.7.0": [ "rhel.7.0", "rhel.7", "rhel", "linux", "unix", "any", "base" ], "rhel.7.0-x64": [ "rhel.7.0-x64", "rhel.7.0", "rhel.7-x64", "rhel.7", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "rhel.7.1": [ "rhel.7.1", "rhel.7.0", "rhel.7", "rhel", "linux", "unix", "any", "base" ], "rhel.7.1-x64": [ "rhel.7.1-x64", "rhel.7.1", "rhel.7.0-x64", "rhel.7.0", "rhel.7-x64", "rhel.7", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "rhel.7.2": [ "rhel.7.2", "rhel.7.1", "rhel.7.0", "rhel.7", "rhel", "linux", "unix", "any", "base" ], "rhel.7.2-x64": [ "rhel.7.2-x64", "rhel.7.2", "rhel.7.1-x64", "rhel.7.1", "rhel.7.0-x64", "rhel.7.0", "rhel.7-x64", "rhel.7", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "rhel.7.3": [ "rhel.7.3", "rhel.7.2", "rhel.7.1", "rhel.7.0", "rhel.7", "rhel", "linux", "unix", "any", "base" ], "rhel.7.3-x64": [ 
"rhel.7.3-x64", "rhel.7.3", "rhel.7.2-x64", "rhel.7.2", "rhel.7.1-x64", "rhel.7.1", "rhel.7.0-x64", "rhel.7.0", "rhel.7-x64", "rhel.7", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "rhel.7.4": [ "rhel.7.4", "rhel.7.3", "rhel.7.2", "rhel.7.1", "rhel.7.0", "rhel.7", "rhel", "linux", "unix", "any", "base" ], "rhel.7.4-x64": [ "rhel.7.4-x64", "rhel.7.4", "rhel.7.3-x64", "rhel.7.3", "rhel.7.2-x64", "rhel.7.2", "rhel.7.1-x64", "rhel.7.1", "rhel.7.0-x64", "rhel.7.0", "rhel.7-x64", "rhel.7", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "rhel.7.5": [ "rhel.7.5", "rhel.7.4", "rhel.7.3", "rhel.7.2", "rhel.7.1", "rhel.7.0", "rhel.7", "rhel", "linux", "unix", "any", "base" ], "rhel.7.5-x64": [ "rhel.7.5-x64", "rhel.7.5", "rhel.7.4-x64", "rhel.7.4", "rhel.7.3-x64", "rhel.7.3", "rhel.7.2-x64", "rhel.7.2", "rhel.7.1-x64", "rhel.7.1", "rhel.7.0-x64", "rhel.7.0", "rhel.7-x64", "rhel.7", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "rhel.7.6": [ "rhel.7.6", "rhel.7.5", "rhel.7.4", "rhel.7.3", "rhel.7.2", "rhel.7.1", "rhel.7.0", "rhel.7", "rhel", "linux", "unix", "any", "base" ], "rhel.7.6-x64": [ "rhel.7.6-x64", "rhel.7.6", "rhel.7.5-x64", "rhel.7.5", "rhel.7.4-x64", "rhel.7.4", "rhel.7.3-x64", "rhel.7.3", "rhel.7.2-x64", "rhel.7.2", "rhel.7.1-x64", "rhel.7.1", "rhel.7.0-x64", "rhel.7.0", "rhel.7-x64", "rhel.7", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "rhel.8": [ "rhel.8", "rhel", "linux", "unix", "any", "base" ], "rhel.8-arm64": [ "rhel.8-arm64", "rhel.8", "rhel-arm64", "rhel", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "rhel.8-x64": [ "rhel.8-x64", "rhel.8", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "rhel.8.0": [ "rhel.8.0", "rhel.8", "rhel", "linux", "unix", "any", "base" ], "rhel.8.0-arm64": [ "rhel.8.0-arm64", "rhel.8.0", "rhel.8-arm64", "rhel.8", "rhel-arm64", "rhel", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "rhel.8.0-x64": [ "rhel.8.0-x64", "rhel.8.0", "rhel.8-x64", "rhel.8", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "rhel.8.1": [ "rhel.8.1", "rhel.8.0", "rhel.8", "rhel", "linux", "unix", "any", "base" ], "rhel.8.1-arm64": [ "rhel.8.1-arm64", "rhel.8.1", "rhel.8.0-arm64", "rhel.8.0", "rhel.8-arm64", "rhel.8", "rhel-arm64", "rhel", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "rhel.8.1-x64": [ "rhel.8.1-x64", "rhel.8.1", "rhel.8.0-x64", "rhel.8.0", "rhel.8-x64", "rhel.8", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "rhel.9": [ "rhel.9", "rhel", "linux", "unix", "any", "base" ], "rhel.9-arm64": [ "rhel.9-arm64", "rhel.9", "rhel-arm64", "rhel", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "rhel.9-x64": [ "rhel.9-x64", "rhel.9", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "rocky": [ "rocky", "rhel", "linux", "unix", "any", "base" ], "rocky-arm64": [ "rocky-arm64", "rocky", "rhel-arm64", "rhel", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "rocky-x64": [ "rocky-x64", "rocky", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "rocky.8": [ "rocky.8", "rocky", "rhel.8", "rhel", "linux", "unix", "any", "base" ], "rocky.8-arm64": [ "rocky.8-arm64", "rocky.8", "rocky-arm64", "rhel.8-arm64", "rocky", "rhel.8", "rhel-arm64", "rhel", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "rocky.8-x64": [ 
"rocky.8-x64", "rocky.8", "rocky-x64", "rhel.8-x64", "rocky", "rhel.8", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "rocky.9": [ "rocky.9", "rocky", "rhel.9", "rhel", "linux", "unix", "any", "base" ], "rocky.9-arm64": [ "rocky.9-arm64", "rocky.9", "rocky-arm64", "rhel.9-arm64", "rocky", "rhel.9", "rhel-arm64", "rhel", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "rocky.9-x64": [ "rocky.9-x64", "rocky.9", "rocky-x64", "rhel.9-x64", "rocky", "rhel.9", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "sles": [ "sles", "linux", "unix", "any", "base" ], "sles-x64": [ "sles-x64", "sles", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "sles.12": [ "sles.12", "sles", "linux", "unix", "any", "base" ], "sles.12-x64": [ "sles.12-x64", "sles.12", "sles-x64", "sles", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "sles.12.1": [ "sles.12.1", "sles.12", "sles", "linux", "unix", "any", "base" ], "sles.12.1-x64": [ "sles.12.1-x64", "sles.12.1", "sles.12-x64", "sles.12", "sles-x64", "sles", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "sles.12.2": [ "sles.12.2", "sles.12.1", "sles.12", "sles", "linux", "unix", "any", "base" ], "sles.12.2-x64": [ "sles.12.2-x64", "sles.12.2", "sles.12.1-x64", "sles.12.1", "sles.12-x64", "sles.12", "sles-x64", "sles", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "sles.12.3": [ "sles.12.3", "sles.12.2", "sles.12.1", "sles.12", "sles", "linux", "unix", "any", "base" ], "sles.12.3-x64": [ "sles.12.3-x64", "sles.12.3", "sles.12.2-x64", "sles.12.2", "sles.12.1-x64", "sles.12.1", "sles.12-x64", "sles.12", "sles-x64", "sles", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "sles.12.4": [ "sles.12.4", "sles.12.3", "sles.12.2", "sles.12.1", "sles.12", "sles", "linux", "unix", "any", "base" ], "sles.12.4-x64": [ "sles.12.4-x64", "sles.12.4", "sles.12.3-x64", "sles.12.3", "sles.12.2-x64", "sles.12.2", "sles.12.1-x64", "sles.12.1", "sles.12-x64", "sles.12", "sles-x64", "sles", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "sles.15": [ "sles.15", "sles.12.4", "sles.12.3", "sles.12.2", "sles.12.1", "sles.12", "sles", "linux", "unix", "any", "base" ], "sles.15-x64": [ "sles.15-x64", "sles.15", "sles.12.4-x64", "sles.12.4", "sles.12.3-x64", "sles.12.3", "sles.12.2-x64", "sles.12.2", "sles.12.1-x64", "sles.12.1", "sles.12-x64", "sles.12", "sles-x64", "sles", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "sles.15.1": [ "sles.15.1", "sles.15", "sles.12.4", "sles.12.3", "sles.12.2", "sles.12.1", "sles.12", "sles", "linux", "unix", "any", "base" ], "sles.15.1-x64": [ "sles.15.1-x64", "sles.15.1", "sles.15-x64", "sles.15", "sles.12.4-x64", "sles.12.4", "sles.12.3-x64", "sles.12.3", "sles.12.2-x64", "sles.12.2", "sles.12.1-x64", "sles.12.1", "sles.12-x64", "sles.12", "sles-x64", "sles", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "smartos": [ "smartos", "illumos", "unix", "any", "base" ], "smartos-x64": [ "smartos-x64", "smartos", "illumos-x64", "illumos", "unix-x64", "unix", "any", "base" ], "smartos.2020": [ "smartos.2020", "smartos", "illumos", "unix", "any", "base" ], "smartos.2020-x64": [ "smartos.2020-x64", "smartos.2020", "smartos-x64", "smartos", "illumos-x64", "illumos", "unix-x64", "unix", "any", "base" ], "smartos.2021": [ "smartos.2021", "smartos.2020", "smartos", "illumos", "unix", "any", "base" ], "smartos.2021-x64": [ "smartos.2021-x64", "smartos.2021", "smartos.2020-x64", "smartos.2020", 
"smartos-x64", "smartos", "illumos-x64", "illumos", "unix-x64", "unix", "any", "base" ], "solaris": [ "solaris", "unix", "any", "base" ], "solaris-x64": [ "solaris-x64", "solaris", "unix-x64", "unix", "any", "base" ], "solaris.11": [ "solaris.11", "solaris", "unix", "any", "base" ], "solaris.11-x64": [ "solaris.11-x64", "solaris.11", "solaris-x64", "solaris", "unix-x64", "unix", "any", "base" ], "tizen": [ "tizen", "linux", "unix", "any", "base" ], "tizen-arm64": [ "tizen-arm64", "tizen", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "tizen-armel": [ "tizen-armel", "tizen", "linux-armel", "linux", "unix-armel", "unix", "any", "base" ], "tizen-x86": [ "tizen-x86", "tizen", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "tizen.4.0.0": [ "tizen.4.0.0", "tizen", "linux", "unix", "any", "base" ], "tizen.4.0.0-arm64": [ "tizen.4.0.0-arm64", "tizen.4.0.0", "tizen-arm64", "tizen", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "tizen.4.0.0-armel": [ "tizen.4.0.0-armel", "tizen.4.0.0", "tizen-armel", "tizen", "linux-armel", "linux", "unix-armel", "unix", "any", "base" ], "tizen.4.0.0-x86": [ "tizen.4.0.0-x86", "tizen.4.0.0", "tizen-x86", "tizen", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "tizen.5.0.0": [ "tizen.5.0.0", "tizen.4.0.0", "tizen", "linux", "unix", "any", "base" ], "tizen.5.0.0-arm64": [ "tizen.5.0.0-arm64", "tizen.5.0.0", "tizen.4.0.0-arm64", "tizen.4.0.0", "tizen-arm64", "tizen", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "tizen.5.0.0-armel": [ "tizen.5.0.0-armel", "tizen.5.0.0", "tizen.4.0.0-armel", "tizen.4.0.0", "tizen-armel", "tizen", "linux-armel", "linux", "unix-armel", "unix", "any", "base" ], "tizen.5.0.0-x86": [ "tizen.5.0.0-x86", "tizen.5.0.0", "tizen.4.0.0-x86", "tizen.4.0.0", "tizen-x86", "tizen", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "tizen.5.5.0": [ "tizen.5.5.0", "tizen.5.0.0", "tizen.4.0.0", "tizen", "linux", "unix", "any", "base" ], "tizen.5.5.0-arm64": [ "tizen.5.5.0-arm64", "tizen.5.5.0", "tizen.5.0.0-arm64", "tizen.5.0.0", "tizen.4.0.0-arm64", "tizen.4.0.0", "tizen-arm64", "tizen", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "tizen.5.5.0-armel": [ "tizen.5.5.0-armel", "tizen.5.5.0", "tizen.5.0.0-armel", "tizen.5.0.0", "tizen.4.0.0-armel", "tizen.4.0.0", "tizen-armel", "tizen", "linux-armel", "linux", "unix-armel", "unix", "any", "base" ], "tizen.5.5.0-x86": [ "tizen.5.5.0-x86", "tizen.5.5.0", "tizen.5.0.0-x86", "tizen.5.0.0", "tizen.4.0.0-x86", "tizen.4.0.0", "tizen-x86", "tizen", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "tizen.6.0.0": [ "tizen.6.0.0", "tizen.5.5.0", "tizen.5.0.0", "tizen.4.0.0", "tizen", "linux", "unix", "any", "base" ], "tizen.6.0.0-arm64": [ "tizen.6.0.0-arm64", "tizen.6.0.0", "tizen.5.5.0-arm64", "tizen.5.5.0", "tizen.5.0.0-arm64", "tizen.5.0.0", "tizen.4.0.0-arm64", "tizen.4.0.0", "tizen-arm64", "tizen", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "tizen.6.0.0-armel": [ "tizen.6.0.0-armel", "tizen.6.0.0", "tizen.5.5.0-armel", "tizen.5.5.0", "tizen.5.0.0-armel", "tizen.5.0.0", "tizen.4.0.0-armel", "tizen.4.0.0", "tizen-armel", "tizen", "linux-armel", "linux", "unix-armel", "unix", "any", "base" ], "tizen.6.0.0-x86": [ "tizen.6.0.0-x86", "tizen.6.0.0", "tizen.5.5.0-x86", "tizen.5.5.0", "tizen.5.0.0-x86", "tizen.5.0.0", "tizen.4.0.0-x86", "tizen.4.0.0", "tizen-x86", "tizen", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "tizen.6.5.0": [ "tizen.6.5.0", "tizen.6.0.0", 
"tizen.5.5.0", "tizen.5.0.0", "tizen.4.0.0", "tizen", "linux", "unix", "any", "base" ], "tizen.6.5.0-arm64": [ "tizen.6.5.0-arm64", "tizen.6.5.0", "tizen.6.0.0-arm64", "tizen.6.0.0", "tizen.5.5.0-arm64", "tizen.5.5.0", "tizen.5.0.0-arm64", "tizen.5.0.0", "tizen.4.0.0-arm64", "tizen.4.0.0", "tizen-arm64", "tizen", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "tizen.6.5.0-armel": [ "tizen.6.5.0-armel", "tizen.6.5.0", "tizen.6.0.0-armel", "tizen.6.0.0", "tizen.5.5.0-armel", "tizen.5.5.0", "tizen.5.0.0-armel", "tizen.5.0.0", "tizen.4.0.0-armel", "tizen.4.0.0", "tizen-armel", "tizen", "linux-armel", "linux", "unix-armel", "unix", "any", "base" ], "tizen.6.5.0-x86": [ "tizen.6.5.0-x86", "tizen.6.5.0", "tizen.6.0.0-x86", "tizen.6.0.0", "tizen.5.5.0-x86", "tizen.5.5.0", "tizen.5.0.0-x86", "tizen.5.0.0", "tizen.4.0.0-x86", "tizen.4.0.0", "tizen-x86", "tizen", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "tvos": [ "tvos", "unix", "any", "base" ], "tvos-arm64": [ "tvos-arm64", "tvos", "unix-arm64", "unix", "any", "base" ], "tvos-x64": [ "tvos-x64", "tvos", "unix-x64", "unix", "any", "base" ], "tvos.10": [ "tvos.10", "tvos", "unix", "any", "base" ], "tvos.10-arm64": [ "tvos.10-arm64", "tvos.10", "tvos-arm64", "tvos", "unix-arm64", "unix", "any", "base" ], "tvos.10-x64": [ "tvos.10-x64", "tvos.10", "tvos-x64", "tvos", "unix-x64", "unix", "any", "base" ], "tvos.11": [ "tvos.11", "tvos.10", "tvos", "unix", "any", "base" ], "tvos.11-arm64": [ "tvos.11-arm64", "tvos.11", "tvos.10-arm64", "tvos.10", "tvos-arm64", "tvos", "unix-arm64", "unix", "any", "base" ], "tvos.11-x64": [ "tvos.11-x64", "tvos.11", "tvos.10-x64", "tvos.10", "tvos-x64", "tvos", "unix-x64", "unix", "any", "base" ], "tvos.12": [ "tvos.12", "tvos.11", "tvos.10", "tvos", "unix", "any", "base" ], "tvos.12-arm64": [ "tvos.12-arm64", "tvos.12", "tvos.11-arm64", "tvos.11", "tvos.10-arm64", "tvos.10", "tvos-arm64", "tvos", "unix-arm64", "unix", "any", "base" ], "tvos.12-x64": [ "tvos.12-x64", "tvos.12", "tvos.11-x64", "tvos.11", "tvos.10-x64", "tvos.10", "tvos-x64", "tvos", "unix-x64", "unix", "any", "base" ], "tvos.13": [ "tvos.13", "tvos.12", "tvos.11", "tvos.10", "tvos", "unix", "any", "base" ], "tvos.13-arm64": [ "tvos.13-arm64", "tvos.13", "tvos.12-arm64", "tvos.12", "tvos.11-arm64", "tvos.11", "tvos.10-arm64", "tvos.10", "tvos-arm64", "tvos", "unix-arm64", "unix", "any", "base" ], "tvos.13-x64": [ "tvos.13-x64", "tvos.13", "tvos.12-x64", "tvos.12", "tvos.11-x64", "tvos.11", "tvos.10-x64", "tvos.10", "tvos-x64", "tvos", "unix-x64", "unix", "any", "base" ], "tvos.14": [ "tvos.14", "tvos.13", "tvos.12", "tvos.11", "tvos.10", "tvos", "unix", "any", "base" ], "tvos.14-arm64": [ "tvos.14-arm64", "tvos.14", "tvos.13-arm64", "tvos.13", "tvos.12-arm64", "tvos.12", "tvos.11-arm64", "tvos.11", "tvos.10-arm64", "tvos.10", "tvos-arm64", "tvos", "unix-arm64", "unix", "any", "base" ], "tvos.14-x64": [ "tvos.14-x64", "tvos.14", "tvos.13-x64", "tvos.13", "tvos.12-x64", "tvos.12", "tvos.11-x64", "tvos.11", "tvos.10-x64", "tvos.10", "tvos-x64", "tvos", "unix-x64", "unix", "any", "base" ], "tvos.15": [ "tvos.15", "tvos.14", "tvos.13", "tvos.12", "tvos.11", "tvos.10", "tvos", "unix", "any", "base" ], "tvos.15-arm64": [ "tvos.15-arm64", "tvos.15", "tvos.14-arm64", "tvos.14", "tvos.13-arm64", "tvos.13", "tvos.12-arm64", "tvos.12", "tvos.11-arm64", "tvos.11", "tvos.10-arm64", "tvos.10", "tvos-arm64", "tvos", "unix-arm64", "unix", "any", "base" ], "tvos.15-x64": [ "tvos.15-x64", "tvos.15", "tvos.14-x64", "tvos.14", "tvos.13-x64", 
"tvos.13", "tvos.12-x64", "tvos.12", "tvos.11-x64", "tvos.11", "tvos.10-x64", "tvos.10", "tvos-x64", "tvos", "unix-x64", "unix", "any", "base" ], "tvossimulator": [ "tvossimulator", "tvos", "unix", "any", "base" ], "tvossimulator-arm64": [ "tvossimulator-arm64", "tvossimulator", "tvos-arm64", "tvos", "unix-arm64", "unix", "any", "base" ], "tvossimulator-x64": [ "tvossimulator-x64", "tvossimulator", "tvos-x64", "tvos", "unix-x64", "unix", "any", "base" ], "tvossimulator.10": [ "tvossimulator.10", "tvossimulator", "tvos", "unix", "any", "base" ], "tvossimulator.10-arm64": [ "tvossimulator.10-arm64", "tvossimulator.10", "tvossimulator-arm64", "tvossimulator", "tvos-arm64", "tvos", "unix-arm64", "unix", "any", "base" ], "tvossimulator.10-x64": [ "tvossimulator.10-x64", "tvossimulator.10", "tvossimulator-x64", "tvossimulator", "tvos-x64", "tvos", "unix-x64", "unix", "any", "base" ], "tvossimulator.11": [ "tvossimulator.11", "tvossimulator.10", "tvossimulator", "tvos", "unix", "any", "base" ], "tvossimulator.11-arm64": [ "tvossimulator.11-arm64", "tvossimulator.11", "tvossimulator.10-arm64", "tvossimulator.10", "tvossimulator-arm64", "tvossimulator", "tvos-arm64", "tvos", "unix-arm64", "unix", "any", "base" ], "tvossimulator.11-x64": [ "tvossimulator.11-x64", "tvossimulator.11", "tvossimulator.10-x64", "tvossimulator.10", "tvossimulator-x64", "tvossimulator", "tvos-x64", "tvos", "unix-x64", "unix", "any", "base" ], "tvossimulator.12": [ "tvossimulator.12", "tvossimulator.11", "tvossimulator.10", "tvossimulator", "tvos", "unix", "any", "base" ], "tvossimulator.12-arm64": [ "tvossimulator.12-arm64", "tvossimulator.12", "tvossimulator.11-arm64", "tvossimulator.11", "tvossimulator.10-arm64", "tvossimulator.10", "tvossimulator-arm64", "tvossimulator", "tvos-arm64", "tvos", "unix-arm64", "unix", "any", "base" ], "tvossimulator.12-x64": [ "tvossimulator.12-x64", "tvossimulator.12", "tvossimulator.11-x64", "tvossimulator.11", "tvossimulator.10-x64", "tvossimulator.10", "tvossimulator-x64", "tvossimulator", "tvos-x64", "tvos", "unix-x64", "unix", "any", "base" ], "tvossimulator.13": [ "tvossimulator.13", "tvossimulator.12", "tvossimulator.11", "tvossimulator.10", "tvossimulator", "tvos", "unix", "any", "base" ], "tvossimulator.13-arm64": [ "tvossimulator.13-arm64", "tvossimulator.13", "tvossimulator.12-arm64", "tvossimulator.12", "tvossimulator.11-arm64", "tvossimulator.11", "tvossimulator.10-arm64", "tvossimulator.10", "tvossimulator-arm64", "tvossimulator", "tvos-arm64", "tvos", "unix-arm64", "unix", "any", "base" ], "tvossimulator.13-x64": [ "tvossimulator.13-x64", "tvossimulator.13", "tvossimulator.12-x64", "tvossimulator.12", "tvossimulator.11-x64", "tvossimulator.11", "tvossimulator.10-x64", "tvossimulator.10", "tvossimulator-x64", "tvossimulator", "tvos-x64", "tvos", "unix-x64", "unix", "any", "base" ], "tvossimulator.14": [ "tvossimulator.14", "tvossimulator.13", "tvossimulator.12", "tvossimulator.11", "tvossimulator.10", "tvossimulator", "tvos", "unix", "any", "base" ], "tvossimulator.14-arm64": [ "tvossimulator.14-arm64", "tvossimulator.14", "tvossimulator.13-arm64", "tvossimulator.13", "tvossimulator.12-arm64", "tvossimulator.12", "tvossimulator.11-arm64", "tvossimulator.11", "tvossimulator.10-arm64", "tvossimulator.10", "tvossimulator-arm64", "tvossimulator", "tvos-arm64", "tvos", "unix-arm64", "unix", "any", "base" ], "tvossimulator.14-x64": [ "tvossimulator.14-x64", "tvossimulator.14", "tvossimulator.13-x64", "tvossimulator.13", "tvossimulator.12-x64", "tvossimulator.12", 
"tvossimulator.11-x64", "tvossimulator.11", "tvossimulator.10-x64", "tvossimulator.10", "tvossimulator-x64", "tvossimulator", "tvos-x64", "tvos", "unix-x64", "unix", "any", "base" ], "tvossimulator.15": [ "tvossimulator.15", "tvossimulator.14", "tvossimulator.13", "tvossimulator.12", "tvossimulator.11", "tvossimulator.10", "tvossimulator", "tvos", "unix", "any", "base" ], "tvossimulator.15-arm64": [ "tvossimulator.15-arm64", "tvossimulator.15", "tvossimulator.14-arm64", "tvossimulator.14", "tvossimulator.13-arm64", "tvossimulator.13", "tvossimulator.12-arm64", "tvossimulator.12", "tvossimulator.11-arm64", "tvossimulator.11", "tvossimulator.10-arm64", "tvossimulator.10", "tvossimulator-arm64", "tvossimulator", "tvos-arm64", "tvos", "unix-arm64", "unix", "any", "base" ], "tvossimulator.15-x64": [ "tvossimulator.15-x64", "tvossimulator.15", "tvossimulator.14-x64", "tvossimulator.14", "tvossimulator.13-x64", "tvossimulator.13", "tvossimulator.12-x64", "tvossimulator.12", "tvossimulator.11-x64", "tvossimulator.11", "tvossimulator.10-x64", "tvossimulator.10", "tvossimulator-x64", "tvossimulator", "tvos-x64", "tvos", "unix-x64", "unix", "any", "base" ], "ubuntu": [ "ubuntu", "debian", "linux", "unix", "any", "base" ], "ubuntu-arm": [ "ubuntu-arm", "ubuntu", "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "ubuntu-arm64": [ "ubuntu-arm64", "ubuntu", "debian-arm64", "debian", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "ubuntu-x64": [ "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ubuntu-x86": [ "ubuntu-x86", "ubuntu", "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "ubuntu.14.04": [ "ubuntu.14.04", "ubuntu", "debian", "linux", "unix", "any", "base" ], "ubuntu.14.04-arm": [ "ubuntu.14.04-arm", "ubuntu.14.04", "ubuntu-arm", "ubuntu", "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "ubuntu.14.04-x64": [ "ubuntu.14.04-x64", "ubuntu.14.04", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ubuntu.14.04-x86": [ "ubuntu.14.04-x86", "ubuntu.14.04", "ubuntu-x86", "ubuntu", "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "ubuntu.14.10": [ "ubuntu.14.10", "ubuntu", "debian", "linux", "unix", "any", "base" ], "ubuntu.14.10-arm": [ "ubuntu.14.10-arm", "ubuntu.14.10", "ubuntu-arm", "ubuntu", "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "ubuntu.14.10-x64": [ "ubuntu.14.10-x64", "ubuntu.14.10", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ubuntu.14.10-x86": [ "ubuntu.14.10-x86", "ubuntu.14.10", "ubuntu-x86", "ubuntu", "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "ubuntu.15.04": [ "ubuntu.15.04", "ubuntu", "debian", "linux", "unix", "any", "base" ], "ubuntu.15.04-arm": [ "ubuntu.15.04-arm", "ubuntu.15.04", "ubuntu-arm", "ubuntu", "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "ubuntu.15.04-x64": [ "ubuntu.15.04-x64", "ubuntu.15.04", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ubuntu.15.04-x86": [ "ubuntu.15.04-x86", "ubuntu.15.04", "ubuntu-x86", "ubuntu", "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "ubuntu.15.10": [ "ubuntu.15.10", "ubuntu", "debian", "linux", "unix", 
"any", "base" ], "ubuntu.15.10-arm": [ "ubuntu.15.10-arm", "ubuntu.15.10", "ubuntu-arm", "ubuntu", "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "ubuntu.15.10-x64": [ "ubuntu.15.10-x64", "ubuntu.15.10", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ubuntu.15.10-x86": [ "ubuntu.15.10-x86", "ubuntu.15.10", "ubuntu-x86", "ubuntu", "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "ubuntu.16.04": [ "ubuntu.16.04", "ubuntu", "debian", "linux", "unix", "any", "base" ], "ubuntu.16.04-arm": [ "ubuntu.16.04-arm", "ubuntu.16.04", "ubuntu-arm", "ubuntu", "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "ubuntu.16.04-arm64": [ "ubuntu.16.04-arm64", "ubuntu.16.04", "ubuntu-arm64", "ubuntu", "debian-arm64", "debian", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "ubuntu.16.04-x64": [ "ubuntu.16.04-x64", "ubuntu.16.04", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ubuntu.16.04-x86": [ "ubuntu.16.04-x86", "ubuntu.16.04", "ubuntu-x86", "ubuntu", "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "ubuntu.16.10": [ "ubuntu.16.10", "ubuntu", "debian", "linux", "unix", "any", "base" ], "ubuntu.16.10-arm": [ "ubuntu.16.10-arm", "ubuntu.16.10", "ubuntu-arm", "ubuntu", "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "ubuntu.16.10-arm64": [ "ubuntu.16.10-arm64", "ubuntu.16.10", "ubuntu-arm64", "ubuntu", "debian-arm64", "debian", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "ubuntu.16.10-x64": [ "ubuntu.16.10-x64", "ubuntu.16.10", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ubuntu.16.10-x86": [ "ubuntu.16.10-x86", "ubuntu.16.10", "ubuntu-x86", "ubuntu", "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "ubuntu.17.04": [ "ubuntu.17.04", "ubuntu", "debian", "linux", "unix", "any", "base" ], "ubuntu.17.04-arm": [ "ubuntu.17.04-arm", "ubuntu.17.04", "ubuntu-arm", "ubuntu", "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "ubuntu.17.04-arm64": [ "ubuntu.17.04-arm64", "ubuntu.17.04", "ubuntu-arm64", "ubuntu", "debian-arm64", "debian", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "ubuntu.17.04-x64": [ "ubuntu.17.04-x64", "ubuntu.17.04", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ubuntu.17.04-x86": [ "ubuntu.17.04-x86", "ubuntu.17.04", "ubuntu-x86", "ubuntu", "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "ubuntu.17.10": [ "ubuntu.17.10", "ubuntu", "debian", "linux", "unix", "any", "base" ], "ubuntu.17.10-arm": [ "ubuntu.17.10-arm", "ubuntu.17.10", "ubuntu-arm", "ubuntu", "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "ubuntu.17.10-arm64": [ "ubuntu.17.10-arm64", "ubuntu.17.10", "ubuntu-arm64", "ubuntu", "debian-arm64", "debian", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "ubuntu.17.10-x64": [ "ubuntu.17.10-x64", "ubuntu.17.10", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ubuntu.17.10-x86": [ "ubuntu.17.10-x86", "ubuntu.17.10", "ubuntu-x86", "ubuntu", "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], 
"ubuntu.18.04": [ "ubuntu.18.04", "ubuntu", "debian", "linux", "unix", "any", "base" ], "ubuntu.18.04-arm": [ "ubuntu.18.04-arm", "ubuntu.18.04", "ubuntu-arm", "ubuntu", "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "ubuntu.18.04-arm64": [ "ubuntu.18.04-arm64", "ubuntu.18.04", "ubuntu-arm64", "ubuntu", "debian-arm64", "debian", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "ubuntu.18.04-x64": [ "ubuntu.18.04-x64", "ubuntu.18.04", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ubuntu.18.04-x86": [ "ubuntu.18.04-x86", "ubuntu.18.04", "ubuntu-x86", "ubuntu", "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "ubuntu.18.10": [ "ubuntu.18.10", "ubuntu", "debian", "linux", "unix", "any", "base" ], "ubuntu.18.10-arm": [ "ubuntu.18.10-arm", "ubuntu.18.10", "ubuntu-arm", "ubuntu", "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "ubuntu.18.10-arm64": [ "ubuntu.18.10-arm64", "ubuntu.18.10", "ubuntu-arm64", "ubuntu", "debian-arm64", "debian", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "ubuntu.18.10-x64": [ "ubuntu.18.10-x64", "ubuntu.18.10", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ubuntu.18.10-x86": [ "ubuntu.18.10-x86", "ubuntu.18.10", "ubuntu-x86", "ubuntu", "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "ubuntu.19.04": [ "ubuntu.19.04", "ubuntu", "debian", "linux", "unix", "any", "base" ], "ubuntu.19.04-arm": [ "ubuntu.19.04-arm", "ubuntu.19.04", "ubuntu-arm", "ubuntu", "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "ubuntu.19.04-arm64": [ "ubuntu.19.04-arm64", "ubuntu.19.04", "ubuntu-arm64", "ubuntu", "debian-arm64", "debian", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "ubuntu.19.04-x64": [ "ubuntu.19.04-x64", "ubuntu.19.04", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ubuntu.19.04-x86": [ "ubuntu.19.04-x86", "ubuntu.19.04", "ubuntu-x86", "ubuntu", "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "ubuntu.19.10": [ "ubuntu.19.10", "ubuntu", "debian", "linux", "unix", "any", "base" ], "ubuntu.19.10-arm": [ "ubuntu.19.10-arm", "ubuntu.19.10", "ubuntu-arm", "ubuntu", "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "ubuntu.19.10-arm64": [ "ubuntu.19.10-arm64", "ubuntu.19.10", "ubuntu-arm64", "ubuntu", "debian-arm64", "debian", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "ubuntu.19.10-x64": [ "ubuntu.19.10-x64", "ubuntu.19.10", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ubuntu.19.10-x86": [ "ubuntu.19.10-x86", "ubuntu.19.10", "ubuntu-x86", "ubuntu", "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "ubuntu.20.04": [ "ubuntu.20.04", "ubuntu", "debian", "linux", "unix", "any", "base" ], "ubuntu.20.04-arm": [ "ubuntu.20.04-arm", "ubuntu.20.04", "ubuntu-arm", "ubuntu", "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "ubuntu.20.04-arm64": [ "ubuntu.20.04-arm64", "ubuntu.20.04", "ubuntu-arm64", "ubuntu", "debian-arm64", "debian", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "ubuntu.20.04-x64": [ "ubuntu.20.04-x64", "ubuntu.20.04", "ubuntu-x64", "ubuntu", 
"debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ubuntu.20.04-x86": [ "ubuntu.20.04-x86", "ubuntu.20.04", "ubuntu-x86", "ubuntu", "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "ubuntu.20.10": [ "ubuntu.20.10", "ubuntu", "debian", "linux", "unix", "any", "base" ], "ubuntu.20.10-arm": [ "ubuntu.20.10-arm", "ubuntu.20.10", "ubuntu-arm", "ubuntu", "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "ubuntu.20.10-arm64": [ "ubuntu.20.10-arm64", "ubuntu.20.10", "ubuntu-arm64", "ubuntu", "debian-arm64", "debian", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "ubuntu.20.10-x64": [ "ubuntu.20.10-x64", "ubuntu.20.10", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ubuntu.20.10-x86": [ "ubuntu.20.10-x86", "ubuntu.20.10", "ubuntu-x86", "ubuntu", "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "ubuntu.21.04": [ "ubuntu.21.04", "ubuntu", "debian", "linux", "unix", "any", "base" ], "ubuntu.21.04-arm": [ "ubuntu.21.04-arm", "ubuntu.21.04", "ubuntu-arm", "ubuntu", "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "ubuntu.21.04-arm64": [ "ubuntu.21.04-arm64", "ubuntu.21.04", "ubuntu-arm64", "ubuntu", "debian-arm64", "debian", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "ubuntu.21.04-x64": [ "ubuntu.21.04-x64", "ubuntu.21.04", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ubuntu.21.04-x86": [ "ubuntu.21.04-x86", "ubuntu.21.04", "ubuntu-x86", "ubuntu", "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "ubuntu.21.10": [ "ubuntu.21.10", "ubuntu", "debian", "linux", "unix", "any", "base" ], "ubuntu.21.10-arm": [ "ubuntu.21.10-arm", "ubuntu.21.10", "ubuntu-arm", "ubuntu", "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "ubuntu.21.10-arm64": [ "ubuntu.21.10-arm64", "ubuntu.21.10", "ubuntu-arm64", "ubuntu", "debian-arm64", "debian", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "ubuntu.21.10-x64": [ "ubuntu.21.10-x64", "ubuntu.21.10", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ubuntu.21.10-x86": [ "ubuntu.21.10-x86", "ubuntu.21.10", "ubuntu-x86", "ubuntu", "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "unix": [ "unix", "any", "base" ], "unix-arm": [ "unix-arm", "unix", "any", "base" ], "unix-arm64": [ "unix-arm64", "unix", "any", "base" ], "unix-armel": [ "unix-armel", "unix", "any", "base" ], "unix-loongarch64": [ "unix-loongarch64", "unix", "any", "base" ], "unix-armv6": [ "unix-armv6", "unix", "any", "base" ], "unix-mips64": [ "unix-mips64", "unix", "any", "base" ], "unix-s390x": [ "unix-s390x", "unix", "any", "base" ], "unix-x64": [ "unix-x64", "unix", "any", "base" ], "unix-x86": [ "unix-x86", "unix", "any", "base" ], "win": [ "win", "any", "base" ], "win-aot": [ "win-aot", "win", "aot", "any", "base" ], "win-arm": [ "win-arm", "win", "any", "base" ], "win-arm-aot": [ "win-arm-aot", "win-aot", "win-arm", "win", "aot", "any", "base" ], "win-arm64": [ "win-arm64", "win", "any", "base" ], "win-arm64-aot": [ "win-arm64-aot", "win-aot", "win-arm64", "win", "aot", "any", "base" ], "win-x64": [ "win-x64", "win", "any", "base" ], "win-x64-aot": [ "win-x64-aot", "win-aot", "win-x64", "win", "aot", "any", "base" 
], "win-x86": [ "win-x86", "win", "any", "base" ], "win-x86-aot": [ "win-x86-aot", "win-aot", "win-x86", "win", "aot", "any", "base" ], "win10": [ "win10", "win81", "win8", "win7", "win", "any", "base" ], "win10-aot": [ "win10-aot", "win10", "win81-aot", "win81", "win8-aot", "win8", "win7-aot", "win7", "win-aot", "win", "aot", "any", "base" ], "win10-arm": [ "win10-arm", "win10", "win81-arm", "win81", "win8-arm", "win8", "win7-arm", "win7", "win-arm", "win", "any", "base" ], "win10-arm-aot": [ "win10-arm-aot", "win10-aot", "win10-arm", "win10", "win81-arm-aot", "win81-aot", "win81-arm", "win81", "win8-arm-aot", "win8-aot", "win8-arm", "win8", "win7-arm-aot", "win7-aot", "win7-arm", "win7", "win-arm-aot", "win-aot", "win-arm", "win", "aot", "any", "base" ], "win10-arm64": [ "win10-arm64", "win10", "win81-arm64", "win81", "win8-arm64", "win8", "win7-arm64", "win7", "win-arm64", "win", "any", "base" ], "win10-arm64-aot": [ "win10-arm64-aot", "win10-aot", "win10-arm64", "win10", "win81-arm64-aot", "win81-aot", "win81-arm64", "win81", "win8-arm64-aot", "win8-aot", "win8-arm64", "win8", "win7-arm64-aot", "win7-aot", "win7-arm64", "win7", "win-arm64-aot", "win-aot", "win-arm64", "win", "aot", "any", "base" ], "win10-x64": [ "win10-x64", "win10", "win81-x64", "win81", "win8-x64", "win8", "win7-x64", "win7", "win-x64", "win", "any", "base" ], "win10-x64-aot": [ "win10-x64-aot", "win10-aot", "win10-x64", "win10", "win81-x64-aot", "win81-aot", "win81-x64", "win81", "win8-x64-aot", "win8-aot", "win8-x64", "win8", "win7-x64-aot", "win7-aot", "win7-x64", "win7", "win-x64-aot", "win-aot", "win-x64", "win", "aot", "any", "base" ], "win10-x86": [ "win10-x86", "win10", "win81-x86", "win81", "win8-x86", "win8", "win7-x86", "win7", "win-x86", "win", "any", "base" ], "win10-x86-aot": [ "win10-x86-aot", "win10-aot", "win10-x86", "win10", "win81-x86-aot", "win81-aot", "win81-x86", "win81", "win8-x86-aot", "win8-aot", "win8-x86", "win8", "win7-x86-aot", "win7-aot", "win7-x86", "win7", "win-x86-aot", "win-aot", "win-x86", "win", "aot", "any", "base" ], "win7": [ "win7", "win", "any", "base" ], "win7-aot": [ "win7-aot", "win7", "win-aot", "win", "aot", "any", "base" ], "win7-arm": [ "win7-arm", "win7", "win-arm", "win", "any", "base" ], "win7-arm-aot": [ "win7-arm-aot", "win7-aot", "win7-arm", "win7", "win-arm-aot", "win-aot", "win-arm", "win", "aot", "any", "base" ], "win7-arm64": [ "win7-arm64", "win7", "win-arm64", "win", "any", "base" ], "win7-arm64-aot": [ "win7-arm64-aot", "win7-aot", "win7-arm64", "win7", "win-arm64-aot", "win-aot", "win-arm64", "win", "aot", "any", "base" ], "win7-x64": [ "win7-x64", "win7", "win-x64", "win", "any", "base" ], "win7-x64-aot": [ "win7-x64-aot", "win7-aot", "win7-x64", "win7", "win-x64-aot", "win-aot", "win-x64", "win", "aot", "any", "base" ], "win7-x86": [ "win7-x86", "win7", "win-x86", "win", "any", "base" ], "win7-x86-aot": [ "win7-x86-aot", "win7-aot", "win7-x86", "win7", "win-x86-aot", "win-aot", "win-x86", "win", "aot", "any", "base" ], "win8": [ "win8", "win7", "win", "any", "base" ], "win8-aot": [ "win8-aot", "win8", "win7-aot", "win7", "win-aot", "win", "aot", "any", "base" ], "win8-arm": [ "win8-arm", "win8", "win7-arm", "win7", "win-arm", "win", "any", "base" ], "win8-arm-aot": [ "win8-arm-aot", "win8-aot", "win8-arm", "win8", "win7-arm-aot", "win7-aot", "win7-arm", "win7", "win-arm-aot", "win-aot", "win-arm", "win", "aot", "any", "base" ], "win8-arm64": [ "win8-arm64", "win8", "win7-arm64", "win7", "win-arm64", "win", "any", "base" ], "win8-arm64-aot": [ 
"win8-arm64-aot", "win8-aot", "win8-arm64", "win8", "win7-arm64-aot", "win7-aot", "win7-arm64", "win7", "win-arm64-aot", "win-aot", "win-arm64", "win", "aot", "any", "base" ], "win8-x64": [ "win8-x64", "win8", "win7-x64", "win7", "win-x64", "win", "any", "base" ], "win8-x64-aot": [ "win8-x64-aot", "win8-aot", "win8-x64", "win8", "win7-x64-aot", "win7-aot", "win7-x64", "win7", "win-x64-aot", "win-aot", "win-x64", "win", "aot", "any", "base" ], "win8-x86": [ "win8-x86", "win8", "win7-x86", "win7", "win-x86", "win", "any", "base" ], "win8-x86-aot": [ "win8-x86-aot", "win8-aot", "win8-x86", "win8", "win7-x86-aot", "win7-aot", "win7-x86", "win7", "win-x86-aot", "win-aot", "win-x86", "win", "aot", "any", "base" ], "win81": [ "win81", "win8", "win7", "win", "any", "base" ], "win81-aot": [ "win81-aot", "win81", "win8-aot", "win8", "win7-aot", "win7", "win-aot", "win", "aot", "any", "base" ], "win81-arm": [ "win81-arm", "win81", "win8-arm", "win8", "win7-arm", "win7", "win-arm", "win", "any", "base" ], "win81-arm-aot": [ "win81-arm-aot", "win81-aot", "win81-arm", "win81", "win8-arm-aot", "win8-aot", "win8-arm", "win8", "win7-arm-aot", "win7-aot", "win7-arm", "win7", "win-arm-aot", "win-aot", "win-arm", "win", "aot", "any", "base" ], "win81-arm64": [ "win81-arm64", "win81", "win8-arm64", "win8", "win7-arm64", "win7", "win-arm64", "win", "any", "base" ], "win81-arm64-aot": [ "win81-arm64-aot", "win81-aot", "win81-arm64", "win81", "win8-arm64-aot", "win8-aot", "win8-arm64", "win8", "win7-arm64-aot", "win7-aot", "win7-arm64", "win7", "win-arm64-aot", "win-aot", "win-arm64", "win", "aot", "any", "base" ], "win81-x64": [ "win81-x64", "win81", "win8-x64", "win8", "win7-x64", "win7", "win-x64", "win", "any", "base" ], "win81-x64-aot": [ "win81-x64-aot", "win81-aot", "win81-x64", "win81", "win8-x64-aot", "win8-aot", "win8-x64", "win8", "win7-x64-aot", "win7-aot", "win7-x64", "win7", "win-x64-aot", "win-aot", "win-x64", "win", "aot", "any", "base" ], "win81-x86": [ "win81-x86", "win81", "win8-x86", "win8", "win7-x86", "win7", "win-x86", "win", "any", "base" ], "win81-x86-aot": [ "win81-x86-aot", "win81-aot", "win81-x86", "win81", "win8-x86-aot", "win8-aot", "win8-x86", "win8", "win7-x86-aot", "win7-aot", "win7-x86", "win7", "win-x86-aot", "win-aot", "win-x86", "win", "aot", "any", "base" ] }
{ "alpine": [ "alpine", "linux-musl", "linux", "unix", "any", "base" ], "alpine-arm": [ "alpine-arm", "alpine", "linux-musl-arm", "linux-musl", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "alpine-arm64": [ "alpine-arm64", "alpine", "linux-musl-arm64", "linux-musl", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "alpine-x64": [ "alpine-x64", "alpine", "linux-musl-x64", "linux-musl", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "alpine.3.10": [ "alpine.3.10", "alpine.3.9", "alpine.3.8", "alpine.3.7", "alpine.3.6", "alpine", "linux-musl", "linux", "unix", "any", "base" ], "alpine.3.10-arm": [ "alpine.3.10-arm", "alpine.3.10", "alpine.3.9-arm", "alpine.3.9", "alpine.3.8-arm", "alpine.3.8", "alpine.3.7-arm", "alpine.3.7", "alpine.3.6-arm", "alpine.3.6", "alpine-arm", "alpine", "linux-musl-arm", "linux-musl", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "alpine.3.10-arm64": [ "alpine.3.10-arm64", "alpine.3.10", "alpine.3.9-arm64", "alpine.3.9", "alpine.3.8-arm64", "alpine.3.8", "alpine.3.7-arm64", "alpine.3.7", "alpine.3.6-arm64", "alpine.3.6", "alpine-arm64", "alpine", "linux-musl-arm64", "linux-musl", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "alpine.3.10-x64": [ "alpine.3.10-x64", "alpine.3.10", "alpine.3.9-x64", "alpine.3.9", "alpine.3.8-x64", "alpine.3.8", "alpine.3.7-x64", "alpine.3.7", "alpine.3.6-x64", "alpine.3.6", "alpine-x64", "alpine", "linux-musl-x64", "linux-musl", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "alpine.3.11": [ "alpine.3.11", "alpine.3.10", "alpine.3.9", "alpine.3.8", "alpine.3.7", "alpine.3.6", "alpine", "linux-musl", "linux", "unix", "any", "base" ], "alpine.3.11-arm": [ "alpine.3.11-arm", "alpine.3.11", "alpine.3.10-arm", "alpine.3.10", "alpine.3.9-arm", "alpine.3.9", "alpine.3.8-arm", "alpine.3.8", "alpine.3.7-arm", "alpine.3.7", "alpine.3.6-arm", "alpine.3.6", "alpine-arm", "alpine", "linux-musl-arm", "linux-musl", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "alpine.3.11-arm64": [ "alpine.3.11-arm64", "alpine.3.11", "alpine.3.10-arm64", "alpine.3.10", "alpine.3.9-arm64", "alpine.3.9", "alpine.3.8-arm64", "alpine.3.8", "alpine.3.7-arm64", "alpine.3.7", "alpine.3.6-arm64", "alpine.3.6", "alpine-arm64", "alpine", "linux-musl-arm64", "linux-musl", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "alpine.3.11-x64": [ "alpine.3.11-x64", "alpine.3.11", "alpine.3.10-x64", "alpine.3.10", "alpine.3.9-x64", "alpine.3.9", "alpine.3.8-x64", "alpine.3.8", "alpine.3.7-x64", "alpine.3.7", "alpine.3.6-x64", "alpine.3.6", "alpine-x64", "alpine", "linux-musl-x64", "linux-musl", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "alpine.3.12": [ "alpine.3.12", "alpine.3.11", "alpine.3.10", "alpine.3.9", "alpine.3.8", "alpine.3.7", "alpine.3.6", "alpine", "linux-musl", "linux", "unix", "any", "base" ], "alpine.3.12-arm": [ "alpine.3.12-arm", "alpine.3.12", "alpine.3.11-arm", "alpine.3.11", "alpine.3.10-arm", "alpine.3.10", "alpine.3.9-arm", "alpine.3.9", "alpine.3.8-arm", "alpine.3.8", "alpine.3.7-arm", "alpine.3.7", "alpine.3.6-arm", "alpine.3.6", "alpine-arm", "alpine", "linux-musl-arm", "linux-musl", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "alpine.3.12-arm64": [ "alpine.3.12-arm64", "alpine.3.12", "alpine.3.11-arm64", "alpine.3.11", "alpine.3.10-arm64", "alpine.3.10", "alpine.3.9-arm64", "alpine.3.9", "alpine.3.8-arm64", "alpine.3.8", "alpine.3.7-arm64", "alpine.3.7", "alpine.3.6-arm64", "alpine.3.6", "alpine-arm64", "alpine", 
"linux-musl-arm64", "linux-musl", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "alpine.3.12-x64": [ "alpine.3.12-x64", "alpine.3.12", "alpine.3.11-x64", "alpine.3.11", "alpine.3.10-x64", "alpine.3.10", "alpine.3.9-x64", "alpine.3.9", "alpine.3.8-x64", "alpine.3.8", "alpine.3.7-x64", "alpine.3.7", "alpine.3.6-x64", "alpine.3.6", "alpine-x64", "alpine", "linux-musl-x64", "linux-musl", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "alpine.3.13": [ "alpine.3.13", "alpine.3.12", "alpine.3.11", "alpine.3.10", "alpine.3.9", "alpine.3.8", "alpine.3.7", "alpine.3.6", "alpine", "linux-musl", "linux", "unix", "any", "base" ], "alpine.3.13-arm": [ "alpine.3.13-arm", "alpine.3.13", "alpine.3.12-arm", "alpine.3.12", "alpine.3.11-arm", "alpine.3.11", "alpine.3.10-arm", "alpine.3.10", "alpine.3.9-arm", "alpine.3.9", "alpine.3.8-arm", "alpine.3.8", "alpine.3.7-arm", "alpine.3.7", "alpine.3.6-arm", "alpine.3.6", "alpine-arm", "alpine", "linux-musl-arm", "linux-musl", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "alpine.3.13-arm64": [ "alpine.3.13-arm64", "alpine.3.13", "alpine.3.12-arm64", "alpine.3.12", "alpine.3.11-arm64", "alpine.3.11", "alpine.3.10-arm64", "alpine.3.10", "alpine.3.9-arm64", "alpine.3.9", "alpine.3.8-arm64", "alpine.3.8", "alpine.3.7-arm64", "alpine.3.7", "alpine.3.6-arm64", "alpine.3.6", "alpine-arm64", "alpine", "linux-musl-arm64", "linux-musl", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "alpine.3.13-x64": [ "alpine.3.13-x64", "alpine.3.13", "alpine.3.12-x64", "alpine.3.12", "alpine.3.11-x64", "alpine.3.11", "alpine.3.10-x64", "alpine.3.10", "alpine.3.9-x64", "alpine.3.9", "alpine.3.8-x64", "alpine.3.8", "alpine.3.7-x64", "alpine.3.7", "alpine.3.6-x64", "alpine.3.6", "alpine-x64", "alpine", "linux-musl-x64", "linux-musl", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "alpine.3.14": [ "alpine.3.14", "alpine.3.13", "alpine.3.12", "alpine.3.11", "alpine.3.10", "alpine.3.9", "alpine.3.8", "alpine.3.7", "alpine.3.6", "alpine", "linux-musl", "linux", "unix", "any", "base" ], "alpine.3.14-arm": [ "alpine.3.14-arm", "alpine.3.14", "alpine.3.13-arm", "alpine.3.13", "alpine.3.12-arm", "alpine.3.12", "alpine.3.11-arm", "alpine.3.11", "alpine.3.10-arm", "alpine.3.10", "alpine.3.9-arm", "alpine.3.9", "alpine.3.8-arm", "alpine.3.8", "alpine.3.7-arm", "alpine.3.7", "alpine.3.6-arm", "alpine.3.6", "alpine-arm", "alpine", "linux-musl-arm", "linux-musl", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "alpine.3.14-arm64": [ "alpine.3.14-arm64", "alpine.3.14", "alpine.3.13-arm64", "alpine.3.13", "alpine.3.12-arm64", "alpine.3.12", "alpine.3.11-arm64", "alpine.3.11", "alpine.3.10-arm64", "alpine.3.10", "alpine.3.9-arm64", "alpine.3.9", "alpine.3.8-arm64", "alpine.3.8", "alpine.3.7-arm64", "alpine.3.7", "alpine.3.6-arm64", "alpine.3.6", "alpine-arm64", "alpine", "linux-musl-arm64", "linux-musl", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "alpine.3.14-x64": [ "alpine.3.14-x64", "alpine.3.14", "alpine.3.13-x64", "alpine.3.13", "alpine.3.12-x64", "alpine.3.12", "alpine.3.11-x64", "alpine.3.11", "alpine.3.10-x64", "alpine.3.10", "alpine.3.9-x64", "alpine.3.9", "alpine.3.8-x64", "alpine.3.8", "alpine.3.7-x64", "alpine.3.7", "alpine.3.6-x64", "alpine.3.6", "alpine-x64", "alpine", "linux-musl-x64", "linux-musl", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "alpine.3.15": [ "alpine.3.15", "alpine.3.14", "alpine.3.13", "alpine.3.12", "alpine.3.11", "alpine.3.10", "alpine.3.9", "alpine.3.8", 
"alpine.3.7", "alpine.3.6", "alpine", "linux-musl", "linux", "unix", "any", "base" ], "alpine.3.15-arm": [ "alpine.3.15-arm", "alpine.3.15", "alpine.3.14-arm", "alpine.3.14", "alpine.3.13-arm", "alpine.3.13", "alpine.3.12-arm", "alpine.3.12", "alpine.3.11-arm", "alpine.3.11", "alpine.3.10-arm", "alpine.3.10", "alpine.3.9-arm", "alpine.3.9", "alpine.3.8-arm", "alpine.3.8", "alpine.3.7-arm", "alpine.3.7", "alpine.3.6-arm", "alpine.3.6", "alpine-arm", "alpine", "linux-musl-arm", "linux-musl", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "alpine.3.15-arm64": [ "alpine.3.15-arm64", "alpine.3.15", "alpine.3.14-arm64", "alpine.3.14", "alpine.3.13-arm64", "alpine.3.13", "alpine.3.12-arm64", "alpine.3.12", "alpine.3.11-arm64", "alpine.3.11", "alpine.3.10-arm64", "alpine.3.10", "alpine.3.9-arm64", "alpine.3.9", "alpine.3.8-arm64", "alpine.3.8", "alpine.3.7-arm64", "alpine.3.7", "alpine.3.6-arm64", "alpine.3.6", "alpine-arm64", "alpine", "linux-musl-arm64", "linux-musl", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "alpine.3.15-x64": [ "alpine.3.15-x64", "alpine.3.15", "alpine.3.14-x64", "alpine.3.14", "alpine.3.13-x64", "alpine.3.13", "alpine.3.12-x64", "alpine.3.12", "alpine.3.11-x64", "alpine.3.11", "alpine.3.10-x64", "alpine.3.10", "alpine.3.9-x64", "alpine.3.9", "alpine.3.8-x64", "alpine.3.8", "alpine.3.7-x64", "alpine.3.7", "alpine.3.6-x64", "alpine.3.6", "alpine-x64", "alpine", "linux-musl-x64", "linux-musl", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "alpine.3.6": [ "alpine.3.6", "alpine", "linux-musl", "linux", "unix", "any", "base" ], "alpine.3.6-arm": [ "alpine.3.6-arm", "alpine.3.6", "alpine-arm", "alpine", "linux-musl-arm", "linux-musl", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "alpine.3.6-arm64": [ "alpine.3.6-arm64", "alpine.3.6", "alpine-arm64", "alpine", "linux-musl-arm64", "linux-musl", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "alpine.3.6-x64": [ "alpine.3.6-x64", "alpine.3.6", "alpine-x64", "alpine", "linux-musl-x64", "linux-musl", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "alpine.3.7": [ "alpine.3.7", "alpine.3.6", "alpine", "linux-musl", "linux", "unix", "any", "base" ], "alpine.3.7-arm": [ "alpine.3.7-arm", "alpine.3.7", "alpine.3.6-arm", "alpine.3.6", "alpine-arm", "alpine", "linux-musl-arm", "linux-musl", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "alpine.3.7-arm64": [ "alpine.3.7-arm64", "alpine.3.7", "alpine.3.6-arm64", "alpine.3.6", "alpine-arm64", "alpine", "linux-musl-arm64", "linux-musl", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "alpine.3.7-x64": [ "alpine.3.7-x64", "alpine.3.7", "alpine.3.6-x64", "alpine.3.6", "alpine-x64", "alpine", "linux-musl-x64", "linux-musl", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "alpine.3.8": [ "alpine.3.8", "alpine.3.7", "alpine.3.6", "alpine", "linux-musl", "linux", "unix", "any", "base" ], "alpine.3.8-arm": [ "alpine.3.8-arm", "alpine.3.8", "alpine.3.7-arm", "alpine.3.7", "alpine.3.6-arm", "alpine.3.6", "alpine-arm", "alpine", "linux-musl-arm", "linux-musl", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "alpine.3.8-arm64": [ "alpine.3.8-arm64", "alpine.3.8", "alpine.3.7-arm64", "alpine.3.7", "alpine.3.6-arm64", "alpine.3.6", "alpine-arm64", "alpine", "linux-musl-arm64", "linux-musl", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "alpine.3.8-x64": [ "alpine.3.8-x64", "alpine.3.8", "alpine.3.7-x64", "alpine.3.7", "alpine.3.6-x64", "alpine.3.6", 
"alpine-x64", "alpine", "linux-musl-x64", "linux-musl", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "alpine.3.9": [ "alpine.3.9", "alpine.3.8", "alpine.3.7", "alpine.3.6", "alpine", "linux-musl", "linux", "unix", "any", "base" ], "alpine.3.9-arm": [ "alpine.3.9-arm", "alpine.3.9", "alpine.3.8-arm", "alpine.3.8", "alpine.3.7-arm", "alpine.3.7", "alpine.3.6-arm", "alpine.3.6", "alpine-arm", "alpine", "linux-musl-arm", "linux-musl", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "alpine.3.9-arm64": [ "alpine.3.9-arm64", "alpine.3.9", "alpine.3.8-arm64", "alpine.3.8", "alpine.3.7-arm64", "alpine.3.7", "alpine.3.6-arm64", "alpine.3.6", "alpine-arm64", "alpine", "linux-musl-arm64", "linux-musl", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "alpine.3.9-x64": [ "alpine.3.9-x64", "alpine.3.9", "alpine.3.8-x64", "alpine.3.8", "alpine.3.7-x64", "alpine.3.7", "alpine.3.6-x64", "alpine.3.6", "alpine-x64", "alpine", "linux-musl-x64", "linux-musl", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "android": [ "android", "linux", "unix", "any", "base" ], "android-arm": [ "android-arm", "android", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "android-arm64": [ "android-arm64", "android", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "android-x64": [ "android-x64", "android", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "android-x86": [ "android-x86", "android", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "android.21": [ "android.21", "android", "linux", "unix", "any", "base" ], "android.21-arm": [ "android.21-arm", "android.21", "android-arm", "android", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "android.21-arm64": [ "android.21-arm64", "android.21", "android-arm64", "android", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "android.21-x64": [ "android.21-x64", "android.21", "android-x64", "android", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "android.21-x86": [ "android.21-x86", "android.21", "android-x86", "android", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "android.22": [ "android.22", "android.21", "android", "linux", "unix", "any", "base" ], "android.22-arm": [ "android.22-arm", "android.22", "android.21-arm", "android.21", "android-arm", "android", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "android.22-arm64": [ "android.22-arm64", "android.22", "android.21-arm64", "android.21", "android-arm64", "android", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "android.22-x64": [ "android.22-x64", "android.22", "android.21-x64", "android.21", "android-x64", "android", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "android.22-x86": [ "android.22-x86", "android.22", "android.21-x86", "android.21", "android-x86", "android", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "android.23": [ "android.23", "android.22", "android.21", "android", "linux", "unix", "any", "base" ], "android.23-arm": [ "android.23-arm", "android.23", "android.22-arm", "android.22", "android.21-arm", "android.21", "android-arm", "android", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "android.23-arm64": [ "android.23-arm64", "android.23", "android.22-arm64", "android.22", "android.21-arm64", "android.21", "android-arm64", "android", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "android.23-x64": [ "android.23-x64", "android.23", "android.22-x64", "android.22", 
"android.21-x64", "android.21", "android-x64", "android", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "android.23-x86": [ "android.23-x86", "android.23", "android.22-x86", "android.22", "android.21-x86", "android.21", "android-x86", "android", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "android.24": [ "android.24", "android.23", "android.22", "android.21", "android", "linux", "unix", "any", "base" ], "android.24-arm": [ "android.24-arm", "android.24", "android.23-arm", "android.23", "android.22-arm", "android.22", "android.21-arm", "android.21", "android-arm", "android", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "android.24-arm64": [ "android.24-arm64", "android.24", "android.23-arm64", "android.23", "android.22-arm64", "android.22", "android.21-arm64", "android.21", "android-arm64", "android", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "android.24-x64": [ "android.24-x64", "android.24", "android.23-x64", "android.23", "android.22-x64", "android.22", "android.21-x64", "android.21", "android-x64", "android", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "android.24-x86": [ "android.24-x86", "android.24", "android.23-x86", "android.23", "android.22-x86", "android.22", "android.21-x86", "android.21", "android-x86", "android", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "android.25": [ "android.25", "android.24", "android.23", "android.22", "android.21", "android", "linux", "unix", "any", "base" ], "android.25-arm": [ "android.25-arm", "android.25", "android.24-arm", "android.24", "android.23-arm", "android.23", "android.22-arm", "android.22", "android.21-arm", "android.21", "android-arm", "android", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "android.25-arm64": [ "android.25-arm64", "android.25", "android.24-arm64", "android.24", "android.23-arm64", "android.23", "android.22-arm64", "android.22", "android.21-arm64", "android.21", "android-arm64", "android", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "android.25-x64": [ "android.25-x64", "android.25", "android.24-x64", "android.24", "android.23-x64", "android.23", "android.22-x64", "android.22", "android.21-x64", "android.21", "android-x64", "android", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "android.25-x86": [ "android.25-x86", "android.25", "android.24-x86", "android.24", "android.23-x86", "android.23", "android.22-x86", "android.22", "android.21-x86", "android.21", "android-x86", "android", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "android.26": [ "android.26", "android.25", "android.24", "android.23", "android.22", "android.21", "android", "linux", "unix", "any", "base" ], "android.26-arm": [ "android.26-arm", "android.26", "android.25-arm", "android.25", "android.24-arm", "android.24", "android.23-arm", "android.23", "android.22-arm", "android.22", "android.21-arm", "android.21", "android-arm", "android", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "android.26-arm64": [ "android.26-arm64", "android.26", "android.25-arm64", "android.25", "android.24-arm64", "android.24", "android.23-arm64", "android.23", "android.22-arm64", "android.22", "android.21-arm64", "android.21", "android-arm64", "android", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "android.26-x64": [ "android.26-x64", "android.26", "android.25-x64", "android.25", "android.24-x64", "android.24", "android.23-x64", "android.23", "android.22-x64", "android.22", "android.21-x64", 
"android.21", "android-x64", "android", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "android.26-x86": [ "android.26-x86", "android.26", "android.25-x86", "android.25", "android.24-x86", "android.24", "android.23-x86", "android.23", "android.22-x86", "android.22", "android.21-x86", "android.21", "android-x86", "android", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "android.27": [ "android.27", "android.26", "android.25", "android.24", "android.23", "android.22", "android.21", "android", "linux", "unix", "any", "base" ], "android.27-arm": [ "android.27-arm", "android.27", "android.26-arm", "android.26", "android.25-arm", "android.25", "android.24-arm", "android.24", "android.23-arm", "android.23", "android.22-arm", "android.22", "android.21-arm", "android.21", "android-arm", "android", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "android.27-arm64": [ "android.27-arm64", "android.27", "android.26-arm64", "android.26", "android.25-arm64", "android.25", "android.24-arm64", "android.24", "android.23-arm64", "android.23", "android.22-arm64", "android.22", "android.21-arm64", "android.21", "android-arm64", "android", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "android.27-x64": [ "android.27-x64", "android.27", "android.26-x64", "android.26", "android.25-x64", "android.25", "android.24-x64", "android.24", "android.23-x64", "android.23", "android.22-x64", "android.22", "android.21-x64", "android.21", "android-x64", "android", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "android.27-x86": [ "android.27-x86", "android.27", "android.26-x86", "android.26", "android.25-x86", "android.25", "android.24-x86", "android.24", "android.23-x86", "android.23", "android.22-x86", "android.22", "android.21-x86", "android.21", "android-x86", "android", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "android.28": [ "android.28", "android.27", "android.26", "android.25", "android.24", "android.23", "android.22", "android.21", "android", "linux", "unix", "any", "base" ], "android.28-arm": [ "android.28-arm", "android.28", "android.27-arm", "android.27", "android.26-arm", "android.26", "android.25-arm", "android.25", "android.24-arm", "android.24", "android.23-arm", "android.23", "android.22-arm", "android.22", "android.21-arm", "android.21", "android-arm", "android", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "android.28-arm64": [ "android.28-arm64", "android.28", "android.27-arm64", "android.27", "android.26-arm64", "android.26", "android.25-arm64", "android.25", "android.24-arm64", "android.24", "android.23-arm64", "android.23", "android.22-arm64", "android.22", "android.21-arm64", "android.21", "android-arm64", "android", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "android.28-x64": [ "android.28-x64", "android.28", "android.27-x64", "android.27", "android.26-x64", "android.26", "android.25-x64", "android.25", "android.24-x64", "android.24", "android.23-x64", "android.23", "android.22-x64", "android.22", "android.21-x64", "android.21", "android-x64", "android", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "android.28-x86": [ "android.28-x86", "android.28", "android.27-x86", "android.27", "android.26-x86", "android.26", "android.25-x86", "android.25", "android.24-x86", "android.24", "android.23-x86", "android.23", "android.22-x86", "android.22", "android.21-x86", "android.21", "android-x86", "android", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "android.29": [ 
"android.29", "android.28", "android.27", "android.26", "android.25", "android.24", "android.23", "android.22", "android.21", "android", "linux", "unix", "any", "base" ], "android.29-arm": [ "android.29-arm", "android.29", "android.28-arm", "android.28", "android.27-arm", "android.27", "android.26-arm", "android.26", "android.25-arm", "android.25", "android.24-arm", "android.24", "android.23-arm", "android.23", "android.22-arm", "android.22", "android.21-arm", "android.21", "android-arm", "android", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "android.29-arm64": [ "android.29-arm64", "android.29", "android.28-arm64", "android.28", "android.27-arm64", "android.27", "android.26-arm64", "android.26", "android.25-arm64", "android.25", "android.24-arm64", "android.24", "android.23-arm64", "android.23", "android.22-arm64", "android.22", "android.21-arm64", "android.21", "android-arm64", "android", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "android.29-x64": [ "android.29-x64", "android.29", "android.28-x64", "android.28", "android.27-x64", "android.27", "android.26-x64", "android.26", "android.25-x64", "android.25", "android.24-x64", "android.24", "android.23-x64", "android.23", "android.22-x64", "android.22", "android.21-x64", "android.21", "android-x64", "android", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "android.29-x86": [ "android.29-x86", "android.29", "android.28-x86", "android.28", "android.27-x86", "android.27", "android.26-x86", "android.26", "android.25-x86", "android.25", "android.24-x86", "android.24", "android.23-x86", "android.23", "android.22-x86", "android.22", "android.21-x86", "android.21", "android-x86", "android", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "android.30": [ "android.30", "android.29", "android.28", "android.27", "android.26", "android.25", "android.24", "android.23", "android.22", "android.21", "android", "linux", "unix", "any", "base" ], "android.30-arm": [ "android.30-arm", "android.30", "android.29-arm", "android.29", "android.28-arm", "android.28", "android.27-arm", "android.27", "android.26-arm", "android.26", "android.25-arm", "android.25", "android.24-arm", "android.24", "android.23-arm", "android.23", "android.22-arm", "android.22", "android.21-arm", "android.21", "android-arm", "android", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "android.30-arm64": [ "android.30-arm64", "android.30", "android.29-arm64", "android.29", "android.28-arm64", "android.28", "android.27-arm64", "android.27", "android.26-arm64", "android.26", "android.25-arm64", "android.25", "android.24-arm64", "android.24", "android.23-arm64", "android.23", "android.22-arm64", "android.22", "android.21-arm64", "android.21", "android-arm64", "android", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "android.30-x64": [ "android.30-x64", "android.30", "android.29-x64", "android.29", "android.28-x64", "android.28", "android.27-x64", "android.27", "android.26-x64", "android.26", "android.25-x64", "android.25", "android.24-x64", "android.24", "android.23-x64", "android.23", "android.22-x64", "android.22", "android.21-x64", "android.21", "android-x64", "android", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "android.30-x86": [ "android.30-x86", "android.30", "android.29-x86", "android.29", "android.28-x86", "android.28", "android.27-x86", "android.27", "android.26-x86", "android.26", "android.25-x86", "android.25", "android.24-x86", "android.24", "android.23-x86", "android.23", 
"android.22-x86", "android.22", "android.21-x86", "android.21", "android-x86", "android", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "android.31": [ "android.31", "android.30", "android.29", "android.28", "android.27", "android.26", "android.25", "android.24", "android.23", "android.22", "android.21", "android", "linux", "unix", "any", "base" ], "android.31-arm": [ "android.31-arm", "android.31", "android.30-arm", "android.30", "android.29-arm", "android.29", "android.28-arm", "android.28", "android.27-arm", "android.27", "android.26-arm", "android.26", "android.25-arm", "android.25", "android.24-arm", "android.24", "android.23-arm", "android.23", "android.22-arm", "android.22", "android.21-arm", "android.21", "android-arm", "android", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "android.31-arm64": [ "android.31-arm64", "android.31", "android.30-arm64", "android.30", "android.29-arm64", "android.29", "android.28-arm64", "android.28", "android.27-arm64", "android.27", "android.26-arm64", "android.26", "android.25-arm64", "android.25", "android.24-arm64", "android.24", "android.23-arm64", "android.23", "android.22-arm64", "android.22", "android.21-arm64", "android.21", "android-arm64", "android", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "android.31-x64": [ "android.31-x64", "android.31", "android.30-x64", "android.30", "android.29-x64", "android.29", "android.28-x64", "android.28", "android.27-x64", "android.27", "android.26-x64", "android.26", "android.25-x64", "android.25", "android.24-x64", "android.24", "android.23-x64", "android.23", "android.22-x64", "android.22", "android.21-x64", "android.21", "android-x64", "android", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "android.31-x86": [ "android.31-x86", "android.31", "android.30-x86", "android.30", "android.29-x86", "android.29", "android.28-x86", "android.28", "android.27-x86", "android.27", "android.26-x86", "android.26", "android.25-x86", "android.25", "android.24-x86", "android.24", "android.23-x86", "android.23", "android.22-x86", "android.22", "android.21-x86", "android.21", "android-x86", "android", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "any": [ "any", "base" ], "aot": [ "aot", "any", "base" ], "arch": [ "arch", "linux", "unix", "any", "base" ], "arch-x64": [ "arch-x64", "arch", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "base": [ "base" ], "browser": [ "browser", "any", "base" ], "browser-wasm": [ "browser-wasm", "browser", "any", "base" ], "centos": [ "centos", "rhel", "linux", "unix", "any", "base" ], "centos-arm64": [ "centos-arm64", "centos", "rhel-arm64", "rhel", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "centos-x64": [ "centos-x64", "centos", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "centos.7": [ "centos.7", "centos", "rhel.7", "rhel", "linux", "unix", "any", "base" ], "centos.7-x64": [ "centos.7-x64", "centos.7", "centos-x64", "rhel.7-x64", "centos", "rhel.7", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "centos.8": [ "centos.8", "centos", "rhel.8", "rhel", "linux", "unix", "any", "base" ], "centos.8-arm64": [ "centos.8-arm64", "centos.8", "centos-arm64", "rhel.8-arm64", "centos", "rhel.8", "rhel-arm64", "rhel", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "centos.8-x64": [ "centos.8-x64", "centos.8", "centos-x64", "rhel.8-x64", "centos", "rhel.8", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", 
"any", "base" ], "centos.9": [ "centos.9", "centos", "rhel.9", "rhel", "linux", "unix", "any", "base" ], "centos.9-arm64": [ "centos.9-arm64", "centos.9", "centos-arm64", "rhel.9-arm64", "centos", "rhel.9", "rhel-arm64", "rhel", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "centos.9-x64": [ "centos.9-x64", "centos.9", "centos-x64", "rhel.9-x64", "centos", "rhel.9", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "debian": [ "debian", "linux", "unix", "any", "base" ], "debian-arm": [ "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "debian-arm64": [ "debian-arm64", "debian", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "debian-armel": [ "debian-armel", "debian", "linux-armel", "linux", "unix-armel", "unix", "any", "base" ], "debian-x64": [ "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "debian-x86": [ "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "debian.10": [ "debian.10", "debian", "linux", "unix", "any", "base" ], "debian.10-arm": [ "debian.10-arm", "debian.10", "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "debian.10-arm64": [ "debian.10-arm64", "debian.10", "debian-arm64", "debian", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "debian.10-armel": [ "debian.10-armel", "debian.10", "debian-armel", "debian", "linux-armel", "linux", "unix-armel", "unix", "any", "base" ], "debian.10-x64": [ "debian.10-x64", "debian.10", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "debian.10-x86": [ "debian.10-x86", "debian.10", "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "debian.11": [ "debian.11", "debian", "linux", "unix", "any", "base" ], "debian.11-arm": [ "debian.11-arm", "debian.11", "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "debian.11-arm64": [ "debian.11-arm64", "debian.11", "debian-arm64", "debian", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "debian.11-armel": [ "debian.11-armel", "debian.11", "debian-armel", "debian", "linux-armel", "linux", "unix-armel", "unix", "any", "base" ], "debian.11-x64": [ "debian.11-x64", "debian.11", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "debian.11-x86": [ "debian.11-x86", "debian.11", "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "debian.8": [ "debian.8", "debian", "linux", "unix", "any", "base" ], "debian.8-arm": [ "debian.8-arm", "debian.8", "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "debian.8-arm64": [ "debian.8-arm64", "debian.8", "debian-arm64", "debian", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "debian.8-armel": [ "debian.8-armel", "debian.8", "debian-armel", "debian", "linux-armel", "linux", "unix-armel", "unix", "any", "base" ], "debian.8-x64": [ "debian.8-x64", "debian.8", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "debian.8-x86": [ "debian.8-x86", "debian.8", "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "debian.9": [ "debian.9", "debian", "linux", "unix", "any", "base" ], "debian.9-arm": [ "debian.9-arm", "debian.9", "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "debian.9-arm64": [ "debian.9-arm64", "debian.9", "debian-arm64", "debian", 
"linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "debian.9-armel": [ "debian.9-armel", "debian.9", "debian-armel", "debian", "linux-armel", "linux", "unix-armel", "unix", "any", "base" ], "debian.9-x64": [ "debian.9-x64", "debian.9", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "debian.9-x86": [ "debian.9-x86", "debian.9", "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "exherbo": [ "exherbo", "linux", "unix", "any", "base" ], "exherbo-x64": [ "exherbo-x64", "exherbo", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "fedora": [ "fedora", "linux", "unix", "any", "base" ], "fedora-arm64": [ "fedora-arm64", "fedora", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "fedora-x64": [ "fedora-x64", "fedora", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "fedora.23": [ "fedora.23", "fedora", "linux", "unix", "any", "base" ], "fedora.23-arm64": [ "fedora.23-arm64", "fedora.23", "fedora-arm64", "fedora", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "fedora.23-x64": [ "fedora.23-x64", "fedora.23", "fedora-x64", "fedora", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "fedora.24": [ "fedora.24", "fedora", "linux", "unix", "any", "base" ], "fedora.24-arm64": [ "fedora.24-arm64", "fedora.24", "fedora-arm64", "fedora", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "fedora.24-x64": [ "fedora.24-x64", "fedora.24", "fedora-x64", "fedora", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "fedora.25": [ "fedora.25", "fedora", "linux", "unix", "any", "base" ], "fedora.25-arm64": [ "fedora.25-arm64", "fedora.25", "fedora-arm64", "fedora", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "fedora.25-x64": [ "fedora.25-x64", "fedora.25", "fedora-x64", "fedora", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "fedora.26": [ "fedora.26", "fedora", "linux", "unix", "any", "base" ], "fedora.26-arm64": [ "fedora.26-arm64", "fedora.26", "fedora-arm64", "fedora", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "fedora.26-x64": [ "fedora.26-x64", "fedora.26", "fedora-x64", "fedora", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "fedora.27": [ "fedora.27", "fedora", "linux", "unix", "any", "base" ], "fedora.27-arm64": [ "fedora.27-arm64", "fedora.27", "fedora-arm64", "fedora", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "fedora.27-x64": [ "fedora.27-x64", "fedora.27", "fedora-x64", "fedora", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "fedora.28": [ "fedora.28", "fedora", "linux", "unix", "any", "base" ], "fedora.28-arm64": [ "fedora.28-arm64", "fedora.28", "fedora-arm64", "fedora", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "fedora.28-x64": [ "fedora.28-x64", "fedora.28", "fedora-x64", "fedora", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "fedora.29": [ "fedora.29", "fedora", "linux", "unix", "any", "base" ], "fedora.29-arm64": [ "fedora.29-arm64", "fedora.29", "fedora-arm64", "fedora", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "fedora.29-x64": [ "fedora.29-x64", "fedora.29", "fedora-x64", "fedora", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "fedora.30": [ "fedora.30", "fedora", "linux", "unix", "any", "base" ], "fedora.30-arm64": [ "fedora.30-arm64", "fedora.30", "fedora-arm64", "fedora", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "fedora.30-x64": [ "fedora.30-x64", 
"fedora.30", "fedora-x64", "fedora", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "fedora.31": [ "fedora.31", "fedora", "linux", "unix", "any", "base" ], "fedora.31-arm64": [ "fedora.31-arm64", "fedora.31", "fedora-arm64", "fedora", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "fedora.31-x64": [ "fedora.31-x64", "fedora.31", "fedora-x64", "fedora", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "fedora.32": [ "fedora.32", "fedora", "linux", "unix", "any", "base" ], "fedora.32-arm64": [ "fedora.32-arm64", "fedora.32", "fedora-arm64", "fedora", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "fedora.32-x64": [ "fedora.32-x64", "fedora.32", "fedora-x64", "fedora", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "fedora.33": [ "fedora.33", "fedora", "linux", "unix", "any", "base" ], "fedora.33-arm64": [ "fedora.33-arm64", "fedora.33", "fedora-arm64", "fedora", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "fedora.33-x64": [ "fedora.33-x64", "fedora.33", "fedora-x64", "fedora", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "fedora.34": [ "fedora.34", "fedora", "linux", "unix", "any", "base" ], "fedora.34-arm64": [ "fedora.34-arm64", "fedora.34", "fedora-arm64", "fedora", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "fedora.34-x64": [ "fedora.34-x64", "fedora.34", "fedora-x64", "fedora", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "fedora.35": [ "fedora.35", "fedora", "linux", "unix", "any", "base" ], "fedora.35-arm64": [ "fedora.35-arm64", "fedora.35", "fedora-arm64", "fedora", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "fedora.35-x64": [ "fedora.35-x64", "fedora.35", "fedora-x64", "fedora", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "fedora.36": [ "fedora.36", "fedora", "linux", "unix", "any", "base" ], "fedora.36-arm64": [ "fedora.36-arm64", "fedora.36", "fedora-arm64", "fedora", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "fedora.36-x64": [ "fedora.36-x64", "fedora.36", "fedora-x64", "fedora", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "freebsd": [ "freebsd", "unix", "any", "base" ], "freebsd-x64": [ "freebsd-x64", "freebsd", "unix-x64", "unix", "any", "base" ], "freebsd.12": [ "freebsd.12", "freebsd", "unix", "any", "base" ], "freebsd.12-x64": [ "freebsd.12-x64", "freebsd.12", "freebsd-x64", "freebsd", "unix-x64", "unix", "any", "base" ], "freebsd.13": [ "freebsd.13", "freebsd.12", "freebsd", "unix", "any", "base" ], "freebsd.13-x64": [ "freebsd.13-x64", "freebsd.13", "freebsd.12-x64", "freebsd.12", "freebsd-x64", "freebsd", "unix-x64", "unix", "any", "base" ], "gentoo": [ "gentoo", "linux", "unix", "any", "base" ], "gentoo-x64": [ "gentoo-x64", "gentoo", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "illumos": [ "illumos", "unix", "any", "base" ], "illumos-x64": [ "illumos-x64", "illumos", "unix-x64", "unix", "any", "base" ], "ios": [ "ios", "unix", "any", "base" ], "ios-arm": [ "ios-arm", "ios", "unix-arm", "unix", "any", "base" ], "ios-arm64": [ "ios-arm64", "ios", "unix-arm64", "unix", "any", "base" ], "ios-x64": [ "ios-x64", "ios", "unix-x64", "unix", "any", "base" ], "ios-x86": [ "ios-x86", "ios", "unix-x86", "unix", "any", "base" ], "ios.10": [ "ios.10", "ios", "unix", "any", "base" ], "ios.10-arm": [ "ios.10-arm", "ios.10", "ios-arm", "ios", "unix-arm", "unix", "any", "base" ], "ios.10-arm64": [ "ios.10-arm64", "ios.10", "ios-arm64", "ios", "unix-arm64", "unix", "any", 
"base" ], "ios.10-x64": [ "ios.10-x64", "ios.10", "ios-x64", "ios", "unix-x64", "unix", "any", "base" ], "ios.10-x86": [ "ios.10-x86", "ios.10", "ios-x86", "ios", "unix-x86", "unix", "any", "base" ], "ios.11": [ "ios.11", "ios.10", "ios", "unix", "any", "base" ], "ios.11-arm64": [ "ios.11-arm64", "ios.11", "ios.10-arm64", "ios.10", "ios-arm64", "ios", "unix-arm64", "unix", "any", "base" ], "ios.11-x64": [ "ios.11-x64", "ios.11", "ios.10-x64", "ios.10", "ios-x64", "ios", "unix-x64", "unix", "any", "base" ], "ios.12": [ "ios.12", "ios.11", "ios.10", "ios", "unix", "any", "base" ], "ios.12-arm64": [ "ios.12-arm64", "ios.12", "ios.11-arm64", "ios.11", "ios.10-arm64", "ios.10", "ios-arm64", "ios", "unix-arm64", "unix", "any", "base" ], "ios.12-x64": [ "ios.12-x64", "ios.12", "ios.11-x64", "ios.11", "ios.10-x64", "ios.10", "ios-x64", "ios", "unix-x64", "unix", "any", "base" ], "ios.13": [ "ios.13", "ios.12", "ios.11", "ios.10", "ios", "unix", "any", "base" ], "ios.13-arm64": [ "ios.13-arm64", "ios.13", "ios.12-arm64", "ios.12", "ios.11-arm64", "ios.11", "ios.10-arm64", "ios.10", "ios-arm64", "ios", "unix-arm64", "unix", "any", "base" ], "ios.13-x64": [ "ios.13-x64", "ios.13", "ios.12-x64", "ios.12", "ios.11-x64", "ios.11", "ios.10-x64", "ios.10", "ios-x64", "ios", "unix-x64", "unix", "any", "base" ], "ios.14": [ "ios.14", "ios.13", "ios.12", "ios.11", "ios.10", "ios", "unix", "any", "base" ], "ios.14-arm64": [ "ios.14-arm64", "ios.14", "ios.13-arm64", "ios.13", "ios.12-arm64", "ios.12", "ios.11-arm64", "ios.11", "ios.10-arm64", "ios.10", "ios-arm64", "ios", "unix-arm64", "unix", "any", "base" ], "ios.14-x64": [ "ios.14-x64", "ios.14", "ios.13-x64", "ios.13", "ios.12-x64", "ios.12", "ios.11-x64", "ios.11", "ios.10-x64", "ios.10", "ios-x64", "ios", "unix-x64", "unix", "any", "base" ], "ios.15": [ "ios.15", "ios.14", "ios.13", "ios.12", "ios.11", "ios.10", "ios", "unix", "any", "base" ], "ios.15-arm64": [ "ios.15-arm64", "ios.15", "ios.14-arm64", "ios.14", "ios.13-arm64", "ios.13", "ios.12-arm64", "ios.12", "ios.11-arm64", "ios.11", "ios.10-arm64", "ios.10", "ios-arm64", "ios", "unix-arm64", "unix", "any", "base" ], "ios.15-x64": [ "ios.15-x64", "ios.15", "ios.14-x64", "ios.14", "ios.13-x64", "ios.13", "ios.12-x64", "ios.12", "ios.11-x64", "ios.11", "ios.10-x64", "ios.10", "ios-x64", "ios", "unix-x64", "unix", "any", "base" ], "iossimulator": [ "iossimulator", "ios", "unix", "any", "base" ], "iossimulator-arm64": [ "iossimulator-arm64", "iossimulator", "ios-arm64", "ios", "unix-arm64", "unix", "any", "base" ], "iossimulator-x64": [ "iossimulator-x64", "iossimulator", "ios-x64", "ios", "unix-x64", "unix", "any", "base" ], "iossimulator-x86": [ "iossimulator-x86", "iossimulator", "ios-x86", "ios", "unix-x86", "unix", "any", "base" ], "iossimulator.10": [ "iossimulator.10", "iossimulator", "ios", "unix", "any", "base" ], "iossimulator.10-arm64": [ "iossimulator.10-arm64", "iossimulator.10", "iossimulator-arm64", "iossimulator", "ios-arm64", "ios", "unix-arm64", "unix", "any", "base" ], "iossimulator.10-x64": [ "iossimulator.10-x64", "iossimulator.10", "iossimulator-x64", "iossimulator", "ios-x64", "ios", "unix-x64", "unix", "any", "base" ], "iossimulator.10-x86": [ "iossimulator.10-x86", "iossimulator.10", "iossimulator-x86", "iossimulator", "ios-x86", "ios", "unix-x86", "unix", "any", "base" ], "iossimulator.11": [ "iossimulator.11", "iossimulator.10", "iossimulator", "ios", "unix", "any", "base" ], "iossimulator.11-arm64": [ "iossimulator.11-arm64", "iossimulator.11", "iossimulator.10-arm64", 
"iossimulator.10", "iossimulator-arm64", "iossimulator", "ios-arm64", "ios", "unix-arm64", "unix", "any", "base" ], "iossimulator.11-x64": [ "iossimulator.11-x64", "iossimulator.11", "iossimulator.10-x64", "iossimulator.10", "iossimulator-x64", "iossimulator", "ios-x64", "ios", "unix-x64", "unix", "any", "base" ], "iossimulator.12": [ "iossimulator.12", "iossimulator.11", "iossimulator.10", "iossimulator", "ios", "unix", "any", "base" ], "iossimulator.12-arm64": [ "iossimulator.12-arm64", "iossimulator.12", "iossimulator.11-arm64", "iossimulator.11", "iossimulator.10-arm64", "iossimulator.10", "iossimulator-arm64", "iossimulator", "ios-arm64", "ios", "unix-arm64", "unix", "any", "base" ], "iossimulator.12-x64": [ "iossimulator.12-x64", "iossimulator.12", "iossimulator.11-x64", "iossimulator.11", "iossimulator.10-x64", "iossimulator.10", "iossimulator-x64", "iossimulator", "ios-x64", "ios", "unix-x64", "unix", "any", "base" ], "iossimulator.13": [ "iossimulator.13", "iossimulator.12", "iossimulator.11", "iossimulator.10", "iossimulator", "ios", "unix", "any", "base" ], "iossimulator.13-arm64": [ "iossimulator.13-arm64", "iossimulator.13", "iossimulator.12-arm64", "iossimulator.12", "iossimulator.11-arm64", "iossimulator.11", "iossimulator.10-arm64", "iossimulator.10", "iossimulator-arm64", "iossimulator", "ios-arm64", "ios", "unix-arm64", "unix", "any", "base" ], "iossimulator.13-x64": [ "iossimulator.13-x64", "iossimulator.13", "iossimulator.12-x64", "iossimulator.12", "iossimulator.11-x64", "iossimulator.11", "iossimulator.10-x64", "iossimulator.10", "iossimulator-x64", "iossimulator", "ios-x64", "ios", "unix-x64", "unix", "any", "base" ], "iossimulator.14": [ "iossimulator.14", "iossimulator.13", "iossimulator.12", "iossimulator.11", "iossimulator.10", "iossimulator", "ios", "unix", "any", "base" ], "iossimulator.14-arm64": [ "iossimulator.14-arm64", "iossimulator.14", "iossimulator.13-arm64", "iossimulator.13", "iossimulator.12-arm64", "iossimulator.12", "iossimulator.11-arm64", "iossimulator.11", "iossimulator.10-arm64", "iossimulator.10", "iossimulator-arm64", "iossimulator", "ios-arm64", "ios", "unix-arm64", "unix", "any", "base" ], "iossimulator.14-x64": [ "iossimulator.14-x64", "iossimulator.14", "iossimulator.13-x64", "iossimulator.13", "iossimulator.12-x64", "iossimulator.12", "iossimulator.11-x64", "iossimulator.11", "iossimulator.10-x64", "iossimulator.10", "iossimulator-x64", "iossimulator", "ios-x64", "ios", "unix-x64", "unix", "any", "base" ], "iossimulator.15": [ "iossimulator.15", "iossimulator.14", "iossimulator.13", "iossimulator.12", "iossimulator.11", "iossimulator.10", "iossimulator", "ios", "unix", "any", "base" ], "iossimulator.15-arm64": [ "iossimulator.15-arm64", "iossimulator.15", "iossimulator.14-arm64", "iossimulator.14", "iossimulator.13-arm64", "iossimulator.13", "iossimulator.12-arm64", "iossimulator.12", "iossimulator.11-arm64", "iossimulator.11", "iossimulator.10-arm64", "iossimulator.10", "iossimulator-arm64", "iossimulator", "ios-arm64", "ios", "unix-arm64", "unix", "any", "base" ], "iossimulator.15-x64": [ "iossimulator.15-x64", "iossimulator.15", "iossimulator.14-x64", "iossimulator.14", "iossimulator.13-x64", "iossimulator.13", "iossimulator.12-x64", "iossimulator.12", "iossimulator.11-x64", "iossimulator.11", "iossimulator.10-x64", "iossimulator.10", "iossimulator-x64", "iossimulator", "ios-x64", "ios", "unix-x64", "unix", "any", "base" ], "linux": [ "linux", "unix", "any", "base" ], "linux-arm": [ "linux-arm", "linux", "unix-arm", "unix", "any", 
"base" ], "linux-arm64": [ "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "linux-armel": [ "linux-armel", "linux", "unix-armel", "unix", "any", "base" ], "linux-loongarch64": [ "linux-loongarch64", "linux", "unix-loongarch64", "unix", "any", "base" ], "linux-armv6": [ "linux-armv6", "linux", "unix-armv6", "unix", "any", "base" ], "linux-mips64": [ "linux-mips64", "linux", "unix-mips64", "unix", "any", "base" ], "linux-musl": [ "linux-musl", "linux", "unix", "any", "base" ], "linux-musl-arm": [ "linux-musl-arm", "linux-musl", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "linux-musl-arm64": [ "linux-musl-arm64", "linux-musl", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "linux-musl-armel": [ "linux-musl-armel", "linux-musl", "linux-armel", "linux", "unix-armel", "unix", "any", "base" ], "linux-musl-s390x": [ "linux-musl-s390x", "linux-musl", "linux-s390x", "linux", "unix-s390x", "unix", "any", "base" ], "linux-musl-x64": [ "linux-musl-x64", "linux-musl", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "linux-musl-x86": [ "linux-musl-x86", "linux-musl", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "linux-s390x": [ "linux-s390x", "linux", "unix-s390x", "unix", "any", "base" ], "linux-x64": [ "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "linux-x86": [ "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "linuxmint.17": [ "linuxmint.17", "ubuntu.14.04", "ubuntu", "debian", "linux", "unix", "any", "base" ], "linuxmint.17-x64": [ "linuxmint.17-x64", "linuxmint.17", "ubuntu.14.04-x64", "ubuntu.14.04", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "linuxmint.17.1": [ "linuxmint.17.1", "linuxmint.17", "ubuntu.14.04", "ubuntu", "debian", "linux", "unix", "any", "base" ], "linuxmint.17.1-x64": [ "linuxmint.17.1-x64", "linuxmint.17.1", "linuxmint.17-x64", "linuxmint.17", "ubuntu.14.04-x64", "ubuntu.14.04", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "linuxmint.17.2": [ "linuxmint.17.2", "linuxmint.17.1", "linuxmint.17", "ubuntu.14.04", "ubuntu", "debian", "linux", "unix", "any", "base" ], "linuxmint.17.2-x64": [ "linuxmint.17.2-x64", "linuxmint.17.2", "linuxmint.17.1-x64", "linuxmint.17.1", "linuxmint.17-x64", "linuxmint.17", "ubuntu.14.04-x64", "ubuntu.14.04", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "linuxmint.17.3": [ "linuxmint.17.3", "linuxmint.17.2", "linuxmint.17.1", "linuxmint.17", "ubuntu.14.04", "ubuntu", "debian", "linux", "unix", "any", "base" ], "linuxmint.17.3-x64": [ "linuxmint.17.3-x64", "linuxmint.17.3", "linuxmint.17.2-x64", "linuxmint.17.2", "linuxmint.17.1-x64", "linuxmint.17.1", "linuxmint.17-x64", "linuxmint.17", "ubuntu.14.04-x64", "ubuntu.14.04", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "linuxmint.18": [ "linuxmint.18", "ubuntu.16.04", "ubuntu", "debian", "linux", "unix", "any", "base" ], "linuxmint.18-x64": [ "linuxmint.18-x64", "linuxmint.18", "ubuntu.16.04-x64", "ubuntu.16.04", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "linuxmint.18.1": [ "linuxmint.18.1", "linuxmint.18", "ubuntu.16.04", "ubuntu", "debian", "linux", "unix", "any", "base" ], "linuxmint.18.1-x64": [ "linuxmint.18.1-x64", "linuxmint.18.1", "linuxmint.18-x64", "linuxmint.18", "ubuntu.16.04-x64", 
"ubuntu.16.04", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "linuxmint.18.2": [ "linuxmint.18.2", "linuxmint.18.1", "linuxmint.18", "ubuntu.16.04", "ubuntu", "debian", "linux", "unix", "any", "base" ], "linuxmint.18.2-x64": [ "linuxmint.18.2-x64", "linuxmint.18.2", "linuxmint.18.1-x64", "linuxmint.18.1", "linuxmint.18-x64", "linuxmint.18", "ubuntu.16.04-x64", "ubuntu.16.04", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "linuxmint.18.3": [ "linuxmint.18.3", "linuxmint.18.2", "linuxmint.18.1", "linuxmint.18", "ubuntu.16.04", "ubuntu", "debian", "linux", "unix", "any", "base" ], "linuxmint.18.3-x64": [ "linuxmint.18.3-x64", "linuxmint.18.3", "linuxmint.18.2-x64", "linuxmint.18.2", "linuxmint.18.1-x64", "linuxmint.18.1", "linuxmint.18-x64", "linuxmint.18", "ubuntu.16.04-x64", "ubuntu.16.04", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "linuxmint.19": [ "linuxmint.19", "ubuntu.18.04", "ubuntu", "debian", "linux", "unix", "any", "base" ], "linuxmint.19-x64": [ "linuxmint.19-x64", "linuxmint.19", "ubuntu.18.04-x64", "ubuntu.18.04", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "linuxmint.19.1": [ "linuxmint.19.1", "linuxmint.19", "ubuntu.18.04", "ubuntu", "debian", "linux", "unix", "any", "base" ], "linuxmint.19.1-x64": [ "linuxmint.19.1-x64", "linuxmint.19.1", "linuxmint.19-x64", "linuxmint.19", "ubuntu.18.04-x64", "ubuntu.18.04", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "linuxmint.19.2": [ "linuxmint.19.2", "linuxmint.19.1", "linuxmint.19", "ubuntu.18.04", "ubuntu", "debian", "linux", "unix", "any", "base" ], "linuxmint.19.2-x64": [ "linuxmint.19.2-x64", "linuxmint.19.2", "linuxmint.19.1-x64", "linuxmint.19.1", "linuxmint.19-x64", "linuxmint.19", "ubuntu.18.04-x64", "ubuntu.18.04", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "maccatalyst": [ "maccatalyst", "ios", "unix", "any", "base" ], "maccatalyst-arm64": [ "maccatalyst-arm64", "maccatalyst", "ios-arm64", "ios", "unix-arm64", "unix", "any", "base" ], "maccatalyst-x64": [ "maccatalyst-x64", "maccatalyst", "ios-x64", "ios", "unix-x64", "unix", "any", "base" ], "maccatalyst.13": [ "maccatalyst.13", "maccatalyst", "ios", "unix", "any", "base" ], "maccatalyst.13-arm64": [ "maccatalyst.13-arm64", "maccatalyst.13", "maccatalyst-arm64", "maccatalyst", "ios-arm64", "ios", "unix-arm64", "unix", "any", "base" ], "maccatalyst.13-x64": [ "maccatalyst.13-x64", "maccatalyst.13", "maccatalyst-x64", "maccatalyst", "ios-x64", "ios", "unix-x64", "unix", "any", "base" ], "maccatalyst.14": [ "maccatalyst.14", "maccatalyst.13", "maccatalyst", "ios", "unix", "any", "base" ], "maccatalyst.14-arm64": [ "maccatalyst.14-arm64", "maccatalyst.14", "maccatalyst.13-arm64", "maccatalyst.13", "maccatalyst-arm64", "maccatalyst", "ios-arm64", "ios", "unix-arm64", "unix", "any", "base" ], "maccatalyst.14-x64": [ "maccatalyst.14-x64", "maccatalyst.14", "maccatalyst.13-x64", "maccatalyst.13", "maccatalyst-x64", "maccatalyst", "ios-x64", "ios", "unix-x64", "unix", "any", "base" ], "maccatalyst.15": [ "maccatalyst.15", "maccatalyst.14", "maccatalyst.13", "maccatalyst", "ios", "unix", "any", "base" ], "maccatalyst.15-arm64": [ "maccatalyst.15-arm64", "maccatalyst.15", "maccatalyst.14-arm64", "maccatalyst.14", 
"maccatalyst.13-arm64", "maccatalyst.13", "maccatalyst-arm64", "maccatalyst", "ios-arm64", "ios", "unix-arm64", "unix", "any", "base" ], "maccatalyst.15-x64": [ "maccatalyst.15-x64", "maccatalyst.15", "maccatalyst.14-x64", "maccatalyst.14", "maccatalyst.13-x64", "maccatalyst.13", "maccatalyst-x64", "maccatalyst", "ios-x64", "ios", "unix-x64", "unix", "any", "base" ], "manjaro": [ "manjaro", "arch", "linux", "unix", "any", "base" ], "manjaro-x64": [ "manjaro-x64", "manjaro", "arch-x64", "arch", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ol": [ "ol", "rhel", "linux", "unix", "any", "base" ], "ol-x64": [ "ol-x64", "ol", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ol.7": [ "ol.7", "ol", "rhel.7", "rhel", "linux", "unix", "any", "base" ], "ol.7-x64": [ "ol.7-x64", "ol.7", "ol-x64", "rhel.7-x64", "ol", "rhel.7", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ol.7.0": [ "ol.7.0", "ol.7", "rhel.7.0", "ol", "rhel.7", "rhel", "linux", "unix", "any", "base" ], "ol.7.0-x64": [ "ol.7.0-x64", "ol.7.0", "ol.7-x64", "rhel.7.0-x64", "ol.7", "rhel.7.0", "ol-x64", "rhel.7-x64", "ol", "rhel.7", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ol.7.1": [ "ol.7.1", "ol.7.0", "rhel.7.1", "ol.7", "rhel.7.0", "ol", "rhel.7", "rhel", "linux", "unix", "any", "base" ], "ol.7.1-x64": [ "ol.7.1-x64", "ol.7.1", "ol.7.0-x64", "rhel.7.1-x64", "ol.7.0", "rhel.7.1", "ol.7-x64", "rhel.7.0-x64", "ol.7", "rhel.7.0", "ol-x64", "rhel.7-x64", "ol", "rhel.7", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ol.7.2": [ "ol.7.2", "ol.7.1", "rhel.7.2", "ol.7.0", "rhel.7.1", "ol.7", "rhel.7.0", "ol", "rhel.7", "rhel", "linux", "unix", "any", "base" ], "ol.7.2-x64": [ "ol.7.2-x64", "ol.7.2", "ol.7.1-x64", "rhel.7.2-x64", "ol.7.1", "rhel.7.2", "ol.7.0-x64", "rhel.7.1-x64", "ol.7.0", "rhel.7.1", "ol.7-x64", "rhel.7.0-x64", "ol.7", "rhel.7.0", "ol-x64", "rhel.7-x64", "ol", "rhel.7", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ol.7.3": [ "ol.7.3", "ol.7.2", "rhel.7.3", "ol.7.1", "rhel.7.2", "ol.7.0", "rhel.7.1", "ol.7", "rhel.7.0", "ol", "rhel.7", "rhel", "linux", "unix", "any", "base" ], "ol.7.3-x64": [ "ol.7.3-x64", "ol.7.3", "ol.7.2-x64", "rhel.7.3-x64", "ol.7.2", "rhel.7.3", "ol.7.1-x64", "rhel.7.2-x64", "ol.7.1", "rhel.7.2", "ol.7.0-x64", "rhel.7.1-x64", "ol.7.0", "rhel.7.1", "ol.7-x64", "rhel.7.0-x64", "ol.7", "rhel.7.0", "ol-x64", "rhel.7-x64", "ol", "rhel.7", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ol.7.4": [ "ol.7.4", "ol.7.3", "rhel.7.4", "ol.7.2", "rhel.7.3", "ol.7.1", "rhel.7.2", "ol.7.0", "rhel.7.1", "ol.7", "rhel.7.0", "ol", "rhel.7", "rhel", "linux", "unix", "any", "base" ], "ol.7.4-x64": [ "ol.7.4-x64", "ol.7.4", "ol.7.3-x64", "rhel.7.4-x64", "ol.7.3", "rhel.7.4", "ol.7.2-x64", "rhel.7.3-x64", "ol.7.2", "rhel.7.3", "ol.7.1-x64", "rhel.7.2-x64", "ol.7.1", "rhel.7.2", "ol.7.0-x64", "rhel.7.1-x64", "ol.7.0", "rhel.7.1", "ol.7-x64", "rhel.7.0-x64", "ol.7", "rhel.7.0", "ol-x64", "rhel.7-x64", "ol", "rhel.7", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ol.7.5": [ "ol.7.5", "ol.7.4", "rhel.7.5", "ol.7.3", "rhel.7.4", "ol.7.2", "rhel.7.3", "ol.7.1", "rhel.7.2", "ol.7.0", "rhel.7.1", "ol.7", "rhel.7.0", "ol", "rhel.7", "rhel", "linux", "unix", "any", "base" ], "ol.7.5-x64": [ "ol.7.5-x64", "ol.7.5", "ol.7.4-x64", "rhel.7.5-x64", "ol.7.4", "rhel.7.5", "ol.7.3-x64", 
"rhel.7.4-x64", "ol.7.3", "rhel.7.4", "ol.7.2-x64", "rhel.7.3-x64", "ol.7.2", "rhel.7.3", "ol.7.1-x64", "rhel.7.2-x64", "ol.7.1", "rhel.7.2", "ol.7.0-x64", "rhel.7.1-x64", "ol.7.0", "rhel.7.1", "ol.7-x64", "rhel.7.0-x64", "ol.7", "rhel.7.0", "ol-x64", "rhel.7-x64", "ol", "rhel.7", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ol.7.6": [ "ol.7.6", "ol.7.5", "rhel.7.6", "ol.7.4", "rhel.7.5", "ol.7.3", "rhel.7.4", "ol.7.2", "rhel.7.3", "ol.7.1", "rhel.7.2", "ol.7.0", "rhel.7.1", "ol.7", "rhel.7.0", "ol", "rhel.7", "rhel", "linux", "unix", "any", "base" ], "ol.7.6-x64": [ "ol.7.6-x64", "ol.7.6", "ol.7.5-x64", "rhel.7.6-x64", "ol.7.5", "rhel.7.6", "ol.7.4-x64", "rhel.7.5-x64", "ol.7.4", "rhel.7.5", "ol.7.3-x64", "rhel.7.4-x64", "ol.7.3", "rhel.7.4", "ol.7.2-x64", "rhel.7.3-x64", "ol.7.2", "rhel.7.3", "ol.7.1-x64", "rhel.7.2-x64", "ol.7.1", "rhel.7.2", "ol.7.0-x64", "rhel.7.1-x64", "ol.7.0", "rhel.7.1", "ol.7-x64", "rhel.7.0-x64", "ol.7", "rhel.7.0", "ol-x64", "rhel.7-x64", "ol", "rhel.7", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ol.8": [ "ol.8", "ol", "rhel.8", "rhel", "linux", "unix", "any", "base" ], "ol.8-x64": [ "ol.8-x64", "ol.8", "ol-x64", "rhel.8-x64", "ol", "rhel.8", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ol.8.0": [ "ol.8.0", "ol.8", "rhel.8.0", "ol", "rhel.8", "rhel", "linux", "unix", "any", "base" ], "ol.8.0-x64": [ "ol.8.0-x64", "ol.8.0", "ol.8-x64", "rhel.8.0-x64", "ol.8", "rhel.8.0", "ol-x64", "rhel.8-x64", "ol", "rhel.8", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "omnios": [ "omnios", "illumos", "unix", "any", "base" ], "omnios-x64": [ "omnios-x64", "omnios", "illumos-x64", "illumos", "unix-x64", "unix", "any", "base" ], "omnios.15": [ "omnios.15", "omnios", "illumos", "unix", "any", "base" ], "omnios.15-x64": [ "omnios.15-x64", "omnios.15", "omnios-x64", "omnios", "illumos-x64", "illumos", "unix-x64", "unix", "any", "base" ], "openindiana": [ "openindiana", "illumos", "unix", "any", "base" ], "openindiana-x64": [ "openindiana-x64", "openindiana", "illumos-x64", "illumos", "unix-x64", "unix", "any", "base" ], "opensuse": [ "opensuse", "linux", "unix", "any", "base" ], "opensuse-x64": [ "opensuse-x64", "opensuse", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "opensuse.13.2": [ "opensuse.13.2", "opensuse", "linux", "unix", "any", "base" ], "opensuse.13.2-x64": [ "opensuse.13.2-x64", "opensuse.13.2", "opensuse-x64", "opensuse", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "opensuse.15.0": [ "opensuse.15.0", "opensuse", "linux", "unix", "any", "base" ], "opensuse.15.0-x64": [ "opensuse.15.0-x64", "opensuse.15.0", "opensuse-x64", "opensuse", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "opensuse.15.1": [ "opensuse.15.1", "opensuse", "linux", "unix", "any", "base" ], "opensuse.15.1-x64": [ "opensuse.15.1-x64", "opensuse.15.1", "opensuse-x64", "opensuse", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "opensuse.42.1": [ "opensuse.42.1", "opensuse", "linux", "unix", "any", "base" ], "opensuse.42.1-x64": [ "opensuse.42.1-x64", "opensuse.42.1", "opensuse-x64", "opensuse", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "opensuse.42.2": [ "opensuse.42.2", "opensuse", "linux", "unix", "any", "base" ], "opensuse.42.2-x64": [ "opensuse.42.2-x64", "opensuse.42.2", "opensuse-x64", "opensuse", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "opensuse.42.3": [ 
"opensuse.42.3", "opensuse", "linux", "unix", "any", "base" ], "opensuse.42.3-x64": [ "opensuse.42.3-x64", "opensuse.42.3", "opensuse-x64", "opensuse", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "osx": [ "osx", "unix", "any", "base" ], "osx-arm64": [ "osx-arm64", "osx", "unix-arm64", "unix", "any", "base" ], "osx-x64": [ "osx-x64", "osx", "unix-x64", "unix", "any", "base" ], "osx.10.10": [ "osx.10.10", "osx", "unix", "any", "base" ], "osx.10.10-arm64": [ "osx.10.10-arm64", "osx.10.10", "osx-arm64", "osx", "unix-arm64", "unix", "any", "base" ], "osx.10.10-x64": [ "osx.10.10-x64", "osx.10.10", "osx-x64", "osx", "unix-x64", "unix", "any", "base" ], "osx.10.11": [ "osx.10.11", "osx.10.10", "osx", "unix", "any", "base" ], "osx.10.11-arm64": [ "osx.10.11-arm64", "osx.10.11", "osx.10.10-arm64", "osx.10.10", "osx-arm64", "osx", "unix-arm64", "unix", "any", "base" ], "osx.10.11-x64": [ "osx.10.11-x64", "osx.10.11", "osx.10.10-x64", "osx.10.10", "osx-x64", "osx", "unix-x64", "unix", "any", "base" ], "osx.10.12": [ "osx.10.12", "osx.10.11", "osx.10.10", "osx", "unix", "any", "base" ], "osx.10.12-arm64": [ "osx.10.12-arm64", "osx.10.12", "osx.10.11-arm64", "osx.10.11", "osx.10.10-arm64", "osx.10.10", "osx-arm64", "osx", "unix-arm64", "unix", "any", "base" ], "osx.10.12-x64": [ "osx.10.12-x64", "osx.10.12", "osx.10.11-x64", "osx.10.11", "osx.10.10-x64", "osx.10.10", "osx-x64", "osx", "unix-x64", "unix", "any", "base" ], "osx.10.13": [ "osx.10.13", "osx.10.12", "osx.10.11", "osx.10.10", "osx", "unix", "any", "base" ], "osx.10.13-arm64": [ "osx.10.13-arm64", "osx.10.13", "osx.10.12-arm64", "osx.10.12", "osx.10.11-arm64", "osx.10.11", "osx.10.10-arm64", "osx.10.10", "osx-arm64", "osx", "unix-arm64", "unix", "any", "base" ], "osx.10.13-x64": [ "osx.10.13-x64", "osx.10.13", "osx.10.12-x64", "osx.10.12", "osx.10.11-x64", "osx.10.11", "osx.10.10-x64", "osx.10.10", "osx-x64", "osx", "unix-x64", "unix", "any", "base" ], "osx.10.14": [ "osx.10.14", "osx.10.13", "osx.10.12", "osx.10.11", "osx.10.10", "osx", "unix", "any", "base" ], "osx.10.14-arm64": [ "osx.10.14-arm64", "osx.10.14", "osx.10.13-arm64", "osx.10.13", "osx.10.12-arm64", "osx.10.12", "osx.10.11-arm64", "osx.10.11", "osx.10.10-arm64", "osx.10.10", "osx-arm64", "osx", "unix-arm64", "unix", "any", "base" ], "osx.10.14-x64": [ "osx.10.14-x64", "osx.10.14", "osx.10.13-x64", "osx.10.13", "osx.10.12-x64", "osx.10.12", "osx.10.11-x64", "osx.10.11", "osx.10.10-x64", "osx.10.10", "osx-x64", "osx", "unix-x64", "unix", "any", "base" ], "osx.10.15": [ "osx.10.15", "osx.10.14", "osx.10.13", "osx.10.12", "osx.10.11", "osx.10.10", "osx", "unix", "any", "base" ], "osx.10.15-arm64": [ "osx.10.15-arm64", "osx.10.15", "osx.10.14-arm64", "osx.10.14", "osx.10.13-arm64", "osx.10.13", "osx.10.12-arm64", "osx.10.12", "osx.10.11-arm64", "osx.10.11", "osx.10.10-arm64", "osx.10.10", "osx-arm64", "osx", "unix-arm64", "unix", "any", "base" ], "osx.10.15-x64": [ "osx.10.15-x64", "osx.10.15", "osx.10.14-x64", "osx.10.14", "osx.10.13-x64", "osx.10.13", "osx.10.12-x64", "osx.10.12", "osx.10.11-x64", "osx.10.11", "osx.10.10-x64", "osx.10.10", "osx-x64", "osx", "unix-x64", "unix", "any", "base" ], "osx.10.16": [ "osx.10.16", "osx.10.15", "osx.10.14", "osx.10.13", "osx.10.12", "osx.10.11", "osx.10.10", "osx", "unix", "any", "base" ], "osx.10.16-arm64": [ "osx.10.16-arm64", "osx.10.16", "osx.10.15-arm64", "osx.10.15", "osx.10.14-arm64", "osx.10.14", "osx.10.13-arm64", "osx.10.13", "osx.10.12-arm64", "osx.10.12", "osx.10.11-arm64", "osx.10.11", "osx.10.10-arm64", 
"osx.10.10", "osx-arm64", "osx", "unix-arm64", "unix", "any", "base" ], "osx.10.16-x64": [ "osx.10.16-x64", "osx.10.16", "osx.10.15-x64", "osx.10.15", "osx.10.14-x64", "osx.10.14", "osx.10.13-x64", "osx.10.13", "osx.10.12-x64", "osx.10.12", "osx.10.11-x64", "osx.10.11", "osx.10.10-x64", "osx.10.10", "osx-x64", "osx", "unix-x64", "unix", "any", "base" ], "osx.11.0": [ "osx.11.0", "osx.10.16", "osx.10.15", "osx.10.14", "osx.10.13", "osx.10.12", "osx.10.11", "osx.10.10", "osx", "unix", "any", "base" ], "osx.11.0-arm64": [ "osx.11.0-arm64", "osx.11.0", "osx.10.16-arm64", "osx.10.16", "osx.10.15-arm64", "osx.10.15", "osx.10.14-arm64", "osx.10.14", "osx.10.13-arm64", "osx.10.13", "osx.10.12-arm64", "osx.10.12", "osx.10.11-arm64", "osx.10.11", "osx.10.10-arm64", "osx.10.10", "osx-arm64", "osx", "unix-arm64", "unix", "any", "base" ], "osx.11.0-x64": [ "osx.11.0-x64", "osx.11.0", "osx.10.16-x64", "osx.10.16", "osx.10.15-x64", "osx.10.15", "osx.10.14-x64", "osx.10.14", "osx.10.13-x64", "osx.10.13", "osx.10.12-x64", "osx.10.12", "osx.10.11-x64", "osx.10.11", "osx.10.10-x64", "osx.10.10", "osx-x64", "osx", "unix-x64", "unix", "any", "base" ], "osx.12": [ "osx.12", "osx.11.0", "osx.10.16", "osx.10.15", "osx.10.14", "osx.10.13", "osx.10.12", "osx.10.11", "osx.10.10", "osx", "unix", "any", "base" ], "osx.12-arm64": [ "osx.12-arm64", "osx.12", "osx.11.0-arm64", "osx.11.0", "osx.10.16-arm64", "osx.10.16", "osx.10.15-arm64", "osx.10.15", "osx.10.14-arm64", "osx.10.14", "osx.10.13-arm64", "osx.10.13", "osx.10.12-arm64", "osx.10.12", "osx.10.11-arm64", "osx.10.11", "osx.10.10-arm64", "osx.10.10", "osx-arm64", "osx", "unix-arm64", "unix", "any", "base" ], "osx.12-x64": [ "osx.12-x64", "osx.12", "osx.11.0-x64", "osx.11.0", "osx.10.16-x64", "osx.10.16", "osx.10.15-x64", "osx.10.15", "osx.10.14-x64", "osx.10.14", "osx.10.13-x64", "osx.10.13", "osx.10.12-x64", "osx.10.12", "osx.10.11-x64", "osx.10.11", "osx.10.10-x64", "osx.10.10", "osx-x64", "osx", "unix-x64", "unix", "any", "base" ], "rhel": [ "rhel", "linux", "unix", "any", "base" ], "rhel-arm64": [ "rhel-arm64", "rhel", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "rhel-x64": [ "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "rhel.6": [ "rhel.6", "rhel", "linux", "unix", "any", "base" ], "rhel.6-x64": [ "rhel.6-x64", "rhel.6", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "rhel.7": [ "rhel.7", "rhel", "linux", "unix", "any", "base" ], "rhel.7-x64": [ "rhel.7-x64", "rhel.7", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "rhel.7.0": [ "rhel.7.0", "rhel.7", "rhel", "linux", "unix", "any", "base" ], "rhel.7.0-x64": [ "rhel.7.0-x64", "rhel.7.0", "rhel.7-x64", "rhel.7", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "rhel.7.1": [ "rhel.7.1", "rhel.7.0", "rhel.7", "rhel", "linux", "unix", "any", "base" ], "rhel.7.1-x64": [ "rhel.7.1-x64", "rhel.7.1", "rhel.7.0-x64", "rhel.7.0", "rhel.7-x64", "rhel.7", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "rhel.7.2": [ "rhel.7.2", "rhel.7.1", "rhel.7.0", "rhel.7", "rhel", "linux", "unix", "any", "base" ], "rhel.7.2-x64": [ "rhel.7.2-x64", "rhel.7.2", "rhel.7.1-x64", "rhel.7.1", "rhel.7.0-x64", "rhel.7.0", "rhel.7-x64", "rhel.7", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "rhel.7.3": [ "rhel.7.3", "rhel.7.2", "rhel.7.1", "rhel.7.0", "rhel.7", "rhel", "linux", "unix", "any", "base" ], "rhel.7.3-x64": [ 
"rhel.7.3-x64", "rhel.7.3", "rhel.7.2-x64", "rhel.7.2", "rhel.7.1-x64", "rhel.7.1", "rhel.7.0-x64", "rhel.7.0", "rhel.7-x64", "rhel.7", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "rhel.7.4": [ "rhel.7.4", "rhel.7.3", "rhel.7.2", "rhel.7.1", "rhel.7.0", "rhel.7", "rhel", "linux", "unix", "any", "base" ], "rhel.7.4-x64": [ "rhel.7.4-x64", "rhel.7.4", "rhel.7.3-x64", "rhel.7.3", "rhel.7.2-x64", "rhel.7.2", "rhel.7.1-x64", "rhel.7.1", "rhel.7.0-x64", "rhel.7.0", "rhel.7-x64", "rhel.7", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "rhel.7.5": [ "rhel.7.5", "rhel.7.4", "rhel.7.3", "rhel.7.2", "rhel.7.1", "rhel.7.0", "rhel.7", "rhel", "linux", "unix", "any", "base" ], "rhel.7.5-x64": [ "rhel.7.5-x64", "rhel.7.5", "rhel.7.4-x64", "rhel.7.4", "rhel.7.3-x64", "rhel.7.3", "rhel.7.2-x64", "rhel.7.2", "rhel.7.1-x64", "rhel.7.1", "rhel.7.0-x64", "rhel.7.0", "rhel.7-x64", "rhel.7", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "rhel.7.6": [ "rhel.7.6", "rhel.7.5", "rhel.7.4", "rhel.7.3", "rhel.7.2", "rhel.7.1", "rhel.7.0", "rhel.7", "rhel", "linux", "unix", "any", "base" ], "rhel.7.6-x64": [ "rhel.7.6-x64", "rhel.7.6", "rhel.7.5-x64", "rhel.7.5", "rhel.7.4-x64", "rhel.7.4", "rhel.7.3-x64", "rhel.7.3", "rhel.7.2-x64", "rhel.7.2", "rhel.7.1-x64", "rhel.7.1", "rhel.7.0-x64", "rhel.7.0", "rhel.7-x64", "rhel.7", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "rhel.8": [ "rhel.8", "rhel", "linux", "unix", "any", "base" ], "rhel.8-arm64": [ "rhel.8-arm64", "rhel.8", "rhel-arm64", "rhel", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "rhel.8-x64": [ "rhel.8-x64", "rhel.8", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "rhel.8.0": [ "rhel.8.0", "rhel.8", "rhel", "linux", "unix", "any", "base" ], "rhel.8.0-arm64": [ "rhel.8.0-arm64", "rhel.8.0", "rhel.8-arm64", "rhel.8", "rhel-arm64", "rhel", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "rhel.8.0-x64": [ "rhel.8.0-x64", "rhel.8.0", "rhel.8-x64", "rhel.8", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "rhel.8.1": [ "rhel.8.1", "rhel.8.0", "rhel.8", "rhel", "linux", "unix", "any", "base" ], "rhel.8.1-arm64": [ "rhel.8.1-arm64", "rhel.8.1", "rhel.8.0-arm64", "rhel.8.0", "rhel.8-arm64", "rhel.8", "rhel-arm64", "rhel", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "rhel.8.1-x64": [ "rhel.8.1-x64", "rhel.8.1", "rhel.8.0-x64", "rhel.8.0", "rhel.8-x64", "rhel.8", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "rhel.9": [ "rhel.9", "rhel", "linux", "unix", "any", "base" ], "rhel.9-arm64": [ "rhel.9-arm64", "rhel.9", "rhel-arm64", "rhel", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "rhel.9-x64": [ "rhel.9-x64", "rhel.9", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "rocky": [ "rocky", "rhel", "linux", "unix", "any", "base" ], "rocky-arm64": [ "rocky-arm64", "rocky", "rhel-arm64", "rhel", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "rocky-x64": [ "rocky-x64", "rocky", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "rocky.8": [ "rocky.8", "rocky", "rhel.8", "rhel", "linux", "unix", "any", "base" ], "rocky.8-arm64": [ "rocky.8-arm64", "rocky.8", "rocky-arm64", "rhel.8-arm64", "rocky", "rhel.8", "rhel-arm64", "rhel", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "rocky.8-x64": [ 
"rocky.8-x64", "rocky.8", "rocky-x64", "rhel.8-x64", "rocky", "rhel.8", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "rocky.9": [ "rocky.9", "rocky", "rhel.9", "rhel", "linux", "unix", "any", "base" ], "rocky.9-arm64": [ "rocky.9-arm64", "rocky.9", "rocky-arm64", "rhel.9-arm64", "rocky", "rhel.9", "rhel-arm64", "rhel", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "rocky.9-x64": [ "rocky.9-x64", "rocky.9", "rocky-x64", "rhel.9-x64", "rocky", "rhel.9", "rhel-x64", "rhel", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "sles": [ "sles", "linux", "unix", "any", "base" ], "sles-x64": [ "sles-x64", "sles", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "sles.12": [ "sles.12", "sles", "linux", "unix", "any", "base" ], "sles.12-x64": [ "sles.12-x64", "sles.12", "sles-x64", "sles", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "sles.12.1": [ "sles.12.1", "sles.12", "sles", "linux", "unix", "any", "base" ], "sles.12.1-x64": [ "sles.12.1-x64", "sles.12.1", "sles.12-x64", "sles.12", "sles-x64", "sles", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "sles.12.2": [ "sles.12.2", "sles.12.1", "sles.12", "sles", "linux", "unix", "any", "base" ], "sles.12.2-x64": [ "sles.12.2-x64", "sles.12.2", "sles.12.1-x64", "sles.12.1", "sles.12-x64", "sles.12", "sles-x64", "sles", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "sles.12.3": [ "sles.12.3", "sles.12.2", "sles.12.1", "sles.12", "sles", "linux", "unix", "any", "base" ], "sles.12.3-x64": [ "sles.12.3-x64", "sles.12.3", "sles.12.2-x64", "sles.12.2", "sles.12.1-x64", "sles.12.1", "sles.12-x64", "sles.12", "sles-x64", "sles", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "sles.12.4": [ "sles.12.4", "sles.12.3", "sles.12.2", "sles.12.1", "sles.12", "sles", "linux", "unix", "any", "base" ], "sles.12.4-x64": [ "sles.12.4-x64", "sles.12.4", "sles.12.3-x64", "sles.12.3", "sles.12.2-x64", "sles.12.2", "sles.12.1-x64", "sles.12.1", "sles.12-x64", "sles.12", "sles-x64", "sles", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "sles.15": [ "sles.15", "sles.12.4", "sles.12.3", "sles.12.2", "sles.12.1", "sles.12", "sles", "linux", "unix", "any", "base" ], "sles.15-x64": [ "sles.15-x64", "sles.15", "sles.12.4-x64", "sles.12.4", "sles.12.3-x64", "sles.12.3", "sles.12.2-x64", "sles.12.2", "sles.12.1-x64", "sles.12.1", "sles.12-x64", "sles.12", "sles-x64", "sles", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "sles.15.1": [ "sles.15.1", "sles.15", "sles.12.4", "sles.12.3", "sles.12.2", "sles.12.1", "sles.12", "sles", "linux", "unix", "any", "base" ], "sles.15.1-x64": [ "sles.15.1-x64", "sles.15.1", "sles.15-x64", "sles.15", "sles.12.4-x64", "sles.12.4", "sles.12.3-x64", "sles.12.3", "sles.12.2-x64", "sles.12.2", "sles.12.1-x64", "sles.12.1", "sles.12-x64", "sles.12", "sles-x64", "sles", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "smartos": [ "smartos", "illumos", "unix", "any", "base" ], "smartos-x64": [ "smartos-x64", "smartos", "illumos-x64", "illumos", "unix-x64", "unix", "any", "base" ], "smartos.2020": [ "smartos.2020", "smartos", "illumos", "unix", "any", "base" ], "smartos.2020-x64": [ "smartos.2020-x64", "smartos.2020", "smartos-x64", "smartos", "illumos-x64", "illumos", "unix-x64", "unix", "any", "base" ], "smartos.2021": [ "smartos.2021", "smartos.2020", "smartos", "illumos", "unix", "any", "base" ], "smartos.2021-x64": [ "smartos.2021-x64", "smartos.2021", "smartos.2020-x64", "smartos.2020", 
"smartos-x64", "smartos", "illumos-x64", "illumos", "unix-x64", "unix", "any", "base" ], "solaris": [ "solaris", "unix", "any", "base" ], "solaris-x64": [ "solaris-x64", "solaris", "unix-x64", "unix", "any", "base" ], "solaris.11": [ "solaris.11", "solaris", "unix", "any", "base" ], "solaris.11-x64": [ "solaris.11-x64", "solaris.11", "solaris-x64", "solaris", "unix-x64", "unix", "any", "base" ], "tizen": [ "tizen", "linux", "unix", "any", "base" ], "tizen-arm64": [ "tizen-arm64", "tizen", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "tizen-armel": [ "tizen-armel", "tizen", "linux-armel", "linux", "unix-armel", "unix", "any", "base" ], "tizen-x86": [ "tizen-x86", "tizen", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "tizen.4.0.0": [ "tizen.4.0.0", "tizen", "linux", "unix", "any", "base" ], "tizen.4.0.0-arm64": [ "tizen.4.0.0-arm64", "tizen.4.0.0", "tizen-arm64", "tizen", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "tizen.4.0.0-armel": [ "tizen.4.0.0-armel", "tizen.4.0.0", "tizen-armel", "tizen", "linux-armel", "linux", "unix-armel", "unix", "any", "base" ], "tizen.4.0.0-x86": [ "tizen.4.0.0-x86", "tizen.4.0.0", "tizen-x86", "tizen", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "tizen.5.0.0": [ "tizen.5.0.0", "tizen.4.0.0", "tizen", "linux", "unix", "any", "base" ], "tizen.5.0.0-arm64": [ "tizen.5.0.0-arm64", "tizen.5.0.0", "tizen.4.0.0-arm64", "tizen.4.0.0", "tizen-arm64", "tizen", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "tizen.5.0.0-armel": [ "tizen.5.0.0-armel", "tizen.5.0.0", "tizen.4.0.0-armel", "tizen.4.0.0", "tizen-armel", "tizen", "linux-armel", "linux", "unix-armel", "unix", "any", "base" ], "tizen.5.0.0-x86": [ "tizen.5.0.0-x86", "tizen.5.0.0", "tizen.4.0.0-x86", "tizen.4.0.0", "tizen-x86", "tizen", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "tizen.5.5.0": [ "tizen.5.5.0", "tizen.5.0.0", "tizen.4.0.0", "tizen", "linux", "unix", "any", "base" ], "tizen.5.5.0-arm64": [ "tizen.5.5.0-arm64", "tizen.5.5.0", "tizen.5.0.0-arm64", "tizen.5.0.0", "tizen.4.0.0-arm64", "tizen.4.0.0", "tizen-arm64", "tizen", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "tizen.5.5.0-armel": [ "tizen.5.5.0-armel", "tizen.5.5.0", "tizen.5.0.0-armel", "tizen.5.0.0", "tizen.4.0.0-armel", "tizen.4.0.0", "tizen-armel", "tizen", "linux-armel", "linux", "unix-armel", "unix", "any", "base" ], "tizen.5.5.0-x86": [ "tizen.5.5.0-x86", "tizen.5.5.0", "tizen.5.0.0-x86", "tizen.5.0.0", "tizen.4.0.0-x86", "tizen.4.0.0", "tizen-x86", "tizen", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "tizen.6.0.0": [ "tizen.6.0.0", "tizen.5.5.0", "tizen.5.0.0", "tizen.4.0.0", "tizen", "linux", "unix", "any", "base" ], "tizen.6.0.0-arm64": [ "tizen.6.0.0-arm64", "tizen.6.0.0", "tizen.5.5.0-arm64", "tizen.5.5.0", "tizen.5.0.0-arm64", "tizen.5.0.0", "tizen.4.0.0-arm64", "tizen.4.0.0", "tizen-arm64", "tizen", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "tizen.6.0.0-armel": [ "tizen.6.0.0-armel", "tizen.6.0.0", "tizen.5.5.0-armel", "tizen.5.5.0", "tizen.5.0.0-armel", "tizen.5.0.0", "tizen.4.0.0-armel", "tizen.4.0.0", "tizen-armel", "tizen", "linux-armel", "linux", "unix-armel", "unix", "any", "base" ], "tizen.6.0.0-x86": [ "tizen.6.0.0-x86", "tizen.6.0.0", "tizen.5.5.0-x86", "tizen.5.5.0", "tizen.5.0.0-x86", "tizen.5.0.0", "tizen.4.0.0-x86", "tizen.4.0.0", "tizen-x86", "tizen", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "tizen.6.5.0": [ "tizen.6.5.0", "tizen.6.0.0", 
"tizen.5.5.0", "tizen.5.0.0", "tizen.4.0.0", "tizen", "linux", "unix", "any", "base" ], "tizen.6.5.0-arm64": [ "tizen.6.5.0-arm64", "tizen.6.5.0", "tizen.6.0.0-arm64", "tizen.6.0.0", "tizen.5.5.0-arm64", "tizen.5.5.0", "tizen.5.0.0-arm64", "tizen.5.0.0", "tizen.4.0.0-arm64", "tizen.4.0.0", "tizen-arm64", "tizen", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "tizen.6.5.0-armel": [ "tizen.6.5.0-armel", "tizen.6.5.0", "tizen.6.0.0-armel", "tizen.6.0.0", "tizen.5.5.0-armel", "tizen.5.5.0", "tizen.5.0.0-armel", "tizen.5.0.0", "tizen.4.0.0-armel", "tizen.4.0.0", "tizen-armel", "tizen", "linux-armel", "linux", "unix-armel", "unix", "any", "base" ], "tizen.6.5.0-x86": [ "tizen.6.5.0-x86", "tizen.6.5.0", "tizen.6.0.0-x86", "tizen.6.0.0", "tizen.5.5.0-x86", "tizen.5.5.0", "tizen.5.0.0-x86", "tizen.5.0.0", "tizen.4.0.0-x86", "tizen.4.0.0", "tizen-x86", "tizen", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "tvos": [ "tvos", "unix", "any", "base" ], "tvos-arm64": [ "tvos-arm64", "tvos", "unix-arm64", "unix", "any", "base" ], "tvos-x64": [ "tvos-x64", "tvos", "unix-x64", "unix", "any", "base" ], "tvos.10": [ "tvos.10", "tvos", "unix", "any", "base" ], "tvos.10-arm64": [ "tvos.10-arm64", "tvos.10", "tvos-arm64", "tvos", "unix-arm64", "unix", "any", "base" ], "tvos.10-x64": [ "tvos.10-x64", "tvos.10", "tvos-x64", "tvos", "unix-x64", "unix", "any", "base" ], "tvos.11": [ "tvos.11", "tvos.10", "tvos", "unix", "any", "base" ], "tvos.11-arm64": [ "tvos.11-arm64", "tvos.11", "tvos.10-arm64", "tvos.10", "tvos-arm64", "tvos", "unix-arm64", "unix", "any", "base" ], "tvos.11-x64": [ "tvos.11-x64", "tvos.11", "tvos.10-x64", "tvos.10", "tvos-x64", "tvos", "unix-x64", "unix", "any", "base" ], "tvos.12": [ "tvos.12", "tvos.11", "tvos.10", "tvos", "unix", "any", "base" ], "tvos.12-arm64": [ "tvos.12-arm64", "tvos.12", "tvos.11-arm64", "tvos.11", "tvos.10-arm64", "tvos.10", "tvos-arm64", "tvos", "unix-arm64", "unix", "any", "base" ], "tvos.12-x64": [ "tvos.12-x64", "tvos.12", "tvos.11-x64", "tvos.11", "tvos.10-x64", "tvos.10", "tvos-x64", "tvos", "unix-x64", "unix", "any", "base" ], "tvos.13": [ "tvos.13", "tvos.12", "tvos.11", "tvos.10", "tvos", "unix", "any", "base" ], "tvos.13-arm64": [ "tvos.13-arm64", "tvos.13", "tvos.12-arm64", "tvos.12", "tvos.11-arm64", "tvos.11", "tvos.10-arm64", "tvos.10", "tvos-arm64", "tvos", "unix-arm64", "unix", "any", "base" ], "tvos.13-x64": [ "tvos.13-x64", "tvos.13", "tvos.12-x64", "tvos.12", "tvos.11-x64", "tvos.11", "tvos.10-x64", "tvos.10", "tvos-x64", "tvos", "unix-x64", "unix", "any", "base" ], "tvos.14": [ "tvos.14", "tvos.13", "tvos.12", "tvos.11", "tvos.10", "tvos", "unix", "any", "base" ], "tvos.14-arm64": [ "tvos.14-arm64", "tvos.14", "tvos.13-arm64", "tvos.13", "tvos.12-arm64", "tvos.12", "tvos.11-arm64", "tvos.11", "tvos.10-arm64", "tvos.10", "tvos-arm64", "tvos", "unix-arm64", "unix", "any", "base" ], "tvos.14-x64": [ "tvos.14-x64", "tvos.14", "tvos.13-x64", "tvos.13", "tvos.12-x64", "tvos.12", "tvos.11-x64", "tvos.11", "tvos.10-x64", "tvos.10", "tvos-x64", "tvos", "unix-x64", "unix", "any", "base" ], "tvos.15": [ "tvos.15", "tvos.14", "tvos.13", "tvos.12", "tvos.11", "tvos.10", "tvos", "unix", "any", "base" ], "tvos.15-arm64": [ "tvos.15-arm64", "tvos.15", "tvos.14-arm64", "tvos.14", "tvos.13-arm64", "tvos.13", "tvos.12-arm64", "tvos.12", "tvos.11-arm64", "tvos.11", "tvos.10-arm64", "tvos.10", "tvos-arm64", "tvos", "unix-arm64", "unix", "any", "base" ], "tvos.15-x64": [ "tvos.15-x64", "tvos.15", "tvos.14-x64", "tvos.14", "tvos.13-x64", 
"tvos.13", "tvos.12-x64", "tvos.12", "tvos.11-x64", "tvos.11", "tvos.10-x64", "tvos.10", "tvos-x64", "tvos", "unix-x64", "unix", "any", "base" ], "tvossimulator": [ "tvossimulator", "tvos", "unix", "any", "base" ], "tvossimulator-arm64": [ "tvossimulator-arm64", "tvossimulator", "tvos-arm64", "tvos", "unix-arm64", "unix", "any", "base" ], "tvossimulator-x64": [ "tvossimulator-x64", "tvossimulator", "tvos-x64", "tvos", "unix-x64", "unix", "any", "base" ], "tvossimulator.10": [ "tvossimulator.10", "tvossimulator", "tvos", "unix", "any", "base" ], "tvossimulator.10-arm64": [ "tvossimulator.10-arm64", "tvossimulator.10", "tvossimulator-arm64", "tvossimulator", "tvos-arm64", "tvos", "unix-arm64", "unix", "any", "base" ], "tvossimulator.10-x64": [ "tvossimulator.10-x64", "tvossimulator.10", "tvossimulator-x64", "tvossimulator", "tvos-x64", "tvos", "unix-x64", "unix", "any", "base" ], "tvossimulator.11": [ "tvossimulator.11", "tvossimulator.10", "tvossimulator", "tvos", "unix", "any", "base" ], "tvossimulator.11-arm64": [ "tvossimulator.11-arm64", "tvossimulator.11", "tvossimulator.10-arm64", "tvossimulator.10", "tvossimulator-arm64", "tvossimulator", "tvos-arm64", "tvos", "unix-arm64", "unix", "any", "base" ], "tvossimulator.11-x64": [ "tvossimulator.11-x64", "tvossimulator.11", "tvossimulator.10-x64", "tvossimulator.10", "tvossimulator-x64", "tvossimulator", "tvos-x64", "tvos", "unix-x64", "unix", "any", "base" ], "tvossimulator.12": [ "tvossimulator.12", "tvossimulator.11", "tvossimulator.10", "tvossimulator", "tvos", "unix", "any", "base" ], "tvossimulator.12-arm64": [ "tvossimulator.12-arm64", "tvossimulator.12", "tvossimulator.11-arm64", "tvossimulator.11", "tvossimulator.10-arm64", "tvossimulator.10", "tvossimulator-arm64", "tvossimulator", "tvos-arm64", "tvos", "unix-arm64", "unix", "any", "base" ], "tvossimulator.12-x64": [ "tvossimulator.12-x64", "tvossimulator.12", "tvossimulator.11-x64", "tvossimulator.11", "tvossimulator.10-x64", "tvossimulator.10", "tvossimulator-x64", "tvossimulator", "tvos-x64", "tvos", "unix-x64", "unix", "any", "base" ], "tvossimulator.13": [ "tvossimulator.13", "tvossimulator.12", "tvossimulator.11", "tvossimulator.10", "tvossimulator", "tvos", "unix", "any", "base" ], "tvossimulator.13-arm64": [ "tvossimulator.13-arm64", "tvossimulator.13", "tvossimulator.12-arm64", "tvossimulator.12", "tvossimulator.11-arm64", "tvossimulator.11", "tvossimulator.10-arm64", "tvossimulator.10", "tvossimulator-arm64", "tvossimulator", "tvos-arm64", "tvos", "unix-arm64", "unix", "any", "base" ], "tvossimulator.13-x64": [ "tvossimulator.13-x64", "tvossimulator.13", "tvossimulator.12-x64", "tvossimulator.12", "tvossimulator.11-x64", "tvossimulator.11", "tvossimulator.10-x64", "tvossimulator.10", "tvossimulator-x64", "tvossimulator", "tvos-x64", "tvos", "unix-x64", "unix", "any", "base" ], "tvossimulator.14": [ "tvossimulator.14", "tvossimulator.13", "tvossimulator.12", "tvossimulator.11", "tvossimulator.10", "tvossimulator", "tvos", "unix", "any", "base" ], "tvossimulator.14-arm64": [ "tvossimulator.14-arm64", "tvossimulator.14", "tvossimulator.13-arm64", "tvossimulator.13", "tvossimulator.12-arm64", "tvossimulator.12", "tvossimulator.11-arm64", "tvossimulator.11", "tvossimulator.10-arm64", "tvossimulator.10", "tvossimulator-arm64", "tvossimulator", "tvos-arm64", "tvos", "unix-arm64", "unix", "any", "base" ], "tvossimulator.14-x64": [ "tvossimulator.14-x64", "tvossimulator.14", "tvossimulator.13-x64", "tvossimulator.13", "tvossimulator.12-x64", "tvossimulator.12", 
"tvossimulator.11-x64", "tvossimulator.11", "tvossimulator.10-x64", "tvossimulator.10", "tvossimulator-x64", "tvossimulator", "tvos-x64", "tvos", "unix-x64", "unix", "any", "base" ], "tvossimulator.15": [ "tvossimulator.15", "tvossimulator.14", "tvossimulator.13", "tvossimulator.12", "tvossimulator.11", "tvossimulator.10", "tvossimulator", "tvos", "unix", "any", "base" ], "tvossimulator.15-arm64": [ "tvossimulator.15-arm64", "tvossimulator.15", "tvossimulator.14-arm64", "tvossimulator.14", "tvossimulator.13-arm64", "tvossimulator.13", "tvossimulator.12-arm64", "tvossimulator.12", "tvossimulator.11-arm64", "tvossimulator.11", "tvossimulator.10-arm64", "tvossimulator.10", "tvossimulator-arm64", "tvossimulator", "tvos-arm64", "tvos", "unix-arm64", "unix", "any", "base" ], "tvossimulator.15-x64": [ "tvossimulator.15-x64", "tvossimulator.15", "tvossimulator.14-x64", "tvossimulator.14", "tvossimulator.13-x64", "tvossimulator.13", "tvossimulator.12-x64", "tvossimulator.12", "tvossimulator.11-x64", "tvossimulator.11", "tvossimulator.10-x64", "tvossimulator.10", "tvossimulator-x64", "tvossimulator", "tvos-x64", "tvos", "unix-x64", "unix", "any", "base" ], "ubuntu": [ "ubuntu", "debian", "linux", "unix", "any", "base" ], "ubuntu-arm": [ "ubuntu-arm", "ubuntu", "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "ubuntu-arm64": [ "ubuntu-arm64", "ubuntu", "debian-arm64", "debian", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "ubuntu-x64": [ "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ubuntu-x86": [ "ubuntu-x86", "ubuntu", "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "ubuntu.14.04": [ "ubuntu.14.04", "ubuntu", "debian", "linux", "unix", "any", "base" ], "ubuntu.14.04-arm": [ "ubuntu.14.04-arm", "ubuntu.14.04", "ubuntu-arm", "ubuntu", "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "ubuntu.14.04-x64": [ "ubuntu.14.04-x64", "ubuntu.14.04", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ubuntu.14.04-x86": [ "ubuntu.14.04-x86", "ubuntu.14.04", "ubuntu-x86", "ubuntu", "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "ubuntu.14.10": [ "ubuntu.14.10", "ubuntu", "debian", "linux", "unix", "any", "base" ], "ubuntu.14.10-arm": [ "ubuntu.14.10-arm", "ubuntu.14.10", "ubuntu-arm", "ubuntu", "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "ubuntu.14.10-x64": [ "ubuntu.14.10-x64", "ubuntu.14.10", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ubuntu.14.10-x86": [ "ubuntu.14.10-x86", "ubuntu.14.10", "ubuntu-x86", "ubuntu", "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "ubuntu.15.04": [ "ubuntu.15.04", "ubuntu", "debian", "linux", "unix", "any", "base" ], "ubuntu.15.04-arm": [ "ubuntu.15.04-arm", "ubuntu.15.04", "ubuntu-arm", "ubuntu", "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "ubuntu.15.04-x64": [ "ubuntu.15.04-x64", "ubuntu.15.04", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ubuntu.15.04-x86": [ "ubuntu.15.04-x86", "ubuntu.15.04", "ubuntu-x86", "ubuntu", "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "ubuntu.15.10": [ "ubuntu.15.10", "ubuntu", "debian", "linux", "unix", 
"any", "base" ], "ubuntu.15.10-arm": [ "ubuntu.15.10-arm", "ubuntu.15.10", "ubuntu-arm", "ubuntu", "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "ubuntu.15.10-x64": [ "ubuntu.15.10-x64", "ubuntu.15.10", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ubuntu.15.10-x86": [ "ubuntu.15.10-x86", "ubuntu.15.10", "ubuntu-x86", "ubuntu", "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "ubuntu.16.04": [ "ubuntu.16.04", "ubuntu", "debian", "linux", "unix", "any", "base" ], "ubuntu.16.04-arm": [ "ubuntu.16.04-arm", "ubuntu.16.04", "ubuntu-arm", "ubuntu", "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "ubuntu.16.04-arm64": [ "ubuntu.16.04-arm64", "ubuntu.16.04", "ubuntu-arm64", "ubuntu", "debian-arm64", "debian", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "ubuntu.16.04-x64": [ "ubuntu.16.04-x64", "ubuntu.16.04", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ubuntu.16.04-x86": [ "ubuntu.16.04-x86", "ubuntu.16.04", "ubuntu-x86", "ubuntu", "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "ubuntu.16.10": [ "ubuntu.16.10", "ubuntu", "debian", "linux", "unix", "any", "base" ], "ubuntu.16.10-arm": [ "ubuntu.16.10-arm", "ubuntu.16.10", "ubuntu-arm", "ubuntu", "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "ubuntu.16.10-arm64": [ "ubuntu.16.10-arm64", "ubuntu.16.10", "ubuntu-arm64", "ubuntu", "debian-arm64", "debian", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "ubuntu.16.10-x64": [ "ubuntu.16.10-x64", "ubuntu.16.10", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ubuntu.16.10-x86": [ "ubuntu.16.10-x86", "ubuntu.16.10", "ubuntu-x86", "ubuntu", "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "ubuntu.17.04": [ "ubuntu.17.04", "ubuntu", "debian", "linux", "unix", "any", "base" ], "ubuntu.17.04-arm": [ "ubuntu.17.04-arm", "ubuntu.17.04", "ubuntu-arm", "ubuntu", "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "ubuntu.17.04-arm64": [ "ubuntu.17.04-arm64", "ubuntu.17.04", "ubuntu-arm64", "ubuntu", "debian-arm64", "debian", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "ubuntu.17.04-x64": [ "ubuntu.17.04-x64", "ubuntu.17.04", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ubuntu.17.04-x86": [ "ubuntu.17.04-x86", "ubuntu.17.04", "ubuntu-x86", "ubuntu", "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "ubuntu.17.10": [ "ubuntu.17.10", "ubuntu", "debian", "linux", "unix", "any", "base" ], "ubuntu.17.10-arm": [ "ubuntu.17.10-arm", "ubuntu.17.10", "ubuntu-arm", "ubuntu", "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "ubuntu.17.10-arm64": [ "ubuntu.17.10-arm64", "ubuntu.17.10", "ubuntu-arm64", "ubuntu", "debian-arm64", "debian", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "ubuntu.17.10-x64": [ "ubuntu.17.10-x64", "ubuntu.17.10", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ubuntu.17.10-x86": [ "ubuntu.17.10-x86", "ubuntu.17.10", "ubuntu-x86", "ubuntu", "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], 
"ubuntu.18.04": [ "ubuntu.18.04", "ubuntu", "debian", "linux", "unix", "any", "base" ], "ubuntu.18.04-arm": [ "ubuntu.18.04-arm", "ubuntu.18.04", "ubuntu-arm", "ubuntu", "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "ubuntu.18.04-arm64": [ "ubuntu.18.04-arm64", "ubuntu.18.04", "ubuntu-arm64", "ubuntu", "debian-arm64", "debian", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "ubuntu.18.04-x64": [ "ubuntu.18.04-x64", "ubuntu.18.04", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ubuntu.18.04-x86": [ "ubuntu.18.04-x86", "ubuntu.18.04", "ubuntu-x86", "ubuntu", "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "ubuntu.18.10": [ "ubuntu.18.10", "ubuntu", "debian", "linux", "unix", "any", "base" ], "ubuntu.18.10-arm": [ "ubuntu.18.10-arm", "ubuntu.18.10", "ubuntu-arm", "ubuntu", "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "ubuntu.18.10-arm64": [ "ubuntu.18.10-arm64", "ubuntu.18.10", "ubuntu-arm64", "ubuntu", "debian-arm64", "debian", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "ubuntu.18.10-x64": [ "ubuntu.18.10-x64", "ubuntu.18.10", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ubuntu.18.10-x86": [ "ubuntu.18.10-x86", "ubuntu.18.10", "ubuntu-x86", "ubuntu", "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "ubuntu.19.04": [ "ubuntu.19.04", "ubuntu", "debian", "linux", "unix", "any", "base" ], "ubuntu.19.04-arm": [ "ubuntu.19.04-arm", "ubuntu.19.04", "ubuntu-arm", "ubuntu", "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "ubuntu.19.04-arm64": [ "ubuntu.19.04-arm64", "ubuntu.19.04", "ubuntu-arm64", "ubuntu", "debian-arm64", "debian", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "ubuntu.19.04-x64": [ "ubuntu.19.04-x64", "ubuntu.19.04", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ubuntu.19.04-x86": [ "ubuntu.19.04-x86", "ubuntu.19.04", "ubuntu-x86", "ubuntu", "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "ubuntu.19.10": [ "ubuntu.19.10", "ubuntu", "debian", "linux", "unix", "any", "base" ], "ubuntu.19.10-arm": [ "ubuntu.19.10-arm", "ubuntu.19.10", "ubuntu-arm", "ubuntu", "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "ubuntu.19.10-arm64": [ "ubuntu.19.10-arm64", "ubuntu.19.10", "ubuntu-arm64", "ubuntu", "debian-arm64", "debian", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "ubuntu.19.10-x64": [ "ubuntu.19.10-x64", "ubuntu.19.10", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ubuntu.19.10-x86": [ "ubuntu.19.10-x86", "ubuntu.19.10", "ubuntu-x86", "ubuntu", "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "ubuntu.20.04": [ "ubuntu.20.04", "ubuntu", "debian", "linux", "unix", "any", "base" ], "ubuntu.20.04-arm": [ "ubuntu.20.04-arm", "ubuntu.20.04", "ubuntu-arm", "ubuntu", "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "ubuntu.20.04-arm64": [ "ubuntu.20.04-arm64", "ubuntu.20.04", "ubuntu-arm64", "ubuntu", "debian-arm64", "debian", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "ubuntu.20.04-x64": [ "ubuntu.20.04-x64", "ubuntu.20.04", "ubuntu-x64", "ubuntu", 
"debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ubuntu.20.04-x86": [ "ubuntu.20.04-x86", "ubuntu.20.04", "ubuntu-x86", "ubuntu", "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "ubuntu.20.10": [ "ubuntu.20.10", "ubuntu", "debian", "linux", "unix", "any", "base" ], "ubuntu.20.10-arm": [ "ubuntu.20.10-arm", "ubuntu.20.10", "ubuntu-arm", "ubuntu", "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "ubuntu.20.10-arm64": [ "ubuntu.20.10-arm64", "ubuntu.20.10", "ubuntu-arm64", "ubuntu", "debian-arm64", "debian", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "ubuntu.20.10-x64": [ "ubuntu.20.10-x64", "ubuntu.20.10", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ubuntu.20.10-x86": [ "ubuntu.20.10-x86", "ubuntu.20.10", "ubuntu-x86", "ubuntu", "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "ubuntu.21.04": [ "ubuntu.21.04", "ubuntu", "debian", "linux", "unix", "any", "base" ], "ubuntu.21.04-arm": [ "ubuntu.21.04-arm", "ubuntu.21.04", "ubuntu-arm", "ubuntu", "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "ubuntu.21.04-arm64": [ "ubuntu.21.04-arm64", "ubuntu.21.04", "ubuntu-arm64", "ubuntu", "debian-arm64", "debian", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "ubuntu.21.04-x64": [ "ubuntu.21.04-x64", "ubuntu.21.04", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ubuntu.21.04-x86": [ "ubuntu.21.04-x86", "ubuntu.21.04", "ubuntu-x86", "ubuntu", "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "ubuntu.21.10": [ "ubuntu.21.10", "ubuntu", "debian", "linux", "unix", "any", "base" ], "ubuntu.21.10-arm": [ "ubuntu.21.10-arm", "ubuntu.21.10", "ubuntu-arm", "ubuntu", "debian-arm", "debian", "linux-arm", "linux", "unix-arm", "unix", "any", "base" ], "ubuntu.21.10-arm64": [ "ubuntu.21.10-arm64", "ubuntu.21.10", "ubuntu-arm64", "ubuntu", "debian-arm64", "debian", "linux-arm64", "linux", "unix-arm64", "unix", "any", "base" ], "ubuntu.21.10-x64": [ "ubuntu.21.10-x64", "ubuntu.21.10", "ubuntu-x64", "ubuntu", "debian-x64", "debian", "linux-x64", "linux", "unix-x64", "unix", "any", "base" ], "ubuntu.21.10-x86": [ "ubuntu.21.10-x86", "ubuntu.21.10", "ubuntu-x86", "ubuntu", "debian-x86", "debian", "linux-x86", "linux", "unix-x86", "unix", "any", "base" ], "unix": [ "unix", "any", "base" ], "unix-arm": [ "unix-arm", "unix", "any", "base" ], "unix-arm64": [ "unix-arm64", "unix", "any", "base" ], "unix-armel": [ "unix-armel", "unix", "any", "base" ], "unix-loongarch64": [ "unix-loongarch64", "unix", "any", "base" ], "unix-armv6": [ "unix-armv6", "unix", "any", "base" ], "unix-mips64": [ "unix-mips64", "unix", "any", "base" ], "unix-s390x": [ "unix-s390x", "unix", "any", "base" ], "unix-x64": [ "unix-x64", "unix", "any", "base" ], "unix-x86": [ "unix-x86", "unix", "any", "base" ], "win": [ "win", "any", "base" ], "win-aot": [ "win-aot", "win", "aot", "any", "base" ], "win-arm": [ "win-arm", "win", "any", "base" ], "win-arm-aot": [ "win-arm-aot", "win-aot", "win-arm", "win", "aot", "any", "base" ], "win-arm64": [ "win-arm64", "win", "any", "base" ], "win-arm64-aot": [ "win-arm64-aot", "win-aot", "win-arm64", "win", "aot", "any", "base" ], "win-x64": [ "win-x64", "win", "any", "base" ], "win-x64-aot": [ "win-x64-aot", "win-aot", "win-x64", "win", "aot", "any", "base" 
], "win-x86": [ "win-x86", "win", "any", "base" ], "win-x86-aot": [ "win-x86-aot", "win-aot", "win-x86", "win", "aot", "any", "base" ], "win10": [ "win10", "win81", "win8", "win7", "win", "any", "base" ], "win10-aot": [ "win10-aot", "win10", "win81-aot", "win81", "win8-aot", "win8", "win7-aot", "win7", "win-aot", "win", "aot", "any", "base" ], "win10-arm": [ "win10-arm", "win10", "win81-arm", "win81", "win8-arm", "win8", "win7-arm", "win7", "win-arm", "win", "any", "base" ], "win10-arm-aot": [ "win10-arm-aot", "win10-aot", "win10-arm", "win10", "win81-arm-aot", "win81-aot", "win81-arm", "win81", "win8-arm-aot", "win8-aot", "win8-arm", "win8", "win7-arm-aot", "win7-aot", "win7-arm", "win7", "win-arm-aot", "win-aot", "win-arm", "win", "aot", "any", "base" ], "win10-arm64": [ "win10-arm64", "win10", "win81-arm64", "win81", "win8-arm64", "win8", "win7-arm64", "win7", "win-arm64", "win", "any", "base" ], "win10-arm64-aot": [ "win10-arm64-aot", "win10-aot", "win10-arm64", "win10", "win81-arm64-aot", "win81-aot", "win81-arm64", "win81", "win8-arm64-aot", "win8-aot", "win8-arm64", "win8", "win7-arm64-aot", "win7-aot", "win7-arm64", "win7", "win-arm64-aot", "win-aot", "win-arm64", "win", "aot", "any", "base" ], "win10-x64": [ "win10-x64", "win10", "win81-x64", "win81", "win8-x64", "win8", "win7-x64", "win7", "win-x64", "win", "any", "base" ], "win10-x64-aot": [ "win10-x64-aot", "win10-aot", "win10-x64", "win10", "win81-x64-aot", "win81-aot", "win81-x64", "win81", "win8-x64-aot", "win8-aot", "win8-x64", "win8", "win7-x64-aot", "win7-aot", "win7-x64", "win7", "win-x64-aot", "win-aot", "win-x64", "win", "aot", "any", "base" ], "win10-x86": [ "win10-x86", "win10", "win81-x86", "win81", "win8-x86", "win8", "win7-x86", "win7", "win-x86", "win", "any", "base" ], "win10-x86-aot": [ "win10-x86-aot", "win10-aot", "win10-x86", "win10", "win81-x86-aot", "win81-aot", "win81-x86", "win81", "win8-x86-aot", "win8-aot", "win8-x86", "win8", "win7-x86-aot", "win7-aot", "win7-x86", "win7", "win-x86-aot", "win-aot", "win-x86", "win", "aot", "any", "base" ], "win7": [ "win7", "win", "any", "base" ], "win7-aot": [ "win7-aot", "win7", "win-aot", "win", "aot", "any", "base" ], "win7-arm": [ "win7-arm", "win7", "win-arm", "win", "any", "base" ], "win7-arm-aot": [ "win7-arm-aot", "win7-aot", "win7-arm", "win7", "win-arm-aot", "win-aot", "win-arm", "win", "aot", "any", "base" ], "win7-arm64": [ "win7-arm64", "win7", "win-arm64", "win", "any", "base" ], "win7-arm64-aot": [ "win7-arm64-aot", "win7-aot", "win7-arm64", "win7", "win-arm64-aot", "win-aot", "win-arm64", "win", "aot", "any", "base" ], "win7-x64": [ "win7-x64", "win7", "win-x64", "win", "any", "base" ], "win7-x64-aot": [ "win7-x64-aot", "win7-aot", "win7-x64", "win7", "win-x64-aot", "win-aot", "win-x64", "win", "aot", "any", "base" ], "win7-x86": [ "win7-x86", "win7", "win-x86", "win", "any", "base" ], "win7-x86-aot": [ "win7-x86-aot", "win7-aot", "win7-x86", "win7", "win-x86-aot", "win-aot", "win-x86", "win", "aot", "any", "base" ], "win8": [ "win8", "win7", "win", "any", "base" ], "win8-aot": [ "win8-aot", "win8", "win7-aot", "win7", "win-aot", "win", "aot", "any", "base" ], "win8-arm": [ "win8-arm", "win8", "win7-arm", "win7", "win-arm", "win", "any", "base" ], "win8-arm-aot": [ "win8-arm-aot", "win8-aot", "win8-arm", "win8", "win7-arm-aot", "win7-aot", "win7-arm", "win7", "win-arm-aot", "win-aot", "win-arm", "win", "aot", "any", "base" ], "win8-arm64": [ "win8-arm64", "win8", "win7-arm64", "win7", "win-arm64", "win", "any", "base" ], "win8-arm64-aot": [ 
"win8-arm64-aot", "win8-aot", "win8-arm64", "win8", "win7-arm64-aot", "win7-aot", "win7-arm64", "win7", "win-arm64-aot", "win-aot", "win-arm64", "win", "aot", "any", "base" ], "win8-x64": [ "win8-x64", "win8", "win7-x64", "win7", "win-x64", "win", "any", "base" ], "win8-x64-aot": [ "win8-x64-aot", "win8-aot", "win8-x64", "win8", "win7-x64-aot", "win7-aot", "win7-x64", "win7", "win-x64-aot", "win-aot", "win-x64", "win", "aot", "any", "base" ], "win8-x86": [ "win8-x86", "win8", "win7-x86", "win7", "win-x86", "win", "any", "base" ], "win8-x86-aot": [ "win8-x86-aot", "win8-aot", "win8-x86", "win8", "win7-x86-aot", "win7-aot", "win7-x86", "win7", "win-x86-aot", "win-aot", "win-x86", "win", "aot", "any", "base" ], "win81": [ "win81", "win8", "win7", "win", "any", "base" ], "win81-aot": [ "win81-aot", "win81", "win8-aot", "win8", "win7-aot", "win7", "win-aot", "win", "aot", "any", "base" ], "win81-arm": [ "win81-arm", "win81", "win8-arm", "win8", "win7-arm", "win7", "win-arm", "win", "any", "base" ], "win81-arm-aot": [ "win81-arm-aot", "win81-aot", "win81-arm", "win81", "win8-arm-aot", "win8-aot", "win8-arm", "win8", "win7-arm-aot", "win7-aot", "win7-arm", "win7", "win-arm-aot", "win-aot", "win-arm", "win", "aot", "any", "base" ], "win81-arm64": [ "win81-arm64", "win81", "win8-arm64", "win8", "win7-arm64", "win7", "win-arm64", "win", "any", "base" ], "win81-arm64-aot": [ "win81-arm64-aot", "win81-aot", "win81-arm64", "win81", "win8-arm64-aot", "win8-aot", "win8-arm64", "win8", "win7-arm64-aot", "win7-aot", "win7-arm64", "win7", "win-arm64-aot", "win-aot", "win-arm64", "win", "aot", "any", "base" ], "win81-x64": [ "win81-x64", "win81", "win8-x64", "win8", "win7-x64", "win7", "win-x64", "win", "any", "base" ], "win81-x64-aot": [ "win81-x64-aot", "win81-aot", "win81-x64", "win81", "win8-x64-aot", "win8-aot", "win8-x64", "win8", "win7-x64-aot", "win7-aot", "win7-x64", "win7", "win-x64-aot", "win-aot", "win-x64", "win", "aot", "any", "base" ], "win81-x86": [ "win81-x86", "win81", "win8-x86", "win8", "win7-x86", "win7", "win-x86", "win", "any", "base" ], "win81-x86-aot": [ "win81-x86-aot", "win81-aot", "win81-x86", "win81", "win8-x86-aot", "win8-aot", "win8-x86", "win8", "win7-x86-aot", "win7-aot", "win7-x86", "win7", "win-x86-aot", "win-aot", "win-x86", "win", "aot", "any", "base" ] }
-1
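The runtime.json fragment above is a NuGet RID (runtime identifier) fallback graph: each key expands to an ordered list of compatible RIDs, most specific first, and asset selection walks that list until it finds a RID for which the package actually ships an asset. Below is a minimal Python sketch of that lookup under stated assumptions: the ubuntu.20.04-x64 chain is excerpted verbatim from the data above, while pick_asset and assets_by_rid are hypothetical names for illustration, not part of the dataset or of NuGet's actual API.

# Hedged sketch: resolving an asset against a RID fallback graph like the
# JSON above. Only one chain is excerpted; the real file maps every RID.
RID_GRAPH = {
    "ubuntu.20.04-x64": [
        "ubuntu.20.04-x64", "ubuntu.20.04", "ubuntu-x64", "ubuntu",
        "debian-x64", "debian", "linux-x64", "linux",
        "unix-x64", "unix", "any", "base",
    ],
}

def pick_asset(target_rid, assets_by_rid):
    # Walk the expansion list in order and return the first match,
    # i.e. the asset published for the most specific compatible RID.
    for rid in RID_GRAPH.get(target_rid, [target_rid]):
        if rid in assets_by_rid:
            return assets_by_rid[rid]
    return None

# A package that ships only a linux-x64 native asset still satisfies an
# ubuntu.20.04-x64 consumer via the fallback chain:
print(pick_asset("ubuntu.20.04-x64", {"linux-x64": "native/libfoo.so"}))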
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/libraries/System.IO.Ports/pkg/runtime.linux-x64.runtime.native.System.IO.Ports.proj
<Project> <Import Project="runtime.native.System.IO.Ports.props" /> </Project>
<Project> <Import Project="runtime.native.System.IO.Ports.props" /> </Project>
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./eng/pipelines/coreclr/templates/format-job.yml
parameters: buildConfig: '' archType: '' osGroup: '' osSubgroup: '' container: '' crossBuild: false crossrootfsDir: '' dependOnEvaluatePaths: false timeoutInMinutes: '' stagedBuild: false variables: {} pool: '' condition: true ### Format job jobs: - template: xplat-pipeline-job.yml parameters: buildConfig: ${{ parameters.buildConfig }} archType: ${{ parameters.archType }} osGroup: ${{ parameters.osGroup }} osSubgroup: ${{ parameters.osSubgroup }} container: ${{ parameters.container }} crossBuild: ${{ parameters.crossBuild }} crossrootfsDir: ${{ parameters.crossrootfsDir }} dependOnEvaluatePaths: ${{ parameters.dependOnEvaluatePaths }} stagedBuild: ${{ parameters.stagedBuild }} timeoutInMinutes: ${{ parameters.timeoutInMinutes }} name: ${{ format('format_{0}{1}_{2}', parameters.osGroup, parameters.osSubgroup, parameters.archType) }} displayName: ${{ format('Formatting {0}{1} {2}', parameters.osGroup, parameters.osSubgroup, parameters.archType) }} helixType: 'format' ${{ if eq(parameters.osGroup, 'windows') }}: pool: vmImage: 'windows-2019' ${{ if ne(parameters.osGroup, 'windows') }}: pool: ${{ parameters.pool }} variables: ${{ parameters.variables }} condition: ${{ parameters.condition }} steps: - task: UseDotNet@2 # This should match what jitutils YML uses to build. displayName: 'Install .NET SDK' inputs: packageType: 'sdk' version: '3.x' includePreviewVersions: true installationPath: $(Agent.ToolsDirectory)/dotnet - task: UsePythonVersion@0 inputs: versionSpec: '3.x' addToPath: true architecture: 'x64' condition: ${{ eq(parameters.osGroup, 'windows') }} - task: PythonScript@0 displayName: Run tests/scripts/format.py inputs: scriptSource: 'filePath' scriptPath: $(Build.SourcesDirectory)/src/tests/Common/scripts/format.py arguments: '-c $(Build.SourcesDirectory)/src/coreclr -o $(osGroup) -a $(archType)' - task: PublishBuildArtifacts@1 displayName: Publish format.patch inputs: PathtoPublish: '$(Build.SourcesDirectory)/src/coreclr/format.patch' ArtifactName: format.$(osGroup).$(archType).patch continueOnError: true condition: failed()
parameters: buildConfig: '' archType: '' osGroup: '' osSubgroup: '' container: '' crossBuild: false crossrootfsDir: '' dependOnEvaluatePaths: false timeoutInMinutes: '' stagedBuild: false variables: {} pool: '' condition: true ### Format job jobs: - template: xplat-pipeline-job.yml parameters: buildConfig: ${{ parameters.buildConfig }} archType: ${{ parameters.archType }} osGroup: ${{ parameters.osGroup }} osSubgroup: ${{ parameters.osSubgroup }} container: ${{ parameters.container }} crossBuild: ${{ parameters.crossBuild }} crossrootfsDir: ${{ parameters.crossrootfsDir }} dependOnEvaluatePaths: ${{ parameters.dependOnEvaluatePaths }} stagedBuild: ${{ parameters.stagedBuild }} timeoutInMinutes: ${{ parameters.timeoutInMinutes }} name: ${{ format('format_{0}{1}_{2}', parameters.osGroup, parameters.osSubgroup, parameters.archType) }} displayName: ${{ format('Formatting {0}{1} {2}', parameters.osGroup, parameters.osSubgroup, parameters.archType) }} helixType: 'format' ${{ if eq(parameters.osGroup, 'windows') }}: pool: vmImage: 'windows-2019' ${{ if ne(parameters.osGroup, 'windows') }}: pool: ${{ parameters.pool }} variables: ${{ parameters.variables }} condition: ${{ parameters.condition }} steps: - task: UseDotNet@2 # This should match what jitutils YML uses to build. displayName: 'Install .NET SDK' inputs: packageType: 'sdk' version: '3.x' includePreviewVersions: true installationPath: $(Agent.ToolsDirectory)/dotnet - task: UsePythonVersion@0 inputs: versionSpec: '3.x' addToPath: true architecture: 'x64' condition: ${{ eq(parameters.osGroup, 'windows') }} - task: PythonScript@0 displayName: Run tests/scripts/format.py inputs: scriptSource: 'filePath' scriptPath: $(Build.SourcesDirectory)/src/tests/Common/scripts/format.py arguments: '-c $(Build.SourcesDirectory)/src/coreclr -o $(osGroup) -a $(archType)' - task: PublishBuildArtifacts@1 displayName: Publish format.patch inputs: PathtoPublish: '$(Build.SourcesDirectory)/src/coreclr/format.patch' ArtifactName: format.$(osGroup).$(archType).patch continueOnError: true condition: failed()
-1
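The PythonScript@0 step in the format job above reduces to a single script invocation. A hedged sketch of the equivalent local command follows: the script path and the -c/-o/-a flags mirror the YAML step, while the repo path and the linux/x64 values are assumptions standing in for $(Build.SourcesDirectory), $(osGroup), and $(archType).

# Minimal sketch of running the formatting check locally, assuming a local
# clone of dotnet/runtime; paths and flags are copied from the YAML step.
import subprocess
import sys

repo = "/path/to/runtime"  # assumption: stands in for $(Build.SourcesDirectory)
subprocess.run(
    [
        sys.executable,
        f"{repo}/src/tests/Common/scripts/format.py",
        "-c", f"{repo}/src/coreclr",  # coreclr sources to check
        "-o", "linux",                # $(osGroup)
        "-a", "x64",                  # $(archType)
    ],
    check=True,  # a non-zero exit fails, as the pipeline step would
)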
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/mono/nuget/Microsoft.NET.Workload.Mono.Toolchain.Manifest/localize/WorkloadManifest.zh-Hant.json
{ "workloads/wasm-tools/description": ".NET WebAssembly 組建工具" }
{ "workloads/wasm-tools/description": ".NET WebAssembly 組建工具" }
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./eng/pipelines/common/templates/wasm-runtime-tests.yml
parameters: alwaysRun: false isExtraPlatformsBuild: false platforms: [] jobs: # # Build the whole product using Mono and run runtime tests # - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/common/global-build-job.yml helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml buildConfig: Release runtimeFlavor: mono platforms: ${{ parameters.platforms }} variables: - name: allWasmContainsChange value: $[ dependencies.evaluate_paths.outputs['SetPathVars_allwasm.containsChange'] ] - name: alwaysRunVar value: ${{ parameters.alwaysRun }} - name: timeoutPerTestInMinutes value: 10 - name: timeoutPerTestCollectionInMinutes value: 200 jobParameters: testGroup: innerloop isExtraPlatforms: ${{ parameters.isExtraPlatformsBuild }} nameSuffix: AllSubsets_Mono_RuntimeTests buildArgs: -s mono+libs -c $(_BuildConfig) timeoutInMinutes: 180 condition: >- or( eq(variables['alwaysRunVar'], true), eq(dependencies.evaluate_paths.outputs['SetPathVars_mono.containsChange'], true), eq(dependencies.evaluate_paths.outputs['SetPathVars_allwasm.containsChange'], true), eq(dependencies.evaluate_paths.outputs['SetPathVars_runtimetests.containsChange'], true)) extraStepsTemplate: /eng/pipelines/common/templates/runtimes/wasm-runtime-and-send-to-helix.yml extraStepsParameters: creator: dotnet-bot testRunNamePrefixSuffix: Mono_$(_BuildConfig)
parameters: alwaysRun: false isExtraPlatformsBuild: false platforms: [] jobs: # # Build the whole product using Mono and run runtime tests # - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/common/global-build-job.yml helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml buildConfig: Release runtimeFlavor: mono platforms: ${{ parameters.platforms }} variables: - name: allWasmContainsChange value: $[ dependencies.evaluate_paths.outputs['SetPathVars_allwasm.containsChange'] ] - name: alwaysRunVar value: ${{ parameters.alwaysRun }} - name: timeoutPerTestInMinutes value: 10 - name: timeoutPerTestCollectionInMinutes value: 200 jobParameters: testGroup: innerloop isExtraPlatforms: ${{ parameters.isExtraPlatformsBuild }} nameSuffix: AllSubsets_Mono_RuntimeTests buildArgs: -s mono+libs -c $(_BuildConfig) timeoutInMinutes: 180 condition: >- or( eq(variables['alwaysRunVar'], true), eq(dependencies.evaluate_paths.outputs['SetPathVars_mono.containsChange'], true), eq(dependencies.evaluate_paths.outputs['SetPathVars_allwasm.containsChange'], true), eq(dependencies.evaluate_paths.outputs['SetPathVars_runtimetests.containsChange'], true)) extraStepsTemplate: /eng/pipelines/common/templates/runtimes/wasm-runtime-and-send-to-helix.yml extraStepsParameters: creator: dotnet-bot testRunNamePrefixSuffix: Mono_$(_BuildConfig)
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./eng/pipelines/runtime-official.yml
trigger: batch: true branches: include: - main - release/* - internal/release/* paths: include: - '*' - docs/manpages/* exclude: - .github/* - docs/* - CODE-OF-CONDUCT.md - CONTRIBUTING.md - LICENSE.TXT - PATENTS.TXT - README.md - SECURITY.md - THIRD-PARTY-NOTICES.TXT # This is an official pipeline that should not be triggerable from a PR, # there is no public pipeline assosiated with it. pr: none variables: - template: /eng/pipelines/common/variables.yml # TODO: (Consolidation) Switch away from old signing/validation variables from former Core-Setup. https://github.com/dotnet/runtime/issues/1027 - name: TeamName value: dotnet-core-acquisition # Set the target blob feed for package publish during official and validation builds. - name: _DotNetArtifactsCategory value: .NETCore - name: _DotNetValidationArtifactsCategory value: .NETCoreValidation - name: PostBuildSign value: true stages: - stage: Build jobs: # # Localization build # - ${{ if eq(variables['Build.SourceBranch'], 'refs/heads/main') }}: - template: /eng/common/templates/job/onelocbuild.yml parameters: MirrorRepo: runtime LclSource: lclFilesfromPackage LclPackageId: 'LCL-JUNO-PROD-RUNTIME' # # Source Index Build # - ${{ if eq(variables['Build.SourceBranch'], 'refs/heads/main') }}: - template: /eng/common/templates/job/source-index-stage1.yml parameters: sourceIndexBuildCommand: build.cmd -subset libs.sfx+libs.oob -binarylog -os Linux -ci # # Build CoreCLR # - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/coreclr/templates/build-job.yml buildConfig: release platforms: - OSX_arm64 - OSX_x64 - Linux_x64 - Linux_arm - Linux_arm64 - Linux_musl_x64 - Linux_musl_arm - Linux_musl_arm64 - windows_x86 - windows_x64 - windows_arm - windows_arm64 jobParameters: isOfficialBuild: ${{ variables.isOfficialBuild }} signBinaries: ${{ variables.isOfficialBuild }} timeoutInMinutes: 120 - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/coreclr/templates/crossdac-pack.yml buildConfig: release platforms: - windows_x64 jobParameters: isOfficialBuild: ${{ variables.isOfficialBuild }} timeoutInMinutes: 120 crossDacPlatforms: - Linux_x64 - Linux_arm - Linux_arm64 - Linux_musl_x64 - Linux_musl_arm - Linux_musl_arm64 - windows_x64 - windows_arm - windows_arm64 # # Build Mono runtime packs # - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/common/global-build-job.yml buildConfig: release runtimeFlavor: mono platforms: - Android_x64 - Android_x86 - Android_arm - Android_arm64 - MacCatalyst_x64 - MacCatalyst_arm64 - tvOSSimulator_x64 - tvOSSimulator_arm64 - tvOS_arm64 - iOSSimulator_x64 - iOSSimulator_x86 - iOSSimulator_arm64 - iOS_arm - iOS_arm64 - OSX_x64 - OSX_arm64 - Linux_x64 - Linux_arm - Linux_arm64 - Linux_musl_x64 - Browser_wasm # - Linux_musl_arm # - Linux_musl_arm64 - windows_x64 - windows_x86 # - windows_arm # - windows_arm64 jobParameters: buildArgs: -s mono+libs+host+packs+mono.mscordbi -c $(_BuildConfig) nameSuffix: AllSubsets_Mono isOfficialBuild: ${{ variables.isOfficialBuild }} extraStepsTemplate: /eng/pipelines/common/upload-intermediate-artifacts-step.yml extraStepsParameters: name: MonoRuntimePacks # Build Mono AOT offset headers once, for consumption elsewhere # - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/mono/templates/generate-offsets.yml buildConfig: release platforms: - Android_x64 - Browser_wasm - tvOS_arm64 - iOS_arm64 - MacCatalyst_x64 jobParameters: 
isOfficialBuild: ${{ variables.isOfficialBuild }} # # Build Mono release AOT cross-compilers # - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/common/global-build-job.yml runtimeFlavor: mono buildConfig: release platforms: - Linux_x64 jobParameters: buildArgs: -s mono+packs -c $(_BuildConfig) /p:MonoCrossAOTTargetOS=Android+Browser /p:SkipMonoCrossJitConfigure=true /p:BuildMonoAOTCrossCompilerOnly=true nameSuffix: CrossAOT_Mono runtimeVariant: crossaot dependsOn: - mono_android_offsets - mono_browser_offsets monoCrossAOTTargetOS: - Android - Browser isOfficialBuild: ${{ variables.isOfficialBuild }} extraStepsTemplate: /eng/pipelines/common/upload-intermediate-artifacts-step.yml extraStepsParameters: name: MonoRuntimePacks - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/common/global-build-job.yml runtimeFlavor: mono buildConfig: release platforms: - Windows_x64 jobParameters: buildArgs: -s mono+packs -c $(_BuildConfig) /p:MonoCrossAOTTargetOS=Android+Browser /p:SkipMonoCrossJitConfigure=true /p:BuildMonoAOTCrossCompilerOnly=true nameSuffix: CrossAOT_Mono runtimeVariant: crossaot dependsOn: - mono_android_offsets - mono_browser_offsets monoCrossAOTTargetOS: - Android - Browser isOfficialBuild: ${{ variables.isOfficialBuild }} extraStepsTemplate: /eng/pipelines/common/upload-intermediate-artifacts-step.yml extraStepsParameters: name: MonoRuntimePacks - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/common/global-build-job.yml runtimeFlavor: mono buildConfig: release platforms: - OSX_x64 jobParameters: buildArgs: -s mono+packs -c $(_BuildConfig) /p:MonoCrossAOTTargetOS=Android+Browser+tvOS+iOS+MacCatalyst /p:SkipMonoCrossJitConfigure=true /p:BuildMonoAOTCrossCompilerOnly=true nameSuffix: CrossAOT_Mono runtimeVariant: crossaot dependsOn: - mono_android_offsets - mono_browser_offsets - mono_tvos_offsets - mono_ios_offsets - mono_maccatalyst_offsets monoCrossAOTTargetOS: - Android - Browser - tvOS - iOS - MacCatalyst isOfficialBuild: ${{ variables.isOfficialBuild }} extraStepsTemplate: /eng/pipelines/common/upload-intermediate-artifacts-step.yml extraStepsParameters: name: MonoRuntimePacks # # Build Mono LLVM runtime packs # - template: /eng/pipelines/common/platform-matrix-multijob.yml parameters: platforms: - OSX_x64 - Linux_x64 # - Linux_arm - Linux_arm64 # - Linux_musl_x64 # - Linux_musl_arm64 # - windows_x64 # - windows_x86 # - windows_arm # - windows_arm64 jobTemplates: # LLVMJIT - jobTemplate: /eng/pipelines/common/global-build-job.yml buildConfig: release runtimeFlavor: mono jobParameters: buildArgs: -s mono+libs+host+packs -c $(_BuildConfig) /p:MonoEnableLLVM=true /p:MonoBundleLLVMOptimizer=false nameSuffix: AllSubsets_Mono_LLVMJIT runtimeVariant: LLVMJIT isOfficialBuild: ${{ variables.isOfficialBuild }} extraStepsTemplate: /eng/pipelines/common/upload-intermediate-artifacts-step.yml extraStepsParameters: name: MonoRuntimePacks #LLVMAOT - jobTemplate: /eng/pipelines/common/global-build-job.yml buildConfig: release runtimeFlavor: mono jobParameters: buildArgs: -s mono+libs+host+packs -c $(_BuildConfig) /p:MonoEnableLLVM=true /p:MonoBundleLLVMOptimizer=true nameSuffix: AllSubsets_Mono_LLVMAOT runtimeVariant: LLVMAOT isOfficialBuild: ${{ variables.isOfficialBuild }} extraStepsTemplate: /eng/pipelines/common/upload-intermediate-artifacts-step.yml extraStepsParameters: name: MonoRuntimePacks # # Build libraries using live CoreLib from CoreCLR # - template: 
/eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/libraries/build-job.yml buildConfig: Release platforms: - OSX_arm64 - OSX_x64 - Linux_x64 - Linux_arm - Linux_arm64 - Linux_musl_x64 - Linux_musl_arm - Linux_musl_arm64 - windows_x86 - windows_x64 - windows_arm - windows_arm64 jobParameters: isOfficialBuild: ${{ variables.isOfficialBuild }} liveRuntimeBuildConfig: release # Official builds don't run tests, locally or on Helix runTests: false useHelix: false # # Build libraries AllConfigurations for packages # - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/libraries/build-job.yml buildConfig: Release platforms: - windows_x64 jobParameters: framework: allConfigurations isOfficialBuild: ${{ variables.isOfficialBuild }} isOfficialAllConfigurations: true liveRuntimeBuildConfig: release # Official builds don't run tests, locally or on Helix runTests: false useHelix: false # # Build Sourcebuild leg # - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/common/global-build-job.yml buildConfig: Release helixQueueGroup: ci platforms: - SourceBuild_Linux_x64 jobParameters: nameSuffix: SourceBuild extraStepsTemplate: /eng/pipelines/common/upload-intermediate-artifacts-step.yml extraStepsParameters: name: SourceBuildPackages timeoutInMinutes: 95 # # Installer Build # - template: /eng/pipelines/installer/installer-matrix.yml parameters: jobParameters: liveRuntimeBuildConfig: release liveLibrariesBuildConfig: Release isOfficialBuild: ${{ variables.isOfficialBuild }} platforms: - OSX_arm64 - OSX_x64 - Linux_x64 - Linux_arm - Linux_arm64 - Linux_musl_x64 - Linux_musl_arm - Linux_musl_arm64 - windows_x86 - windows_x64 - windows_arm - windows_arm64 # # Build PGO CoreCLR release # - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/coreclr/templates/build-job.yml buildConfig: release platforms: - windows_x64 - windows_x86 - Linux_x64 jobParameters: isOfficialBuild: ${{ variables.isOfficialBuild }} signBinaries: false testGroup: innerloop pgoType: 'PGO' # # PGO Build # - template: /eng/pipelines/installer/installer-matrix.yml parameters: buildConfig: Release jobParameters: isOfficialBuild: ${{ variables.isOfficialBuild }} liveRuntimeBuildConfig: release liveLibrariesBuildConfig: Release pgoType: 'PGO' platforms: - windows_x64 - windows_x86 - Linux_x64 # # Build Workloads # - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/mono/templates/workloads-build.yml buildConfig: release platforms: - windows_x64 jobParameters: isOfficialBuild: ${{ variables.isOfficialBuild }} timeoutInMinutes: 120 dependsOn: - Build_Android_arm_release_AllSubsets_Mono - Build_Android_arm64_release_AllSubsets_Mono - Build_Android_x86_release_AllSubsets_Mono - Build_Android_x64_release_AllSubsets_Mono - Build_Browser_wasm_Linux_release_AllSubsets_Mono - Build_iOS_arm_release_AllSubsets_Mono - Build_iOS_arm64_release_AllSubsets_Mono - Build_iOSSimulator_x64_release_AllSubsets_Mono - Build_iOSSimulator_x86_release_AllSubsets_Mono - Build_iOSSimulator_arm64_release_AllSubsets_Mono - Build_MacCatalyst_arm64_release_AllSubsets_Mono - Build_MacCatalyst_x64_release_AllSubsets_Mono - Build_tvOS_arm64_release_AllSubsets_Mono - Build_tvOSSimulator_arm64_release_AllSubsets_Mono - Build_tvOSSimulator_x64_release_AllSubsets_Mono - Build_Windows_x64_release_CrossAOT_Mono - ${{ if eq(variables.isOfficialBuild, true) }}: - template: 
/eng/pipelines/official/stages/publish.yml parameters: isOfficialBuild: ${{ variables.isOfficialBuild }}
trigger: batch: true branches: include: - main - release/* - internal/release/* paths: include: - '*' - docs/manpages/* exclude: - .github/* - docs/* - CODE-OF-CONDUCT.md - CONTRIBUTING.md - LICENSE.TXT - PATENTS.TXT - README.md - SECURITY.md - THIRD-PARTY-NOTICES.TXT # This is an official pipeline that should not be triggerable from a PR, # there is no public pipeline assosiated with it. pr: none variables: - template: /eng/pipelines/common/variables.yml # TODO: (Consolidation) Switch away from old signing/validation variables from former Core-Setup. https://github.com/dotnet/runtime/issues/1027 - name: TeamName value: dotnet-core-acquisition # Set the target blob feed for package publish during official and validation builds. - name: _DotNetArtifactsCategory value: .NETCore - name: _DotNetValidationArtifactsCategory value: .NETCoreValidation - name: PostBuildSign value: true stages: - stage: Build jobs: # # Localization build # - ${{ if eq(variables['Build.SourceBranch'], 'refs/heads/main') }}: - template: /eng/common/templates/job/onelocbuild.yml parameters: MirrorRepo: runtime LclSource: lclFilesfromPackage LclPackageId: 'LCL-JUNO-PROD-RUNTIME' # # Source Index Build # - ${{ if eq(variables['Build.SourceBranch'], 'refs/heads/main') }}: - template: /eng/common/templates/job/source-index-stage1.yml parameters: sourceIndexBuildCommand: build.cmd -subset libs.sfx+libs.oob -binarylog -os Linux -ci # # Build CoreCLR # - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/coreclr/templates/build-job.yml buildConfig: release platforms: - OSX_arm64 - OSX_x64 - Linux_x64 - Linux_arm - Linux_arm64 - Linux_musl_x64 - Linux_musl_arm - Linux_musl_arm64 - windows_x86 - windows_x64 - windows_arm - windows_arm64 jobParameters: isOfficialBuild: ${{ variables.isOfficialBuild }} signBinaries: ${{ variables.isOfficialBuild }} timeoutInMinutes: 120 - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/coreclr/templates/crossdac-pack.yml buildConfig: release platforms: - windows_x64 jobParameters: isOfficialBuild: ${{ variables.isOfficialBuild }} timeoutInMinutes: 120 crossDacPlatforms: - Linux_x64 - Linux_arm - Linux_arm64 - Linux_musl_x64 - Linux_musl_arm - Linux_musl_arm64 - windows_x64 - windows_arm - windows_arm64 # # Build Mono runtime packs # - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/common/global-build-job.yml buildConfig: release runtimeFlavor: mono platforms: - Android_x64 - Android_x86 - Android_arm - Android_arm64 - MacCatalyst_x64 - MacCatalyst_arm64 - tvOSSimulator_x64 - tvOSSimulator_arm64 - tvOS_arm64 - iOSSimulator_x64 - iOSSimulator_x86 - iOSSimulator_arm64 - iOS_arm - iOS_arm64 - OSX_x64 - OSX_arm64 - Linux_x64 - Linux_arm - Linux_arm64 - Linux_musl_x64 - Browser_wasm # - Linux_musl_arm # - Linux_musl_arm64 - windows_x64 - windows_x86 # - windows_arm # - windows_arm64 jobParameters: buildArgs: -s mono+libs+host+packs+mono.mscordbi -c $(_BuildConfig) nameSuffix: AllSubsets_Mono isOfficialBuild: ${{ variables.isOfficialBuild }} extraStepsTemplate: /eng/pipelines/common/upload-intermediate-artifacts-step.yml extraStepsParameters: name: MonoRuntimePacks # Build Mono AOT offset headers once, for consumption elsewhere # - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/mono/templates/generate-offsets.yml buildConfig: release platforms: - Android_x64 - Browser_wasm - tvOS_arm64 - iOS_arm64 - MacCatalyst_x64 jobParameters: 
isOfficialBuild: ${{ variables.isOfficialBuild }} # # Build Mono release AOT cross-compilers # - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/common/global-build-job.yml runtimeFlavor: mono buildConfig: release platforms: - Linux_x64 jobParameters: buildArgs: -s mono+packs -c $(_BuildConfig) /p:MonoCrossAOTTargetOS=Android+Browser /p:SkipMonoCrossJitConfigure=true /p:BuildMonoAOTCrossCompilerOnly=true nameSuffix: CrossAOT_Mono runtimeVariant: crossaot dependsOn: - mono_android_offsets - mono_browser_offsets monoCrossAOTTargetOS: - Android - Browser isOfficialBuild: ${{ variables.isOfficialBuild }} extraStepsTemplate: /eng/pipelines/common/upload-intermediate-artifacts-step.yml extraStepsParameters: name: MonoRuntimePacks - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/common/global-build-job.yml runtimeFlavor: mono buildConfig: release platforms: - Windows_x64 jobParameters: buildArgs: -s mono+packs -c $(_BuildConfig) /p:MonoCrossAOTTargetOS=Android+Browser /p:SkipMonoCrossJitConfigure=true /p:BuildMonoAOTCrossCompilerOnly=true nameSuffix: CrossAOT_Mono runtimeVariant: crossaot dependsOn: - mono_android_offsets - mono_browser_offsets monoCrossAOTTargetOS: - Android - Browser isOfficialBuild: ${{ variables.isOfficialBuild }} extraStepsTemplate: /eng/pipelines/common/upload-intermediate-artifacts-step.yml extraStepsParameters: name: MonoRuntimePacks - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/common/global-build-job.yml runtimeFlavor: mono buildConfig: release platforms: - OSX_x64 jobParameters: buildArgs: -s mono+packs -c $(_BuildConfig) /p:MonoCrossAOTTargetOS=Android+Browser+tvOS+iOS+MacCatalyst /p:SkipMonoCrossJitConfigure=true /p:BuildMonoAOTCrossCompilerOnly=true nameSuffix: CrossAOT_Mono runtimeVariant: crossaot dependsOn: - mono_android_offsets - mono_browser_offsets - mono_tvos_offsets - mono_ios_offsets - mono_maccatalyst_offsets monoCrossAOTTargetOS: - Android - Browser - tvOS - iOS - MacCatalyst isOfficialBuild: ${{ variables.isOfficialBuild }} extraStepsTemplate: /eng/pipelines/common/upload-intermediate-artifacts-step.yml extraStepsParameters: name: MonoRuntimePacks # # Build Mono LLVM runtime packs # - template: /eng/pipelines/common/platform-matrix-multijob.yml parameters: platforms: - OSX_x64 - Linux_x64 # - Linux_arm - Linux_arm64 # - Linux_musl_x64 # - Linux_musl_arm64 # - windows_x64 # - windows_x86 # - windows_arm # - windows_arm64 jobTemplates: # LLVMJIT - jobTemplate: /eng/pipelines/common/global-build-job.yml buildConfig: release runtimeFlavor: mono jobParameters: buildArgs: -s mono+libs+host+packs -c $(_BuildConfig) /p:MonoEnableLLVM=true /p:MonoBundleLLVMOptimizer=false nameSuffix: AllSubsets_Mono_LLVMJIT runtimeVariant: LLVMJIT isOfficialBuild: ${{ variables.isOfficialBuild }} extraStepsTemplate: /eng/pipelines/common/upload-intermediate-artifacts-step.yml extraStepsParameters: name: MonoRuntimePacks #LLVMAOT - jobTemplate: /eng/pipelines/common/global-build-job.yml buildConfig: release runtimeFlavor: mono jobParameters: buildArgs: -s mono+libs+host+packs -c $(_BuildConfig) /p:MonoEnableLLVM=true /p:MonoBundleLLVMOptimizer=true nameSuffix: AllSubsets_Mono_LLVMAOT runtimeVariant: LLVMAOT isOfficialBuild: ${{ variables.isOfficialBuild }} extraStepsTemplate: /eng/pipelines/common/upload-intermediate-artifacts-step.yml extraStepsParameters: name: MonoRuntimePacks # # Build libraries using live CoreLib from CoreCLR # - template: 
/eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/libraries/build-job.yml buildConfig: Release platforms: - OSX_arm64 - OSX_x64 - Linux_x64 - Linux_arm - Linux_arm64 - Linux_musl_x64 - Linux_musl_arm - Linux_musl_arm64 - windows_x86 - windows_x64 - windows_arm - windows_arm64 jobParameters: isOfficialBuild: ${{ variables.isOfficialBuild }} liveRuntimeBuildConfig: release # Official builds don't run tests, locally or on Helix runTests: false useHelix: false # # Build libraries AllConfigurations for packages # - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/libraries/build-job.yml buildConfig: Release platforms: - windows_x64 jobParameters: framework: allConfigurations isOfficialBuild: ${{ variables.isOfficialBuild }} isOfficialAllConfigurations: true liveRuntimeBuildConfig: release # Official builds don't run tests, locally or on Helix runTests: false useHelix: false # # Build Sourcebuild leg # - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/common/global-build-job.yml buildConfig: Release helixQueueGroup: ci platforms: - SourceBuild_Linux_x64 jobParameters: nameSuffix: SourceBuild extraStepsTemplate: /eng/pipelines/common/upload-intermediate-artifacts-step.yml extraStepsParameters: name: SourceBuildPackages timeoutInMinutes: 95 # # Installer Build # - template: /eng/pipelines/installer/installer-matrix.yml parameters: jobParameters: liveRuntimeBuildConfig: release liveLibrariesBuildConfig: Release isOfficialBuild: ${{ variables.isOfficialBuild }} platforms: - OSX_arm64 - OSX_x64 - Linux_x64 - Linux_arm - Linux_arm64 - Linux_musl_x64 - Linux_musl_arm - Linux_musl_arm64 - windows_x86 - windows_x64 - windows_arm - windows_arm64 # # Build PGO CoreCLR release # - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/coreclr/templates/build-job.yml buildConfig: release platforms: - windows_x64 - windows_x86 - Linux_x64 jobParameters: isOfficialBuild: ${{ variables.isOfficialBuild }} signBinaries: false testGroup: innerloop pgoType: 'PGO' # # PGO Build # - template: /eng/pipelines/installer/installer-matrix.yml parameters: buildConfig: Release jobParameters: isOfficialBuild: ${{ variables.isOfficialBuild }} liveRuntimeBuildConfig: release liveLibrariesBuildConfig: Release pgoType: 'PGO' platforms: - windows_x64 - windows_x86 - Linux_x64 # # Build Workloads # - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/mono/templates/workloads-build.yml buildConfig: release platforms: - windows_x64 jobParameters: isOfficialBuild: ${{ variables.isOfficialBuild }} timeoutInMinutes: 120 dependsOn: - Build_Android_arm_release_AllSubsets_Mono - Build_Android_arm64_release_AllSubsets_Mono - Build_Android_x86_release_AllSubsets_Mono - Build_Android_x64_release_AllSubsets_Mono - Build_Browser_wasm_Linux_release_AllSubsets_Mono - Build_iOS_arm_release_AllSubsets_Mono - Build_iOS_arm64_release_AllSubsets_Mono - Build_iOSSimulator_x64_release_AllSubsets_Mono - Build_iOSSimulator_x86_release_AllSubsets_Mono - Build_iOSSimulator_arm64_release_AllSubsets_Mono - Build_MacCatalyst_arm64_release_AllSubsets_Mono - Build_MacCatalyst_x64_release_AllSubsets_Mono - Build_tvOS_arm64_release_AllSubsets_Mono - Build_tvOSSimulator_arm64_release_AllSubsets_Mono - Build_tvOSSimulator_x64_release_AllSubsets_Mono - Build_Windows_x64_release_CrossAOT_Mono - ${{ if eq(variables.isOfficialBuild, true) }}: - template: 
/eng/pipelines/official/stages/publish.yml parameters: isOfficialBuild: ${{ variables.isOfficialBuild }}
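The trigger block near the top of this pipeline combines branch filters with path include/exclude lists. A rough sketch, in Python, of how a filter of that shape can be evaluated; fnmatch globbing and the precedence rule here are approximations of Azure Pipelines' actual wildcard semantics:

# Rough sketch: approximate the pipeline's path include/exclude filtering.
from fnmatch import fnmatch

include = ["*", "docs/manpages/*"]
exclude = [".github/*", "docs/*", "CODE-OF-CONDUCT.md", "CONTRIBUTING.md",
           "LICENSE.TXT", "PATENTS.TXT", "README.md", "SECURITY.md",
           "THIRD-PARTY-NOTICES.TXT"]

def triggers_build(changed_path: str) -> bool:
    # Assumption: a more specific include (docs/manpages/*) can win back a
    # path that a broader exclude (docs/*) would otherwise drop.
    included = any(fnmatch(changed_path, pat) for pat in include)
    excluded = any(fnmatch(changed_path, pat) for pat in exclude)
    specific_include = any(fnmatch(changed_path, pat)
                           for pat in include if pat != "*")
    return included and (not excluded or specific_include)

print(triggers_build("src/coreclr/jit/lower.cpp"))  # True
print(triggers_build("docs/design/readme.md"))      # False
print(triggers_build("docs/manpages/dotnet.1"))     # True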
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./eng/pipelines/libraries/helix.yml
parameters:
  runtimeFlavor: ''
  archType: ''
  buildConfig: ''
  creator: ''
  helixQueues: ''
  osGroup: ''
  targetRid: ''
  testRunNamePrefixSuffix: ''
  testScope: 'innerloop' # innerloop | outerloop | all
  interpreter: ''
  condition: always()
  extraHelixArguments: ''
  shouldContinueOnError: false
  scenarios: ''

steps:
- script: $(_msbuildCommand) $(_warnAsErrorParamHelixOverride) -restore $(Build.SourcesDirectory)/src/libraries/sendtohelix.proj
    /p:RuntimeFlavor=${{ parameters.runtimeFlavor }}
    /p:TargetArchitecture=${{ parameters.archType }}
    /p:TargetRuntimeIdentifier=${{ parameters.targetRid }}
    /p:Configuration=${{ parameters.buildConfig }}
    /p:TargetOS=${{ parameters.osGroup }}
    /p:MonoForceInterpreter=${{ parameters.interpreter }}
    /p:TestScope=${{ parameters.testScope }}
    /p:TestRunNamePrefixSuffix=${{ parameters.testRunNamePrefixSuffix }}
    /p:HelixBuild=$(Build.BuildNumber)
    ${{ parameters.extraHelixArguments }}
    /bl:$(Build.SourcesDirectory)/artifacts/log/$(_BuildConfig)/SendToHelix.binlog
  displayName: Send to Helix
  condition: and(succeeded(), ${{ parameters.condition }})
  continueOnError: ${{ eq(parameters.shouldContinueOnError, true) }}
  env:
    SYSTEM_ACCESSTOKEN: $(System.AccessToken) # We need to set this env var to publish helix results to Azure Dev Ops
    _Scenarios: ${{ join(',', parameters.scenarios) }} # Pass scenarios to MSBuild as env var to avoid need of escaping comma separated list
    ${{ if eq(variables['System.TeamProject'], 'internal') }}:
      HelixAccessToken: $(HelixApiAccessToken)
      HelixTargetQueues: ${{ replace(lower(join('+', parameters.helixQueues)), '.open', '') }}
      Creator: ''
    ${{ if eq(variables['System.TeamProject'], 'public') }}:
      HelixTargetQueues: ${{ join('+', parameters.helixQueues) }}
      Creator: ${{ parameters.creator }}
parameters:
  runtimeFlavor: ''
  archType: ''
  buildConfig: ''
  creator: ''
  helixQueues: ''
  osGroup: ''
  targetRid: ''
  testRunNamePrefixSuffix: ''
  testScope: 'innerloop' # innerloop | outerloop | all
  interpreter: ''
  condition: always()
  extraHelixArguments: ''
  shouldContinueOnError: false
  scenarios: ''

steps:
- script: $(_msbuildCommand) $(_warnAsErrorParamHelixOverride) -restore $(Build.SourcesDirectory)/src/libraries/sendtohelix.proj
    /p:RuntimeFlavor=${{ parameters.runtimeFlavor }}
    /p:TargetArchitecture=${{ parameters.archType }}
    /p:TargetRuntimeIdentifier=${{ parameters.targetRid }}
    /p:Configuration=${{ parameters.buildConfig }}
    /p:TargetOS=${{ parameters.osGroup }}
    /p:MonoForceInterpreter=${{ parameters.interpreter }}
    /p:TestScope=${{ parameters.testScope }}
    /p:TestRunNamePrefixSuffix=${{ parameters.testRunNamePrefixSuffix }}
    /p:HelixBuild=$(Build.BuildNumber)
    ${{ parameters.extraHelixArguments }}
    /bl:$(Build.SourcesDirectory)/artifacts/log/$(_BuildConfig)/SendToHelix.binlog
  displayName: Send to Helix
  condition: and(succeeded(), ${{ parameters.condition }})
  continueOnError: ${{ eq(parameters.shouldContinueOnError, true) }}
  env:
    SYSTEM_ACCESSTOKEN: $(System.AccessToken) # We need to set this env var to publish helix results to Azure Dev Ops
    _Scenarios: ${{ join(',', parameters.scenarios) }} # Pass scenarios to MSBuild as env var to avoid need of escaping comma separated list
    ${{ if eq(variables['System.TeamProject'], 'internal') }}:
      HelixAccessToken: $(HelixApiAccessToken)
      HelixTargetQueues: ${{ replace(lower(join('+', parameters.helixQueues)), '.open', '') }}
      Creator: ''
    ${{ if eq(variables['System.TeamProject'], 'public') }}:
      HelixTargetQueues: ${{ join('+', parameters.helixQueues) }}
      Creator: ${{ parameters.creator }}
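The internal branch of the env block rewrites the queue list with replace(lower(join('+', queues)), '.open', ''). A small Python sketch of that same chain, with a hypothetical queue list:

# Minimal sketch: the internal-project queue-name rewrite from the env block,
# applied to a hypothetical helixQueues list.
queues = ["Ubuntu.1804.Amd64.Open", "Windows.10.Amd64.Open"]

helix_target_queues = "+".join(queues).lower().replace(".open", "")
print(helix_target_queues)  # ubuntu.1804.amd64+windows.10.amd64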
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./eng/common/templates/post-build/common-variables.yml
variables:
  - group: AzureDevOps-Artifact-Feeds-Pats
  - group: DotNet-Blob-Feed
  - group: DotNet-DotNetCli-Storage
  - group: DotNet-MSRC-Storage
  - group: Publish-Build-Assets

  # Whether the build is internal or not
  - name: IsInternalBuild
    value: ${{ and(ne(variables['System.TeamProject'], 'public'), contains(variables['Build.SourceBranch'], 'internal')) }}

  # Default Maestro++ API Endpoint and API Version
  - name: MaestroApiEndPoint
    value: "https://maestro-prod.westus2.cloudapp.azure.com"
  - name: MaestroApiAccessToken
    value: $(MaestroAccessToken)
  - name: MaestroApiVersion
    value: "2020-02-20"

  - name: SourceLinkCLIVersion
    value: 3.0.0
  - name: SymbolToolVersion
    value: 1.0.1

  - name: runCodesignValidationInjection
    value: false
variables:
  - group: AzureDevOps-Artifact-Feeds-Pats
  - group: DotNet-Blob-Feed
  - group: DotNet-DotNetCli-Storage
  - group: DotNet-MSRC-Storage
  - group: Publish-Build-Assets

  # Whether the build is internal or not
  - name: IsInternalBuild
    value: ${{ and(ne(variables['System.TeamProject'], 'public'), contains(variables['Build.SourceBranch'], 'internal')) }}

  # Default Maestro++ API Endpoint and API Version
  - name: MaestroApiEndPoint
    value: "https://maestro-prod.westus2.cloudapp.azure.com"
  - name: MaestroApiAccessToken
    value: $(MaestroAccessToken)
  - name: MaestroApiVersion
    value: "2020-02-20"

  - name: SourceLinkCLIVersion
    value: 3.0.0
  - name: SymbolToolVersion
    value: 1.0.1

  - name: runCodesignValidationInjection
    value: false
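The IsInternalBuild expression above is a compile-time and/ne/contains check. A minimal Python sketch of the same predicate, with hypothetical inputs standing in for the System.TeamProject and Build.SourceBranch values:

# Minimal sketch: the IsInternalBuild predicate from the variables above.
def is_internal_build(team_project: str, source_branch: str) -> bool:
    return team_project != "public" and "internal" in source_branch

print(is_internal_build("internal", "refs/heads/internal/release/6.0"))  # True
print(is_internal_build("public", "refs/heads/main"))                    # False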
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/mono/wasm/debugger/tests/debugger-test/weather.json
[ { "dateFormatted": "06/05/2018", "temperatureC": 1, "summary": "Freezing", "temperatureF": 33 }, { "dateFormatted": "07/05/2018", "temperatureC": 14, "summary": "Bracing", "temperatureF": 57 }, { "dateFormatted": "08/05/2018", "temperatureC": -13, "summary": "Freezing", "temperatureF": 9 }, { "dateFormatted": "09/05/2018", "temperatureC": -16, "summary": "Balmy", "temperatureF": 4 }, { "dateFormatted": "10/05/2018", "temperatureC": -2, "summary": "Chilly", "temperatureF": 29 } ]
[ { "dateFormatted": "06/05/2018", "temperatureC": 1, "summary": "Freezing", "temperatureF": 33 }, { "dateFormatted": "07/05/2018", "temperatureC": 14, "summary": "Bracing", "temperatureF": 57 }, { "dateFormatted": "08/05/2018", "temperatureC": -13, "summary": "Freezing", "temperatureF": 9 }, { "dateFormatted": "09/05/2018", "temperatureC": -16, "summary": "Balmy", "temperatureF": 4 }, { "dateFormatted": "10/05/2018", "temperatureC": -2, "summary": "Chilly", "temperatureF": 29 } ]
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/installer/pkg/sfx/installers/dotnet-runtime-deps/dotnet-runtime-deps-centos.7.proj
<Project Sdk="Microsoft.Build.NoTargets"> <PropertyGroup> <GenerateInstallers Condition="'$(BuildRpmPackage)' != 'true'">false</GenerateInstallers> <PackageTargetOS>centos.7</PackageTargetOS> </PropertyGroup> <ItemGroup> <LinuxPackageDependency Include="openssl-libs;libicu;krb5-libs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.Build.NoTargets"> <PropertyGroup> <GenerateInstallers Condition="'$(BuildRpmPackage)' != 'true'">false</GenerateInstallers> <PackageTargetOS>centos.7</PackageTargetOS> </PropertyGroup> <ItemGroup> <LinuxPackageDependency Include="openssl-libs;libicu;krb5-libs" /> </ItemGroup> </Project>
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/libraries/tests.proj
<Project Sdk="Microsoft.Build.Traversal"> <PropertyGroup Condition="'$(BuildAllConfigurations)' != 'true'"> <!-- Build for NetCoreAppCurrent by default if no BuildTargetFramework is supplied or if not all configurations are built. --> <TargetFramework>$([MSBuild]::ValueOrDefault('$(BuildTargetFramework)', '$(NetCoreAppCurrent)'))-$(TargetOS)</TargetFramework> <!-- Filter ProjectReferences to build the best matching target framework only. --> <FilterTraversalProjectReferences>true</FilterTraversalProjectReferences> </PropertyGroup> <PropertyGroup> <TestInParallel Condition="'$(Coverage)' == 'true'">false</TestInParallel> <!-- For tests we want to continue running if a test run failed. --> <TestContinueOnError>ErrorAndContinue</TestContinueOnError> <TraversalGlobalProperties>BuildAllProjects=true</TraversalGlobalProperties> <CoverageReportInputPath>$(ArtifactsBinDir)\*.Tests\**\coverage.opencover.xml</CoverageReportInputPath> <CoverageReportDir>$(ArtifactsDir)coverage</CoverageReportDir> <EnableCoverageSupport>true</EnableCoverageSupport> <TestAssemblies Condition="'$(TestAssemblies)' == ''">true</TestAssemblies> <TestPackages Condition="'$(TestPackages)' == ''">false</TestPackages> <TestTrimming Condition="'$(TestTrimming)' == ''">false</TestTrimming> </PropertyGroup> <!-- Samples which are too complex for CI --> <ItemGroup Condition="'$(TargetOS)' == 'Browser'"> <ProjectExclusions Include="$(MonoProjectRoot)sample\wasm\console-node-ts\Wasm.Console.Node.TS.Sample.csproj" /> <ProjectExclusions Include="$(MonoProjectRoot)sample\wasm\browser-webpack\Wasm.Browser.WebPack.Sample.csproj" /> <ProjectExclusions Include="$(MonoProjectRoot)sample\wasm\browser-nextjs\Wasm.Browser.NextJs.Sample.csproj" /> <!-- These tests are completely disabled on wasm --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Text.RegularExpressions/tests/System.Text.RegularExpressions.Generators.Tests/System.Text.RegularExpressions.Generators.Tests.csproj" /> </ItemGroup> <!-- Wasm aot on all platforms --> <ItemGroup Condition="'$(TargetOS)' == 'Browser' and '$(BuildAOTTestsOnHelix)' == 'true' and '$(RunDisabledWasmTests)' != 'true' and '$(RunAOTCompilation)' == 'true'"> <!-- https://github.com/dotnet/runtime/issues/61756 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Text.RegularExpressions\tests\System.Text.RegularExpressions.Tests.csproj" /> <!-- https://github.com/dotnet/runtime/issues/65356 - OOM while linking --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Text.Json\tests\System.Text.Json.SourceGeneration.Tests\System.Text.Json.SourceGeneration.Roslyn3.11.Tests.csproj" /> <!-- https://github.com/dotnet/runtime/issues/65411 - possible OOM when compiling System.Text.Json.SourceGeneration.Roslyn4.0.Tests.dll.bc -> .o --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Text.Json\tests\System.Text.Json.SourceGeneration.Tests\System.Text.Json.SourceGeneration.Roslyn4.0.Tests.csproj" /> <!-- https://github.com/dotnet/runtime/issues/61524 - OOM while linking --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Text.Json\tests\System.Text.Json.Tests\System.Text.Json.Tests.csproj" /> </ItemGroup> <!-- Projects that don't support code coverage measurement. 
--> <ItemGroup Condition="'$(Coverage)' == 'true'"> <ProjectExclusions Include="$(CommonTestPath)Common.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.XmlSerializer.Generator\tests\Microsoft.XmlSerializer.Generator.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Transactions.Local\tests\System.Transactions.Local.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetArchitecture)' == 'ARMv6'"> <!-- https://github.com/dotnet/runtime/issues/64673 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.Ping\tests\FunctionalTests\System.Net.Ping.Functional.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetsMobile)' == 'true' or '$(TargetArchitecture)' == 'ARMv6'"> <!-- DllImportGenerator runtime tests depend on DNNE, which does not support mobile platforms. --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.InteropServices\tests\DllImportGenerator.Tests\DllImportGenerator.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.InteropServices\tests\DllImportGenerator.UnitTests\DllImportGenerator.Unit.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'windows' and '$(TargetArchitecture)' == 'arm'"> <!-- DllImportGenerator runtime tests depend on DNNE, which does not support Windows ARM32 as we don't officially support it. --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.InteropServices\tests\DllImportGenerator.Tests\DllImportGenerator.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetArchitecture)' == 'armel'"> <!-- DllImportGenerator runtime tests depend on DNNE, which does not support armel as we don't officially support it. --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.InteropServices\tests\DllImportGenerator.Tests\DllImportGenerator.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetArchitecture)' == 'arm'"> <!-- Issue: https://github.com/dotnet/runtime/issues/60705 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.InteropServices\tests\DllImportGenerator.UnitTests\DllImportGenerator.Unit.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'FreeBSD'"> <!-- DllImportGenerator runtime tests build depends pulling down a pre-built nethost binary, which is not available for FreeBSD. --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.InteropServices\tests\DllImportGenerator.Tests\DllImportGenerator.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'linux' and '$(TargetArchitecture)' == 's390x'"> <!-- DllImportGenerator runtime tests build depends pulling down a pre-built nethost binary, which is not available for s390x. --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.InteropServices\tests\DllImportGenerator.Tests\DllImportGenerator.Tests.csproj" /> <!-- DllImportGenerator unit tests fail since NuGet 5.6.0 signature verification does not work on big-endian systems (needs >= 5.11.0). 
--> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.InteropServices\tests\DllImportGenerator.UnitTests\DllImportGenerator.Unit.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'Windows' and '$(RuntimeFlavor)' == 'Mono' and '$(RunDisabledMonoTestsOnWindows)' != 'true'"> <!-- Issue: https://github.com/dotnet/runtime/issues/53281 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.WebSockets.Client\tests\System.Net.WebSockets.Client.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.InteropServices\tests\DllImportGenerator.UnitTests\DllImportGenerator.Unit.Tests.csproj" /> <!-- Issue: https://github.com/dotnet/runtime/issues/63723 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Drawing.Common\tests\System.Drawing.Common.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'Android'"> <!-- Never going to run on Android --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.OpenSsl\tests\System.Security.Cryptography.OpenSsl.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'Android' and '$(RunDisabledAndroidTests)' != 'true'"> <!-- Tests time out intermittently --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Hosting\tests\UnitTests\Microsoft.Extensions.Hosting.Unit.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.Security\tests\FunctionalTests\System.Net.Security.Tests.csproj" /> <!-- Tests crash --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Globalization\tests\Invariant\Invariant.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.IO.FileSystem\tests\System.IO.FileSystem.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.IO.FileSystem.Watcher\tests\System.IO.FileSystem.Watcher.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.IO.Ports\tests\System.IO.Ports.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.Quic\tests\FunctionalTests\System.Net.Quic.Functional.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Reflection\tests\System.Reflection.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.Serialization.Formatters\tests\System.Runtime.Serialization.Formatters.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.Algorithms\tests\System.Security.Cryptography.Algorithms.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.Csp\tests\System.Security.Cryptography.Csp.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Threading.Thread\tests\System.Threading.Thread.Tests.csproj" /> <!-- Actual test failures --> <!-- https://github.com/dotnet/runtime/issues/50871 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.DependencyInjection\tests\DI.Tests\Microsoft.Extensions.DependencyInjection.Tests.csproj" /> <!-- https://github.com/dotnet/runtime/issues/50874 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Logging.EventSource\tests\Microsoft.Extensions.Logging.EventSource.Tests.csproj" /> <!-- https://github.com/dotnet/runtime/issues/50923 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Data.Common\tests\System.Data.Common.Tests.csproj" /> <!-- 
https://github.com/dotnet/runtime/issues/50926 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Diagnostics.Tracing\tests\System.Diagnostics.Tracing.Tests.csproj" /> <!-- https://github.com/dotnet/runtime/issues/49936 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Text.Json\tests\System.Text.Json.Tests\System.Text.Json.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.Http\tests\FunctionalTests\System.Net.Http.Functional.Tests.csproj" /> <!-- Execution may be compromised --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Caching.Memory\tests\Microsoft.Extensions.Caching.Memory.Tests.csproj" /> <!-- PSNE --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Console/tests/System.Console.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.Primitives/tests/FunctionalTests/System.Net.Primitives.Functional.Tests.csproj" /> <!-- Crashes on CI (possibly flakey) --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Private.Xml/tests/Misc/System.Xml.Misc.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Dynamic.Runtime/tests/System.Dynamic.Runtime.Tests.csproj"/> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'Android' and '$(TargetArchitecture)' == 'arm64' and '$(RunDisabledAndroidTests)' != 'true'"> <!-- Crashes on CI (possibly flakey) https://github.com/dotnet/runtime/issues/52615 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Configuration/tests/FunctionalTests/Microsoft.Extensions.Configuration.Functional.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Configuration.Binder/tests/Microsoft.Extensions.Configuration.Binder.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.FileProviders.Physical/tests/Microsoft.Extensions.FileProviders.Physical.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Win32.Primitives/tests/Microsoft.Win32.Primitives.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Collections.Concurrent/tests/System.Collections.Concurrent.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Collections.Specialized/tests/System.Collections.Specialized.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.ComponentModel.Annotations/tests/System.ComponentModel.Annotations.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Composition.Hosting/tests/System.Composition.Hosting.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.IO.FileSystem.Primitives/tests/System.IO.FileSystem.Primitives.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Memory/tests/System.Memory.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.Mail/tests/Functional/System.Net.Mail.Functional.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.NameResolution/tests/PalTests/System.Net.NameResolution.Pal.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.WebClient/tests/System.Net.WebClient.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Numerics.Tensors/tests/System.Numerics.Tensors.Tests.csproj" /> <ProjectExclusions 
Include="$(MSBuildThisFileDirectory)System.Private.Xml/tests/XmlReader/Tests/System.Xml.RW.XmlReader.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Private.Xml/tests/XPath/XPathDocument/System.Xml.XPath.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Private.Uri/tests/ExtendedFunctionalTests/System.Private.Uri.ExtendedFunctional.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Reflection.DispatchProxy/tests/System.Reflection.DispatchProxy.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.X509Certificates\tests\System.Security.Cryptography.X509Certificates.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'Android' and '$(TargetArchitecture)' == 'x64' and '$(RunDisabledAndroidTests)' != 'true'"> <!-- Test flakiness on x64 https://github.com/dotnet/runtime/issues/49937 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Threading\tests\System.Threading.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'Android' and '$(TargetArchitecture)' == 'x86' and '$(RunDisabledAndroidTests)' != 'true'"> <!-- Crashes only on x86 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Primitives\tests\Microsoft.Extensions.Primitives.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.Extensions\tests\System.Runtime.Extensions.Tests.csproj" /> <!-- https://github.com/dotnet/runtime/issues/50493 --> <ProjectExclusions Include="$(RepoRoot)\src\tests\FunctionalTests\Android\Device_Emulator\AOT\Android.Device_Emulator.Aot.Test.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'iOS' and '$(RunDisablediOSTests)' != 'true'"> <ProjectExclusions Include="$(MSBuildThisFileDirectory)*\tests\**\*.Tests.csproj" /> <!-- Ref.Emit in XSLCompiledTransform --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Private.Xml\tests\Xslt\XslCompiledTransformApi\System.Xml.Xsl.XslCompiledTransformApi.Tests.csproj" /> <!-- Functional tests on devices have problems with return codes from mlaunch --> <ProjectExclusions Include="$(RepoRoot)\src\tests\FunctionalTests\$(TargetOS)\Device\**\*.Test.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'iOSSimulator' and '$(TargetArchitecture)' == 'arm64' and '$(RunDisablediOSTests)' != 'true'"> <!-- Functional tests on arm64 simulator have problems with return codes from mlaunch --> <ProjectExclusions Include="$(RepoRoot)\src\tests\FunctionalTests\iOS\Simulator\**\*.Test.csproj" /> </ItemGroup> <!-- Excluding all tests for devices until building on helix works properly --> <ItemGroup Condition="'$(TargetOS)' == 'tvOS' and '$(RunDisablediOSTests)' != 'true'"> <!-- Ref.Emit in XSLCompiledTransform --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Private.Xml\tests\Xslt\XslCompiledTransformApi\System.Xml.Xsl.XslCompiledTransformApi.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Reflection\tests\System.Reflection.Emit.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Reflection.Emit.ILGeneration/tests/System.Reflection.Emit.ILGeneration.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Reflection.Emit.Lightweight/tests/System.Reflection.Emit.Lightweight.Tests.csproj" /> <ProjectExclusions 
Include="$(MSBuildThisFileDirectory)System.Reflection.DispatchProxy/tests/System.Reflection.DispatchProxy.Tests.csproj" /> <!-- Has deps that JIT, need re-done in order to pass --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Hosting/tests/UnitTests/Microsoft.Extensions.Hosting.Unit.Tests.csproj" /> <!-- Test suites hang and time out. https://github.com/dotnet/runtime/issues/60713 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.DependencyInjection/tests/DI.External.Tests/Microsoft.Extensions.DependencyInjection.ExternalContainers.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.NetworkInformation/tests/FunctionalTests/System.Net.NetworkInformation.Functional.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Reflection/tests/System.Reflection.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Threading.Channels/tests/System.Threading.Channels.Tests.csproj" /> <!-- Functional tests on devices have problems with return codes from mlaunch --> <ProjectExclusions Include="$(RepoRoot)\src\tests\FunctionalTests\$(TargetOS)\Device\**\*.Test.csproj" /> </ItemGroup> <!-- Excluding all tests for aot catalyst until building on helix works properly --> <ItemGroup Condition="('$(TargetOS)' == 'MacCatalyst' and '$(BuildTestsOnHelix)' == 'true') and '$(RunDisablediOSTests)' != 'true'"> <ProjectExclusions Include="$(MSBuildThisFileDirectory)*\tests\**\*.Tests.csproj" /> <!-- No functional tests until helix stabilizes --> <ProjectExclusions Include="$(RepoRoot)\src\tests\FunctionalTests\iOS\Simulator\**\*.Test.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'MacCatalyst'"> <ProjectExclusions Include="$(RepoRoot)/src/tests/FunctionalTests/iOS/Simulator/XmlFormatWriterGeneratorAOT/iOS.Simulator.XmlFormatWriterGeneratorAot.Test.csproj" /> </ItemGroup> <!-- Run only explicitly selected tests for Mac Catalyst in App Sandbox --> <ItemGroup Condition="'$(TargetOS)' == 'MacCatalyst' and '$(EnableAppSandbox)' == 'true'"> <ProjectExclusions Include="$(MSBuildThisFileDirectory)*/tests/**/*.Tests.csproj" /> <ProjectExclusions Include="$(RepoRoot)/src/tests/FunctionalTests/iOS/Simulator/**/*.Test.csproj" /> <!-- https://github.com/dotnet/runtime/pull/61507 --> <ProjectExclusions Remove="$(MSBuildThisFileDirectory)System.Diagnostics.Process/tests/System.Diagnostics.Process.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="('$(TargetOS)' == 'iOS' or '$(TargetOS)' == 'iOSSimulator' or '$(TargetOS)' == 'tvOS' or '$(TargetOS)' == 'tvOSSimulator' or '$(TargetOS)' == 'MacCatalyst') and '$(RunDisablediOSTests)' != 'true'"> <!-- PNSE --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.Quic/tests/FunctionalTests/System.Net.Quic.Functional.Tests.csproj" /> <!-- https://github.com/dotnet/runtime/issues/51414 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.OpenSsl/tests/System.Security.Cryptography.OpenSsl.Tests.csproj" /> <!-- App Crash https://github.com/dotnet/runtime/issues/53624 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.CSharp/tests/Microsoft.CSharp.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Diagnostics.Tracing/tests/System.Diagnostics.Tracing.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Dynamic.Runtime/tests/System.Dynamic.Runtime.Tests.csproj" /> <ProjectExclusions 
Include="$(MSBuildThisFileDirectory)System.Linq.Expressions/tests/System.Linq.Expressions.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.Requests/tests/System.Net.Requests.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.Security/tests/FunctionalTests/System.Net.Security.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Text.Json/tests/System.Text.Json.Tests/System.Text.Json.Tests.csproj" /> <ProjectExclusions Include="$(RepoRoot)/src/tests/FunctionalTests/iOS/Simulator/PInvoke/iOS.Simulator.PInvoke.Test.csproj" /> <ProjectExclusions Include="$(RepoRoot)/src/tests/FunctionalTests/tvOS/Simulator/AOT/tvOS.Simulator.Aot.Test.csproj" /> <!-- Crashes randomly during test runs https://github.com/dotnet/runtime/issues/52460 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Threading.Tasks\tests\System.Threading.Tasks.Tests.csproj" /> <!-- Crash https://github.com/dotnet/runtime/issues/56085 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Text.Json/tests/System.Text.Json.SourceGeneration.Tests/System.Text.Json.SourceGeneration.Roslyn3.11.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Text.Json/tests/System.Text.Json.SourceGeneration.Tests/System.Text.Json.SourceGeneration.Roslyn4.0.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="('$(TargetOS)' == 'iOS' or '$(TargetOS)' == 'iOSSimulator' or '$(TargetOS)' == 'tvOS' or '$(TargetOS)' == 'tvOSSimulator') and '$(RunDisablediOSTests)' != 'true'"> <!-- https://github.com/dotnet/runtime/issues/51335 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.IO.Pipes/tests/System.IO.Pipes.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'Browser' and '$(RunDisabledWasmTests)' != 'true' and '$(RunAOTCompilation)' != 'true'"> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'Browser' and '$(RunDisabledWasmTests)' != 'true'"> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.NETCore.Platforms\tests\Microsoft.NETCore.Platforms.Tests.csproj" /> <!-- This test is disabled via an assembly-level attribute in source. We exclude it here to avoid queuing/running a work item entirely. https://github.com/dotnet/runtime/issues/35970 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Caching.Memory\tests\Microsoft.Extensions.Caching.Memory.Tests.csproj" /> <!-- This test is disabled via an assembly-level attribute in source. We exclude it here to avoid queuing/running a work item entirely. https://github.com/mono/mono/issues/16417 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.ComponentModel.Composition\tests\System.ComponentModel.Composition.Tests.csproj" /> <!-- Mono-Browser ignores runtimeconfig.template.json (e.g. 
for this it has "System.Globalization.EnforceJapaneseEraYearRanges": true) --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Globalization.Calendars\tests\CalendarTestWithConfigSwitch\System.Globalization.CalendarsWithConfigSwitch.Tests.csproj" /> <!-- https://github.com/dotnet/runtime/issues/37669 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.DependencyModel\tests\Microsoft.Extensions.DependencyModel.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Hosting\tests\UnitTests\Microsoft.Extensions.Hosting.Unit.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.Csp\tests\System.Security.Cryptography.Csp.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.Encoding\tests\System.Security.Cryptography.Encoding.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.OpenSsl\tests\System.Security.Cryptography.OpenSsl.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.Pkcs\tests\System.Security.Cryptography.Pkcs.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.Primitives\tests\System.Security.Cryptography.Primitives.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.Xml\tests\System.Security.Cryptography.Xml.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.X509Certificates\tests\System.Security.Cryptography.X509Certificates.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.Cose\tests\System.Security.Cryptography.Cose.Tests.csproj" /> <!-- This OuterLoop test requires browser UI, but the Helix agents are headless --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.WebSockets.Client\tests\wasm\System.Net.WebSockets.Client.Wasm.Tests.csproj" /> </ItemGroup> <!-- Aggressive Trimming related failures --> <ItemGroup Condition="('$(TargetOS)' != 'Browser' and '$(RunAOTCompilation)' == 'true' and '$(MonoForceInterpreter)' != 'true') or ('$(TargetOS)' == 'Browser' and '$(BuildAOTTestsOnHelix)' == 'true' and '$(RunDisabledWasmTests)' != 'true')"> <!-- Issue: https://github.com/dotnet/runtime/issues/59926 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.Serialization.Xml\tests\System.Runtime.Serialization.Xml.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="('$(TargetOS)' != 'Browser' and '$(RunAOTCompilation)' == 'true' and '$(MonoForceInterpreter)' != 'true') or ('$(TargetOS)' == 'Browser' and '$(BuildAOTTestsOnHelix)' == 'true' and '$(RunDisabledWasmTests)' != 'true') or ('$(TargetOS)' == 'iOS' and '$(BuildTestsOnHelix)' == 'true')"> <!-- Issue: https://github.com/dotnet/runtime/issues/50724 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Composition\tests\System.Composition.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Dynamic.Runtime\tests\System.Dynamic.Runtime.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.DependencyInjection\tests\DI.Tests\Microsoft.Extensions.DependencyInjection.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Logging.EventSource\tests\Microsoft.Extensions.Logging.EventSource.Tests.csproj" /> <ProjectExclusions 
Include="$(MSBuildThisFileDirectory)System.Diagnostics.DiagnosticSource\tests\System.Diagnostics.DiagnosticSource.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Diagnostics.Tracing/tests/System.Diagnostics.Tracing.Tests.csproj" /> <!-- Issue: https://github.com/dotnet/runtime/issues/51708 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Linq.Expressions\tests\System.Linq.Expressions.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'Browser' and '$(BrowserHost)' == 'windows' and '$(Scenario)' == 'WasmTestOnBrowser' and '$(RunDisabledWasmTestsOnWindows)' != 'true'"> <!-- Issue: https://github.com/dotnet/runtime/issues/55429 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.VisualBasic.Core\tests\Microsoft.VisualBasic.Core.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.IO\tests\System.IO.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Linq.Expressions\tests\System.Linq.Expressions.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Memory\tests\System.Memory.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime\tests\System.Runtime.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Private.Xml\tests\Writers\XmlWriterApi\System.Xml.RW.XmlWriterApi.Tests.csproj" /> <ProjectExclusions Include="$(RepoRoot)\src\tests\BuildWasmApps\Wasm.Build.Tests\Wasm.Build.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'OSX' and '$(TargetArchitecture)' == 'arm64' and '$(RunDisabledAppleSiliconTests)' != 'true'"> <!-- ActiveIssue Apple Silicon No usable version of libssl was found https://github.com/dotnet/runtime/issues/49083 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.Algorithms/tests/System.Security.Cryptography.Algorithms.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.OpenSsl/tests/System.Security.Cryptography.OpenSsl.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetArchitecture)' == 's390x' and '$(RunDisableds390xTests)' != 'true'"> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Drawing.Common\tests\System.Drawing.Common.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TestSingleFile)' == 'true' and '$(TestNativeAot)' != 'true'"> <!-- Run only a small randomly chosen set of passing test suites --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)*\tests\**\*.Tests.csproj" /> <ProjectExclusions Remove="$(MSBuildThisFileDirectory)System.Collections\tests\System.Collections.Tests.csproj" /> <ProjectExclusions Remove="$(MSBuildThisFileDirectory)System.IO.IsolatedStorage\tests\System.IO.IsolatedStorage.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TestNativeAot)' == 'true'"> <!-- Run only a small randomly chosen set of passing test suites --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)*\tests\**\*.Tests.csproj" /> <ProjectExclusions Remove="$(MSBuildThisFileDirectory)System.Collections\tests\System.Collections.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(RunSmokeTestsOnly)' == 'true'"> <ProjectReference Include="$(MSBuildThisFileDirectory)System.Runtime\tests\System.Runtime.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(RunSmokeTestsOnly)' != 'true'"> <ProjectReference Include="$(MSBuildThisFileDirectory)*\tests\**\*.Tests.csproj" Exclude="@(ProjectExclusions)" 
Condition="'$(TestAssemblies)' == 'true'" /> <ProjectReference Include="$(MSBuildThisFileDirectory)testPackages\testPackages.proj" Condition="'$(TestPackages)' == 'true'" /> <TrimmingTestProjects Include="$(MSBuildThisFileDirectory)*\tests\**\*.TrimmingTests.proj" Exclude="@(ProjectExclusions)" Condition="'$(TestTrimming)' == 'true'" AdditionalProperties="%(AdditionalProperties);SkipTrimmingProjectsRestore=true" /> <ProjectReference Include="@(TrimmingTestProjects)" /> <!-- wasm.build.tests are run on _WasmBuildTests job on CI, and with library tests locally. --> <ProjectReference Include="$(RepoRoot)\src\tests\BuildWasmApps\Wasm.Build.Tests\*.Tests.csproj" Exclude="@(ProjectExclusions)" Condition="'$(TargetOS)' == 'Browser' and (('$(ContinuousIntegrationBuild)' == 'true' and '$(TestWasmBuildTests)' == 'true') or ('$(ContinuousIntegrationBuild)' != 'true' and '$(TestAssemblies)' == 'true'))" BuildInParallel="false" /> <ProjectReference Include="$(RepoRoot)\src\tests\BuildWasmApps\Wasm.Debugger.Tests\*.Tests.csproj" Exclude="@(ProjectExclusions)" Condition="'$(TargetOS)' == 'Browser' and (('$(ContinuousIntegrationBuild)' == 'true' and '$(TestWasmDebuggerTests)' == 'true') or ('$(ContinuousIntegrationBuild)' != 'true' and '$(TestAssemblies)' == 'true'))" BuildInParallel="false" /> </ItemGroup> <ItemGroup Condition="'$(ArchiveTests)' == 'true' and '$(TargetOS)' == 'iOS'"> <!-- Only System.Runtime tests on iOS for now --> <ProjectReference Include="$(MSBuildThisFileDirectory)System.Runtime\tests\System.Runtime.Tests.csproj" /> <ProjectReference Include="$(RepoRoot)\src\tests\FunctionalTests\iOS\Device\**\*.Test.csproj" Exclude="@(ProjectExclusions)" BuildInParallel="false" /> </ItemGroup> <ItemGroup Condition="'$(ArchiveTests)' == 'true' and '$(RunSmokeTestsOnly)' != 'true' and '$(TargetOS)' == 'iOSSimulator'"> <!-- https://github.com/dotnet/runtime/issues/57666 --> <!-- <ProjectReference Include="$(MonoProjectRoot)sample\iOS\Program.csproj" BuildInParallel="false" /> --> <ProjectReference Include="$(RepoRoot)\src\tests\FunctionalTests\iOS\Simulator\**\*.Test.csproj" Exclude="@(ProjectExclusions)" BuildInParallel="false" /> </ItemGroup> <ItemGroup Condition="'$(ArchiveTests)' == 'true' and '$(RunSmokeTestsOnly)' != 'true' and '$(TargetOS)' == 'tvOS'"> <ProjectReference Include="$(RepoRoot)\src\tests\FunctionalTests\tvOS\Device\**\*.Test.csproj" Exclude="@(ProjectExclusions)" BuildInParallel="false" /> </ItemGroup> <ItemGroup Condition="'$(ArchiveTests)' == 'true' and '$(RunSmokeTestsOnly)' != 'true' and '$(TargetOS)' == 'MacCatalyst'"> <!-- https://github.com/dotnet/runtime/issues/57666 --> <!-- <ProjectReference Include="$(MonoProjectRoot)sample\iOS\Program.csproj" BuildInParallel="false" /> --> <ProjectReference Include="$(RepoRoot)\src\tests\FunctionalTests\iOS\Simulator\**\*.Test.csproj" Exclude="@(ProjectExclusions)" BuildInParallel="false" /> </ItemGroup> <ItemGroup Condition="'$(ArchiveTests)' == 'true' and '$(RunSmokeTestsOnly)' != 'true' and '$(TargetOS)' == 'tvOSSimulator'"> <ProjectReference Include="$(RepoRoot)\src\tests\FunctionalTests\tvOS\Simulator\**\*.Test.csproj" Exclude="@(ProjectExclusions)" BuildInParallel="false" /> </ItemGroup> <ItemGroup Condition="'$(ArchiveTests)' == 'true' and '$(RunSmokeTestsOnly)' != 'true' and '$(TargetOS)' == 'Android'"> <ProjectReference Include="$(MonoProjectRoot)sample\Android\AndroidSampleApp.csproj" BuildInParallel="false" /> <ProjectReference Include="$(RepoRoot)\src\tests\FunctionalTests\Android\**\*.Test.csproj" 
Exclude="@(ProjectExclusions)" BuildInParallel="false" /> </ItemGroup> <PropertyGroup> <Samples_BuildInParallel Condition="'$(OS)' == 'Windows_NT'">false</Samples_BuildInParallel> <Samples_BuildInParallel Condition="'$(OS)' != 'Windows_NT'">true</Samples_BuildInParallel> </PropertyGroup> <!-- Don't build samples, and functional tests on EAT, AOT, WBT, and Debugger lanes --> <ItemGroup Condition="'$(ArchiveTests)' == 'true' and '$(RunSmokeTestsOnly)' != 'true' and '$(TargetOS)' == 'Browser' and '$(BuildAOTTestsOnHelix)' != 'true' and '$(TestWasmBuildTests)' != 'true' and '$(TestWasmDebuggerTests)' != 'true'"> <ProjectReference Include="$(MonoProjectRoot)sample\wasm\**\*.Sample.csproj" Exclude="@(ProjectExclusions)" BuildInParallel="$(Samples_BuildInParallel)" /> <ProjectReference Include="$(RepoRoot)\src\tests\FunctionalTests\WebAssembly\**\*.Test.csproj" Exclude="@(ProjectExclusions)" BuildInParallel="false" /> </ItemGroup> <Target Name="GenerateMergedCoverageReport" AfterTargets="Test" DependsOnTargets="GenerateCoverageReport" Condition="'$(TestAssemblies)' == 'true' and '$(Coverage)' == 'true'" /> <!-- Build Apple app bundles using AppBundleRoot --> <UsingTask Condition="'$(UseAppBundleRootForBuildingTests)' == 'true'" TaskName="XcodeCreateProject" AssemblyFile="$(AppleAppBuilderTasksAssemblyPath)" /> <UsingTask Condition="'$(UseAppBundleRootForBuildingTests)' == 'true'" TaskName="XcodeBuildApp" AssemblyFile="$(AppleAppBuilderTasksAssemblyPath)" /> <Target Condition="'$(UseAppBundleRootForBuildingTests)' == 'true'" Name="BuildAppleAppBundles" AfterTargets="Build"> <PropertyGroup> <!-- TODO: Unify this with TestArchiveTestsRoot in src/libraries/Directory.Build.props somehow, we can't use IsFunctionalTest==true here because it is only set in the context of the .csproj --> <TestArchiveNormalTestsRoot>$(TestArchiveRoot)tests/</TestArchiveNormalTestsRoot> <TestArchiveFunctionalTestsRoot>$(TestArchiveRoot)runonly/</TestArchiveFunctionalTestsRoot> <TestArchiveNormalTestsDir>$(TestArchiveNormalTestsRoot)$(OSPlatformConfig)/</TestArchiveNormalTestsDir> <TestArchiveFunctionalTestsDir>$(TestArchiveFunctionalTestsRoot)$(OSPlatformConfig)/</TestArchiveFunctionalTestsDir> <NormalTestsAppBundleRoot>$(AppBundleRoot)/tests/</NormalTestsAppBundleRoot> <FunctionalTestsAppBundleRoot>$(AppBundleRoot)/runonly/</FunctionalTestsAppBundleRoot> <NormalTestsAllAppBundlesRoot>$(AppBundleRoot)/tests.all/</NormalTestsAllAppBundlesRoot> <FunctionalTestsAllAppBundlesRoot>$(AppBundleRoot)/runonly.all/</FunctionalTestsAllAppBundlesRoot> </PropertyGroup> <ItemGroup> <NormalTestAppBundles Include="$(NormalTestsAppBundleRoot)*/AppBundle/CMakeLists.txt" /> <NormalTestCMakeEntries Include="cmake_minimum_required(VERSION 3.16)" /> <NormalTestCMakeEntries Include="project(NormalTestAppBundles)" /> <NormalTestCMakeEntries Include="add_subdirectory(%(NormalTestAppBundles.RootDir)%(NormalTestAppBundles.Directory) %(NormalTestAppBundles.RecursiveDir) EXCLUDE_FROM_ALL)" /> <FunctionalTestAppBundles Include="$(FunctionalTestsAppBundleRoot)*/AppBundle/CMakeLists.txt" /> <FunctionalTestCMakeEntries Include="cmake_minimum_required(VERSION 3.16)" /> <FunctionalTestCMakeEntries Include="project(FunctionalTestAppBundles)" /> <FunctionalTestCMakeEntries Include="add_subdirectory(%(FunctionalTestAppBundles.RootDir)%(FunctionalTestAppBundles.Directory) %(FunctionalTestAppBundles.RecursiveDir) EXCLUDE_FROM_ALL)" /> </ItemGroup> <WriteLinesToFile File="$(NormalTestsAllAppBundlesRoot)CMakeLists.txt" Lines="@(NormalTestCMakeEntries)" 
Overwrite="true" WriteOnlyWhenDifferent="true" /> <WriteLinesToFile File="$(FunctionalTestsAllAppBundlesRoot)CMakeLists.txt" Lines="@(FunctionalTestCMakeEntries)" Overwrite="true" WriteOnlyWhenDifferent="true" /> <XcodeCreateProject TargetOS="$(TargetOS)" Arch="$(TargetArchitecture)" ProjectName="NormalTestAppBundles" CMakeListsDirectory="$(NormalTestsAllAppBundlesRoot)" Condition="'@(NormalTestAppBundles)' != ''" /> <XcodeCreateProject TargetOS="$(TargetOS)" Arch="$(TargetArchitecture)" ProjectName="FunctionalTestAppBundles" CMakeListsDirectory="$(FunctionalTestsAllAppBundlesRoot)" Condition="'@(FunctionalTestAppBundles)' != ''" /> <MakeDir Directories="$(TestArchiveNormalTestsDir)" /> <MakeDir Directories="$(TestArchiveFunctionalTestsDir)" /> <ItemGroup> <!-- xcodeproj are directories, not files --> <XcodeProjects Condition="'@(NormalTestAppBundles)' != ''" Include="$([System.IO.Directory]::GetDirectories('$(NormalTestsAllAppBundlesRoot)NormalTestAppBundles/%(NormalTestAppBundles.RecursiveDir)', '*.xcodeproj'))" DestinationFolder="$(TestArchiveNormalTestsDir)" /> <XcodeProjects Condition="'@(FunctionalTestAppBundles)' != ''" Include="$([System.IO.Directory]::GetDirectories('$(FunctionalTestsAllAppBundlesRoot)FunctionalTestAppBundles/%(FunctionalTestAppBundles.RecursiveDir)', '*.xcodeproj'))" DestinationFolder="$(TestArchiveFunctionalTestsDir)" /> </ItemGroup> <XcodeBuildApp TargetOS="$(TargetOS)" Arch="$(TargetArchitecture)" XcodeProjectPath="%(XcodeProjects.Identity)" DevTeamProvisioning="$(DevTeamProvisioning)" Optimized="True" DestinationFolder="%(XcodeProjects.DestinationFolder)" /> <RemoveDir Condition="'$(ArchiveTests)' == 'true'" Directories="$(AppBundleRoot)" /> </Target> <!-- Restoring all trimming test projects upfront in one single call to RestoreTrimmingProjects so as to avoid possible race conditions that could happen if we restore each individually. --> <Target Name="RestoreTrimmingProjects" BeforeTargets="Build" Condition="'$(TestTrimming)' == 'true'"> <MSBuild Projects="@(TrimmingTestProjects)" Targets="GetTrimmingProjectsToRestore"> <Output TaskParameter="TargetOutputs" ItemName="_TrimmingProjectsToRestore" /> </MSBuild> <MSBuild Projects="@(_TrimmingProjectsToRestore)" Targets="Restore" Properties="MSBuildRestoreSessionId=$([System.Guid]::NewGuid());Configuration=$(Configuration)" /> </Target> </Project>
<Project Sdk="Microsoft.Build.Traversal"> <PropertyGroup Condition="'$(BuildAllConfigurations)' != 'true'"> <!-- Build for NetCoreAppCurrent by default if no BuildTargetFramework is supplied or if not all configurations are built. --> <TargetFramework>$([MSBuild]::ValueOrDefault('$(BuildTargetFramework)', '$(NetCoreAppCurrent)'))-$(TargetOS)</TargetFramework> <!-- Filter ProjectReferences to build the best matching target framework only. --> <FilterTraversalProjectReferences>true</FilterTraversalProjectReferences> </PropertyGroup> <PropertyGroup> <TestInParallel Condition="'$(Coverage)' == 'true'">false</TestInParallel> <!-- For tests we want to continue running if a test run failed. --> <TestContinueOnError>ErrorAndContinue</TestContinueOnError> <TraversalGlobalProperties>BuildAllProjects=true</TraversalGlobalProperties> <CoverageReportInputPath>$(ArtifactsBinDir)\*.Tests\**\coverage.opencover.xml</CoverageReportInputPath> <CoverageReportDir>$(ArtifactsDir)coverage</CoverageReportDir> <EnableCoverageSupport>true</EnableCoverageSupport> <TestAssemblies Condition="'$(TestAssemblies)' == ''">true</TestAssemblies> <TestPackages Condition="'$(TestPackages)' == ''">false</TestPackages> <TestTrimming Condition="'$(TestTrimming)' == ''">false</TestTrimming> </PropertyGroup> <!-- Samples which are too complex for CI --> <ItemGroup Condition="'$(TargetOS)' == 'Browser'"> <ProjectExclusions Include="$(MonoProjectRoot)sample\wasm\console-node-ts\Wasm.Console.Node.TS.Sample.csproj" /> <ProjectExclusions Include="$(MonoProjectRoot)sample\wasm\browser-webpack\Wasm.Browser.WebPack.Sample.csproj" /> <ProjectExclusions Include="$(MonoProjectRoot)sample\wasm\browser-nextjs\Wasm.Browser.NextJs.Sample.csproj" /> <!-- These tests are completely disabled on wasm --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Text.RegularExpressions/tests/System.Text.RegularExpressions.Generators.Tests/System.Text.RegularExpressions.Generators.Tests.csproj" /> </ItemGroup> <!-- Wasm aot on all platforms --> <ItemGroup Condition="'$(TargetOS)' == 'Browser' and '$(BuildAOTTestsOnHelix)' == 'true' and '$(RunDisabledWasmTests)' != 'true' and '$(RunAOTCompilation)' == 'true'"> <!-- https://github.com/dotnet/runtime/issues/61756 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Text.RegularExpressions\tests\System.Text.RegularExpressions.Tests.csproj" /> <!-- https://github.com/dotnet/runtime/issues/65356 - OOM while linking --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Text.Json\tests\System.Text.Json.SourceGeneration.Tests\System.Text.Json.SourceGeneration.Roslyn3.11.Tests.csproj" /> <!-- https://github.com/dotnet/runtime/issues/65411 - possible OOM when compiling System.Text.Json.SourceGeneration.Roslyn4.0.Tests.dll.bc -> .o --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Text.Json\tests\System.Text.Json.SourceGeneration.Tests\System.Text.Json.SourceGeneration.Roslyn4.0.Tests.csproj" /> <!-- https://github.com/dotnet/runtime/issues/61524 - OOM while linking --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Text.Json\tests\System.Text.Json.Tests\System.Text.Json.Tests.csproj" /> </ItemGroup> <!-- Projects that don't support code coverage measurement. 
--> <ItemGroup Condition="'$(Coverage)' == 'true'"> <ProjectExclusions Include="$(CommonTestPath)Common.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.XmlSerializer.Generator\tests\Microsoft.XmlSerializer.Generator.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Transactions.Local\tests\System.Transactions.Local.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetArchitecture)' == 'ARMv6'"> <!-- https://github.com/dotnet/runtime/issues/64673 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.Ping\tests\FunctionalTests\System.Net.Ping.Functional.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetsMobile)' == 'true' or '$(TargetArchitecture)' == 'ARMv6'"> <!-- DllImportGenerator runtime tests depend on DNNE, which does not support mobile platforms. --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.InteropServices\tests\DllImportGenerator.Tests\DllImportGenerator.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.InteropServices\tests\DllImportGenerator.UnitTests\DllImportGenerator.Unit.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'windows' and '$(TargetArchitecture)' == 'arm'"> <!-- DllImportGenerator runtime tests depend on DNNE, which does not support Windows ARM32 as we don't officially support it. --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.InteropServices\tests\DllImportGenerator.Tests\DllImportGenerator.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetArchitecture)' == 'armel'"> <!-- DllImportGenerator runtime tests depend on DNNE, which does not support armel as we don't officially support it. --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.InteropServices\tests\DllImportGenerator.Tests\DllImportGenerator.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetArchitecture)' == 'arm'"> <!-- Issue: https://github.com/dotnet/runtime/issues/60705 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.InteropServices\tests\DllImportGenerator.UnitTests\DllImportGenerator.Unit.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'FreeBSD'"> <!-- DllImportGenerator runtime tests build depends pulling down a pre-built nethost binary, which is not available for FreeBSD. --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.InteropServices\tests\DllImportGenerator.Tests\DllImportGenerator.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'linux' and '$(TargetArchitecture)' == 's390x'"> <!-- DllImportGenerator runtime tests build depends pulling down a pre-built nethost binary, which is not available for s390x. --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.InteropServices\tests\DllImportGenerator.Tests\DllImportGenerator.Tests.csproj" /> <!-- DllImportGenerator unit tests fail since NuGet 5.6.0 signature verification does not work on big-endian systems (needs >= 5.11.0). 
--> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.InteropServices\tests\DllImportGenerator.UnitTests\DllImportGenerator.Unit.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'Windows' and '$(RuntimeFlavor)' == 'Mono' and '$(RunDisabledMonoTestsOnWindows)' != 'true'"> <!-- Issue: https://github.com/dotnet/runtime/issues/53281 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.WebSockets.Client\tests\System.Net.WebSockets.Client.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.InteropServices\tests\DllImportGenerator.UnitTests\DllImportGenerator.Unit.Tests.csproj" /> <!-- Issue: https://github.com/dotnet/runtime/issues/63723 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Drawing.Common\tests\System.Drawing.Common.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'Android'"> <!-- Never going to run on Android --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.OpenSsl\tests\System.Security.Cryptography.OpenSsl.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'Android' and '$(RunDisabledAndroidTests)' != 'true'"> <!-- Tests time out intermittently --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Hosting\tests\UnitTests\Microsoft.Extensions.Hosting.Unit.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.Security\tests\FunctionalTests\System.Net.Security.Tests.csproj" /> <!-- Tests crash --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Globalization\tests\Invariant\Invariant.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.IO.FileSystem\tests\System.IO.FileSystem.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.IO.FileSystem.Watcher\tests\System.IO.FileSystem.Watcher.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.IO.Ports\tests\System.IO.Ports.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.Quic\tests\FunctionalTests\System.Net.Quic.Functional.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Reflection\tests\System.Reflection.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.Serialization.Formatters\tests\System.Runtime.Serialization.Formatters.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.Algorithms\tests\System.Security.Cryptography.Algorithms.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.Csp\tests\System.Security.Cryptography.Csp.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Threading.Thread\tests\System.Threading.Thread.Tests.csproj" /> <!-- Actual test failures --> <!-- https://github.com/dotnet/runtime/issues/50871 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.DependencyInjection\tests\DI.Tests\Microsoft.Extensions.DependencyInjection.Tests.csproj" /> <!-- https://github.com/dotnet/runtime/issues/50874 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Logging.EventSource\tests\Microsoft.Extensions.Logging.EventSource.Tests.csproj" /> <!-- https://github.com/dotnet/runtime/issues/50923 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Data.Common\tests\System.Data.Common.Tests.csproj" /> <!-- 
https://github.com/dotnet/runtime/issues/50926 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Diagnostics.Tracing\tests\System.Diagnostics.Tracing.Tests.csproj" /> <!-- https://github.com/dotnet/runtime/issues/49936 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Text.Json\tests\System.Text.Json.Tests\System.Text.Json.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.Http\tests\FunctionalTests\System.Net.Http.Functional.Tests.csproj" /> <!-- Execution may be compromised --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Caching.Memory\tests\Microsoft.Extensions.Caching.Memory.Tests.csproj" /> <!-- PSNE --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Console/tests/System.Console.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.Primitives/tests/FunctionalTests/System.Net.Primitives.Functional.Tests.csproj" /> <!-- Crashes on CI (possibly flakey) --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Private.Xml/tests/Misc/System.Xml.Misc.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Dynamic.Runtime/tests/System.Dynamic.Runtime.Tests.csproj"/> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'Android' and '$(TargetArchitecture)' == 'arm64' and '$(RunDisabledAndroidTests)' != 'true'"> <!-- Crashes on CI (possibly flakey) https://github.com/dotnet/runtime/issues/52615 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Configuration/tests/FunctionalTests/Microsoft.Extensions.Configuration.Functional.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Configuration.Binder/tests/Microsoft.Extensions.Configuration.Binder.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.FileProviders.Physical/tests/Microsoft.Extensions.FileProviders.Physical.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Win32.Primitives/tests/Microsoft.Win32.Primitives.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Collections.Concurrent/tests/System.Collections.Concurrent.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Collections.Specialized/tests/System.Collections.Specialized.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.ComponentModel.Annotations/tests/System.ComponentModel.Annotations.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Composition.Hosting/tests/System.Composition.Hosting.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.IO.FileSystem.Primitives/tests/System.IO.FileSystem.Primitives.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Memory/tests/System.Memory.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.Mail/tests/Functional/System.Net.Mail.Functional.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.NameResolution/tests/PalTests/System.Net.NameResolution.Pal.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.WebClient/tests/System.Net.WebClient.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Numerics.Tensors/tests/System.Numerics.Tensors.Tests.csproj" /> <ProjectExclusions 
Include="$(MSBuildThisFileDirectory)System.Private.Xml/tests/XmlReader/Tests/System.Xml.RW.XmlReader.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Private.Xml/tests/XPath/XPathDocument/System.Xml.XPath.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Private.Uri/tests/ExtendedFunctionalTests/System.Private.Uri.ExtendedFunctional.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Reflection.DispatchProxy/tests/System.Reflection.DispatchProxy.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.X509Certificates\tests\System.Security.Cryptography.X509Certificates.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'Android' and '$(TargetArchitecture)' == 'x64' and '$(RunDisabledAndroidTests)' != 'true'"> <!-- Test flakiness on x64 https://github.com/dotnet/runtime/issues/49937 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Threading\tests\System.Threading.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'Android' and '$(TargetArchitecture)' == 'x86' and '$(RunDisabledAndroidTests)' != 'true'"> <!-- Crashes only on x86 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Primitives\tests\Microsoft.Extensions.Primitives.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.Extensions\tests\System.Runtime.Extensions.Tests.csproj" /> <!-- https://github.com/dotnet/runtime/issues/50493 --> <ProjectExclusions Include="$(RepoRoot)\src\tests\FunctionalTests\Android\Device_Emulator\AOT\Android.Device_Emulator.Aot.Test.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'iOS' and '$(RunDisablediOSTests)' != 'true'"> <ProjectExclusions Include="$(MSBuildThisFileDirectory)*\tests\**\*.Tests.csproj" /> <!-- Ref.Emit in XSLCompiledTransform --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Private.Xml\tests\Xslt\XslCompiledTransformApi\System.Xml.Xsl.XslCompiledTransformApi.Tests.csproj" /> <!-- Functional tests on devices have problems with return codes from mlaunch --> <ProjectExclusions Include="$(RepoRoot)\src\tests\FunctionalTests\$(TargetOS)\Device\**\*.Test.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'iOSSimulator' and '$(TargetArchitecture)' == 'arm64' and '$(RunDisablediOSTests)' != 'true'"> <!-- Functional tests on arm64 simulator have problems with return codes from mlaunch --> <ProjectExclusions Include="$(RepoRoot)\src\tests\FunctionalTests\iOS\Simulator\**\*.Test.csproj" /> </ItemGroup> <!-- Excluding all tests for devices until building on helix works properly --> <ItemGroup Condition="'$(TargetOS)' == 'tvOS' and '$(RunDisablediOSTests)' != 'true'"> <!-- Ref.Emit in XSLCompiledTransform --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Private.Xml\tests\Xslt\XslCompiledTransformApi\System.Xml.Xsl.XslCompiledTransformApi.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Reflection\tests\System.Reflection.Emit.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Reflection.Emit.ILGeneration/tests/System.Reflection.Emit.ILGeneration.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Reflection.Emit.Lightweight/tests/System.Reflection.Emit.Lightweight.Tests.csproj" /> <ProjectExclusions 
Include="$(MSBuildThisFileDirectory)System.Reflection.DispatchProxy/tests/System.Reflection.DispatchProxy.Tests.csproj" /> <!-- Has deps that JIT, need re-done in order to pass --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Hosting/tests/UnitTests/Microsoft.Extensions.Hosting.Unit.Tests.csproj" /> <!-- Test suites hang and time out. https://github.com/dotnet/runtime/issues/60713 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.DependencyInjection/tests/DI.External.Tests/Microsoft.Extensions.DependencyInjection.ExternalContainers.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.NetworkInformation/tests/FunctionalTests/System.Net.NetworkInformation.Functional.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Reflection/tests/System.Reflection.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Threading.Channels/tests/System.Threading.Channels.Tests.csproj" /> <!-- Functional tests on devices have problems with return codes from mlaunch --> <ProjectExclusions Include="$(RepoRoot)\src\tests\FunctionalTests\$(TargetOS)\Device\**\*.Test.csproj" /> </ItemGroup> <!-- Excluding all tests for aot catalyst until building on helix works properly --> <ItemGroup Condition="('$(TargetOS)' == 'MacCatalyst' and '$(BuildTestsOnHelix)' == 'true') and '$(RunDisablediOSTests)' != 'true'"> <ProjectExclusions Include="$(MSBuildThisFileDirectory)*\tests\**\*.Tests.csproj" /> <!-- No functional tests until helix stabilizes --> <ProjectExclusions Include="$(RepoRoot)\src\tests\FunctionalTests\iOS\Simulator\**\*.Test.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'MacCatalyst'"> <ProjectExclusions Include="$(RepoRoot)/src/tests/FunctionalTests/iOS/Simulator/XmlFormatWriterGeneratorAOT/iOS.Simulator.XmlFormatWriterGeneratorAot.Test.csproj" /> </ItemGroup> <!-- Run only explicitly selected tests for Mac Catalyst in App Sandbox --> <ItemGroup Condition="'$(TargetOS)' == 'MacCatalyst' and '$(EnableAppSandbox)' == 'true'"> <ProjectExclusions Include="$(MSBuildThisFileDirectory)*/tests/**/*.Tests.csproj" /> <ProjectExclusions Include="$(RepoRoot)/src/tests/FunctionalTests/iOS/Simulator/**/*.Test.csproj" /> <!-- https://github.com/dotnet/runtime/pull/61507 --> <ProjectExclusions Remove="$(MSBuildThisFileDirectory)System.Diagnostics.Process/tests/System.Diagnostics.Process.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="('$(TargetOS)' == 'iOS' or '$(TargetOS)' == 'iOSSimulator' or '$(TargetOS)' == 'tvOS' or '$(TargetOS)' == 'tvOSSimulator' or '$(TargetOS)' == 'MacCatalyst') and '$(RunDisablediOSTests)' != 'true'"> <!-- PNSE --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.Quic/tests/FunctionalTests/System.Net.Quic.Functional.Tests.csproj" /> <!-- https://github.com/dotnet/runtime/issues/51414 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.OpenSsl/tests/System.Security.Cryptography.OpenSsl.Tests.csproj" /> <!-- App Crash https://github.com/dotnet/runtime/issues/53624 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.CSharp/tests/Microsoft.CSharp.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Diagnostics.Tracing/tests/System.Diagnostics.Tracing.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Dynamic.Runtime/tests/System.Dynamic.Runtime.Tests.csproj" /> <ProjectExclusions 
Include="$(MSBuildThisFileDirectory)System.Linq.Expressions/tests/System.Linq.Expressions.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.Requests/tests/System.Net.Requests.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.Security/tests/FunctionalTests/System.Net.Security.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Text.Json/tests/System.Text.Json.Tests/System.Text.Json.Tests.csproj" /> <ProjectExclusions Include="$(RepoRoot)/src/tests/FunctionalTests/iOS/Simulator/PInvoke/iOS.Simulator.PInvoke.Test.csproj" /> <ProjectExclusions Include="$(RepoRoot)/src/tests/FunctionalTests/tvOS/Simulator/AOT/tvOS.Simulator.Aot.Test.csproj" /> <!-- Crashes randomly during test runs https://github.com/dotnet/runtime/issues/52460 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Threading.Tasks\tests\System.Threading.Tasks.Tests.csproj" /> <!-- Crash https://github.com/dotnet/runtime/issues/56085 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Text.Json/tests/System.Text.Json.SourceGeneration.Tests/System.Text.Json.SourceGeneration.Roslyn3.11.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Text.Json/tests/System.Text.Json.SourceGeneration.Tests/System.Text.Json.SourceGeneration.Roslyn4.0.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="('$(TargetOS)' == 'iOS' or '$(TargetOS)' == 'iOSSimulator' or '$(TargetOS)' == 'tvOS' or '$(TargetOS)' == 'tvOSSimulator') and '$(RunDisablediOSTests)' != 'true'"> <!-- https://github.com/dotnet/runtime/issues/51335 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.IO.Pipes/tests/System.IO.Pipes.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'Browser' and '$(RunDisabledWasmTests)' != 'true' and '$(RunAOTCompilation)' != 'true'"> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'Browser' and '$(RunDisabledWasmTests)' != 'true'"> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.NETCore.Platforms\tests\Microsoft.NETCore.Platforms.Tests.csproj" /> <!-- This test is disabled via an assembly-level attribute in source. We exclude it here to avoid queuing/running a work item entirely. https://github.com/dotnet/runtime/issues/35970 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Caching.Memory\tests\Microsoft.Extensions.Caching.Memory.Tests.csproj" /> <!-- This test is disabled via an assembly-level attribute in source. We exclude it here to avoid queuing/running a work item entirely. https://github.com/mono/mono/issues/16417 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.ComponentModel.Composition\tests\System.ComponentModel.Composition.Tests.csproj" /> <!-- Mono-Browser ignores runtimeconfig.template.json (e.g. 
for this it has "System.Globalization.EnforceJapaneseEraYearRanges": true) --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Globalization.Calendars\tests\CalendarTestWithConfigSwitch\System.Globalization.CalendarsWithConfigSwitch.Tests.csproj" /> <!-- https://github.com/dotnet/runtime/issues/37669 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.DependencyModel\tests\Microsoft.Extensions.DependencyModel.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Hosting\tests\UnitTests\Microsoft.Extensions.Hosting.Unit.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.Csp\tests\System.Security.Cryptography.Csp.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.Encoding\tests\System.Security.Cryptography.Encoding.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.OpenSsl\tests\System.Security.Cryptography.OpenSsl.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.Pkcs\tests\System.Security.Cryptography.Pkcs.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.Primitives\tests\System.Security.Cryptography.Primitives.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.Xml\tests\System.Security.Cryptography.Xml.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.X509Certificates\tests\System.Security.Cryptography.X509Certificates.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.Cose\tests\System.Security.Cryptography.Cose.Tests.csproj" /> <!-- This OuterLoop test requires browser UI, but the Helix agents are headless --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Net.WebSockets.Client\tests\wasm\System.Net.WebSockets.Client.Wasm.Tests.csproj" /> </ItemGroup> <!-- Aggressive Trimming related failures --> <ItemGroup Condition="('$(TargetOS)' != 'Browser' and '$(RunAOTCompilation)' == 'true' and '$(MonoForceInterpreter)' != 'true') or ('$(TargetOS)' == 'Browser' and '$(BuildAOTTestsOnHelix)' == 'true' and '$(RunDisabledWasmTests)' != 'true')"> <!-- Issue: https://github.com/dotnet/runtime/issues/59926 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime.Serialization.Xml\tests\System.Runtime.Serialization.Xml.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="('$(TargetOS)' != 'Browser' and '$(RunAOTCompilation)' == 'true' and '$(MonoForceInterpreter)' != 'true') or ('$(TargetOS)' == 'Browser' and '$(BuildAOTTestsOnHelix)' == 'true' and '$(RunDisabledWasmTests)' != 'true') or ('$(TargetOS)' == 'iOS' and '$(BuildTestsOnHelix)' == 'true')"> <!-- Issue: https://github.com/dotnet/runtime/issues/50724 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Composition\tests\System.Composition.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Dynamic.Runtime\tests\System.Dynamic.Runtime.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.DependencyInjection\tests\DI.Tests\Microsoft.Extensions.DependencyInjection.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.Extensions.Logging.EventSource\tests\Microsoft.Extensions.Logging.EventSource.Tests.csproj" /> <ProjectExclusions 
Include="$(MSBuildThisFileDirectory)System.Diagnostics.DiagnosticSource\tests\System.Diagnostics.DiagnosticSource.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Diagnostics.Tracing/tests/System.Diagnostics.Tracing.Tests.csproj" /> <!-- Issue: https://github.com/dotnet/runtime/issues/51708 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Linq.Expressions\tests\System.Linq.Expressions.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'Browser' and '$(BrowserHost)' == 'windows' and '$(Scenario)' == 'WasmTestOnBrowser' and '$(RunDisabledWasmTestsOnWindows)' != 'true'"> <!-- Issue: https://github.com/dotnet/runtime/issues/55429 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)Microsoft.VisualBasic.Core\tests\Microsoft.VisualBasic.Core.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.IO\tests\System.IO.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Linq.Expressions\tests\System.Linq.Expressions.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Memory\tests\System.Memory.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Runtime\tests\System.Runtime.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Private.Xml\tests\Writers\XmlWriterApi\System.Xml.RW.XmlWriterApi.Tests.csproj" /> <ProjectExclusions Include="$(RepoRoot)\src\tests\BuildWasmApps\Wasm.Build.Tests\Wasm.Build.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetOS)' == 'OSX' and '$(TargetArchitecture)' == 'arm64' and '$(RunDisabledAppleSiliconTests)' != 'true'"> <!-- ActiveIssue Apple Silicon No usable version of libssl was found https://github.com/dotnet/runtime/issues/49083 --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.Algorithms/tests/System.Security.Cryptography.Algorithms.Tests.csproj" /> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Security.Cryptography.OpenSsl/tests/System.Security.Cryptography.OpenSsl.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TargetArchitecture)' == 's390x' and '$(RunDisableds390xTests)' != 'true'"> <ProjectExclusions Include="$(MSBuildThisFileDirectory)System.Drawing.Common\tests\System.Drawing.Common.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TestSingleFile)' == 'true' and '$(TestNativeAot)' != 'true'"> <!-- Run only a small randomly chosen set of passing test suites --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)*\tests\**\*.Tests.csproj" /> <ProjectExclusions Remove="$(MSBuildThisFileDirectory)System.Collections\tests\System.Collections.Tests.csproj" /> <ProjectExclusions Remove="$(MSBuildThisFileDirectory)System.IO.IsolatedStorage\tests\System.IO.IsolatedStorage.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(TestNativeAot)' == 'true'"> <!-- Run only a small randomly chosen set of passing test suites --> <ProjectExclusions Include="$(MSBuildThisFileDirectory)*\tests\**\*.Tests.csproj" /> <ProjectExclusions Remove="$(MSBuildThisFileDirectory)System.Collections\tests\System.Collections.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(RunSmokeTestsOnly)' == 'true'"> <ProjectReference Include="$(MSBuildThisFileDirectory)System.Runtime\tests\System.Runtime.Tests.csproj" /> </ItemGroup> <ItemGroup Condition="'$(RunSmokeTestsOnly)' != 'true'"> <ProjectReference Include="$(MSBuildThisFileDirectory)*\tests\**\*.Tests.csproj" Exclude="@(ProjectExclusions)" 
Condition="'$(TestAssemblies)' == 'true'" /> <ProjectReference Include="$(MSBuildThisFileDirectory)testPackages\testPackages.proj" Condition="'$(TestPackages)' == 'true'" /> <TrimmingTestProjects Include="$(MSBuildThisFileDirectory)*\tests\**\*.TrimmingTests.proj" Exclude="@(ProjectExclusions)" Condition="'$(TestTrimming)' == 'true'" AdditionalProperties="%(AdditionalProperties);SkipTrimmingProjectsRestore=true" /> <ProjectReference Include="@(TrimmingTestProjects)" /> <!-- wasm.build.tests are run on _WasmBuildTests job on CI, and with library tests locally. --> <ProjectReference Include="$(RepoRoot)\src\tests\BuildWasmApps\Wasm.Build.Tests\*.Tests.csproj" Exclude="@(ProjectExclusions)" Condition="'$(TargetOS)' == 'Browser' and (('$(ContinuousIntegrationBuild)' == 'true' and '$(TestWasmBuildTests)' == 'true') or ('$(ContinuousIntegrationBuild)' != 'true' and '$(TestAssemblies)' == 'true'))" BuildInParallel="false" /> <ProjectReference Include="$(RepoRoot)\src\tests\BuildWasmApps\Wasm.Debugger.Tests\*.Tests.csproj" Exclude="@(ProjectExclusions)" Condition="'$(TargetOS)' == 'Browser' and (('$(ContinuousIntegrationBuild)' == 'true' and '$(TestWasmDebuggerTests)' == 'true') or ('$(ContinuousIntegrationBuild)' != 'true' and '$(TestAssemblies)' == 'true'))" BuildInParallel="false" /> </ItemGroup> <ItemGroup Condition="'$(ArchiveTests)' == 'true' and '$(TargetOS)' == 'iOS'"> <!-- Only System.Runtime tests on iOS for now --> <ProjectReference Include="$(MSBuildThisFileDirectory)System.Runtime\tests\System.Runtime.Tests.csproj" /> <ProjectReference Include="$(RepoRoot)\src\tests\FunctionalTests\iOS\Device\**\*.Test.csproj" Exclude="@(ProjectExclusions)" BuildInParallel="false" /> </ItemGroup> <ItemGroup Condition="'$(ArchiveTests)' == 'true' and '$(RunSmokeTestsOnly)' != 'true' and '$(TargetOS)' == 'iOSSimulator'"> <!-- https://github.com/dotnet/runtime/issues/57666 --> <!-- <ProjectReference Include="$(MonoProjectRoot)sample\iOS\Program.csproj" BuildInParallel="false" /> --> <ProjectReference Include="$(RepoRoot)\src\tests\FunctionalTests\iOS\Simulator\**\*.Test.csproj" Exclude="@(ProjectExclusions)" BuildInParallel="false" /> </ItemGroup> <ItemGroup Condition="'$(ArchiveTests)' == 'true' and '$(RunSmokeTestsOnly)' != 'true' and '$(TargetOS)' == 'tvOS'"> <ProjectReference Include="$(RepoRoot)\src\tests\FunctionalTests\tvOS\Device\**\*.Test.csproj" Exclude="@(ProjectExclusions)" BuildInParallel="false" /> </ItemGroup> <ItemGroup Condition="'$(ArchiveTests)' == 'true' and '$(RunSmokeTestsOnly)' != 'true' and '$(TargetOS)' == 'MacCatalyst'"> <!-- https://github.com/dotnet/runtime/issues/57666 --> <!-- <ProjectReference Include="$(MonoProjectRoot)sample\iOS\Program.csproj" BuildInParallel="false" /> --> <ProjectReference Include="$(RepoRoot)\src\tests\FunctionalTests\iOS\Simulator\**\*.Test.csproj" Exclude="@(ProjectExclusions)" BuildInParallel="false" /> </ItemGroup> <ItemGroup Condition="'$(ArchiveTests)' == 'true' and '$(RunSmokeTestsOnly)' != 'true' and '$(TargetOS)' == 'tvOSSimulator'"> <ProjectReference Include="$(RepoRoot)\src\tests\FunctionalTests\tvOS\Simulator\**\*.Test.csproj" Exclude="@(ProjectExclusions)" BuildInParallel="false" /> </ItemGroup> <ItemGroup Condition="'$(ArchiveTests)' == 'true' and '$(RunSmokeTestsOnly)' != 'true' and '$(TargetOS)' == 'Android'"> <ProjectReference Include="$(MonoProjectRoot)sample\Android\AndroidSampleApp.csproj" BuildInParallel="false" /> <ProjectReference Include="$(RepoRoot)\src\tests\FunctionalTests\Android\**\*.Test.csproj" 
Exclude="@(ProjectExclusions)" BuildInParallel="false" /> </ItemGroup> <PropertyGroup> <Samples_BuildInParallel Condition="'$(OS)' == 'Windows_NT'">false</Samples_BuildInParallel> <Samples_BuildInParallel Condition="'$(OS)' != 'Windows_NT'">true</Samples_BuildInParallel> </PropertyGroup> <!-- Don't build samples, and functional tests on EAT, AOT, WBT, and Debugger lanes --> <ItemGroup Condition="'$(ArchiveTests)' == 'true' and '$(RunSmokeTestsOnly)' != 'true' and '$(TargetOS)' == 'Browser' and '$(BuildAOTTestsOnHelix)' != 'true' and '$(TestWasmBuildTests)' != 'true' and '$(TestWasmDebuggerTests)' != 'true'"> <ProjectReference Include="$(MonoProjectRoot)sample\wasm\**\*.Sample.csproj" Exclude="@(ProjectExclusions)" BuildInParallel="$(Samples_BuildInParallel)" /> <ProjectReference Include="$(RepoRoot)\src\tests\FunctionalTests\WebAssembly\**\*.Test.csproj" Exclude="@(ProjectExclusions)" BuildInParallel="false" /> </ItemGroup> <Target Name="GenerateMergedCoverageReport" AfterTargets="Test" DependsOnTargets="GenerateCoverageReport" Condition="'$(TestAssemblies)' == 'true' and '$(Coverage)' == 'true'" /> <!-- Build Apple app bundles using AppBundleRoot --> <UsingTask Condition="'$(UseAppBundleRootForBuildingTests)' == 'true'" TaskName="XcodeCreateProject" AssemblyFile="$(AppleAppBuilderTasksAssemblyPath)" /> <UsingTask Condition="'$(UseAppBundleRootForBuildingTests)' == 'true'" TaskName="XcodeBuildApp" AssemblyFile="$(AppleAppBuilderTasksAssemblyPath)" /> <Target Condition="'$(UseAppBundleRootForBuildingTests)' == 'true'" Name="BuildAppleAppBundles" AfterTargets="Build"> <PropertyGroup> <!-- TODO: Unify this with TestArchiveTestsRoot in src/libraries/Directory.Build.props somehow, we can't use IsFunctionalTest==true here because it is only set in the context of the .csproj --> <TestArchiveNormalTestsRoot>$(TestArchiveRoot)tests/</TestArchiveNormalTestsRoot> <TestArchiveFunctionalTestsRoot>$(TestArchiveRoot)runonly/</TestArchiveFunctionalTestsRoot> <TestArchiveNormalTestsDir>$(TestArchiveNormalTestsRoot)$(OSPlatformConfig)/</TestArchiveNormalTestsDir> <TestArchiveFunctionalTestsDir>$(TestArchiveFunctionalTestsRoot)$(OSPlatformConfig)/</TestArchiveFunctionalTestsDir> <NormalTestsAppBundleRoot>$(AppBundleRoot)/tests/</NormalTestsAppBundleRoot> <FunctionalTestsAppBundleRoot>$(AppBundleRoot)/runonly/</FunctionalTestsAppBundleRoot> <NormalTestsAllAppBundlesRoot>$(AppBundleRoot)/tests.all/</NormalTestsAllAppBundlesRoot> <FunctionalTestsAllAppBundlesRoot>$(AppBundleRoot)/runonly.all/</FunctionalTestsAllAppBundlesRoot> </PropertyGroup> <ItemGroup> <NormalTestAppBundles Include="$(NormalTestsAppBundleRoot)*/AppBundle/CMakeLists.txt" /> <NormalTestCMakeEntries Include="cmake_minimum_required(VERSION 3.16)" /> <NormalTestCMakeEntries Include="project(NormalTestAppBundles)" /> <NormalTestCMakeEntries Include="add_subdirectory(%(NormalTestAppBundles.RootDir)%(NormalTestAppBundles.Directory) %(NormalTestAppBundles.RecursiveDir) EXCLUDE_FROM_ALL)" /> <FunctionalTestAppBundles Include="$(FunctionalTestsAppBundleRoot)*/AppBundle/CMakeLists.txt" /> <FunctionalTestCMakeEntries Include="cmake_minimum_required(VERSION 3.16)" /> <FunctionalTestCMakeEntries Include="project(FunctionalTestAppBundles)" /> <FunctionalTestCMakeEntries Include="add_subdirectory(%(FunctionalTestAppBundles.RootDir)%(FunctionalTestAppBundles.Directory) %(FunctionalTestAppBundles.RecursiveDir) EXCLUDE_FROM_ALL)" /> </ItemGroup> <WriteLinesToFile File="$(NormalTestsAllAppBundlesRoot)CMakeLists.txt" Lines="@(NormalTestCMakeEntries)" 
Overwrite="true" WriteOnlyWhenDifferent="true" /> <WriteLinesToFile File="$(FunctionalTestsAllAppBundlesRoot)CMakeLists.txt" Lines="@(FunctionalTestCMakeEntries)" Overwrite="true" WriteOnlyWhenDifferent="true" /> <XcodeCreateProject TargetOS="$(TargetOS)" Arch="$(TargetArchitecture)" ProjectName="NormalTestAppBundles" CMakeListsDirectory="$(NormalTestsAllAppBundlesRoot)" Condition="'@(NormalTestAppBundles)' != ''" /> <XcodeCreateProject TargetOS="$(TargetOS)" Arch="$(TargetArchitecture)" ProjectName="FunctionalTestAppBundles" CMakeListsDirectory="$(FunctionalTestsAllAppBundlesRoot)" Condition="'@(FunctionalTestAppBundles)' != ''" /> <MakeDir Directories="$(TestArchiveNormalTestsDir)" /> <MakeDir Directories="$(TestArchiveFunctionalTestsDir)" /> <ItemGroup> <!-- xcodeproj are directories, not files --> <XcodeProjects Condition="'@(NormalTestAppBundles)' != ''" Include="$([System.IO.Directory]::GetDirectories('$(NormalTestsAllAppBundlesRoot)NormalTestAppBundles/%(NormalTestAppBundles.RecursiveDir)', '*.xcodeproj'))" DestinationFolder="$(TestArchiveNormalTestsDir)" /> <XcodeProjects Condition="'@(FunctionalTestAppBundles)' != ''" Include="$([System.IO.Directory]::GetDirectories('$(FunctionalTestsAllAppBundlesRoot)FunctionalTestAppBundles/%(FunctionalTestAppBundles.RecursiveDir)', '*.xcodeproj'))" DestinationFolder="$(TestArchiveFunctionalTestsDir)" /> </ItemGroup> <XcodeBuildApp TargetOS="$(TargetOS)" Arch="$(TargetArchitecture)" XcodeProjectPath="%(XcodeProjects.Identity)" DevTeamProvisioning="$(DevTeamProvisioning)" Optimized="True" DestinationFolder="%(XcodeProjects.DestinationFolder)" /> <RemoveDir Condition="'$(ArchiveTests)' == 'true'" Directories="$(AppBundleRoot)" /> </Target> <!-- Restoring all trimming test projects upfront in one single call to RestoreTrimmingProjects so as to avoid possible race conditions that could happen if we restore each individually. --> <Target Name="RestoreTrimmingProjects" BeforeTargets="Build" Condition="'$(TestTrimming)' == 'true'"> <MSBuild Projects="@(TrimmingTestProjects)" Targets="GetTrimmingProjectsToRestore"> <Output TaskParameter="TargetOutputs" ItemName="_TrimmingProjectsToRestore" /> </MSBuild> <MSBuild Projects="@(_TrimmingProjectsToRestore)" Targets="Restore" Properties="MSBuildRestoreSessionId=$([System.Guid]::NewGuid());Configuration=$(Configuration)" /> </Target> </Project>
-1
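The tests.proj content in this row is a Microsoft.Build.Traversal project: conditioned ItemGroups accumulate ProjectExclusions first, and the test-project glob then subtracts them through its Exclude attribute. A minimal sketch of that pattern, assuming a made-up Example.Library project in place of the real exclusion lists:

<Project Sdk="Microsoft.Build.Traversal">
  <!-- Exclusions accumulate across any number of conditioned groups before filtering. -->
  <ItemGroup Condition="'$(TargetOS)' == 'Browser'">
    <ProjectExclusions Include="$(MSBuildThisFileDirectory)Example.Library\tests\Example.Library.Tests.csproj" />
  </ItemGroup>

  <!-- Every project matching the glob builds except those present in @(ProjectExclusions). -->
  <ItemGroup>
    <ProjectReference Include="$(MSBuildThisFileDirectory)*\tests\**\*.Tests.csproj"
                      Exclude="@(ProjectExclusions)" />
  </ItemGroup>
</Project>

Because MSBuild evaluates ItemGroups top to bottom, an exclusion added in any earlier group is already part of @(ProjectExclusions) by the time the glob is expanded and filtered.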
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/libraries/System.Runtime.Loader/tests/ApplyUpdate/System.Reflection.Metadata.ApplyUpdate.Test.AsyncMethodChange/deltascript.json
{ "changes": [ {"document": "AsyncMethodChange.cs", "update": "AsyncMethodChange_v1.cs"} ] }
{ "changes": [ {"document": "AsyncMethodChange.cs", "update": "AsyncMethodChange_v1.cs"} ] }
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./eng/common/templates/steps/run-on-windows.yml
parameters:
  agentOs: ''
  steps: []

steps:
- ${{ if eq(parameters.agentOs, 'Windows_NT') }}:
  - ${{ parameters.steps }}
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/installer/pkg/sfx/installers/dotnet-runtime-deps/dotnet-runtime-deps-opensuse.42.proj
<Project Sdk="Microsoft.Build.NoTargets"> <PropertyGroup> <GenerateInstallers Condition="'$(BuildRpmPackage)' != 'true'">false</GenerateInstallers> <PackageTargetOS>opensuse.42</PackageTargetOS> </PropertyGroup> <ItemGroup> <LinuxPackageDependency Include="libopenssl1_0_0;libicu;krb5" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.Build.NoTargets"> <PropertyGroup> <GenerateInstallers Condition="'$(BuildRpmPackage)' != 'true'">false</GenerateInstallers> <PackageTargetOS>opensuse.42</PackageTargetOS> </PropertyGroup> <ItemGroup> <LinuxPackageDependency Include="libopenssl1_0_0;libicu;krb5" /> </ItemGroup> </Project>
-1
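The dotnet-runtime-deps-* rows in this set all follow one small pattern: a NoTargets project that suppresses installer generation outside the RPM leg, names the target distro via PackageTargetOS, and lists the runtime's native prerequisites (OpenSSL, ICU, Kerberos) as a semicolon-delimited LinuxPackageDependency item. A sketch for a hypothetical distro; the id and package names below are illustrative assumptions, not repo content:

<Project Sdk="Microsoft.Build.NoTargets">
  <PropertyGroup>
    <!-- Only generate installers on build legs that produce RPM packages. -->
    <GenerateInstallers Condition="'$(BuildRpmPackage)' != 'true'">false</GenerateInstallers>
    <!-- Hypothetical id; real projects use ids such as opensuse.42 or cm.2. -->
    <PackageTargetOS>exampledistro.1</PackageTargetOS>
  </PropertyGroup>
  <ItemGroup>
    <!-- Package names vary per distro; these are placeholders. -->
    <LinuxPackageDependency Include="openssl;icu;krb5" />
  </ItemGroup>
</Project>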
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/mono/nuget/Microsoft.NET.Workload.Mono.Toolchain.Manifest/localize/WorkloadManifest.ru.json
{ "workloads/wasm-tools/description": "Средства сборки WebAssembly .NET" }
{ "workloads/wasm-tools/description": "Средства сборки WebAssembly .NET" }
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./eng/common/templates/jobs/codeql-build.yml
parameters:
  # See schema documentation in /Documentation/AzureDevOps/TemplateSchema.md
  continueOnError: false

  # Required: A collection of jobs to run - https://docs.microsoft.com/en-us/azure/devops/pipelines/yaml-schema?view=vsts&tabs=schema#job
  jobs: []

  # Optional: if specified, restore and use this version of Guardian instead of the default.
  overrideGuardianVersion: ''

jobs:
- template: /eng/common/templates/jobs/jobs.yml
  parameters:
    enableMicrobuild: false
    enablePublishBuildArtifacts: false
    enablePublishTestResults: false
    enablePublishBuildAssets: false
    enablePublishUsingPipelines: false
    enableTelemetry: true

    variables:
      - group: Publish-Build-Assets
      # The Guardian version specified in 'eng/common/sdl/packages.config'. This value must be kept in
      # sync with the packages.config file.
      - name: DefaultGuardianVersion
        value: 0.109.0
      - name: GuardianPackagesConfigFile
        value: $(Build.SourcesDirectory)\eng\common\sdl\packages.config
      - name: GuardianVersion
        value: ${{ coalesce(parameters.overrideGuardianVersion, '$(DefaultGuardianVersion)') }}

    jobs: ${{ parameters.jobs }}
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/libraries/shims.proj
<Project Sdk="Microsoft.Build.Traversal"> <PropertyGroup> <TargetFramework>$(NetCoreAppCurrent)-$(TargetOS)</TargetFramework> </PropertyGroup> <!-- Reference the source project shims which themselves then reference their corresponding ref project, if it exists. --> <ItemGroup> <ProjectReference Include="shims\src\*.csproj" /> </ItemGroup> <ItemGroup Condition="'$(RefOnly)' == 'true'"> <ReferenceShimProject Include="shims\ref\*.csproj" /> <ProjectReference Include="@(ReferenceShimProject)" /> <!-- Omit projects which depend on source projects to be built. --> <ProjectReference Remove="@(ReferenceShimProject->'shims\src\%(Filename)%(Extension)')" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.Build.Traversal"> <PropertyGroup> <TargetFramework>$(NetCoreAppCurrent)-$(TargetOS)</TargetFramework> </PropertyGroup> <!-- Reference the source project shims which themselves then reference their corresponding ref project, if it exists. --> <ItemGroup> <ProjectReference Include="shims\src\*.csproj" /> </ItemGroup> <ItemGroup Condition="'$(RefOnly)' == 'true'"> <ReferenceShimProject Include="shims\ref\*.csproj" /> <ProjectReference Include="@(ReferenceShimProject)" /> <!-- Omit projects which depend on source projects to be built. --> <ProjectReference Remove="@(ReferenceShimProject->'shims\src\%(Filename)%(Extension)')" /> </ItemGroup> </Project>
-1
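shims.proj above relies on an MSBuild item transform to drop overlapping projects: @(ReferenceShimProject->'shims\src\%(Filename)%(Extension)') rewrites each ref-project path into its src-project counterpart so the Remove can subtract it. A self-contained sketch of the idiom, with made-up item values; running the Show target should print only shims\src\Gamma.csproj:

<Project>
  <ItemGroup>
    <ReferenceShimProject Include="shims\ref\Alpha.csproj;shims\ref\Beta.csproj" />
    <ProjectReference Include="shims\src\Alpha.csproj;shims\src\Beta.csproj;shims\src\Gamma.csproj" />
    <!-- %(Filename) is the file name without extension and %(Extension) keeps the dot,
         so each ref path becomes the matching src path and is removed from the list. -->
    <ProjectReference Remove="@(ReferenceShimProject->'shims\src\%(Filename)%(Extension)')" />
  </ItemGroup>

  <Target Name="Show">
    <Message Importance="High" Text="@(ProjectReference)" />
  </Target>
</Project>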
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/installer/pkg/sfx/installers/dotnet-runtime-deps/dotnet-runtime-deps-cm.2.proj
<Project Sdk="Microsoft.Build.NoTargets"> <PropertyGroup> <GenerateInstallers Condition="'$(BuildRpmPackage)' != 'true'">false</GenerateInstallers> <PackageTargetOS>cm.2</PackageTargetOS> </PropertyGroup> <ItemGroup> <LinuxPackageDependency Include="openssl-libs;icu;krb5" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.Build.NoTargets"> <PropertyGroup> <GenerateInstallers Condition="'$(BuildRpmPackage)' != 'true'">false</GenerateInstallers> <PackageTargetOS>cm.2</PackageTargetOS> </PropertyGroup> <ItemGroup> <LinuxPackageDependency Include="openssl-libs;icu;krb5" /> </ItemGroup> </Project>
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./eng/pipelines/libraries/execute-trimming-tests-steps.yml
parameters: archType: '' extraTestArgs: '' steps: # Execute tests - script: $(Build.SourcesDirectory)$(dir)build$(scriptExt) -ci -arch ${{ parameters.archType }} $(_osParameter) -s libs.tests -c $(_BuildConfig) /p:TestAssemblies=false /p:TestTrimming=true $(_officialBuildParameter) $(_crossBuildPropertyArg) /bl:$(Build.SourcesDirectory)/artifacts/log/$(buildConfigUpper)/TrimmingTests.binlog ${{ parameters.extraTestArgs }} displayName: Run Trimming Tests
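Editor's note: the step is a thin wrapper over the repo build script; /p:TestAssemblies=false combined with /p:TestTrimming=true redirects the libs.tests subset to the trimming suites. A plausible local equivalent with the CI-only variables stripped (arch/os values are examples, not from this record):

./build.sh -arch x64 -os linux -s libs.tests -c Release \
  /p:TestAssemblies=false /p:TestTrimming=true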
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/coreclr/crossgen-corelib.proj
<Project Sdk="Microsoft.Build.NoTargets"> <Target Name="PrepareForCrossgen"> <PropertyGroup> <OSPlatformConfig>$(TargetOS).$(TargetArchitecture).$(Configuration)</OSPlatformConfig> <RootBinDir>$([MSBuild]::NormalizeDirectory('$(RepoRoot)', 'artifacts'))</RootBinDir> <LogsDir>$([MSBuild]::NormalizeDirectory('$(RootBinDir)', 'log'))</LogsDir> <BinDir>$([MSBuild]::NormalizeDirectory('$(RootBinDir)', 'bin', 'coreclr', $(OSPlatformConfig)))</BinDir> <IntermediatesDir>$([MSBuild]::NormalizeDirectory('$(RootBinDir)', 'obj', 'coreclr', $(OSPlatformConfig)))</IntermediatesDir> <DotNetCli>$([MSBuild]::NormalizePath('$(RepoRoot)', 'dotnet.sh'))</DotNetCli> <DotNetCli Condition="'$(OS)' == 'Windows_NT'">$([MSBuild]::NormalizePath('$(RepoRoot)', 'dotnet.cmd'))</DotNetCli> </PropertyGroup> <PropertyGroup> <CrossDir></CrossDir> </PropertyGroup> <PropertyGroup Condition="'$(BuildArchitecture)' != '$(TargetArchitecture)'"> <CrossDir Condition="'$(TargetArchitecture)' == 'arm' or '$(TargetArchitecture)' == 'arm64' or '$(TargetArchitecture)' == 'armel'">x64</CrossDir> <CrossDir Condition="'$(TargetArchitecture)' == 'x86'">$(BuildArchitecture)</CrossDir> </PropertyGroup> <PropertyGroup> <BuildDll>true</BuildDll> <BuildDll Condition="'$(CrossBuild)' == 'true' and '$(CrossDir)' == ''">false</BuildDll> <BuildPdb>false</BuildPdb> <BuildPdb Condition="$(BuildDll) and '$(OS)' == 'Windows_NT' and '$(TargetOS)' == 'Windows'">true</BuildPdb> <BuildPerfMap>false</BuildPerfMap> <BuildPerfMap Condition="$(BuildDll) and '$(TargetOS)' == 'Linux'">true</BuildPerfMap> </PropertyGroup> <ItemGroup> <CrossGen2DllFiles Condition="'$(CrossDir)' == ''" Include="$(BinDir)/crossgen2/*" /> <CrossGen2DllFiles Condition="'$(CrossDir)' != ''" Include="$(BinDir)/$(CrossDir)/crossgen2/*" /> </ItemGroup> <ItemGroup> <OptimizationMibcFiles Include="$(MibcOptimizationDataDir)/$(TargetOS)/$(TargetArchitecture)/**/*.mibc" /> </ItemGroup> <PropertyGroup> <CoreLibAssemblyName>System.Private.CoreLib</CoreLibAssemblyName> <CoreLibInputPath>$([MSBuild]::NormalizePath('$(BinDir)', 'IL', '$(CoreLibAssemblyName).dll'))</CoreLibInputPath> <CoreLibOutputPath>$([MSBuild]::NormalizePath('$(BinDir)', '$(CoreLibAssemblyName).dll'))</CoreLibOutputPath> <CoreLibNiPdbPath></CoreLibNiPdbPath> <CoreLibPerfMapPath></CoreLibPerfMapPath> <CoreLibNiPdbPath Condition="$(BuildPdb)">$([MSBuild]::NormalizePath('$(BinDir)', 'PDB', '$(CoreLibAssemblyName).ni.pdb'))</CoreLibNiPdbPath> <CoreLibPerfMapPath Condition="$(BuildPerfMap)">$([MSBuild]::NormalizePath('$(BinDir)', '$(CoreLibAssemblyName).ni.r2rmap'))</CoreLibPerfMapPath> <MergedMibcPath>$([MSBuild]::NormalizePath('$(BinDir)', 'StandardOptimizationData.mibc'))</MergedMibcPath> </PropertyGroup> </Target> <Target Name="CreateMergedMibcFile" DependsOnTargets="PrepareForCrossgen" Inputs="@(OptimizationMibcFiles)" Outputs="$(MergedMibcPath)"> <PropertyGroup> <DotNetPgoCmd>$(DotNetCli) $([MSBuild]::NormalizePath('$(BinDir)', 'dotnet-pgo', 'dotnet-pgo.dll')) merge</DotNetPgoCmd> <DotNetPgoCmd>$(DotNetPgoCmd) -o:$(MergedMibcPath)</DotNetPgoCmd> <DotNetPgoCmd>$(DotNetPgoCmd) @(OptimizationMibcFiles->'-i:%(Identity)', ' ')</DotNetPgoCmd> <DotNetPgoCmd>$(DotNetPgoCmd) --inherit-timestamp</DotNetPgoCmd> <!-- For incremental builds, otherwise timestamp is too far in the future --> </PropertyGroup> <Message Condition="'$(DotNetBuildFromSource)' != 'true'" Importance="High" Text="$(DotNetPgoCmd)"/> <Exec Condition="'$(DotNetBuildFromSource)' != 'true'" Command="$(DotNetPgoCmd)" /> </Target> <Target Name="InvokeCrossgen" 
DependsOnTargets="PrepareForCrossgen;CreateMergedMibcFile" Inputs="$(CoreLibInputPath);@(CrossGen2DllFiles);$(MergedMibcPath)" Outputs="$(CoreLibOutputPath);$(CoreLibNiPdbPath);$(CoreLibPerfMapPath)" AfterTargets="Build"> <MakeDir Directories="$(BinDir);$(IntermediatesDir);$(LogsDir)" /> <Message Importance="High" Text="Generating native image of System.Private.CoreLib for $(OSPlatformConfig). Logging to $(CrossGenCoreLibLog)" /> <PropertyGroup> <CrossGenDllCmd>$(DotNetCli) $([MSBuild]::NormalizePath('$(BinDir)', '$(CrossDir)', 'crossgen2', 'crossgen2.dll'))</CrossGenDllCmd> <CrossGenDllCmd>$(CrossGenDllCmd) -o:$(CoreLibOutputPath)</CrossGenDllCmd> <CrossGenDllCmd>$(CrossGenDllCmd) -r:$([MSBuild]::NormalizePath('$(BinDir)', 'IL', '*.dll'))</CrossGenDllCmd> <CrossGenDllCmd>$(CrossGenDllCmd) --targetarch:$(TargetArchitecture)</CrossGenDllCmd> <MibcArgs>@(OptimizationMibcFiles->'-m:$(MergedMibcPath)', ' ')</MibcArgs> <CrossGenDllCmd Condition="'$(UsingToolIbcOptimization)' != 'true' and '$(EnableNgenOptimization)' == 'true'">$(CrossGenDllCmd) $(MibcArgs) --embed-pgo-data</CrossGenDllCmd> <CrossGenDllCmd>$(CrossGenDllCmd) -O</CrossGenDllCmd> <CrossGenDllCmd Condition="'$(Configuration)' == 'Debug' or '$(Configuration)' == 'Checked'">$(CrossGenDllCmd) --verify-type-and-field-layout</CrossGenDllCmd> <CrossGenDllCmd>$(CrossGenDllCmd) $(CoreLibInputPath)</CrossGenDllCmd> </PropertyGroup> <PropertyGroup Condition="$(BuildPdb)"> <CrossGenDllCmd>$(CrossGenDllCmd) --pdb --pdb-path:$([MSBuild]::NormalizePath('$(BinDir)', 'PDB'))</CrossGenDllCmd> </PropertyGroup> <PropertyGroup Condition="$(BuildPerfMap)"> <CrossGenDllCmd>$(CrossGenDllCmd) --perfmap-format-version:1</CrossGenDllCmd> <CrossGenDllCmd>$(CrossGenDllCmd) --perfmap --perfmap-path:$(BinDir)</CrossGenDllCmd> </PropertyGroup> <Message Condition="$(BuildDll)" Importance="High" Text="$(CrossGenDllCmd)" /> <Exec Condition="$(BuildDll)" Command="$(CrossGenDllCmd)" /> <Message Condition="$(BuildPdb)" Importance="High" Text="$(CrossGenPdbCmd)" /> <Exec Condition="$(BuildPdb) and '$(CrossGenPdbCmd)' != ''" Command="$(CrossGenPdbCmd)" /> <Message Condition="$(BuildPerfMap)" Importance="High" Text="$(CrossGenPerfMapCmd)" /> <Exec Condition="$(BuildPerfMap) and '$(CrossGenPerfMapCmd)' != ''" Command="$(CrossGenPerfMapCmd)" /> <Copy Condition="!$(BuildDll)" SourceFiles="$(CoreLibInputPath)" DestinationFiles="$(CoreLibOutputPath)" UseHardlinksIfPossible="true" /> <Message Importance="High" Text="Crossgenning of System.Private.CoreLib succeeded. Finished at $(TIME)" /> <Message Importance="High" Text="Product binaries are available at $(BinDir)" /> </Target> </Project>
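Editor's note: both targets above opt into incremental builds through Inputs/Outputs; MSBuild compares timestamps and skips the target body when every output is newer than every input. The mechanism in isolation (file names invented):

<Project>
  <Target Name="MergeMibc"
          Inputs="a.mibc;b.mibc"
          Outputs="merged.mibc">
    <!-- Executes only when a.mibc or b.mibc is newer than merged.mibc,
         which is also why dotnet-pgo passes --inherit-timestamp above. -->
    <Message Importance="High" Text="merging..." />
  </Target>
</Project>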
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./eng/pipelines/common/macos-sign-with-entitlements.yml
parameters: filesToSign: [] steps: - task: UseDotNet@2 displayName: 'Use .NET Core SDK 2.1.808' inputs: packageType: sdk version: 2.1.808 - ${{ each file in parameters.filesToSign }}: - script: codesign -s - -f --entitlements ${{ file.entitlementsFile }} ${{ file.path }}/${{ file.name }} displayName: 'Add entitlements to ${{ file.name }}' - task: CopyFiles@2 displayName: 'Copy entitled file ${{ file.name }}' inputs: contents: '${{ file.path }}/${{ file.name }}' targetFolder: '$(Build.ArtifactStagingDirectory)/mac_entitled' overWrite: true - task: ArchiveFiles@2 displayName: 'Zip MacOS files for signing' inputs: rootFolderOrFile: '$(Build.ArtifactStagingDirectory)/mac_entitled' archiveFile: '$(Build.ArtifactStagingDirectory)/mac_entitled_to_sign.zip' archiveType: zip includeRootFolder: true replaceExistingArchive: true - task: SFP.build-tasks.custom-build-task-1.EsrpCodeSigning@1 displayName: 'ESRP CodeSigning' inputs: ConnectedServiceName: 'ESRP CodeSigning' FolderPath: '$(Build.ArtifactStagingDirectory)/' Pattern: 'mac_entitled_to_sign.zip' UseMinimatch: true signConfigType: inlineSignParams inlineOperation: | [ { "keyCode": "CP-401337-Apple", "operationCode": "MacAppDeveloperSign", "parameters" : { "hardening": "Enable" }, "toolName": "sign", "toolVersion": "1.0" } ] - task: ExtractFiles@1 displayName: 'Extract MacOS after signing' inputs: archiveFilePatterns: '$(Build.ArtifactStagingDirectory)/mac_entitled_to_sign.zip' destinationFolder: '$(Build.ArtifactStagingDirectory)/mac_entitled_signed' - ${{ each file in parameters.filesToSign }}: - task: CopyFiles@2 displayName: 'Copy ${{ file.name }} to destination' inputs: contents: ${{ file.name }} sourceFolder: '$(Build.ArtifactStagingDirectory)/mac_entitled_signed' targetFolder: '${{ file.path }}' overWrite: true
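Editor's note: the first loop stamps entitlements onto each binary with an ad-hoc signature (codesign -s -) before the zip is shipped to ESRP, whose MacAppDeveloperSign operation then re-signs the files that already carry the entitlement blob. Standalone equivalent for one file, with placeholder paths:

# force (-f) an ad-hoc (-s -) re-sign that embeds the entitlements plist
codesign -s - -f --entitlements entitlements.plist ./publish/dotnet
# inspect what got embedded
codesign -d --entitlements - ./publish/dotnet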
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./eng/pipelines/libraries/helix-queues-setup.yml
parameters: jobTemplate: '' variables: [] osGroup: '' osSubgroup: '' archType: '' container: '' pool: '' platform: '' runtimeFlavorDisplayName: '' shouldContinueOnError: false dependOnEvaluatePaths: false jobParameters: {} jobs: - template: ${{ parameters.jobTemplate }} parameters: variables: ${{ parameters.variables }} osGroup: ${{ parameters.osGroup }} osSubgroup: ${{ parameters.osSubgroup }} archType: ${{ parameters.archType }} container: ${{ parameters.container }} pool: ${{ parameters.pool }} platform: ${{ parameters.platform }} shouldContinueOnError: ${{ parameters.shouldContinueOnError }} dependOnEvaluatePaths: ${{ parameters.dependOnEvaluatePaths}} runtimeFlavorDisplayName: ${{ parameters.runtimeFlavorDisplayName }} helixQueues: # Linux arm - ${{ if eq(parameters.platform, 'Linux_arm') }}: - ${{ if or(eq(parameters.jobParameters.isExtraPlatforms, true), eq(parameters.jobParameters.includeAllPlatforms, true)) }}: - (Debian.10.Arm32.Open)Ubuntu.1804.ArmArch.Open@mcr.microsoft.com/dotnet-buildtools/prereqs:debian-10-helix-arm32v7-20210304164340-6616c63 - (Debian.11.Arm32.Open)Ubuntu.1804.ArmArch.Open@mcr.microsoft.com/dotnet-buildtools/prereqs:debian-11-helix-arm32v7-20210304164347-5a7c380 # Linux armv6 - ${{ if eq(parameters.platform, 'Linux_armv6') }}: # - ${{ if eq(parameters.jobParameters.isFullMatrix, true) }}: - (Raspbian.10.Armv6.Open)Ubuntu.1804.ArmArch.Open@mcr.microsoft.com/dotnet-buildtools/prereqs:raspbian-10-helix-arm32v6-20211215185610-60748cc # Linux arm64 - ${{ if eq(parameters.platform, 'Linux_arm64') }}: - ${{ if or(eq(parameters.jobParameters.isExtraPlatforms, true), eq(parameters.jobParameters.includeAllPlatforms, true)) }}: - (Ubuntu.2110.Arm64.Open)Ubuntu.1804.ArmArch.Open@mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-21.10-helix-arm64v8-20211116135000-0f8d97e - ${{ if or(ne(parameters.jobParameters.isExtraPlatforms, true), eq(parameters.jobParameters.includeAllPlatforms, true)) }}: - (Ubuntu.1804.ArmArch.Open)Ubuntu.1804.ArmArch.Open@mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-16.04-helix-arm64v8-20210106155927-56c6673 # Linux musl x64 - ${{ if eq(parameters.platform, 'Linux_musl_x64') }}: - ${{ if or(ne(parameters.jobParameters.isExtraPlatforms, true), eq(parameters.jobParameters.includeAllPlatforms, true)) }}: - (Alpine.314.Amd64.Open)Ubuntu.1804.Amd64.Open@mcr.microsoft.com/dotnet-buildtools/prereqs:alpine-3.14-helix-amd64-20210910135833-1848e19 - ${{ if or(eq(parameters.jobParameters.isExtraPlatforms, true), eq(parameters.jobParameters.includeAllPlatforms, true)) }}: - (Alpine.313.Amd64.Open)Ubuntu.1804.Amd64.Open@mcr.microsoft.com/dotnet-buildtools/prereqs:alpine-3.13-helix-amd64-20210910135845-8a6f4f3 # Linux musl arm64 - ${{ if and(eq(parameters.platform, 'Linux_musl_arm64'), or(eq(parameters.jobParameters.isExtraPlatforms, true), eq(parameters.jobParameters.includeAllPlatforms, true))) }}: - (Alpine.313.Arm64.Open)ubuntu.1804.armarch.open@mcr.microsoft.com/dotnet-buildtools/prereqs:alpine-3.13-helix-arm64v8-20210910135808-8a6f4f3 - (Alpine.314.Arm64.Open)ubuntu.1804.armarch.open@mcr.microsoft.com/dotnet-buildtools/prereqs:alpine-3.14-helix-arm64v8-20210910135810-8a6f4f3 # Linux x64 - ${{ if eq(parameters.platform, 'Linux_x64') }}: - ${{ if and(eq(parameters.jobParameters.interpreter, ''), ne(parameters.jobParameters.isSingleFile, true)) }}: - ${{ if and(eq(parameters.jobParameters.testScope, 'outerloop'), eq(parameters.jobParameters.runtimeFlavor, 'mono')) }}: - 
(Centos.8.Amd64.Open)Ubuntu.1804.Amd64.Open@mcr.microsoft.com/dotnet-buildtools/prereqs:centos-8-helix-20201229003624-c1bf759 - RedHat.7.Amd64.Open - SLES.15.Amd64.Open - (Fedora.34.Amd64.Open)Ubuntu.1804.Amd64.Open@mcr.microsoft.com/dotnet-buildtools/prereqs:fedora-34-helix-20210913123654-4f64125 - (Ubuntu.2110.Amd64.Open)Ubuntu.1804.Amd64.Open@mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-21.10-helix-amd64-20211116135132-0f8d97e - (Debian.10.Amd64.Open)Ubuntu.1804.Amd64.Open@mcr.microsoft.com/dotnet-buildtools/prereqs:debian-10-helix-amd64-bfcd90a-20200121150006 - ${{ if or(ne(parameters.jobParameters.testScope, 'outerloop'), ne(parameters.jobParameters.runtimeFlavor, 'mono')) }}: - ${{ if or(eq(parameters.jobParameters.isExtraPlatforms, true), eq(parameters.jobParameters.includeAllPlatforms, true)) }}: - (Centos.8.Amd64.Open)Ubuntu.1604.Amd64.Open@mcr.microsoft.com/dotnet-buildtools/prereqs:centos-8-helix-20201229003624-c1bf759 - SLES.15.Amd64.Open - (Fedora.34.Amd64.Open)ubuntu.1604.amd64.open@mcr.microsoft.com/dotnet-buildtools/prereqs:fedora-34-helix-20210913123654-4f64125 - (Ubuntu.2110.Amd64.Open)ubuntu.1604.amd64.open@mcr.microsoft.com/dotnet-buildtools/prereqs:ubuntu-21.04-helix-amd64-20210922170909-34a2d72 - (Debian.11.Amd64.Open)Ubuntu.1804.Amd64.Open@mcr.microsoft.com/dotnet-buildtools/prereqs:debian-11-helix-amd64-20210304164428-5a7c380 - (Mariner.1.0.Amd64.Open)ubuntu.1604.amd64.open@mcr.microsoft.com/dotnet-buildtools/prereqs:cbl-mariner-1.0-helix-20210528192219-92bf620 - (openSUSE.15.2.Amd64.Open)ubuntu.1604.amd64.open@mcr.microsoft.com/dotnet-buildtools/prereqs:opensuse-15.2-helix-amd64-20211018152525-9cc02fe - ${{ if or(ne(parameters.jobParameters.isExtraPlatforms, true), eq(parameters.jobParameters.includeAllPlatforms, true)) }}: - (Centos.7.Amd64.Open)Ubuntu.1604.Amd64.Open@mcr.microsoft.com/dotnet-buildtools/prereqs:centos-7-mlnet-helix-20210714125435-dde38af - RedHat.7.Amd64.Open - (Debian.10.Amd64.Open)Ubuntu.1804.Amd64.Open@mcr.microsoft.com/dotnet-buildtools/prereqs:debian-10-helix-amd64-20210304164434-56c6673 - Ubuntu.1804.Amd64.Open - ${{ if or(eq(parameters.jobParameters.interpreter, 'true'), eq(parameters.jobParameters.isSingleFile, true)) }}: # Limiting interp runs as we don't need as much coverage. 
- (Debian.10.Amd64.Open)Ubuntu.1804.Amd64.Open@mcr.microsoft.com/dotnet-buildtools/prereqs:debian-10-helix-amd64-20210304164434-56c6673 # Linux s390x - ${{ if eq(parameters.platform, 'Linux_s390x') }}: - Ubuntu.2004.S390X.Experimental.Open # OSX arm64 - ${{ if eq(parameters.platform, 'OSX_arm64') }}: - OSX.1200.ARM64.Open # OSX x64 - ${{ if eq(parameters.platform, 'OSX_x64') }}: - OSX.1200.Amd64.Open # Android - ${{ if in(parameters.platform, 'Android_x86', 'Android_x64') }}: - Ubuntu.1804.Amd64.Android.29.Open - ${{ if in(parameters.platform, 'Android_arm', 'Android_arm64') }}: - Windows.10.Amd64.Android.Open # iOS Simulator/Mac Catalyst arm64 - ${{ if in(parameters.platform, 'MacCatalyst_arm64', 'iOSSimulator_arm64') }}: - OSX.1100.Arm64.Open # iOS/tvOS simulator x64/x86 & MacCatalyst x64 - ${{ if in(parameters.platform, 'iOSSimulator_x64', 'iOSSimulator_x86', 'tvOSSimulator_x64', 'MacCatalyst_x64') }}: - OSX.1015.Amd64.Open # iOS devices - ${{ if in(parameters.platform, 'iOS_arm64') }}: - OSX.1015.Amd64.Iphone.Open # tvOS devices - ${{ if in(parameters.platform, 'tvOS_arm64') }}: - OSX.1015.Amd64.AppleTV.Open # windows x64 - ${{ if eq(parameters.platform, 'windows_x64') }}: # netcoreapp - ${{ if notIn(parameters.jobParameters.framework, 'net48') }}: # libraries on mono outerloop - ${{ if and(eq(parameters.jobParameters.testScope, 'outerloop'), eq(parameters.jobParameters.runtimeFlavor, 'mono')) }}: - Windows.81.Amd64.Open - Windows.Amd64.Server2022.Open # libraries on coreclr (outerloop and innerloop), or libraries on mono innerloop - ${{ if or(ne(parameters.jobParameters.testScope, 'outerloop'), ne(parameters.jobParameters.runtimeFlavor, 'mono')) }}: - ${{ if or(eq(parameters.jobParameters.isExtraPlatforms, true), eq(parameters.jobParameters.includeAllPlatforms, true)) }}: - Windows.10.Amd64.ServerRS5.Open - ${{ if ne(parameters.jobParameters.testScope, 'outerloop') }}: - Windows.Amd64.Server2022.Open - (Windows.Server.Core.1909.Amd64.Open)windows.10.amd64.server20h2.open@mcr.microsoft.com/dotnet-buildtools/prereqs:windowsservercore-2004-helix-amd64-20200904200251-272704c - ${{ if or(ne(parameters.jobParameters.isExtraPlatforms, true), eq(parameters.jobParameters.includeAllPlatforms, true)) }}: - Windows.81.Amd64.Open - Windows.10.Amd64.Server19H1.ES.Open - Windows.11.Amd64.ClientPre.Open - ${{ if eq(parameters.jobParameters.testScope, 'outerloop') }}: - (Windows.Server.Core.1909.Amd64.Open)windows.10.amd64.server20h2.open@mcr.microsoft.com/dotnet-buildtools/prereqs:windowsservercore-2004-helix-amd64-20200904200251-272704c - ${{ if ne(parameters.jobParameters.runtimeFlavor, 'mono') }}: - (Windows.Nano.1809.Amd64.Open)windows.10.amd64.serverrs5.open@mcr.microsoft.com/dotnet-buildtools/prereqs:nanoserver-1809-helix-amd64-08e8e40-20200107182504 # .NETFramework - ${{ if eq(parameters.jobParameters.framework, 'net48') }}: - Windows.10.Amd64.Client21H1.Open # windows x86 - ${{ if eq(parameters.platform, 'windows_x86') }}: # netcoreapp - ${{ if notIn(parameters.jobParameters.framework, 'net48') }}: # mono outerloop - ${{ if and(eq(parameters.jobParameters.testScope, 'outerloop'), eq(parameters.jobParameters.runtimeFlavor, 'mono')) }}: - Windows.7.Amd64.Open - Windows.10.Amd64.ServerRS5.Open # libraries on coreclr (outerloop and innerloop), or libraries on mono innerloop - ${{ if or(ne(parameters.jobParameters.testScope, 'outerloop'), ne(parameters.jobParameters.runtimeFlavor, 'mono')) }}: - ${{ if or(eq(parameters.jobParameters.isExtraPlatforms, true), 
eq(parameters.jobParameters.includeAllPlatforms, true)) }}: - Windows.10.Amd64.ServerRS5.Open - Windows.Amd64.Server2022.Open - ${{ if or(ne(parameters.jobParameters.isExtraPlatforms, true), eq(parameters.jobParameters.includeAllPlatforms, true)) }}: - Windows.10.Amd64.Server19H1.ES.Open - Windows.7.Amd64.Open # .NETFramework - ${{ if eq(parameters.jobParameters.framework, 'net48') }}: - Windows.10.Amd64.Client21H1.Open # windows arm - ${{ if eq(parameters.platform, 'windows_arm') }}: - Windows.10.Arm64v8.Open # windows arm64 - ${{ if eq(parameters.platform, 'windows_arm64') }}: - Windows.10.Arm64.Open # WebAssembly - ${{ if eq(parameters.platform, 'Browser_wasm') }}: - Ubuntu.1804.Amd64.Open # WebAssembly windows - ${{ if eq(parameters.platform, 'Browser_wasm_win') }}: - (Windows.Server.Core.1909.Amd64.Open)windows.10.amd64.server20h2.open@mcr.microsoft.com/dotnet-buildtools/prereqs:windowsservercore-2004-helix-webassembly-amd64-20210702131541-6837048 ${{ insert }}: ${{ parameters.jobParameters }}
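Editor's note: entries in helixQueues above follow the pattern (Alias)HostQueue@image — per the Helix convention, work is queued on HostQueue, executed inside the given docker image, and reported under Alias; bare names like Ubuntu.1804.Amd64.Open run directly on the VM. Annotated example taken from the list:

helixQueues:
#  (reported alias)      VM queue hosting docker      @ container image the tests run in
- (Alpine.314.Amd64.Open)Ubuntu.1804.Amd64.Open@mcr.microsoft.com/dotnet-buildtools/prereqs:alpine-3.14-helix-amd64-20210910135833-1848e19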
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./eng/regenerate-download-table.proj
<Project Sdk="Microsoft.Build.NoTargets"> <!-- installer.tasks needs to be built in advance. --> <UsingTask TaskName="RegenerateDownloadTable" AssemblyFile="$(InstallerTasksAssemblyPath)" /> <Target Name="RegenerateDownloadTable" AfterTargets="Build"> <!-- Column and row configuration for the readme table. The abbreviations match up with reference-style Markdown links in the readme file to figure out what to put in the cells. --> <PropertyGroup> <TablePath>$([MSBuild]::NormalizePath('$(RepoRoot)', 'docs', 'project', 'dogfooding.md'))</TablePath> </PropertyGroup> <ItemGroup> <Branch Include="Main" Abbr="6.0.X" /> <Platform Include="Windows (x64)" Abbr="win-x64" /> <Platform Include="Windows (x86)" Abbr="win-x86" /> <Platform Include="Windows (arm64)" Abbr="win-arm64" /> <Platform Include="macOS (x64)" Abbr="osx-x64" /> <Platform Include="macOS (arm64)" Abbr="osx-arm64" /> <Platform Include="Linux (x64)" Parenthetical=" (for glibc based OS)" Abbr="linux-x64" /> <Platform Include="Linux (armhf)" Parenthetical=" (for glibc based OS)" Abbr="linux-arm"/> <Platform Include="Linux (arm64)" Parenthetical=" (for glibc based OS)" Abbr="linux-arm64" /> <Platform Include="Linux-musl (x64)" Abbr="linux-musl-x64" /> <Platform Include="Linux-musl (arm)" Abbr="linux-musl-arm" /> <Platform Include="Linux-musl (arm64)" Abbr="linux-musl-arm64" /> <Platform Include="Dpkg Based Systems (x64)" Abbr="deb" /> <Platform Include="CentOS 7 (x64)" Abbr="centos-7" /> <Platform Include="RHEL 7.2 (x64)" Abbr="rhel7" /> <Platform Include="Fedora 27 (x64)" Abbr="fedora-27" /> <Platform Include="SLES 12 (x64)" Abbr="sles-12" /> <Platform Include="OpenSUSE 42 (x64)" Abbr="OpenSUSE-42" /> </ItemGroup> <RegenerateDownloadTable ReadmeFile="$(TablePath)" Branches="@(Branch)" Platforms="@(Platform)" /> <Message Text="$(MSBuildProjectName) -> $(TablePath)" Importance="High" /> </Target> </Project>
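Editor's note: the project binds a custom task from a prebuilt assembly, which is the reason for the "installer.tasks needs to be built in advance" caveat; UsingTask only references the DLL, it never builds it. The general shape of the pattern, with placeholder names:

<Project Sdk="Microsoft.Build.NoTargets">
  <!-- Bind a compiled task class; $(MyTasksAssemblyPath) must already exist on disk. -->
  <UsingTask TaskName="MyCustomTask" AssemblyFile="$(MyTasksAssemblyPath)" />
  <Target Name="Run" AfterTargets="Build">
    <MyCustomTask SomeInput="value" />
  </Target>
</Project>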
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./eng/pipelines/libraries/enterprise/linux.yml
# Disable pipeline for ordinary pushes to the branches trigger: none # To reduce load on the pipeline, enable it only for PRs that affect critical networking code pr: branches: include: - main - release/*.* paths: # If you are changing these and start including eng/common, adjust the Maestro subscriptions # so that this build can block dependency auto-updates (this build is currently ignored) include: - eng/pipelines/libraries/enterprise/* - src/libraries/Common/src/System/Net/* - src/libraries/Common/tests/System/Net/* - src/native/libs/System.Net.Security.Native/* - src/libraries/System.Net.Http/* - src/libraries/System.Net.Security/* pool: name: NetCore1ESPool-Public demands: ImageOverride -equals Build.Ubuntu.1804.Amd64.Open variables: - template: ../variables.yml - name: enterpriseTestsSetup value: $(sourcesRoot)/Common/tests/System/Net/EnterpriseTests/setup - name: containerRunTestsCommand value: /repo/dotnet.sh build /t:test - name: containerLibrariesRoot value: /repo/src/libraries steps: - bash: | cd $(enterpriseTestsSetup) docker-compose build displayName: Build test machine images env: DOTNET_RUNTIME_REPO_ROOT: $(Build.SourcesDirectory) - bash: | cd $(enterpriseTestsSetup) docker-compose up -d displayName: Start test network and machines env: DOTNET_RUNTIME_REPO_ROOT: $(Build.SourcesDirectory) - bash: | docker exec linuxclient bash /setup/test-webserver.sh displayName: Test linuxclient connection to web server - bash: | docker exec linuxclient bash -c '/repo/build.sh -subset clr+libs -runtimeconfiguration release -ci /p:NativeOptimizationDataSupported=false' displayName: Build product sources - bash: | docker exec linuxclient $(containerRunTestsCommand) $(containerLibrariesRoot)/System.Net.Http/tests/EnterpriseTests/System.Net.Http.Enterprise.Tests.csproj docker exec linuxclient $(containerRunTestsCommand) $(containerLibrariesRoot)/System.Net.Security/tests/EnterpriseTests/System.Net.Security.Enterprise.Tests.csproj displayName: Build and run tests - bash: | cd $(enterpriseTestsSetup) docker-compose down displayName: Stop test network and machines env: DOTNET_RUNTIME_REPO_ROOT: $(Build.SourcesDirectory) - task: PublishTestResults@2 inputs: testRunner: 'xUnit' testResultsFiles: '**/testResults.xml' testRunTitle: 'Enterprise Tests' mergeTestResults: true failTaskOnFailedTests: true
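Editor's note: every stage above shells into docker-compose: build the images, start the test network detached, exec the build and tests inside the linuxclient container, then tear down. A rough local replay under the same compose file; the directory assumes the pipeline's sourcesRoot resolves to src/libraries, and the repo-root path is yours to fill in:

cd src/libraries/Common/tests/System/Net/EnterpriseTests/setup
export DOTNET_RUNTIME_REPO_ROOT=/path/to/runtime   # consumed by the compose file
docker-compose build
docker-compose up -d
docker exec linuxclient bash -c '/repo/build.sh -subset clr+libs -runtimeconfiguration release'
docker-compose down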
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/installer/tests/Assets/TestUtils/SDKLookup/dotnet.runtimeconfig.json
{ "runtimeOptions": { /* This is a multiline comment to test that the JSON parser is correctly * set up to ignore them. */ "framework": { "name": "Microsoft.NETCore.App", // And this is a single-line comment "version": "9999.0.0" // that should be ignored by the parser } } }
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./eng/pipelines/coreclr/gc-standalone.yml
trigger: none schedules: - cron: "0 5 * * *" displayName: Mon through Sun at 9:00 PM (UTC-8:00) branches: include: - main always: true jobs: - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/common/build-coreclr-and-libraries-job.yml buildConfig: checked platforms: - Linux_arm64 - windows_arm64 - windows_x64 - CoreClrTestBuildHost # Either OSX_x64 or Linux_x64 jobParameters: testGroup: gc-standalone - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/common/templates/runtimes/build-test-job.yml buildConfig: checked platforms: - CoreClrTestBuildHost # Either OSX_x64 or Linux_x64 jobParameters: testGroup: gc-standalone - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/common/templates/runtimes/run-test-job.yml buildConfig: checked platforms: - Linux_arm64 - Linux_x64 - windows_arm64 - windows_x64 helixQueueGroup: ci helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml jobParameters: testGroup: gc-standalone displayNameArgs: GCStandAlone liveLibrariesBuildConfig: Release - template: /eng/pipelines/common/platform-matrix.yml parameters: jobTemplate: /eng/pipelines/common/templates/runtimes/run-test-job.yml buildConfig: checked platforms: - Linux_arm64 - Linux_x64 - windows_arm64 - windows_x64 helixQueueGroup: ci helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml jobParameters: testGroup: gc-standalone-server displayNameArgs: GCStandAloneServer liveLibrariesBuildConfig: Release
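Editor's note: schedules use standard five-field cron evaluated in UTC; the displayName merely restates 05:00 UTC as 9:00 PM at UTC-8. Field-by-field reading of the expression above:

# ┌───────────── minute (0)
# │ ┌─────────── hour (5 → 05:00 UTC, i.e. 21:00 at UTC-8)
# │ │ ┌───────── day of month (every day)
# │ │ │ ┌─────── month (every month)
# │ │ │ │ ┌───── day of week (every day)
cron: "0 5 * * *"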
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/libraries/oob-ref.proj
<Project Sdk="Microsoft.Build.Traversal"> <PropertyGroup> <TargetFramework>$(NetCoreAppCurrent)</TargetFramework> <!-- Filter ProjectReferences to build the best matching target framework only. --> <FilterTraversalProjectReferences>true</FilterTraversalProjectReferences> </PropertyGroup> <!-- Reference all NetCoreAppCurrent out-of-band ref projects. --> <ItemGroup> <ProjectReference Include="$(MSBuildThisFileDirectory)*\ref\*.csproj" Exclude="@(ProjectExclusions); shims\ref\*.csproj; @(NetCoreAppLibrary->'%(Identity)\ref\%(Identity).csproj')" /> </ItemGroup> </Project>
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/coreclr/vm/.vscode/c_cpp_properties.json
{ "configurations": [ { "name": "Win32-Debug", "includePath": [ "../../../../artifacts/obj/coreclr/windows.x64.Debug/src/vm/dac", "../../src/vm/dac", "../../src/vm", "../../src/pal/prebuilt/inc", "../../../../artifacts/obj", "../../src/inc", "../../src/inc/winrt", "../../src/debug/inc", "../../src/debug/inc/amd64", "../../src/debug/inc/dump", "../../src/md/inc", "../../src/classlibnative/bcltype", "../../src/classlibnative/inc", "../../../../artifacts/obj/coreclr/windows.x64.Debug/src/inc", "../../../../artifacts/obj/coreclr/windows.x64.Debug/src/inc/etw", "../../src/vm/amd64" ], "defines": [ "HOST_AMD64", "_BLD_CLR", "_CRT_SECURE_NO_WARNINGS", "_DBG", "_SECURE_SCL=0", "TARGET_64BIT=1", "TARGET_AMD64=1", "_UNICODE", "_WIN32", "_WIN32_WINNT=0x0602", "HOST_64BIT", "AMD64", "HOST_64BIT=1", "BUILDENV_CHECKED=1", "DACCESS_COMPILE", "TARGET_64BIT=1", "TARGET_AMD64=1", "TARGET_64BIT=1", "DEBUGGING_SUPPORTED", "EnC_SUPPORTED", "FEATURE_ARRAYSTUB_AS_IL", "FEATURE_BASICFREEZE", "FEATURE_CODE_VERSIONING", "FEATURE_COLLECTIBLE_TYPES", "FEATURE_COMINTEROP", "FEATURE_COMINTEROP_APARTMENT_SUPPORT", "FEATURE_COMINTEROP_UNMANAGED_ACTIVATION", "FEATURE_COMINTEROP_WINRT_MANAGED_ACTIVATION", "FEATURE_CORECLR", "FEATURE_DATABREAKPOINT", "FEATURE_DEFAULT_INTERFACES", "FEATURE_EVENT_TRACE=1", "FEATURE_HIJACK", "FEATURE_ICASTABLE", "FEATURE_INTEROP_DEBUGGING", "FEATURE_ISYM_READER", "FEATURE_MANAGED_ETW", "FEATURE_MANAGED_ETW_CHANNELS", "FEATURE_MULTICASTSTUB_AS_IL", "FEATURE_MULTICOREJIT", "FEATURE_PERFTRACING=1", "FEATURE_PROFAPI_ATTACH_DETACH", "FEATURE_READYTORUN", "FEATURE_REJIT", "FEATURE_STANDALONE_GC", "FEATURE_SVR_GC", "FEATURE_SYMDIFF", "FEATURE_TIERED_COMPILATION", "FEATURE_TYPEEQUIVALENCE", "FEATURE_USE_ASM_GC_WRITE_BARRIERS", "FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP", "FEATURE_UTF8STRING=1", "FEATURE_WIN32_REGISTRY", "TARGET_WINDOWS=1", "PROFILING_SUPPORTED_DATA", "UNICODE", "UNIX_AMD64_ABI_ITF", "URTBLDENV_FRIENDLY=Checked", "WIN32", "WIN32_LEAN_AND_MEAN=1", "WINVER=0x0602", "WRITE_BARRIER_CHECK=1" ], "cStandard": "c11", "cppStandard": "c++17", "intelliSenseMode": "msvc-x64" } ], "version": 4 }
-1
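The defines list above carries several repeated entries (for example, TARGET_64BIT=1 appears three times), which IntelliSense tolerates but which is easy to miss in a flattened file. Below is a minimal C# sketch that flags duplicate defines in a c_cpp_properties.json; the relative file path is an assumption for illustration, not the actual location in the repo.

using System;
using System.IO;
using System.Linq;
using System.Text.Json;

class DuplicateDefineChecker
{
    static void Main()
    {
        // Hypothetical location; point this at the c_cpp_properties.json being inspected.
        string json = File.ReadAllText(".vscode/c_cpp_properties.json");
        using JsonDocument doc = JsonDocument.Parse(json);

        foreach (JsonElement config in doc.RootElement.GetProperty("configurations").EnumerateArray())
        {
            string name = config.GetProperty("name").GetString();
            // Group the "defines" entries and report any that occur more than once.
            var duplicates = config.GetProperty("defines").EnumerateArray()
                                   .Select(d => d.GetString())
                                   .GroupBy(d => d)
                                   .Where(g => g.Count() > 1);
            foreach (var group in duplicates)
                Console.WriteLine($"{name}: '{group.Key}' listed {group.Count()} times");
        }
    }
}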
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/libraries/System.Runtime.Loader/tests/ApplyUpdate/System.Reflection.Metadata.ApplyUpdate.Test.AddStaticLambda/deltascript.json
{ "changes": [ {"document": "AddStaticLambda.cs", "update": "AddStaticLambda_v1.cs"}, ] }
{ "changes": [ {"document": "AddStaticLambda.cs", "update": "AddStaticLambda_v1.cs"}, ] }
-1
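This deltascript drives the ApplyUpdate (hot reload) test harness: AddStaticLambda_v1.cs is compiled as an Edit-and-Continue delta over the AddStaticLambda.cs baseline. The actual test sources are not shown in this record; the following is a purely hypothetical C# sketch of what such a v0/v1 pair could look like, inferred only from the test's name (a method body edited to introduce a static lambda, which requires C# 9 or later).

// AddStaticLambda.cs -- hypothetical baseline
public class AddStaticLambda
{
    public string TestMethod() => "original";
}

// AddStaticLambda_v1.cs -- hypothetical update; the edit adds a static lambda.
public class AddStaticLambda
{
    public string TestMethod()
    {
        System.Func<string, string> f = static s => s + "!";
        return f("updated");
    }
}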
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./eng/pipelines/coreclr/runincontext.yml
trigger: none

schedules:
- cron: "0 13 * * 6,0"
  displayName: Sat and Sun at 5:00 AM (UTC-8:00)
  branches:
    include:
    - main
  always: true

jobs:
- template: /eng/pipelines/common/platform-matrix.yml
  parameters:
    jobTemplate: /eng/pipelines/common/build-coreclr-and-libraries-job.yml
    buildConfig: checked
    platforms:
    - Linux_x64
    - windows_x64
    - windows_x86
    - CoreClrTestBuildHost # Either OSX_x64 or Linux_x64
    jobParameters:
      testGroup: outerloop

- template: /eng/pipelines/common/platform-matrix.yml
  parameters:
    jobTemplate: /eng/pipelines/common/templates/runtimes/build-test-job.yml
    buildConfig: checked
    platforms:
    - CoreClrTestBuildHost # Either OSX_x64 or Linux_x64
    jobParameters:
      testGroup: outerloop

- template: /eng/pipelines/common/platform-matrix.yml
  parameters:
    jobTemplate: /eng/pipelines/common/templates/runtimes/run-test-job.yml
    buildConfig: checked
    platforms:
    - Linux_x64
    - windows_x64
    - windows_x86
    helixQueueGroup: ci
    helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml
    jobParameters:
      testGroup: outerloop
      runInUnloadableContext: true
      displayNameArgs: RunInContext
      liveLibrariesBuildConfig: Release
-1
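The cron expression "0 13 * * 6,0" fires at 13:00 UTC on Saturdays and Sundays, which the displayName translates to 5:00 AM at UTC-8:00. A small C# sanity check of that arithmetic follows; the IANA time zone ID and the sample date are assumptions of the sketch (February 26, 2022 is a Saturday, when Pacific time is outside daylight saving).

using System;

class CronSanityCheck
{
    static void Main()
    {
        // 13:00 UTC on an arbitrary Saturday.
        var utc = new DateTime(2022, 2, 26, 13, 0, 0, DateTimeKind.Utc);
        // "America/Los_Angeles" is UTC-8 outside daylight saving time.
        var pacific = TimeZoneInfo.FindSystemTimeZoneById("America/Los_Angeles");
        // Prints 2/26/2022 5:00:00 AM, matching the displayName in the schedule.
        Console.WriteLine(TimeZoneInfo.ConvertTimeFromUtc(utc, pacific));
    }
}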
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/libraries/sfx.proj
<Project Sdk="Microsoft.Build.NoTargets"> <PropertyGroup> <TargetFramework>$(NetCoreAppCurrent)-$(TargetOS)</TargetFramework> <BuildInParallel>false</BuildInParallel> </PropertyGroup> <ItemGroup> <!-- The sfx ref projects must be built first as sfx src projects don't use P2Ps to reference each other. Instead the references are defined manually via Reference items, therefore the reference assemblies must exist before that. --> <ProjectReference Include="sfx-ref.proj"> <OutputItemType Condition="'$(RefOnly)' == 'true'">SharedFrameworkAssembly</OutputItemType> </ProjectReference> <!-- Support building only the reference assemblies. --> <ProjectReference Include="sfx-src.proj" OutputItemType="SharedFrameworkAssembly" Condition="'$(RefOnly)' != 'true'" /> </ItemGroup> <!-- Generate the targeting pack's framework list so that out-of-band projects can leverage it. --> <Import Project="frameworklist.targets" /> <!-- Import the illink file which contains some of the logic required to illink the shared framework assemblies. --> <Import Project="$(RepositoryEngineeringDir)illink.targets" /> <Target Name="GetTrimSharedFrameworkAssembliesInputs"> <PropertyGroup> <SharedFrameworkAssembliesMarkerFile>$(IntermediateOutputPath)linker-$(TargetArchitecture)-marker.txt</SharedFrameworkAssembliesMarkerFile> </PropertyGroup> <ItemGroup> <!-- Include suppression XML files bin-placed in earlier per-library linker run. --> <SharedFrameworkSuppressionsXml Include="$(ILLinkTrimAssemblyRuntimePackSuppressionsXmlsDir)*.xml" /> <!-- Collect CoreLib suppression XML files not bin-placed in earlier per-library linker run. CoreLib doesn't use bin-place logic. --> <SharedFrameworkSuppressionsXml Include="$(CoreLibSharedDir)ILLink\ILLink.Suppressions.LibraryBuild.xml" /> <SharedFrameworkSuppressionsXml Condition="'$(RuntimeFlavor)' == 'CoreCLR'" Include="$(CoreClrProjectRoot)System.Private.CoreLib\src\ILLink\ILLink.Suppressions.LibraryBuild.xml" /> </ItemGroup> </Target> <Target Name="TrimSharedFrameworkAssemblies" AfterTargets="Build" DependsOnTargets="ResolveProjectReferences;GetTrimSharedFrameworkAssembliesInputs;PrepareForAssembliesTrim" Condition="'$(RefOnly)' != 'true'" Inputs="@(SharedFrameworkAssembly);@(SharedFrameworkSuppressionsXml);$(ILLinkTasksAssembly)" Outputs="$(SharedFrameworkAssembliesMarkerFile)"> <Message Text="$(MSBuildProjectName) -> Trimming $(PackageRID) shared framework assemblies with ILLinker..." Importance="high" /> <PropertyGroup> <SharedFrameworkILLinkArgs>$(ILLinkArgs)</SharedFrameworkILLinkArgs> <!-- update debug symbols --> <SharedFrameworkILLinkArgs>$(SharedFrameworkILLinkArgs) -b true</SharedFrameworkILLinkArgs> <SharedFrameworkILLinkArgs Condition="'@(SharedFrameworkSuppressionsXml)' != ''" >$(SharedFrameworkILLinkArgs) --link-attributes &quot;@(SharedFrameworkSuppressionsXml->'%(FullPath)', '&quot; --link-attributes &quot;')&quot;</SharedFrameworkILLinkArgs> </PropertyGroup> <ItemGroup> <SharedFrameworkAssembly RootMode="library" /> </ItemGroup> <ILLink AssemblyPaths="" RootAssemblyNames="@(SharedFrameworkAssembly)" OutputDirectory="$([MSBuild]::NormalizeDirectory('$(ILLinkTrimAssemblyArtifactsRootDir)', 'trimmed-runtimepack'))" ExtraArgs="$(SharedFrameworkILLinkArgs)" ToolExe="$(_DotNetHostFileName)" ToolPath="$(_DotNetHostDirectory)" /> <!-- Create a marker file which serves as the target's output to enable incremental builds. 
--> <MakeDir Directories="$([System.IO.Path]::GetDirectoryName('$(SharedFrameworkAssembliesMarkerFile)'))" /> <Touch Files="$(SharedFrameworkAssembliesMarkerFile)" AlwaysCreate="true" /> </Target> </Project>
<Project Sdk="Microsoft.Build.NoTargets"> <PropertyGroup> <TargetFramework>$(NetCoreAppCurrent)-$(TargetOS)</TargetFramework> <BuildInParallel>false</BuildInParallel> </PropertyGroup> <ItemGroup> <!-- The sfx ref projects must be built first as sfx src projects don't use P2Ps to reference each other. Instead the references are defined manually via Reference items, therefore the reference assemblies must exist before that. --> <ProjectReference Include="sfx-ref.proj"> <OutputItemType Condition="'$(RefOnly)' == 'true'">SharedFrameworkAssembly</OutputItemType> </ProjectReference> <!-- Support building only the reference assemblies. --> <ProjectReference Include="sfx-src.proj" OutputItemType="SharedFrameworkAssembly" Condition="'$(RefOnly)' != 'true'" /> </ItemGroup> <!-- Generate the targeting pack's framework list so that out-of-band projects can leverage it. --> <Import Project="frameworklist.targets" /> <!-- Import the illink file which contains some of the logic required to illink the shared framework assemblies. --> <Import Project="$(RepositoryEngineeringDir)illink.targets" /> <Target Name="GetTrimSharedFrameworkAssembliesInputs"> <PropertyGroup> <SharedFrameworkAssembliesMarkerFile>$(IntermediateOutputPath)linker-$(TargetArchitecture)-marker.txt</SharedFrameworkAssembliesMarkerFile> </PropertyGroup> <ItemGroup> <!-- Include suppression XML files bin-placed in earlier per-library linker run. --> <SharedFrameworkSuppressionsXml Include="$(ILLinkTrimAssemblyRuntimePackSuppressionsXmlsDir)*.xml" /> <!-- Collect CoreLib suppression XML files not bin-placed in earlier per-library linker run. CoreLib doesn't use bin-place logic. --> <SharedFrameworkSuppressionsXml Include="$(CoreLibSharedDir)ILLink\ILLink.Suppressions.LibraryBuild.xml" /> <SharedFrameworkSuppressionsXml Condition="'$(RuntimeFlavor)' == 'CoreCLR'" Include="$(CoreClrProjectRoot)System.Private.CoreLib\src\ILLink\ILLink.Suppressions.LibraryBuild.xml" /> </ItemGroup> </Target> <Target Name="TrimSharedFrameworkAssemblies" AfterTargets="Build" DependsOnTargets="ResolveProjectReferences;GetTrimSharedFrameworkAssembliesInputs;PrepareForAssembliesTrim" Condition="'$(RefOnly)' != 'true'" Inputs="@(SharedFrameworkAssembly);@(SharedFrameworkSuppressionsXml);$(ILLinkTasksAssembly)" Outputs="$(SharedFrameworkAssembliesMarkerFile)"> <Message Text="$(MSBuildProjectName) -> Trimming $(PackageRID) shared framework assemblies with ILLinker..." Importance="high" /> <PropertyGroup> <SharedFrameworkILLinkArgs>$(ILLinkArgs)</SharedFrameworkILLinkArgs> <!-- update debug symbols --> <SharedFrameworkILLinkArgs>$(SharedFrameworkILLinkArgs) -b true</SharedFrameworkILLinkArgs> <SharedFrameworkILLinkArgs Condition="'@(SharedFrameworkSuppressionsXml)' != ''" >$(SharedFrameworkILLinkArgs) --link-attributes &quot;@(SharedFrameworkSuppressionsXml->'%(FullPath)', '&quot; --link-attributes &quot;')&quot;</SharedFrameworkILLinkArgs> </PropertyGroup> <ItemGroup> <SharedFrameworkAssembly RootMode="library" /> </ItemGroup> <ILLink AssemblyPaths="" RootAssemblyNames="@(SharedFrameworkAssembly)" OutputDirectory="$([MSBuild]::NormalizeDirectory('$(ILLinkTrimAssemblyArtifactsRootDir)', 'trimmed-runtimepack'))" ExtraArgs="$(SharedFrameworkILLinkArgs)" ToolExe="$(_DotNetHostFileName)" ToolPath="$(_DotNetHostDirectory)" /> <!-- Create a marker file which serves as the target's output to enable incremental builds. 
--> <MakeDir Directories="$([System.IO.Path]::GetDirectoryName('$(SharedFrameworkAssembliesMarkerFile)'))" /> <Touch Files="$(SharedFrameworkAssembliesMarkerFile)" AlwaysCreate="true" /> </Target> </Project>
-1
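The trickiest construct in the target above is the MSBuild item transform that turns every suppression XML into its own --link-attributes argument: the transform joins the items' full paths with the separator '" --link-attributes "', and the surrounding &quot; pair closes the quoting. The C# sketch below reproduces the resulting string shape with hypothetical file paths standing in for @(SharedFrameworkSuppressionsXml).

using System;

class LinkAttributesExpansion
{
    static void Main()
    {
        // Hypothetical suppression files; the real ones are collected by the target above.
        string[] xmls =
        {
            "/repo/a/ILLink.Suppressions.LibraryBuild.xml",
            "/repo/b/ILLink.Suppressions.LibraryBuild.xml"
        };
        // Join full paths with '" --link-attributes "' and wrap the whole thing in quotes,
        // yielding one quoted --link-attributes flag per file.
        string args = "--link-attributes \"" + string.Join("\" --link-attributes \"", xmls) + "\"";
        Console.WriteLine(args);
        // --link-attributes "/repo/a/..." --link-attributes "/repo/b/..."
    }
}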
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./eng/pipelines/common/variables.yml
variables:

# These values enable longer delays, configurable number of retries, and special understanding of TCP hang-up
# See https://github.com/NuGet/Home/issues/11027 for details
- name: NUGET_ENABLE_EXPERIMENTAL_HTTP_RETRY
  value: true
- name: NUGET_EXPERIMENTAL_MAX_NETWORK_TRY_COUNT
  value: 6
- name: NUGET_EXPERIMENTAL_NETWORK_RETRY_DELAY_MILLISECONDS
  value: 1000

- name: isOfficialBuild
  value: ${{ and(eq(variables['System.TeamProject'], 'internal'), eq(variables['Build.DefinitionName'], 'dotnet-runtime-official')) }}
- name: isRollingBuild
  value: ${{ ne(variables['Build.Reason'], 'PullRequest') }}
- name: isExtraPlatformsBuild
  value: ${{ eq(variables['Build.DefinitionName'], 'runtime-extra-platforms') }}
- name: isNotExtraPlatformsBuild
  value: ${{ ne(variables['Build.DefinitionName'], 'runtime-extra-platforms') }}
- name: isWasmOnlyBuild
  value: ${{ eq(variables['Build.DefinitionName'], 'runtime-wasm') }}
- name: isRunSmokeTestsOnly
  value: ${{ and(ne(variables['Build.DefinitionName'], 'runtime-extra-platforms'), ne(variables['Build.DefinitionName'], 'runtime-wasm')) }}
- name: isNotSpecificPlatformOnlyBuild
  value: ${{ ne(variables['Build.DefinitionName'], 'runtime-wasm') }}

# We only run evaluate paths on runtime, runtime-staging and runtime-community pipelines on PRs
# keep in sync with /eng/pipelines/common/xplat-setup.yml
- name: dependOnEvaluatePaths
  value: ${{ and(eq(variables['Build.Reason'], 'PullRequest'), in(variables['Build.DefinitionName'], 'runtime', 'runtime-staging', 'runtime-community', 'runtime-extra-platforms')) }}

- name: debugOnPrReleaseOnRolling
  ${{ if ne(variables['Build.Reason'], 'PullRequest') }}:
    value: Release
  ${{ if eq(variables['Build.Reason'], 'PullRequest') }}:
    value: Debug
-1
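debugOnPrReleaseOnRolling resolves at template-expansion time: Debug on pull requests, Release on everything else. Written out in C# for clarity, the selection is just the following branch (variable and method names here are illustrative, not part of the pipeline).

using System;

class ConfigSelection
{
    // Mirrors the two ${{ if ... }} branches on Build.Reason above.
    static string DebugOnPrReleaseOnRolling(string buildReason) =>
        buildReason == "PullRequest" ? "Debug" : "Release";

    static void Main()
    {
        Console.WriteLine(DebugOnPrReleaseOnRolling("PullRequest")); // Debug
        Console.WriteLine(DebugOnPrReleaseOnRolling("Schedule"));    // Release
    }
}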
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./eng/pipelines/coreclr/libraries-gcstress0x3-gcstress0xc.yml
trigger: none

# This pipeline currently has too many failures to be enabled by schedule.
# schedules:
# - cron: "0 10 * * 6"
#   displayName: Sat at 2:00 AM (UTC-8:00)
#   branches:
#     include:
#     - main
#   always: true

jobs:

#
# Build CoreCLR checked and libraries Release
#
- template: /eng/pipelines/common/platform-matrix.yml
  parameters:
    jobTemplate: /eng/pipelines/common/build-coreclr-and-libraries-job.yml
    buildConfig: checked
    platformGroup: gcstress
    jobParameters:
      # libraries test build platforms
      testBuildPlatforms:
      - Linux_x64
      - windows_x64

#
# Libraries Test Run using Release libraries, Checked CoreCLR, and stress modes
#
- template: /eng/pipelines/common/platform-matrix.yml
  parameters:
    jobTemplate: /eng/pipelines/libraries/run-test-job.yml
    buildConfig: Release
    platformGroup: gcstress
    helixQueueGroup: libraries
    helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml
    jobParameters:
      # Default timeout is 150 minutes (2.5 hours), which is not enough for stress.
      timeoutInMinutes: 600
      testScope: innerloop
      liveRuntimeBuildConfig: checked
      dependsOnTestBuildConfiguration: Release
      dependsOnTestArchitecture: x64
      coreclrTestGroup: gcstress0x3-gcstress0xc
-1
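The gcstress0x3-gcstress0xc test group runs the suites under the runtime's GCStress configuration knob at levels 0x3 and 0xC, which force garbage collections far more often than normal execution would. A hedged C# sketch of launching a test process under GC stress; the target executable and the level comments are assumptions of the sketch, not taken from the pipeline.

using System.Diagnostics;

class GCStressLauncher
{
    static void Main()
    {
        // Hypothetical test entry point standing in for a real test host invocation.
        var psi = new ProcessStartInfo("dotnet", "test.dll");
        // Roughly: 0x3 stresses allocations and mode transitions; 0xC stresses
        // GC-safe points in JITed/precompiled code. Confirm against runtime docs.
        psi.Environment["DOTNET_GCStress"] = "0xC";
        Process.Start(psi)?.WaitForExit();
    }
}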
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./eng/pipelines/common/templates/runtimes/run-test-job.yml
parameters:
  buildConfig: ''
  archType: ''
  osGroup: ''
  osSubgroup: ''
  container: ''
  testGroup: ''
  crossBuild: false
  crossrootfsDir: ''
  readyToRun: false
  liveLibrariesBuildConfig: ''
  crossgen2: false
  compositeBuildMode: false
  helixQueues: ''
  condition: true
  stagedBuild: false
  displayNameArgs: ''
  runInUnloadableContext: false
  tieringTest: false
  runtimeVariant: ''
  variables: {}
  pool: ''
  runtimeFlavor: 'coreclr'
  runtimeFlavorDisplayName: 'CoreCLR'
  shouldContinueOnError: false
  dependsOn: []
  dependOnEvaluatePaths: false

### Test run job

### Each test run job depends on a corresponding test build job with the same
### buildConfig and archType.

jobs:
- template: /eng/pipelines/${{ parameters.runtimeFlavor }}/templates/xplat-pipeline-job.yml
  parameters:
    buildConfig: ${{ parameters.buildConfig }}
    archType: ${{ parameters.archType }}
    osGroup: ${{ parameters.osGroup }}
    osSubgroup: ${{ parameters.osSubgroup }}
    container: ${{ parameters.container }}
    testGroup: ${{ parameters.testGroup }}
    crossBuild: ${{ parameters.crossBuild }}
    crossrootfsDir: ${{ parameters.crossrootfsDir }}
    stagedBuild: ${{ parameters.stagedBuild }}
    liveLibrariesBuildConfig: ${{ parameters.liveLibrariesBuildConfig }}
    helixType: 'build/tests/'
    runtimeVariant: ${{ parameters.runtimeVariant }}
    pool: ${{ parameters.pool }}
    condition: ${{ parameters.condition }}
    dependOnEvaluatePaths: ${{ parameters.dependOnEvaluatePaths }}

    # Test jobs should continue on error for internal builds
    ${{ if eq(variables['System.TeamProject'], 'internal') }}:
      continueOnError: true

    ${{ if ne(parameters.dependsOn[0], '') }}:
      dependsOn: ${{ parameters.dependsOn }}

    ${{ if eq(parameters.dependsOn[0], '') }}:
      dependsOn:
      - ${{ if in(parameters.testGroup, 'innerloop', 'clrinterpreter') }}:
        - '${{ parameters.runtimeFlavor }}_common_test_build_p0_AnyOS_AnyCPU_${{ parameters.buildConfig }}'
      - ${{ if notIn(parameters.testGroup, 'innerloop', 'clrinterpreter') }}:
        - '${{ parameters.runtimeFlavor }}_common_test_build_p1_AnyOS_AnyCPU_${{ parameters.buildConfig }}'
      - ${{ if ne(parameters.stagedBuild, true) }}:
        - ${{ if or( eq(parameters.runtimeVariant, 'minijit'), eq(parameters.runtimeVariant, 'monointerpreter'), eq(parameters.runtimeVariant, 'llvmaot'), eq(parameters.runtimeVariant, 'llvmfullaot')) }}:
          # This is needed for creating a CORE_ROOT in the current design.
          - ${{ format('coreclr_{0}_product_build_{1}{2}_{3}_{4}', '', parameters.osGroup, parameters.osSubgroup, parameters.archType, parameters.buildConfig) }}
        - ${{ if or( eq(parameters.runtimeVariant, 'minijit'), eq(parameters.runtimeVariant, 'monointerpreter')) }}:
          # minijit and mono interpreter runtime variants do not require any special build of the runtime
          - ${{ format('{0}_{1}_product_build_{2}{3}_{4}_{5}', parameters.runtimeFlavor, '', parameters.osGroup, parameters.osSubgroup, parameters.archType, parameters.buildConfig) }}
        - ${{ if not(or(eq(parameters.runtimeVariant, 'minijit'), eq(parameters.runtimeVariant, 'monointerpreter'))) }}:
          - ${{ if eq(parameters.runtimeVariant, 'llvmfullaot') }}:
            - ${{ format('{0}_llvmaot_product_build_{1}{2}_{3}_{4}', parameters.runtimeFlavor, parameters.osGroup, parameters.osSubgroup, parameters.archType, parameters.buildConfig) }}
          - ${{ if ne(parameters.runtimeVariant, 'llvmfullaot') }}:
            - ${{ format('{0}_{1}_product_build_{2}{3}_{4}_{5}', parameters.runtimeFlavor, parameters.runtimeVariant, parameters.osGroup, parameters.osSubgroup, parameters.archType, parameters.buildConfig) }}
      - ${{ if ne(parameters.liveLibrariesBuildConfig, '') }}:
        - ${{ format('libraries_build_{0}{1}_{2}_{3}', parameters.osGroup, parameters.osSubgroup, parameters.archType, parameters.liveLibrariesBuildConfig) }}

    # Compute job name from template parameters
    ${{ if in(parameters.testGroup, 'innerloop', 'clrinterpreter') }}:
      name: 'run_test_p0_${{ parameters.runtimeFlavor }}${{ parameters.runtimeVariant }}_${{ parameters.displayNameArgs }}_${{ parameters.osGroup }}${{ parameters.osSubgroup }}_${{ parameters.archType }}_${{ parameters.buildConfig }}'
      displayName: '${{ parameters.runtimeFlavorDisplayName }} ${{ parameters.runtimeVariant }} Pri0 Runtime Tests Run ${{ parameters.displayNameArgs }} ${{ parameters.osGroup }}${{ parameters.osSubgroup }} ${{ parameters.archType }} ${{ parameters.buildConfig }}'
    ${{ if notIn(parameters.testGroup, 'innerloop', 'clrinterpreter') }}:
      name: 'run_test_p1_${{ parameters.displayNameArgs }}_${{ parameters.osGroup }}${{ parameters.osSubgroup }}_${{ parameters.archType }}_${{ parameters.buildConfig }}'
      displayName: '${{ parameters.runtimeFlavorDisplayName }} ${{ parameters.runtimeVariant }} Pri1 Runtime Tests Run ${{ parameters.displayNameArgs }} ${{ parameters.osGroup }}${{ parameters.osSubgroup }} ${{ parameters.archType }} ${{ parameters.buildConfig }}'

    variables:
    - name: monoAotBuildshCommand
      value: ''
    - ${{ if eq(parameters.runtimeVariant, 'llvmaot') }}:
      - name: monoAotBuildshCommand
        value: 'mono_aot'
    - ${{ if eq(parameters.runtimeVariant, 'llvmfullaot') }}:
      - name: monoAotBuildshCommand
        value: 'mono_fullaot'
    - name: runtimeFlavorArgs
      value: ''
    - ${{ if eq(parameters.runtimeFlavor, 'mono') }}:
      - name: runtimeFlavorArgs
        value: '-mono'
    - name: runtimeVariantArg
      value: ''
    - ${{ if ne(parameters.runtimeVariant, '') }}:
      - name: runtimeVariantArg
        value: '/p:RuntimeVariant=${{ parameters.runtimeVariant }}'
    - name: crossgenArg
      value: ''
    - name: LogNamePrefix
      value: TestRunLogs
    - ${{ if eq(parameters.readyToRun, true) }}:
      - name: crossgenArg
        # Switch R2R to use cg2 by default
        value: 'crossgen2'
      - name: LogNamePrefix
        value: TestRunLogs_R2R
    - ${{ if eq(parameters.crossgen2, true) }}:
      - name: crossgenArg
        value: 'crossgen2'
      - name: LogNamePrefix
        value: TestRunLogs_R2R_CG2
    - ${{ if eq(parameters.compositeBuildMode, true) }}:
      - name: crossgenArg
        value: 'composite'
      - name: LogNamePrefix
        value: TestRunLogs_R2R_CG2_Composite

    # Set job timeouts
    #
    # "timeoutPerTestCollectionInMinutes" is the time needed for the "biggest" xUnit test collection to complete.
    # In case xUnit test wrappers get refactored this number should also be adjusted.
    #
    # "timeoutPerTestInMinutes" corresponds to individual test running time. This is implemented by setting
    # the __TestTimeout variable, which is later read by the coreclr xunit test wrapper code (the code in the
    # xunit test dlls that invokes the actual tests).
    #
    # Note that "timeoutInMinutes" is an Azure DevOps Pipelines parameter for a "job" that specifies the
    # total time allowed for a job, and is specified lower down. Make sure you set it properly for any new testGroup.
    #
    # Please note that for Crossgen / Crossgen2 R2R runs, the "test running time" also includes the
    # time needed to compile the test into native code with the Crossgen compiler.
    - name: timeoutPerTestInMinutes
      value: 10
    - name: timeoutPerTestCollectionInMinutes
      value: 30
    - ${{ if in(parameters.testGroup, 'outerloop') }}:
      - name: timeoutPerTestCollectionInMinutes
        value: 120
    - ${{ if eq(parameters.crossgen2, true) }}:
      - name: timeoutPerTestCollectionInMinutes
        value: 90
      - name: timeoutPerTestInMinutes
        value: 30
    - ${{ if in(parameters.testGroup, 'gc-longrunning', 'gc-simulator') }}:
      - name: timeoutPerTestCollectionInMinutes
        value: 360
      # gc reliability may take up to 2 hours to shutdown. Some scenarios have very long iteration times.
      - name: timeoutPerTestInMinutes
        value: 240
    - ${{ if in(parameters.testGroup, 'jitstress', 'jitstress-isas-arm', 'jitstress-isas-x86', 'jitstressregs-x86', 'jitstressregs', 'jitstress2-jitstressregs', 'jitelthookenabled' ) }}:
      - name: timeoutPerTestCollectionInMinutes
        value: 120
      - name: timeoutPerTestInMinutes
        value: 30
    - ${{ if in(parameters.testGroup, 'gcstress0x3-gcstress0xc') }}:
      - name: timeoutPerTestCollectionInMinutes
        value: 240
      - name: timeoutPerTestInMinutes
        value: 60
    - ${{ if in(parameters.testGroup, 'gcstress-extra', 'r2r-extra') }}:
      - name: timeoutPerTestCollectionInMinutes
        value: 300
      - name: timeoutPerTestInMinutes
        value: 90
    - ${{ if eq(parameters.testGroup, 'ilasm') }}:
      # ilasm-ildasm round trip testing runs every test twice, plus runs ilasm and ildasm, so double the 'outerloop' timeout numbers.
      - name: timeoutPerTestInMinutes
        value: 20
      - name: timeoutPerTestCollectionInMinutes
        value: 240
    - ${{ if in(parameters.testGroup, 'clrinterpreter') }}:
      - name: timeoutPerTestCollectionInMinutes
        value: 180
      - name: timeoutPerTestInMinutes
        value: 30
    - ${{ if in(parameters.testGroup, 'pgo') }}:
      - name: timeoutPerTestCollectionInMinutes
        value: 120
    - ${{ if in(parameters.testGroup, 'jit-cfg') }}:
      - name: timeoutPerTestCollectionInMinutes
        value: 120
    - ${{ if eq(parameters.compositeBuildMode, true) }}:
      - name: crossgenArg
        value: 'composite'
    - ${{ if eq(variables['System.TeamProject'], 'internal') }}:
      - group: DotNet-HelixApi-Access
    - ${{ parameters.variables }}

    # TODO: update these numbers as they were determined long ago
    ${{ if eq(parameters.testGroup, 'innerloop') }}:
      timeoutInMinutes: 200
    ${{ if in(parameters.testGroup, 'outerloop', 'jit-experimental', 'pgo', 'jit-cfg') }}:
      timeoutInMinutes: 270
    ${{ if in(parameters.testGroup, 'gc-longrunning', 'gc-simulator') }}:
      timeoutInMinutes: 480
    ${{ if in(parameters.testGroup, 'jitstress', 'jitstress-isas-arm', 'jitstressregs-x86', 'jitstressregs', 'jitstress2-jitstressregs', 'gcstress0x3-gcstress0xc', 'ilasm') }}:
      timeoutInMinutes: 390
    ${{ if in(parameters.testGroup, 'gcstress-extra', 'r2r-extra', 'clrinterpreter') }}:
      timeoutInMinutes: 510
    ${{ if eq(parameters.testGroup, 'jitstress-isas-x86') }}:
      timeoutInMinutes: 960

    steps:

    # Optionally download live-built libraries
    - ${{ if ne(parameters.liveLibrariesBuildConfig, '') }}:
      - template: /eng/pipelines/common/download-artifact-step.yml
        parameters:
          unpackFolder: $(librariesDownloadDir)
          cleanUnpackFolder: false
          artifactFileName: '$(librariesBuildArtifactName)$(archiveExtension)'
          artifactName: '$(librariesBuildArtifactName)'
          displayName: 'live-built libraries'

    # Download and unzip managed test artifacts
    - template: /eng/pipelines/common/download-artifact-step.yml
      parameters:
        unpackFolder: '$(managedTestArtifactRootFolderPath)'
        artifactFileName: '$(managedGenericTestArtifactName).tar.gz'
        artifactName: '$(managedGenericTestArtifactName)'
        displayName: 'generic managed test artifacts'

    # Download product binaries directory
    - template: /eng/pipelines/common/download-artifact-step.yml
      parameters:
        unpackFolder: $(buildProductRootFolderPath)
        artifactFileName: '$(buildProductArtifactName)$(archiveExtension)'
        artifactName: '$(buildProductArtifactName)'
        displayName: 'product build'

    - ${{ if eq(parameters.runtimeFlavor, 'mono') }}:
      # We need to explicitly download CoreCLR for Mono
      - template: /eng/pipelines/common/download-artifact-step.yml
        parameters:
          unpackFolder: $(coreClrProductRootFolderPath)
          artifactFileName: '$(coreClrProductArtifactName)$(archiveExtension)'
          artifactName: '$(coreClrProductArtifactName)'
          displayName: 'CoreCLR product download for Mono'

    # Download and unzip the Microsoft.NET.Sdk.IL package needed for traversing
    # ilproj test projects during copynativeonly.
    - template: /eng/pipelines/common/download-artifact-step.yml
      parameters:
        unpackFolder: '$(microsoftNetSdkIlFolderPath)'
        artifactFileName: '$(microsoftNetSdkIlArtifactName).tar.gz'
        artifactName: '$(microsoftNetSdkIlArtifactName)'
        displayName: 'Microsoft.NET.Sdk.IL package'

    # Download and unzip native test artifacts
    - template: /eng/pipelines/common/download-artifact-step.yml
      parameters:
        unpackFolder: '$(nativeTestArtifactRootFolderPath)'
        artifactFileName: '$(nativeTestArtifactName)$(archiveExtension)'
        artifactName: '$(nativeTestArtifactName)'
        displayName: 'native test artifacts'

    # Publish native test components to test output folder. Sadly we cannot do this
    # during product build (so that we could zip up the files in their final test location
    # and directly unzip them there after download). Unfortunately the logic to copy
    # the native artifacts to the final test folders is dependent on availability of the
    # managed test artifacts.
    - script: $(Build.SourcesDirectory)/src/tests/build$(scriptExt) copynativeonly $(logRootNameArg)Native $(runtimeFlavorArgs) $(crossgenArg) $(buildConfig) $(archType) $(priorityArg) $(librariesOverrideArg)
      displayName: Copy native test components to test output folder

    # Generate test wrappers. This is the step that examines issues.targets to exclude tests.
    - script: $(Build.SourcesDirectory)/src/tests/build$(scriptExt) buildtestwrappersonly $(logRootNameArg)Wrappers $(runtimeFlavorArgs) $(runtimeVariantArg) $(crossgenArg) $(buildConfig) $(archType) $(crossArg) $(priorityArg) $(librariesOverrideArg)
      displayName: Generate test wrappers

    # Compose the Core_Root folder containing all artifacts needed for running
    # CoreCLR tests. This step also compiles the framework using Crossgen / Crossgen2
    # in ReadyToRun jobs.
    - script: $(Build.SourcesDirectory)/src/tests/build$(scriptExt) generatelayoutonly $(logRootNameArg)Layout $(runtimeFlavorArgs) $(crossgenArg) $(buildConfig) $(archType) $(crossArg) $(priorityArg) $(librariesOverrideArg)
      displayName: Generate CORE_ROOT

    # Build a Mono LLVM AOT cross-compiler for non-amd64 targets (in this case, just arm64)
    - ${{ if and(eq(parameters.runtimeFlavor, 'mono'), or(eq(parameters.runtimeVariant, 'llvmaot'), eq(parameters.runtimeVariant, 'llvmfullaot'))) }}:
      - ${{ if eq(parameters.archType, 'arm64') }}:
        - script: ./build.sh -subset mono -c $(buildConfigUpper) -arch $(archType) /p:BuildMonoAotCrossCompiler=true /p:BuildMonoAotCrossCompilerOnly=true /p:MonoLibClang="/usr/lib/llvm-9/lib/libclang-9.so.1" /p:MonoAOTEnableLLVM=true /p:MonoAOTLLVMUseCxx11Abi=true
          displayName: "Build Mono LLVM AOT cross compiler"

    - ${{ if and(eq(parameters.runtimeFlavor, 'mono'), or(eq(parameters.runtimeVariant, 'llvmaot'), eq(parameters.runtimeVariant, 'llvmfullaot'))) }}:
      - ${{ if eq(parameters.archType, 'x64') }}:
        - script: $(Build.SourcesDirectory)/src/tests/build$(scriptExt) $(logRootNameArg)MonoAot $(monoAotBuildshCommand) $(buildConfig) $(archType) $(runtimeVariantArg)
          displayName: "LLVM AOT compile CoreCLR tests"
      - ${{ if eq(parameters.archType, 'arm64') }}:
        - script: $(Build.SourcesDirectory)/src/tests/build$(scriptExt) $(logRootNameArg)MonoAot $(monoAotBuildshCommand) $(buildConfig) $(archType) cross $(runtimeVariantArg) -maxcpucount:2
          displayName: "LLVM AOT cross-compile CoreCLR tests"
          env:
            __MonoToolPrefix: aarch64-linux-gnu-

    # Send tests to Helix
    - template: /eng/pipelines/common/templates/runtimes/send-to-helix-step.yml
      parameters:
        displayName: Send tests to Helix
        buildConfig: $(buildConfigUpper)
        archType: ${{ parameters.archType }}
        osGroup: ${{ parameters.osGroup }}
        osSubgroup: ${{ parameters.osSubgroup }}
        runtimeFlavorDisplayName: ${{ parameters.runtimeFlavorDisplayName }}
        shouldContinueOnError: ${{ parameters.shouldContinueOnError }}
        runtimeVariant: ${{ parameters.runtimeVariant }}
        ${{ if eq(variables['System.TeamProject'], 'public') }}:
          creator: $(Build.DefinitionName)

        helixBuild: $(Build.BuildNumber)
        helixSource: $(_HelixSource)

        # REVIEW: not sure why "cli" is part of the names here. Leave it for the ones that already had it,
        # but don't add it to new ones.
        ${{ if eq(parameters.readyToRun, true) }}:
          helixType: 'test/functional/r2r/cli/'
        ${{ if ne(parameters.readyToRun, true) }}:
          helixType: 'test/functional/cli/'

        helixQueues: ${{ parameters.helixQueues }}

        # This tests whether an array is empty
        ${{ if eq(join('', parameters.helixQueues), '') }}:
          condition: false

        publishTestResults: true

        timeoutPerTestInMinutes: $(timeoutPerTestInMinutes)
        timeoutPerTestCollectionInMinutes: $(timeoutPerTestCollectionInMinutes)

        runCrossGen: ${{ and(eq(parameters.readyToRun, true), ne(parameters.crossgen2, true)) }}
        runCrossGen2: ${{ and(eq(parameters.readyToRun, true), eq(parameters.crossgen2, true)) }}
        ${{ if and(ne(parameters.testGroup, 'innerloop'), eq(parameters.runtimeFlavor, 'coreclr')) }}:
          runPALTestsDir: '$(coreClrProductRootFolderPath)/paltests'
        compositeBuildMode: ${{ parameters.compositeBuildMode }}
        runInUnloadableContext: ${{ parameters.runInUnloadableContext }}
        tieringTest: ${{ parameters.tieringTest }}

        ${{ if eq(variables['System.TeamProject'], 'internal') }}:
          # Access token variable for internal project from the
          # DotNet-HelixApi-Access variable group
          helixAccessToken: $(HelixApiAccessToken)

        helixProjectArguments: '$(Build.SourcesDirectory)/src/tests/Common/helixpublishwitharcade.proj'

        ${{ if in(parameters.testGroup, 'innerloop', 'outerloop') }}:
          scenarios:
          - normal
          - ${{ if ne(parameters.runtimeFlavor, 'mono') }}:
            # tiered compilation isn't done on mono yet
            - no_tiered_compilation
        ${{ if in(parameters.testGroup, 'jitstress') }}:
          scenarios:
          - jitminopts
          - jitstress1
          - jitstress1_tiered
          - jitstress2
          - jitstress2_tiered
          - zapdisable
          - tailcallstress
        ${{ if in(parameters.testGroup, 'jitstress-isas-arm') }}:
          scenarios:
          - jitstress_isas_incompletehwintrinsic
          - jitstress_isas_nohwintrinsic
          - jitstress_isas_nohwintrinsic_nosimd
          - jitstress_isas_nosimd
        ${{ if in(parameters.testGroup, 'jitstress-isas-x86') }}:
          scenarios:
          - jitstress_isas_incompletehwintrinsic
          - jitstress_isas_nohwintrinsic
          - jitstress_isas_nohwintrinsic_nosimd
          - jitstress_isas_nosimd
          - jitstress_isas_x86_noaes
          - jitstress_isas_x86_noavx
          - jitstress_isas_x86_noavx2
          - jitstress_isas_x86_nobmi1
          - jitstress_isas_x86_nobmi2
          - jitstress_isas_x86_nofma
          - jitstress_isas_x86_nohwintrinsic
          - jitstress_isas_x86_nolzcnt
          - jitstress_isas_x86_nopclmulqdq
          - jitstress_isas_x86_nopopcnt
          - jitstress_isas_x86_nosse
          - jitstress_isas_x86_nosse2
          - jitstress_isas_x86_nosse3
          - jitstress_isas_x86_nosse3_4
          - jitstress_isas_x86_nosse41
          - jitstress_isas_x86_nosse42
          - jitstress_isas_x86_nossse3
          - jitstress_isas_1_x86_noaes
          - jitstress_isas_1_x86_noavx
          - jitstress_isas_1_x86_noavx2
          - jitstress_isas_1_x86_nobmi1
          - jitstress_isas_1_x86_nobmi2
          - jitstress_isas_1_x86_nofma
          - jitstress_isas_1_x86_nohwintrinsic
          - jitstress_isas_1_x86_nolzcnt
          - jitstress_isas_1_x86_nopclmulqdq
          - jitstress_isas_1_x86_nopopcnt
          - jitstress_isas_1_x86_nosse
          - jitstress_isas_1_x86_nosse2
          - jitstress_isas_1_x86_nosse3
          - jitstress_isas_1_x86_nosse3_4
          - jitstress_isas_1_x86_nosse41
          - jitstress_isas_1_x86_nosse42
          - jitstress_isas_1_x86_nossse3
          - jitstress_isas_2_x86_noaes
          - jitstress_isas_2_x86_noavx
          - jitstress_isas_2_x86_noavx2
          - jitstress_isas_2_x86_nobmi1
          - jitstress_isas_2_x86_nobmi2
          - jitstress_isas_2_x86_nofma
          - jitstress_isas_2_x86_nohwintrinsic
          - jitstress_isas_2_x86_nolzcnt
          - jitstress_isas_2_x86_nopclmulqdq
          - jitstress_isas_2_x86_nopopcnt
          - jitstress_isas_2_x86_nosse
          - jitstress_isas_2_x86_nosse2
          - jitstress_isas_2_x86_nosse3
          - jitstress_isas_2_x86_nosse3_4
          - jitstress_isas_2_x86_nosse41
          - jitstress_isas_2_x86_nosse42
          - jitstress_isas_2_x86_nossse3
        ${{ if in(parameters.testGroup, 'jitstressregs-x86') }}:
          scenarios:
          - jitstressregs1_x86_noavx
          - jitstressregs2_x86_noavx
          - jitstressregs3_x86_noavx
          - jitstressregs4_x86_noavx
          - jitstressregs8_x86_noavx
          - jitstressregs0x10_x86_noavx
          - jitstressregs0x80_x86_noavx
          - jitstressregs0x1000_x86_noavx
        ${{ if in(parameters.testGroup, 'jitstressregs' ) }}:
          scenarios:
          - jitstressregs1
          - jitstressregs2
          - jitstressregs3
          - jitstressregs4
          - jitstressregs8
          - jitstressregs0x10
          - jitstressregs0x80
          - jitstressregs0x1000
        ${{ if in(parameters.testGroup, 'jitstress2-jitstressregs') }}:
          scenarios:
          - jitstress2_jitstressregs1
          - jitstress2_jitstressregs2
          - jitstress2_jitstressregs3
          - jitstress2_jitstressregs4
          - jitstress2_jitstressregs8
          - jitstress2_jitstressregs0x10
          - jitstress2_jitstressregs0x80
          - jitstress2_jitstressregs0x1000
        ${{ if in(parameters.testGroup, 'gcstress0x3-gcstress0xc') }}:
          scenarios:
          - gcstress0x3
          - gcstress0xc
        ${{ if in(parameters.testGroup, 'gcstress-extra') }}:
          scenarios:
          - heapverify1
          - gcstress0xc_zapdisable
          - gcstress0xc_zapdisable_jitstress2
          - gcstress0xc_zapdisable_heapverify1
          - gcstress0xc_jitstress1
          - gcstress0xc_jitstress2
          - gcstress0xc_tailcallstress
          - gcstress0xc_jitminopts_heapverify1
        ${{ if in(parameters.testGroup, 'r2r-extra') }}:
          scenarios:
          - jitstress1
          - jitstress2
          - jitstress1_tiered
          - jitstress2_tiered
          - jitstressregs1
          - jitstressregs2
          - jitstressregs3
          - jitstressregs4
          - jitstressregs8
          - jitstressregs0x10
          - jitstressregs0x80
          - jitstressregs0x1000
          - jitminopts
          - forcerelocs
          - gcstress0xf
        ${{ if in(parameters.testGroup, 'pgo') }}:
          scenarios:
          - nopgo
          - defaultpgo
          - dynamicpgo
          - fullpgo
          - fullpgo_random_gdv
          - fullpgo_random_edge
          - fullpgo_random_gdv_edge
        ${{ if in(parameters.testGroup, 'gc-longrunning') }}:
          longRunningGcTests: true
          scenarios:
          - normal
        ${{ if in(parameters.testGroup, 'gc-simulator') }}:
          gcSimulatorTests: true
          scenarios:
          - normal
        ${{ if in(parameters.testGroup, 'gc-standalone') }}:
          scenarios:
          - gcstandalone
        ${{ if in(parameters.testGroup, 'gc-standalone-server') }}:
          scenarios:
          - gcstandaloneserver
        ${{ if in(parameters.testGroup, 'jitelthookenabled') }}:
          scenarios:
          - jitelthookenabled
          - jitelthookenabled_tiered
        ${{ if in(parameters.testGroup, 'jit-experimental') }}:
          scenarios:
          - jitosr
          - jitosr_stress
          - jitosr_pgo
          - jitosr_stress_random
          - jitpartialcompilation
          - jitpartialcompilation_osr
          - jitpartialcompilation_osr_pgo
          - jitobjectstackallocation
        ${{ if in(parameters.testGroup, 'jit-cfg') }}:
          scenarios:
          - jitcfg
          - jitcfg_dispatcher_always
          - jitcfg_dispatcher_never
          - jitcfg_gcstress0xc
        ${{ if in(parameters.testGroup, 'ilasm') }}:
          scenarios:
          - ilasmroundtrip
        ${{ if in(parameters.testGroup, 'clrinterpreter') }}:
          scenarios:
          - clrinterpreter

    # Publish Logs
    - task: PublishPipelineArtifact@1
      displayName: Publish Logs
      inputs:
        targetPath: $(Build.SourcesDirectory)/artifacts/log
        artifactName: '${{ parameters.runtimeFlavor }}_${{ parameters.runtimeVariant }}_$(LogNamePrefix)_$(osGroup)$(osSubgroup)_$(archType)_$(buildConfig)_${{ parameters.testGroup }}'
      continueOnError: true
      condition: always()
parameters: buildConfig: '' archType: '' osGroup: '' osSubgroup: '' container: '' testGroup: '' crossBuild: false crossrootfsDir: '' readyToRun: false liveLibrariesBuildConfig: '' crossgen2: false compositeBuildMode: false helixQueues: '' condition: true stagedBuild: false displayNameArgs: '' runInUnloadableContext: false tieringTest: false runtimeVariant: '' variables: {} pool: '' runtimeFlavor: 'coreclr' runtimeFlavorDisplayName: 'CoreCLR' shouldContinueOnError: false dependsOn: [] dependOnEvaluatePaths: false ### Test run job ### Each test run job depends on a corresponding test build job with the same ### buildConfig and archType. jobs: - template: /eng/pipelines/${{ parameters.runtimeFlavor }}/templates/xplat-pipeline-job.yml parameters: buildConfig: ${{ parameters.buildConfig }} archType: ${{ parameters.archType }} osGroup: ${{ parameters.osGroup }} osSubgroup: ${{ parameters.osSubgroup }} container: ${{ parameters.container }} testGroup: ${{ parameters.testGroup }} crossBuild: ${{ parameters.crossBuild }} crossrootfsDir: ${{ parameters.crossrootfsDir }} stagedBuild: ${{ parameters.stagedBuild }} liveLibrariesBuildConfig: ${{ parameters.liveLibrariesBuildConfig }} helixType: 'build/tests/' runtimeVariant: ${{ parameters.runtimeVariant }} pool: ${{ parameters.pool }} condition: ${{ parameters.condition }} dependOnEvaluatePaths: ${{ parameters.dependOnEvaluatePaths }} # Test jobs should continue on error for internal builds ${{ if eq(variables['System.TeamProject'], 'internal') }}: continueOnError: true ${{ if ne(parameters.dependsOn[0], '') }}: dependsOn: ${{ parameters.dependsOn }} ${{ if eq(parameters.dependsOn[0], '') }}: dependsOn: - ${{ if in(parameters.testGroup, 'innerloop', 'clrinterpreter') }}: - '${{ parameters.runtimeFlavor }}_common_test_build_p0_AnyOS_AnyCPU_${{parameters.buildConfig }}' - ${{ if notIn(parameters.testGroup, 'innerloop', 'clrinterpreter') }}: - '${{ parameters.runtimeFlavor }}_common_test_build_p1_AnyOS_AnyCPU_${{parameters.buildConfig }}' - ${{ if ne(parameters.stagedBuild, true) }}: - ${{ if or( eq(parameters.runtimeVariant, 'minijit'), eq(parameters.runtimeVariant, 'monointerpreter'), eq(parameters.runtimeVariant, 'llvmaot'), eq(parameters.runtimeVariant, 'llvmfullaot')) }}: # This is needed for creating a CORE_ROOT in the current design. 
- ${{ format('coreclr_{0}_product_build_{1}{2}_{3}_{4}', '', parameters.osGroup, parameters.osSubgroup, parameters.archType, parameters.buildConfig) }} - ${{ if or( eq(parameters.runtimeVariant, 'minijit'), eq(parameters.runtimeVariant, 'monointerpreter')) }} : # minijit and mono interpreter runtimevariants do not require any special build of the runtime - ${{ format('{0}_{1}_product_build_{2}{3}_{4}_{5}', parameters.runtimeFlavor, '', parameters.osGroup, parameters.osSubgroup, parameters.archType, parameters.buildConfig) }} - ${{ if not(or(eq(parameters.runtimeVariant, 'minijit'), eq(parameters.runtimeVariant, 'monointerpreter'))) }}: - ${{ if eq(parameters.runtimeVariant, 'llvmfullaot') }}: - ${{ format('{0}_llvmaot_product_build_{1}{2}_{3}_{4}', parameters.runtimeFlavor, parameters.osGroup, parameters.osSubgroup, parameters.archType, parameters.buildConfig) }} - ${{ if ne(parameters.runtimeVariant, 'llvmfullaot') }}: - ${{ format('{0}_{1}_product_build_{2}{3}_{4}_{5}', parameters.runtimeFlavor, parameters.runtimeVariant, parameters.osGroup, parameters.osSubgroup, parameters.archType, parameters.buildConfig) }} - ${{ if ne(parameters.liveLibrariesBuildConfig, '') }}: - ${{ format('libraries_build_{0}{1}_{2}_{3}', parameters.osGroup, parameters.osSubgroup, parameters.archType, parameters.liveLibrariesBuildConfig) }} # Compute job name from template parameters ${{ if in(parameters.testGroup, 'innerloop', 'clrinterpreter') }}: name: 'run_test_p0_${{ parameters.runtimeFlavor }}${{ parameters.runtimeVariant }}_${{ parameters.displayNameArgs }}_${{ parameters.osGroup }}${{ parameters.osSubgroup }}_${{ parameters.archType }}_${{ parameters.buildConfig }}' displayName: '${{ parameters.runtimeFlavorDisplayName }} ${{ parameters.runtimeVariant}} Pri0 Runtime Tests Run ${{ parameters.displayNameArgs }} ${{ parameters.osGroup }}${{ parameters.osSubgroup }} ${{ parameters.archType }} ${{ parameters.buildConfig }}' ${{ if notIn(parameters.testGroup, 'innerloop', 'clrinterpreter') }}: name: 'run_test_p1_${{ parameters.displayNameArgs }}_${{ parameters.osGroup }}${{ parameters.osSubgroup }}_${{ parameters.archType }}_${{ parameters.buildConfig }}' displayName: '${{ parameters.runtimeFlavorDisplayName }} ${{ parameters.runtimeVariant }} Pri1 Runtime Tests Run ${{ parameters.displayNameArgs }} ${{ parameters.osGroup }}${{ parameters.osSubgroup }} ${{ parameters.archType }} ${{ parameters.buildConfig }}' variables: - name: monoAotBuildshCommand value: '' - ${{ if eq(parameters.runtimeVariant, 'llvmaot') }}: - name: monoAotBuildshCommand value: 'mono_aot' - ${{ if eq(parameters.runtimeVariant, 'llvmfullaot') }}: - name: monoAotBuildshCommand value: 'mono_fullaot' - name: runtimeFlavorArgs value: '' - ${{ if eq(parameters.runtimeFlavor, 'mono') }}: - name: runtimeFlavorArgs value: '-mono' - name: runtimeVariantArg value: '' - ${{ if ne(parameters.runtimeVariant, '') }}: - name: runtimeVariantArg value: '/p:RuntimeVariant=${{ parameters.runtimeVariant }}' - name: crossgenArg value: '' - name: LogNamePrefix value: TestRunLogs - ${{ if eq(parameters.readyToRun, true) }}: - name: crossgenArg # Switch R2R to use cg2 by default value: 'crossgen2' - name: LogNamePrefix value: TestRunLogs_R2R - ${{ if eq(parameters.crossgen2, true) }}: - name: crossgenArg value: 'crossgen2' - name: LogNamePrefix value: TestRunLogs_R2R_CG2 - ${{ if eq(parameters.compositeBuildMode, true) }}: - name: crossgenArg value: 'composite' - name: LogNamePrefix value: TestRunLogs_R2R_CG2_Composite # Set job timeouts # # 
"timeoutPerTestCollectionInMinutes" is the time needed for the "biggest" xUnit test collection to complete. # In case xUnit test wrappers get refactored this number should also be adjusted. # # "timeoutPerTestInMinutes" corresponds to individual test running time. This is implemented by setting # the __TestTimeout variable, which is later read by the coreclr xunit test wrapper code (the code in the # xunit test dlls that invokes the actual tests). # # Note that "timeoutInMinutes" is an Azure DevOps Pipelines parameter for a "job" that specifies the # total time allowed for a job, and is specified lower down. Make sure you set it properly for any new testGroup. # # Please note that for Crossgen / Crossgen2 R2R runs, the "test running time" also includes the # time needed to compile the test into native code with the Crossgen compiler. - name: timeoutPerTestInMinutes value: 10 - name: timeoutPerTestCollectionInMinutes value: 30 - ${{ if in(parameters.testGroup, 'outerloop') }}: - name: timeoutPerTestCollectionInMinutes value: 120 - ${{ if eq(parameters.crossgen2, true) }}: - name: timeoutPerTestCollectionInMinutes value: 90 - name: timeoutPerTestInMinutes value: 30 - ${{ if in(parameters.testGroup, 'gc-longrunning', 'gc-simulator') }}: - name: timeoutPerTestCollectionInMinutes value: 360 # gc reliability may take up to 2 hours to shutdown. Some scenarios have very long iteration times. - name: timeoutPerTestInMinutes value: 240 - ${{ if in(parameters.testGroup, 'jitstress', 'jitstress-isas-arm', 'jitstress-isas-x86', 'jitstressregs-x86', 'jitstressregs', 'jitstress2-jitstressregs', 'jitelthookenabled' ) }}: - name: timeoutPerTestCollectionInMinutes value: 120 - name: timeoutPerTestInMinutes value: 30 - ${{ if in(parameters.testGroup, 'gcstress0x3-gcstress0xc') }}: - name: timeoutPerTestCollectionInMinutes value: 240 - name: timeoutPerTestInMinutes value: 60 - ${{ if in(parameters.testGroup, 'gcstress-extra', 'r2r-extra') }}: - name: timeoutPerTestCollectionInMinutes value: 300 - name: timeoutPerTestInMinutes value: 90 - ${{ if eq(parameters.testGroup, 'ilasm') }}: # ilasm-ildasm round trip testing runs every test twice, plus runs ilasm and ildasm, so double the 'outerloop' timeout numbers. 
- name: timeoutPerTestInMinutes value: 20 - name: timeoutPerTestCollectionInMinutes value: 240 - ${{ if in(parameters.testGroup, 'clrinterpreter') }}: - name: timeoutPerTestCollectionInMinutes value: 180 - name: timeoutPerTestInMinutes value: 30 - ${{ if in(parameters.testGroup, 'pgo') }}: - name: timeoutPerTestCollectionInMinutes value: 120 - ${{ if in(parameters.testGroup, 'jit-cfg') }}: - name: timeoutPerTestCollectionInMinutes value: 120 - ${{ if eq(parameters.compositeBuildMode, true) }}: - name: crossgenArg value: 'composite' - ${{ if eq(variables['System.TeamProject'], 'internal') }}: - group: DotNet-HelixApi-Access - ${{ parameters.variables }} # TODO: update these numbers as they were determined long ago ${{ if eq(parameters.testGroup, 'innerloop') }}: timeoutInMinutes: 200 ${{ if in(parameters.testGroup, 'outerloop', 'jit-experimental', 'pgo', 'jit-cfg') }}: timeoutInMinutes: 270 ${{ if in(parameters.testGroup, 'gc-longrunning', 'gc-simulator') }}: timeoutInMinutes: 480 ${{ if in(parameters.testGroup, 'jitstress', 'jitstress-isas-arm', 'jitstressregs-x86', 'jitstressregs', 'jitstress2-jitstressregs', 'gcstress0x3-gcstress0xc', 'ilasm') }}: timeoutInMinutes: 390 ${{ if in(parameters.testGroup, 'gcstress-extra', 'r2r-extra', 'clrinterpreter') }}: timeoutInMinutes: 510 ${{ if eq(parameters.testGroup, 'jitstress-isas-x86') }}: timeoutInMinutes: 960 steps: # Optionally download live-built libraries - ${{ if ne(parameters.liveLibrariesBuildConfig, '') }}: - template: /eng/pipelines/common/download-artifact-step.yml parameters: unpackFolder: $(librariesDownloadDir) cleanUnpackFolder: false artifactFileName: '$(librariesBuildArtifactName)$(archiveExtension)' artifactName: '$(librariesBuildArtifactName)' displayName: 'live-built libraries' # Download and unzip managed test artifacts - template: /eng/pipelines/common/download-artifact-step.yml parameters: unpackFolder: '$(managedTestArtifactRootFolderPath)' artifactFileName: '$(managedGenericTestArtifactName).tar.gz' artifactName: '$(managedGenericTestArtifactName)' displayName: 'generic managed test artifacts' # Download product binaries directory - template: /eng/pipelines/common/download-artifact-step.yml parameters: unpackFolder: $(buildProductRootFolderPath) artifactFileName: '$(buildProductArtifactName)$(archiveExtension)' artifactName: '$(buildProductArtifactName)' displayName: 'product build' - ${{ if eq(parameters.runtimeFlavor, 'mono') }}: # We need to explictly download CoreCLR for Mono - template: /eng/pipelines/common/download-artifact-step.yml parameters: unpackFolder: $(coreClrProductRootFolderPath) artifactFileName: '$(coreClrProductArtifactName)$(archiveExtension)' artifactName: '$(coreClrProductArtifactName)' displayName: 'CoreCLR product download for Mono' # Download and unzip the Microsoft.NET.Sdk.IL package needed for traversing # ilproj test projects during copynativeonly. - template: /eng/pipelines/common/download-artifact-step.yml parameters: unpackFolder: '$(microsoftNetSdkIlFolderPath)' artifactFileName: '$(microsoftNetSdkIlArtifactName).tar.gz' artifactName: '$(microsoftNetSdkIlArtifactName)' displayName: 'Microsoft.NET.Sdk.IL package' # Download and unzip native test artifacts - template: /eng/pipelines/common/download-artifact-step.yml parameters: unpackFolder: '$(nativeTestArtifactRootFolderPath)' artifactFileName: '$(nativeTestArtifactName)$(archiveExtension)' artifactName: '$(nativeTestArtifactName)' displayName: 'native test artifacts' # Publish native test components to test output folder. 
# Sadly we cannot do this during product build (so that we could zip up the files
# in their final test location and directly unzip them there after download).
# Unfortunately the logic to copy the native artifacts to the final test folders
# is dependent on availability of the managed test artifacts.
- script: $(Build.SourcesDirectory)/src/tests/build$(scriptExt) copynativeonly $(logRootNameArg)Native $(runtimeFlavorArgs) $(crossgenArg) $(buildConfig) $(archType) $(priorityArg) $(librariesOverrideArg)
  displayName: Copy native test components to test output folder

# Generate test wrappers. This is the step that examines issues.targets to exclude tests.
- script: $(Build.SourcesDirectory)/src/tests/build$(scriptExt) buildtestwrappersonly $(logRootNameArg)Wrappers $(runtimeFlavorArgs) $(runtimeVariantArg) $(crossgenArg) $(buildConfig) $(archType) $(crossArg) $(priorityArg) $(librariesOverrideArg)
  displayName: Generate test wrappers

# Compose the Core_Root folder containing all artifacts needed for running
# CoreCLR tests. This step also compiles the framework using Crossgen / Crossgen2
# in ReadyToRun jobs.
- script: $(Build.SourcesDirectory)/src/tests/build$(scriptExt) generatelayoutonly $(logRootNameArg)Layout $(runtimeFlavorArgs) $(crossgenArg) $(buildConfig) $(archType) $(crossArg) $(priorityArg) $(librariesOverrideArg)
  displayName: Generate CORE_ROOT

# Build a Mono LLVM AOT cross-compiler for non-amd64 targets (in this case, just arm64)
- ${{ if and(eq(parameters.runtimeFlavor, 'mono'), or(eq(parameters.runtimeVariant, 'llvmaot'), eq(parameters.runtimeVariant, 'llvmfullaot'))) }}:
  - ${{ if eq(parameters.archType, 'arm64') }}:
    - script: ./build.sh -subset mono -c $(buildConfigUpper) -arch $(archType) /p:BuildMonoAotCrossCompiler=true /p:BuildMonoAotCrossCompilerOnly=true /p:MonoLibClang="/usr/lib/llvm-9/lib/libclang-9.so.1" /p:MonoAOTEnableLLVM=true /p:MonoAOTLLVMUseCxx11Abi=true
      displayName: "Build Mono LLVM AOT cross compiler"

- ${{ if and(eq(parameters.runtimeFlavor, 'mono'), or(eq(parameters.runtimeVariant, 'llvmaot'), eq(parameters.runtimeVariant, 'llvmfullaot'))) }}:
  - ${{ if eq(parameters.archType, 'x64') }}:
    - script: $(Build.SourcesDirectory)/src/tests/build$(scriptExt) $(logRootNameArg)MonoAot $(monoAotBuildshCommand) $(buildConfig) $(archType) $(runtimeVariantArg)
      displayName: "LLVM AOT compile CoreCLR tests"
  - ${{ if eq(parameters.archType, 'arm64') }}:
    - script: $(Build.SourcesDirectory)/src/tests/build$(scriptExt) $(logRootNameArg)MonoAot $(monoAotBuildshCommand) $(buildConfig) $(archType) cross $(runtimeVariantArg) -maxcpucount:2
      displayName: "LLVM AOT cross-compile CoreCLR tests"
      env:
        __MonoToolPrefix: aarch64-linux-gnu-

# Send tests to Helix
- template: /eng/pipelines/common/templates/runtimes/send-to-helix-step.yml
  parameters:
    displayName: Send tests to Helix
    buildConfig: $(buildConfigUpper)
    archType: ${{ parameters.archType }}
    osGroup: ${{ parameters.osGroup }}
    osSubgroup: ${{ parameters.osSubgroup}}
    runtimeFlavorDisplayName: ${{ parameters.runtimeFlavorDisplayName }}
    shouldContinueOnError: ${{ parameters.shouldContinueOnError }}
    runtimeVariant: ${{ parameters.runtimeVariant }}

    ${{ if eq(variables['System.TeamProject'], 'public') }}:
      creator: $(Build.DefinitionName)

    helixBuild: $(Build.BuildNumber)
    helixSource: $(_HelixSource)

    # REVIEW: not sure why "cli" is part of the names here. Leave it for the ones that already had it,
    # but don't add it to new ones.
    ${{ if eq(parameters.readyToRun, true) }}:
      helixType: 'test/functional/r2r/cli/'
    ${{ if ne(parameters.readyToRun, true) }}:
      helixType: 'test/functional/cli/'

    helixQueues: ${{ parameters.helixQueues }}

    # This tests whether an array is empty
    ${{ if eq(join('', parameters.helixQueues), '') }}:
      condition: false

    publishTestResults: true

    timeoutPerTestInMinutes: $(timeoutPerTestInMinutes)
    timeoutPerTestCollectionInMinutes: $(timeoutPerTestCollectionInMinutes)

    runCrossGen: ${{ and(eq(parameters.readyToRun, true), ne(parameters.crossgen2, true)) }}
    runCrossGen2: ${{ and(eq(parameters.readyToRun, true), eq(parameters.crossgen2, true)) }}
    ${{ if and(ne(parameters.testGroup, 'innerloop'), eq(parameters.runtimeFlavor, 'coreclr')) }}:
      runPALTestsDir: '$(coreClrProductRootFolderPath)/paltests'
    compositeBuildMode: ${{ parameters.compositeBuildMode }}
    runInUnloadableContext: ${{ parameters.runInUnloadableContext }}
    tieringTest: ${{ parameters.tieringTest }}

    ${{ if eq(variables['System.TeamProject'], 'internal') }}:
      # Access token variable for internal project from the
      # DotNet-HelixApi-Access variable group
      helixAccessToken: $(HelixApiAccessToken)

    helixProjectArguments: '$(Build.SourcesDirectory)/src/tests/Common/helixpublishwitharcade.proj'

    ${{ if in(parameters.testGroup, 'innerloop', 'outerloop') }}:
      scenarios:
      - normal
      - ${{ if ne(parameters.runtimeFlavor, 'mono') }}: # tiered compilation isn't done on mono yet
        - no_tiered_compilation
    ${{ if in(parameters.testGroup, 'jitstress') }}:
      scenarios:
      - jitminopts
      - jitstress1
      - jitstress1_tiered
      - jitstress2
      - jitstress2_tiered
      - zapdisable
      - tailcallstress
    ${{ if in(parameters.testGroup, 'jitstress-isas-arm') }}:
      scenarios:
      - jitstress_isas_incompletehwintrinsic
      - jitstress_isas_nohwintrinsic
      - jitstress_isas_nohwintrinsic_nosimd
      - jitstress_isas_nosimd
    ${{ if in(parameters.testGroup, 'jitstress-isas-x86') }}:
      scenarios:
      - jitstress_isas_incompletehwintrinsic
      - jitstress_isas_nohwintrinsic
      - jitstress_isas_nohwintrinsic_nosimd
      - jitstress_isas_nosimd
      - jitstress_isas_x86_noaes
      - jitstress_isas_x86_noavx
      - jitstress_isas_x86_noavx2
      - jitstress_isas_x86_nobmi1
      - jitstress_isas_x86_nobmi2
      - jitstress_isas_x86_nofma
      - jitstress_isas_x86_nohwintrinsic
      - jitstress_isas_x86_nolzcnt
      - jitstress_isas_x86_nopclmulqdq
      - jitstress_isas_x86_nopopcnt
      - jitstress_isas_x86_nosse
      - jitstress_isas_x86_nosse2
      - jitstress_isas_x86_nosse3
      - jitstress_isas_x86_nosse3_4
      - jitstress_isas_x86_nosse41
      - jitstress_isas_x86_nosse42
      - jitstress_isas_x86_nossse3
      - jitstress_isas_1_x86_noaes
      - jitstress_isas_1_x86_noavx
      - jitstress_isas_1_x86_noavx2
      - jitstress_isas_1_x86_nobmi1
      - jitstress_isas_1_x86_nobmi2
      - jitstress_isas_1_x86_nofma
      - jitstress_isas_1_x86_nohwintrinsic
      - jitstress_isas_1_x86_nolzcnt
      - jitstress_isas_1_x86_nopclmulqdq
      - jitstress_isas_1_x86_nopopcnt
      - jitstress_isas_1_x86_nosse
      - jitstress_isas_1_x86_nosse2
      - jitstress_isas_1_x86_nosse3
      - jitstress_isas_1_x86_nosse3_4
      - jitstress_isas_1_x86_nosse41
      - jitstress_isas_1_x86_nosse42
      - jitstress_isas_1_x86_nossse3
      - jitstress_isas_2_x86_noaes
      - jitstress_isas_2_x86_noavx
      - jitstress_isas_2_x86_noavx2
      - jitstress_isas_2_x86_nobmi1
      - jitstress_isas_2_x86_nobmi2
      - jitstress_isas_2_x86_nofma
      - jitstress_isas_2_x86_nohwintrinsic
      - jitstress_isas_2_x86_nolzcnt
      - jitstress_isas_2_x86_nopclmulqdq
      - jitstress_isas_2_x86_nopopcnt
      - jitstress_isas_2_x86_nosse
      - jitstress_isas_2_x86_nosse2
      - jitstress_isas_2_x86_nosse3
      - jitstress_isas_2_x86_nosse3_4
      - jitstress_isas_2_x86_nosse41
      - jitstress_isas_2_x86_nosse42
      - jitstress_isas_2_x86_nossse3
    ${{ if in(parameters.testGroup, 'jitstressregs-x86') }}:
      scenarios:
      - jitstressregs1_x86_noavx
      - jitstressregs2_x86_noavx
      - jitstressregs3_x86_noavx
      - jitstressregs4_x86_noavx
      - jitstressregs8_x86_noavx
      - jitstressregs0x10_x86_noavx
      - jitstressregs0x80_x86_noavx
      - jitstressregs0x1000_x86_noavx
    ${{ if in(parameters.testGroup, 'jitstressregs' ) }}:
      scenarios:
      - jitstressregs1
      - jitstressregs2
      - jitstressregs3
      - jitstressregs4
      - jitstressregs8
      - jitstressregs0x10
      - jitstressregs0x80
      - jitstressregs0x1000
    ${{ if in(parameters.testGroup, 'jitstress2-jitstressregs') }}:
      scenarios:
      - jitstress2_jitstressregs1
      - jitstress2_jitstressregs2
      - jitstress2_jitstressregs3
      - jitstress2_jitstressregs4
      - jitstress2_jitstressregs8
      - jitstress2_jitstressregs0x10
      - jitstress2_jitstressregs0x80
      - jitstress2_jitstressregs0x1000
    ${{ if in(parameters.testGroup, 'gcstress0x3-gcstress0xc') }}:
      scenarios:
      - gcstress0x3
      - gcstress0xc
    ${{ if in(parameters.testGroup, 'gcstress-extra') }}:
      scenarios:
      - heapverify1
      - gcstress0xc_zapdisable
      - gcstress0xc_zapdisable_jitstress2
      - gcstress0xc_zapdisable_heapverify1
      - gcstress0xc_jitstress1
      - gcstress0xc_jitstress2
      - gcstress0xc_tailcallstress
      - gcstress0xc_jitminopts_heapverify1
    ${{ if in(parameters.testGroup, 'r2r-extra') }}:
      scenarios:
      - jitstress1
      - jitstress2
      - jitstress1_tiered
      - jitstress2_tiered
      - jitstressregs1
      - jitstressregs2
      - jitstressregs3
      - jitstressregs4
      - jitstressregs8
      - jitstressregs0x10
      - jitstressregs0x80
      - jitstressregs0x1000
      - jitminopts
      - forcerelocs
      - gcstress0xf
    ${{ if in(parameters.testGroup, 'pgo') }}:
      scenarios:
      - nopgo
      - defaultpgo
      - dynamicpgo
      - fullpgo
      - fullpgo_random_gdv
      - fullpgo_random_edge
      - fullpgo_random_gdv_edge
    ${{ if in(parameters.testGroup, 'gc-longrunning') }}:
      longRunningGcTests: true
      scenarios:
      - normal
    ${{ if in(parameters.testGroup, 'gc-simulator') }}:
      gcSimulatorTests: true
      scenarios:
      - normal
    ${{ if in(parameters.testGroup, 'gc-standalone') }}:
      scenarios:
      - gcstandalone
    ${{ if in(parameters.testGroup, 'gc-standalone-server') }}:
      scenarios:
      - gcstandaloneserver
    ${{ if in(parameters.testGroup, 'jitelthookenabled') }}:
      scenarios:
      - jitelthookenabled
      - jitelthookenabled_tiered
    ${{ if in(parameters.testGroup, 'jit-experimental') }}:
      scenarios:
      - jitosr
      - jitosr_stress
      - jitosr_pgo
      - jitosr_stress_random
      - jitpartialcompilation
      - jitpartialcompilation_osr
      - jitpartialcompilation_osr_pgo
      - jitobjectstackallocation
    ${{ if in(parameters.testGroup, 'jit-cfg') }}:
      scenarios:
      - jitcfg
      - jitcfg_dispatcher_always
      - jitcfg_dispatcher_never
      - jitcfg_gcstress0xc
    ${{ if in(parameters.testGroup, 'ilasm') }}:
      scenarios:
      - ilasmroundtrip
    ${{ if in(parameters.testGroup, 'clrinterpreter') }}:
      scenarios:
      - clrinterpreter

# Publish Logs
- task: PublishPipelineArtifact@1
  displayName: Publish Logs
  inputs:
    targetPath: $(Build.SourcesDirectory)/artifacts/log
    artifactName: '${{ parameters.runtimeFlavor }}_${{ parameters.runtimeVariant }}_$(LogNamePrefix)_$(osGroup)$(osSubgroup)_$(archType)_$(buildConfig)_${{ parameters.testGroup }}'
  continueOnError: true
  condition: always()
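The scenario names above are not free-form labels: each one corresponds to a runtime stress-mode configuration that the Helix test runner applies, through DOTNET_*/COMPlus_* environment variables, before launching the tests. A minimal local-repro sketch in C#, assuming the standard JIT stress knob; "corerun" and "MyTest.dll" are placeholders, and the authoritative scenario-to-variable mapping lives in the test infrastructure scripts, not in this pipeline file:

using System;
using System.Diagnostics;

// Sketch: reproduce the "jitstress2" scenario locally by setting the JIT
// stress level in the child process environment before starting the host.
class StressScenarioRepro
{
    static void Main()
    {
        var psi = new ProcessStartInfo("corerun", "MyTest.dll") // placeholder host and test
        {
            UseShellExecute = false // required so the Environment dictionary is honored
        };
        psi.Environment["DOTNET_JitStress"] = "2"; // the legacy COMPlus_JitStress spelling is also honored
        Process.Start(psi)?.WaitForExit();
    }
}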
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd/AddPairwiseWidening.Vector64.Int16.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void AddPairwiseWidening_Vector64_Int16() { var test = new SimpleUnaryOpTest__AddPairwiseWidening_Vector64_Int16(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleUnaryOpTest__AddPairwiseWidening_Vector64_Int16 { private struct DataTable { private byte[] inArray1; private byte[] outArray; private GCHandle inHandle1; private GCHandle outHandle; private ulong alignment; public DataTable(Int16[] inArray1, Int32[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int16>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int32>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; 
this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int16, byte>(ref inArray1[0]), (uint)sizeOfinArray1); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector64<Int16> _fld1; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref testStruct._fld1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>()); return testStruct; } public void RunStructFldScenario(SimpleUnaryOpTest__AddPairwiseWidening_Vector64_Int16 testClass) { var result = AdvSimd.AddPairwiseWidening(_fld1); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(SimpleUnaryOpTest__AddPairwiseWidening_Vector64_Int16 testClass) { fixed (Vector64<Int16>* pFld1 = &_fld1) { var result = AdvSimd.AddPairwiseWidening( AdvSimd.LoadVector64((Int16*)(pFld1)) ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 8; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<Int16>>() / sizeof(Int16); private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<Int32>>() / sizeof(Int32); private static Int16[] _data1 = new Int16[Op1ElementCount]; private static Vector64<Int16> _clsVar1; private Vector64<Int16> _fld1; private DataTable _dataTable; static SimpleUnaryOpTest__AddPairwiseWidening_Vector64_Int16() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref _clsVar1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>()); } public SimpleUnaryOpTest__AddPairwiseWidening_Vector64_Int16() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref _fld1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); } _dataTable = new DataTable(_data1, new Int32[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.AddPairwiseWidening( Unsafe.Read<Vector64<Int16>>(_dataTable.inArray1Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.AddPairwiseWidening( AdvSimd.LoadVector64((Int16*)(_dataTable.inArray1Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.AddPairwiseWidening), new Type[] { typeof(Vector64<Int16>) }) .Invoke(null, new object[] { Unsafe.Read<Vector64<Int16>>(_dataTable.inArray1Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Int32>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.AddPairwiseWidening), new Type[] { typeof(Vector64<Int16>) }) .Invoke(null, new object[] { AdvSimd.LoadVector64((Int16*)(_dataTable.inArray1Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Int32>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.AddPairwiseWidening( _clsVar1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector64<Int16>* pClsVar1 = &_clsVar1) { var result = AdvSimd.AddPairwiseWidening( AdvSimd.LoadVector64((Int16*)(pClsVar1)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector64<Int16>>(_dataTable.inArray1Ptr); var result = AdvSimd.AddPairwiseWidening(op1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector64((Int16*)(_dataTable.inArray1Ptr)); var result = AdvSimd.AddPairwiseWidening(op1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleUnaryOpTest__AddPairwiseWidening_Vector64_Int16(); var result = AdvSimd.AddPairwiseWidening(test._fld1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new SimpleUnaryOpTest__AddPairwiseWidening_Vector64_Int16(); fixed (Vector64<Int16>* pFld1 = &test._fld1) { var result = AdvSimd.AddPairwiseWidening( AdvSimd.LoadVector64((Int16*)(pFld1)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.AddPairwiseWidening(_fld1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector64<Int16>* pFld1 = &_fld1) { var result = AdvSimd.AddPairwiseWidening( AdvSimd.LoadVector64((Int16*)(pFld1)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.AddPairwiseWidening(test._fld1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.AddPairwiseWidening( AdvSimd.LoadVector64((Int16*)(&test._fld1)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector64<Int16> op1, void* result, [CallerMemberName] string method = "") { Int16[] inArray1 = new Int16[Op1ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int16, byte>(ref inArray1[0]), op1); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int32>>()); ValidateResult(inArray1, outArray, method); } private void ValidateResult(void* op1, void* result, [CallerMemberName] string method = "") { Int16[] inArray1 = new Int16[Op1ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector64<Int16>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int32>>()); ValidateResult(inArray1, outArray, method); } private void ValidateResult(Int16[] firstOp, Int32[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.AddPairwiseWidening(firstOp, i) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.AddPairwiseWidening)}<Int32>(Vector64<Int16>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void AddPairwiseWidening_Vector64_Int16() { var test = new SimpleUnaryOpTest__AddPairwiseWidening_Vector64_Int16(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleUnaryOpTest__AddPairwiseWidening_Vector64_Int16 { private struct DataTable { private byte[] inArray1; private byte[] outArray; private GCHandle inHandle1; private GCHandle outHandle; private ulong alignment; public DataTable(Int16[] inArray1, Int32[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int16>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int32>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; 
this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int16, byte>(ref inArray1[0]), (uint)sizeOfinArray1); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector64<Int16> _fld1; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref testStruct._fld1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>()); return testStruct; } public void RunStructFldScenario(SimpleUnaryOpTest__AddPairwiseWidening_Vector64_Int16 testClass) { var result = AdvSimd.AddPairwiseWidening(_fld1); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(SimpleUnaryOpTest__AddPairwiseWidening_Vector64_Int16 testClass) { fixed (Vector64<Int16>* pFld1 = &_fld1) { var result = AdvSimd.AddPairwiseWidening( AdvSimd.LoadVector64((Int16*)(pFld1)) ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 8; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<Int16>>() / sizeof(Int16); private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<Int32>>() / sizeof(Int32); private static Int16[] _data1 = new Int16[Op1ElementCount]; private static Vector64<Int16> _clsVar1; private Vector64<Int16> _fld1; private DataTable _dataTable; static SimpleUnaryOpTest__AddPairwiseWidening_Vector64_Int16() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref _clsVar1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>()); } public SimpleUnaryOpTest__AddPairwiseWidening_Vector64_Int16() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref _fld1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); } _dataTable = new DataTable(_data1, new Int32[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.AddPairwiseWidening( Unsafe.Read<Vector64<Int16>>(_dataTable.inArray1Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.AddPairwiseWidening( AdvSimd.LoadVector64((Int16*)(_dataTable.inArray1Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.AddPairwiseWidening), new Type[] { typeof(Vector64<Int16>) }) .Invoke(null, new object[] { Unsafe.Read<Vector64<Int16>>(_dataTable.inArray1Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Int32>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.AddPairwiseWidening), new Type[] { typeof(Vector64<Int16>) }) .Invoke(null, new object[] { AdvSimd.LoadVector64((Int16*)(_dataTable.inArray1Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Int32>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.AddPairwiseWidening( _clsVar1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector64<Int16>* pClsVar1 = &_clsVar1) { var result = AdvSimd.AddPairwiseWidening( AdvSimd.LoadVector64((Int16*)(pClsVar1)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector64<Int16>>(_dataTable.inArray1Ptr); var result = AdvSimd.AddPairwiseWidening(op1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector64((Int16*)(_dataTable.inArray1Ptr)); var result = AdvSimd.AddPairwiseWidening(op1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleUnaryOpTest__AddPairwiseWidening_Vector64_Int16(); var result = AdvSimd.AddPairwiseWidening(test._fld1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new SimpleUnaryOpTest__AddPairwiseWidening_Vector64_Int16(); fixed (Vector64<Int16>* pFld1 = &test._fld1) { var result = AdvSimd.AddPairwiseWidening( AdvSimd.LoadVector64((Int16*)(pFld1)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.AddPairwiseWidening(_fld1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector64<Int16>* pFld1 = &_fld1) { var result = AdvSimd.AddPairwiseWidening( AdvSimd.LoadVector64((Int16*)(pFld1)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.AddPairwiseWidening(test._fld1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.AddPairwiseWidening( AdvSimd.LoadVector64((Int16*)(&test._fld1)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector64<Int16> op1, void* result, [CallerMemberName] string method = "") { Int16[] inArray1 = new Int16[Op1ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int16, byte>(ref inArray1[0]), op1); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int32>>()); ValidateResult(inArray1, outArray, method); } private void ValidateResult(void* op1, void* result, [CallerMemberName] string method = "") { Int16[] inArray1 = new Int16[Op1ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector64<Int16>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int32>>()); ValidateResult(inArray1, outArray, method); } private void ValidateResult(Int16[] firstOp, Int32[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.AddPairwiseWidening(firstOp, i) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.AddPairwiseWidening)}<Int32>(Vector64<Int16>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
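The test above validates each lane against Helpers.AddPairwiseWidening, whose source is not part of this record. A minimal scalar reference for the operation, assuming the SADDLP semantics that AdvSimd.AddPairwiseWidening wraps (adjacent input lanes summed into one widened result lane); the method name here is illustrative, not the actual helper:

// Each Int32 result lane i is the sum of input lanes 2*i and 2*i + 1,
// widened to Int32 before the add so the sum cannot overflow Int16.
static int AddPairwiseWideningReference(short[] firstOp, int i)
{
    return (int)firstOp[2 * i] + firstOp[2 * i + 1];
}

For Vector64<Int16>, four 16-bit input lanes therefore produce two 32-bit result lanes, which matches the Op1ElementCount = 4 and RetElementCount = 2 values the test computes from the vector sizes.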
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/mono/wasi/mono-wasi-driver/driver.h
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include <mono/metadata/assembly.h> #include <mono/metadata/object.h> void mono_wasm_load_runtime (const char *unused, int debug_level); int mono_wasm_add_assembly (const char *name, const unsigned char *data, unsigned int size); MonoAssembly* mono_wasm_assembly_load(const char *name); MonoMethod* mono_wasm_assembly_get_entry_point (MonoAssembly *assembly); MonoClass* mono_wasm_assembly_find_class (MonoAssembly *assembly, const char *namespace, const char *name); MonoMethod* mono_wasm_assembly_find_method (MonoClass *klass, const char *name, int arguments); MonoObject* mono_wasm_invoke_method (MonoMethod *method, MonoObject *this_arg, void *params[], MonoObject **out_exc); int mono_unbox_int (MonoObject *obj); void mono_wasm_setenv (const char *name, const char *value); void add_assembly(const char* base_dir, const char *name); MonoArray* mono_wasm_obj_array_new (int size); void mono_wasm_obj_array_set (MonoArray *array, int idx, MonoObject *obj); MonoArray* mono_wasm_string_array_new (int size); MonoString *mono_wasm_string_from_js (const char *str); int mono_wasm_array_length(MonoArray* array); char *mono_wasm_string_get_utf8 (MonoString *str); MonoMethod* lookup_dotnet_method(const char* assembly_name, const char* namespace, const char* type_name, const char* method_name, int num_params);
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include <mono/metadata/assembly.h> #include <mono/metadata/object.h> void mono_wasm_load_runtime (const char *unused, int debug_level); int mono_wasm_add_assembly (const char *name, const unsigned char *data, unsigned int size); MonoAssembly* mono_wasm_assembly_load(const char *name); MonoMethod* mono_wasm_assembly_get_entry_point (MonoAssembly *assembly); MonoClass* mono_wasm_assembly_find_class (MonoAssembly *assembly, const char *namespace, const char *name); MonoMethod* mono_wasm_assembly_find_method (MonoClass *klass, const char *name, int arguments); MonoObject* mono_wasm_invoke_method (MonoMethod *method, MonoObject *this_arg, void *params[], MonoObject **out_exc); int mono_unbox_int (MonoObject *obj); void mono_wasm_setenv (const char *name, const char *value); void add_assembly(const char* base_dir, const char *name); MonoArray* mono_wasm_obj_array_new (int size); void mono_wasm_obj_array_set (MonoArray *array, int idx, MonoObject *obj); MonoArray* mono_wasm_string_array_new (int size); MonoString *mono_wasm_string_from_js (const char *str); int mono_wasm_array_length(MonoArray* array); char *mono_wasm_string_get_utf8 (MonoString *str); MonoMethod* lookup_dotnet_method(const char* assembly_name, const char* namespace, const char* type_name, const char* method_name, int num_params);
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/libraries/System.Linq/tests/JoinTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using Xunit; namespace System.Linq.Tests { public class JoinTests : EnumerableTests { public struct CustomerRec { public string name; public int custID; } public struct OrderRec { public int orderID; public int custID; public int total; } public struct AnagramRec { public string name; public int orderID; public int total; } public struct JoinRec { public string name; public int orderID; public int total; } public static JoinRec createJoinRec(CustomerRec cr, OrderRec or) { return new JoinRec { name = cr.name, orderID = or.orderID, total = or.total }; } public static JoinRec createJoinRec(CustomerRec cr, AnagramRec or) { return new JoinRec { name = cr.name, orderID = or.orderID, total = or.total }; } [Fact] public void OuterEmptyInnerNonEmpty() { CustomerRec[] outer = { }; OrderRec[] inner = new [] { new OrderRec{ orderID = 45321, custID = 98022, total = 50 }, new OrderRec{ orderID = 97865, custID = 32103, total = 25 } }; Assert.Empty(outer.Join(inner, e => e.custID, e => e.custID, createJoinRec)); } [Fact] public void FirstOuterMatchesLastInnerLastOuterMatchesFirstInnerSameNumberElements() { CustomerRec[] outer = new [] { new CustomerRec{ name = "Prakash", custID = 98022 }, new CustomerRec{ name = "Tim", custID = 99021 }, new CustomerRec{ name = "Robert", custID = 99022 } }; OrderRec[] inner = new [] { new OrderRec{ orderID = 45321, custID = 99022, total = 50 }, new OrderRec{ orderID = 43421, custID = 29022, total = 20 }, new OrderRec{ orderID = 95421, custID = 98022, total = 9 } }; JoinRec[] expected = new [] { new JoinRec{ name = "Prakash", orderID = 95421, total = 9 }, new JoinRec{ name = "Robert", orderID = 45321, total = 50 } }; Assert.Equal(expected, outer.Join(inner, e => e.custID, e => e.custID, createJoinRec)); } [Fact] public void NullComparer() { CustomerRec[] outer = new [] { new CustomerRec{ name = "Prakash", custID = 98022 }, new CustomerRec{ name = "Tim", custID = 99021 }, new CustomerRec{ name = "Robert", custID = 99022 } }; AnagramRec[] inner = new [] { new AnagramRec{ name = "miT", orderID = 43455, total = 10 }, new AnagramRec{ name = "Prakash", orderID = 323232, total = 9 } }; JoinRec[] expected = new [] { new JoinRec{ name = "Prakash", orderID = 323232, total = 9 } }; Assert.Equal(expected, outer.Join(inner, e => e.name, e => e.name, createJoinRec, null)); } [Fact] public void CustomComparer() { CustomerRec[] outer = new [] { new CustomerRec{ name = "Prakash", custID = 98022 }, new CustomerRec{ name = "Tim", custID = 99021 }, new CustomerRec{ name = "Robert", custID = 99022 } }; AnagramRec[] inner = new [] { new AnagramRec{ name = "miT", orderID = 43455, total = 10 }, new AnagramRec{ name = "Prakash", orderID = 323232, total = 9 } }; JoinRec[] expected = new [] { new JoinRec{ name = "Prakash", orderID = 323232, total = 9 }, new JoinRec{ name = "Tim", orderID = 43455, total = 10 } }; Assert.Equal(expected, outer.Join(inner, e => e.name, e => e.name, createJoinRec, new AnagramEqualityComparer())); } [Fact] public void OuterNull() { CustomerRec[] outer = null; AnagramRec[] inner = new [] { new AnagramRec{ name = "miT", orderID = 43455, total = 10 }, new AnagramRec{ name = "Prakash", orderID = 323232, total = 9 } }; AssertExtensions.Throws<ArgumentNullException>("outer", () => outer.Join(inner, e => e.name, e => e.name, createJoinRec, new AnagramEqualityComparer())); } [Fact] public void 
InnerNull() { CustomerRec[] outer = new [] { new CustomerRec{ name = "Prakash", custID = 98022 }, new CustomerRec{ name = "Tim", custID = 99021 }, new CustomerRec{ name = "Robert", custID = 99022 } }; AnagramRec[] inner = null; AssertExtensions.Throws<ArgumentNullException>("inner", () => outer.Join(inner, e => e.name, e => e.name, createJoinRec, new AnagramEqualityComparer())); } [Fact] public void OuterKeySelectorNull() { CustomerRec[] outer = new [] { new CustomerRec{ name = "Prakash", custID = 98022 }, new CustomerRec{ name = "Tim", custID = 99021 }, new CustomerRec{ name = "Robert", custID = 99022 } }; AnagramRec[] inner = new [] { new AnagramRec{ name = "miT", orderID = 43455, total = 10 }, new AnagramRec{ name = "Prakash", orderID = 323232, total = 9 } }; AssertExtensions.Throws<ArgumentNullException>("outerKeySelector", () => outer.Join(inner, null, e => e.name, createJoinRec, new AnagramEqualityComparer())); } [Fact] public void InnerKeySelectorNull() { CustomerRec[] outer = new [] { new CustomerRec{ name = "Prakash", custID = 98022 }, new CustomerRec{ name = "Tim", custID = 99021 }, new CustomerRec{ name = "Robert", custID = 99022 } }; AnagramRec[] inner = new [] { new AnagramRec{ name = "miT", orderID = 43455, total = 10 }, new AnagramRec{ name = "Prakash", orderID = 323232, total = 9 } }; AssertExtensions.Throws<ArgumentNullException>("innerKeySelector", () => outer.Join(inner, e => e.name, null, createJoinRec, new AnagramEqualityComparer())); } [Fact] public void ResultSelectorNull() { CustomerRec[] outer = new [] { new CustomerRec{ name = "Prakash", custID = 98022 }, new CustomerRec{ name = "Tim", custID = 99021 }, new CustomerRec{ name = "Robert", custID = 99022 } }; AnagramRec[] inner = new [] { new AnagramRec{ name = "miT", orderID = 43455, total = 10 }, new AnagramRec{ name = "Prakash", orderID = 323232, total = 9 } }; AssertExtensions.Throws<ArgumentNullException>("resultSelector", () => outer.Join(inner, e => e.name, e => e.name, (Func<CustomerRec, AnagramRec, JoinRec>)null, new AnagramEqualityComparer())); } [Fact] public void OuterNullNoComparer() { CustomerRec[] outer = null; AnagramRec[] inner = new[] { new AnagramRec{ name = "miT", orderID = 43455, total = 10 }, new AnagramRec{ name = "Prakash", orderID = 323232, total = 9 } }; AssertExtensions.Throws<ArgumentNullException>("outer", () => outer.Join(inner, e => e.name, e => e.name, createJoinRec)); } [Fact] public void InnerNullNoComparer() { CustomerRec[] outer = new[] { new CustomerRec{ name = "Prakash", custID = 98022 }, new CustomerRec{ name = "Tim", custID = 99021 }, new CustomerRec{ name = "Robert", custID = 99022 } }; AnagramRec[] inner = null; AssertExtensions.Throws<ArgumentNullException>("inner", () => outer.Join(inner, e => e.name, e => e.name, createJoinRec)); } [Fact] public void OuterKeySelectorNullNoComparer() { CustomerRec[] outer = new[] { new CustomerRec{ name = "Prakash", custID = 98022 }, new CustomerRec{ name = "Tim", custID = 99021 }, new CustomerRec{ name = "Robert", custID = 99022 } }; AnagramRec[] inner = new[] { new AnagramRec{ name = "miT", orderID = 43455, total = 10 }, new AnagramRec{ name = "Prakash", orderID = 323232, total = 9 } }; AssertExtensions.Throws<ArgumentNullException>("outerKeySelector", () => outer.Join(inner, null, e => e.name, createJoinRec)); } [Fact] public void InnerKeySelectorNullNoComparer() { CustomerRec[] outer = new[] { new CustomerRec{ name = "Prakash", custID = 98022 }, new CustomerRec{ name = "Tim", custID = 99021 }, new CustomerRec{ name = "Robert", custID = 
99022 } }; AnagramRec[] inner = new[] { new AnagramRec{ name = "miT", orderID = 43455, total = 10 }, new AnagramRec{ name = "Prakash", orderID = 323232, total = 9 } }; AssertExtensions.Throws<ArgumentNullException>("innerKeySelector", () => outer.Join(inner, e => e.name, null, createJoinRec)); } [Fact] public void ResultSelectorNullNoComparer() { CustomerRec[] outer = new[] { new CustomerRec{ name = "Prakash", custID = 98022 }, new CustomerRec{ name = "Tim", custID = 99021 }, new CustomerRec{ name = "Robert", custID = 99022 } }; AnagramRec[] inner = new[] { new AnagramRec{ name = "miT", orderID = 43455, total = 10 }, new AnagramRec{ name = "Prakash", orderID = 323232, total = 9 } }; AssertExtensions.Throws<ArgumentNullException>("resultSelector", () => outer.Join(inner, e => e.name, e => e.name, (Func<CustomerRec, AnagramRec, JoinRec>)null)); } [Fact] public void SkipsNullElements() { string[] outer = new [] { null, string.Empty }; string[] inner = new [] { null, string.Empty }; string[] expected = new [] { string.Empty }; Assert.Equal(expected, outer.Join(inner, e => e, e => e, (x, y) => y, EqualityComparer<string>.Default)); } [Fact] public void OuterNonEmptyInnerEmpty() { CustomerRec[] outer = new [] { new CustomerRec{ name = "Tim", custID = 43434 }, new CustomerRec{ name = "Bob", custID = 34093 } }; OrderRec[] inner = { }; Assert.Empty(outer.Join(inner, e => e.custID, e => e.custID, createJoinRec)); } [Fact] public void SingleElementEachAndMatches() { CustomerRec[] outer = new [] { new CustomerRec { name = "Prakash", custID = 98022 } }; OrderRec[] inner = new [] { new OrderRec { orderID = 45321, custID = 98022, total = 50 } }; JoinRec[] expected = new [] { new JoinRec { name = "Prakash", orderID = 45321, total = 50 } }; Assert.Equal(expected, outer.Join(inner, e => e.custID, e => e.custID, createJoinRec)); } [Fact] public void SingleElementEachAndDoesntMatch() { CustomerRec[] outer = new [] { new CustomerRec { name = "Prakash", custID = 98922 } }; OrderRec[] inner = new [] { new OrderRec { orderID = 45321, custID = 98022, total = 50 } }; Assert.Empty(outer.Join(inner, e => e.custID, e => e.custID, createJoinRec)); } [Fact] public void SelectorsReturnNull() { int?[] inner = { null, null, null }; int?[] outer = { null, null }; Assert.Empty(outer.Join(inner, e => e, e => e, (x, y) => x)); } [Fact] public void InnerSameKeyMoreThanOneElementAndMatches() { CustomerRec[] outer = new [] { new CustomerRec{ name = "Prakash", custID = 98022 }, new CustomerRec{ name = "Tim", custID = 99021 }, new CustomerRec{ name = "Robert", custID = 99022 } }; OrderRec[] inner = new [] { new OrderRec{ orderID = 45321, custID = 98022, total = 50 }, new OrderRec{ orderID = 45421, custID = 98022, total = 10 }, new OrderRec{ orderID = 43421, custID = 99022, total = 20 }, new OrderRec{ orderID = 85421, custID = 98022, total = 18 }, new OrderRec{ orderID = 95421, custID = 99021, total = 9 } }; JoinRec[] expected = new [] { new JoinRec{ name = "Prakash", orderID = 45321, total = 50 }, new JoinRec{ name = "Prakash", orderID = 45421, total = 10 }, new JoinRec{ name = "Prakash", orderID = 85421, total = 18 }, new JoinRec{ name = "Tim", orderID = 95421, total = 9 }, new JoinRec{ name = "Robert", orderID = 43421, total = 20 } }; Assert.Equal(expected, outer.Join(inner, e => e.custID, e => e.custID, createJoinRec)); } [Fact] public void OuterSameKeyMoreThanOneElementAndMatches() { CustomerRec[] outer = new [] { new CustomerRec{ name = "Prakash", custID = 98022 }, new CustomerRec{ name = "Bob", custID = 99022 }, new 
CustomerRec{ name = "Tim", custID = 99021 }, new CustomerRec{ name = "Robert", custID = 99022 } }; OrderRec[] inner = new [] { new OrderRec{ orderID = 45321, custID = 98022, total = 50 }, new OrderRec{ orderID = 43421, custID = 99022, total = 20 }, new OrderRec{ orderID = 95421, custID = 99021, total = 9 } }; JoinRec[] expected = new [] { new JoinRec{ name = "Prakash", orderID = 45321, total = 50 }, new JoinRec{ name = "Bob", orderID = 43421, total = 20 }, new JoinRec{ name = "Tim", orderID = 95421, total = 9 }, new JoinRec{ name = "Robert", orderID = 43421, total = 20 } }; Assert.Equal(expected, outer.Join(inner, e => e.custID, e => e.custID, createJoinRec)); } [Fact] public void NoMatches() { CustomerRec[] outer = new [] { new CustomerRec{ name = "Prakash", custID = 98022 }, new CustomerRec{ name = "Bob", custID = 99022 }, new CustomerRec{ name = "Tim", custID = 99021 }, new CustomerRec{ name = "Robert", custID = 99022 } }; OrderRec[] inner = new [] { new OrderRec{ orderID = 45321, custID = 18022, total = 50 }, new OrderRec{ orderID = 43421, custID = 29022, total = 20 }, new OrderRec{ orderID = 95421, custID = 39021, total = 9 } }; Assert.Empty(outer.Join(inner, e => e.custID, e => e.custID, createJoinRec)); } [Fact] public void ForcedToEnumeratorDoesntEnumerate() { var iterator = NumberRangeGuaranteedNotCollectionType(0, 3).Join(Enumerable.Empty<int>(), i => i, i => i, (o, i) => i); // Don't insist on this behaviour, but check it's correct if it happens var en = iterator as IEnumerator<int>; Assert.False(en != null && en.MoveNext()); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Collections.Generic; using Xunit; namespace System.Linq.Tests { public class JoinTests : EnumerableTests { public struct CustomerRec { public string name; public int custID; } public struct OrderRec { public int orderID; public int custID; public int total; } public struct AnagramRec { public string name; public int orderID; public int total; } public struct JoinRec { public string name; public int orderID; public int total; } public static JoinRec createJoinRec(CustomerRec cr, OrderRec or) { return new JoinRec { name = cr.name, orderID = or.orderID, total = or.total }; } public static JoinRec createJoinRec(CustomerRec cr, AnagramRec or) { return new JoinRec { name = cr.name, orderID = or.orderID, total = or.total }; } [Fact] public void OuterEmptyInnerNonEmpty() { CustomerRec[] outer = { }; OrderRec[] inner = new [] { new OrderRec{ orderID = 45321, custID = 98022, total = 50 }, new OrderRec{ orderID = 97865, custID = 32103, total = 25 } }; Assert.Empty(outer.Join(inner, e => e.custID, e => e.custID, createJoinRec)); } [Fact] public void FirstOuterMatchesLastInnerLastOuterMatchesFirstInnerSameNumberElements() { CustomerRec[] outer = new [] { new CustomerRec{ name = "Prakash", custID = 98022 }, new CustomerRec{ name = "Tim", custID = 99021 }, new CustomerRec{ name = "Robert", custID = 99022 } }; OrderRec[] inner = new [] { new OrderRec{ orderID = 45321, custID = 99022, total = 50 }, new OrderRec{ orderID = 43421, custID = 29022, total = 20 }, new OrderRec{ orderID = 95421, custID = 98022, total = 9 } }; JoinRec[] expected = new [] { new JoinRec{ name = "Prakash", orderID = 95421, total = 9 }, new JoinRec{ name = "Robert", orderID = 45321, total = 50 } }; Assert.Equal(expected, outer.Join(inner, e => e.custID, e => e.custID, createJoinRec)); } [Fact] public void NullComparer() { CustomerRec[] outer = new [] { new CustomerRec{ name = "Prakash", custID = 98022 }, new CustomerRec{ name = "Tim", custID = 99021 }, new CustomerRec{ name = "Robert", custID = 99022 } }; AnagramRec[] inner = new [] { new AnagramRec{ name = "miT", orderID = 43455, total = 10 }, new AnagramRec{ name = "Prakash", orderID = 323232, total = 9 } }; JoinRec[] expected = new [] { new JoinRec{ name = "Prakash", orderID = 323232, total = 9 } }; Assert.Equal(expected, outer.Join(inner, e => e.name, e => e.name, createJoinRec, null)); } [Fact] public void CustomComparer() { CustomerRec[] outer = new [] { new CustomerRec{ name = "Prakash", custID = 98022 }, new CustomerRec{ name = "Tim", custID = 99021 }, new CustomerRec{ name = "Robert", custID = 99022 } }; AnagramRec[] inner = new [] { new AnagramRec{ name = "miT", orderID = 43455, total = 10 }, new AnagramRec{ name = "Prakash", orderID = 323232, total = 9 } }; JoinRec[] expected = new [] { new JoinRec{ name = "Prakash", orderID = 323232, total = 9 }, new JoinRec{ name = "Tim", orderID = 43455, total = 10 } }; Assert.Equal(expected, outer.Join(inner, e => e.name, e => e.name, createJoinRec, new AnagramEqualityComparer())); } [Fact] public void OuterNull() { CustomerRec[] outer = null; AnagramRec[] inner = new [] { new AnagramRec{ name = "miT", orderID = 43455, total = 10 }, new AnagramRec{ name = "Prakash", orderID = 323232, total = 9 } }; AssertExtensions.Throws<ArgumentNullException>("outer", () => outer.Join(inner, e => e.name, e => e.name, createJoinRec, new AnagramEqualityComparer())); } [Fact] public void 
        InnerNull()
        {
            CustomerRec[] outer = new[]
            {
                new CustomerRec{ name = "Prakash", custID = 98022 },
                new CustomerRec{ name = "Tim", custID = 99021 },
                new CustomerRec{ name = "Robert", custID = 99022 }
            };
            AnagramRec[] inner = null;

            AssertExtensions.Throws<ArgumentNullException>("inner", () => outer.Join(inner, e => e.name, e => e.name, createJoinRec, new AnagramEqualityComparer()));
        }

        [Fact]
        public void OuterKeySelectorNull()
        {
            CustomerRec[] outer = new[]
            {
                new CustomerRec{ name = "Prakash", custID = 98022 },
                new CustomerRec{ name = "Tim", custID = 99021 },
                new CustomerRec{ name = "Robert", custID = 99022 }
            };
            AnagramRec[] inner = new[]
            {
                new AnagramRec{ name = "miT", orderID = 43455, total = 10 },
                new AnagramRec{ name = "Prakash", orderID = 323232, total = 9 }
            };

            AssertExtensions.Throws<ArgumentNullException>("outerKeySelector", () => outer.Join(inner, null, e => e.name, createJoinRec, new AnagramEqualityComparer()));
        }

        [Fact]
        public void InnerKeySelectorNull()
        {
            CustomerRec[] outer = new[]
            {
                new CustomerRec{ name = "Prakash", custID = 98022 },
                new CustomerRec{ name = "Tim", custID = 99021 },
                new CustomerRec{ name = "Robert", custID = 99022 }
            };
            AnagramRec[] inner = new[]
            {
                new AnagramRec{ name = "miT", orderID = 43455, total = 10 },
                new AnagramRec{ name = "Prakash", orderID = 323232, total = 9 }
            };

            AssertExtensions.Throws<ArgumentNullException>("innerKeySelector", () => outer.Join(inner, e => e.name, null, createJoinRec, new AnagramEqualityComparer()));
        }

        [Fact]
        public void ResultSelectorNull()
        {
            CustomerRec[] outer = new[]
            {
                new CustomerRec{ name = "Prakash", custID = 98022 },
                new CustomerRec{ name = "Tim", custID = 99021 },
                new CustomerRec{ name = "Robert", custID = 99022 }
            };
            AnagramRec[] inner = new[]
            {
                new AnagramRec{ name = "miT", orderID = 43455, total = 10 },
                new AnagramRec{ name = "Prakash", orderID = 323232, total = 9 }
            };

            AssertExtensions.Throws<ArgumentNullException>("resultSelector", () => outer.Join(inner, e => e.name, e => e.name, (Func<CustomerRec, AnagramRec, JoinRec>)null, new AnagramEqualityComparer()));
        }

        [Fact]
        public void OuterNullNoComparer()
        {
            CustomerRec[] outer = null;
            AnagramRec[] inner = new[]
            {
                new AnagramRec{ name = "miT", orderID = 43455, total = 10 },
                new AnagramRec{ name = "Prakash", orderID = 323232, total = 9 }
            };

            AssertExtensions.Throws<ArgumentNullException>("outer", () => outer.Join(inner, e => e.name, e => e.name, createJoinRec));
        }

        [Fact]
        public void InnerNullNoComparer()
        {
            CustomerRec[] outer = new[]
            {
                new CustomerRec{ name = "Prakash", custID = 98022 },
                new CustomerRec{ name = "Tim", custID = 99021 },
                new CustomerRec{ name = "Robert", custID = 99022 }
            };
            AnagramRec[] inner = null;

            AssertExtensions.Throws<ArgumentNullException>("inner", () => outer.Join(inner, e => e.name, e => e.name, createJoinRec));
        }

        [Fact]
        public void OuterKeySelectorNullNoComparer()
        {
            CustomerRec[] outer = new[]
            {
                new CustomerRec{ name = "Prakash", custID = 98022 },
                new CustomerRec{ name = "Tim", custID = 99021 },
                new CustomerRec{ name = "Robert", custID = 99022 }
            };
            AnagramRec[] inner = new[]
            {
                new AnagramRec{ name = "miT", orderID = 43455, total = 10 },
                new AnagramRec{ name = "Prakash", orderID = 323232, total = 9 }
            };

            AssertExtensions.Throws<ArgumentNullException>("outerKeySelector", () => outer.Join(inner, null, e => e.name, createJoinRec));
        }

        [Fact]
        public void InnerKeySelectorNullNoComparer()
        {
            CustomerRec[] outer = new[]
            {
                new CustomerRec{ name = "Prakash", custID = 98022 },
                new CustomerRec{ name = "Tim", custID = 99021 },
                new CustomerRec{ name = "Robert", custID = 99022 }
            };
            AnagramRec[] inner = new[]
            {
                new AnagramRec{ name = "miT", orderID = 43455, total = 10 },
                new AnagramRec{ name = "Prakash", orderID = 323232, total = 9 }
            };

            AssertExtensions.Throws<ArgumentNullException>("innerKeySelector", () => outer.Join(inner, e => e.name, null, createJoinRec));
        }

        [Fact]
        public void ResultSelectorNullNoComparer()
        {
            CustomerRec[] outer = new[]
            {
                new CustomerRec{ name = "Prakash", custID = 98022 },
                new CustomerRec{ name = "Tim", custID = 99021 },
                new CustomerRec{ name = "Robert", custID = 99022 }
            };
            AnagramRec[] inner = new[]
            {
                new AnagramRec{ name = "miT", orderID = 43455, total = 10 },
                new AnagramRec{ name = "Prakash", orderID = 323232, total = 9 }
            };

            AssertExtensions.Throws<ArgumentNullException>("resultSelector", () => outer.Join(inner, e => e.name, e => e.name, (Func<CustomerRec, AnagramRec, JoinRec>)null));
        }

        [Fact]
        public void SkipsNullElements()
        {
            string[] outer = new[] { null, string.Empty };
            string[] inner = new[] { null, string.Empty };
            string[] expected = new[] { string.Empty };

            Assert.Equal(expected, outer.Join(inner, e => e, e => e, (x, y) => y, EqualityComparer<string>.Default));
        }

        [Fact]
        public void OuterNonEmptyInnerEmpty()
        {
            CustomerRec[] outer = new[]
            {
                new CustomerRec{ name = "Tim", custID = 43434 },
                new CustomerRec{ name = "Bob", custID = 34093 }
            };
            OrderRec[] inner = { };

            Assert.Empty(outer.Join(inner, e => e.custID, e => e.custID, createJoinRec));
        }

        [Fact]
        public void SingleElementEachAndMatches()
        {
            CustomerRec[] outer = new[] { new CustomerRec { name = "Prakash", custID = 98022 } };
            OrderRec[] inner = new[] { new OrderRec { orderID = 45321, custID = 98022, total = 50 } };
            JoinRec[] expected = new[] { new JoinRec { name = "Prakash", orderID = 45321, total = 50 } };

            Assert.Equal(expected, outer.Join(inner, e => e.custID, e => e.custID, createJoinRec));
        }

        [Fact]
        public void SingleElementEachAndDoesntMatch()
        {
            CustomerRec[] outer = new[] { new CustomerRec { name = "Prakash", custID = 98922 } };
            OrderRec[] inner = new[] { new OrderRec { orderID = 45321, custID = 98022, total = 50 } };

            Assert.Empty(outer.Join(inner, e => e.custID, e => e.custID, createJoinRec));
        }

        [Fact]
        public void SelectorsReturnNull()
        {
            int?[] inner = { null, null, null };
            int?[] outer = { null, null };

            Assert.Empty(outer.Join(inner, e => e, e => e, (x, y) => x));
        }

        [Fact]
        public void InnerSameKeyMoreThanOneElementAndMatches()
        {
            CustomerRec[] outer = new[]
            {
                new CustomerRec{ name = "Prakash", custID = 98022 },
                new CustomerRec{ name = "Tim", custID = 99021 },
                new CustomerRec{ name = "Robert", custID = 99022 }
            };
            OrderRec[] inner = new[]
            {
                new OrderRec{ orderID = 45321, custID = 98022, total = 50 },
                new OrderRec{ orderID = 45421, custID = 98022, total = 10 },
                new OrderRec{ orderID = 43421, custID = 99022, total = 20 },
                new OrderRec{ orderID = 85421, custID = 98022, total = 18 },
                new OrderRec{ orderID = 95421, custID = 99021, total = 9 }
            };
            JoinRec[] expected = new[]
            {
                new JoinRec{ name = "Prakash", orderID = 45321, total = 50 },
                new JoinRec{ name = "Prakash", orderID = 45421, total = 10 },
                new JoinRec{ name = "Prakash", orderID = 85421, total = 18 },
                new JoinRec{ name = "Tim", orderID = 95421, total = 9 },
                new JoinRec{ name = "Robert", orderID = 43421, total = 20 }
            };

            Assert.Equal(expected, outer.Join(inner, e => e.custID, e => e.custID, createJoinRec));
        }

        [Fact]
        public void OuterSameKeyMoreThanOneElementAndMatches()
        {
            CustomerRec[] outer = new[]
            {
                new CustomerRec{ name = "Prakash", custID = 98022 },
                new CustomerRec{ name = "Bob", custID = 99022 },
                new CustomerRec{ name = "Tim", custID = 99021 },
                new CustomerRec{ name = "Robert", custID = 99022 }
            };
            OrderRec[] inner = new[]
            {
                new OrderRec{ orderID = 45321, custID = 98022, total = 50 },
                new OrderRec{ orderID = 43421, custID = 99022, total = 20 },
                new OrderRec{ orderID = 95421, custID = 99021, total = 9 }
            };
            JoinRec[] expected = new[]
            {
                new JoinRec{ name = "Prakash", orderID = 45321, total = 50 },
                new JoinRec{ name = "Bob", orderID = 43421, total = 20 },
                new JoinRec{ name = "Tim", orderID = 95421, total = 9 },
                new JoinRec{ name = "Robert", orderID = 43421, total = 20 }
            };

            Assert.Equal(expected, outer.Join(inner, e => e.custID, e => e.custID, createJoinRec));
        }

        [Fact]
        public void NoMatches()
        {
            CustomerRec[] outer = new[]
            {
                new CustomerRec{ name = "Prakash", custID = 98022 },
                new CustomerRec{ name = "Bob", custID = 99022 },
                new CustomerRec{ name = "Tim", custID = 99021 },
                new CustomerRec{ name = "Robert", custID = 99022 }
            };
            OrderRec[] inner = new[]
            {
                new OrderRec{ orderID = 45321, custID = 18022, total = 50 },
                new OrderRec{ orderID = 43421, custID = 29022, total = 20 },
                new OrderRec{ orderID = 95421, custID = 39021, total = 9 }
            };

            Assert.Empty(outer.Join(inner, e => e.custID, e => e.custID, createJoinRec));
        }

        [Fact]
        public void ForcedToEnumeratorDoesntEnumerate()
        {
            var iterator = NumberRangeGuaranteedNotCollectionType(0, 3).Join(Enumerable.Empty<int>(), i => i, i => i, (o, i) => i);
            // Don't insist on this behaviour, but check it's correct if it happens
            var en = iterator as IEnumerator<int>;
            Assert.False(en != null && en.MoveNext());
        }
    }
}
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/tests/JIT/Regression/CLR-x86-JIT/V1-M09.5-PDC/b29456/b29456.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; namespace Tests { internal class Operators { private static bool s_t = true; private static bool s_f = false; private static byte s_by_13 = 13; private static byte s_by_3 = 3; private static sbyte s_sb_m3 = -3; private static sbyte s_sb_13 = 13; private static short s_sh_8712 = 8712; private static short s_sh_m973 = -973; private static ushort s_us_8712 = 8712; private static ushort s_us_973 = 973; private static int s_int_33452 = 33452; private static int s_int_m3097 = -3097; private static uint s_uint_33452 = 33452u; private static uint s_uint_3097 = 3097u; private static long s_long_x1 = -971239841234L; private static long s_long_x2 = 1876343; private static ulong s_ulong_x1 = 971239841234uL; private static ulong s_ulong_x2 = 1876343Lu; private static float s_float_x1 = -193.23F; private static float s_float_x2 = 1.712F; private static double s_double_x1 = -7423.2312; private static double s_double_x2 = 3.712987; private static double s_double_nan = 0.0 / 0.0; private static string s_string_null = null; private static string s_string1 = "hello "; private static string s_string2 = "world "; private static string s_string3 = "elvis "; static Operators() { System.Console.WriteLine(".cctor"); } public static int Main() { System.Console.WriteLine("----------------"); #pragma warning disable 1718 bool b1 = s_t && s_t; #pragma warning restore bool b2 = s_t && s_f; bool b3 = s_f && s_t; #pragma warning disable 1718 bool b4 = s_f && s_f; bool b5 = s_t || s_t; #pragma warning restore bool b6 = s_t || s_f; bool b7 = s_f || s_t; bool b8 = s_f || s_f; #pragma warning disable 1718 bool b9 = s_t == s_t; #pragma warning restore bool b10 = s_t == s_f; bool b11 = s_f == s_t; #pragma warning disable 1718 bool b12 = s_f == s_f; bool b13 = s_t != s_t; #pragma warning restore bool b14 = s_t != s_f; bool b15 = s_f != s_t; #pragma warning disable 1718 bool b16 = s_f != s_f; #pragma warning restore bool b17 = !s_t; bool b18 = !s_f; bool b19 = !(!s_t && (!s_f || s_t)); System.Console.WriteLine("----------------"); byte by1 = (byte)(s_by_13 + s_by_3); byte by2 = (byte)(s_by_13 - s_by_3); byte by3 = (byte)(s_by_13 * s_by_3); byte by4 = (byte)(s_by_13 / s_by_3); byte by5 = (byte)(s_by_13 % s_by_3); byte by6 = (byte)(s_by_13 & s_by_3); byte by7 = (byte)(s_by_13 | s_by_3); byte by8 = (byte)(s_by_13 ^ s_by_3); byte by9 = (byte)(-s_by_13); byte by10 = (byte)(s_by_13 >> 1); byte by11 = (byte)(s_by_13 >> 1); #pragma warning disable 1718 bool by12 = (s_by_13 == s_by_13); #pragma warning restore bool by13 = (s_by_13 == s_by_3); #pragma warning disable 1718 bool by14 = (s_by_13 != s_by_13); #pragma warning restore bool by15 = (s_by_13 != s_by_3); #pragma warning disable 1718 bool by16 = (s_by_13 >= s_by_13); #pragma warning restore bool by17 = (s_by_13 >= s_by_3); #pragma warning disable 1718 bool by18 = (s_by_13 <= s_by_13); #pragma warning restore bool by19 = (s_by_13 <= s_by_3); #pragma warning disable 1718 bool by20 = (s_by_13 < s_by_13); #pragma warning restore bool by21 = (s_by_13 < s_by_3); #pragma warning disable 1718 bool by22 = (s_by_13 > s_by_13); #pragma warning restore bool by23 = (s_by_13 > s_by_3); System.Console.WriteLine("----------------"); sbyte sb1 = (sbyte)(s_sb_13 + s_sb_m3); sbyte sb2 = (sbyte)(s_sb_13 - s_sb_m3); sbyte sb3 = (sbyte)(s_sb_13 * s_sb_m3); sbyte sb4 = (sbyte)(s_sb_13 / s_sb_m3); sbyte sb5 = (sbyte)(s_sb_13 % s_sb_m3); sbyte sb6 = 
(sbyte)(s_sb_13 & s_sb_m3); sbyte sb7 = (sbyte)(s_sb_13 | s_sb_m3); sbyte sb8 = (sbyte)(s_sb_13 ^ s_sb_m3); sbyte sb9 = (sbyte)(-s_sb_13); sbyte sb10 = (sbyte)(s_sb_m3 >> 1); sbyte sb11 = (sbyte)(s_sb_13 >> 1); #pragma warning disable 1718 bool sb12 = (s_sb_13 == s_sb_13); #pragma warning restore bool sb13 = (s_sb_13 == s_sb_m3); #pragma warning disable 1718 bool sb14 = (s_sb_13 != s_sb_13); #pragma warning restore bool sb15 = (s_sb_13 != s_sb_m3); #pragma warning disable 1718 bool sb16 = (s_sb_13 >= s_sb_13); #pragma warning restore bool sb17 = (s_sb_13 >= s_sb_m3); #pragma warning disable 1718 bool sb18 = (s_sb_13 <= s_sb_13); #pragma warning restore bool sb19 = (s_sb_13 <= s_sb_m3); #pragma warning disable 1718 bool sb20 = (s_sb_13 < s_sb_13); #pragma warning restore bool sb21 = (s_sb_13 < s_sb_m3); #pragma warning disable 1718 bool sb22 = (s_sb_13 > s_sb_13); #pragma warning restore bool sb23 = (s_sb_13 > s_sb_m3); System.Console.WriteLine("----------------"); short sh1 = (short)(s_sh_8712 + s_sh_m973); short sh2 = (short)(s_sh_8712 - s_sh_m973); short sh3 = (short)(s_sh_8712 * s_sh_m973); short sh4 = (short)(s_sh_8712 / s_sh_m973); short sh5 = (short)(s_sh_8712 % s_sh_m973); short sh6 = (short)(s_sh_8712 & s_sh_m973); short sh7 = (short)(s_sh_8712 | s_sh_m973); short sh8 = (short)(s_sh_8712 ^ s_sh_m973); short sh9 = (short)(-s_sh_8712); short sh10 = (short)(s_sh_8712 >> 1); short sh11 = (short)((ushort)s_sh_8712 >> 1); #pragma warning disable 1718 bool sh12 = (s_sh_8712 == s_sh_8712); #pragma warning restore bool sh13 = (s_sh_8712 == s_sh_m973); #pragma warning disable 1718 bool sh14 = (s_sh_8712 != s_sh_8712); #pragma warning restore bool sh15 = (s_sh_8712 != s_sh_m973); #pragma warning disable 1718 bool sh16 = (s_sh_8712 >= s_sh_8712); #pragma warning restore bool sh17 = (s_sh_8712 >= s_sh_m973); #pragma warning disable 1718 bool sh18 = (s_sh_8712 <= s_sh_8712); #pragma warning restore bool sh19 = (s_sh_8712 <= s_sh_m973); #pragma warning disable 1718 bool sh20 = (s_sh_8712 < s_sh_8712); #pragma warning restore bool sh21 = (s_sh_8712 < s_sh_m973); #pragma warning disable 1718 bool sh22 = (s_sh_8712 > s_sh_8712); #pragma warning restore bool sh23 = (s_sh_8712 > s_sh_m973); System.Console.WriteLine("----------------"); ushort us1 = (ushort)(s_us_8712 + s_us_973); ushort us2 = (ushort)(s_us_8712 - s_us_973); ushort us3 = (ushort)(s_us_8712 * s_us_973); ushort us4 = (ushort)(s_us_8712 / s_us_973); ushort us5 = (ushort)(s_us_8712 % s_us_973); ushort us6 = (ushort)(s_us_8712 & s_us_973); ushort us7 = (ushort)(s_us_8712 | s_us_973); ushort us8 = (ushort)(s_us_8712 ^ s_us_973); int us9 = -s_us_8712; ushort us10 = (ushort)((short)s_us_8712 >> 1); ushort us11 = (ushort)(s_us_8712 >> 1); #pragma warning disable 1718 bool us12 = (s_us_8712 == s_us_8712); #pragma warning restore bool us13 = (s_us_8712 == s_us_973); #pragma warning disable 1718 bool us14 = (s_us_8712 != s_us_8712); #pragma warning restore bool us15 = (s_us_8712 != s_us_973); #pragma warning disable 1718 bool us16 = (s_us_8712 >= s_us_8712); #pragma warning restore bool us17 = (s_us_8712 >= s_us_973); #pragma warning disable 1718 bool us18 = (s_us_8712 <= s_us_8712); #pragma warning restore bool us19 = (s_us_8712 <= s_us_973); #pragma warning disable 1718 bool us20 = (s_us_8712 < s_us_8712); #pragma warning restore bool us21 = (s_us_8712 < s_us_973); #pragma warning disable 1718 bool us22 = (s_us_8712 > s_us_8712); #pragma warning restore bool us23 = (s_us_8712 > s_us_973); System.Console.WriteLine("----------------"); int int1 = 
s_int_33452 + s_int_m3097; int int2 = s_int_33452 - s_int_m3097; int int3 = (int)(s_int_33452 * s_int_m3097); int int4 = s_int_33452 / s_int_m3097; int int5 = s_int_33452 % s_int_m3097; int int6 = s_int_33452 & s_int_m3097; int int7 = s_int_33452 | s_int_m3097; int int8 = s_int_33452 ^ s_int_m3097; int int9 = (-s_int_33452); int int10 = s_int_33452 >> 1; int int11 = (int)((uint)s_int_33452 >> 1); #pragma warning disable 1718 bool int12 = (s_int_33452 == s_int_33452); #pragma warning restore bool int13 = (s_int_33452 == s_int_m3097); #pragma warning disable 1718 bool int14 = (s_int_33452 != s_int_33452); #pragma warning restore bool int15 = (s_int_33452 != s_int_m3097); #pragma warning disable 1718 bool int16 = (s_int_33452 >= s_int_33452); #pragma warning restore bool int17 = (s_int_33452 >= s_int_m3097); #pragma warning disable 1718 bool int18 = (s_int_33452 <= s_int_33452); #pragma warning restore bool int19 = (s_int_33452 <= s_int_m3097); #pragma warning disable 1718 bool int20 = (s_int_33452 < s_int_33452); #pragma warning restore bool int21 = (s_int_33452 < s_int_m3097); #pragma warning disable 1718 bool int22 = (s_int_33452 > s_int_33452); #pragma warning restore bool int23 = (s_int_33452 > s_int_m3097); System.Console.WriteLine("----------------"); uint uint1 = s_uint_33452 + s_uint_3097; uint uint2 = s_uint_33452 - s_uint_3097; uint uint3 = (uint)(s_uint_33452 * s_uint_3097); uint uint4 = s_uint_33452 / s_uint_3097; uint uint5 = s_uint_33452 % s_uint_3097; uint uint6 = s_uint_33452 & s_uint_3097; uint uint7 = s_uint_33452 | s_uint_3097; uint uint8 = s_uint_33452 ^ s_uint_3097; long uint9 = -s_uint_33452; uint uint10 = s_uint_33452 >> 1; uint uint11 = s_uint_33452 >> 1; #pragma warning disable 1718 bool uint12 = (s_uint_33452 == s_uint_33452); #pragma warning restore bool uint13 = (s_uint_33452 == s_uint_3097); #pragma warning disable 1718 bool uint14 = (s_uint_33452 != s_uint_33452); #pragma warning restore bool uint15 = (s_uint_33452 != s_uint_3097); #pragma warning disable 1718 bool uint16 = (s_uint_33452 >= s_uint_33452); #pragma warning restore bool uint17 = (s_uint_33452 >= s_uint_3097); #pragma warning disable 1718 bool uint18 = (s_uint_33452 <= s_uint_33452); #pragma warning restore bool uint19 = (s_uint_33452 <= s_uint_3097); #pragma warning disable 1718 bool uint20 = (s_uint_33452 < s_uint_33452); #pragma warning restore bool uint21 = (s_uint_33452 < s_uint_3097); #pragma warning disable 1718 bool uint22 = (s_uint_33452 > s_uint_33452); #pragma warning restore bool uint23 = (s_uint_33452 > s_uint_3097); System.Console.WriteLine("----------------"); long long1 = s_long_x1 + s_long_x2; long long2 = s_long_x1 - s_long_x2; long long3 = s_long_x1 * s_long_x2; long long4 = s_long_x1 / s_long_x2; long long5 = s_long_x1 % s_long_x2; long long6 = s_long_x1 & s_long_x2; long long7 = s_long_x1 | s_long_x2; long long8 = s_long_x1 ^ s_long_x2; long long9 = (-s_long_x1); long long10 = s_long_x1 >> 1; long long11 = (long)((ulong)s_long_x1 >> 1); #pragma warning disable 1718 bool long12 = (s_long_x1 == s_long_x1); #pragma warning restore bool long13 = (s_long_x1 == s_long_x2); #pragma warning disable 1718 bool long14 = (s_long_x1 != s_long_x1); #pragma warning restore bool long15 = (s_long_x1 != s_long_x2); #pragma warning disable 1718 bool long16 = (s_long_x1 >= s_long_x1); #pragma warning restore bool long17 = (s_long_x1 >= s_long_x2); #pragma warning disable 1718 bool long18 = (s_long_x1 <= s_long_x1); #pragma warning restore bool long19 = (s_long_x1 <= s_long_x2); #pragma warning 
disable 1718 bool long20 = (s_long_x1 < s_long_x1); #pragma warning restore bool long21 = (s_long_x1 < s_long_x2); #pragma warning disable 1718 bool long22 = (s_long_x1 > s_long_x1); #pragma warning restore bool long23 = (s_long_x1 > s_long_x2); System.Console.WriteLine("----------------"); ulong ulong1 = s_ulong_x1 + s_ulong_x2; ulong ulong2 = s_ulong_x1 - s_ulong_x2; ulong ulong3 = s_ulong_x1 * s_ulong_x2; ulong ulong4 = s_ulong_x1 / s_ulong_x2; ulong ulong5 = s_ulong_x1 % s_ulong_x2; ulong ulong6 = s_ulong_x1 & s_ulong_x2; ulong ulong7 = s_ulong_x1 | s_ulong_x2; ulong ulong8 = s_ulong_x1 ^ s_ulong_x2; ulong ulong10 = s_ulong_x1 >> 1; ulong ulong11 = (ulong)(s_ulong_x1 >> 1); #pragma warning disable 1718 bool ulong12 = (s_ulong_x1 == s_ulong_x1); #pragma warning restore bool ulong13 = (s_ulong_x1 == s_ulong_x2); #pragma warning disable 1718 bool ulong14 = (s_ulong_x1 != s_ulong_x1); #pragma warning restore bool ulong15 = (s_ulong_x1 != s_ulong_x2); #pragma warning disable 1718 bool ulong16 = (s_ulong_x1 >= s_ulong_x1); #pragma warning restore bool ulong17 = (s_ulong_x1 >= s_ulong_x2); #pragma warning disable 1718 bool ulong18 = (s_ulong_x1 <= s_ulong_x1); #pragma warning restore bool ulong19 = (s_ulong_x1 <= s_ulong_x2); #pragma warning disable 1718 bool ulong20 = (s_ulong_x1 < s_ulong_x1); #pragma warning restore bool ulong21 = (s_ulong_x1 < s_ulong_x2); #pragma warning disable 1718 bool ulong22 = (s_ulong_x1 > s_ulong_x1); #pragma warning restore bool ulong23 = (s_ulong_x1 > s_ulong_x2); System.Console.WriteLine("----------------"); float float1 = s_float_x1 + s_float_x2; float float2 = s_float_x1 - s_float_x2; float float3 = s_float_x1 * s_float_x2; float float4 = s_float_x1 / s_float_x2; float float5 = s_float_x1 % s_float_x2; float float9 = (-s_float_x1); #pragma warning disable 1718 bool float12 = (s_float_x1 == s_float_x1); #pragma warning restore bool float13 = (s_float_x1 == s_float_x2); #pragma warning disable 1718 bool float14 = (s_float_x1 != s_float_x1); #pragma warning restore bool float15 = (s_float_x1 != s_float_x2); #pragma warning disable 1718 bool float16 = (s_float_x1 >= s_float_x1); #pragma warning restore bool float17 = (s_float_x1 >= s_float_x2); #pragma warning disable 1718 bool float18 = (s_float_x1 <= s_float_x1); #pragma warning restore bool float19 = (s_float_x1 <= s_float_x2); #pragma warning disable 1718 bool float20 = (s_float_x1 < s_float_x1); #pragma warning restore bool float21 = (s_float_x1 < s_float_x2); #pragma warning disable 1718 bool float22 = (s_float_x1 > s_float_x1); #pragma warning restore bool float23 = (s_float_x1 > s_float_x2); System.Console.WriteLine("----------------"); double double1 = s_double_x1 + s_double_x2; double double2 = s_double_x1 - s_double_x2; double double3 = s_double_x1 * s_double_x2; double double4 = s_double_x1 / s_double_x2; double double5 = s_double_x1 % s_double_x2; double double9 = (-s_double_x1); #pragma warning disable 1718 bool double12 = (s_double_x1 == s_double_x1); #pragma warning restore bool double13 = (s_double_x1 == s_double_x2); #pragma warning disable 1718 bool double14 = (s_double_x1 != s_double_x1); #pragma warning restore bool double15 = (s_double_x1 != s_double_x2); #pragma warning disable 1718 bool double16 = (s_double_x1 >= s_double_x1); #pragma warning restore bool double17 = (s_double_x1 >= s_double_x2); #pragma warning disable 1718 bool double18 = (s_double_x1 <= s_double_x1); #pragma warning restore bool double19 = (s_double_x1 <= s_double_x2); #pragma warning disable 1718 bool double20 = 
(s_double_x1 < s_double_x1); #pragma warning restore bool double21 = (s_double_x1 < s_double_x2); #pragma warning disable 1718 bool double22 = (s_double_x1 > s_double_x1); #pragma warning restore bool double23 = (s_double_x1 > s_double_x2); #pragma warning disable 1718 bool double24 = (s_double_nan == s_double_nan); #pragma warning restore bool double25 = (s_double_nan == s_double_x2); #pragma warning disable 1718 bool double26 = (s_double_nan != s_double_nan); #pragma warning restore bool double27 = (s_double_nan != s_double_x2); #pragma warning disable 1718 bool double28 = (s_double_nan >= s_double_nan); #pragma warning restore bool double29 = (s_double_nan >= s_double_x2); #pragma warning disable 1718 bool double30 = (s_double_nan <= s_double_nan); #pragma warning restore bool double31 = (s_double_nan <= s_double_x2); #pragma warning disable 1718 bool double32 = (s_double_nan < s_double_nan); #pragma warning restore bool double33 = (s_double_nan < s_double_x2); #pragma warning disable 1718 bool double34 = (s_double_nan > s_double_nan); #pragma warning restore bool double35 = (s_double_nan > s_double_x2); System.Console.WriteLine("----------------"); string string4 = s_string1 + s_string2; string string5 = s_string1 + s_string2 + s_string3; string string6 = s_string1 + s_string2 + s_string3 + s_string1; string string7 = s_string1 + s_string2 + s_string3 + s_string1 + s_string2; string string8 = "eric " + "is " + s_string3 + s_string1 + "clapton "; string string9 = s_string1 + s_string_null; string string10 = s_string1 + s_string_null + s_string3; string string11 = s_string_null + s_string2; Console.WriteLine("Booleans:"); Console.WriteLine(s_t); Console.WriteLine(s_f); Console.WriteLine(b1); Console.WriteLine(b2); Console.WriteLine(b3); Console.WriteLine(b4); Console.WriteLine(b5); Console.WriteLine(b6); Console.WriteLine(b7); Console.WriteLine(b8); Console.WriteLine(b9); Console.WriteLine(b10); Console.WriteLine(b11); Console.WriteLine(b12); Console.WriteLine(b13); Console.WriteLine(b14); Console.WriteLine(b15); Console.WriteLine(b16); Console.WriteLine(b17); Console.WriteLine(b18); Console.WriteLine(b19); Console.WriteLine("Bytes:"); Console.WriteLine(s_by_13); Console.WriteLine(s_by_3); Console.WriteLine(by1); Console.WriteLine(by2); Console.WriteLine(by3); Console.WriteLine(by4); Console.WriteLine(by5); Console.WriteLine(by6); Console.WriteLine(by7); Console.WriteLine(by8); Console.WriteLine(by9); Console.WriteLine(by10); Console.WriteLine(by11); Console.WriteLine(by12); Console.WriteLine(by13); Console.WriteLine(by14); Console.WriteLine(by15); Console.WriteLine(by16); Console.WriteLine(by17); Console.WriteLine(by18); Console.WriteLine(by19); Console.WriteLine(by20); Console.WriteLine(by21); Console.WriteLine(by22); Console.WriteLine(by23); Console.WriteLine("SBytes:"); Console.WriteLine(s_sb_13); Console.WriteLine(s_sb_m3); Console.WriteLine(sb1); Console.WriteLine(sb2); Console.WriteLine(sb3); Console.WriteLine(sb4); Console.WriteLine(sb5); Console.WriteLine(sb6); Console.WriteLine(sb7); Console.WriteLine(sb8); Console.WriteLine(sb9); Console.WriteLine(sb10); Console.WriteLine(sb11); Console.WriteLine(sb12); Console.WriteLine(sb13); Console.WriteLine(sb14); Console.WriteLine(sb15); Console.WriteLine(sb16); Console.WriteLine(sb17); Console.WriteLine(sb18); Console.WriteLine(sb19); Console.WriteLine(sb20); Console.WriteLine(sb21); Console.WriteLine(sb22); Console.WriteLine(sb23); Console.WriteLine("Shorts:"); Console.WriteLine(s_sh_8712); Console.WriteLine(s_sh_m973); 
Console.WriteLine(sh1); Console.WriteLine(sh2); Console.WriteLine(sh3); Console.WriteLine(sh4); Console.WriteLine(sh5); Console.WriteLine(sh6); Console.WriteLine(sh7); Console.WriteLine(sh8); Console.WriteLine(sh9); Console.WriteLine(sh10); Console.WriteLine(sh11); Console.WriteLine(sh12); Console.WriteLine(sh13); Console.WriteLine(sh14); Console.WriteLine(sh15); Console.WriteLine(sh16); Console.WriteLine(sh17); Console.WriteLine(sh18); Console.WriteLine(sh19); Console.WriteLine(sh20); Console.WriteLine(sh21); Console.WriteLine(sh22); Console.WriteLine(sh23); Console.WriteLine("UShorts:"); Console.WriteLine(s_us_8712); Console.WriteLine(s_us_973); Console.WriteLine(us1); Console.WriteLine(us2); Console.WriteLine(us3); Console.WriteLine(us4); Console.WriteLine(us5); Console.WriteLine(us6); Console.WriteLine(us7); Console.WriteLine(us8); Console.WriteLine(us9); Console.WriteLine(us10); Console.WriteLine(us11); Console.WriteLine(us12); Console.WriteLine(us13); Console.WriteLine(us14); Console.WriteLine(us15); Console.WriteLine(us16); Console.WriteLine(us17); Console.WriteLine(us18); Console.WriteLine(us19); Console.WriteLine(us20); Console.WriteLine(us21); Console.WriteLine(us22); Console.WriteLine(us23); Console.WriteLine("Ints:"); Console.WriteLine(s_int_33452); Console.WriteLine(s_int_m3097); Console.WriteLine(int1); Console.WriteLine(int2); Console.WriteLine(int3); Console.WriteLine(int4); Console.WriteLine(int5); Console.WriteLine(int6); Console.WriteLine(int7); Console.WriteLine(int8); Console.WriteLine(int9); Console.WriteLine(int10); Console.WriteLine(int11); Console.WriteLine(int12); Console.WriteLine(int13); Console.WriteLine(int14); Console.WriteLine(int15); Console.WriteLine(int16); Console.WriteLine(int17); Console.WriteLine(int18); Console.WriteLine(int19); Console.WriteLine(int20); Console.WriteLine(int21); Console.WriteLine(int22); Console.WriteLine(int23); Console.WriteLine("UInts:"); Console.WriteLine(s_uint_33452); Console.WriteLine(s_uint_3097); Console.WriteLine(uint1); Console.WriteLine(uint2); Console.WriteLine(uint3); Console.WriteLine(uint4); Console.WriteLine(uint5); Console.WriteLine(uint6); Console.WriteLine(uint7); Console.WriteLine(uint8); Console.WriteLine(uint9); Console.WriteLine(uint10); Console.WriteLine(uint11); Console.WriteLine(uint12); Console.WriteLine(uint13); Console.WriteLine(uint14); Console.WriteLine(uint15); Console.WriteLine(uint16); Console.WriteLine(uint17); Console.WriteLine(uint18); Console.WriteLine(uint19); Console.WriteLine(uint20); Console.WriteLine(uint21); Console.WriteLine(uint22); Console.WriteLine(uint23); Console.WriteLine("Longs:"); Console.WriteLine(s_long_x1); Console.WriteLine(s_long_x2); Console.WriteLine(long1); Console.WriteLine(long2); Console.WriteLine(long3); Console.WriteLine(long4); Console.WriteLine(long5); Console.WriteLine(long6); Console.WriteLine(long7); Console.WriteLine(long8); Console.WriteLine(long9); Console.WriteLine(long10); Console.WriteLine(long11); Console.WriteLine(long12); Console.WriteLine(long13); Console.WriteLine(long14); Console.WriteLine(long15); Console.WriteLine(long16); Console.WriteLine(long17); Console.WriteLine(long18); Console.WriteLine(long19); Console.WriteLine(long20); Console.WriteLine(long21); Console.WriteLine(long22); Console.WriteLine(long23); Console.WriteLine("ULongs:"); Console.WriteLine(s_ulong_x1); Console.WriteLine(s_ulong_x2); Console.WriteLine(ulong1); Console.WriteLine(ulong2); Console.WriteLine(ulong3); Console.WriteLine(ulong4); Console.WriteLine(ulong5); 
Console.WriteLine(ulong6); Console.WriteLine(ulong7); Console.WriteLine(ulong8); Console.WriteLine(ulong10); Console.WriteLine(ulong11); Console.WriteLine(ulong12); Console.WriteLine(ulong13); Console.WriteLine(ulong14); Console.WriteLine(ulong15); Console.WriteLine(ulong16); Console.WriteLine(ulong17); Console.WriteLine(ulong18); Console.WriteLine(ulong19); Console.WriteLine(ulong20); Console.WriteLine(ulong21); Console.WriteLine(ulong22); Console.WriteLine(ulong23); Console.WriteLine("Floats:"); Console.WriteLine(s_float_x1); Console.WriteLine(s_float_x2); Console.WriteLine(float1); Console.WriteLine(float2); Console.WriteLine(float3); Console.WriteLine(float4); Console.WriteLine(float5); Console.WriteLine(float9); Console.WriteLine(float12); Console.WriteLine(float13); Console.WriteLine(float14); Console.WriteLine(float15); Console.WriteLine(float16); Console.WriteLine(float17); Console.WriteLine(float18); Console.WriteLine(float19); Console.WriteLine(float20); Console.WriteLine(float21); Console.WriteLine(float22); Console.WriteLine(float23); Console.WriteLine("Doubles:"); Console.WriteLine(s_double_x1); Console.WriteLine(s_double_x2); Console.WriteLine(s_double_nan); Console.WriteLine(double1); Console.WriteLine(double2); Console.WriteLine(double3); Console.WriteLine(double4); Console.WriteLine(double5); Console.WriteLine(double9); Console.WriteLine(double12); Console.WriteLine(double13); Console.WriteLine(double14); Console.WriteLine(double15); Console.WriteLine(double16); Console.WriteLine(double17); Console.WriteLine(double18); Console.WriteLine(double19); Console.WriteLine(double20); Console.WriteLine(double21); Console.WriteLine(double22); Console.WriteLine(double23); Console.WriteLine(double24); Console.WriteLine(double25); Console.WriteLine(double26); Console.WriteLine(double27); Console.WriteLine(double28); Console.WriteLine(double29); Console.WriteLine(double30); Console.WriteLine(double31); Console.WriteLine(double32); Console.WriteLine(double33); Console.WriteLine(double34); Console.WriteLine(double35); Console.WriteLine("Strings:"); Console.WriteLine(s_string1); Console.WriteLine(s_string2); Console.WriteLine(s_string3); Console.WriteLine(string4); Console.WriteLine(string5); Console.WriteLine(string6); Console.WriteLine(string7); Console.WriteLine(string8); Console.WriteLine(string9); Console.WriteLine(string10); Console.WriteLine(string11); return 100; } } }
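The Operators test above brackets every self-comparison (for example s_by_13 == s_by_13) with #pragma warning disable 1718, because the compiler warns when a variable is compared with itself. A minimal sketch of that suppression pattern, with illustrative names:

using System;

class Cs1718Sketch
{
    static int s_x = 42;

    static void Main()
    {
        // CS1718 ("Comparison made to same variable") would normally fire on
        // the next line; the test suppresses it so the codegen for x == x can
        // still be exercised.
#pragma warning disable 1718
        bool same = (s_x == s_x);
#pragma warning restore 1718
        Console.WriteLine(same); // True
    }
}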
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/tests/JIT/HardwareIntrinsics/General/NotSupported/Vector64DoubleAsGeneric_Boolean.cs
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

/******************************************************************************
 * This file is auto-generated from a template file by the GenerateTests.csx  *
 * script in tests\src\JIT\HardwareIntrinsics\General\Shared. In order to make *
 * changes, please update the corresponding template and run according to the *
 * directions listed in the file.                                             *
 ******************************************************************************/

using System;
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;
using System.Runtime.Intrinsics;

namespace JIT.HardwareIntrinsics.General
{
    public static partial class Program
    {
        private static void Vector64DoubleAsGeneric_Boolean()
        {
            bool succeeded = false;

            try
            {
                Vector64<bool> result = default(Vector64<double>).As<double, bool>();
            }
            catch (NotSupportedException)
            {
                succeeded = true;
            }

            if (!succeeded)
            {
                TestLibrary.TestFramework.LogInformation($"Vector64DoubleAsGeneric_Boolean: RunNotSupportedScenario failed to throw NotSupportedException.");
                TestLibrary.TestFramework.LogInformation(string.Empty);
                throw new Exception("One or more scenarios did not complete as expected.");
            }
        }
    }
}
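The scenario above verifies that reinterpreting a Vector64 with an unsupported generic element type (bool) throws NotSupportedException. For contrast, a small sketch of a supported reinterpretation, assuming a .NET Core 3.0+ runtime; names are illustrative:

using System;
using System.Runtime.Intrinsics;

class VectorAsSketch
{
    static void Main()
    {
        // Reinterpreting between supported element types is a bitwise no-op:
        // one double lane becomes one 64-bit integer lane.
        Vector64<double> v = Vector64.Create(1.0);
        Vector64<long> bits = v.As<double, long>();
        Console.WriteLine(bits.GetElement(0)); // 4607182418800017408, i.e. BitConverter.DoubleToInt64Bits(1.0)

        // bool is not a supported vector element type, so this throws.
        try
        {
            var invalid = v.As<double, bool>();
        }
        catch (NotSupportedException)
        {
            Console.WriteLine("bool is not a valid Vector64 element type");
        }
    }
}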
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/libraries/System.Net.WebClient/tests/System.Net.WebClient.Tests.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <TargetFramework>$(NetCoreAppCurrent)</TargetFramework> <DefineConstants>$(DefineConstants);NETSTANDARD</DefineConstants> <!-- SYSLIB0014: WebRequest, HttpWebRequest, ServicePoint, and WebClient are obsolete. Use HttpClient instead. --> <NoWarn>$(NoWarn);SYSLIB0014</NoWarn> <IgnoreForCI Condition="'$(TargetOS)' == 'Browser'">true</IgnoreForCI> </PropertyGroup> <ItemGroup> <Compile Include="AssemblyInfo.cs" /> <Compile Include="WebClientTest.cs" /> <Compile Include="$(CommonTestPath)System\Net\Capability.Security.cs" Link="Common\System\Net\Capability.Security.cs" /> <Compile Include="$(CommonTestPath)System\Net\Configuration.cs" Link="Common\System\Net\Configuration.cs" /> <Compile Include="$(CommonTestPath)System\Net\Configuration.Certificates.cs" Link="Common\System\Net\Configuration.Certificates.cs" /> <Compile Include="$(CommonTestPath)System\Net\Configuration.Http.cs" Link="Common\System\Net\Configuration.Http.cs" /> <Compile Include="$(CommonTestPath)System\Net\Configuration.Security.cs" Link="Common\System\Net\Configuration.Security.cs" /> <Compile Include="$(CommonTestPath)System\Net\Http\LoopbackServer.cs" Link="Common\System\Net\Http\LoopbackServer.cs" /> <Compile Include="$(CommonTestPath)System\Net\Http\GenericLoopbackServer.cs" Link="Common\System\Net\Http\GenericLoopbackServer.cs" /> <Compile Include="$(CommonTestPath)System\Security\Cryptography\PlatformSupport.cs" Link="CommonTest\System\Security\Cryptography\PlatformSupport.cs" /> <Compile Include="$(CommonTestPath)System\Threading\Tasks\TaskTimeoutExtensions.cs" Link="Common\System\Threading\Tasks\TaskTimeoutExtensions.cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <TargetFramework>$(NetCoreAppCurrent)</TargetFramework> <DefineConstants>$(DefineConstants);NETSTANDARD</DefineConstants> <!-- SYSLIB0014: WebRequest, HttpWebRequest, ServicePoint, and WebClient are obsolete. Use HttpClient instead. --> <NoWarn>$(NoWarn);SYSLIB0014</NoWarn> <IgnoreForCI Condition="'$(TargetOS)' == 'Browser'">true</IgnoreForCI> </PropertyGroup> <ItemGroup> <Compile Include="AssemblyInfo.cs" /> <Compile Include="WebClientTest.cs" /> <Compile Include="$(CommonTestPath)System\Net\Capability.Security.cs" Link="Common\System\Net\Capability.Security.cs" /> <Compile Include="$(CommonTestPath)System\Net\Configuration.cs" Link="Common\System\Net\Configuration.cs" /> <Compile Include="$(CommonTestPath)System\Net\Configuration.Certificates.cs" Link="Common\System\Net\Configuration.Certificates.cs" /> <Compile Include="$(CommonTestPath)System\Net\Configuration.Http.cs" Link="Common\System\Net\Configuration.Http.cs" /> <Compile Include="$(CommonTestPath)System\Net\Configuration.Security.cs" Link="Common\System\Net\Configuration.Security.cs" /> <Compile Include="$(CommonTestPath)System\Net\Http\LoopbackServer.cs" Link="Common\System\Net\Http\LoopbackServer.cs" /> <Compile Include="$(CommonTestPath)System\Net\Http\GenericLoopbackServer.cs" Link="Common\System\Net\Http\GenericLoopbackServer.cs" /> <Compile Include="$(CommonTestPath)System\Security\Cryptography\PlatformSupport.cs" Link="CommonTest\System\Security\Cryptography\PlatformSupport.cs" /> <Compile Include="$(CommonTestPath)System\Threading\Tasks\TaskTimeoutExtensions.cs" Link="Common\System\Threading\Tasks\TaskTimeoutExtensions.cs" /> </ItemGroup> </Project>
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/libraries/System.Globalization.Calendars/tests/ThaiBuddhistCalendar/ThaiBuddhistCalendarGetDaysInMonth.cs
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

using System.Collections.Generic;
using Xunit;

namespace System.Globalization.Tests
{
    public class ThaiBuddhistCalendarGetDaysInMonth
    {
        private static readonly RandomDataGenerator s_randomDataGenerator = new RandomDataGenerator();

        public static IEnumerable<object[]> GetDaysInMonth_TestData()
        {
            yield return new object[] { 1, 1 };
            yield return new object[] { 9999, 12 };
            yield return new object[] { 2000, 2 };
            yield return new object[] { s_randomDataGenerator.GetInt16(-55) % 9999, s_randomDataGenerator.GetInt16(-55) % 12 + 1 };
        }

        [Theory]
        [MemberData(nameof(GetDaysInMonth_TestData))]
        public void GetDaysInMonth(int year, int month)
        {
            ThaiBuddhistCalendar calendar = new ThaiBuddhistCalendar();
            int expected = new GregorianCalendar().GetDaysInMonth(year, month);
            Assert.Equal(expected, calendar.GetDaysInMonth(year + 543, month));
            Assert.Equal(expected, calendar.GetDaysInMonth(year + 543, month, 0));
            Assert.Equal(expected, calendar.GetDaysInMonth(year + 543, month, 1));
        }
    }
}
-1
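The test above relies on ThaiBuddhistCalendar being the Gregorian calendar shifted forward by 543 years, so day counts per month match once the year is offset. A small sketch of that relationship (the sample year is illustrative):

using System;
using System.Globalization;

class ThaiBuddhistOffsetSketch
{
    static void Main()
    {
        var thai = new ThaiBuddhistCalendar();
        var gregorian = new GregorianCalendar();

        // Gregorian 2000 is Thai Buddhist 2543; February 2000 is a leap February.
        Console.WriteLine(gregorian.GetDaysInMonth(2000, 2));  // 29
        Console.WriteLine(thai.GetDaysInMonth(2000 + 543, 2)); // 29
    }
}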
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/tests/Loader/classloader/InterfaceFolding/TestCase2.ilproj
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <ItemGroup> <Compile Include="TestCase2.il" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk.IL"> <PropertyGroup> <OutputType>Exe</OutputType> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <ItemGroup> <Compile Include="TestCase2.il" /> </ItemGroup> </Project>
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/libraries/System.Security.Permissions/ref/System.Security.Permissions.netcoreapp.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // ------------------------------------------------------------------------------ // Changes to this file must follow the https://aka.ms/api-review process. // ------------------------------------------------------------------------------ namespace System.Xaml.Permissions { #if NET5_0_OR_GREATER [System.ObsoleteAttribute("Code Access Security is not supported or honored by the runtime.", DiagnosticId = "SYSLIB0003", UrlFormat = "https://aka.ms/dotnet-warnings/{0}")] #endif public sealed partial class XamlLoadPermission : System.Security.CodeAccessPermission, System.Security.Permissions.IUnrestrictedPermission { public XamlLoadPermission(System.Collections.Generic.IEnumerable<System.Xaml.Permissions.XamlAccessLevel> allowedAccess) { } public XamlLoadPermission(System.Security.Permissions.PermissionState state) { } public XamlLoadPermission(System.Xaml.Permissions.XamlAccessLevel allowedAccess) { } [System.Runtime.Versioning.SupportedOSPlatform("windows")] public System.Collections.Generic.IList<System.Xaml.Permissions.XamlAccessLevel> AllowedAccess { get { throw null; } } public override System.Security.IPermission Copy() { throw null; } public override bool Equals(object obj) { throw null; } public override void FromXml(System.Security.SecurityElement elem) { } public override int GetHashCode() { throw null; } public bool Includes(System.Xaml.Permissions.XamlAccessLevel requestedAccess) { throw null; } public override System.Security.IPermission Intersect(System.Security.IPermission target) { throw null; } public override bool IsSubsetOf(System.Security.IPermission target) { throw null; } public bool IsUnrestricted() { throw null; } public override System.Security.SecurityElement ToXml() { throw null; } public override System.Security.IPermission Union(System.Security.IPermission other) { throw null; } } }
-1
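Two conventions in the reference source above are worth noting: members are stubbed with throw null because reference assemblies only describe API shape, and the ObsoleteAttribute carries a DiagnosticId so callers can suppress exactly SYSLIB0003 rather than all obsoletion warnings. A sketch of consuming code doing that targeted suppression (usage is illustrative):

#pragma warning disable SYSLIB0003 // Code Access Security is not supported or honored by the runtime.
var permission = new System.Xaml.Permissions.XamlLoadPermission(
    System.Security.Permissions.PermissionState.None);
#pragma warning restore SYSLIB0003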
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/libraries/System.Security.Permissions/src/System/Security/SecurityState.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.Security { public abstract partial class SecurityState { protected SecurityState() { } public abstract void EnsureState(); public bool IsStateAvailable() { return false; } } }
-1
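On this implementation IsStateAvailable() always returns false, so any EnsureState() override effectively has one useful behavior: fail. A minimal sketch of a derived type, assuming a hypothetical subclass name that is not from the source:

using System;
using System.Security;

// Hypothetical subclass, for illustration only.
sealed class SampleSecurityState : SecurityState
{
    public override void EnsureState()
    {
        // IsStateAvailable() is hard-coded to false here, so this always throws.
        if (!IsStateAvailable())
            throw new InvalidOperationException("Security state is not available on this platform.");
    }
}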
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/libraries/System.Private.CoreLib/src/System/Runtime/CompilerServices/FormattableStringFactory.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /*============================================================ ** ** ** ** Purpose: implementation of the FormattableStringFactory ** class. ** ===========================================================*/ namespace System.Runtime.CompilerServices { /// <summary> /// A factory type used by compilers to create instances of the type <see cref="FormattableString"/>. /// </summary> public static class FormattableStringFactory { /// <summary> /// Create a <see cref="FormattableString"/> from a composite format string and object /// array containing zero or more objects to format. /// </summary> public static FormattableString Create(string format!!, params object?[] arguments!!) => new ConcreteFormattableString(format, arguments); private sealed class ConcreteFormattableString : FormattableString { private readonly string _format; private readonly object?[] _arguments; internal ConcreteFormattableString(string format, object?[] arguments) { _format = format; _arguments = arguments; } public override string Format => _format; public override object?[] GetArguments() { return _arguments; } public override int ArgumentCount => _arguments.Length; public override object? GetArgument(int index) { return _arguments[index]; } public override string ToString(IFormatProvider? formatProvider) { return string.Format(formatProvider, _format, _arguments); } } } }
-1
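FormattableStringFactory.Create is what the C# compiler calls when an interpolated string is assigned to a FormattableString target: the composite format and the argument array are kept separate so a culture can be applied later. A sketch of both the explicit call and the deferred formatting (the values are illustrative):

using System;
using System.Globalization;
using System.Runtime.CompilerServices;

class FormattableStringSketch
{
    static void Main()
    {
        double price = 1234.56;

        // Roughly what the compiler emits for: FormattableString fs = $"total: {price}";
        FormattableString fs = FormattableStringFactory.Create("total: {0}", price);

        // Formatting is deferred until a culture is supplied.
        Console.WriteLine(fs.ToString(CultureInfo.InvariantCulture));        // total: 1234.56
        Console.WriteLine(fs.ToString(CultureInfo.GetCultureInfo("de-DE"))); // total: 1234,56
    }
}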
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/native/public/mono/utils/mono-jemalloc.h
/** * \file * * Header for jemalloc registration code */ #ifndef __MONO_JEMALLOC_H__ #define __MONO_JEMALLOC_H__ #if defined(MONO_JEMALLOC_ENABLED) #include <jemalloc/jemalloc.h> /* Jemalloc can be configured in three ways. * 1. You can use it with library loading hacks at run-time * 2. You can use it as a global malloc replacement * 3. You can use it with a prefix. If you use it with a prefix, you have to explicitly name the malloc function. * * In order to make this feature able to be toggled at run-time, I chose to use a prefix of mono_je. * This mapping is captured below in the header, in the spirit of "no magic constants". * * The place that configures jemalloc and sets this prefix is in the Makefile in * mono/jemalloc/Makefile.am * */ #define MONO_JEMALLOC_MALLOC mono_jemalloc #define MONO_JEMALLOC_REALLOC mono_jerealloc #define MONO_JEMALLOC_FREE mono_jefree #define MONO_JEMALLOC_CALLOC mono_jecalloc void mono_init_jemalloc (void); #endif #endif
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/libraries/System.Text.Json/tests/System.Text.Json.Tests/Serialization/Value.WriteTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Text.Encodings.Web; using Newtonsoft.Json; using Xunit; namespace System.Text.Json.Serialization.Tests { public static partial class ValueTests { [Fact] public static void WriteStringWithRelaxedEscaper() { string inputString = ">><++>>>\">>\\>>&>>>\u6f22\u5B57>>>"; // Non-ASCII text should remain unescaped. \u6f22 = 漢 (U+6F22), \u5B57 = 字 (U+5B57) string actual = JsonSerializer.Serialize(inputString, new JsonSerializerOptions { Encoder = JavaScriptEncoder.UnsafeRelaxedJsonEscaping }); string expected = "\">><++>>>\\\">>\\\\>>&>>>\u6f22\u5B57>>>\""; Assert.Equal(JsonConvert.SerializeObject(inputString), actual); Assert.Equal(expected, actual); Assert.NotEqual(expected, JsonSerializer.Serialize(inputString)); } [Fact] public static void WritePrimitives() { { string json = JsonSerializer.Serialize(1); Assert.Equal("1", json); } { int? value = 1; string json = JsonSerializer.Serialize(value); Assert.Equal("1", json); } { int? value = null; string json = JsonSerializer.Serialize(value); Assert.Equal("null", json); } { string json = JsonSerializer.Serialize((string)null); Assert.Equal("null", json); } { Span<byte> json = JsonSerializer.SerializeToUtf8Bytes(1); Assert.Equal(Encoding.UTF8.GetBytes("1"), json.ToArray()); } { string json = JsonSerializer.Serialize(long.MaxValue); Assert.Equal(long.MaxValue.ToString(), json); } { Span<byte> json = JsonSerializer.SerializeToUtf8Bytes(long.MaxValue); Assert.Equal(Encoding.UTF8.GetBytes(long.MaxValue.ToString()), json.ToArray()); } { string json = JsonSerializer.Serialize("Hello"); Assert.Equal(@"""Hello""", json); } { Span<byte> json = JsonSerializer.SerializeToUtf8Bytes("Hello"); Assert.Equal(Encoding.UTF8.GetBytes(@"""Hello"""), json.ToArray()); } { Uri uri = new Uri("https://domain/path"); Assert.Equal(@"""https://domain/path""", JsonSerializer.Serialize(uri)); } { Uri.TryCreate("~/path", UriKind.RelativeOrAbsolute, out Uri uri); Assert.Equal(@"""~/path""", JsonSerializer.Serialize(uri)); } // The next two scenarios validate that we're NOT using Uri.ToString() for serializing Uri. The serializer // will escape backslashes and ampersands, but otherwise should be the same as the output of Uri.OriginalString.
{ // ToString would collapse the relative segment Uri uri = new Uri("http://a/b/../c"); Assert.Equal(@"""http://a/b/../c""", JsonSerializer.Serialize(uri)); } { // "%20" gets turned into a space by Uri.ToString() // https://coding.abel.nu/2014/10/beware-of-uri-tostring/ Uri uri = new Uri("http://localhost?p1=Value&p2=A%20B%26p3%3DFooled!"); Assert.Equal(@"""http://localhost?p1=Value\u0026p2=A%20B%26p3%3DFooled!""", JsonSerializer.Serialize(uri)); } { Version version = new Version(1, 2); Assert.Equal(@"""1.2""", JsonSerializer.Serialize(version)); } { Version version = new Version(1, 2, 3); Assert.Equal(@"""1.2.3""", JsonSerializer.Serialize(version)); } { Version version = new Version(1, 2, 3, 4); Assert.Equal(@"""1.2.3.4""", JsonSerializer.Serialize(version)); } { Version version = new Version(int.MaxValue, int.MaxValue); Assert.Equal(@"""2147483647.2147483647""", JsonSerializer.Serialize(version)); } { Version version = new Version(int.MaxValue, int.MaxValue, int.MaxValue); Assert.Equal(@"""2147483647.2147483647.2147483647""", JsonSerializer.Serialize(version)); } { Version version = new Version(int.MaxValue, int.MaxValue, int.MaxValue, int.MaxValue); Assert.Equal(@"""2147483647.2147483647.2147483647.2147483647""", JsonSerializer.Serialize(version)); } } [Theory] [InlineData("1:59:59", "01:59:59")] [InlineData("23:59:59")] [InlineData("23:59:59.9", "23:59:59.9000000")] [InlineData("23:59:59.9999999")] [InlineData("1.23:59:59")] [InlineData("9999999.23:59:59.9999999")] [InlineData("-9999999.23:59:59.9999999")] [InlineData("10675199.02:48:05.4775807")] // TimeSpan.MaxValue [InlineData("-10675199.02:48:05.4775808")] // TimeSpan.MinValue public static void TimeSpan_Write_Success(string value, string? expectedValue = null) { TimeSpan ts = TimeSpan.Parse(value); string json = JsonSerializer.Serialize(ts); Assert.Equal($"\"{expectedValue ?? value}\"", json); Assert.Equal(json, JsonConvert.SerializeObject(ts)); } } }
-1
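The Uri scenarios above pin down that System.Text.Json writes Uri.OriginalString rather than Uri.ToString(), which decodes percent-escapes such as "%20". A short sketch of the difference the tests guard against (the sample URL is illustrative):

using System;
using System.Text.Json;

class UriSerializationSketch
{
    static void Main()
    {
        var uri = new Uri("http://localhost?p1=A%20B");

        Console.WriteLine(uri.ToString());                // "%20" is decoded to a space
        Console.WriteLine(uri.OriginalString);            // http://localhost?p1=A%20B
        Console.WriteLine(JsonSerializer.Serialize(uri)); // "http://localhost?p1=A%20B"
    }
}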
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/libraries/System.Net.HttpListener/src/System/Net/WebSockets/HttpWebSocket.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Diagnostics.CodeAnalysis; using System.Security.Cryptography; using System.Text; using System.Threading; namespace System.Net.WebSockets { internal static partial class HttpWebSocket { internal const string SecWebSocketKeyGuid = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"; internal const string WebSocketUpgradeToken = "websocket"; internal const int DefaultReceiveBufferSize = 16 * 1024; internal const int DefaultClientSendBufferSize = 16 * 1024; [SuppressMessage("Microsoft.Security", "CA5350", Justification = "SHA1 used only for hashing purposes, not for crypto.")] internal static string GetSecWebSocketAcceptString(string? secWebSocketKey) { string acceptString = string.Concat(secWebSocketKey, HttpWebSocket.SecWebSocketKeyGuid); byte[] toHash = Encoding.UTF8.GetBytes(acceptString); // SHA1 used only for hashing purposes, not for crypto. Check here for FIPS compat. byte[] hash = SHA1.HashData(toHash); return Convert.ToBase64String(hash); } // return value here signifies if a Sec-WebSocket-Protocol header should be returned by the server. internal static bool ProcessWebSocketProtocolHeader(string? clientSecWebSocketProtocol, string? subProtocol, out string acceptProtocol) { acceptProtocol = string.Empty; if (string.IsNullOrEmpty(clientSecWebSocketProtocol)) { // client hasn't specified any Sec-WebSocket-Protocol header if (subProtocol != null) { // If the server specified _anything_ this isn't valid. throw new WebSocketException(WebSocketError.UnsupportedProtocol, SR.Format(SR.net_WebSockets_ClientAcceptingNoProtocols, subProtocol)); } // Treat empty and null from the server as the same thing here, server should not send headers. return false; } // here, we know the client specified something and it's non-empty. if (subProtocol == null) { // client specified some protocols, server specified 'null'. So server should send headers. return true; } // here, we know that the client has specified something, it's not empty // and the server has specified exactly one protocol string[] requestProtocols = clientSecWebSocketProtocol.Split(',', StringSplitOptions.TrimEntries | StringSplitOptions.RemoveEmptyEntries); acceptProtocol = subProtocol; // client specified protocols, serverOptions has exactly 1 non-empty entry. Check that // this exists in the list the client specified. for (int i = 0; i < requestProtocols.Length; i++) { string currentRequestProtocol = requestProtocols[i]; if (string.Equals(acceptProtocol, currentRequestProtocol, StringComparison.OrdinalIgnoreCase)) { return true; } } throw new WebSocketException(WebSocketError.UnsupportedProtocol, SR.Format(SR.net_WebSockets_AcceptUnsupportedProtocol, clientSecWebSocketProtocol, subProtocol)); } internal static void ValidateOptions(string? 
subProtocol, int receiveBufferSize, int sendBufferSize, TimeSpan keepAliveInterval) { if (subProtocol != null) { WebSocketValidate.ValidateSubprotocol(subProtocol); } if (receiveBufferSize < MinReceiveBufferSize) { throw new ArgumentOutOfRangeException(nameof(receiveBufferSize), receiveBufferSize, SR.Format(SR.net_WebSockets_ArgumentOutOfRange_TooSmall, MinReceiveBufferSize)); } if (sendBufferSize < MinSendBufferSize) { throw new ArgumentOutOfRangeException(nameof(sendBufferSize), sendBufferSize, SR.Format(SR.net_WebSockets_ArgumentOutOfRange_TooSmall, MinSendBufferSize)); } if (receiveBufferSize > MaxBufferSize) { throw new ArgumentOutOfRangeException(nameof(receiveBufferSize), receiveBufferSize, SR.Format(SR.net_WebSockets_ArgumentOutOfRange_TooBig, nameof(receiveBufferSize), receiveBufferSize, MaxBufferSize)); } if (sendBufferSize > MaxBufferSize) { throw new ArgumentOutOfRangeException(nameof(sendBufferSize), sendBufferSize, SR.Format(SR.net_WebSockets_ArgumentOutOfRange_TooBig, nameof(sendBufferSize), sendBufferSize, MaxBufferSize)); } if (keepAliveInterval < Timeout.InfiniteTimeSpan) // -1 millisecond { throw new ArgumentOutOfRangeException(nameof(keepAliveInterval), keepAliveInterval, SR.Format(SR.net_WebSockets_ArgumentOutOfRange_TooSmall, Timeout.InfiniteTimeSpan.ToString())); } } internal const int MinSendBufferSize = 16; internal const int MinReceiveBufferSize = 256; internal const int MaxBufferSize = 64 * 1024; private static void ValidateWebSocketHeaders(HttpListenerContext context) { if (!WebSocketsSupported) { throw new PlatformNotSupportedException(SR.net_WebSockets_UnsupportedPlatform); } if (!context.Request.IsWebSocketRequest) { throw new WebSocketException(WebSocketError.NotAWebSocket, SR.Format(SR.net_WebSockets_AcceptNotAWebSocket, nameof(ValidateWebSocketHeaders), HttpKnownHeaderNames.Connection, HttpKnownHeaderNames.Upgrade, HttpWebSocket.WebSocketUpgradeToken, context.Request.Headers[HttpKnownHeaderNames.Upgrade])); } string? secWebSocketVersion = context.Request.Headers[HttpKnownHeaderNames.SecWebSocketVersion]; if (string.IsNullOrEmpty(secWebSocketVersion)) { throw new WebSocketException(WebSocketError.HeaderError, SR.Format(SR.net_WebSockets_AcceptHeaderNotFound, nameof(ValidateWebSocketHeaders), HttpKnownHeaderNames.SecWebSocketVersion)); } if (!string.Equals(secWebSocketVersion, SupportedVersion, StringComparison.OrdinalIgnoreCase)) { throw new WebSocketException(WebSocketError.UnsupportedVersion, SR.Format(SR.net_WebSockets_AcceptUnsupportedWebSocketVersion, nameof(ValidateWebSocketHeaders), secWebSocketVersion, SupportedVersion)); } string? secWebSocketKey = context.Request.Headers[HttpKnownHeaderNames.SecWebSocketKey]; bool isSecWebSocketKeyInvalid = string.IsNullOrWhiteSpace(secWebSocketKey); if (!isSecWebSocketKeyInvalid) { try { // key must be 16 bytes then base64-encoded isSecWebSocketKeyInvalid = Convert.FromBase64String(secWebSocketKey!).Length != 16; } catch { isSecWebSocketKeyInvalid = true; } } if (isSecWebSocketKeyInvalid) { throw new WebSocketException(WebSocketError.HeaderError, SR.Format(SR.net_WebSockets_AcceptHeaderNotFound, nameof(ValidateWebSocketHeaders), HttpKnownHeaderNames.SecWebSocketKey)); } } } }
-1
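GetSecWebSocketAcceptString above implements the RFC 6455 opening handshake: the server appends a fixed GUID to the client's Sec-WebSocket-Key, hashes the result with SHA-1, and base64-encodes the digest. A standalone sketch of the same computation, using the sample key from RFC 6455 section 1.3:

using System;
using System.Security.Cryptography;
using System.Text;

class WebSocketAcceptSketch
{
    // Fixed by RFC 6455; identical to SecWebSocketKeyGuid in the source above.
    const string KeyGuid = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11";

    static string ComputeAccept(string secWebSocketKey)
    {
        byte[] toHash = Encoding.UTF8.GetBytes(secWebSocketKey + KeyGuid);
        // SHA-1 is mandated by the protocol here; it is not used for security.
        return Convert.ToBase64String(SHA1.HashData(toHash));
    }

    static void Main()
    {
        // Prints s3pPLMBiTxaQ9kYGzzhZRbK+xOo=, the expected value from the RFC.
        Console.WriteLine(ComputeAccept("dGhlIHNhbXBsZSBub25jZQ=="));
    }
}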
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/libraries/System.ServiceModel.Syndication/tests/TestFeeds/AtomFeeds/missing-self.xml
<!-- Description: a feed without an atom:link element with a rel attribute value of self produces a warning Expect: MissingSelf{element:feed} --> <feed xmlns="http://www.w3.org/2005/Atom"> <title>Example Feed</title> <link href="http://contoso.com/"/> <updated>2003-12-13T18:30:02Z</updated> <author> <name>Author Name</name> </author> <id>urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6</id> <entry> <title>Atom-Powered Robots Run Amok</title> <link href="http://contoso.com/2003/12/13/atom03"/> <id>urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a</id> <updated>2003-12-13T18:30:02Z</updated> <summary>Some text.</summary> </entry> </feed>
-1
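This fixture intentionally omits an atom:link with rel="self", which is what the MissingSelf warning flags. When producing feeds with System.ServiceModel.Syndication, the warning is avoided by adding one explicitly; a sketch (the feed URL is illustrative):

using System;
using System.ServiceModel.Syndication;

class SelfLinkSketch
{
    static void Main()
    {
        var feed = new SyndicationFeed("Example Feed", null, new Uri("http://contoso.com/"));

        // Emits <link rel="self" href="..."/> so validators do not report MissingSelf.
        feed.Links.Add(SyndicationLink.CreateSelfLink(new Uri("http://contoso.com/feed.atom")));
    }
}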
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/tests/JIT/Regression/JitBlue/DevDiv_205323/starg0.il
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // Storing to arg 0 in an instance method requires special // handling by the jit. .assembly extern mscorlib {} .assembly starg0 {} .assembly extern xunit.core {} .class public F { .method public instance void .ctor(int32 a) { ldarg.0 ldarg.1 ldc.i4 100 add stfld int32 F::A ret } .field public int32 A .method public int32 Starg0() cil managed { ldarg.0 ldfld int32 F::A newobj instance void F::.ctor(int32) starg.s 0 ldarg.0 ldfld int32 F::A ret } .method public hidebysig static int32 Main(string[] args) cil managed { .custom instance void [xunit.core]Xunit.FactAttribute::.ctor() = ( 01 00 00 00 ) .entrypoint ldc.i4 -100 newobj instance void F::.ctor(int32) call instance int32 F::Starg0() ret } }
-1
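The IL above stores to argument 0 of an instance method, replacing this, which C# never emits for instance methods; that is why the corner needs a hand-written IL test. Ordinary parameter reassignment does produce starg, and in a static method the first parameter occupies slot 0; a sketch of that nearby (but not identical) case:

class StargSketch
{
    // In a static method parameters start at slot 0, so this assignment
    // typically compiles to starg.s 0 in debug builds. The instance-method
    // slot 0 ('this') case in the test above is expressible only in IL.
    static int AddHundred(int value)
    {
        value = value + 100;
        return value;
    }

    static void Main()
    {
        System.Console.WriteLine(AddHundred(-100)); // 0, the same a + 100 the IL ctor computes
    }
}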
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/tests/baseservices/threading/generics/syncdelegate/GThread15.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <AllowUnsafeBlocks>true</AllowUnsafeBlocks> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <ItemGroup> <Compile Include="thread15.cs" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <OutputType>Exe</OutputType> <AllowUnsafeBlocks>true</AllowUnsafeBlocks> <CLRTestPriority>1</CLRTestPriority> </PropertyGroup> <ItemGroup> <Compile Include="thread15.cs" /> </ItemGroup> </Project>
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd.Arm64/InsertSelectedScalar.Vector64.Int16.3.Vector64.Int16.3.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void InsertSelectedScalar_Vector64_Int16_3_Vector64_Int16_3() { var test = new InsertSelectedScalarTest__InsertSelectedScalar_Vector64_Int16_3_Vector64_Int16_3(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class InsertSelectedScalarTest__InsertSelectedScalar_Vector64_Int16_3_Vector64_Int16_3 { private struct DataTable { private byte[] inArray1; private byte[] inArray3; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle3; private GCHandle outHandle; private ulong alignment; public DataTable(Int16[] inArray1, Int16[] inArray3, Int16[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int16>(); int sizeOfinArray3 = inArray3.Length * Unsafe.SizeOf<Int16>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int16>(); if ((alignment != 16 
&& alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray3 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray3 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle3 = GCHandle.Alloc(this.inArray3, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int16, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray3Ptr), ref Unsafe.As<Int16, byte>(ref inArray3[0]), (uint)sizeOfinArray3); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray3Ptr => Align((byte*)(inHandle3.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle3.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector64<Int16> _fld1; public Vector64<Int16> _fld3; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref testStruct._fld1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>()); for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref testStruct._fld3), ref Unsafe.As<Int16, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>()); return testStruct; } public void RunStructFldScenario(InsertSelectedScalarTest__InsertSelectedScalar_Vector64_Int16_3_Vector64_Int16_3 testClass) { var result = AdvSimd.Arm64.InsertSelectedScalar(_fld1, 3, _fld3, 3); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld3, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(InsertSelectedScalarTest__InsertSelectedScalar_Vector64_Int16_3_Vector64_Int16_3 testClass) { fixed (Vector64<Int16>* pFld1 = &_fld1) fixed (Vector64<Int16>* pFld2 = &_fld3) { var result = AdvSimd.Arm64.InsertSelectedScalar( AdvSimd.LoadVector64((Int16*)pFld1), 3, AdvSimd.LoadVector64((Int16*)pFld2), 3 ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld3, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 8; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<Int16>>() / sizeof(Int16); private static readonly int Op3ElementCount = Unsafe.SizeOf<Vector64<Int16>>() / sizeof(Int16); private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<Int16>>() / sizeof(Int16); private static readonly byte ElementIndex1 = 3; private static readonly byte ElementIndex2 = 3; private static Int16[] _data1 = new Int16[Op1ElementCount]; private static Int16[] _data3 = new Int16[Op3ElementCount]; private static Vector64<Int16> _clsVar1; private static Vector64<Int16> _clsVar3; private Vector64<Int16> _fld1; private 
Vector64<Int16> _fld3; private DataTable _dataTable; static InsertSelectedScalarTest__InsertSelectedScalar_Vector64_Int16_3_Vector64_Int16_3() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref _clsVar1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>()); for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref _clsVar3), ref Unsafe.As<Int16, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>()); } public InsertSelectedScalarTest__InsertSelectedScalar_Vector64_Int16_3_Vector64_Int16_3() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref _fld1), ref Unsafe.As<Int16, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>()); for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt16(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Int16>, byte>(ref _fld3), ref Unsafe.As<Int16, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector64<Int16>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt16(); } for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetInt16(); } _dataTable = new DataTable(_data1, _data3, new Int16[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.Arm64.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.Arm64.InsertSelectedScalar( Unsafe.Read<Vector64<Int16>>(_dataTable.inArray1Ptr), 3, Unsafe.Read<Vector64<Int16>>(_dataTable.inArray3Ptr), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.Arm64.InsertSelectedScalar( AdvSimd.LoadVector64((Int16*)(_dataTable.inArray1Ptr)), 3, AdvSimd.LoadVector64((Int16*)(_dataTable.inArray3Ptr)), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd.Arm64).GetMethod(nameof(AdvSimd.Arm64.InsertSelectedScalar), new Type[] { typeof(Vector64<Int16>), typeof(byte), typeof(Vector64<Int16>), typeof(byte) }) .Invoke(null, new object[] { Unsafe.Read<Vector64<Int16>>(_dataTable.inArray1Ptr), ElementIndex1, Unsafe.Read<Vector64<Int16>>(_dataTable.inArray3Ptr), ElementIndex2 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Int16>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd.Arm64).GetMethod(nameof(AdvSimd.Arm64.InsertSelectedScalar), new Type[] { typeof(Vector64<Int16>), typeof(byte), typeof(Vector64<Int16>), typeof(byte) }) .Invoke(null, new object[] { AdvSimd.LoadVector64((Int16*)(_dataTable.inArray1Ptr)), ElementIndex1, 
AdvSimd.LoadVector64((Int16*)(_dataTable.inArray3Ptr)), ElementIndex2 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Int16>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.Arm64.InsertSelectedScalar( _clsVar1, 3, _clsVar3, 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar3, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector64<Int16>* pClsVar1 = &_clsVar1) fixed (Vector64<Int16>* pClsVar3 = &_clsVar3) { var result = AdvSimd.Arm64.InsertSelectedScalar( AdvSimd.LoadVector64((Int16*)(pClsVar1)), 3, AdvSimd.LoadVector64((Int16*)(pClsVar3)), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar3, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector64<Int16>>(_dataTable.inArray1Ptr); var op3 = Unsafe.Read<Vector64<Int16>>(_dataTable.inArray3Ptr); var result = AdvSimd.Arm64.InsertSelectedScalar(op1, 3, op3, 3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op3, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector64((Int16*)(_dataTable.inArray1Ptr)); var op3 = AdvSimd.LoadVector64((Int16*)(_dataTable.inArray3Ptr)); var result = AdvSimd.Arm64.InsertSelectedScalar(op1, 3, op3, 3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op3, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new InsertSelectedScalarTest__InsertSelectedScalar_Vector64_Int16_3_Vector64_Int16_3(); var result = AdvSimd.Arm64.InsertSelectedScalar(test._fld1, 3, test._fld3, 3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld3, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new InsertSelectedScalarTest__InsertSelectedScalar_Vector64_Int16_3_Vector64_Int16_3(); fixed (Vector64<Int16>* pFld1 = &test._fld1) fixed (Vector64<Int16>* pFld2 = &test._fld3) { var result = AdvSimd.Arm64.InsertSelectedScalar( AdvSimd.LoadVector64((Int16*)pFld1), 3, AdvSimd.LoadVector64((Int16*)pFld2), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld3, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.Arm64.InsertSelectedScalar(_fld1, 3, _fld3, 3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld3, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector64<Int16>* pFld1 = &_fld1) fixed (Vector64<Int16>* pFld2 = &_fld3) { var result = AdvSimd.Arm64.InsertSelectedScalar( AdvSimd.LoadVector64((Int16*)pFld1), 3, AdvSimd.LoadVector64((Int16*)pFld2), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld3, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.Arm64.InsertSelectedScalar(test._fld1, 3, test._fld3, 3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld3, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.Arm64.InsertSelectedScalar( AdvSimd.LoadVector64((Int16*)(&test._fld1)), 3, AdvSimd.LoadVector64((Int16*)(&test._fld3)), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld3, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector64<Int16> op1, Vector64<Int16> op3, void* result, [CallerMemberName] string method = "") { Int16[] inArray1 = new Int16[Op1ElementCount]; Int16[] inArray3 = new Int16[Op3ElementCount]; Int16[] outArray = new Int16[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int16, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<Int16, byte>(ref inArray3[0]), op3); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int16>>()); ValidateResult(inArray1, inArray3, outArray, method); } private void ValidateResult(void* op1, void* op3, void* result, [CallerMemberName] string method = "") { Int16[] inArray1 = new Int16[Op1ElementCount]; Int16[] inArray3 = new Int16[Op3ElementCount]; Int16[] outArray = new Int16[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector64<Int16>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref inArray3[0]), ref Unsafe.AsRef<byte>(op3), (uint)Unsafe.SizeOf<Vector64<Int16>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int16>>()); ValidateResult(inArray1, inArray3, outArray, method); } private void ValidateResult(Int16[] firstOp, Int16[] thirdOp, Int16[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.Insert(firstOp, ElementIndex1, thirdOp[ElementIndex2], i) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd.Arm64)}.{nameof(AdvSimd.Arm64.InsertSelectedScalar)}<Int16>(Vector64<Int16>, {3}, Vector64<Int16>, {3}): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($" thirdOp: ({string.Join(", ", thirdOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
AdvSimd.LoadVector64((Int16*)(_dataTable.inArray3Ptr)), ElementIndex2 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Int16>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.Arm64.InsertSelectedScalar( _clsVar1, 3, _clsVar3, 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar3, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector64<Int16>* pClsVar1 = &_clsVar1) fixed (Vector64<Int16>* pClsVar3 = &_clsVar3) { var result = AdvSimd.Arm64.InsertSelectedScalar( AdvSimd.LoadVector64((Int16*)(pClsVar1)), 3, AdvSimd.LoadVector64((Int16*)(pClsVar3)), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar3, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector64<Int16>>(_dataTable.inArray1Ptr); var op3 = Unsafe.Read<Vector64<Int16>>(_dataTable.inArray3Ptr); var result = AdvSimd.Arm64.InsertSelectedScalar(op1, 3, op3, 3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op3, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector64((Int16*)(_dataTable.inArray1Ptr)); var op3 = AdvSimd.LoadVector64((Int16*)(_dataTable.inArray3Ptr)); var result = AdvSimd.Arm64.InsertSelectedScalar(op1, 3, op3, 3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op3, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new InsertSelectedScalarTest__InsertSelectedScalar_Vector64_Int16_3_Vector64_Int16_3(); var result = AdvSimd.Arm64.InsertSelectedScalar(test._fld1, 3, test._fld3, 3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld3, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new InsertSelectedScalarTest__InsertSelectedScalar_Vector64_Int16_3_Vector64_Int16_3(); fixed (Vector64<Int16>* pFld1 = &test._fld1) fixed (Vector64<Int16>* pFld2 = &test._fld3) { var result = AdvSimd.Arm64.InsertSelectedScalar( AdvSimd.LoadVector64((Int16*)pFld1), 3, AdvSimd.LoadVector64((Int16*)pFld2), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld3, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.Arm64.InsertSelectedScalar(_fld1, 3, _fld3, 3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld3, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector64<Int16>* pFld1 = &_fld1) fixed (Vector64<Int16>* pFld2 = &_fld3) { var result = AdvSimd.Arm64.InsertSelectedScalar( AdvSimd.LoadVector64((Int16*)pFld1), 3, AdvSimd.LoadVector64((Int16*)pFld2), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld3, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.Arm64.InsertSelectedScalar(test._fld1, 3, test._fld3, 3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld3, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.Arm64.InsertSelectedScalar( AdvSimd.LoadVector64((Int16*)(&test._fld1)), 3, AdvSimd.LoadVector64((Int16*)(&test._fld3)), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld3, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector64<Int16> op1, Vector64<Int16> op3, void* result, [CallerMemberName] string method = "") { Int16[] inArray1 = new Int16[Op1ElementCount]; Int16[] inArray3 = new Int16[Op3ElementCount]; Int16[] outArray = new Int16[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int16, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<Int16, byte>(ref inArray3[0]), op3); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int16>>()); ValidateResult(inArray1, inArray3, outArray, method); } private void ValidateResult(void* op1, void* op3, void* result, [CallerMemberName] string method = "") { Int16[] inArray1 = new Int16[Op1ElementCount]; Int16[] inArray3 = new Int16[Op3ElementCount]; Int16[] outArray = new Int16[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector64<Int16>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref inArray3[0]), ref Unsafe.AsRef<byte>(op3), (uint)Unsafe.SizeOf<Vector64<Int16>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int16, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Int16>>()); ValidateResult(inArray1, inArray3, outArray, method); } private void ValidateResult(Int16[] firstOp, Int16[] thirdOp, Int16[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.Insert(firstOp, ElementIndex1, thirdOp[ElementIndex2], i) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd.Arm64)}.{nameof(AdvSimd.Arm64.InsertSelectedScalar)}<Int16>(Vector64<Int16>, {3}, Vector64<Int16>, {3}): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($" thirdOp: ({string.Join(", ", thirdOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
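The validation logic above reduces to a simple scalar rule: the result equals the first vector with the lane at ElementIndex1 overwritten by the lane of the third vector selected by ElementIndex2. A minimal standalone C# sketch of that reference behavior (the ScalarInsert helper and the Main harness are illustrative, not part of the test library):

using System;

public static class InsertReference
{
    // Scalar model of AdvSimd.Arm64.InsertSelectedScalar(v1, idx1, v3, idx2):
    // copy v1, then overwrite lane idx1 with v3[idx2].
    public static short[] ScalarInsert(short[] v1, int idx1, short[] v3, int idx2)
    {
        var result = (short[])v1.Clone();
        result[idx1] = v3[idx2];
        return result;
    }

    public static void Main()
    {
        short[] a = { 10, 11, 12, 13 }; // a Vector64<Int16> holds four lanes
        short[] b = { 20, 21, 22, 23 };
        Console.WriteLine(string.Join(", ", ScalarInsert(a, 3, b, 3))); // 10, 11, 12, 23
    }
}

With both immediates fixed at 3, as in this test, only the last lane of the destination changes, which is exactly what Helpers.Insert checks element by element.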
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/coreclr/tools/superpmi/mcs/removedup.h
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

//----------------------------------------------------------
// RemoveDup.h - Functions to remove dups from a method context hive (MCH)
//----------------------------------------------------------
#ifndef _RemoveDup
#define _RemoveDup

#include "methodcontext.h"
#include "lightweightmap.h"

class RemoveDup
{
public:
    RemoveDup()
        : m_stripCR(false)
        , m_legacyCompare(false)
        , m_cleanup(false)
        , m_inFile(nullptr)
        , m_inFileLegacy(nullptr)
    {}

    bool Initialize(bool stripCR = false, bool legacyCompare = false, bool cleanup = true)
    {
        m_stripCR       = stripCR;
        m_legacyCompare = legacyCompare;
        m_cleanup       = cleanup;
        m_inFile        = nullptr;
        m_inFileLegacy  = nullptr;
        return true;
    }

    ~RemoveDup();

    bool CopyAndRemoveDups(const char* nameOfInput, HANDLE hFileOut);

private:
    bool m_stripCR;       // 'true' if we remove CompileResults when removing duplicates.
    bool m_legacyCompare; // 'true' to use the legacy comparer.

    // If false, we don't spend time cleaning up the `m_inFile` and `m_inFileLegacy`
    // data structures. Only set it to `false` if you're ok with memory leaks, e.g.,
    // if the process will exit soon afterwards.
    bool m_cleanup;

    // We use a hash to limit the number of comparisons we need to do.
    // The first level key to our hash map is ILCodeSize and the second
    // level map key is just an index and the value is an existing MC Hash.
    LightWeightMap<int, DenseLightWeightMap<char*>*>* m_inFile;
    LightWeightMap<int, DenseLightWeightMap<MethodContext*>*>* m_inFileLegacy;

    bool unique(MethodContext* mc);
    bool uniqueLegacy(MethodContext* mc);
};

#endif
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

//----------------------------------------------------------
// RemoveDup.h - Functions to remove dups from a method context hive (MCH)
//----------------------------------------------------------
#ifndef _RemoveDup
#define _RemoveDup

#include "methodcontext.h"
#include "lightweightmap.h"

class RemoveDup
{
public:
    RemoveDup()
        : m_stripCR(false)
        , m_legacyCompare(false)
        , m_cleanup(false)
        , m_inFile(nullptr)
        , m_inFileLegacy(nullptr)
    {}

    bool Initialize(bool stripCR = false, bool legacyCompare = false, bool cleanup = true)
    {
        m_stripCR       = stripCR;
        m_legacyCompare = legacyCompare;
        m_cleanup       = cleanup;
        m_inFile        = nullptr;
        m_inFileLegacy  = nullptr;
        return true;
    }

    ~RemoveDup();

    bool CopyAndRemoveDups(const char* nameOfInput, HANDLE hFileOut);

private:
    bool m_stripCR;       // 'true' if we remove CompileResults when removing duplicates.
    bool m_legacyCompare; // 'true' to use the legacy comparer.

    // If false, we don't spend time cleaning up the `m_inFile` and `m_inFileLegacy`
    // data structures. Only set it to `false` if you're ok with memory leaks, e.g.,
    // if the process will exit soon afterwards.
    bool m_cleanup;

    // We use a hash to limit the number of comparisons we need to do.
    // The first level key to our hash map is ILCodeSize and the second
    // level map key is just an index and the value is an existing MC Hash.
    LightWeightMap<int, DenseLightWeightMap<char*>*>* m_inFile;
    LightWeightMap<int, DenseLightWeightMap<MethodContext*>*>* m_inFileLegacy;

    bool unique(MethodContext* mc);
    bool uniqueLegacy(MethodContext* mc);
};

#endif
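The comment block in this header outlines the dedup strategy: bucket method contexts by ILCodeSize first, then compare a content hash within the bucket, so most pairs never need a full comparison. A rough C# sketch of that two-level idea (the class and member names here are stand-ins, not the SuperPMI implementation, which is C++ and built on LightWeightMap):

using System;
using System.Collections.Generic;

public sealed class TwoLevelDedup
{
    // First-level key: IL code size; second level: the set of content hashes seen.
    private readonly Dictionary<int, HashSet<string>> _seen = new Dictionary<int, HashSet<string>>();

    // Returns true the first time a (size, hash) pair is observed,
    // mirroring the role of RemoveDup::unique.
    public bool IsUnique(int ilCodeSize, string methodContextHash)
    {
        if (!_seen.TryGetValue(ilCodeSize, out var hashes))
        {
            hashes = new HashSet<string>();
            _seen[ilCodeSize] = hashes;
        }
        return hashes.Add(methodContextHash);
    }
}

In CopyAndRemoveDups terms, only contexts for which this predicate returns true would be copied to the output file; everything else is treated as a duplicate and dropped.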
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/tests/Loader/classloader/TypeGeneratorTests/TypeGeneratorTest1285/Generated1285.ilproj
<Project Sdk="Microsoft.NET.Sdk.IL">
  <PropertyGroup>
    <CLRTestPriority>1</CLRTestPriority>
  </PropertyGroup>
  <ItemGroup>
    <Compile Include="Generated1285.il" />
  </ItemGroup>
  <ItemGroup>
    <ProjectReference Include="..\TestFramework\TestFramework.csproj" />
  </ItemGroup>
</Project>
<Project Sdk="Microsoft.NET.Sdk.IL">
  <PropertyGroup>
    <CLRTestPriority>1</CLRTestPriority>
  </PropertyGroup>
  <ItemGroup>
    <Compile Include="Generated1285.il" />
  </ItemGroup>
  <ItemGroup>
    <ProjectReference Include="..\TestFramework\TestFramework.csproj" />
  </ItemGroup>
</Project>
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/tests/JIT/HardwareIntrinsics/Arm/Dp/DotProductBySelectedQuadruplet.Vector128.Int32.Vector128.SByte.3.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void DotProductBySelectedQuadruplet_Vector128_Int32_Vector128_SByte_3() { var test = new SimpleTernaryOpTest__DotProductBySelectedQuadruplet_Vector128_Int32_Vector128_SByte_3(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleTernaryOpTest__DotProductBySelectedQuadruplet_Vector128_Int32_Vector128_SByte_3 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] inArray3; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle inHandle3; private GCHandle outHandle; private ulong alignment; public DataTable(Int32[] inArray1, SByte[] inArray2, SByte[] inArray3, Int32[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int32>(); int sizeOfinArray2 = inArray2.Length * 
Unsafe.SizeOf<SByte>(); int sizeOfinArray3 = inArray3.Length * Unsafe.SizeOf<SByte>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int32>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfinArray3 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.inArray3 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.inHandle3 = GCHandle.Alloc(this.inArray3, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int32, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<SByte, byte>(ref inArray2[0]), (uint)sizeOfinArray2); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray3Ptr), ref Unsafe.As<SByte, byte>(ref inArray3[0]), (uint)sizeOfinArray3); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray3Ptr => Align((byte*)(inHandle3.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); inHandle3.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<Int32> _fld1; public Vector128<SByte> _fld2; public Vector128<SByte> _fld3; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref testStruct._fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref testStruct._fld2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>()); for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref testStruct._fld3), ref Unsafe.As<SByte, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>()); return testStruct; } public void RunStructFldScenario(SimpleTernaryOpTest__DotProductBySelectedQuadruplet_Vector128_Int32_Vector128_SByte_3 testClass) { var result = Dp.DotProductBySelectedQuadruplet(_fld1, _fld2, _fld3, 3); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(SimpleTernaryOpTest__DotProductBySelectedQuadruplet_Vector128_Int32_Vector128_SByte_3 testClass) { fixed (Vector128<Int32>* pFld1 = &_fld1) fixed (Vector128<SByte>* pFld2 = &_fld2) fixed (Vector128<SByte>* pFld3 = &_fld3) { var result = 
Dp.DotProductBySelectedQuadruplet( AdvSimd.LoadVector128((Int32*)(pFld1)), AdvSimd.LoadVector128((SByte*)(pFld2)), AdvSimd.LoadVector128((SByte*)(pFld3)), 3 ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<SByte>>() / sizeof(SByte); private static readonly int Op3ElementCount = Unsafe.SizeOf<Vector128<SByte>>() / sizeof(SByte); private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32); private static readonly byte Imm = 3; private static Int32[] _data1 = new Int32[Op1ElementCount]; private static SByte[] _data2 = new SByte[Op2ElementCount]; private static SByte[] _data3 = new SByte[Op3ElementCount]; private static Vector128<Int32> _clsVar1; private static Vector128<SByte> _clsVar2; private static Vector128<SByte> _clsVar3; private Vector128<Int32> _fld1; private Vector128<SByte> _fld2; private Vector128<SByte> _fld3; private DataTable _dataTable; static SimpleTernaryOpTest__DotProductBySelectedQuadruplet_Vector128_Int32_Vector128_SByte_3() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _clsVar1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref _clsVar2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>()); for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref _clsVar3), ref Unsafe.As<SByte, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>()); } public SimpleTernaryOpTest__DotProductBySelectedQuadruplet_Vector128_Int32_Vector128_SByte_3() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref _fld2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>()); for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref _fld3), ref Unsafe.As<SByte, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetSByte(); } _dataTable = new DataTable(_data1, _data2, _data3, new Int32[RetElementCount], LargestVectorSize); } public bool IsSupported => Dp.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = 
Dp.DotProductBySelectedQuadruplet( Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<SByte>>(_dataTable.inArray2Ptr), Unsafe.Read<Vector128<SByte>>(_dataTable.inArray3Ptr), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = Dp.DotProductBySelectedQuadruplet( AdvSimd.LoadVector128((Int32*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector128((SByte*)(_dataTable.inArray2Ptr)), AdvSimd.LoadVector128((SByte*)(_dataTable.inArray3Ptr)), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(Dp).GetMethod(nameof(Dp.DotProductBySelectedQuadruplet), new Type[] { typeof(Vector128<Int32>), typeof(Vector128<SByte>), typeof(Vector128<SByte>), typeof(byte) }) .Invoke(null, new object[] { Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<SByte>>(_dataTable.inArray2Ptr), Unsafe.Read<Vector128<SByte>>(_dataTable.inArray3Ptr), (byte)3 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int32>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(Dp).GetMethod(nameof(Dp.DotProductBySelectedQuadruplet), new Type[] { typeof(Vector128<Int32>), typeof(Vector128<SByte>), typeof(Vector128<SByte>), typeof(byte) }) .Invoke(null, new object[] { AdvSimd.LoadVector128((Int32*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector128((SByte*)(_dataTable.inArray2Ptr)), AdvSimd.LoadVector128((SByte*)(_dataTable.inArray3Ptr)), (byte)3 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int32>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = Dp.DotProductBySelectedQuadruplet( _clsVar1, _clsVar2, _clsVar3, 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector128<Int32>* pClsVar1 = &_clsVar1) fixed (Vector128<SByte>* pClsVar2 = &_clsVar2) fixed (Vector128<SByte>* pClsVar3 = &_clsVar3) { var result = Dp.DotProductBySelectedQuadruplet( AdvSimd.LoadVector128((Int32*)(pClsVar1)), AdvSimd.LoadVector128((SByte*)(pClsVar2)), AdvSimd.LoadVector128((SByte*)(pClsVar3)), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector128<SByte>>(_dataTable.inArray2Ptr); var op3 = Unsafe.Read<Vector128<SByte>>(_dataTable.inArray3Ptr); var result = Dp.DotProductBySelectedQuadruplet(op1, op2, op3, 3); Unsafe.Write(_dataTable.outArrayPtr, result); 
ValidateResult(op1, op2, op3, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector128((Int32*)(_dataTable.inArray1Ptr)); var op2 = AdvSimd.LoadVector128((SByte*)(_dataTable.inArray2Ptr)); var op3 = AdvSimd.LoadVector128((SByte*)(_dataTable.inArray3Ptr)); var result = Dp.DotProductBySelectedQuadruplet(op1, op2, op3, 3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, op3, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleTernaryOpTest__DotProductBySelectedQuadruplet_Vector128_Int32_Vector128_SByte_3(); var result = Dp.DotProductBySelectedQuadruplet(test._fld1, test._fld2, test._fld3, 3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new SimpleTernaryOpTest__DotProductBySelectedQuadruplet_Vector128_Int32_Vector128_SByte_3(); fixed (Vector128<Int32>* pFld1 = &test._fld1) fixed (Vector128<SByte>* pFld2 = &test._fld2) fixed (Vector128<SByte>* pFld3 = &test._fld3) { var result = Dp.DotProductBySelectedQuadruplet( AdvSimd.LoadVector128((Int32*)(pFld1)), AdvSimd.LoadVector128((SByte*)(pFld2)), AdvSimd.LoadVector128((SByte*)(pFld3)), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = Dp.DotProductBySelectedQuadruplet(_fld1, _fld2, _fld3, 3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector128<Int32>* pFld1 = &_fld1) fixed (Vector128<SByte>* pFld2 = &_fld2) fixed (Vector128<SByte>* pFld3 = &_fld3) { var result = Dp.DotProductBySelectedQuadruplet( AdvSimd.LoadVector128((Int32*)(pFld1)), AdvSimd.LoadVector128((SByte*)(pFld2)), AdvSimd.LoadVector128((SByte*)(pFld3)), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = Dp.DotProductBySelectedQuadruplet(test._fld1, test._fld2, test._fld3, 3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = Dp.DotProductBySelectedQuadruplet( AdvSimd.LoadVector128((Int32*)(&test._fld1)), AdvSimd.LoadVector128((SByte*)(&test._fld2)), AdvSimd.LoadVector128((SByte*)(&test._fld3)), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector128<Int32> op1, Vector128<SByte> op2, Vector128<SByte> op3, void* result, [CallerMemberName] string method = "") { Int32[] inArray1 = new Int32[Op1ElementCount]; SByte[] inArray2 = new SByte[Op2ElementCount]; SByte[] inArray3 = new SByte[Op3ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref inArray2[0]), op2); Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref inArray3[0]), op3); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int32>>()); ValidateResult(inArray1, inArray2, inArray3, outArray, method); } private void ValidateResult(void* op1, void* op2, void* op3, void* result, [CallerMemberName] string method = "") { Int32[] inArray1 = new Int32[Op1ElementCount]; SByte[] inArray2 = new SByte[Op2ElementCount]; SByte[] inArray3 = new SByte[Op3ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<Int32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<SByte>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref inArray3[0]), ref Unsafe.AsRef<byte>(op3), (uint)Unsafe.SizeOf<Vector128<SByte>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int32>>()); ValidateResult(inArray1, inArray2, inArray3, outArray, method); } private void ValidateResult(Int32[] firstOp, SByte[] secondOp, SByte[] thirdOp, Int32[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.DotProduct(firstOp[i], secondOp, 4 * i, thirdOp, 4 * Imm) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Dp)}.{nameof(Dp.DotProductBySelectedQuadruplet)}<Int32>(Vector128<Int32>, Vector128<SByte>, Vector128<SByte>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($"secondOp: ({string.Join(", ", secondOp)})"); TestLibrary.TestFramework.LogInformation($" thirdOp: ({string.Join(", ", thirdOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void DotProductBySelectedQuadruplet_Vector128_Int32_Vector128_SByte_3() { var test = new SimpleTernaryOpTest__DotProductBySelectedQuadruplet_Vector128_Int32_Vector128_SByte_3(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleTernaryOpTest__DotProductBySelectedQuadruplet_Vector128_Int32_Vector128_SByte_3 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] inArray3; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle inHandle3; private GCHandle outHandle; private ulong alignment; public DataTable(Int32[] inArray1, SByte[] inArray2, SByte[] inArray3, Int32[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Int32>(); int sizeOfinArray2 = inArray2.Length * 
Unsafe.SizeOf<SByte>(); int sizeOfinArray3 = inArray3.Length * Unsafe.SizeOf<SByte>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Int32>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfinArray3 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.inArray3 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.inHandle3 = GCHandle.Alloc(this.inArray3, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Int32, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<SByte, byte>(ref inArray2[0]), (uint)sizeOfinArray2); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray3Ptr), ref Unsafe.As<SByte, byte>(ref inArray3[0]), (uint)sizeOfinArray3); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray3Ptr => Align((byte*)(inHandle3.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); inHandle3.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<Int32> _fld1; public Vector128<SByte> _fld2; public Vector128<SByte> _fld3; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref testStruct._fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref testStruct._fld2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>()); for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref testStruct._fld3), ref Unsafe.As<SByte, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>()); return testStruct; } public void RunStructFldScenario(SimpleTernaryOpTest__DotProductBySelectedQuadruplet_Vector128_Int32_Vector128_SByte_3 testClass) { var result = Dp.DotProductBySelectedQuadruplet(_fld1, _fld2, _fld3, 3); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(SimpleTernaryOpTest__DotProductBySelectedQuadruplet_Vector128_Int32_Vector128_SByte_3 testClass) { fixed (Vector128<Int32>* pFld1 = &_fld1) fixed (Vector128<SByte>* pFld2 = &_fld2) fixed (Vector128<SByte>* pFld3 = &_fld3) { var result = 
Dp.DotProductBySelectedQuadruplet( AdvSimd.LoadVector128((Int32*)(pFld1)), AdvSimd.LoadVector128((SByte*)(pFld2)), AdvSimd.LoadVector128((SByte*)(pFld3)), 3 ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, _fld3, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<SByte>>() / sizeof(SByte); private static readonly int Op3ElementCount = Unsafe.SizeOf<Vector128<SByte>>() / sizeof(SByte); private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32); private static readonly byte Imm = 3; private static Int32[] _data1 = new Int32[Op1ElementCount]; private static SByte[] _data2 = new SByte[Op2ElementCount]; private static SByte[] _data3 = new SByte[Op3ElementCount]; private static Vector128<Int32> _clsVar1; private static Vector128<SByte> _clsVar2; private static Vector128<SByte> _clsVar3; private Vector128<Int32> _fld1; private Vector128<SByte> _fld2; private Vector128<SByte> _fld3; private DataTable _dataTable; static SimpleTernaryOpTest__DotProductBySelectedQuadruplet_Vector128_Int32_Vector128_SByte_3() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _clsVar1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref _clsVar2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>()); for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref _clsVar3), ref Unsafe.As<SByte, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>()); } public SimpleTernaryOpTest__DotProductBySelectedQuadruplet_Vector128_Int32_Vector128_SByte_3() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Int32>, byte>(ref _fld1), ref Unsafe.As<Int32, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Int32>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref _fld2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>()); for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref _fld3), ref Unsafe.As<SByte, byte>(ref _data3[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetInt32(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } for (var i = 0; i < Op3ElementCount; i++) { _data3[i] = TestLibrary.Generator.GetSByte(); } _dataTable = new DataTable(_data1, _data2, _data3, new Int32[RetElementCount], LargestVectorSize); } public bool IsSupported => Dp.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = 
Dp.DotProductBySelectedQuadruplet( Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<SByte>>(_dataTable.inArray2Ptr), Unsafe.Read<Vector128<SByte>>(_dataTable.inArray3Ptr), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = Dp.DotProductBySelectedQuadruplet( AdvSimd.LoadVector128((Int32*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector128((SByte*)(_dataTable.inArray2Ptr)), AdvSimd.LoadVector128((SByte*)(_dataTable.inArray3Ptr)), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(Dp).GetMethod(nameof(Dp.DotProductBySelectedQuadruplet), new Type[] { typeof(Vector128<Int32>), typeof(Vector128<SByte>), typeof(Vector128<SByte>), typeof(byte) }) .Invoke(null, new object[] { Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<SByte>>(_dataTable.inArray2Ptr), Unsafe.Read<Vector128<SByte>>(_dataTable.inArray3Ptr), (byte)3 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int32>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(Dp).GetMethod(nameof(Dp.DotProductBySelectedQuadruplet), new Type[] { typeof(Vector128<Int32>), typeof(Vector128<SByte>), typeof(Vector128<SByte>), typeof(byte) }) .Invoke(null, new object[] { AdvSimd.LoadVector128((Int32*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector128((SByte*)(_dataTable.inArray2Ptr)), AdvSimd.LoadVector128((SByte*)(_dataTable.inArray3Ptr)), (byte)3 }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Int32>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.inArray3Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = Dp.DotProductBySelectedQuadruplet( _clsVar1, _clsVar2, _clsVar3, 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector128<Int32>* pClsVar1 = &_clsVar1) fixed (Vector128<SByte>* pClsVar2 = &_clsVar2) fixed (Vector128<SByte>* pClsVar3 = &_clsVar3) { var result = Dp.DotProductBySelectedQuadruplet( AdvSimd.LoadVector128((Int32*)(pClsVar1)), AdvSimd.LoadVector128((SByte*)(pClsVar2)), AdvSimd.LoadVector128((SByte*)(pClsVar3)), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _clsVar3, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<Int32>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector128<SByte>>(_dataTable.inArray2Ptr); var op3 = Unsafe.Read<Vector128<SByte>>(_dataTable.inArray3Ptr); var result = Dp.DotProductBySelectedQuadruplet(op1, op2, op3, 3); Unsafe.Write(_dataTable.outArrayPtr, result); 
ValidateResult(op1, op2, op3, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector128((Int32*)(_dataTable.inArray1Ptr)); var op2 = AdvSimd.LoadVector128((SByte*)(_dataTable.inArray2Ptr)); var op3 = AdvSimd.LoadVector128((SByte*)(_dataTable.inArray3Ptr)); var result = Dp.DotProductBySelectedQuadruplet(op1, op2, op3, 3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, op3, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleTernaryOpTest__DotProductBySelectedQuadruplet_Vector128_Int32_Vector128_SByte_3(); var result = Dp.DotProductBySelectedQuadruplet(test._fld1, test._fld2, test._fld3, 3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new SimpleTernaryOpTest__DotProductBySelectedQuadruplet_Vector128_Int32_Vector128_SByte_3(); fixed (Vector128<Int32>* pFld1 = &test._fld1) fixed (Vector128<SByte>* pFld2 = &test._fld2) fixed (Vector128<SByte>* pFld3 = &test._fld3) { var result = Dp.DotProductBySelectedQuadruplet( AdvSimd.LoadVector128((Int32*)(pFld1)), AdvSimd.LoadVector128((SByte*)(pFld2)), AdvSimd.LoadVector128((SByte*)(pFld3)), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = Dp.DotProductBySelectedQuadruplet(_fld1, _fld2, _fld3, 3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector128<Int32>* pFld1 = &_fld1) fixed (Vector128<SByte>* pFld2 = &_fld2) fixed (Vector128<SByte>* pFld3 = &_fld3) { var result = Dp.DotProductBySelectedQuadruplet( AdvSimd.LoadVector128((Int32*)(pFld1)), AdvSimd.LoadVector128((SByte*)(pFld2)), AdvSimd.LoadVector128((SByte*)(pFld3)), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _fld3, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = Dp.DotProductBySelectedQuadruplet(test._fld1, test._fld2, test._fld3, 3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = Dp.DotProductBySelectedQuadruplet( AdvSimd.LoadVector128((Int32*)(&test._fld1)), AdvSimd.LoadVector128((SByte*)(&test._fld2)), AdvSimd.LoadVector128((SByte*)(&test._fld3)), 3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, test._fld3, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector128<Int32> op1, Vector128<SByte> op2, Vector128<SByte> op3, void* result, [CallerMemberName] string method = "") { Int32[] inArray1 = new Int32[Op1ElementCount]; SByte[] inArray2 = new SByte[Op2ElementCount]; SByte[] inArray3 = new SByte[Op3ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref inArray2[0]), op2); Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref inArray3[0]), op3); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int32>>()); ValidateResult(inArray1, inArray2, inArray3, outArray, method); } private void ValidateResult(void* op1, void* op2, void* op3, void* result, [CallerMemberName] string method = "") { Int32[] inArray1 = new Int32[Op1ElementCount]; SByte[] inArray2 = new SByte[Op2ElementCount]; SByte[] inArray3 = new SByte[Op3ElementCount]; Int32[] outArray = new Int32[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<Int32>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<SByte>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref inArray3[0]), ref Unsafe.AsRef<byte>(op3), (uint)Unsafe.SizeOf<Vector128<SByte>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Int32, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Int32>>()); ValidateResult(inArray1, inArray2, inArray3, outArray, method); } private void ValidateResult(Int32[] firstOp, SByte[] secondOp, SByte[] thirdOp, Int32[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.DotProduct(firstOp[i], secondOp, 4 * i, thirdOp, 4 * Imm) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Dp)}.{nameof(Dp.DotProductBySelectedQuadruplet)}<Int32>(Vector128<Int32>, Vector128<SByte>, Vector128<SByte>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($"secondOp: ({string.Join(", ", secondOp)})"); TestLibrary.TestFramework.LogInformation($" thirdOp: ({string.Join(", ", thirdOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
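For reference, the per-lane check in ValidateResult above calls Helpers.DotProduct(firstOp[i], secondOp, 4 * i, thirdOp, 4 * Imm), which matches the usual by-element dot-product shape: each Int32 lane adds four SByte products, and the immediate selects one quadruplet of the third operand that is reused for every lane. A minimal scalar sketch of that shape (DotProductLane is a hypothetical name, not the real Helpers API):

static class DotProductReference
{
    // Accumulates four signed 8-bit products into a 32-bit addend, the shape
    // the test's Helpers.DotProduct(addend, left, leftOffset, right, rightOffset) call implies.
    public static int DotProductLane(int addend, sbyte[] left, int leftOffset, sbyte[] right, int rightOffset)
    {
        int sum = addend;
        for (int j = 0; j < 4; j++)
        {
            sum += left[leftOffset + j] * right[rightOffset + j];
        }
        return sum;
    }
}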
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/tests/JIT/CodeGenBringUpTests/DblAddConst.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // using System; using System.Runtime.CompilerServices; public class BringUpTest_DblAddConst { const int Pass = 100; const int Fail = -1; [MethodImplAttribute(MethodImplOptions.NoInlining)] public static double DblAddConst(double x) { return x+1; } public static int Main() { double y = DblAddConst(13d); Console.WriteLine(y); if (System.Math.Abs(y-14d) <= Double.Epsilon) return Pass; else return Fail; } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // using System; using System.Runtime.CompilerServices; public class BringUpTest_DblAddConst { const int Pass = 100; const int Fail = -1; [MethodImplAttribute(MethodImplOptions.NoInlining)] public static double DblAddConst(double x) { return x+1; } public static int Main() { double y = DblAddConst(13d); Console.WriteLine(y); if (System.Math.Abs(y-14d) <= Double.Epsilon) return Pass; else return Fail; } }
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/libraries/Microsoft.Extensions.DependencyInjection/tests/DI.Tests/ServiceCollectionContainerBuilderTestExtensions.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using Microsoft.Extensions.DependencyInjection.ServiceLookup; namespace Microsoft.Extensions.DependencyInjection.Tests { internal static class ServiceCollectionContainerBuilderTestExtensions { public static ServiceProvider BuildServiceProvider(this IServiceCollection services, ServiceProviderMode mode, ServiceProviderOptions options = null) { options ??= ServiceProviderOptions.Default; if (mode == ServiceProviderMode.Default) { return services.BuildServiceProvider(options); } var provider = new ServiceProvider(services, ServiceProviderOptions.Default); ServiceProviderEngine engine = mode switch { ServiceProviderMode.Dynamic => new DynamicServiceProviderEngine(provider), ServiceProviderMode.Runtime => RuntimeServiceProviderEngine.Instance, ServiceProviderMode.Expressions => new ExpressionsServiceProviderEngine(provider), ServiceProviderMode.ILEmit => new ILEmitServiceProviderEngine(provider), _ => throw new NotSupportedException() }; provider._engine = engine; return provider; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using Microsoft.Extensions.DependencyInjection.ServiceLookup; namespace Microsoft.Extensions.DependencyInjection.Tests { internal static class ServiceCollectionContainerBuilderTestExtensions { public static ServiceProvider BuildServiceProvider(this IServiceCollection services, ServiceProviderMode mode, ServiceProviderOptions options = null) { options ??= ServiceProviderOptions.Default; if (mode == ServiceProviderMode.Default) { return services.BuildServiceProvider(options); } var provider = new ServiceProvider(services, ServiceProviderOptions.Default); ServiceProviderEngine engine = mode switch { ServiceProviderMode.Dynamic => new DynamicServiceProviderEngine(provider), ServiceProviderMode.Runtime => RuntimeServiceProviderEngine.Instance, ServiceProviderMode.Expressions => new ExpressionsServiceProviderEngine(provider), ServiceProviderMode.ILEmit => new ILEmitServiceProviderEngine(provider), _ => throw new NotSupportedException() }; provider._engine = engine; return provider; } } }
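As a usage sketch, the extension above lets a test pin a specific resolution engine instead of relying on the default selection; something like the following (IFakeService/FakeService are hypothetical placeholder test types, and ServiceProviderMode is internal to the product assembly, visible to the tests via InternalsVisibleTo):

using Microsoft.Extensions.DependencyInjection;
using Microsoft.Extensions.DependencyInjection.ServiceLookup;

IServiceCollection services = new ServiceCollection();
services.AddSingleton<IFakeService, FakeService>();  // hypothetical test types
// Bypasses the engine-selection heuristics and pins the ILEmit engine:
ServiceProvider provider = services.BuildServiceProvider(ServiceProviderMode.ILEmit);
IFakeService resolved = provider.GetRequiredService<IFakeService>();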
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/mono/mono/tests/verifier/make_double_nesting_test.sh
#! /bin/sh SED="sed" if [ `which gsed 2> /dev/null` ]; then SED="gsed" fi TEST_NAME=$1 TEST_VALIDITY=$2 TEST_OP=$3 TEST_CLASS_ACCESS=$4 TEST_NESTED_ACCESS=$5 TEST_MEMBER_ACCESS=$6 TEST_BASE_EXTENDS=$7 TEST_NESTED_EXTENDS=$8 TEST_LOAD_BASE=$9 if [ "$TEST_BASE_EXTENDS" = "yes" ]; then TEST_BASE_EXTENDS="extends Root" TEST_BASE_CONSTRUCTOR="call instance void Root::.ctor()" else TEST_BASE_EXTENDS="extends [mscorlib]System.Object" TEST_BASE_CONSTRUCTOR="call instance void object::.ctor()" fi if [ "$TEST_NESTED_EXTENDS" = "yes" ]; then TEST_NESTED_EXTENDS="extends Root\/Nested" TEST_NESTED_CONSTRUCTOR="call instance void Root\/Nested::.ctor()" else TEST_NESTED_EXTENDS="extends [mscorlib]System.Object" TEST_NESTED_CONSTRUCTOR="call instance void object::.ctor()" fi if [ "$TEST_LOAD_BASE" = "yes" ]; then TEST_LOAD_REF="ldarg.0" else TEST_LOAD_REF="call class Root\/Nested Root::Create ()" fi TEST_NAME=${TEST_VALIDITY}_${TEST_NAME} TEST_FILE=${TEST_NAME}_generated.il echo $TEST_FILE $SED -e "s/CLASS_ACCESS/${TEST_CLASS_ACCESS}/g" -e "s/NESTED_ACCESS/${TEST_NESTED_ACCESS}/g" -e "s/MEMBER_ACCESS/${TEST_MEMBER_ACCESS}/g" -e "s/ROOT_EXTENDS/${TEST_BASE_EXTENDS}/g" -e "s/ROOT_CONSTRUCTOR/${TEST_BASE_CONSTRUCTOR}/g" -e "s/NESTED_EXTENDS/${TEST_NESTED_EXTENDS}/g" -e "s/NESTED_CONSTRUCTOR/${TEST_NESTED_CONSTRUCTOR}/g" -e "s/LOAD_REF/${TEST_LOAD_REF}/g" -e "s/OPCODE/${TEST_OP}/g" -e "s/VALIDITY/${TEST_VALIDITY}/g" > $TEST_FILE <<//EOF .assembly '${TEST_NAME}_generated' { .hash algorithm 0x00008004 .ver 0:0:0:0 } // VALIDITY CIL which breaks the ECMA-335 rules. // this CIL should fail verification by a conforming CLI verifier. .assembly extern mscorlib { .ver 1:0:5000:0 .publickeytoken = (B7 7A 5C 56 19 34 E0 89 ) // .z\V.4.. } .class CLASS_ACCESS Root extends [mscorlib]System.Object { .method public static class Root/Nested Create () { .maxstack 8 newobj instance void class Root/Nested::.ctor() ret } .method public hidebysig specialname rtspecialname instance default void .ctor () cil managed { .maxstack 8 ldarg.0 call instance void object::.ctor() ret } .class nested NESTED_ACCESS Nested extends [mscorlib]System.Object { .field MEMBER_ACCESS int32 fld .field MEMBER_ACCESS static int32 sfld .method public hidebysig specialname rtspecialname instance default void .ctor () cil managed { .maxstack 8 ldarg.0 call instance void object::.ctor() ret } .method MEMBER_ACCESS virtual hidebysig newslot instance int32 Target () { .maxstack 8 ldc.i4.0 ret } } } .class public Extension ROOT_EXTENDS { .method public static void Execute () { .maxstack 8 newobj instance void class Extension/MyNested::.ctor() call instance void class Extension/MyNested::Method() ret } .method public hidebysig specialname rtspecialname instance default void .ctor () cil managed { .maxstack 8 ldarg.0 ROOT_CONSTRUCTOR ret } .class nested MEMBER_ACCESS MyNested NESTED_EXTENDS { .method MEMBER_ACCESS virtual hidebysig instance int32 Target () { .maxstack 8 ldc.i4.0 ret } .method public hidebysig specialname rtspecialname instance default void .ctor () cil managed { .maxstack 8 ldarg.0 NESTED_CONSTRUCTOR ret } .method public void Method () { .maxstack 8 LOAD_REF OPCODE // VALIDITY. pop ret } } } .method public static int32 Main() cil managed { .entrypoint .maxstack 8 call void class Extension::Execute () ldc.i4.0 ret } //EOF
#! /bin/sh SED="sed" if [ `which gsed 2> /dev/null` ]; then SED="gsed" fi TEST_NAME=$1 TEST_VALIDITY=$2 TEST_OP=$3 TEST_CLASS_ACCESS=$4 TEST_NESTED_ACCESS=$5 TEST_MEMBER_ACCESS=$6 TEST_BASE_EXTENDS=$7 TEST_NESTED_EXTENDS=$8 TEST_LOAD_BASE=$9 if [ "$TEST_BASE_EXTENDS" = "yes" ]; then TEST_BASE_EXTENDS="extends Root" TEST_BASE_CONSTRUCTOR="call instance void Root::.ctor()" else TEST_BASE_EXTENDS="extends [mscorlib]System.Object" TEST_BASE_CONSTRUCTOR="call instance void object::.ctor()" fi if [ "$TEST_NESTED_EXTENDS" = "yes" ]; then TEST_NESTED_EXTENDS="extends Root\/Nested" TEST_NESTED_CONSTRUCTOR="call instance void Root\/Nested::.ctor()" else TEST_NESTED_EXTENDS="extends [mscorlib]System.Object" TEST_NESTED_CONSTRUCTOR="call instance void object::.ctor()" fi if [ "$TEST_LOAD_BASE" = "yes" ]; then TEST_LOAD_REF="ldarg.0" else TEST_LOAD_REF="call class Root\/Nested Root::Create ()" fi TEST_NAME=${TEST_VALIDITY}_${TEST_NAME} TEST_FILE=${TEST_NAME}_generated.il echo $TEST_FILE $SED -e "s/CLASS_ACCESS/${TEST_CLASS_ACCESS}/g" -e "s/NESTED_ACCESS/${TEST_NESTED_ACCESS}/g" -e "s/MEMBER_ACCESS/${TEST_MEMBER_ACCESS}/g" -e "s/ROOT_EXTENDS/${TEST_BASE_EXTENDS}/g" -e "s/ROOT_CONSTRUCTOR/${TEST_BASE_CONSTRUCTOR}/g" -e "s/NESTED_EXTENDS/${TEST_NESTED_EXTENDS}/g" -e "s/NESTED_CONSTRUCTOR/${TEST_NESTED_CONSTRUCTOR}/g" -e "s/LOAD_REF/${TEST_LOAD_REF}/g" -e "s/OPCODE/${TEST_OP}/g" -e "s/VALIDITY/${TEST_VALIDITY}/g" > $TEST_FILE <<//EOF .assembly '${TEST_NAME}_generated' { .hash algorithm 0x00008004 .ver 0:0:0:0 } // VALIDITY CIL which breaks the ECMA-335 rules. // this CIL should fail verification by a conforming CLI verifier. .assembly extern mscorlib { .ver 1:0:5000:0 .publickeytoken = (B7 7A 5C 56 19 34 E0 89 ) // .z\V.4.. } .class CLASS_ACCESS Root extends [mscorlib]System.Object { .method public static class Root/Nested Create () { .maxstack 8 newobj instance void class Root/Nested::.ctor() ret } .method public hidebysig specialname rtspecialname instance default void .ctor () cil managed { .maxstack 8 ldarg.0 call instance void object::.ctor() ret } .class nested NESTED_ACCESS Nested extends [mscorlib]System.Object { .field MEMBER_ACCESS int32 fld .field MEMBER_ACCESS static int32 sfld .method public hidebysig specialname rtspecialname instance default void .ctor () cil managed { .maxstack 8 ldarg.0 call instance void object::.ctor() ret } .method MEMBER_ACCESS virtual hidebysig newslot instance int32 Target () { .maxstack 8 ldc.i4.0 ret } } } .class public Extension ROOT_EXTENDS { .method public static void Execute () { .maxstack 8 newobj instance void class Extension/MyNested::.ctor() call instance void class Extension/MyNested::Method() ret } .method public hidebysig specialname rtspecialname instance default void .ctor () cil managed { .maxstack 8 ldarg.0 ROOT_CONSTRUCTOR ret } .class nested MEMBER_ACCESS MyNested NESTED_EXTENDS { .method MEMBER_ACCESS virtual hidebysig instance int32 Target () { .maxstack 8 ldc.i4.0 ret } .method public hidebysig specialname rtspecialname instance default void .ctor () cil managed { .maxstack 8 ldarg.0 NESTED_CONSTRUCTOR ret } .method public void Method () { .maxstack 8 LOAD_REF OPCODE // VALIDITY. pop ret } } } .method public static int32 Main() cil managed { .entrypoint .maxstack 8 call void class Extension::Execute () ldc.i4.0 ret } //EOF
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/tests/JIT/HardwareIntrinsics/Arm/ArmBase.Arm64/ReverseElementBits.UInt64.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void ReverseElementBits_UInt64() { var test = new ScalarUnaryOpTest__ReverseElementBits_UInt64(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.ReadUnaligned test.RunBasicScenario_UnsafeRead(); // Validates calling via reflection works, using Unsafe.ReadUnaligned test.RunReflectionScenario_UnsafeRead(); // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.ReadUnaligned test.RunLclVarScenario_UnsafeRead(); // Validates passing the field of a local class works test.RunClassLclFldScenario(); // Validates passing an instance member of a class works test.RunClassFldScenario(); // Validates passing the field of a local struct works test.RunStructLclFldScenario(); // Validates passing an instance member of a struct works test.RunStructFldScenario(); } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class ScalarUnaryOpTest__ReverseElementBits_UInt64 { private struct TestStruct { public UInt64 _fld; public static TestStruct Create() { var testStruct = new TestStruct(); testStruct._fld = TestLibrary.Generator.GetUInt64(); return testStruct; } public void RunStructFldScenario(ScalarUnaryOpTest__ReverseElementBits_UInt64 testClass) { var result = ArmBase.Arm64.ReverseElementBits(_fld); testClass.ValidateResult(_fld, result); } } private static UInt64 _data; private static UInt64 _clsVar; private UInt64 _fld; static ScalarUnaryOpTest__ReverseElementBits_UInt64() { _clsVar = TestLibrary.Generator.GetUInt64(); } public ScalarUnaryOpTest__ReverseElementBits_UInt64() { Succeeded = true; _fld = TestLibrary.Generator.GetUInt64(); _data = TestLibrary.Generator.GetUInt64(); } public bool IsSupported => ArmBase.Arm64.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = ArmBase.Arm64.ReverseElementBits( Unsafe.ReadUnaligned<UInt64>(ref Unsafe.As<UInt64, byte>(ref _data)) ); ValidateResult(_data, result); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(ArmBase.Arm64).GetMethod(nameof(ArmBase.Arm64.ReverseElementBits), new Type[] { typeof(UInt64) }) .Invoke(null, new object[] { Unsafe.ReadUnaligned<UInt64>(ref Unsafe.As<UInt64, byte>(ref _data)) }); ValidateResult(_data, (UInt64)result); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = 
ArmBase.Arm64.ReverseElementBits( _clsVar ); ValidateResult(_clsVar, result); } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var data = Unsafe.ReadUnaligned<UInt64>(ref Unsafe.As<UInt64, byte>(ref _data)); var result = ArmBase.Arm64.ReverseElementBits(data); ValidateResult(data, result); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new ScalarUnaryOpTest__ReverseElementBits_UInt64(); var result = ArmBase.Arm64.ReverseElementBits(test._fld); ValidateResult(test._fld, result); } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = ArmBase.Arm64.ReverseElementBits(_fld); ValidateResult(_fld, result); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = ArmBase.Arm64.ReverseElementBits(test._fld); ValidateResult(test._fld, result); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(UInt64 data, UInt64 result, [CallerMemberName] string method = "") { var isUnexpectedResult = false; isUnexpectedResult = Helpers.ReverseElementBits(data) != result; if (isUnexpectedResult) { TestLibrary.TestFramework.LogInformation($"{nameof(ArmBase.Arm64)}.{nameof(ArmBase.Arm64.ReverseElementBits)}<UInt64>(UInt64): ReverseElementBits failed:"); TestLibrary.TestFramework.LogInformation($" data: {data}"); TestLibrary.TestFramework.LogInformation($" result: {result}"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void ReverseElementBits_UInt64() { var test = new ScalarUnaryOpTest__ReverseElementBits_UInt64(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.ReadUnaligned test.RunBasicScenario_UnsafeRead(); // Validates calling via reflection works, using Unsafe.ReadUnaligned test.RunReflectionScenario_UnsafeRead(); // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.ReadUnaligned test.RunLclVarScenario_UnsafeRead(); // Validates passing the field of a local class works test.RunClassLclFldScenario(); // Validates passing an instance member of a class works test.RunClassFldScenario(); // Validates passing the field of a local struct works test.RunStructLclFldScenario(); // Validates passing an instance member of a struct works test.RunStructFldScenario(); } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class ScalarUnaryOpTest__ReverseElementBits_UInt64 { private struct TestStruct { public UInt64 _fld; public static TestStruct Create() { var testStruct = new TestStruct(); testStruct._fld = TestLibrary.Generator.GetUInt64(); return testStruct; } public void RunStructFldScenario(ScalarUnaryOpTest__ReverseElementBits_UInt64 testClass) { var result = ArmBase.Arm64.ReverseElementBits(_fld); testClass.ValidateResult(_fld, result); } } private static UInt64 _data; private static UInt64 _clsVar; private UInt64 _fld; static ScalarUnaryOpTest__ReverseElementBits_UInt64() { _clsVar = TestLibrary.Generator.GetUInt64(); } public ScalarUnaryOpTest__ReverseElementBits_UInt64() { Succeeded = true; _fld = TestLibrary.Generator.GetUInt64(); _data = TestLibrary.Generator.GetUInt64(); } public bool IsSupported => ArmBase.Arm64.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = ArmBase.Arm64.ReverseElementBits( Unsafe.ReadUnaligned<UInt64>(ref Unsafe.As<UInt64, byte>(ref _data)) ); ValidateResult(_data, result); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(ArmBase.Arm64).GetMethod(nameof(ArmBase.Arm64.ReverseElementBits), new Type[] { typeof(UInt64) }) .Invoke(null, new object[] { Unsafe.ReadUnaligned<UInt64>(ref Unsafe.As<UInt64, byte>(ref _data)) }); ValidateResult(_data, (UInt64)result); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = 
ArmBase.Arm64.ReverseElementBits( _clsVar ); ValidateResult(_clsVar, result); } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var data = Unsafe.ReadUnaligned<UInt64>(ref Unsafe.As<UInt64, byte>(ref _data)); var result = ArmBase.Arm64.ReverseElementBits(data); ValidateResult(data, result); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new ScalarUnaryOpTest__ReverseElementBits_UInt64(); var result = ArmBase.Arm64.ReverseElementBits(test._fld); ValidateResult(test._fld, result); } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = ArmBase.Arm64.ReverseElementBits(_fld); ValidateResult(_fld, result); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = ArmBase.Arm64.ReverseElementBits(test._fld); ValidateResult(test._fld, result); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(UInt64 data, UInt64 result, [CallerMemberName] string method = "") { var isUnexpectedResult = false; isUnexpectedResult = Helpers.ReverseElementBits(data) != result; if (isUnexpectedResult) { TestLibrary.TestFramework.LogInformation($"{nameof(ArmBase.Arm64)}.{nameof(ArmBase.Arm64.ReverseElementBits)}<UInt64>(UInt64): ReverseElementBits failed:"); TestLibrary.TestFramework.LogInformation($" data: {data}"); TestLibrary.TestFramework.LogInformation($" result: {result}"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
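The scalar behaviour the test validates against is a plain bit reversal of the 64-bit value (this is an assumption about Helpers.ReverseElementBits, which mirrors the AArch64 RBIT instruction):

// Minimal software reference for a 64-bit bit reversal.
static ulong ReverseBits(ulong value)
{
    ulong result = 0;
    for (int i = 0; i < 64; i++)
    {
        result = (result << 1) | (value & 1);  // shift the next-lowest input bit onto the bottom of the output
        value >>= 1;
    }
    return result;
}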
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/libraries/Microsoft.Extensions.DependencyInjection/src/Properties/InternalsVisibleTo.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Runtime.CompilerServices; [assembly: InternalsVisibleTo("Microsoft.Extensions.DependencyInjection.Tests, PublicKey=0024000004800000940000000602000000240000525341310004000001000100f33a29044fa9d740c9b3213a93e57c84b472c84e0b8a0e1ae48e67a9f8f6de9d5f7f3d52ac23e48ac51801f1dc950abe901da34d2a9e3baadb141a17c77ef3c565dd5ee5054b91cf63bb3c6ab83f72ab3aafe93d0fc3c2348b764fafb0b1c0733de51459aeab46580384bf9d74c4e28164b7cde247f891ba07891c9d872ad2bb")] [assembly: InternalsVisibleTo("MicroBenchmarks, PublicKey=00240000048000009400000006020000002400005253413100040000010001004b86c4cb78549b34bab61a3b1800e23bfeb5b3ec390074041536a7e3cbd97f5f04cf0f857155a8928eaa29ebfd11cfbbad3ba70efea7bda3226c6a8d370a4cd303f714486b6ebc225985a638471e6ef571cc92a4613c00b8fa65d61ccee0cbe5f36330c9a01f4183559f1bef24cc2917c6d913e3a541333a1d05d9bed22b38cb")]
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Runtime.CompilerServices; [assembly: InternalsVisibleTo("Microsoft.Extensions.DependencyInjection.Tests, PublicKey=0024000004800000940000000602000000240000525341310004000001000100f33a29044fa9d740c9b3213a93e57c84b472c84e0b8a0e1ae48e67a9f8f6de9d5f7f3d52ac23e48ac51801f1dc950abe901da34d2a9e3baadb141a17c77ef3c565dd5ee5054b91cf63bb3c6ab83f72ab3aafe93d0fc3c2348b764fafb0b1c0733de51459aeab46580384bf9d74c4e28164b7cde247f891ba07891c9d872ad2bb")] [assembly: InternalsVisibleTo("MicroBenchmarks, PublicKey=00240000048000009400000006020000002400005253413100040000010001004b86c4cb78549b34bab61a3b1800e23bfeb5b3ec390074041536a7e3cbd97f5f04cf0f857155a8928eaa29ebfd11cfbbad3ba70efea7bda3226c6a8d370a4cd303f714486b6ebc225985a638471e6ef571cc92a4613c00b8fa65d61ccee0cbe5f36330c9a01f4183559f1bef24cc2917c6d913e3a541333a1d05d9bed22b38cb")]
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/libraries/Microsoft.VisualBasic.Core/src/Microsoft/VisualBasic/CompilerServices/VB6BinaryFile.vb
' Licensed to the .NET Foundation under one or more agreements. ' The .NET Foundation licenses this file to you under the MIT license. Imports System Imports Microsoft.VisualBasic.CompilerServices.ExceptionUtils Imports Microsoft.VisualBasic.CompilerServices.Utils Imports System.Runtime.Versioning Imports System.Diagnostics.CodeAnalysis Namespace Microsoft.VisualBasic.CompilerServices <System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)> Friend NotInheritable Class VB6BinaryFile '============================================================================ ' Declarations '============================================================================ Inherits VB6RandomFile '============================================================================ ' Constructor '============================================================================ Public Sub New(ByVal FileName As String, ByVal access As OpenAccess, ByVal share As OpenShare) MyBase.New(FileName, access, share, -1) End Sub ' the implementation of Lock in base class VB6RandomFile does not handle m_lRecordLen=-1 <UnsupportedOSPlatform("ios")> <UnsupportedOSPlatform("macos")> <UnsupportedOSPlatform("tvos")> Friend Overloads Overrides Sub Lock(ByVal lStart As Long, ByVal lEnd As Long) If lStart > lEnd Then Throw New ArgumentException(SR.Format(SR.Argument_InvalidValue1, "Start")) End If Dim absRecordLength As Long Dim lStartByte As Long Dim lLength As Long If m_lRecordLen = -1 Then ' if record len is -1, then using absolute bytes absRecordLength = 1 Else absRecordLength = m_lRecordLen End If lStartByte = (lStart - 1) * absRecordLength lLength = (lEnd - lStart + 1) * absRecordLength m_file.Lock(lStartByte, lLength) End Sub ' see Lock description <UnsupportedOSPlatform("ios")> <UnsupportedOSPlatform("macos")> <UnsupportedOSPlatform("tvos")> Friend Overloads Overrides Sub Unlock(ByVal lStart As Long, ByVal lEnd As Long) If lStart > lEnd Then Throw New ArgumentException(SR.Format(SR.Argument_InvalidValue1, "Start")) End If Dim absRecordLength As Long Dim lStartByte As Long Dim lLength As Long If m_lRecordLen = -1 Then ' if record len is -1, then using absolute bytes absRecordLength = 1 Else absRecordLength = m_lRecordLen End If lStartByte = (lStart - 1) * absRecordLength lLength = (lEnd - lStart + 1) * absRecordLength m_file.Unlock(lStartByte, lLength) End Sub Public Overrides Function GetMode() As OpenMode Return OpenMode.Binary End Function Friend Overloads Overrides Function Seek() As Long 'm_file.position is the last read byte as a zero based offset 'Seek returns the position of the next byte to read Return (m_position + 1) End Function Friend Overloads Overrides Sub Seek(ByVal BaseOnePosition As Long) If BaseOnePosition <= 0 Then Throw VbMakeException(vbErrors.BadRecordNum) End If Dim BaseZeroPosition As Long = BaseOnePosition - 1 m_file.Position = BaseZeroPosition m_position = BaseZeroPosition If Not m_sr Is Nothing Then m_sr.DiscardBufferedData() End If End Sub Friend Overrides Function LOC() As Long Return m_position End Function Friend Overrides Function CanInput() As Boolean Return True End Function Friend Overrides Function CanWrite() As Boolean Return True End Function <RequiresUnreferencedCode("Implementation of Vb6InputFile is unsafe.")> Friend Overloads Overrides Sub Input(ByRef Value As Object) Value = InputStr() End Sub Friend Overloads Overrides Sub Input(ByRef Value As String) Value = InputStr() End Sub Friend Overloads Overrides Sub Input(ByRef Value As Char) Dim s As String = 
InputStr() If s.Length > 0 Then Value = s.Chars(0) Else Value = ControlChars.NullChar End If End Sub Friend Overloads Overrides Sub Input(ByRef Value As Boolean) Value = BooleanType.FromString(InputStr()) End Sub Friend Overloads Overrides Sub Input(ByRef Value As Byte) Value = ByteType.FromObject(InputNum(VariantType.Byte)) End Sub Friend Overloads Overrides Sub Input(ByRef Value As Short) Value = ShortType.FromObject(InputNum(VariantType.Short)) End Sub Friend Overloads Overrides Sub Input(ByRef Value As Integer) Value = IntegerType.FromObject(InputNum(VariantType.Integer)) End Sub Friend Overloads Overrides Sub Input(ByRef Value As Long) Value = LongType.FromObject(InputNum(VariantType.Long)) End Sub Friend Overloads Overrides Sub Input(ByRef Value As Single) Value = SingleType.FromObject(InputNum(VariantType.Single)) End Sub Friend Overloads Overrides Sub Input(ByRef Value As Double) Value = DoubleType.FromObject(InputNum(VariantType.Double)) End Sub Friend Overloads Overrides Sub Input(ByRef Value As Decimal) Value = DecimalType.FromObject(InputNum(VariantType.Decimal)) End Sub Friend Overloads Overrides Sub Input(ByRef Value As Date) Value = DateType.FromString(InputStr(), GetCultureInfo()) End Sub Friend Overloads Overrides Sub Put(ByVal Value As String, Optional ByVal RecordNumber As Long = 0, Optional ByVal StringIsFixedLength As Boolean = False) ValidateWriteable() PutString(RecordNumber, Value) End Sub Friend Overloads Overrides Sub [Get](ByRef Value As String, Optional ByVal RecordNumber As Long = 0, Optional ByVal StringIsFixedLength As Boolean = False) ValidateReadable() Dim ByteLength As Integer If Value Is Nothing Then ByteLength = 0 Else Diagnostics.Debug.Assert(Not m_Encoding Is Nothing) ByteLength = m_Encoding.GetByteCount(Value) End If Value = GetFixedLengthString(RecordNumber, ByteLength) End Sub Protected Overrides Function InputStr() As String Dim lChar As Integer ' The NullReferenceException is for compatibility with VB6 which threw a NullReferenceException when ' reading from a file that was write-only. The inner exception was added to provide more context. If (m_access <> OpenAccess.ReadWrite) AndAlso (m_access <> OpenAccess.Read) Then Dim JustNeedTheMessage As New NullReferenceException ' We don't have access to the localized resources for this string. Throw New NullReferenceException(JustNeedTheMessage.Message, New IO.IOException(SR.FileOpenedNoRead)) End If ' read past any leading spaces or tabs 'Skip over leading whitespace lChar = SkipWhiteSpaceEOF() If lChar = lchDoubleQuote Then lChar = m_sr.Read() m_position += 1 InputStr = ReadInField(FIN_QSTRING) Else InputStr = ReadInField(FIN_STRING) End If SkipTrailingWhiteSpace() End Function End Class End Namespace
' Licensed to the .NET Foundation under one or more agreements. ' The .NET Foundation licenses this file to you under the MIT license. Imports System Imports Microsoft.VisualBasic.CompilerServices.ExceptionUtils Imports Microsoft.VisualBasic.CompilerServices.Utils Imports System.Runtime.Versioning Imports System.Diagnostics.CodeAnalysis Namespace Microsoft.VisualBasic.CompilerServices <System.ComponentModel.EditorBrowsableAttribute(System.ComponentModel.EditorBrowsableState.Never)> Friend NotInheritable Class VB6BinaryFile '============================================================================ ' Declarations '============================================================================ Inherits VB6RandomFile '============================================================================ ' Constructor '============================================================================ Public Sub New(ByVal FileName As String, ByVal access As OpenAccess, ByVal share As OpenShare) MyBase.New(FileName, access, share, -1) End Sub ' the implementation of Lock in base class VB6RandomFile does not handle m_lRecordLen=-1 <UnsupportedOSPlatform("ios")> <UnsupportedOSPlatform("macos")> <UnsupportedOSPlatform("tvos")> Friend Overloads Overrides Sub Lock(ByVal lStart As Long, ByVal lEnd As Long) If lStart > lEnd Then Throw New ArgumentException(SR.Format(SR.Argument_InvalidValue1, "Start")) End If Dim absRecordLength As Long Dim lStartByte As Long Dim lLength As Long If m_lRecordLen = -1 Then ' if record len is -1, then using absolute bytes absRecordLength = 1 Else absRecordLength = m_lRecordLen End If lStartByte = (lStart - 1) * absRecordLength lLength = (lEnd - lStart + 1) * absRecordLength m_file.Lock(lStartByte, lLength) End Sub ' see Lock description <UnsupportedOSPlatform("ios")> <UnsupportedOSPlatform("macos")> <UnsupportedOSPlatform("tvos")> Friend Overloads Overrides Sub Unlock(ByVal lStart As Long, ByVal lEnd As Long) If lStart > lEnd Then Throw New ArgumentException(SR.Format(SR.Argument_InvalidValue1, "Start")) End If Dim absRecordLength As Long Dim lStartByte As Long Dim lLength As Long If m_lRecordLen = -1 Then ' if record len is -1, then using absolute bytes absRecordLength = 1 Else absRecordLength = m_lRecordLen End If lStartByte = (lStart - 1) * absRecordLength lLength = (lEnd - lStart + 1) * absRecordLength m_file.Unlock(lStartByte, lLength) End Sub Public Overrides Function GetMode() As OpenMode Return OpenMode.Binary End Function Friend Overloads Overrides Function Seek() As Long 'm_file.position is the last read byte as a zero based offset 'Seek returns the position of the next byte to read Return (m_position + 1) End Function Friend Overloads Overrides Sub Seek(ByVal BaseOnePosition As Long) If BaseOnePosition <= 0 Then Throw VbMakeException(vbErrors.BadRecordNum) End If Dim BaseZeroPosition As Long = BaseOnePosition - 1 m_file.Position = BaseZeroPosition m_position = BaseZeroPosition If Not m_sr Is Nothing Then m_sr.DiscardBufferedData() End If End Sub Friend Overrides Function LOC() As Long Return m_position End Function Friend Overrides Function CanInput() As Boolean Return True End Function Friend Overrides Function CanWrite() As Boolean Return True End Function <RequiresUnreferencedCode("Implementation of Vb6InputFile is unsafe.")> Friend Overloads Overrides Sub Input(ByRef Value As Object) Value = InputStr() End Sub Friend Overloads Overrides Sub Input(ByRef Value As String) Value = InputStr() End Sub Friend Overloads Overrides Sub Input(ByRef Value As Char) Dim s As String = 
InputStr() If s.Length > 0 Then Value = s.Chars(0) Else Value = ControlChars.NullChar End If End Sub Friend Overloads Overrides Sub Input(ByRef Value As Boolean) Value = BooleanType.FromString(InputStr()) End Sub Friend Overloads Overrides Sub Input(ByRef Value As Byte) Value = ByteType.FromObject(InputNum(VariantType.Byte)) End Sub Friend Overloads Overrides Sub Input(ByRef Value As Short) Value = ShortType.FromObject(InputNum(VariantType.Short)) End Sub Friend Overloads Overrides Sub Input(ByRef Value As Integer) Value = IntegerType.FromObject(InputNum(VariantType.Integer)) End Sub Friend Overloads Overrides Sub Input(ByRef Value As Long) Value = LongType.FromObject(InputNum(VariantType.Long)) End Sub Friend Overloads Overrides Sub Input(ByRef Value As Single) Value = SingleType.FromObject(InputNum(VariantType.Single)) End Sub Friend Overloads Overrides Sub Input(ByRef Value As Double) Value = DoubleType.FromObject(InputNum(VariantType.Double)) End Sub Friend Overloads Overrides Sub Input(ByRef Value As Decimal) Value = DecimalType.FromObject(InputNum(VariantType.Decimal)) End Sub Friend Overloads Overrides Sub Input(ByRef Value As Date) Value = DateType.FromString(InputStr(), GetCultureInfo()) End Sub Friend Overloads Overrides Sub Put(ByVal Value As String, Optional ByVal RecordNumber As Long = 0, Optional ByVal StringIsFixedLength As Boolean = False) ValidateWriteable() PutString(RecordNumber, Value) End Sub Friend Overloads Overrides Sub [Get](ByRef Value As String, Optional ByVal RecordNumber As Long = 0, Optional ByVal StringIsFixedLength As Boolean = False) ValidateReadable() Dim ByteLength As Integer If Value Is Nothing Then ByteLength = 0 Else Diagnostics.Debug.Assert(Not m_Encoding Is Nothing) ByteLength = m_Encoding.GetByteCount(Value) End If Value = GetFixedLengthString(RecordNumber, ByteLength) End Sub Protected Overrides Function InputStr() As String Dim lChar As Integer ' The NullReferenceException is for compatibility with VB6 which threw a NullReferenceException when ' reading from a file that was write-only. The inner exception was added to provide more context. If (m_access <> OpenAccess.ReadWrite) AndAlso (m_access <> OpenAccess.Read) Then Dim JustNeedTheMessage As New NullReferenceException ' We don't have access to the localized resources for this string. Throw New NullReferenceException(JustNeedTheMessage.Message, New IO.IOException(SR.FileOpenedNoRead)) End If ' read past any leading spaces or tabs 'Skip over leading whitespace lChar = SkipWhiteSpaceEOF() If lChar = lchDoubleQuote Then lChar = m_sr.Read() m_position += 1 InputStr = ReadInField(FIN_QSTRING) Else InputStr = ReadInField(FIN_STRING) End If SkipTrailingWhiteSpace() End Function End Class End Namespace
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/libraries/System.Net.Quic/src/System/Net/Quic/Implementations/QuicConnectionProvider.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Threading; using System.Threading.Tasks; namespace System.Net.Quic.Implementations { internal abstract class QuicConnectionProvider : IDisposable { internal abstract bool Connected { get; } internal abstract IPEndPoint? LocalEndPoint { get; } internal abstract EndPoint RemoteEndPoint { get; } internal abstract ValueTask ConnectAsync(CancellationToken cancellationToken = default); internal abstract ValueTask WaitForAvailableUnidirectionalStreamsAsync(CancellationToken cancellationToken = default); internal abstract ValueTask WaitForAvailableBidirectionalStreamsAsync(CancellationToken cancellationToken = default); internal abstract QuicStreamProvider OpenUnidirectionalStream(); internal abstract QuicStreamProvider OpenBidirectionalStream(); internal abstract int GetRemoteAvailableUnidirectionalStreamCount(); internal abstract int GetRemoteAvailableBidirectionalStreamCount(); internal abstract ValueTask<QuicStreamProvider> AcceptStreamAsync(CancellationToken cancellationToken = default); internal abstract System.Net.Security.SslApplicationProtocol NegotiatedApplicationProtocol { get; } internal abstract System.Security.Cryptography.X509Certificates.X509Certificate? RemoteCertificate { get ; } internal abstract ValueTask CloseAsync(long errorCode, CancellationToken cancellationToken = default); public abstract void Dispose(); } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Threading; using System.Threading.Tasks; namespace System.Net.Quic.Implementations { internal abstract class QuicConnectionProvider : IDisposable { internal abstract bool Connected { get; } internal abstract IPEndPoint? LocalEndPoint { get; } internal abstract EndPoint RemoteEndPoint { get; } internal abstract ValueTask ConnectAsync(CancellationToken cancellationToken = default); internal abstract ValueTask WaitForAvailableUnidirectionalStreamsAsync(CancellationToken cancellationToken = default); internal abstract ValueTask WaitForAvailableBidirectionalStreamsAsync(CancellationToken cancellationToken = default); internal abstract QuicStreamProvider OpenUnidirectionalStream(); internal abstract QuicStreamProvider OpenBidirectionalStream(); internal abstract int GetRemoteAvailableUnidirectionalStreamCount(); internal abstract int GetRemoteAvailableBidirectionalStreamCount(); internal abstract ValueTask<QuicStreamProvider> AcceptStreamAsync(CancellationToken cancellationToken = default); internal abstract System.Net.Security.SslApplicationProtocol NegotiatedApplicationProtocol { get; } internal abstract System.Security.Cryptography.X509Certificates.X509Certificate? RemoteCertificate { get ; } internal abstract ValueTask CloseAsync(long errorCode, CancellationToken cancellationToken = default); public abstract void Dispose(); } }
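For orientation, a consumer of this abstraction only sees the abstract surface declared above; a hedged sketch of a typical call sequence ('connection' stands for any concrete QuicConnectionProvider implementation):

static async ValueTask UseConnectionAsync(QuicConnectionProvider connection)
{
    await connection.ConnectAsync();                            // handshake
    QuicStreamProvider stream = connection.OpenBidirectionalStream();
    // ... reads/writes happen through the QuicStreamProvider ...
    await connection.CloseAsync(errorCode: 0);                  // application-level close code
    connection.Dispose();
}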
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/libraries/Microsoft.Extensions.Logging.Abstractions/tests/Microsoft.Extensions.Logging.Generators.Tests/Microsoft.Extensions.Logging.Generators.Roslyn4.0.Tests.csproj
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <RoslynApiVersion>$(MicrosoftCodeAnalysisCSharpWorkspacesVersion)</RoslynApiVersion> <DefineConstants>$(DefineConstants);ROSLYN4_0_OR_GREATER</DefineConstants> <IsHighAotMemoryUsageTest>true</IsHighAotMemoryUsageTest> <EmccLinkOptimizationFlag Condition="'$(ContinuousIntegrationBuild)' == 'true'">-O1</EmccLinkOptimizationFlag> <WasmNativeStrip>false</WasmNativeStrip> </PropertyGroup> <ItemGroup> <HighAotMemoryUsageAssembly Include="Microsoft.CodeAnalysis.CSharp.dll" /> </ItemGroup> <Import Project="Microsoft.Extensions.Logging.Generators.targets"/> <ItemGroup> <ProjectReference Include="..\..\gen\Microsoft.Extensions.Logging.Generators.Roslyn4.0.csproj" OutputItemType="Analyzer" ReferenceOutputAssembly="true" /> </ItemGroup> </Project>
<Project Sdk="Microsoft.NET.Sdk"> <PropertyGroup> <RoslynApiVersion>$(MicrosoftCodeAnalysisCSharpWorkspacesVersion)</RoslynApiVersion> <DefineConstants>$(DefineConstants);ROSLYN4_0_OR_GREATER</DefineConstants> <IsHighAotMemoryUsageTest>true</IsHighAotMemoryUsageTest> <EmccLinkOptimizationFlag Condition="'$(ContinuousIntegrationBuild)' == 'true'">-O1</EmccLinkOptimizationFlag> <WasmNativeStrip>false</WasmNativeStrip> </PropertyGroup> <ItemGroup> <HighAotMemoryUsageAssembly Include="Microsoft.CodeAnalysis.CSharp.dll" /> </ItemGroup> <Import Project="Microsoft.Extensions.Logging.Generators.targets"/> <ItemGroup> <ProjectReference Include="..\..\gen\Microsoft.Extensions.Logging.Generators.Roslyn4.0.csproj" OutputItemType="Analyzer" ReferenceOutputAssembly="true" /> </ItemGroup> </Project>
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/coreclr/pal/src/libunwind/src/x86_64/Lget_save_loc.c
#define UNW_LOCAL_ONLY #include <libunwind.h> #if defined(UNW_LOCAL_ONLY) && !defined(UNW_REMOTE_ONLY) #include "Gget_save_loc.c" #endif
#define UNW_LOCAL_ONLY #include <libunwind.h> #if defined(UNW_LOCAL_ONLY) && !defined(UNW_REMOTE_ONLY) #include "Gget_save_loc.c" #endif
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/tests/Loader/classloader/explicitlayout/objrefandnonobjrefoverlap/case9.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // like case1, except that the overlapping doesn't occur in the first // "slot" of the union. // struct // [0] int // [4] int // [4] struct // objref using System; using System.Runtime.InteropServices; public class Foo{ public int i=42; public int getI(){return i;} } public class Bar{ private int i=1; public int getI(){return i;} } public struct WrapFoo { public Foo o; } public struct WrapBar { public Bar o; } [ StructLayout( LayoutKind.Explicit )] public struct MyUnion1 { [ FieldOffset( 0 )] public int i2; [ FieldOffset( 4 )] public int i; [ FieldOffset( 4 )] public WrapBar o; } [ StructLayout( LayoutKind.Explicit )] public struct MyUnion2 { [ FieldOffset( 0 )] public int i2; [ FieldOffset( 4 )] public int i; [ FieldOffset( 4 )] public WrapFoo o; } public class Test{ public static int Main(string[] args){ bool caught=false; try{ Go(); } catch(TypeLoadException e){ caught=true; Console.WriteLine(e); } if(caught){ Console.WriteLine("PASS: caught expected exception"); return 100; } else{ Console.WriteLine("FAIL: was allowed to overlap an objref with a scalar."); return 101; } } public static void Go(){ MyUnion2 u2; MyUnion1 u1; u1.i = 0; u1.o.o = new Bar(); Console.WriteLine("BEFORE: u1.o.o.getI(): {0}. (EXPECT 1)",u1.o.o.getI()); u2.i = 0; u2.o.o = new Foo(); // write the Foo's objref value now in u2.o into the int field of u1, // thereby overwriting the Bar objref that had been in u1.o. u1.i = u2.i; // If u1.o.o.getI() returns 42, that means that we were able to write to a private // member variable of Bar, a huge security problem! int curI = u1.o.o.getI(); Console.WriteLine("AFTER: u1.o.o.getI(): {0}. (BUG if 42)",curI); } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // like case1, except that the overlapping doesn't occur in the first // "slot" of the union. // struct // [0] int // [4] int // [4] struct // objref using System; using System.Runtime.InteropServices; public class Foo{ public int i=42; public int getI(){return i;} } public class Bar{ private int i=1; public int getI(){return i;} } public struct WrapFoo { public Foo o; } public struct WrapBar { public Bar o; } [ StructLayout( LayoutKind.Explicit )] public struct MyUnion1 { [ FieldOffset( 0 )] public int i2; [ FieldOffset( 4 )] public int i; [ FieldOffset( 4 )] public WrapBar o; } [ StructLayout( LayoutKind.Explicit )] public struct MyUnion2 { [ FieldOffset( 0 )] public int i2; [ FieldOffset( 4 )] public int i; [ FieldOffset( 4 )] public WrapFoo o; } public class Test{ public static int Main(string[] args){ bool caught=false; try{ Go(); } catch(TypeLoadException e){ caught=true; Console.WriteLine(e); } if(caught){ Console.WriteLine("PASS: caught expected exception"); return 100; } else{ Console.WriteLine("FAIL: was allowed to overlap an objref with a scalar."); return 101; } } public static void Go(){ MyUnion2 u2; MyUnion1 u1; u1.i = 0; u1.o.o = new Bar(); Console.WriteLine("BEFORE: u1.o.o.getI(): {0}. (EXPECT 1)",u1.o.o.getI()); u2.i = 0; u2.o.o = new Foo(); // write the Foo's objref value now in u2.o into the int field of u1, // thereby overwriting the Bar objref that had been in u1.o. u1.i = u2.i; // If u1.o.o.getI() returns 42, that means that we were able to write to a private // member variable of Bar, a huge security problem! int curI = u1.o.o.getI(); Console.WriteLine("AFTER: u1.o.o.getI(): {0}. (BUG if 42)",curI); } }
-1
dotnet/runtime
65,901
Remove usages of native bootstrapping
hoyosjs
"2022-02-25T17:42:53Z"
"2022-02-25T22:06:34Z"
2ce0af0b8404cf051783516cff4b07abccd5de00
8727ac772e1d8031691ee52e2d66e2520f78d01a
Remove usages of native bootstrapping.
./src/tests/JIT/HardwareIntrinsics/General/Vector128_1/AllBitsSet.Int32.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\General\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; namespace JIT.HardwareIntrinsics.General { public static partial class Program { private static void AllBitsSetInt32() { var test = new VectorAllBitsSet__AllBitsSetInt32(); // Validates basic functionality works test.RunBasicScenario(); // Validates calling via reflection works test.RunReflectionScenario(); if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class VectorAllBitsSet__AllBitsSetInt32 { private static readonly int LargestVectorSize = 16; private static readonly int ElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32); public bool Succeeded { get; set; } = true; public void RunBasicScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario)); Vector128<Int32> result = Vector128<Int32>.AllBitsSet; ValidateResult(result); } public void RunReflectionScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario)); object result = typeof(Vector128<Int32>) .GetProperty(nameof(Vector128<Int32>.AllBitsSet), new Type[] { }) .GetGetMethod() .Invoke(null, new object[] { }); ValidateResult((Vector128<Int32>)(result)); } private void ValidateResult(Vector128<Int32> result, [CallerMemberName] string method = "") { Int32[] resultElements = new Int32[ElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref resultElements[0]), result); ValidateResult(resultElements, method); } private unsafe void ValidateResult(Int32[] resultElements, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < ElementCount; i++) { if (!HasAllBitsSet(resultElements[i])) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"Vector128.AllBitsSet(Int32): {method} failed:"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", resultElements)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } private unsafe bool HasAllBitsSet(Int32 value) { for (int i = 0; i < sizeof(Int32); i++) { if (((byte*)&value)[i] != 0xFF) return false; } return true; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\General\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; namespace JIT.HardwareIntrinsics.General { public static partial class Program { private static void AllBitsSetInt32() { var test = new VectorAllBitsSet__AllBitsSetInt32(); // Validates basic functionality works test.RunBasicScenario(); // Validates calling via reflection works test.RunReflectionScenario(); if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class VectorAllBitsSet__AllBitsSetInt32 { private static readonly int LargestVectorSize = 16; private static readonly int ElementCount = Unsafe.SizeOf<Vector128<Int32>>() / sizeof(Int32); public bool Succeeded { get; set; } = true; public void RunBasicScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario)); Vector128<Int32> result = Vector128<Int32>.AllBitsSet; ValidateResult(result); } public void RunReflectionScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario)); object result = typeof(Vector128<Int32>) .GetProperty(nameof(Vector128<Int32>.AllBitsSet), new Type[] { }) .GetGetMethod() .Invoke(null, new object[] { }); ValidateResult((Vector128<Int32>)(result)); } private void ValidateResult(Vector128<Int32> result, [CallerMemberName] string method = "") { Int32[] resultElements = new Int32[ElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Int32, byte>(ref resultElements[0]), result); ValidateResult(resultElements, method); } private unsafe void ValidateResult(Int32[] resultElements, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < ElementCount; i++) { if (!HasAllBitsSet(resultElements[i])) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"Vector128.AllBitsSet(Int32): {method} failed:"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", resultElements)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } private unsafe bool HasAllBitsSet(Int32 value) { for (int i = 0; i < sizeof(Int32); i++) { if (((byte*)&value)[i] != 0xFF) return false; } return true; } } }
-1
dotnet/runtime
65,899
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync
I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`, and my recent changes from #64997 have just exposed the problem by adding the finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close` which calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync`, but not calling `GC.SuppressFinalize(this)`. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I guess (I was not able to repro it) that it's caused by a flush implemented for the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flushing should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I've tried really hard to write a failing unit test, but I've failed. The reason for that is that all custom types deriving from `FileStream` are calling `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base impl and is bug-free.
adamsitnik
"2022-02-25T17:17:25Z"
"2022-03-01T13:15:47Z"
097d9ea3c1584eb8745bd0a72ebf9cd3a31f1618
3cbed4b71800709a8121e50e964ae5b02bd80b94
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync. I was unable to repro #65835 locally for a few hours, but I believe that it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync` and my recent changes from #64997 have just exposed the problem by adding the finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync`, but not calling `GC.SuppressFinalize(this)`. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I guess (I was not able to repro it) that it's caused by a flush performed by the finalizer. I am not 100% sure because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flushing should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I've tried really hard to write a failing unit test, but I've failed. The reason for that is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base impl and is bug free.
./src/libraries/System.IO.FileSystem/tests/FileStream/DisposeAsync.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using Microsoft.Win32.SafeHandles; using System.Threading.Tasks; using Xunit; namespace System.IO.Tests { public class FileStream_DisposeAsync : FileSystemTest { [Fact] public async Task DisposeAsyncClosesHandle() { SafeFileHandle handle; var fs = new FileStream(GetTestFilePath(), FileMode.Create); handle = fs.SafeFileHandle; await fs.DisposeAsync(); Assert.True(handle.IsClosed); } [Fact] [ActiveIssue("https://github.com/dotnet/runtime/issues/65835")] public async Task DisposeAsyncFlushes() { string path = GetTestFilePath(); var fs1 = new FileStream(path, FileMode.Create, FileAccess.ReadWrite, FileShare.ReadWrite); fs1.Write(new byte[100], 0, 100); using (var fs2 = new FileStream(path, FileMode.Open, FileAccess.ReadWrite, FileShare.ReadWrite)) { Assert.Equal(0, fs2.Length); } await fs1.DisposeAsync(); using (var fs2 = new FileStream(path, FileMode.Open, FileAccess.ReadWrite, FileShare.ReadWrite)) { Assert.Equal(100, fs2.Length); } } [Fact] public async Task DerivedFileStreamDisposeUsedForDisposeAsync() { var fs = new OverridesDisposeFileStream(GetTestFilePath(), FileMode.Create); Assert.False(fs.DisposeInvoked); await fs.DisposeAsync(); Assert.True(fs.DisposeInvoked); } private sealed class OverridesDisposeFileStream : FileStream { public bool DisposeInvoked; public OverridesDisposeFileStream(string path, FileMode mode) : base(path, mode) { } protected override void Dispose(bool disposing) { DisposeInvoked = true; base.Dispose(disposing); } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using Microsoft.Win32.SafeHandles; using System.Threading.Tasks; using Xunit; namespace System.IO.Tests { public class FileStream_DisposeAsync : FileSystemTest { [Fact] public async Task DisposeAsyncClosesHandle() { SafeFileHandle handle; var fs = new FileStream(GetTestFilePath(), FileMode.Create); handle = fs.SafeFileHandle; await fs.DisposeAsync(); Assert.True(handle.IsClosed); } [Fact] public async Task DisposeAsyncFlushes() { string path = GetTestFilePath(); var fs1 = new FileStream(path, FileMode.Create, FileAccess.ReadWrite, FileShare.ReadWrite); fs1.Write(new byte[100], 0, 100); using (var fs2 = new FileStream(path, FileMode.Open, FileAccess.ReadWrite, FileShare.ReadWrite)) { Assert.Equal(0, fs2.Length); } await fs1.DisposeAsync(); using (var fs2 = new FileStream(path, FileMode.Open, FileAccess.ReadWrite, FileShare.ReadWrite)) { Assert.Equal(100, fs2.Length); } } [Fact] public async Task DerivedFileStreamDisposeUsedForDisposeAsync() { var fs = new OverridesDisposeFileStream(GetTestFilePath(), FileMode.Create); Assert.False(fs.DisposeInvoked); await fs.DisposeAsync(); Assert.True(fs.DisposeInvoked); } private sealed class OverridesDisposeFileStream : FileStream { public bool DisposeInvoked; public OverridesDisposeFileStream(string path, FileMode mode) : base(path, mode) { } protected override void Dispose(bool disposing) { DisposeInvoked = true; base.Dispose(disposing); } } } }
1
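The before/after pair above re-enables `DisposeAsyncFlushes` by dropping the `[ActiveIssue]` attribute. A minimal console sketch of the behavior that test pins down (illustrative only; it assumes a writable temp directory and uses a hypothetical random file name): bytes buffered by a `FileStream` must not be visible on disk before `DisposeAsync`, and must be visible exactly once afterwards.

```csharp
using System;
using System.IO;
using System.Threading.Tasks;

class DisposeAsyncFlushSketch
{
    static async Task Main()
    {
        // Hypothetical temp file path, purely for illustration.
        string path = Path.Combine(Path.GetTempPath(), Path.GetRandomFileName());

        var fs = new FileStream(path, FileMode.Create, FileAccess.ReadWrite, FileShare.ReadWrite);
        fs.Write(new byte[100], 0, 100); // stays in the internal buffer (default 4096 bytes)

        using (var reader = new FileStream(path, FileMode.Open, FileAccess.Read, FileShare.ReadWrite))
            Console.WriteLine($"before DisposeAsync: {reader.Length} bytes on disk"); // expected: 0

        await fs.DisposeAsync(); // flushes the 100 buffered bytes once and closes the handle

        using (var reader = new FileStream(path, FileMode.Open, FileAccess.Read, FileShare.ReadWrite))
            Console.WriteLine($"after DisposeAsync: {reader.Length} bytes on disk"); // expected: 100

        File.Delete(path);
    }
}
```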
dotnet/runtime
65,899
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync
I was unable to repro #65835 locally for a few hours, but I believe that it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync` and my recent changes from #64997 have just exposed the problem by adding the finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync`, but not calling `GC.SuppressFinalize(this)`. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I guess (I was not able to repro it) that it's caused by a flush performed by the finalizer. I am not 100% sure because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flushing should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I've tried really hard to write a failing unit test, but I've failed. The reason for that is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base impl and is bug free.
adamsitnik
"2022-02-25T17:17:25Z"
"2022-03-01T13:15:47Z"
097d9ea3c1584eb8745bd0a72ebf9cd3a31f1618
3cbed4b71800709a8121e50e964ae5b02bd80b94
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync. I was unable to repro #65835 locally for a few hours, but I believe that it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync` and my recent changes from #64997 have just exposed the problem by adding the finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync`, but not calling `GC.SuppressFinalize(this)`. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I guess (I was not able to repro it) that it's caused by a flush performed by the finalizer. I am not 100% sure because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flushing should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I've tried really hard to write a failing unit test, but I've failed. The reason for that is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base impl and is bug free.
./src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.ComponentModel; using System.IO.Strategies; using System.Runtime.Versioning; using System.Threading; using System.Threading.Tasks; using Microsoft.Win32.SafeHandles; namespace System.IO { public class FileStream : Stream { internal const int DefaultBufferSize = 4096; internal const FileShare DefaultShare = FileShare.Read; private const bool DefaultIsAsync = false; private readonly FileStreamStrategy _strategy; [EditorBrowsable(EditorBrowsableState.Never)] [Obsolete("This constructor has been deprecated. Use FileStream(SafeFileHandle handle, FileAccess access) instead.")] public FileStream(IntPtr handle, FileAccess access) : this(handle, access, true, DefaultBufferSize, DefaultIsAsync) { } [EditorBrowsable(EditorBrowsableState.Never)] [Obsolete("This constructor has been deprecated. Use FileStream(SafeFileHandle handle, FileAccess access) and optionally make a new SafeFileHandle with ownsHandle=false if needed instead.")] public FileStream(IntPtr handle, FileAccess access, bool ownsHandle) : this(handle, access, ownsHandle, DefaultBufferSize, DefaultIsAsync) { } [EditorBrowsable(EditorBrowsableState.Never)] [Obsolete("This constructor has been deprecated. Use FileStream(SafeFileHandle handle, FileAccess access, int bufferSize) and optionally make a new SafeFileHandle with ownsHandle=false if needed instead.")] public FileStream(IntPtr handle, FileAccess access, bool ownsHandle, int bufferSize) : this(handle, access, ownsHandle, bufferSize, DefaultIsAsync) { } [EditorBrowsable(EditorBrowsableState.Never)] [Obsolete("This constructor has been deprecated. Use FileStream(SafeFileHandle handle, FileAccess access, int bufferSize, bool isAsync) and optionally make a new SafeFileHandle with ownsHandle=false if needed instead.")] public FileStream(IntPtr handle, FileAccess access, bool ownsHandle, int bufferSize, bool isAsync) { SafeFileHandle safeHandle = new SafeFileHandle(handle, ownsHandle: ownsHandle); try { ValidateHandle(safeHandle, access, bufferSize, isAsync); _strategy = FileStreamHelpers.ChooseStrategy(this, safeHandle, access, bufferSize, isAsync); } catch { // We don't want to take ownership of closing passed in handles // *unless* the constructor completes successfully. GC.SuppressFinalize(safeHandle); // This would also prevent Close from being called, but is unnecessary // as we've removed the object from the finalizer queue.
// // safeHandle.SetHandleAsInvalid(); throw; } } private static void ValidateHandle(SafeFileHandle handle, FileAccess access, int bufferSize) { if (handle.IsInvalid) { throw new ArgumentException(SR.Arg_InvalidHandle, nameof(handle)); } else if (access < FileAccess.Read || access > FileAccess.ReadWrite) { throw new ArgumentOutOfRangeException(nameof(access), SR.ArgumentOutOfRange_Enum); } else if (bufferSize < 0) { ThrowHelper.ThrowArgumentOutOfRangeException_NeedNonNegNum(nameof(bufferSize)); } else if (handle.IsClosed) { ThrowHelper.ThrowObjectDisposedException_FileClosed(); } } private static void ValidateHandle(SafeFileHandle handle, FileAccess access, int bufferSize, bool isAsync) { ValidateHandle(handle, access, bufferSize); if (isAsync && !handle.IsAsync) { ThrowHelper.ThrowArgumentException_HandleNotAsync(nameof(handle)); } else if (!isAsync && handle.IsAsync) { ThrowHelper.ThrowArgumentException_HandleNotSync(nameof(handle)); } } public FileStream(SafeFileHandle handle, FileAccess access) : this(handle, access, DefaultBufferSize) { } public FileStream(SafeFileHandle handle, FileAccess access, int bufferSize) { ValidateHandle(handle, access, bufferSize); _strategy = FileStreamHelpers.ChooseStrategy(this, handle, access, bufferSize, handle.IsAsync); } public FileStream(SafeFileHandle handle, FileAccess access, int bufferSize, bool isAsync) { ValidateHandle(handle, access, bufferSize, isAsync); _strategy = FileStreamHelpers.ChooseStrategy(this, handle, access, bufferSize, isAsync); } public FileStream(string path, FileMode mode) : this(path, mode, mode == FileMode.Append ? FileAccess.Write : FileAccess.ReadWrite, DefaultShare, DefaultBufferSize, DefaultIsAsync) { } public FileStream(string path, FileMode mode, FileAccess access) : this(path, mode, access, DefaultShare, DefaultBufferSize, DefaultIsAsync) { } public FileStream(string path, FileMode mode, FileAccess access, FileShare share) : this(path, mode, access, share, DefaultBufferSize, DefaultIsAsync) { } public FileStream(string path, FileMode mode, FileAccess access, FileShare share, int bufferSize) : this(path, mode, access, share, bufferSize, DefaultIsAsync) { } public FileStream(string path, FileMode mode, FileAccess access, FileShare share, int bufferSize, bool useAsync) : this(path, mode, access, share, bufferSize, useAsync ? FileOptions.Asynchronous : FileOptions.None) { } public FileStream(string path, FileMode mode, FileAccess access, FileShare share, int bufferSize, FileOptions options) : this(path, mode, access, share, bufferSize, options, 0) { } ~FileStream() { // Preserved for compatibility since FileStream has defined a // finalizer in past releases and derived classes may depend // on Dispose(false) call. Dispose(false); } /// <summary> /// Initializes a new instance of the <see cref="System.IO.FileStream" /> class with the specified path, creation mode, read/write and sharing permission, the access other FileStreams can have to the same file, the buffer size, additional file options and the allocation size.
/// </summary> /// <param name="path">A relative or absolute path for the file that the current <see cref="System.IO.FileStream" /> instance will encapsulate.</param> /// <param name="options">An object that describes optional <see cref="System.IO.FileStream" /> parameters to use.</param> /// <exception cref="T:System.ArgumentNullException"><paramref name="path" /> or <paramref name="options" /> is <see langword="null" />.</exception> /// <exception cref="T:System.ArgumentException"><paramref name="path" /> is an empty string (""), contains only white space, or contains one or more invalid characters. /// -or- /// <paramref name="path" /> refers to a non-file device, such as <c>CON:</c>, <c>COM1:</c>, <c>LPT1:</c>, etc. in an NTFS environment.</exception> /// <exception cref="T:System.NotSupportedException"><paramref name="path" /> refers to a non-file device, such as <c>CON:</c>, <c>COM1:</c>, <c>LPT1:</c>, etc. in a non-NTFS environment.</exception> /// <exception cref="T:System.IO.FileNotFoundException">The file cannot be found, such as when <see cref="System.IO.FileStreamOptions.Mode" /> is <see langword="FileMode.Truncate" /> or <see langword="FileMode.Open" />, and the file specified by <paramref name="path" /> does not exist. The file must already exist in these modes.</exception> /// <exception cref="T:System.IO.IOException">An I/O error, such as specifying <see langword="FileMode.CreateNew" /> when the file specified by <paramref name="path" /> already exists, occurred. /// -or- /// The stream has been closed. /// -or- /// The disk was full (when <see cref="System.IO.FileStreamOptions.PreallocationSize" /> was provided and <paramref name="path" /> was pointing to a regular file). /// -or- /// The file was too large (when <see cref="System.IO.FileStreamOptions.PreallocationSize" /> was provided and <paramref name="path" /> was pointing to a regular file).</exception> /// <exception cref="T:System.Security.SecurityException">The caller does not have the required permission.</exception> /// <exception cref="T:System.IO.DirectoryNotFoundException">The specified path is invalid, such as being on an unmapped drive.</exception> /// <exception cref="T:System.UnauthorizedAccessException">The <see cref="System.IO.FileStreamOptions.Access" /> requested is not permitted by the operating system for the specified <paramref name="path" />, such as when <see cref="System.IO.FileStreamOptions.Access" /> is <see cref="System.IO.FileAccess.Write" /> or <see cref="System.IO.FileAccess.ReadWrite" /> and the file or directory is set for read-only access. /// -or- /// <see cref="F:System.IO.FileOptions.Encrypted" /> is specified for <see cref="System.IO.FileStreamOptions.Options" /> , but file encryption is not supported on the current platform.</exception> /// <exception cref="T:System.IO.PathTooLongException">The specified path, file name, or both exceed the system-defined maximum length.
</exception> public FileStream(string path, FileStreamOptions options) { ArgumentException.ThrowIfNullOrEmpty(path); ArgumentNullException.ThrowIfNull(options); if ((options.Access & FileAccess.Read) != 0 && options.Mode == FileMode.Append) { throw new ArgumentException(SR.Argument_InvalidAppendMode, nameof(options)); } else if ((options.Access & FileAccess.Write) == 0) { if (options.Mode == FileMode.Truncate || options.Mode == FileMode.CreateNew || options.Mode == FileMode.Create || options.Mode == FileMode.Append) { throw new ArgumentException(SR.Format(SR.Argument_InvalidFileModeAndAccessCombo, options.Mode, options.Access), nameof(options)); } } if (options.PreallocationSize > 0) { FileStreamHelpers.ValidateArgumentsForPreallocation(options.Mode, options.Access); } FileStreamHelpers.SerializationGuard(options.Access); _strategy = FileStreamHelpers.ChooseStrategy( this, path, options.Mode, options.Access, options.Share, options.BufferSize, options.Options, options.PreallocationSize); } private FileStream(string path, FileMode mode, FileAccess access, FileShare share, int bufferSize, FileOptions options, long preallocationSize) { FileStreamHelpers.ValidateArguments(path, mode, access, share, bufferSize, options, preallocationSize); _strategy = FileStreamHelpers.ChooseStrategy(this, path, mode, access, share, bufferSize, options, preallocationSize); } [Obsolete("FileStream.Handle has been deprecated. Use FileStream's SafeFileHandle property instead.")] public virtual IntPtr Handle => _strategy.Handle; [UnsupportedOSPlatform("ios")] [UnsupportedOSPlatform("macos")] [UnsupportedOSPlatform("tvos")] public virtual void Lock(long position, long length) { if (position < 0 || length < 0) { ThrowHelper.ThrowArgumentOutOfRangeException_NeedNonNegNum(position < 0 ? nameof(position) : nameof(length)); } else if (_strategy.IsClosed) { ThrowHelper.ThrowObjectDisposedException_FileClosed(); } _strategy.Lock(position, length); } [UnsupportedOSPlatform("ios")] [UnsupportedOSPlatform("macos")] [UnsupportedOSPlatform("tvos")] public virtual void Unlock(long position, long length) { if (position < 0 || length < 0) { ThrowHelper.ThrowArgumentOutOfRangeException_NeedNonNegNum(position < 0 ?
nameof(position) : nameof(length)); } else if (_strategy.IsClosed) { ThrowHelper.ThrowObjectDisposedException_FileClosed(); } _strategy.Unlock(position, length); } public override Task FlushAsync(CancellationToken cancellationToken) { if (cancellationToken.IsCancellationRequested) { return Task.FromCanceled(cancellationToken); } else if (_strategy.IsClosed) { ThrowHelper.ThrowObjectDisposedException_FileClosed(); } return _strategy.FlushAsync(cancellationToken); } public override int Read(byte[] buffer, int offset, int count) { ValidateReadWriteArgs(buffer, offset, count); return _strategy.Read(buffer, offset, count); } public override int Read(Span<byte> buffer) => _strategy.Read(buffer); public override Task<int> ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) { ValidateBufferArguments(buffer, offset, count); if (cancellationToken.IsCancellationRequested) { return Task.FromCanceled<int>(cancellationToken); } else if (!_strategy.CanRead) { if (_strategy.IsClosed) { ThrowHelper.ThrowObjectDisposedException_FileClosed(); } ThrowHelper.ThrowNotSupportedException_UnreadableStream(); } return _strategy.ReadAsync(buffer, offset, count, cancellationToken); } public override ValueTask<int> ReadAsync(Memory<byte> buffer, CancellationToken cancellationToken = default) { if (cancellationToken.IsCancellationRequested) { return ValueTask.FromCanceled<int>(cancellationToken); } else if (!_strategy.CanRead) { if (_strategy.IsClosed) { ThrowHelper.ThrowObjectDisposedException_FileClosed(); } ThrowHelper.ThrowNotSupportedException_UnreadableStream(); } return _strategy.ReadAsync(buffer, cancellationToken); } public override void Write(byte[] buffer, int offset, int count) { ValidateReadWriteArgs(buffer, offset, count); _strategy.Write(buffer, offset, count); } public override void Write(ReadOnlySpan<byte> buffer) => _strategy.Write(buffer); public override Task WriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) { ValidateBufferArguments(buffer, offset, count); if (cancellationToken.IsCancellationRequested) { return Task.FromCanceled(cancellationToken); } else if (!_strategy.CanWrite) { if (_strategy.IsClosed) { ThrowHelper.ThrowObjectDisposedException_FileClosed(); } ThrowHelper.ThrowNotSupportedException_UnwritableStream(); } return _strategy.WriteAsync(buffer, offset, count, cancellationToken); } public override ValueTask WriteAsync(ReadOnlyMemory<byte> buffer, CancellationToken cancellationToken = default) { if (cancellationToken.IsCancellationRequested) { return ValueTask.FromCanceled(cancellationToken); } else if (!_strategy.CanWrite) { if (_strategy.IsClosed) { ThrowHelper.ThrowObjectDisposedException_FileClosed(); } ThrowHelper.ThrowNotSupportedException_UnwritableStream(); } return _strategy.WriteAsync(buffer, cancellationToken); } /// <summary> /// Clears buffers for this stream and causes any buffered data to be written to the file. /// </summary> public override void Flush() { // Make sure that we call through the public virtual API Flush(flushToDisk: false); } /// <summary> /// Clears buffers for this stream, and if <param name="flushToDisk"/> is true, /// causes any buffered data to be written to the file.
/// </summary> public virtual void Flush(bool flushToDisk) { if (_strategy.IsClosed) { ThrowHelper.ThrowObjectDisposedException_FileClosed(); } _strategy.Flush(flushToDisk); } /// <summary>Gets a value indicating whether the current stream supports reading.</summary> public override bool CanRead => _strategy.CanRead; /// <summary>Gets a value indicating whether the current stream supports writing.</summary> public override bool CanWrite => _strategy.CanWrite; /// <summary>Validates arguments to Read and Write and throws resulting exceptions.</summary> /// <param name="buffer">The buffer to read from or write to.</param> /// <param name="offset">The zero-based offset into the buffer.</param> /// <param name="count">The maximum number of bytes to read or write.</param> private void ValidateReadWriteArgs(byte[] buffer, int offset, int count) { ValidateBufferArguments(buffer, offset, count); if (_strategy.IsClosed) { ThrowHelper.ThrowObjectDisposedException_FileClosed(); } } /// <summary>Sets the length of this stream to the given value.</summary> /// <param name="value">The new length of the stream.</param> public override void SetLength(long value) { if (value < 0) { ThrowHelper.ThrowArgumentOutOfRangeException(ExceptionArgument.value, ExceptionResource.ArgumentOutOfRange_NeedNonNegNum); } else if (_strategy.IsClosed) { ThrowHelper.ThrowObjectDisposedException_FileClosed(); } else if (!CanSeek) { ThrowHelper.ThrowNotSupportedException_UnseekableStream(); } else if (!CanWrite) { ThrowHelper.ThrowNotSupportedException_UnwritableStream(); } _strategy.SetLength(value); } public virtual SafeFileHandle SafeFileHandle => _strategy.SafeFileHandle; /// <summary>Gets the path that was passed to the constructor.</summary> public virtual string Name => _strategy.Name; /// <summary>Gets a value indicating whether the stream was opened for I/O to be performed synchronously or asynchronously.</summary> public virtual bool IsAsync => _strategy.IsAsync; /// <summary>Gets the length of the stream in bytes.</summary> public override long Length { get { if (_strategy.IsClosed) { ThrowHelper.ThrowObjectDisposedException_FileClosed(); } else if (!CanSeek) { ThrowHelper.ThrowNotSupportedException_UnseekableStream(); } return _strategy.Length; } } /// <summary>Gets or sets the position within the current stream</summary> public override long Position { get { if (_strategy.IsClosed) { ThrowHelper.ThrowObjectDisposedException_FileClosed(); } else if (!CanSeek) { ThrowHelper.ThrowNotSupportedException_UnseekableStream(); } return _strategy.Position; } set { if (value < 0) { ThrowHelper.ThrowArgumentOutOfRangeException(ExceptionArgument.value, ExceptionResource.ArgumentOutOfRange_NeedNonNegNum); } _strategy.Seek(value, SeekOrigin.Begin); } } /// <summary> /// Reads a byte from the file stream. Returns the byte cast to an int /// or -1 if reading from the end of the stream. /// </summary> public override int ReadByte() => _strategy.ReadByte(); /// <summary> /// Writes a byte to the current position in the stream and advances the position /// within the stream by one byte.
/// </summary> /// <param name="value">The byte to write to the stream.</param> public override void WriteByte(byte value) => _strategy.WriteByte(value); // _strategy can be null only when ctor has thrown protected override void Dispose(bool disposing) => _strategy?.DisposeInternal(disposing); public override ValueTask DisposeAsync() => _strategy.DisposeAsync(); public override void CopyTo(Stream destination, int bufferSize) { ValidateCopyToArguments(destination, bufferSize); _strategy.CopyTo(destination, bufferSize); } public override Task CopyToAsync(Stream destination, int bufferSize, CancellationToken cancellationToken) { ValidateCopyToArguments(destination, bufferSize); return _strategy.CopyToAsync(destination, bufferSize, cancellationToken); } public override IAsyncResult BeginRead(byte[] buffer, int offset, int count, AsyncCallback? callback, object? state) { ValidateBufferArguments(buffer, offset, count); if (_strategy.IsClosed) { ThrowHelper.ThrowObjectDisposedException_FileClosed(); } else if (!CanRead) { ThrowHelper.ThrowNotSupportedException_UnreadableStream(); } return _strategy.BeginRead(buffer, offset, count, callback, state); } public override int EndRead(IAsyncResult asyncResult!!) { return _strategy.EndRead(asyncResult); } public override IAsyncResult BeginWrite(byte[] buffer, int offset, int count, AsyncCallback? callback, object? state) { ValidateBufferArguments(buffer, offset, count); if (_strategy.IsClosed) { ThrowHelper.ThrowObjectDisposedException_FileClosed(); } else if (!CanWrite) { ThrowHelper.ThrowNotSupportedException_UnwritableStream(); } return _strategy.BeginWrite(buffer, offset, count, callback, state); } public override void EndWrite(IAsyncResult asyncResult!!) { _strategy.EndWrite(asyncResult); } public override bool CanSeek => _strategy.CanSeek; public override long Seek(long offset, SeekOrigin origin) => _strategy.Seek(offset, origin); internal Task BaseFlushAsync(CancellationToken cancellationToken) => base.FlushAsync(cancellationToken); internal int BaseRead(Span<byte> buffer) => base.Read(buffer); internal Task<int> BaseReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) => base.ReadAsync(buffer, offset, count, cancellationToken); internal ValueTask<int> BaseReadAsync(Memory<byte> buffer, CancellationToken cancellationToken = default) => base.ReadAsync(buffer, cancellationToken); internal void BaseWrite(ReadOnlySpan<byte> buffer) => base.Write(buffer); internal Task BaseWriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) => base.WriteAsync(buffer, offset, count, cancellationToken); internal ValueTask BaseWriteAsync(ReadOnlyMemory<byte> buffer, CancellationToken cancellationToken = default) => base.WriteAsync(buffer, cancellationToken); internal ValueTask BaseDisposeAsync() => base.DisposeAsync(); internal Task BaseCopyToAsync(Stream destination, int bufferSize, CancellationToken cancellationToken) => base.CopyToAsync(destination, bufferSize, cancellationToken); internal IAsyncResult BaseBeginRead(byte[] buffer, int offset, int count, AsyncCallback? callback, object? state) => base.BeginRead(buffer, offset, count, callback, state); internal int BaseEndRead(IAsyncResult asyncResult) => base.EndRead(asyncResult); internal IAsyncResult BaseBeginWrite(byte[] buffer, int offset, int count, AsyncCallback? callback, object? state) => base.BeginWrite(buffer, offset, count, callback, state); internal void BaseEndWrite(IAsyncResult asyncResult) => base.EndWrite(asyncResult); } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.ComponentModel; using System.IO.Strategies; using System.Runtime.Versioning; using System.Threading; using System.Threading.Tasks; using Microsoft.Win32.SafeHandles; namespace System.IO { public class FileStream : Stream { internal const int DefaultBufferSize = 4096; internal const FileShare DefaultShare = FileShare.Read; private const bool DefaultIsAsync = false; private readonly FileStreamStrategy _strategy; [EditorBrowsable(EditorBrowsableState.Never)] [Obsolete("This constructor has been deprecated. Use FileStream(SafeFileHandle handle, FileAccess access) instead.")] public FileStream(IntPtr handle, FileAccess access) : this(handle, access, true, DefaultBufferSize, DefaultIsAsync) { } [EditorBrowsable(EditorBrowsableState.Never)] [Obsolete("This constructor has been deprecated. Use FileStream(SafeFileHandle handle, FileAccess access) and optionally make a new SafeFileHandle with ownsHandle=false if needed instead.")] public FileStream(IntPtr handle, FileAccess access, bool ownsHandle) : this(handle, access, ownsHandle, DefaultBufferSize, DefaultIsAsync) { } [EditorBrowsable(EditorBrowsableState.Never)] [Obsolete("This constructor has been deprecated. Use FileStream(SafeFileHandle handle, FileAccess access, int bufferSize) and optionally make a new SafeFileHandle with ownsHandle=false if needed instead.")] public FileStream(IntPtr handle, FileAccess access, bool ownsHandle, int bufferSize) : this(handle, access, ownsHandle, bufferSize, DefaultIsAsync) { } [EditorBrowsable(EditorBrowsableState.Never)] [Obsolete("This constructor has been deprecated. Use FileStream(SafeFileHandle handle, FileAccess access, int bufferSize, bool isAsync) and optionally make a new SafeFileHandle with ownsHandle=false if needed instead.")] public FileStream(IntPtr handle, FileAccess access, bool ownsHandle, int bufferSize, bool isAsync) { SafeFileHandle safeHandle = new SafeFileHandle(handle, ownsHandle: ownsHandle); try { ValidateHandle(safeHandle, access, bufferSize, isAsync); _strategy = FileStreamHelpers.ChooseStrategy(this, safeHandle, access, bufferSize, isAsync); } catch { // We don't want to take ownership of closing passed in handles // *unless* the constructor completes successfully. GC.SuppressFinalize(safeHandle); // This would also prevent Close from being called, but is unnecessary // as we've removed the object from the finalizer queue.
// // safeHandle.SetHandleAsInvalid(); throw; } } private static void ValidateHandle(SafeFileHandle handle, FileAccess access, int bufferSize) { if (handle.IsInvalid) { throw new ArgumentException(SR.Arg_InvalidHandle, nameof(handle)); } else if (access < FileAccess.Read || access > FileAccess.ReadWrite) { throw new ArgumentOutOfRangeException(nameof(access), SR.ArgumentOutOfRange_Enum); } else if (bufferSize < 0) { ThrowHelper.ThrowArgumentOutOfRangeException_NeedNonNegNum(nameof(bufferSize)); } else if (handle.IsClosed) { ThrowHelper.ThrowObjectDisposedException_FileClosed(); } } private static void ValidateHandle(SafeFileHandle handle, FileAccess access, int bufferSize, bool isAsync) { ValidateHandle(handle, access, bufferSize); if (isAsync && !handle.IsAsync) { ThrowHelper.ThrowArgumentException_HandleNotAsync(nameof(handle)); } else if (!isAsync && handle.IsAsync) { ThrowHelper.ThrowArgumentException_HandleNotSync(nameof(handle)); } } public FileStream(SafeFileHandle handle, FileAccess access) : this(handle, access, DefaultBufferSize) { } public FileStream(SafeFileHandle handle, FileAccess access, int bufferSize) { ValidateHandle(handle, access, bufferSize); _strategy = FileStreamHelpers.ChooseStrategy(this, handle, access, bufferSize, handle.IsAsync); } public FileStream(SafeFileHandle handle, FileAccess access, int bufferSize, bool isAsync) { ValidateHandle(handle, access, bufferSize, isAsync); _strategy = FileStreamHelpers.ChooseStrategy(this, handle, access, bufferSize, isAsync); } public FileStream(string path, FileMode mode) : this(path, mode, mode == FileMode.Append ? FileAccess.Write : FileAccess.ReadWrite, DefaultShare, DefaultBufferSize, DefaultIsAsync) { } public FileStream(string path, FileMode mode, FileAccess access) : this(path, mode, access, DefaultShare, DefaultBufferSize, DefaultIsAsync) { } public FileStream(string path, FileMode mode, FileAccess access, FileShare share) : this(path, mode, access, share, DefaultBufferSize, DefaultIsAsync) { } public FileStream(string path, FileMode mode, FileAccess access, FileShare share, int bufferSize) : this(path, mode, access, share, bufferSize, DefaultIsAsync) { } public FileStream(string path, FileMode mode, FileAccess access, FileShare share, int bufferSize, bool useAsync) : this(path, mode, access, share, bufferSize, useAsync ? FileOptions.Asynchronous : FileOptions.None) { } public FileStream(string path, FileMode mode, FileAccess access, FileShare share, int bufferSize, FileOptions options) : this(path, mode, access, share, bufferSize, options, 0) { } ~FileStream() { // Preserved for compatibility since FileStream has defined a // finalizer in past releases and derived classes may depend // on Dispose(false) call. Dispose(false); } /// <summary> /// Initializes a new instance of the <see cref="System.IO.FileStream" /> class with the specified path, creation mode, read/write and sharing permission, the access other FileStreams can have to the same file, the buffer size, additional file options and the allocation size.
/// </summary> /// <param name="path">A relative or absolute path for the file that the current <see cref="System.IO.FileStream" /> instance will encapsulate.</param> /// <param name="options">An object that describes optional <see cref="System.IO.FileStream" /> parameters to use.</param> /// <exception cref="T:System.ArgumentNullException"><paramref name="path" /> or <paramref name="options" /> is <see langword="null" />.</exception> /// <exception cref="T:System.ArgumentException"><paramref name="path" /> is an empty string (""), contains only white space, or contains one or more invalid characters. /// -or- /// <paramref name="path" /> refers to a non-file device, such as <c>CON:</c>, <c>COM1:</c>, <c>LPT1:</c>, etc. in an NTFS environment.</exception> /// <exception cref="T:System.NotSupportedException"><paramref name="path" /> refers to a non-file device, such as <c>CON:</c>, <c>COM1:</c>, <c>LPT1:</c>, etc. in a non-NTFS environment.</exception> /// <exception cref="T:System.IO.FileNotFoundException">The file cannot be found, such as when <see cref="System.IO.FileStreamOptions.Mode" /> is <see langword="FileMode.Truncate" /> or <see langword="FileMode.Open" />, and the file specified by <paramref name="path" /> does not exist. The file must already exist in these modes.</exception> /// <exception cref="T:System.IO.IOException">An I/O error, such as specifying <see langword="FileMode.CreateNew" /> when the file specified by <paramref name="path" /> already exists, occurred. /// -or- /// The stream has been closed. /// -or- /// The disk was full (when <see cref="System.IO.FileStreamOptions.PreallocationSize" /> was provided and <paramref name="path" /> was pointing to a regular file). /// -or- /// The file was too large (when <see cref="System.IO.FileStreamOptions.PreallocationSize" /> was provided and <paramref name="path" /> was pointing to a regular file).</exception> /// <exception cref="T:System.Security.SecurityException">The caller does not have the required permission.</exception> /// <exception cref="T:System.IO.DirectoryNotFoundException">The specified path is invalid, such as being on an unmapped drive.</exception> /// <exception cref="T:System.UnauthorizedAccessException">The <see cref="System.IO.FileStreamOptions.Access" /> requested is not permitted by the operating system for the specified <paramref name="path" />, such as when <see cref="System.IO.FileStreamOptions.Access" /> is <see cref="System.IO.FileAccess.Write" /> or <see cref="System.IO.FileAccess.ReadWrite" /> and the file or directory is set for read-only access. /// -or- /// <see cref="F:System.IO.FileOptions.Encrypted" /> is specified for <see cref="System.IO.FileStreamOptions.Options" /> , but file encryption is not supported on the current platform.</exception> /// <exception cref="T:System.IO.PathTooLongException">The specified path, file name, or both exceed the system-defined maximum length.
</exception> public FileStream(string path, FileStreamOptions options) { ArgumentException.ThrowIfNullOrEmpty(path); ArgumentNullException.ThrowIfNull(options); if ((options.Access & FileAccess.Read) != 0 && options.Mode == FileMode.Append) { throw new ArgumentException(SR.Argument_InvalidAppendMode, nameof(options)); } else if ((options.Access & FileAccess.Write) == 0) { if (options.Mode == FileMode.Truncate || options.Mode == FileMode.CreateNew || options.Mode == FileMode.Create || options.Mode == FileMode.Append) { throw new ArgumentException(SR.Format(SR.Argument_InvalidFileModeAndAccessCombo, options.Mode, options.Access), nameof(options)); } } if (options.PreallocationSize > 0) { FileStreamHelpers.ValidateArgumentsForPreallocation(options.Mode, options.Access); } FileStreamHelpers.SerializationGuard(options.Access); _strategy = FileStreamHelpers.ChooseStrategy( this, path, options.Mode, options.Access, options.Share, options.BufferSize, options.Options, options.PreallocationSize); } private FileStream(string path, FileMode mode, FileAccess access, FileShare share, int bufferSize, FileOptions options, long preallocationSize) { FileStreamHelpers.ValidateArguments(path, mode, access, share, bufferSize, options, preallocationSize); _strategy = FileStreamHelpers.ChooseStrategy(this, path, mode, access, share, bufferSize, options, preallocationSize); } [Obsolete("FileStream.Handle has been deprecated. Use FileStream's SafeFileHandle property instead.")] public virtual IntPtr Handle => _strategy.Handle; [UnsupportedOSPlatform("ios")] [UnsupportedOSPlatform("macos")] [UnsupportedOSPlatform("tvos")] public virtual void Lock(long position, long length) { if (position < 0 || length < 0) { ThrowHelper.ThrowArgumentOutOfRangeException_NeedNonNegNum(position < 0 ? nameof(position) : nameof(length)); } else if (_strategy.IsClosed) { ThrowHelper.ThrowObjectDisposedException_FileClosed(); } _strategy.Lock(position, length); } [UnsupportedOSPlatform("ios")] [UnsupportedOSPlatform("macos")] [UnsupportedOSPlatform("tvos")] public virtual void Unlock(long position, long length) { if (position < 0 || length < 0) { ThrowHelper.ThrowArgumentOutOfRangeException_NeedNonNegNum(position < 0 ?
nameof(position) : nameof(length)); } else if (_strategy.IsClosed) { ThrowHelper.ThrowObjectDisposedException_FileClosed(); } _strategy.Unlock(position, length); } public override Task FlushAsync(CancellationToken cancellationToken) { if (cancellationToken.IsCancellationRequested) { return Task.FromCanceled(cancellationToken); } else if (_strategy.IsClosed) { ThrowHelper.ThrowObjectDisposedException_FileClosed(); } return _strategy.FlushAsync(cancellationToken); } public override int Read(byte[] buffer, int offset, int count) { ValidateReadWriteArgs(buffer, offset, count); return _strategy.Read(buffer, offset, count); } public override int Read(Span<byte> buffer) => _strategy.Read(buffer); public override Task<int> ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) { ValidateBufferArguments(buffer, offset, count); if (cancellationToken.IsCancellationRequested) { return Task.FromCanceled<int>(cancellationToken); } else if (!_strategy.CanRead) { if (_strategy.IsClosed) { ThrowHelper.ThrowObjectDisposedException_FileClosed(); } ThrowHelper.ThrowNotSupportedException_UnreadableStream(); } return _strategy.ReadAsync(buffer, offset, count, cancellationToken); } public override ValueTask<int> ReadAsync(Memory<byte> buffer, CancellationToken cancellationToken = default) { if (cancellationToken.IsCancellationRequested) { return ValueTask.FromCanceled<int>(cancellationToken); } else if (!_strategy.CanRead) { if (_strategy.IsClosed) { ThrowHelper.ThrowObjectDisposedException_FileClosed(); } ThrowHelper.ThrowNotSupportedException_UnreadableStream(); } return _strategy.ReadAsync(buffer, cancellationToken); } public override void Write(byte[] buffer, int offset, int count) { ValidateReadWriteArgs(buffer, offset, count); _strategy.Write(buffer, offset, count); } public override void Write(ReadOnlySpan<byte> buffer) => _strategy.Write(buffer); public override Task WriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) { ValidateBufferArguments(buffer, offset, count); if (cancellationToken.IsCancellationRequested) { return Task.FromCanceled(cancellationToken); } else if (!_strategy.CanWrite) { if (_strategy.IsClosed) { ThrowHelper.ThrowObjectDisposedException_FileClosed(); } ThrowHelper.ThrowNotSupportedException_UnwritableStream(); } return _strategy.WriteAsync(buffer, offset, count, cancellationToken); } public override ValueTask WriteAsync(ReadOnlyMemory<byte> buffer, CancellationToken cancellationToken = default) { if (cancellationToken.IsCancellationRequested) { return ValueTask.FromCanceled(cancellationToken); } else if (!_strategy.CanWrite) { if (_strategy.IsClosed) { ThrowHelper.ThrowObjectDisposedException_FileClosed(); } ThrowHelper.ThrowNotSupportedException_UnwritableStream(); } return _strategy.WriteAsync(buffer, cancellationToken); } /// <summary> /// Clears buffers for this stream and causes any buffered data to be written to the file. /// </summary> public override void Flush() { // Make sure that we call through the public virtual API Flush(flushToDisk: false); } /// <summary> /// Clears buffers for this stream, and if <param name="flushToDisk"/> is true, /// causes any buffered data to be written to the file.
/// </summary> public virtual void Flush(bool flushToDisk) { if (_strategy.IsClosed) { ThrowHelper.ThrowObjectDisposedException_FileClosed(); } _strategy.Flush(flushToDisk); } /// <summary>Gets a value indicating whether the current stream supports reading.</summary> public override bool CanRead => _strategy.CanRead; /// <summary>Gets a value indicating whether the current stream supports writing.</summary> public override bool CanWrite => _strategy.CanWrite; /// <summary>Validates arguments to Read and Write and throws resulting exceptions.</summary> /// <param name="buffer">The buffer to read from or write to.</param> /// <param name="offset">The zero-based offset into the buffer.</param> /// <param name="count">The maximum number of bytes to read or write.</param> private void ValidateReadWriteArgs(byte[] buffer, int offset, int count) { ValidateBufferArguments(buffer, offset, count); if (_strategy.IsClosed) { ThrowHelper.ThrowObjectDisposedException_FileClosed(); } } /// <summary>Sets the length of this stream to the given value.</summary> /// <param name="value">The new length of the stream.</param> public override void SetLength(long value) { if (value < 0) { ThrowHelper.ThrowArgumentOutOfRangeException(ExceptionArgument.value, ExceptionResource.ArgumentOutOfRange_NeedNonNegNum); } else if (_strategy.IsClosed) { ThrowHelper.ThrowObjectDisposedException_FileClosed(); } else if (!CanSeek) { ThrowHelper.ThrowNotSupportedException_UnseekableStream(); } else if (!CanWrite) { ThrowHelper.ThrowNotSupportedException_UnwritableStream(); } _strategy.SetLength(value); } public virtual SafeFileHandle SafeFileHandle => _strategy.SafeFileHandle; /// <summary>Gets the path that was passed to the constructor.</summary> public virtual string Name => _strategy.Name; /// <summary>Gets a value indicating whether the stream was opened for I/O to be performed synchronously or asynchronously.</summary> public virtual bool IsAsync => _strategy.IsAsync; /// <summary>Gets the length of the stream in bytes.</summary> public override long Length { get { if (_strategy.IsClosed) { ThrowHelper.ThrowObjectDisposedException_FileClosed(); } else if (!CanSeek) { ThrowHelper.ThrowNotSupportedException_UnseekableStream(); } return _strategy.Length; } } /// <summary>Gets or sets the position within the current stream</summary> public override long Position { get { if (_strategy.IsClosed) { ThrowHelper.ThrowObjectDisposedException_FileClosed(); } else if (!CanSeek) { ThrowHelper.ThrowNotSupportedException_UnseekableStream(); } return _strategy.Position; } set { if (value < 0) { ThrowHelper.ThrowArgumentOutOfRangeException(ExceptionArgument.value, ExceptionResource.ArgumentOutOfRange_NeedNonNegNum); } _strategy.Seek(value, SeekOrigin.Begin); } } /// <summary> /// Reads a byte from the file stream. Returns the byte cast to an int /// or -1 if reading from the end of the stream. /// </summary> public override int ReadByte() => _strategy.ReadByte(); /// <summary> /// Writes a byte to the current position in the stream and advances the position /// within the stream by one byte.
/// </summary> /// <param name="value">The byte to write to the stream.</param> public override void WriteByte(byte value) => _strategy.WriteByte(value); // _strategy can be null only when ctor has thrown protected override void Dispose(bool disposing) => _strategy?.DisposeInternal(disposing); public async override ValueTask DisposeAsync() { await _strategy.DisposeAsync().ConfigureAwait(false); Dispose(false); GC.SuppressFinalize(this); } public override void CopyTo(Stream destination, int bufferSize) { ValidateCopyToArguments(destination, bufferSize); _strategy.CopyTo(destination, bufferSize); } public override Task CopyToAsync(Stream destination, int bufferSize, CancellationToken cancellationToken) { ValidateCopyToArguments(destination, bufferSize); return _strategy.CopyToAsync(destination, bufferSize, cancellationToken); } public override IAsyncResult BeginRead(byte[] buffer, int offset, int count, AsyncCallback? callback, object? state) { ValidateBufferArguments(buffer, offset, count); if (_strategy.IsClosed) { ThrowHelper.ThrowObjectDisposedException_FileClosed(); } else if (!CanRead) { ThrowHelper.ThrowNotSupportedException_UnreadableStream(); } return _strategy.BeginRead(buffer, offset, count, callback, state); } public override int EndRead(IAsyncResult asyncResult!!) { return _strategy.EndRead(asyncResult); } public override IAsyncResult BeginWrite(byte[] buffer, int offset, int count, AsyncCallback? callback, object? state) { ValidateBufferArguments(buffer, offset, count); if (_strategy.IsClosed) { ThrowHelper.ThrowObjectDisposedException_FileClosed(); } else if (!CanWrite) { ThrowHelper.ThrowNotSupportedException_UnwritableStream(); } return _strategy.BeginWrite(buffer, offset, count, callback, state); } public override void EndWrite(IAsyncResult asyncResult!!) { _strategy.EndWrite(asyncResult); } public override bool CanSeek => _strategy.CanSeek; public override long Seek(long offset, SeekOrigin origin) => _strategy.Seek(offset, origin); internal Task BaseFlushAsync(CancellationToken cancellationToken) => base.FlushAsync(cancellationToken); internal int BaseRead(Span<byte> buffer) => base.Read(buffer); internal Task<int> BaseReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) => base.ReadAsync(buffer, offset, count, cancellationToken); internal ValueTask<int> BaseReadAsync(Memory<byte> buffer, CancellationToken cancellationToken = default) => base.ReadAsync(buffer, cancellationToken); internal void BaseWrite(ReadOnlySpan<byte> buffer) => base.Write(buffer); internal Task BaseWriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) => base.WriteAsync(buffer, offset, count, cancellationToken); internal ValueTask BaseWriteAsync(ReadOnlyMemory<byte> buffer, CancellationToken cancellationToken = default) => base.WriteAsync(buffer, cancellationToken); internal Task BaseCopyToAsync(Stream destination, int bufferSize, CancellationToken cancellationToken) => base.CopyToAsync(destination, bufferSize, cancellationToken); internal IAsyncResult BaseBeginRead(byte[] buffer, int offset, int count, AsyncCallback? callback, object? state) => base.BeginRead(buffer, offset, count, callback, state); internal int BaseEndRead(IAsyncResult asyncResult) => base.EndRead(asyncResult); internal IAsyncResult BaseBeginWrite(byte[] buffer, int offset, int count, AsyncCallback? callback, object? state) => base.BeginWrite(buffer, offset, count, callback, state); internal void BaseEndWrite(IAsyncResult asyncResult) => base.EndWrite(asyncResult); } }
1
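The FileStream.cs record above carries the actual fix: `DisposeAsync` now awaits the strategy's disposal and ends with `GC.SuppressFinalize(this)`. A stripped-down sketch of the general pattern (an illustrative type, not the runtime's code, which delegates to a strategy object) shows why the call matters for any finalizable type that overrides `DisposeAsync`:

```csharp
using System;
using System.Threading.Tasks;

// Hypothetical finalizable resource, standing in for FileStream.
class FinalizableResource : IAsyncDisposable
{
    // Pretend the finalizer flushes or releases a native resource.
    ~FinalizableResource() => Console.WriteLine("finalizer ran: cleanup happened a second time!");

    public ValueTask DisposeAsync()
    {
        // ... asynchronously flush buffers / close handles here ...
        GC.SuppressFinalize(this); // the call FileStream.DisposeAsync was missing
        return ValueTask.CompletedTask;
    }
}

class Program
{
    static async Task Main()
    {
        await new FinalizableResource().DisposeAsync();

        GC.Collect();
        GC.WaitForPendingFinalizers();
        // With SuppressFinalize in place, the finalizer message above never prints;
        // without it, the finalizer would run against an already-disposed object.
        Console.WriteLine("done");
    }
}
```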
dotnet/runtime
65,899
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync
I was unable to repro #65835 locally for a few hours, but I believe that it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync` and my recent changes from #64997 have just exposed the problem by adding the finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync`, but not calling `GC.SuppressFinalize(this)`. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I guess (I was not able to repro it) that it's caused by a flush performed by the finalizer. I am not 100% sure because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flushing should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I've tried really hard to write a failing unit test, but I've failed. The reason for that is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base impl and is bug free.
adamsitnik
"2022-02-25T17:17:25Z"
"2022-03-01T13:15:47Z"
097d9ea3c1584eb8745bd0a72ebf9cd3a31f1618
3cbed4b71800709a8121e50e964ae5b02bd80b94
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync. I was unable to repro #65835 locally for a few hours, but I believe that it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync` and my recent changes from #64997 have just exposed the problem by adding the finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync`, but not calling `GC.SuppressFinalize(this)`. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I guess (I was not able to repro it) that it's caused by a flush performed by the finalizer. I am not 100% sure because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flushing should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I've tried really hard to write a failing unit test, but I've failed. The reason for that is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base impl and is bug free.
./src/libraries/System.Private.CoreLib/src/System/IO/Strategies/DerivedFileStreamStrategy.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Threading; using System.Threading.Tasks; using Microsoft.Win32.SafeHandles; namespace System.IO.Strategies { // this type exists so we can avoid GetType() != typeof(FileStream) checks in FileStream // when FileStream was supposed to call base.Method() for such cases, we just call _fileStream.BaseMethod() // for everything else we fall back to the actual strategy (like FileStream does) // // it's crucial to NOT use the "base" keyword here! everything must be using _fileStream or _strategy internal sealed class DerivedFileStreamStrategy : FileStreamStrategy { private readonly FileStreamStrategy _strategy; private readonly FileStream _fileStream; internal DerivedFileStreamStrategy(FileStream fileStream, FileStreamStrategy strategy) { _fileStream = fileStream; _strategy = strategy; } public override bool CanRead => _strategy.CanRead; public override bool CanWrite => _strategy.CanWrite; public override bool CanSeek => _strategy.CanSeek; public override long Length => _strategy.Length; public override long Position { get => _strategy.Position; set => _strategy.Position = value; } internal override bool IsAsync => _strategy.IsAsync; internal override string Name => _strategy.Name; internal override SafeFileHandle SafeFileHandle { get { _fileStream.Flush(false); return _strategy.SafeFileHandle; } } internal override bool IsClosed => _strategy.IsClosed; internal override void Lock(long position, long length) => _strategy.Lock(position, length); internal override void Unlock(long position, long length) => _strategy.Unlock(position, length); public override long Seek(long offset, SeekOrigin origin) => _strategy.Seek(offset, origin); public override void SetLength(long value) => _strategy.SetLength(value); public override int ReadByte() => _strategy.ReadByte(); public override IAsyncResult BeginRead(byte[] buffer, int offset, int count, AsyncCallback? callback, object? state) => _strategy.IsAsync ? _strategy.BeginRead(buffer, offset, count, callback, state) : _fileStream.BaseBeginRead(buffer, offset, count, callback, state); public override int EndRead(IAsyncResult asyncResult) => _strategy.IsAsync ? _strategy.EndRead(asyncResult) : _fileStream.BaseEndRead(asyncResult); public override int Read(byte[] buffer, int offset, int count) => _strategy.Read(buffer, offset, count); // If this is a derived type, it may have overridden Read(byte[], int, int) prior to this Read(Span<byte>) // overload being introduced. In that case, this Read(Span<byte>) overload should use the behavior // of Read(byte[],int,int) overload. public override int Read(Span<byte> buffer) => _fileStream.BaseRead(buffer); // If we have been inherited into a subclass, the Strategy implementation could be incorrect // since it does not call through to Read() which a subclass might have overridden. // To be safe we will only use this implementation in cases where we know it is safe to do so, // and delegate to FileStream base class (which will call into Read/ReadAsync) when we are not sure. public override Task<int> ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) => _fileStream.BaseReadAsync(buffer, offset, count, cancellationToken); // If this isn't a concrete FileStream, a derived type may have overridden ReadAsync(byte[],...), // which was introduced first, so delegate to the base which will delegate to that.
public override ValueTask<int> ReadAsync(Memory<byte> buffer, CancellationToken cancellationToken = default) => _fileStream.BaseReadAsync(buffer, cancellationToken); public override IAsyncResult BeginWrite(byte[] buffer, int offset, int count, AsyncCallback? callback, object? state) => _strategy.IsAsync ? _strategy.BeginWrite(buffer, offset, count, callback, state) : _fileStream.BaseBeginWrite(buffer, offset, count, callback, state); public override void EndWrite(IAsyncResult asyncResult) { if (_strategy.IsAsync) { _strategy.EndWrite(asyncResult); } else { _fileStream.BaseEndWrite(asyncResult); } } public override void WriteByte(byte value) => _strategy.WriteByte(value); public override void Write(byte[] buffer, int offset, int count) => _strategy.Write(buffer, offset, count); // If this is a derived type, it may have overridden Write(byte[], int, int) prior to this Write(ReadOnlySpan<byte>) // overload being introduced. In that case, this Write(ReadOnlySpan<byte>) overload should use the behavior // of Write(byte[],int,int) overload. public override void Write(ReadOnlySpan<byte> buffer) => _fileStream.BaseWrite(buffer); // If we have been inherited into a subclass, the Strategy implementation could be incorrect // since it does not call through to Write() or WriteAsync() which a subclass might have overridden. // To be safe we will only use this implementation in cases where we know it is safe to do so, // and delegate to our base class (which will call into Write/WriteAsync) when we are not sure. public override Task WriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) => _fileStream.BaseWriteAsync(buffer, offset, count, cancellationToken); // If this isn't a concrete FileStream, a derived type may have overridden WriteAsync(byte[],...), // which was introduced first, so delegate to the base which will delegate to that. public override ValueTask WriteAsync(ReadOnlyMemory<byte> buffer, CancellationToken cancellationToken = default) => _fileStream.BaseWriteAsync(buffer, cancellationToken); public override void Flush() => throw new InvalidOperationException("FileStream should never call this method."); internal override void Flush(bool flushToDisk) => _strategy.Flush(flushToDisk); // If we have been inherited into a subclass, the following implementation could be incorrect // since it does not call through to Flush() which a subclass might have overridden. To be safe // we will only use this implementation in cases where we know it is safe to do so, // and delegate to our base class (which will call into Flush) when we are not sure. public override Task FlushAsync(CancellationToken cancellationToken) => _fileStream.BaseFlushAsync(cancellationToken); // We also need to take this path if this is a derived // instance from FileStream, as a derived type could have overridden ReadAsync, in which // case our custom CopyToAsync implementation isn't necessarily correct. public override Task CopyToAsync(Stream destination, int bufferSize, CancellationToken cancellationToken) => _fileStream.BaseCopyToAsync(destination, bufferSize, cancellationToken); public override ValueTask DisposeAsync() => _fileStream.BaseDisposeAsync(); protected sealed override void Dispose(bool disposing) => _strategy.DisposeInternal(disposing); } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Threading; using System.Threading.Tasks; using Microsoft.Win32.SafeHandles; namespace System.IO.Strategies { // this type exists so we can avoid GetType() != typeof(FileStream) checks in FileStream // when FileStream was supposed to call base.Method() for such cases, we just call _fileStream.BaseMethod() // for everything else we fall back to the actual strategy (like FileStream does) // // it's crucial to NOT use the "base" keyword here! everything must be using _fileStream or _strategy internal sealed class DerivedFileStreamStrategy : FileStreamStrategy { private readonly FileStreamStrategy _strategy; private readonly FileStream _fileStream; internal DerivedFileStreamStrategy(FileStream fileStream, FileStreamStrategy strategy) { _fileStream = fileStream; _strategy = strategy; } public override bool CanRead => _strategy.CanRead; public override bool CanWrite => _strategy.CanWrite; public override bool CanSeek => _strategy.CanSeek; public override long Length => _strategy.Length; public override long Position { get => _strategy.Position; set => _strategy.Position = value; } internal override bool IsAsync => _strategy.IsAsync; internal override string Name => _strategy.Name; internal override SafeFileHandle SafeFileHandle { get { _fileStream.Flush(false); return _strategy.SafeFileHandle; } } internal override bool IsClosed => _strategy.IsClosed; internal override void Lock(long position, long length) => _strategy.Lock(position, length); internal override void Unlock(long position, long length) => _strategy.Unlock(position, length); public override long Seek(long offset, SeekOrigin origin) => _strategy.Seek(offset, origin); public override void SetLength(long value) => _strategy.SetLength(value); public override int ReadByte() => _strategy.ReadByte(); public override IAsyncResult BeginRead(byte[] buffer, int offset, int count, AsyncCallback? callback, object? state) => _strategy.IsAsync ? _strategy.BeginRead(buffer, offset, count, callback, state) : _fileStream.BaseBeginRead(buffer, offset, count, callback, state); public override int EndRead(IAsyncResult asyncResult) => _strategy.IsAsync ? _strategy.EndRead(asyncResult) : _fileStream.BaseEndRead(asyncResult); public override int Read(byte[] buffer, int offset, int count) => _strategy.Read(buffer, offset, count); // If this is a derived type, it may have overridden Read(byte[], int, int) prior to this Read(Span<byte>) // overload being introduced. In that case, this Read(Span<byte>) overload should use the behavior // of Read(byte[],int,int) overload. public override int Read(Span<byte> buffer) => _fileStream.BaseRead(buffer); // If we have been inherited into a subclass, the Strategy implementation could be incorrect // since it does not call through to Read() which a subclass might have overridden. // To be safe we will only use this implementation in cases where we know it is safe to do so, // and delegate to FileStream base class (which will call into Read/ReadAsync) when we are not sure. public override Task<int> ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) => _fileStream.BaseReadAsync(buffer, offset, count, cancellationToken); // If this isn't a concrete FileStream, a derived type may have overridden ReadAsync(byte[],...), // which was introduced first, so delegate to the base which will delegate to that.
public override ValueTask<int> ReadAsync(Memory<byte> buffer, CancellationToken cancellationToken = default) => _fileStream.BaseReadAsync(buffer, cancellationToken); public override IAsyncResult BeginWrite(byte[] buffer, int offset, int count, AsyncCallback? callback, object? state) => _strategy.IsAsync ? _strategy.BeginWrite(buffer, offset, count, callback, state) : _fileStream.BaseBeginWrite(buffer, offset, count, callback, state); public override void EndWrite(IAsyncResult asyncResult) { if (_strategy.IsAsync) { _strategy.EndWrite(asyncResult); } else { _fileStream.BaseEndWrite(asyncResult); } } public override void WriteByte(byte value) => _strategy.WriteByte(value); public override void Write(byte[] buffer, int offset, int count) => _strategy.Write(buffer, offset, count); // If this is a derived type, it may have overridden Write(byte[], int, int) prior to this Write(ReadOnlySpan<byte>) // overload being introduced. In that case, this Write(ReadOnlySpan<byte>) overload should use the behavior // of Write(byte[],int,int) overload. public override void Write(ReadOnlySpan<byte> buffer) => _fileStream.BaseWrite(buffer); // If we have been inherited into a subclass, the Strategy implementation could be incorrect // since it does not call through to Write() or WriteAsync() which a subclass might have overridden. // To be safe we will only use this implementation in cases where we know it is safe to do so, // and delegate to our base class (which will call into Write/WriteAsync) when we are not sure. public override Task WriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken) => _fileStream.BaseWriteAsync(buffer, offset, count, cancellationToken); // If this isn't a concrete FileStream, a derived type may have overridden WriteAsync(byte[],...), // which was introduced first, so delegate to the base which will delegate to that. public override ValueTask WriteAsync(ReadOnlyMemory<byte> buffer, CancellationToken cancellationToken = default) => _fileStream.BaseWriteAsync(buffer, cancellationToken); public override void Flush() => throw new InvalidOperationException("FileStream should never call this method."); internal override void Flush(bool flushToDisk) => _strategy.Flush(flushToDisk); // If we have been inherited into a subclass, the following implementation could be incorrect // since it does not call through to Flush() which a subclass might have overridden. To be safe // we will only use this implementation in cases where we know it is safe to do so, // and delegate to our base class (which will call into Flush) when we are not sure. public override Task FlushAsync(CancellationToken cancellationToken) => _fileStream.BaseFlushAsync(cancellationToken); // We also need to take this path if this is a derived // instance from FileStream, as a derived type could have overridden ReadAsync, in which // case our custom CopyToAsync implementation isn't necessarily correct. public override Task CopyToAsync(Stream destination, int bufferSize, CancellationToken cancellationToken) => _fileStream.BaseCopyToAsync(destination, bufferSize, cancellationToken); public override ValueTask DisposeAsync() => _strategy.DisposeAsync(); protected sealed override void Dispose(bool disposing) => _strategy.DisposeInternal(disposing); } }
1
dotnet/runtime
65,899
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync
I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have just exposed the problem by adding the finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I guess (I was not able to repro it) that it's caused by the flush implemented for the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flushing should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I've tried really hard to write a failing unit test, but I've failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base impl and is bug-free.
adamsitnik
"2022-02-25T17:17:25Z"
"2022-03-01T13:15:47Z"
097d9ea3c1584eb8745bd0a72ebf9cd3a31f1618
3cbed4b71800709a8121e50e964ae5b02bd80b94
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync. I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have just exposed the problem by adding the finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I guess (I was not able to repro it) that it's caused by the flush implemented for the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flushing should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I've tried really hard to write a failing unit test, but I've failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base impl and is bug-free.
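For readers scanning the record above: the only functional difference between the before and after copies of DerivedFileStreamStrategy.cs is the `DisposeAsync` override, which changes from `_fileStream.BaseDisposeAsync()` to `_strategy.DisposeAsync()`. To make the dispose chain the description walks through concrete, here is a minimal, self-contained C# sketch of the pattern the fix restores. `FlushingResource` and all of its members are hypothetical names invented for illustration; only the placement of `GC.SuppressFinalize(this)` mirrors what the PR adds to `FileStream.DisposeAsync`.

```csharp
using System;
using System.Threading.Tasks;

// Hypothetical stand-in for a buffered, finalizable resource such as FileStream.
public sealed class FlushingResource : IDisposable, IAsyncDisposable
{
    private bool _closed;

    // Finalizer safety net: flushes if the user forgot to dispose.
    ~FlushingResource() => Flush();

    public void Dispose()
    {
        Flush();
        _closed = true;
        GC.SuppressFinalize(this); // the synchronous path already did this via Stream.Close
    }

    public ValueTask DisposeAsync()
    {
        Flush();
        _closed = true;
        GC.SuppressFinalize(this); // the call this PR adds to the asynchronous path
        return ValueTask.CompletedTask;
    }

    private void Flush()
    {
        if (_closed)
        {
            return; // nothing to write after a proper close
        }
        Console.WriteLine("flushing buffered bytes");
    }
}
```

Without the suppression in `DisposeAsync`, the finalizer still runs after an `await using` block and attempts a second flush, which matches the double write observed in #65835.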
./src/coreclr/System.Private.CoreLib/src/System/OleAutBinder.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // This class represents the Ole Automation binder. // #define DISPLAY_DEBUG_INFO using System.Runtime.InteropServices; using Microsoft.Win32; using CultureInfo = System.Globalization.CultureInfo; namespace System { // Made serializable in anticipation of this class eventually having state. internal sealed class OleAutBinder : DefaultBinder { // ChangeType // This binder uses OLEAUT to change the type of the variant. public override object ChangeType(object value, Type type, CultureInfo? cultureInfo) { Variant myValue = new Variant(value); cultureInfo ??= CultureInfo.CurrentCulture; #if DISPLAY_DEBUG_INFO Console.WriteLine("In OleAutBinder::ChangeType converting variant of type: {0} to type: {1}", myValue.VariantType, type.Name); #endif if (type.IsByRef) { #if DISPLAY_DEBUG_INFO Console.WriteLine("Stripping byref from the type to convert to."); #endif type = type.GetElementType()!; } // If we are trying to convert from an object to another type then we don't // need the OLEAUT change type, we can just use the normal COM+ mechanisms. if (!type.IsPrimitive && type.IsInstanceOfType(value)) { #if DISPLAY_DEBUG_INFO Console.WriteLine("Source variant can be assigned to destination type"); #endif return value; } Type srcType = value.GetType(); // Handle converting primitives to enums. if (type.IsEnum && srcType.IsPrimitive) { #if DISPLAY_DEBUG_INFO Console.WriteLine("Converting primitive to enum"); #endif return Enum.Parse(type, value.ToString()!); } // Use the OA variant lib to convert primitive types. try { #if DISPLAY_DEBUG_INFO Console.WriteLine("Using OAVariantLib.ChangeType() to do the conversion"); #endif // Specify the LocalBool flag to have BOOL values converted to local language rather // than 0 or -1. object RetObj = OAVariantLib.ChangeType(myValue, type, OAVariantLib.LocalBool, cultureInfo).ToObject()!; #if DISPLAY_DEBUG_INFO Console.WriteLine("Object returned from ChangeType is of type: " + RetObj.GetType().Name); #endif return RetObj; } #if DISPLAY_DEBUG_INFO catch(NotSupportedException e) #else catch (NotSupportedException) #endif { #if DISPLAY_DEBUG_INFO Console.Write("Exception thrown: "); Console.WriteLine(e); #endif throw new COMException(SR.Interop_COM_TypeMismatch, unchecked((int)0x80020005)); } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // This class represents the Ole Automation binder. // #define DISPLAY_DEBUG_INFO using System.Runtime.InteropServices; using Microsoft.Win32; using CultureInfo = System.Globalization.CultureInfo; namespace System { // Made serializable in anticipation of this class eventually having state. internal sealed class OleAutBinder : DefaultBinder { // ChangeType // This binder uses OLEAUT to change the type of the variant. public override object ChangeType(object value, Type type, CultureInfo? cultureInfo) { Variant myValue = new Variant(value); cultureInfo ??= CultureInfo.CurrentCulture; #if DISPLAY_DEBUG_INFO Console.WriteLine("In OleAutBinder::ChangeType converting variant of type: {0} to type: {1}", myValue.VariantType, type.Name); #endif if (type.IsByRef) { #if DISPLAY_DEBUG_INFO Console.WriteLine("Stripping byref from the type to convert to."); #endif type = type.GetElementType()!; } // If we are trying to convert from an object to another type then we don't // need the OLEAUT change type, we can just use the normal COM+ mechanisms. if (!type.IsPrimitive && type.IsInstanceOfType(value)) { #if DISPLAY_DEBUG_INFO Console.WriteLine("Source variant can be assigned to destination type"); #endif return value; } Type srcType = value.GetType(); // Handle converting primitives to enums. if (type.IsEnum && srcType.IsPrimitive) { #if DISPLAY_DEBUG_INFO Console.WriteLine("Converting primitive to enum"); #endif return Enum.Parse(type, value.ToString()!); } // Use the OA variant lib to convert primitive types. try { #if DISPLAY_DEBUG_INFO Console.WriteLine("Using OAVariantLib.ChangeType() to do the conversion"); #endif // Specify the LocalBool flag to have BOOL values converted to local language rather // than 0 or -1. object RetObj = OAVariantLib.ChangeType(myValue, type, OAVariantLib.LocalBool, cultureInfo).ToObject()!; #if DISPLAY_DEBUG_INFO Console.WriteLine("Object returned from ChangeType is of type: " + RetObj.GetType().Name); #endif return RetObj; } #if DISPLAY_DEBUG_INFO catch(NotSupportedException e) #else catch (NotSupportedException) #endif { #if DISPLAY_DEBUG_INFO Console.Write("Exception thrown: "); Console.WriteLine(e); #endif throw new COMException(SR.Interop_COM_TypeMismatch, unchecked((int)0x80020005)); } } } }
-1
dotnet/runtime
65,899
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync
I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have just exposed the problem by adding the finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I guess (I was not able to repro it) that it's caused by the flush implemented for the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flushing should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I've tried really hard to write a failing unit test, but I've failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base impl and is bug-free.
adamsitnik
"2022-02-25T17:17:25Z"
"2022-03-01T13:15:47Z"
097d9ea3c1584eb8745bd0a72ebf9cd3a31f1618
3cbed4b71800709a8121e50e964ae5b02bd80b94
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync. I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have just exposed the problem by adding the finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I guess (I was not able to repro it) that it's caused by the flush implemented for the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flushing should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I've tried really hard to write a failing unit test, but I've failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base impl and is bug-free.
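As a reading aid for the "Explanation" above, the default `DisposeAsync` -> `Dispose` -> `Close` -> `GC.SuppressFinalize` chain looks roughly like the sketch below. This is a simplified paraphrase of the linked Stream.cs lines under a hypothetical `SketchStream` name, not the verbatim source; the point is that a subclass overriding `DisposeAsync` bypasses the whole chain and must therefore suppress finalization itself.

```csharp
using System;
using System.Threading.Tasks;

// Simplified paraphrase of the dispose chain described in the PR text.
public abstract class SketchStream : IDisposable, IAsyncDisposable
{
    public virtual ValueTask DisposeAsync()
    {
        try
        {
            Dispose(); // the async default funnels into the synchronous path...
            return default;
        }
        catch (Exception exc)
        {
            return ValueTask.FromException(exc);
        }
    }

    public void Dispose() => Close();

    public virtual void Close()
    {
        Dispose(disposing: true);
        GC.SuppressFinalize(this); // ...which is where finalization gets suppressed
    }

    protected virtual void Dispose(bool disposing)
    {
    }
}
```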
./src/libraries/System.Globalization.Calendars/tests/System/Globalization/HebrewCalendarTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using Xunit; namespace System.Globalization.Tests { public class HebrewCalendarTests : CalendarTestBase { public override Calendar Calendar => new HebrewCalendar(); public override DateTime MinSupportedDateTime => new DateTime(1583, 01, 01); public override DateTime MaxSupportedDateTime => new DateTime(2239, 09, 29, 23, 59, 59).AddTicks(9999999); public override CalendarAlgorithmType AlgorithmType => CalendarAlgorithmType.LunisolarCalendar; } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using Xunit; namespace System.Globalization.Tests { public class HebrewCalendarTests : CalendarTestBase { public override Calendar Calendar => new HebrewCalendar(); public override DateTime MinSupportedDateTime => new DateTime(1583, 01, 01); public override DateTime MaxSupportedDateTime => new DateTime(2239, 09, 29, 23, 59, 59).AddTicks(9999999); public override CalendarAlgorithmType AlgorithmType => CalendarAlgorithmType.LunisolarCalendar; } }
-1
dotnet/runtime
65,899
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync
I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have just exposed the problem by adding the finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I guess (I was not able to repro it) that it's caused by the flush implemented for the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flushing should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I've tried really hard to write a failing unit test, but I've failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base impl and is bug-free.
adamsitnik
"2022-02-25T17:17:25Z"
"2022-03-01T13:15:47Z"
097d9ea3c1584eb8745bd0a72ebf9cd3a31f1618
3cbed4b71800709a8121e50e964ae5b02bd80b94
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync. I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have just exposed the problem by adding the finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I guess (I was not able to repro it) that it's caused by the flush implemented for the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flushing should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I've tried really hard to write a failing unit test, but I've failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base impl and is bug-free.
./src/libraries/System.Linq.Expressions/src/System/Linq/Expressions/SwitchCase.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Collections.ObjectModel; using System.Diagnostics; using System.Dynamic.Utils; namespace System.Linq.Expressions { /// <summary> /// Represents one case of a <see cref="SwitchExpression"/>. /// </summary> [DebuggerTypeProxy(typeof(Expression.SwitchCaseProxy))] public sealed class SwitchCase { internal SwitchCase(Expression body, ReadOnlyCollection<Expression> testValues) { Body = body; TestValues = testValues; } /// <summary> /// Gets the values of this case. This case is selected for execution when the <see cref="SwitchExpression.SwitchValue"/> matches any of these values. /// </summary> public ReadOnlyCollection<Expression> TestValues { get; } /// <summary> /// Gets the body of this case. /// </summary> public Expression Body { get; } /// <summary> /// Returns a <see cref="string"/> that represents the current <see cref="object"/>. /// </summary> /// <returns>A <see cref="string"/> that represents the current <see cref="object"/>.</returns> public override string ToString() { return ExpressionStringBuilder.SwitchCaseToString(this); } /// <summary> /// Creates a new expression that is like this one, but using the /// supplied children. If all of the children are the same, it will /// return this expression. /// </summary> /// <param name="testValues">The <see cref="TestValues"/> property of the result.</param> /// <param name="body">The <see cref="Body"/> property of the result.</param> /// <returns>This expression if no children changed, or an expression with the updated children.</returns> public SwitchCase Update(IEnumerable<Expression> testValues, Expression body) { if (body == Body & testValues != null) { if (ExpressionUtils.SameElements(ref testValues!, TestValues)) { return this; } } return Expression.SwitchCase(body, testValues!); } } public partial class Expression { /// <summary> /// Creates a <see cref="Expressions.SwitchCase"/> for use in a <see cref="SwitchExpression"/>. /// </summary> /// <param name="body">The body of the case.</param> /// <param name="testValues">The test values of the case.</param> /// <returns>The created <see cref="Expressions.SwitchCase"/>.</returns> public static SwitchCase SwitchCase(Expression body, params Expression[] testValues) { return SwitchCase(body, (IEnumerable<Expression>)testValues); } /// <summary> /// Creates a <see cref="Expressions.SwitchCase"/> for use in a <see cref="SwitchExpression"/>. /// </summary> /// <param name="body">The body of the case.</param> /// <param name="testValues">The test values of the case.</param> /// <returns>The created <see cref="Expressions.SwitchCase"/>.</returns> public static SwitchCase SwitchCase(Expression body, IEnumerable<Expression> testValues) { ExpressionUtils.RequiresCanRead(body, nameof(body)); ReadOnlyCollection<Expression> values = testValues.ToReadOnly(); ContractUtils.RequiresNotEmpty(values, nameof(testValues)); RequiresCanRead(values, nameof(testValues)); return new SwitchCase(body, values); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Collections.ObjectModel; using System.Diagnostics; using System.Dynamic.Utils; namespace System.Linq.Expressions { /// <summary> /// Represents one case of a <see cref="SwitchExpression"/>. /// </summary> [DebuggerTypeProxy(typeof(Expression.SwitchCaseProxy))] public sealed class SwitchCase { internal SwitchCase(Expression body, ReadOnlyCollection<Expression> testValues) { Body = body; TestValues = testValues; } /// <summary> /// Gets the values of this case. This case is selected for execution when the <see cref="SwitchExpression.SwitchValue"/> matches any of these values. /// </summary> public ReadOnlyCollection<Expression> TestValues { get; } /// <summary> /// Gets the body of this case. /// </summary> public Expression Body { get; } /// <summary> /// Returns a <see cref="string"/> that represents the current <see cref="object"/>. /// </summary> /// <returns>A <see cref="string"/> that represents the current <see cref="object"/>.</returns> public override string ToString() { return ExpressionStringBuilder.SwitchCaseToString(this); } /// <summary> /// Creates a new expression that is like this one, but using the /// supplied children. If all of the children are the same, it will /// return this expression. /// </summary> /// <param name="testValues">The <see cref="TestValues"/> property of the result.</param> /// <param name="body">The <see cref="Body"/> property of the result.</param> /// <returns>This expression if no children changed, or an expression with the updated children.</returns> public SwitchCase Update(IEnumerable<Expression> testValues, Expression body) { if (body == Body & testValues != null) { if (ExpressionUtils.SameElements(ref testValues!, TestValues)) { return this; } } return Expression.SwitchCase(body, testValues!); } } public partial class Expression { /// <summary> /// Creates a <see cref="Expressions.SwitchCase"/> for use in a <see cref="SwitchExpression"/>. /// </summary> /// <param name="body">The body of the case.</param> /// <param name="testValues">The test values of the case.</param> /// <returns>The created <see cref="Expressions.SwitchCase"/>.</returns> public static SwitchCase SwitchCase(Expression body, params Expression[] testValues) { return SwitchCase(body, (IEnumerable<Expression>)testValues); } /// <summary> /// Creates a <see cref="Expressions.SwitchCase"/> for use in a <see cref="SwitchExpression"/>. /// </summary> /// <param name="body">The body of the case.</param> /// <param name="testValues">The test values of the case.</param> /// <returns>The created <see cref="Expressions.SwitchCase"/>.</returns> public static SwitchCase SwitchCase(Expression body, IEnumerable<Expression> testValues) { ExpressionUtils.RequiresCanRead(body, nameof(body)); ReadOnlyCollection<Expression> values = testValues.ToReadOnly(); ContractUtils.RequiresNotEmpty(values, nameof(testValues)); RequiresCanRead(values, nameof(testValues)); return new SwitchCase(body, values); } } }
-1
dotnet/runtime
65,899
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync
I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have just exposed the problem by adding the finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I guess (I was not able to repro it) that it's caused by the flush implemented for the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flushing should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I've tried really hard to write a failing unit test, but I've failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base impl and is bug-free.
adamsitnik
"2022-02-25T17:17:25Z"
"2022-03-01T13:15:47Z"
097d9ea3c1584eb8745bd0a72ebf9cd3a31f1618
3cbed4b71800709a8121e50e964ae5b02bd80b94
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync. I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have just exposed the problem by adding the finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I guess (I was not able to repro it) that it's caused by the flush implemented for the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flushing should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I've tried really hard to write a failing unit test, but I've failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base impl and is bug-free.
./src/tests/Loader/binding/assemblies/generics/arilistienum/methods/methods.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //array<T> IList properties using System; using System.IO; using System.Reflection; using System.Collections; public class GenClass<T> { public T fld; } public class PropsArIList { public static int Main() { int result = 0; int i, index = 0; bool bRes = false; try { //Part 1 - GenClass <int> Console.WriteLine("\ntest GenClass<int>"); GenClass<int> obj1; obj1 = new GenClass<int>(); obj1.fld = 3; Console.WriteLine (obj1.fld); GenClass<int>[] arGen; arGen = new GenClass<int>[5]; for (i=0;i<5;i++) { arGen[i] = new GenClass<int>(); arGen[i].fld = i; Console.Write (arGen[i].fld + "\t"); } Console.WriteLine(); IList interf1 = (IList) arGen; Console.WriteLine ("testing IList.Contains"); bRes = interf1.Contains (arGen[2]); if (bRes!=true) { Console.WriteLine ("unexpected result: {0} \n test failed", bRes); return 110; } bRes = interf1.Contains (obj1); if (bRes!=false) { Console.WriteLine ("unexpected result: {0} \n test failed", bRes); return 110; } Console.WriteLine ("testing IList.IndexOf"); index = interf1.IndexOf (arGen[2]); if (index!=2) { Console.WriteLine ("unexpected result: {0} \n test failed", index); return 110; } Console.WriteLine ("testing IList.Clear"); interf1.Clear(); for (i=0;i<5;i++) { if (arGen[i]!=null) { Console.WriteLine ("unexpected result: element {0} is not null \n test failed", i); return 110; } } //Part 2 - GenClass <string> Console.WriteLine("\ntest GenClass<string>"); GenClass<string> obj2; obj2 = new GenClass<string>(); obj2.fld = "name"; Console.WriteLine (obj2.fld); GenClass<string>[] arGenS; arGenS = new GenClass<string>[5]; string aux = "none"; for (i=0;i<5;i++) { arGenS[i] = new GenClass<string>(); aux = Convert.ToString(i); arGenS[i].fld = aux; Console.Write (arGenS[i].fld + "\t"); } Console.WriteLine(); IList interf2 = (IList) arGenS; Console.WriteLine ("testing IList.Contains"); bRes = interf2.Contains (arGenS[2]); if (bRes!=true) { Console.WriteLine ("unexpected result: {0} \n test failed", bRes); return 110; } bRes = interf2.Contains (obj2); if (bRes!=false) { Console.WriteLine ("unexpected result: {0} \n test failed", bRes); return 110; } bRes = interf2.Contains (obj1); if (bRes!=false) { Console.WriteLine ("unexpected result: {0} \n test failed", bRes); return 110; } Console.WriteLine ("testing IList.IndexOf"); index = interf2.IndexOf (arGenS[2]); if (index!=2) { Console.WriteLine ("unexpected result: {0} \n test failed", index); return 110; } Console.WriteLine ("testing IList.Clear"); interf2.Clear(); for (i=0;i<5;i++) { if (arGenS[i]!=null) { Console.WriteLine ("unexpected result: element {0} is not null \n test failed", i); return 110; } } result = 100; //pass } catch (Exception e) { Console.WriteLine ("unexpected exception.."); Console.WriteLine (e); Console.WriteLine ("test failed"); return 101; } if (result==100) Console.WriteLine ("test passed"); else Console.WriteLine ("test failed"); return result; } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. //array<T> IList properties using System; using System.IO; using System.Reflection; using System.Collections; public class GenClass<T> { public T fld; } public class PropsArIList { public static int Main() { int result = 0; int i, index = 0; bool bRes = false; try { //Part 1 - GenClass <int> Console.WriteLine("\ntest GenClass<int>"); GenClass<int> obj1; obj1 = new GenClass<int>(); obj1.fld = 3; Console.WriteLine (obj1.fld); GenClass<int>[] arGen; arGen = new GenClass<int>[5]; for (i=0;i<5;i++) { arGen[i] = new GenClass<int>(); arGen[i].fld = i; Console.Write (arGen[i].fld + "\t"); } Console.WriteLine(); IList interf1 = (IList) arGen; Console.WriteLine ("testing IList.Contains"); bRes = interf1.Contains (arGen[2]); if (bRes!=true) { Console.WriteLine ("unexpected result: {0} \n test failed", bRes); return 110; } bRes = interf1.Contains (obj1); if (bRes!=false) { Console.WriteLine ("unexpected result: {0} \n test failed", bRes); return 110; } Console.WriteLine ("testing IList.IndexOf"); index = interf1.IndexOf (arGen[2]); if (index!=2) { Console.WriteLine ("unexpected result: {0} \n test failed", index); return 110; } Console.WriteLine ("testing IList.Clear"); interf1.Clear(); for (i=0;i<5;i++) { if (arGen[i]!=null) { Console.WriteLine ("unexpected result: element {0} is not null \n test failed", i); return 110; } } //Part 2 - GenClass <string> Console.WriteLine("\ntest GenClass<string>"); GenClass<string> obj2; obj2 = new GenClass<string>(); obj2.fld = "name"; Console.WriteLine (obj2.fld); GenClass<string>[] arGenS; arGenS = new GenClass<string>[5]; string aux = "none"; for (i=0;i<5;i++) { arGenS[i] = new GenClass<string>(); aux = Convert.ToString(i); arGenS[i].fld = aux; Console.Write (arGenS[i].fld + "\t"); } Console.WriteLine(); IList interf2 = (IList) arGenS; Console.WriteLine ("testing IList.Contains"); bRes = interf2.Contains (arGenS[2]); if (bRes!=true) { Console.WriteLine ("unexpected result: {0} \n test failed", bRes); return 110; } bRes = interf2.Contains (obj2); if (bRes!=false) { Console.WriteLine ("unexpected result: {0} \n test failed", bRes); return 110; } bRes = interf2.Contains (obj1); if (bRes!=false) { Console.WriteLine ("unexpected result: {0} \n test failed", bRes); return 110; } Console.WriteLine ("testing IList.IndexOf"); index = interf2.IndexOf (arGenS[2]); if (index!=2) { Console.WriteLine ("unexpected result: {0} \n test failed", index); return 110; } Console.WriteLine ("testing IList.Clear"); interf2.Clear(); for (i=0;i<5;i++) { if (arGenS[i]!=null) { Console.WriteLine ("unexpected result: element {0} is not null \n test failed", i); return 110; } } result = 100; //pass } catch (Exception e) { Console.WriteLine ("unexpected exception.."); Console.WriteLine (e); Console.WriteLine ("test failed"); return 101; } if (result==100) Console.WriteLine ("test passed"); else Console.WriteLine ("test failed"); return result; } }
-1
dotnet/runtime
65,899
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync
I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have just exposed the problem by adding the finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I guess (I was not able to repro it) that it's caused by the flush implemented for the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flushing should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I've tried really hard to write a failing unit test, but I've failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base impl and is bug-free.
adamsitnik
"2022-02-25T17:17:25Z"
"2022-03-01T13:15:47Z"
097d9ea3c1584eb8745bd0a72ebf9cd3a31f1618
3cbed4b71800709a8121e50e964ae5b02bd80b94
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync. I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have just exposed the problem by adding the finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I guess (I was not able to repro it) that it's caused by the flush implemented for the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flushing should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I've tried really hard to write a failing unit test, but I've failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base impl and is bug-free.
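The "nothing to flush" reasoning in the description can be sketched as two guards. `BufferedWriterSketch`, `_writePos`, `_handleClosed`, and `WriteBuffer` are hypothetical stand-ins for the `BufferedFileStreamStrategy` state the linked lines describe, not its actual members.

```csharp
// Hypothetical sketch of why a late finalizer flush should be a no-op:
// after Dispose, the buffer count was reset and the handle was closed,
// so both guards below should fire before any bytes are written again.
public sealed class BufferedWriterSketch
{
    private int _writePos;      // bytes sitting in the in-memory buffer
    private bool _handleClosed; // set once the underlying handle is closed

    public void Dispose()
    {
        try
        {
            WriteBuffer(); // flush any pending bytes
        }
        finally
        {
            _writePos = 0; // mirrors the finally block the PR links to
            _handleClosed = true;
        }
    }

    public void FlushFromFinalizer()
    {
        if (_writePos == 0)
        {
            return; // nothing buffered, nothing to flush
        }
        if (_handleClosed)
        {
            return; // handle already closed by Dispose
        }
        WriteBuffer(); // only reached if dispose never ran
    }

    private void WriteBuffer() => _writePos = 0;
}
```

Under this sketch a finalizer running after `Dispose` returns immediately, so the double write in #65835 implies the finalizer observed state these guards did not cover, which is consistent with the author's "I am not 100% sure".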
./src/libraries/System.Private.CoreLib/src/System/Runtime/Intrinsics/Arm/AdvSimd.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Runtime.CompilerServices; namespace System.Runtime.Intrinsics.Arm { /// <summary> /// This class provides access to the ARM AdvSIMD hardware instructions via intrinsics /// </summary> [Intrinsic] [CLSCompliant(false)] public abstract class AdvSimd : ArmBase { internal AdvSimd() { } public static new bool IsSupported { get => IsSupported; } // [Intrinsic] // public new abstract class Arm32 : ArmBase.Arm32 // { // internal Arm32() { } // // public static new bool IsSupported { get => IsSupported; } // // /// <summary> // /// float32x2_t vmla_f32 (float32x2_t a, float32x2_t b, float32x2_t c) // /// A32: VMLA.F32 Dd, Dn, Dm // /// </summary> // public static Vector64<float> MultiplyAdd(Vector64<float> addend, Vector64<float> left, Vector64<float> right) => MultiplyAdd(addend, left, right); // // /// <summary> // /// float32x4_t vmlaq_f32 (float32x4_t a, float32x4_t b, float32x4_t c) // /// A32: VMLA.F32 Qd, Qn, Qm // /// </summary> // public static Vector128<float> MultiplyAdd(Vector128<float> addend, Vector128<float> left, Vector128<float> right) => MultiplyAdd(addend, left, right); // // /// <summary> // /// float32x2_t vmla_n_f32 (float32x2_t a, float32x2_t b, float32_t c) // /// A32: VMLA.F32 Dd, Dn, Dm[0] // /// </summary> // public static Vector64<float> MultiplyAddByScalar(Vector64<float> addend, Vector64<float> left, Vector64<float> right) => MultiplyAddByScalar(addend, left, right); // // /// <summary> // /// float32x4_t vmlaq_n_f32 (float32x4_t a, float32x4_t b, float32_t c) // /// A32: VMLA.F32 Qd, Qn, Dm[0] // /// </summary> // public static Vector128<float> MultiplyAddByScalar(Vector128<float> addend, Vector128<float> left, Vector64<float> right) => MultiplyAddByScalar(addend, left, right); // // /// <summary> // /// float32x2_t vmla_lane_f32 (float32x2_t a, float32x2_t b, float32x2_t v, const int lane) // /// A32: VMLA.F32 Dd, Dn, Dm[lane] // /// </summary> // public static Vector64<float> MultiplyAddBySelectedScalar(Vector64<float> addend, Vector64<float> left, Vector64<float> right, byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex); // // /// <summary> // /// float32x2_t vmla_laneq_f32 (float32x2_t a, float32x2_t b, float32x4_t v, const int lane) // /// A32: VMLA.F32 Dd, Dn, Dm[lane] // /// </summary> // public static Vector64<float> MultiplyAddBySelectedScalar(Vector64<float> addend, Vector64<float> left, Vector128<float> right, byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex); // // /// <summary> // /// float32x4_t vmlaq_lane_f32 (float32x4_t a, float32x4_t b, float32x2_t v, const int lane) // /// A32: VMLA.F32 Qd, Qn, Dm[lane] // /// </summary> // public static Vector128<float> MultiplyAddBySelectedScalar(Vector128<float> addend, Vector128<float> left, Vector64<float> right, byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex); // // /// <summary> // /// float32x4_t vmlaq_laneq_f32 (float32x4_t a, float32x4_t b, float32x4_t v, const int lane) // /// A32: VMLA.F32 Qd, Qn, Dm[lane] // /// </summary> // public static Vector128<float> MultiplyAddBySelectedScalar(Vector128<float> addend, Vector128<float> left, Vector128<float> right, byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex); // // /// <summary> // /// float64x1_t vmla_f64 (float64x1_t a, float64x1_t b, float64x1_t c) // /// A32: VMLA.F64 Dd, Dn, Dm // 
/// </summary>
        // public static Vector64<double> MultiplyAddScalar(Vector64<double> addend, Vector64<double> left, Vector64<double> right) => MultiplyAddScalar(addend, left, right);
        //
        // /// <summary>
        // /// float32_t vmlas_f32 (float32_t a, float32_t b, float32_t c)
        // /// A32: VMLA.F32 Sd, Sn, Sm
        // /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
        // /// </summary>
        // public static Vector64<float> MultiplyAddScalar(Vector64<float> addend, Vector64<float> left, Vector64<float> right) => MultiplyAddScalar(addend, left, right);
        //
        // /// <summary>
        // /// float32x2_t vmls_f32 (float32x2_t a, float32x2_t b, float32x2_t c)
        // /// A32: VMLS.F32 Dd, Dn, Dm
        // /// </summary>
        // public static Vector64<float> MultiplySubtract(Vector64<float> minuend, Vector64<float> left, Vector64<float> right) => MultiplySubtract(minuend, left, right);
        //
        // /// <summary>
        // /// float32x4_t vmlsq_f32 (float32x4_t a, float32x4_t b, float32x4_t c)
        // /// A32: VMLS.F32 Qd, Qn, Qm
        // /// </summary>
        // public static Vector128<float> MultiplySubtract(Vector128<float> minuend, Vector128<float> left, Vector128<float> right) => MultiplySubtract(minuend, left, right);
        //
        // /// <summary>
        // /// float32x2_t vmls_n_f32 (float32x2_t a, float32x2_t b, float32_t c)
        // /// A32: VMLS.F32 Dd, Dn, Dm[0]
        // /// </summary>
        // public static Vector64<float> MultiplySubtractByScalar(Vector64<float> minuend, Vector64<float> left, Vector64<float> right) => MultiplySubtractByScalar(minuend, left, right);
        //
        // /// <summary>
        // /// float32x4_t vmlsq_n_f32 (float32x4_t a, float32x4_t b, float32_t c)
        // /// A32: VMLS.F32 Qd, Qn, Dm[0]
        // /// </summary>
        // public static Vector128<float> MultiplySubtractByScalar(Vector128<float> minuend, Vector128<float> left, Vector64<float> right) => MultiplySubtractByScalar(minuend, left, right);
        //
        // /// <summary>
        // /// float32x2_t vmls_lane_f32 (float32x2_t a, float32x2_t b, float32x2_t v, const int lane)
        // /// A32: VMLS.F32 Dd, Dn, Dm[lane]
        // /// </summary>
        // public static Vector64<float> MultiplySubtractBySelectedScalar(Vector64<float> minuend, Vector64<float> left, Vector64<float> right, byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex);
        //
        // /// <summary>
        // /// float32x2_t vmls_laneq_f32 (float32x2_t a, float32x2_t b, float32x4_t v, const int lane)
        // /// A32: VMLS.F32 Dd, Dn, Dm[lane]
        // /// </summary>
        // public static Vector64<float> MultiplySubtractBySelectedScalar(Vector64<float> minuend, Vector64<float> left, Vector128<float> right, byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex);
        //
        // /// <summary>
        // /// float32x4_t vmlsq_lane_f32 (float32x4_t a, float32x4_t b, float32x2_t v, const int lane)
        // /// A32: VMLS.F32 Qd, Qn, Dm[lane]
        // /// </summary>
        // public static Vector128<float> MultiplySubtractBySelectedScalar(Vector128<float> minuend, Vector128<float> left, Vector64<float> right, byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex);
        //
        // /// <summary>
        // /// float32x4_t vmlsq_laneq_f32 (float32x4_t a, float32x4_t b, float32x4_t v, const int lane)
        // /// A32: VMLS.F32 Qd, Qn, Dm[lane]
        // /// </summary>
        // public static Vector128<float> MultiplySubtractBySelectedScalar(Vector128<float> minuend, Vector128<float> left, Vector128<float> right, byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex);
        //
        // /// <summary>
        // /// float64x1_t vmls_f64 (float64x1_t a, float64x1_t b, float64x1_t c)
        // /// A32: VMLS.F64 Dd, Dn, Dm
        // /// </summary>
        // public static Vector64<double> MultiplySubtractScalar(Vector64<double> minuend, Vector64<double> left, Vector64<double> right) => MultiplySubtractScalar(minuend, left, right);
        //
        // /// <summary>
        // /// float32_t vmlss_f32 (float32_t a, float32_t b, float32_t c)
        // /// A32: VMLS.F32 Sd, Sn, Sm
        // /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
        // /// </summary>
        // public static Vector64<float> MultiplySubtractScalar(Vector64<float> minuend, Vector64<float> left, Vector64<float> right) => MultiplySubtractScalar(minuend, left, right);
        // }

        [Intrinsic]
        public new abstract class Arm64 : ArmBase.Arm64
        {
            internal Arm64() { }

            public static new bool IsSupported { get => IsSupported; }

            /// <summary>
            /// float64x2_t vabsq_f64 (float64x2_t a)
            /// A64: FABS Vd.2D, Vn.2D
            /// </summary>
            public static Vector128<double> Abs(Vector128<double> value) => Abs(value);

            /// <summary>
            /// int64x2_t vabsq_s64 (int64x2_t a)
            /// A64: ABS Vd.2D, Vn.2D
            /// </summary>
            public static Vector128<ulong> Abs(Vector128<long> value) => Abs(value);

            /// <summary>
            /// int64x2_t vqabsq_s64 (int64x2_t a)
            /// A64: SQABS Vd.2D, Vn.2D
            /// </summary>
            public static Vector128<long> AbsSaturate(Vector128<long> value) => AbsSaturate(value);

            /// <summary>
            /// int16_t vqabsh_s16 (int16_t a)
            /// A64: SQABS Hd, Hn
            /// </summary>
            public static Vector64<short> AbsSaturateScalar(Vector64<short> value) => AbsSaturateScalar(value);

            /// <summary>
            /// int32_t vqabss_s32 (int32_t a)
            /// A64: SQABS Sd, Sn
            /// </summary>
            public static Vector64<int> AbsSaturateScalar(Vector64<int> value) => AbsSaturateScalar(value);

            /// <summary>
            /// int64_t vqabsd_s64 (int64_t a)
            /// A64: SQABS Dd, Dn
            /// </summary>
            public static Vector64<long> AbsSaturateScalar(Vector64<long> value) => AbsSaturateScalar(value);

            /// <summary>
            /// int8_t vqabsb_s8 (int8_t a)
            /// A64: SQABS Bd, Bn
            /// </summary>
            public static Vector64<sbyte> AbsSaturateScalar(Vector64<sbyte> value) => AbsSaturateScalar(value);

            /// <summary>
            /// int64x1_t vabs_s64 (int64x1_t a)
            /// A64: ABS Dd, Dn
            /// </summary>
            public static Vector64<ulong> AbsScalar(Vector64<long> value) => AbsScalar(value);
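            // Usage sketch (illustrative only, not part of the runtime API surface): callers are
            // expected to guard this AArch64-only class with the IsSupported check, e.g.:
            //
            //     if (AdvSimd.Arm64.IsSupported)
            //     {
            //         Vector128<long> v = Vector128.Create(-1L, 2L);
            //         Vector128<ulong> a = AdvSimd.Arm64.Abs(v); // <1, 2>; note the unsigned result type
            //     }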
            /// <summary>
            /// uint64x2_t vcagtq_f64 (float64x2_t a, float64x2_t b)
            /// A64: FACGT Vd.2D, Vn.2D, Vm.2D
            /// </summary>
            public static Vector128<double> AbsoluteCompareGreaterThan(Vector128<double> left, Vector128<double> right) => AbsoluteCompareGreaterThan(left, right);

            /// <summary>
            /// uint64x1_t vcagt_f64 (float64x1_t a, float64x1_t b)
            /// A64: FACGT Dd, Dn, Dm
            /// </summary>
            public static Vector64<double> AbsoluteCompareGreaterThanScalar(Vector64<double> left, Vector64<double> right) => AbsoluteCompareGreaterThanScalar(left, right);

            /// <summary>
            /// uint32_t vcagts_f32 (float32_t a, float32_t b)
            /// A64: FACGT Sd, Sn, Sm
            /// </summary>
            public static Vector64<float> AbsoluteCompareGreaterThanScalar(Vector64<float> left, Vector64<float> right) => AbsoluteCompareGreaterThanScalar(left, right);

            /// <summary>
            /// uint64x2_t vcageq_f64 (float64x2_t a, float64x2_t b)
            /// A64: FACGE Vd.2D, Vn.2D, Vm.2D
            /// </summary>
            public static Vector128<double> AbsoluteCompareGreaterThanOrEqual(Vector128<double> left, Vector128<double> right) => AbsoluteCompareGreaterThanOrEqual(left, right);

            /// <summary>
            /// uint64x1_t vcage_f64 (float64x1_t a, float64x1_t b)
            /// A64: FACGE Dd, Dn, Dm
            /// </summary>
            public static Vector64<double> AbsoluteCompareGreaterThanOrEqualScalar(Vector64<double> left, Vector64<double> right) => AbsoluteCompareGreaterThanOrEqualScalar(left, right);

            /// <summary>
            /// uint32_t vcages_f32 (float32_t a, float32_t b)
            /// A64: FACGE Sd, Sn, Sm
            /// </summary>
            public static Vector64<float> AbsoluteCompareGreaterThanOrEqualScalar(Vector64<float> left, Vector64<float> right) => AbsoluteCompareGreaterThanOrEqualScalar(left, right);

            /// <summary>
            /// uint64x2_t vcaltq_f64 (float64x2_t a, float64x2_t b)
            /// A64: FACGT Vd.2D, Vn.2D, Vm.2D
            /// </summary>
            public static Vector128<double> AbsoluteCompareLessThan(Vector128<double> left, Vector128<double> right) => AbsoluteCompareLessThan(left, right);

            /// <summary>
            /// uint64x1_t vcalt_f64 (float64x1_t a, float64x1_t b)
            /// A64: FACGT Dd, Dn, Dm
            /// </summary>
            public static Vector64<double> AbsoluteCompareLessThanScalar(Vector64<double> left, Vector64<double> right) => AbsoluteCompareLessThanScalar(left, right);

            /// <summary>
            /// uint32_t vcalts_f32 (float32_t a, float32_t b)
            /// A64: FACGT Sd, Sn, Sm
            /// </summary>
            public static Vector64<float> AbsoluteCompareLessThanScalar(Vector64<float> left, Vector64<float> right) => AbsoluteCompareLessThanScalar(left, right);

            /// <summary>
            /// uint64x2_t vcaleq_f64 (float64x2_t a, float64x2_t b)
            /// A64: FACGE Vd.2D, Vn.2D, Vm.2D
            /// </summary>
            public static Vector128<double> AbsoluteCompareLessThanOrEqual(Vector128<double> left, Vector128<double> right) => AbsoluteCompareLessThanOrEqual(left, right);

            /// <summary>
            /// uint64x1_t vcale_f64 (float64x1_t a, float64x1_t b)
            /// A64: FACGE Dd, Dn, Dm
            /// </summary>
            public static Vector64<double> AbsoluteCompareLessThanOrEqualScalar(Vector64<double> left, Vector64<double> right) => AbsoluteCompareLessThanOrEqualScalar(left, right);

            /// <summary>
            /// uint32_t vcales_f32 (float32_t a, float32_t b)
            /// A64: FACGE Sd, Sn, Sm
            /// </summary>
            public static Vector64<float> AbsoluteCompareLessThanOrEqualScalar(Vector64<float> left, Vector64<float> right) => AbsoluteCompareLessThanOrEqualScalar(left, right);

            /// <summary>
            /// float64x2_t vabdq_f64 (float64x2_t a, float64x2_t b)
            /// A64: FABD Vd.2D, Vn.2D, Vm.2D
            /// </summary>
            public static Vector128<double> AbsoluteDifference(Vector128<double> left, Vector128<double> right) => AbsoluteDifference(left, right);

            /// <summary>
            /// float64x1_t vabd_f64 (float64x1_t a, float64x1_t b)
            /// A64: FABD Dd, Dn, Dm
            /// </summary>
            public static Vector64<double> AbsoluteDifferenceScalar(Vector64<double> left, Vector64<double> right) => AbsoluteDifferenceScalar(left, right);

            /// <summary>
            /// float32_t vabds_f32 (float32_t a, float32_t b)
            /// A64: FABD Sd, Sn, Sm
            /// </summary>
            public static Vector64<float> AbsoluteDifferenceScalar(Vector64<float> left, Vector64<float> right) => AbsoluteDifferenceScalar(left, right);

            /// <summary>
            /// float64x2_t vaddq_f64 (float64x2_t a, float64x2_t b)
            /// A64: FADD Vd.2D, Vn.2D, Vm.2D
            /// </summary>
            public static Vector128<double> Add(Vector128<double> left, Vector128<double> right) => Add(left, right);

            /// <summary>
            /// uint8_t vaddv_u8 (uint8x8_t a)
            /// A64: ADDV Bd, Vn.8B
            /// </summary>
            public static Vector64<byte> AddAcross(Vector64<byte> value) => AddAcross(value);

            /// <summary>
            /// int16_t vaddv_s16 (int16x4_t a)
            /// A64: ADDV Hd, Vn.4H
            /// </summary>
            public static Vector64<short> AddAcross(Vector64<short> value) => AddAcross(value);

            /// <summary>
            /// int8_t vaddv_s8 (int8x8_t a)
            /// A64: ADDV Bd, Vn.8B
            /// </summary>
            public static Vector64<sbyte> AddAcross(Vector64<sbyte> value) => AddAcross(value);

            /// <summary>
            /// uint16_t vaddv_u16 (uint16x4_t a)
            /// A64: ADDV Hd, Vn.4H
            /// </summary>
            public static Vector64<ushort> AddAcross(Vector64<ushort> value) => AddAcross(value);

            /// <summary>
            /// uint8_t vaddvq_u8 (uint8x16_t a)
            /// A64: ADDV Bd, Vn.16B
            /// </summary>
            public static Vector64<byte> AddAcross(Vector128<byte> value) => AddAcross(value);

            /// <summary>
            /// int16_t vaddvq_s16 (int16x8_t a)
            /// A64: ADDV Hd, Vn.8H
            /// </summary>
            public static Vector64<short> AddAcross(Vector128<short> value) => AddAcross(value);

            /// <summary>
            /// int32_t vaddvq_s32 (int32x4_t a)
            /// A64: ADDV Sd, Vn.4S
            /// </summary>
            public static Vector64<int> AddAcross(Vector128<int> value) => AddAcross(value);

            /// <summary>
            /// int8_t vaddvq_s8 (int8x16_t a)
            /// A64: ADDV Bd, Vn.16B
            /// </summary>
            public static Vector64<sbyte> AddAcross(Vector128<sbyte> value) => AddAcross(value);

            /// <summary>
            /// uint16_t vaddvq_u16 (uint16x8_t a)
            /// A64: ADDV Hd, Vn.8H
            /// </summary>
            public static Vector64<ushort> AddAcross(Vector128<ushort> value) => AddAcross(value);

            /// <summary>
            /// uint32_t vaddvq_u32 (uint32x4_t a)
            /// A64: ADDV Sd, Vn.4S
            /// </summary>
            public static Vector64<uint> AddAcross(Vector128<uint> value) => AddAcross(value);

            /// <summary>
            /// uint16_t vaddlv_u8 (uint8x8_t a)
            /// A64: UADDLV Hd, Vn.8B
            /// </summary>
            public static Vector64<ushort> AddAcrossWidening(Vector64<byte> value) => AddAcrossWidening(value);

            /// <summary>
            /// int32_t vaddlv_s16 (int16x4_t a)
            /// A64: SADDLV Sd, Vn.4H
            /// </summary>
            public static Vector64<int> AddAcrossWidening(Vector64<short> value) => AddAcrossWidening(value);

            /// <summary>
            /// int16_t vaddlv_s8 (int8x8_t a)
            /// A64: SADDLV Hd, Vn.8B
            /// </summary>
            public static Vector64<short> AddAcrossWidening(Vector64<sbyte> value) => AddAcrossWidening(value);

            /// <summary>
            /// uint32_t vaddlv_u16 (uint16x4_t a)
            /// A64: UADDLV Sd, Vn.4H
            /// </summary>
            public static Vector64<uint> AddAcrossWidening(Vector64<ushort> value) => AddAcrossWidening(value);

            /// <summary>
            /// uint16_t vaddlvq_u8 (uint8x16_t a)
            /// A64: UADDLV Hd, Vn.16B
            /// </summary>
            public static Vector64<ushort> AddAcrossWidening(Vector128<byte> value) => AddAcrossWidening(value);

            /// <summary>
            /// int32_t vaddlvq_s16 (int16x8_t a)
            /// A64: SADDLV Sd, Vn.8H
            /// </summary>
            public static Vector64<int> AddAcrossWidening(Vector128<short> value) => AddAcrossWidening(value);

            /// <summary>
            /// int64_t vaddlvq_s32 (int32x4_t a)
            /// A64: SADDLV Dd, Vn.4S
            /// </summary>
            public static Vector64<long> AddAcrossWidening(Vector128<int> value) => AddAcrossWidening(value);

            /// <summary>
            /// int16_t vaddlvq_s8 (int8x16_t a)
            /// A64: SADDLV Hd, Vn.16B
            /// </summary>
            public static Vector64<short> AddAcrossWidening(Vector128<sbyte> value) => AddAcrossWidening(value);

            /// <summary>
            /// uint32_t vaddlvq_u16 (uint16x8_t a)
            /// A64: UADDLV Sd, Vn.8H
            /// </summary>
            public static Vector64<uint> AddAcrossWidening(Vector128<ushort> value) => AddAcrossWidening(value);

            /// <summary>
            /// uint64_t vaddlvq_u32 (uint32x4_t a)
            /// A64: UADDLV Dd, Vn.4S
            /// </summary>
            public static Vector64<ulong> AddAcrossWidening(Vector128<uint> value) => AddAcrossWidening(value);
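            // Usage sketch (illustrative only): AddAcross sums every lane into element 0 of the result,
            // while AddAcrossWidening widens to the next larger element type first so that narrow lanes
            // cannot overflow the accumulator, e.g.:
            //
            //     Vector128<int> v = Vector128.Create(1, 2, 3, 4);
            //     int sum = AdvSimd.Arm64.AddAcross(v).ToScalar(); // 10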
            /// <summary>
            /// uint8x16_t vpaddq_u8 (uint8x16_t a, uint8x16_t b)
            /// A64: ADDP Vd.16B, Vn.16B, Vm.16B
            /// </summary>
            public static Vector128<byte> AddPairwise(Vector128<byte> left, Vector128<byte> right) => AddPairwise(left, right);

            /// <summary>
            /// float64x2_t vpaddq_f64 (float64x2_t a, float64x2_t b)
            /// A64: FADDP Vd.2D, Vn.2D, Vm.2D
            /// </summary>
            public static Vector128<double> AddPairwise(Vector128<double> left, Vector128<double> right) => AddPairwise(left, right);

            /// <summary>
            /// int16x8_t vpaddq_s16 (int16x8_t a, int16x8_t b)
            /// A64: ADDP Vd.8H, Vn.8H, Vm.8H
            /// </summary>
            public static Vector128<short> AddPairwise(Vector128<short> left, Vector128<short> right) => AddPairwise(left, right);

            /// <summary>
            /// int32x4_t vpaddq_s32 (int32x4_t a, int32x4_t b)
            /// A64: ADDP Vd.4S, Vn.4S, Vm.4S
            /// </summary>
            public static Vector128<int> AddPairwise(Vector128<int> left, Vector128<int> right) => AddPairwise(left, right);

            /// <summary>
            /// int64x2_t vpaddq_s64 (int64x2_t a, int64x2_t b)
            /// A64: ADDP Vd.2D, Vn.2D, Vm.2D
            /// </summary>
            public static Vector128<long> AddPairwise(Vector128<long> left, Vector128<long> right) => AddPairwise(left, right);

            /// <summary>
            /// int8x16_t vpaddq_s8 (int8x16_t a, int8x16_t b)
            /// A64: ADDP Vd.16B, Vn.16B, Vm.16B
            /// </summary>
            public static Vector128<sbyte> AddPairwise(Vector128<sbyte> left, Vector128<sbyte> right) => AddPairwise(left, right);

            /// <summary>
            /// float32x4_t vpaddq_f32 (float32x4_t a, float32x4_t b)
            /// A64: FADDP Vd.4S, Vn.4S, Vm.4S
            /// </summary>
            public static Vector128<float> AddPairwise(Vector128<float> left, Vector128<float> right) => AddPairwise(left, right);

            /// <summary>
            /// uint16x8_t vpaddq_u16 (uint16x8_t a, uint16x8_t b)
            /// A64: ADDP Vd.8H, Vn.8H, Vm.8H
            /// </summary>
            public static Vector128<ushort> AddPairwise(Vector128<ushort> left, Vector128<ushort> right) => AddPairwise(left, right);

            /// <summary>
            /// uint32x4_t vpaddq_u32 (uint32x4_t a, uint32x4_t b)
            /// A64: ADDP Vd.4S, Vn.4S, Vm.4S
            /// </summary>
            public static Vector128<uint> AddPairwise(Vector128<uint> left, Vector128<uint> right) => AddPairwise(left, right);

            /// <summary>
            /// uint64x2_t vpaddq_u64 (uint64x2_t a, uint64x2_t b)
            /// A64: ADDP Vd.2D, Vn.2D, Vm.2D
            /// </summary>
            public static Vector128<ulong> AddPairwise(Vector128<ulong> left, Vector128<ulong> right) => AddPairwise(left, right);

            /// <summary>
            /// float32_t vpadds_f32 (float32x2_t a)
            /// A64: FADDP Sd, Vn.2S
            /// </summary>
            public static Vector64<float> AddPairwiseScalar(Vector64<float> value) => AddPairwiseScalar(value);

            /// <summary>
            /// float64_t vpaddd_f64 (float64x2_t a)
            /// A64: FADDP Dd, Vn.2D
            /// </summary>
            public static Vector64<double> AddPairwiseScalar(Vector128<double> value) => AddPairwiseScalar(value);

            /// <summary>
            /// int64_t vpaddd_s64 (int64x2_t a)
            /// A64: ADDP Dd, Vn.2D
            /// </summary>
            public static Vector64<long> AddPairwiseScalar(Vector128<long> value) => AddPairwiseScalar(value);

            /// <summary>
            /// uint64_t vpaddd_u64 (uint64x2_t a)
            /// A64: ADDP Dd, Vn.2D
            /// </summary>
            public static Vector64<ulong> AddPairwiseScalar(Vector128<ulong> value) => AddPairwiseScalar(value);

            /// <summary>
            /// uint8x8_t vsqadd_u8 (uint8x8_t a, int8x8_t b)
            /// A64: USQADD Vd.8B, Vn.8B
            /// </summary>
            public static Vector64<byte> AddSaturate(Vector64<byte> left, Vector64<sbyte> right) => AddSaturate(left, right);

            /// <summary>
            /// int16x4_t vuqadd_s16 (int16x4_t a, uint16x4_t b)
            /// A64: SUQADD Vd.4H, Vn.4H
            /// </summary>
            public static Vector64<short> AddSaturate(Vector64<short> left, Vector64<ushort> right) => AddSaturate(left, right);

            /// <summary>
            /// int32x2_t vuqadd_s32 (int32x2_t a, uint32x2_t b)
            /// A64: SUQADD Vd.2S, Vn.2S
            /// </summary>
            public static Vector64<int> AddSaturate(Vector64<int> left, Vector64<uint> right) => AddSaturate(left, right);

            /// <summary>
            /// int8x8_t vuqadd_s8 (int8x8_t a, uint8x8_t b)
            /// A64: SUQADD Vd.8B, Vn.8B
            /// </summary>
            public static Vector64<sbyte> AddSaturate(Vector64<sbyte> left, Vector64<byte> right) => AddSaturate(left, right);

            /// <summary>
            /// uint16x4_t vsqadd_u16 (uint16x4_t a, int16x4_t b)
            /// A64: USQADD Vd.4H, Vn.4H
            /// </summary>
            public static Vector64<ushort> AddSaturate(Vector64<ushort> left, Vector64<short> right) => AddSaturate(left, right);

            /// <summary>
            /// uint32x2_t vsqadd_u32 (uint32x2_t a, int32x2_t b)
            /// A64: USQADD Vd.2S, Vn.2S
            /// </summary>
            public static Vector64<uint> AddSaturate(Vector64<uint> left, Vector64<int> right) => AddSaturate(left, right);

            /// <summary>
            /// uint8x16_t vsqaddq_u8 (uint8x16_t a, int8x16_t b)
            /// A64: USQADD Vd.16B, Vn.16B
            /// </summary>
            public static Vector128<byte> AddSaturate(Vector128<byte> left, Vector128<sbyte> right) => AddSaturate(left, right);

            /// <summary>
            /// int16x8_t vuqaddq_s16 (int16x8_t a, uint16x8_t b)
            /// A64: SUQADD Vd.8H, Vn.8H
            /// </summary>
            public static Vector128<short> AddSaturate(Vector128<short> left, Vector128<ushort> right) => AddSaturate(left, right);

            /// <summary>
            /// int32x4_t vuqaddq_s32 (int32x4_t a, uint32x4_t b)
            /// A64: SUQADD Vd.4S, Vn.4S
            /// </summary>
            public static Vector128<int> AddSaturate(Vector128<int> left, Vector128<uint> right) => AddSaturate(left, right);

            /// <summary>
            /// int64x2_t vuqaddq_s64 (int64x2_t a, uint64x2_t b)
            /// A64: SUQADD Vd.2D, Vn.2D
            /// </summary>
            public static Vector128<long> AddSaturate(Vector128<long> left, Vector128<ulong> right) => AddSaturate(left, right);

            /// <summary>
            /// int8x16_t vuqaddq_s8 (int8x16_t a, uint8x16_t b)
            /// A64: SUQADD Vd.16B, Vn.16B
            /// </summary>
            public static Vector128<sbyte> AddSaturate(Vector128<sbyte> left, Vector128<byte> right) => AddSaturate(left, right);

            /// <summary>
            /// uint16x8_t vsqaddq_u16 (uint16x8_t a, int16x8_t b)
            /// A64: USQADD Vd.8H, Vn.8H
            /// </summary>
            public static Vector128<ushort> AddSaturate(Vector128<ushort> left, Vector128<short> right) => AddSaturate(left, right);

            /// <summary>
            /// uint32x4_t vsqaddq_u32 (uint32x4_t a, int32x4_t b)
            /// A64: USQADD Vd.4S, Vn.4S
            /// </summary>
            public static Vector128<uint> AddSaturate(Vector128<uint> left, Vector128<int> right) => AddSaturate(left, right);

            /// <summary>
            /// uint64x2_t vsqaddq_u64 (uint64x2_t a, int64x2_t b)
            /// A64: USQADD Vd.2D, Vn.2D
            /// </summary>
            public static Vector128<ulong> AddSaturate(Vector128<ulong> left, Vector128<long> right) => AddSaturate(left, right);
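            // Usage sketch (illustrative only): these overloads mix a signed and an unsigned operand
            // (USQADD/SUQADD) and clamp at the bounds of the destination type instead of wrapping, e.g.:
            //
            //     Vector64<byte> b = Vector64.Create((byte)250);
            //     Vector64<sbyte> s = Vector64.Create((sbyte)10);
            //     Vector64<byte> r = AdvSimd.Arm64.AddSaturate(b, s); // every lane saturates to 255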
            /// <summary>
            /// uint8_t vqaddb_u8 (uint8_t a, uint8_t b)
            /// A64: UQADD Bd, Bn, Bm
            /// </summary>
            public static Vector64<byte> AddSaturateScalar(Vector64<byte> left, Vector64<byte> right) => AddSaturateScalar(left, right);

            /// <summary>
            /// uint8_t vsqaddb_u8 (uint8_t a, int8_t b)
            /// A64: USQADD Bd, Bn
            /// </summary>
            public static Vector64<byte> AddSaturateScalar(Vector64<byte> left, Vector64<sbyte> right) => AddSaturateScalar(left, right);

            /// <summary>
            /// int16_t vqaddh_s16 (int16_t a, int16_t b)
            /// A64: SQADD Hd, Hn, Hm
            /// </summary>
            public static Vector64<short> AddSaturateScalar(Vector64<short> left, Vector64<short> right) => AddSaturateScalar(left, right);

            /// <summary>
            /// int16_t vuqaddh_s16 (int16_t a, uint16_t b)
            /// A64: SUQADD Hd, Hn
            /// </summary>
            public static Vector64<short> AddSaturateScalar(Vector64<short> left, Vector64<ushort> right) => AddSaturateScalar(left, right);

            /// <summary>
            /// int32_t vqadds_s32 (int32_t a, int32_t b)
            /// A64: SQADD Sd, Sn, Sm
            /// </summary>
            public static Vector64<int> AddSaturateScalar(Vector64<int> left, Vector64<int> right) => AddSaturateScalar(left, right);

            /// <summary>
            /// int32_t vuqadds_s32 (int32_t a, uint32_t b)
            /// A64: SUQADD Sd, Sn
            /// </summary>
            public static Vector64<int> AddSaturateScalar(Vector64<int> left, Vector64<uint> right) => AddSaturateScalar(left, right);

            /// <summary>
            /// int64x1_t vuqadd_s64 (int64x1_t a, uint64x1_t b)
            /// A64: SUQADD Dd, Dn
            /// </summary>
            public static Vector64<long> AddSaturateScalar(Vector64<long> left, Vector64<ulong> right) => AddSaturateScalar(left, right);

            /// <summary>
            /// int8_t vqaddb_s8 (int8_t a, int8_t b)
            /// A64: SQADD Bd, Bn, Bm
            /// </summary>
            public static Vector64<sbyte> AddSaturateScalar(Vector64<sbyte> left, Vector64<sbyte> right) => AddSaturateScalar(left, right);

            /// <summary>
            /// int8_t vuqaddb_s8 (int8_t a, uint8_t b)
            /// A64: SUQADD Bd, Bn
            /// </summary>
            public static Vector64<sbyte> AddSaturateScalar(Vector64<sbyte> left, Vector64<byte> right) => AddSaturateScalar(left, right);

            /// <summary>
            /// uint16_t vqaddh_u16 (uint16_t a, uint16_t b)
            /// A64: UQADD Hd, Hn, Hm
            /// </summary>
            public static Vector64<ushort> AddSaturateScalar(Vector64<ushort> left, Vector64<ushort> right) => AddSaturateScalar(left, right);

            /// <summary>
            /// uint16_t vsqaddh_u16 (uint16_t a, int16_t b)
            /// A64: USQADD Hd, Hn
            /// </summary>
            public static Vector64<ushort> AddSaturateScalar(Vector64<ushort> left, Vector64<short> right) => AddSaturateScalar(left, right);

            /// <summary>
            /// uint32_t vqadds_u32 (uint32_t a, uint32_t b)
            /// A64: UQADD Sd, Sn, Sm
            /// </summary>
            public static Vector64<uint> AddSaturateScalar(Vector64<uint> left, Vector64<uint> right) => AddSaturateScalar(left, right);

            /// <summary>
            /// uint32_t vsqadds_u32 (uint32_t a, int32_t b)
            /// A64: USQADD Sd, Sn
            /// </summary>
            public static Vector64<uint> AddSaturateScalar(Vector64<uint> left, Vector64<int> right) => AddSaturateScalar(left, right);

            /// <summary>
            /// uint64x1_t vsqadd_u64 (uint64x1_t a, int64x1_t b)
            /// A64: USQADD Dd, Dn
            /// </summary>
            public static Vector64<ulong> AddSaturateScalar(Vector64<ulong> left, Vector64<long> right) => AddSaturateScalar(left, right);

            /// <summary>
            /// float64x2_t vrndpq_f64 (float64x2_t a)
            /// A64: FRINTP Vd.2D, Vn.2D
            /// </summary>
            public static Vector128<double> Ceiling(Vector128<double> value) => Ceiling(value);

            /// <summary>
            /// uint64x2_t vceqq_f64 (float64x2_t a, float64x2_t b)
            /// A64: FCMEQ Vd.2D, Vn.2D, Vm.2D
            /// </summary>
            public static Vector128<double> CompareEqual(Vector128<double> left, Vector128<double> right) => CompareEqual(left, right);

            /// <summary>
            /// uint64x2_t vceqq_s64 (int64x2_t a, int64x2_t b)
            /// A64: CMEQ Vd.2D, Vn.2D, Vm.2D
            /// </summary>
            public static Vector128<long> CompareEqual(Vector128<long> left, Vector128<long> right) => CompareEqual(left, right);

            /// <summary>
            /// uint64x2_t vceqq_u64 (uint64x2_t a, uint64x2_t b)
            /// A64: CMEQ Vd.2D, Vn.2D, Vm.2D
            /// </summary>
            public static Vector128<ulong> CompareEqual(Vector128<ulong> left, Vector128<ulong> right) => CompareEqual(left, right);

            /// <summary>
            /// uint64x1_t vceq_f64 (float64x1_t a, float64x1_t b)
            /// A64: FCMEQ Dd, Dn, Dm
            /// </summary>
            public static Vector64<double> CompareEqualScalar(Vector64<double> left, Vector64<double> right) => CompareEqualScalar(left, right);

            /// <summary>
            /// uint64x1_t vceq_s64 (int64x1_t a, int64x1_t b)
            /// A64: CMEQ Dd, Dn, Dm
            /// </summary>
            public static Vector64<long> CompareEqualScalar(Vector64<long> left, Vector64<long> right) => CompareEqualScalar(left, right);

            /// <summary>
            /// uint32_t vceqs_f32 (float32_t a, float32_t b)
            /// A64: FCMEQ Sd, Sn, Sm
            /// </summary>
            public static Vector64<float> CompareEqualScalar(Vector64<float> left, Vector64<float> right) => CompareEqualScalar(left, right);

            /// <summary>
            /// uint64x1_t vceq_u64 (uint64x1_t a, uint64x1_t b)
            /// A64: CMEQ Dd, Dn, Dm
            /// </summary>
            public static Vector64<ulong> CompareEqualScalar(Vector64<ulong> left, Vector64<ulong> right) => CompareEqualScalar(left, right);

            /// <summary>
            /// uint64x2_t vcgtq_f64 (float64x2_t a, float64x2_t b)
            /// A64: FCMGT Vd.2D, Vn.2D, Vm.2D
            /// </summary>
            public static Vector128<double> CompareGreaterThan(Vector128<double> left, Vector128<double> right) => CompareGreaterThan(left, right);

            /// <summary>
            /// uint64x2_t vcgtq_s64 (int64x2_t a, int64x2_t b)
            /// A64: CMGT Vd.2D, Vn.2D, Vm.2D
            /// </summary>
            public static Vector128<long> CompareGreaterThan(Vector128<long> left, Vector128<long> right) => CompareGreaterThan(left, right);

            /// <summary>
            /// uint64x2_t vcgtq_u64 (uint64x2_t a, uint64x2_t b)
            /// A64: CMHI Vd.2D, Vn.2D, Vm.2D
            /// </summary>
            public static Vector128<ulong> CompareGreaterThan(Vector128<ulong> left, Vector128<ulong> right) => CompareGreaterThan(left, right);

            /// <summary>
            /// uint64x1_t vcgt_f64 (float64x1_t a, float64x1_t b)
            /// A64: FCMGT Dd, Dn, Dm
            /// </summary>
            public static Vector64<double> CompareGreaterThanScalar(Vector64<double> left, Vector64<double> right) => CompareGreaterThanScalar(left, right);

            /// <summary>
            /// uint64x1_t vcgt_s64 (int64x1_t a, int64x1_t b)
            /// A64: CMGT Dd, Dn, Dm
            /// </summary>
            public static Vector64<long> CompareGreaterThanScalar(Vector64<long> left, Vector64<long> right) => CompareGreaterThanScalar(left, right);

            /// <summary>
            /// uint32_t vcgts_f32 (float32_t a, float32_t b)
            /// A64: FCMGT Sd, Sn, Sm
            /// </summary>
            public static Vector64<float> CompareGreaterThanScalar(Vector64<float> left, Vector64<float> right) => CompareGreaterThanScalar(left, right);

            /// <summary>
            /// uint64x1_t vcgt_u64 (uint64x1_t a, uint64x1_t b)
            /// A64: CMHI Dd, Dn, Dm
            /// </summary>
            public static Vector64<ulong> CompareGreaterThanScalar(Vector64<ulong> left, Vector64<ulong> right) => CompareGreaterThanScalar(left, right);

            /// <summary>
            /// uint64x2_t vcgeq_f64 (float64x2_t a, float64x2_t b)
            /// A64: FCMGE Vd.2D, Vn.2D, Vm.2D
            /// </summary>
            public static Vector128<double> CompareGreaterThanOrEqual(Vector128<double> left, Vector128<double> right) => CompareGreaterThanOrEqual(left, right);

            /// <summary>
            /// uint64x2_t vcgeq_s64 (int64x2_t a, int64x2_t b)
            /// A64: CMGE Vd.2D, Vn.2D, Vm.2D
            /// </summary>
            public static Vector128<long> CompareGreaterThanOrEqual(Vector128<long> left, Vector128<long> right) => CompareGreaterThanOrEqual(left, right);

            /// <summary>
            /// uint64x2_t vcgeq_u64 (uint64x2_t a, uint64x2_t b)
            /// A64: CMHS Vd.2D, Vn.2D, Vm.2D
            /// </summary>
            public static Vector128<ulong> CompareGreaterThanOrEqual(Vector128<ulong> left, Vector128<ulong> right) => CompareGreaterThanOrEqual(left, right);

            /// <summary>
            /// uint64x1_t vcge_f64 (float64x1_t a, float64x1_t b)
            /// A64: FCMGE Dd, Dn, Dm
            /// </summary>
            public static Vector64<double> CompareGreaterThanOrEqualScalar(Vector64<double> left, Vector64<double> right) => CompareGreaterThanOrEqualScalar(left, right);

            /// <summary>
            /// uint64x1_t vcge_s64 (int64x1_t a, int64x1_t b)
            /// A64: CMGE Dd, Dn, Dm
            /// </summary>
            public static Vector64<long> CompareGreaterThanOrEqualScalar(Vector64<long> left, Vector64<long> right) => CompareGreaterThanOrEqualScalar(left, right);

            /// <summary>
            /// uint32_t vcges_f32 (float32_t a, float32_t b)
            /// A64: FCMGE Sd, Sn, Sm
            /// </summary>
            public static Vector64<float> CompareGreaterThanOrEqualScalar(Vector64<float> left, Vector64<float> right) => CompareGreaterThanOrEqualScalar(left, right);

            /// <summary>
            /// uint64x1_t vcge_u64 (uint64x1_t a, uint64x1_t b)
            /// A64: CMHS Dd, Dn, Dm
            /// </summary>
            public static Vector64<ulong> CompareGreaterThanOrEqualScalar(Vector64<ulong> left, Vector64<ulong> right) => CompareGreaterThanOrEqualScalar(left, right);
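            // Usage sketch (illustrative only): the Compare* methods return an all-ones lane where the
            // condition holds and an all-zeros lane where it does not, so the result can feed a bitwise
            // select ('x' and 'y' below are hypothetical Vector128<long> locals):
            //
            //     Vector128<long> mask = AdvSimd.Arm64.CompareGreaterThan(x, y);
            //     Vector128<long> max  = AdvSimd.BitwiseSelect(mask, x, y); // x where x > y, otherwise y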
            /// <summary>
            /// uint64x2_t vcltq_f64 (float64x2_t a, float64x2_t b)
            /// A64: FCMGT Vd.2D, Vn.2D, Vm.2D
            /// </summary>
            public static Vector128<double> CompareLessThan(Vector128<double> left, Vector128<double> right) => CompareLessThan(left, right);

            /// <summary>
            /// uint64x2_t vcltq_s64 (int64x2_t a, int64x2_t b)
            /// A64: CMGT Vd.2D, Vn.2D, Vm.2D
            /// </summary>
            public static Vector128<long> CompareLessThan(Vector128<long> left, Vector128<long> right) => CompareLessThan(left, right);

            /// <summary>
            /// uint64x2_t vcltq_u64 (uint64x2_t a, uint64x2_t b)
            /// A64: CMHI Vd.2D, Vn.2D, Vm.2D
            /// </summary>
            public static Vector128<ulong> CompareLessThan(Vector128<ulong> left, Vector128<ulong> right) => CompareLessThan(left, right);

            /// <summary>
            /// uint64x1_t vclt_f64 (float64x1_t a, float64x1_t b)
            /// A64: FCMGT Dd, Dn, Dm
            /// </summary>
            public static Vector64<double> CompareLessThanScalar(Vector64<double> left, Vector64<double> right) => CompareLessThanScalar(left, right);

            /// <summary>
            /// uint64x1_t vclt_s64 (int64x1_t a, int64x1_t b)
            /// A64: CMGT Dd, Dn, Dm
            /// </summary>
            public static Vector64<long> CompareLessThanScalar(Vector64<long> left, Vector64<long> right) => CompareLessThanScalar(left, right);

            /// <summary>
            /// uint32_t vclts_f32 (float32_t a, float32_t b)
            /// A64: FCMGT Sd, Sn, Sm
            /// </summary>
            public static Vector64<float> CompareLessThanScalar(Vector64<float> left, Vector64<float> right) => CompareLessThanScalar(left, right);

            /// <summary>
            /// uint64x1_t vclt_u64 (uint64x1_t a, uint64x1_t b)
            /// A64: CMHI Dd, Dn, Dm
            /// </summary>
            public static Vector64<ulong> CompareLessThanScalar(Vector64<ulong> left, Vector64<ulong> right) => CompareLessThanScalar(left, right);

            /// <summary>
            /// uint64x2_t vcleq_f64 (float64x2_t a, float64x2_t b)
            /// A64: FCMGE Vd.2D, Vn.2D, Vm.2D
            /// </summary>
            public static Vector128<double> CompareLessThanOrEqual(Vector128<double> left, Vector128<double> right) => CompareLessThanOrEqual(left, right);

            /// <summary>
            /// uint64x2_t vcleq_s64 (int64x2_t a, int64x2_t b)
            /// A64: CMGE Vd.2D, Vn.2D, Vm.2D
            /// </summary>
            public static Vector128<long> CompareLessThanOrEqual(Vector128<long> left, Vector128<long> right) => CompareLessThanOrEqual(left, right);

            /// <summary>
            /// uint64x2_t vcleq_u64 (uint64x2_t a, uint64x2_t b)
            /// A64: CMHS Vd.2D, Vn.2D, Vm.2D
            /// </summary>
            public static Vector128<ulong> CompareLessThanOrEqual(Vector128<ulong> left, Vector128<ulong> right) => CompareLessThanOrEqual(left, right);

            /// <summary>
            /// uint64x1_t vcle_f64 (float64x1_t a, float64x1_t b)
            /// A64: FCMGE Dd, Dn, Dm
            /// </summary>
            public static Vector64<double> CompareLessThanOrEqualScalar(Vector64<double> left, Vector64<double> right) => CompareLessThanOrEqualScalar(left, right);

            /// <summary>
            /// uint64x1_t vcle_s64 (int64x1_t a, int64x1_t b)
            /// A64: CMGE Dd, Dn, Dm
            /// </summary>
            public static Vector64<long> CompareLessThanOrEqualScalar(Vector64<long> left, Vector64<long> right) => CompareLessThanOrEqualScalar(left, right);

            /// <summary>
            /// uint32_t vcles_f32 (float32_t a, float32_t b)
            /// A64: FCMGE Sd, Sn, Sm
            /// </summary>
            public static Vector64<float> CompareLessThanOrEqualScalar(Vector64<float> left, Vector64<float> right) => CompareLessThanOrEqualScalar(left, right);

            /// <summary>
            /// uint64x1_t vcle_u64 (uint64x1_t a, uint64x1_t b)
            /// A64: CMHS Dd, Dn, Dm
            /// </summary>
            public static Vector64<ulong> CompareLessThanOrEqualScalar(Vector64<ulong> left, Vector64<ulong> right) => CompareLessThanOrEqualScalar(left, right);

            /// <summary>
            /// uint64x2_t vtstq_f64 (float64x2_t a, float64x2_t b)
            /// A64: CMTST Vd.2D, Vn.2D, Vm.2D
            /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
            /// </summary>
            public static Vector128<double> CompareTest(Vector128<double> left, Vector128<double> right) => CompareTest(left, right);

            /// <summary>
            /// uint64x2_t vtstq_s64 (int64x2_t a, int64x2_t b)
            /// A64: CMTST Vd.2D, Vn.2D, Vm.2D
            /// </summary>
            public static Vector128<long> CompareTest(Vector128<long> left, Vector128<long> right) => CompareTest(left, right);

            /// <summary>
            /// uint64x2_t vtstq_u64 (uint64x2_t a, uint64x2_t b)
            /// A64: CMTST Vd.2D, Vn.2D, Vm.2D
            /// </summary>
            public static Vector128<ulong> CompareTest(Vector128<ulong> left, Vector128<ulong> right) => CompareTest(left, right);

            /// <summary>
            /// uint64x1_t vtst_f64 (float64x1_t a, float64x1_t b)
            /// A64: CMTST Dd, Dn, Dm
            /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
            /// </summary>
            public static Vector64<double> CompareTestScalar(Vector64<double> left, Vector64<double> right) => CompareTestScalar(left, right);

            /// <summary>
            /// uint64x1_t vtst_s64 (int64x1_t a, int64x1_t b)
            /// A64: CMTST Dd, Dn, Dm
            /// </summary>
            public static Vector64<long> CompareTestScalar(Vector64<long> left, Vector64<long> right) => CompareTestScalar(left, right);

            /// <summary>
            /// uint64x1_t vtst_u64 (uint64x1_t a, uint64x1_t b)
            /// A64: CMTST Dd, Dn, Dm
            /// </summary>
            public static Vector64<ulong> CompareTestScalar(Vector64<ulong> left, Vector64<ulong> right) => CompareTestScalar(left, right);

            /// <summary>
            /// float64x2_t vcvt_f64_f32 (float32x2_t a)
            /// A64: FCVTL Vd.2D, Vn.2S
            /// </summary>
            public static Vector128<double> ConvertToDouble(Vector64<float> value) => ConvertToDouble(value);

            /// <summary>
            /// float64x2_t vcvtq_f64_s64 (int64x2_t a)
            /// A64: SCVTF Vd.2D, Vn.2D
            /// </summary>
            public static Vector128<double> ConvertToDouble(Vector128<long> value) => ConvertToDouble(value);

            /// <summary>
            /// float64x2_t vcvtq_f64_u64 (uint64x2_t a)
            /// A64: UCVTF Vd.2D, Vn.2D
            /// </summary>
            public static Vector128<double> ConvertToDouble(Vector128<ulong> value) => ConvertToDouble(value);

            /// <summary>
            /// float64x1_t vcvt_f64_s64 (int64x1_t a)
            /// A64: SCVTF Dd, Dn
            /// </summary>
            public static Vector64<double> ConvertToDoubleScalar(Vector64<long> value) => ConvertToDoubleScalar(value);

            /// <summary>
            /// float64x1_t vcvt_f64_u64 (uint64x1_t a)
            /// A64: UCVTF Dd, Dn
            /// </summary>
            public static Vector64<double> ConvertToDoubleScalar(Vector64<ulong> value) => ConvertToDoubleScalar(value);

            /// <summary>
            /// float64x2_t vcvt_high_f64_f32 (float32x4_t a)
            /// A64: FCVTL2 Vd.2D, Vn.4S
            /// </summary>
            public static Vector128<double> ConvertToDoubleUpper(Vector128<float> value) => ConvertToDoubleUpper(value);

            /// <summary>
            /// int64x2_t vcvtaq_s64_f64 (float64x2_t a)
            /// A64: FCVTAS Vd.2D, Vn.2D
            /// </summary>
            public static Vector128<long> ConvertToInt64RoundAwayFromZero(Vector128<double> value) => ConvertToInt64RoundAwayFromZero(value);

            /// <summary>
            /// int64x1_t vcvta_s64_f64 (float64x1_t a)
            /// A64: FCVTAS Dd, Dn
            /// </summary>
            public static Vector64<long> ConvertToInt64RoundAwayFromZeroScalar(Vector64<double> value) => ConvertToInt64RoundAwayFromZeroScalar(value);
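            // Note (summary of the documentation in this family): each ConvertTo*Round* name corresponds
            // to one AArch64 rounding mode: RoundAwayFromZero = FCVTAS/FCVTAU, RoundToEven = FCVTNS/FCVTNU,
            // RoundToNegativeInfinity = FCVTMS/FCVTMU, RoundToPositiveInfinity = FCVTPS/FCVTPU, and
            // RoundToZero = FCVTZS/FCVTZU (the C-style truncating conversion).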
            /// <summary>
            /// int64x2_t vcvtnq_s64_f64 (float64x2_t a)
            /// A64: FCVTNS Vd.2D, Vn.2D
            /// </summary>
            public static Vector128<long> ConvertToInt64RoundToEven(Vector128<double> value) => ConvertToInt64RoundToEven(value);

            /// <summary>
            /// int64x1_t vcvtn_s64_f64 (float64x1_t a)
            /// A64: FCVTNS Dd, Dn
            /// </summary>
            public static Vector64<long> ConvertToInt64RoundToEvenScalar(Vector64<double> value) => ConvertToInt64RoundToEvenScalar(value);

            /// <summary>
            /// int64x2_t vcvtmq_s64_f64 (float64x2_t a)
            /// A64: FCVTMS Vd.2D, Vn.2D
            /// </summary>
            public static Vector128<long> ConvertToInt64RoundToNegativeInfinity(Vector128<double> value) => ConvertToInt64RoundToNegativeInfinity(value);

            /// <summary>
            /// int64x1_t vcvtm_s64_f64 (float64x1_t a)
            /// A64: FCVTMS Dd, Dn
            /// </summary>
            public static Vector64<long> ConvertToInt64RoundToNegativeInfinityScalar(Vector64<double> value) => ConvertToInt64RoundToNegativeInfinityScalar(value);

            /// <summary>
            /// int64x2_t vcvtpq_s64_f64 (float64x2_t a)
            /// A64: FCVTPS Vd.2D, Vn.2D
            /// </summary>
            public static Vector128<long> ConvertToInt64RoundToPositiveInfinity(Vector128<double> value) => ConvertToInt64RoundToPositiveInfinity(value);

            /// <summary>
            /// int64x1_t vcvtp_s64_f64 (float64x1_t a)
            /// A64: FCVTPS Dd, Dn
            /// </summary>
            public static Vector64<long> ConvertToInt64RoundToPositiveInfinityScalar(Vector64<double> value) => ConvertToInt64RoundToPositiveInfinityScalar(value);

            /// <summary>
            /// int64x2_t vcvtq_s64_f64 (float64x2_t a)
            /// A64: FCVTZS Vd.2D, Vn.2D
            /// </summary>
            public static Vector128<long> ConvertToInt64RoundToZero(Vector128<double> value) => ConvertToInt64RoundToZero(value);

            /// <summary>
            /// int64x1_t vcvt_s64_f64 (float64x1_t a)
            /// A64: FCVTZS Dd, Dn
            /// </summary>
            public static Vector64<long> ConvertToInt64RoundToZeroScalar(Vector64<double> value) => ConvertToInt64RoundToZeroScalar(value);

            /// <summary>
            /// float32x2_t vcvt_f32_f64 (float64x2_t a)
            /// A64: FCVTN Vd.2S, Vn.2D
            /// </summary>
            public static Vector64<float> ConvertToSingleLower(Vector128<double> value) => ConvertToSingleLower(value);

            /// <summary>
            /// float32x2_t vcvtx_f32_f64 (float64x2_t a)
            /// A64: FCVTXN Vd.2S, Vn.2D
            /// </summary>
            public static Vector64<float> ConvertToSingleRoundToOddLower(Vector128<double> value) => ConvertToSingleRoundToOddLower(value);

            /// <summary>
            /// float32x4_t vcvtx_high_f32_f64 (float32x2_t r, float64x2_t a)
            /// A64: FCVTXN2 Vd.4S, Vn.2D
            /// </summary>
            public static Vector128<float> ConvertToSingleRoundToOddUpper(Vector64<float> lower, Vector128<double> value) => ConvertToSingleRoundToOddUpper(lower, value);

            /// <summary>
            /// float32x4_t vcvt_high_f32_f64 (float32x2_t r, float64x2_t a)
            /// A64: FCVTN2 Vd.4S, Vn.2D
            /// </summary>
            public static Vector128<float> ConvertToSingleUpper(Vector64<float> lower, Vector128<double> value) => ConvertToSingleUpper(lower, value);

            /// <summary>
            /// uint64x2_t vcvtaq_u64_f64 (float64x2_t a)
            /// A64: FCVTAU Vd.2D, Vn.2D
            /// </summary>
            public static Vector128<ulong> ConvertToUInt64RoundAwayFromZero(Vector128<double> value) => ConvertToUInt64RoundAwayFromZero(value);

            /// <summary>
            /// uint64x1_t vcvta_u64_f64 (float64x1_t a)
            /// A64: FCVTAU Dd, Dn
            /// </summary>
            public static Vector64<ulong> ConvertToUInt64RoundAwayFromZeroScalar(Vector64<double> value) => ConvertToUInt64RoundAwayFromZeroScalar(value);

            /// <summary>
            /// uint64x2_t vcvtnq_u64_f64 (float64x2_t a)
            /// A64: FCVTNU Vd.2D, Vn.2D
            /// </summary>
            public static Vector128<ulong> ConvertToUInt64RoundToEven(Vector128<double> value) => ConvertToUInt64RoundToEven(value);

            /// <summary>
            /// uint64x1_t vcvtn_u64_f64 (float64x1_t a)
            /// A64: FCVTNU Dd, Dn
            /// </summary>
            public static Vector64<ulong> ConvertToUInt64RoundToEvenScalar(Vector64<double> value) => ConvertToUInt64RoundToEvenScalar(value);

            /// <summary>
            /// uint64x2_t vcvtmq_u64_f64 (float64x2_t a)
            /// A64: FCVTMU Vd.2D, Vn.2D
            /// </summary>
            public static Vector128<ulong> ConvertToUInt64RoundToNegativeInfinity(Vector128<double> value) => ConvertToUInt64RoundToNegativeInfinity(value);

            /// <summary>
            /// uint64x1_t vcvtm_u64_f64 (float64x1_t a)
            /// A64: FCVTMU Dd, Dn
            /// </summary>
            public static Vector64<ulong> ConvertToUInt64RoundToNegativeInfinityScalar(Vector64<double> value) => ConvertToUInt64RoundToNegativeInfinityScalar(value);

            /// <summary>
            /// uint64x2_t vcvtpq_u64_f64 (float64x2_t a)
            /// A64: FCVTPU Vd.2D, Vn.2D
            /// </summary>
            public static Vector128<ulong> ConvertToUInt64RoundToPositiveInfinity(Vector128<double> value) => ConvertToUInt64RoundToPositiveInfinity(value);

            /// <summary>
            /// uint64x1_t vcvtp_u64_f64 (float64x1_t a)
            /// A64: FCVTPU Dd, Dn
            /// </summary>
            public static Vector64<ulong> ConvertToUInt64RoundToPositiveInfinityScalar(Vector64<double> value) => ConvertToUInt64RoundToPositiveInfinityScalar(value);

            /// <summary>
            /// uint64x2_t vcvtq_u64_f64 (float64x2_t a)
            /// A64: FCVTZU Vd.2D, Vn.2D
            /// </summary>
            public static Vector128<ulong> ConvertToUInt64RoundToZero(Vector128<double> value) => ConvertToUInt64RoundToZero(value);

            /// <summary>
            /// uint64x1_t vcvt_u64_f64 (float64x1_t a)
            /// A64: FCVTZU Dd, Dn
            /// </summary>
            public static Vector64<ulong> ConvertToUInt64RoundToZeroScalar(Vector64<double> value) => ConvertToUInt64RoundToZeroScalar(value);

            /// <summary>
            /// float32x2_t vdiv_f32 (float32x2_t a, float32x2_t b)
            /// A64: FDIV Vd.2S, Vn.2S, Vm.2S
            /// </summary>
            public static Vector64<float> Divide(Vector64<float> left, Vector64<float> right) => Divide(left, right);

            /// <summary>
            /// float64x2_t vdivq_f64 (float64x2_t a, float64x2_t b)
            /// A64: FDIV Vd.2D, Vn.2D, Vm.2D
            /// </summary>
            public static Vector128<double> Divide(Vector128<double> left, Vector128<double> right) => Divide(left, right);

            /// <summary>
            /// float32x4_t vdivq_f32 (float32x4_t a, float32x4_t b)
            /// A64: FDIV Vd.4S, Vn.4S, Vm.4S
            /// </summary>
            public static Vector128<float> Divide(Vector128<float> left, Vector128<float> right) => Divide(left, right);
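            // Note (illustrative): NEON on 32-bit Arm has no vector divide instruction (only scalar VDIV
            // and reciprocal estimates), which is why the Vector64/Vector128 Divide overloads above live
            // in this Arm64 class rather than in the base AdvSimd class.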
            /// <summary>
            /// float64x2_t vdupq_laneq_f64 (float64x2_t vec, const int lane)
            /// A64: DUP Vd.2D, Vn.D[index]
            /// </summary>
            public static Vector128<double> DuplicateSelectedScalarToVector128(Vector128<double> value, byte index) => DuplicateSelectedScalarToVector128(value, index);

            /// <summary>
            /// int64x2_t vdupq_laneq_s64 (int64x2_t vec, const int lane)
            /// A64: DUP Vd.2D, Vn.D[index]
            /// </summary>
            public static Vector128<long> DuplicateSelectedScalarToVector128(Vector128<long> value, byte index) => DuplicateSelectedScalarToVector128(value, index);

            /// <summary>
            /// uint64x2_t vdupq_laneq_u64 (uint64x2_t vec, const int lane)
            /// A64: DUP Vd.2D, Vn.D[index]
            /// </summary>
            public static Vector128<ulong> DuplicateSelectedScalarToVector128(Vector128<ulong> value, byte index) => DuplicateSelectedScalarToVector128(value, index);

            /// <summary>
            /// float64x2_t vdupq_n_f64 (float64_t value)
            /// A64: DUP Vd.2D, Vn.D[0]
            /// </summary>
            public static Vector128<double> DuplicateToVector128(double value) => DuplicateToVector128(value);

            /// <summary>
            /// int64x2_t vdupq_n_s64 (int64_t value)
            /// A64: DUP Vd.2D, Rn
            /// </summary>
            public static Vector128<long> DuplicateToVector128(long value) => DuplicateToVector128(value);

            /// <summary>
            /// uint64x2_t vdupq_n_u64 (uint64_t value)
            /// A64: DUP Vd.2D, Rn
            /// </summary>
            public static Vector128<ulong> DuplicateToVector128(ulong value) => DuplicateToVector128(value);

            /// <summary>
            /// uint8_t vqmovnh_u16 (uint16_t a)
            /// A64: UQXTN Bd, Hn
            /// </summary>
            public static Vector64<byte> ExtractNarrowingSaturateScalar(Vector64<ushort> value) => ExtractNarrowingSaturateScalar(value);

            /// <summary>
            /// int16_t vqmovns_s32 (int32_t a)
            /// A64: SQXTN Hd, Sn
            /// </summary>
            public static Vector64<short> ExtractNarrowingSaturateScalar(Vector64<int> value) => ExtractNarrowingSaturateScalar(value);

            /// <summary>
            /// int32_t vqmovnd_s64 (int64_t a)
            /// A64: SQXTN Sd, Dn
            /// </summary>
            public static Vector64<int> ExtractNarrowingSaturateScalar(Vector64<long> value) => ExtractNarrowingSaturateScalar(value);

            /// <summary>
            /// int8_t vqmovnh_s16 (int16_t a)
            /// A64: SQXTN Bd, Hn
            /// </summary>
            public static Vector64<sbyte> ExtractNarrowingSaturateScalar(Vector64<short> value) => ExtractNarrowingSaturateScalar(value);

            /// <summary>
            /// uint16_t vqmovns_u32 (uint32_t a)
            /// A64: UQXTN Hd, Sn
            /// </summary>
            public static Vector64<ushort> ExtractNarrowingSaturateScalar(Vector64<uint> value) => ExtractNarrowingSaturateScalar(value);

            /// <summary>
            /// uint32_t vqmovnd_u64 (uint64_t a)
            /// A64: UQXTN Sd, Dn
            /// </summary>
            public static Vector64<uint> ExtractNarrowingSaturateScalar(Vector64<ulong> value) => ExtractNarrowingSaturateScalar(value);

            /// <summary>
            /// uint8_t vqmovunh_s16 (int16_t a)
            /// A64: SQXTUN Bd, Hn
            /// </summary>
            public static Vector64<byte> ExtractNarrowingSaturateUnsignedScalar(Vector64<short> value) => ExtractNarrowingSaturateUnsignedScalar(value);

            /// <summary>
            /// uint16_t vqmovuns_s32 (int32_t a)
            /// A64: SQXTUN Hd, Sn
            /// </summary>
            public static Vector64<ushort> ExtractNarrowingSaturateUnsignedScalar(Vector64<int> value) => ExtractNarrowingSaturateUnsignedScalar(value);

            /// <summary>
            /// uint32_t vqmovund_s64 (int64_t a)
            /// A64: SQXTUN Sd, Dn
            /// </summary>
            public static Vector64<uint> ExtractNarrowingSaturateUnsignedScalar(Vector64<long> value) => ExtractNarrowingSaturateUnsignedScalar(value);
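            // Usage sketch (illustrative only): ExtractNarrowingSaturateScalar narrows element 0 to the
            // next smaller element type, clamping values that do not fit, e.g.:
            //
            //     Vector64<int> wide = Vector64.Create(100_000);
            //     Vector64<short> narrow = AdvSimd.Arm64.ExtractNarrowingSaturateScalar(wide); // element 0 == 32767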
            /// <summary>
            /// float64x2_t vrndmq_f64 (float64x2_t a)
            /// A64: FRINTM Vd.2D, Vn.2D
            /// </summary>
            public static Vector128<double> Floor(Vector128<double> value) => Floor(value);

            /// <summary>
            /// float64x2_t vfmaq_f64 (float64x2_t a, float64x2_t b, float64x2_t c)
            /// A64: FMLA Vd.2D, Vn.2D, Vm.2D
            /// </summary>
            public static Vector128<double> FusedMultiplyAdd(Vector128<double> addend, Vector128<double> left, Vector128<double> right) => FusedMultiplyAdd(addend, left, right);

            /// <summary>
            /// float32x2_t vfma_n_f32 (float32x2_t a, float32x2_t b, float32_t n)
            /// A64: FMLA Vd.2S, Vn.2S, Vm.S[0]
            /// </summary>
            public static Vector64<float> FusedMultiplyAddByScalar(Vector64<float> addend, Vector64<float> left, Vector64<float> right) => FusedMultiplyAddByScalar(addend, left, right);

            /// <summary>
            /// float64x2_t vfmaq_n_f64 (float64x2_t a, float64x2_t b, float64_t n)
            /// A64: FMLA Vd.2D, Vn.2D, Vm.D[0]
            /// </summary>
            public static Vector128<double> FusedMultiplyAddByScalar(Vector128<double> addend, Vector128<double> left, Vector64<double> right) => FusedMultiplyAddByScalar(addend, left, right);

            /// <summary>
            /// float32x4_t vfmaq_n_f32 (float32x4_t a, float32x4_t b, float32_t n)
            /// A64: FMLA Vd.4S, Vn.4S, Vm.S[0]
            /// </summary>
            public static Vector128<float> FusedMultiplyAddByScalar(Vector128<float> addend, Vector128<float> left, Vector64<float> right) => FusedMultiplyAddByScalar(addend, left, right);

            /// <summary>
            /// float32x2_t vfma_lane_f32 (float32x2_t a, float32x2_t b, float32x2_t v, const int lane)
            /// A64: FMLA Vd.2S, Vn.2S, Vm.S[lane]
            /// </summary>
            public static Vector64<float> FusedMultiplyAddBySelectedScalar(Vector64<float> addend, Vector64<float> left, Vector64<float> right, byte rightIndex) => FusedMultiplyAddBySelectedScalar(addend, left, right, rightIndex);

            /// <summary>
            /// float32x2_t vfma_laneq_f32 (float32x2_t a, float32x2_t b, float32x4_t v, const int lane)
            /// A64: FMLA Vd.2S, Vn.2S, Vm.S[lane]
            /// </summary>
            public static Vector64<float> FusedMultiplyAddBySelectedScalar(Vector64<float> addend, Vector64<float> left, Vector128<float> right, byte rightIndex) => FusedMultiplyAddBySelectedScalar(addend, left, right, rightIndex);

            /// <summary>
            /// float64x2_t vfmaq_laneq_f64 (float64x2_t a, float64x2_t b, float64x2_t v, const int lane)
            /// A64: FMLA Vd.2D, Vn.2D, Vm.D[lane]
            /// </summary>
            public static Vector128<double> FusedMultiplyAddBySelectedScalar(Vector128<double> addend, Vector128<double> left, Vector128<double> right, byte rightIndex) => FusedMultiplyAddBySelectedScalar(addend, left, right, rightIndex);

            /// <summary>
            /// float32x4_t vfmaq_lane_f32 (float32x4_t a, float32x4_t b, float32x2_t v, const int lane)
            /// A64: FMLA Vd.4S, Vn.4S, Vm.S[lane]
            /// </summary>
            public static Vector128<float> FusedMultiplyAddBySelectedScalar(Vector128<float> addend, Vector128<float> left, Vector64<float> right, byte rightIndex) => FusedMultiplyAddBySelectedScalar(addend, left, right, rightIndex);

            /// <summary>
            /// float32x4_t vfmaq_laneq_f32 (float32x4_t a, float32x4_t b, float32x4_t v, const int lane)
            /// A64: FMLA Vd.4S, Vn.4S, Vm.S[lane]
            /// </summary>
            public static Vector128<float> FusedMultiplyAddBySelectedScalar(Vector128<float> addend, Vector128<float> left, Vector128<float> right, byte rightIndex) => FusedMultiplyAddBySelectedScalar(addend, left, right, rightIndex);

            /// <summary>
            /// float64_t vfmad_laneq_f64 (float64_t a, float64_t b, float64x2_t v, const int lane)
            /// A64: FMLA Dd, Dn, Vm.D[lane]
            /// </summary>
            public static Vector64<double> FusedMultiplyAddScalarBySelectedScalar(Vector64<double> addend, Vector64<double> left, Vector128<double> right, byte rightIndex) => FusedMultiplyAddScalarBySelectedScalar(addend, left, right, rightIndex);

            /// <summary>
            /// float32_t vfmas_lane_f32 (float32_t a, float32_t b, float32x2_t v, const int lane)
            /// A64: FMLA Sd, Sn, Vm.S[lane]
            /// </summary>
            public static Vector64<float> FusedMultiplyAddScalarBySelectedScalar(Vector64<float> addend, Vector64<float> left, Vector64<float> right, byte rightIndex) => FusedMultiplyAddScalarBySelectedScalar(addend, left, right, rightIndex);

            /// <summary>
            /// float32_t vfmas_laneq_f32 (float32_t a, float32_t b, float32x4_t v, const int lane)
            /// A64: FMLA Sd, Sn, Vm.S[lane]
            /// </summary>
            public static Vector64<float> FusedMultiplyAddScalarBySelectedScalar(Vector64<float> addend, Vector64<float> left, Vector128<float> right, byte rightIndex) => FusedMultiplyAddScalarBySelectedScalar(addend, left, right, rightIndex);
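            // Usage sketch (illustrative only): the FusedMultiplyAdd/FusedMultiplySubtract families
            // compute addend + (left * right), or minuend - (left * right), with a single rounding step
            // (FMLA/FMLS), so results can differ in the last bit from a separate multiply followed by an
            // add ('a', 'b' and 'c' below are hypothetical Vector128<double> locals):
            //
            //     Vector128<double> r = AdvSimd.Arm64.FusedMultiplyAdd(a, b, c); // r = a + (b * c)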
            /// <summary>
            /// float64x2_t vfmsq_f64 (float64x2_t a, float64x2_t b, float64x2_t c)
            /// A64: FMLS Vd.2D, Vn.2D, Vm.2D
            /// </summary>
            public static Vector128<double> FusedMultiplySubtract(Vector128<double> minuend, Vector128<double> left, Vector128<double> right) => FusedMultiplySubtract(minuend, left, right);

            /// <summary>
            /// float32x2_t vfms_n_f32 (float32x2_t a, float32x2_t b, float32_t n)
            /// A64: FMLS Vd.2S, Vn.2S, Vm.S[0]
            /// </summary>
            public static Vector64<float> FusedMultiplySubtractByScalar(Vector64<float> minuend, Vector64<float> left, Vector64<float> right) => FusedMultiplySubtractByScalar(minuend, left, right);

            /// <summary>
            /// float64x2_t vfmsq_n_f64 (float64x2_t a, float64x2_t b, float64_t n)
            /// A64: FMLS Vd.2D, Vn.2D, Vm.D[0]
            /// </summary>
            public static Vector128<double> FusedMultiplySubtractByScalar(Vector128<double> minuend, Vector128<double> left, Vector64<double> right) => FusedMultiplySubtractByScalar(minuend, left, right);

            /// <summary>
            /// float32x4_t vfmsq_n_f32 (float32x4_t a, float32x4_t b, float32_t n)
            /// A64: FMLS Vd.4S, Vn.4S, Vm.S[0]
            /// </summary>
            public static Vector128<float> FusedMultiplySubtractByScalar(Vector128<float> minuend, Vector128<float> left, Vector64<float> right) => FusedMultiplySubtractByScalar(minuend, left, right);

            /// <summary>
            /// float32x2_t vfms_lane_f32 (float32x2_t a, float32x2_t b, float32x2_t v, const int lane)
            /// A64: FMLS Vd.2S, Vn.2S, Vm.S[lane]
            /// </summary>
            public static Vector64<float> FusedMultiplySubtractBySelectedScalar(Vector64<float> minuend, Vector64<float> left, Vector64<float> right, byte rightIndex) => FusedMultiplySubtractBySelectedScalar(minuend, left, right, rightIndex);

            /// <summary>
            /// float32x2_t vfms_laneq_f32 (float32x2_t a, float32x2_t b, float32x4_t v, const int lane)
            /// A64: FMLS Vd.2S, Vn.2S, Vm.S[lane]
            /// </summary>
            public static Vector64<float> FusedMultiplySubtractBySelectedScalar(Vector64<float> minuend, Vector64<float> left, Vector128<float> right, byte rightIndex) => FusedMultiplySubtractBySelectedScalar(minuend, left, right, rightIndex);

            /// <summary>
            /// float64x2_t vfmsq_laneq_f64 (float64x2_t a, float64x2_t b, float64x2_t v, const int lane)
            /// A64: FMLS Vd.2D, Vn.2D, Vm.D[lane]
            /// </summary>
            public static Vector128<double> FusedMultiplySubtractBySelectedScalar(Vector128<double> minuend, Vector128<double> left, Vector128<double> right, byte rightIndex) => FusedMultiplySubtractBySelectedScalar(minuend, left, right, rightIndex);

            /// <summary>
            /// float32x4_t vfmsq_lane_f32 (float32x4_t a, float32x4_t b, float32x2_t v, const int lane)
            /// A64: FMLS Vd.4S, Vn.4S, Vm.S[lane]
            /// </summary>
            public static Vector128<float> FusedMultiplySubtractBySelectedScalar(Vector128<float> minuend, Vector128<float> left, Vector64<float> right, byte rightIndex) => FusedMultiplySubtractBySelectedScalar(minuend, left, right, rightIndex);

            /// <summary>
            /// float32x4_t vfmsq_laneq_f32 (float32x4_t a, float32x4_t b, float32x4_t v, const int lane)
            /// A64: FMLS Vd.4S, Vn.4S, Vm.S[lane]
            /// </summary>
            public static Vector128<float> FusedMultiplySubtractBySelectedScalar(Vector128<float> minuend, Vector128<float> left, Vector128<float> right, byte rightIndex) => FusedMultiplySubtractBySelectedScalar(minuend, left, right, rightIndex);

            /// <summary>
            /// float64_t vfmsd_laneq_f64 (float64_t a, float64_t b, float64x2_t v, const int lane)
            /// A64: FMLS Dd, Dn, Vm.D[lane]
            /// </summary>
            public static Vector64<double> FusedMultiplySubtractScalarBySelectedScalar(Vector64<double> minuend, Vector64<double> left, Vector128<double> right, byte rightIndex) => FusedMultiplySubtractScalarBySelectedScalar(minuend, left, right, rightIndex);

            /// <summary>
            /// float32_t vfmss_lane_f32 (float32_t a, float32_t b, float32x2_t v, const int lane)
            /// A64: FMLS Sd, Sn, Vm.S[lane]
            /// </summary>
            public static Vector64<float> FusedMultiplySubtractScalarBySelectedScalar(Vector64<float> minuend, Vector64<float> left, Vector64<float> right, byte rightIndex) => FusedMultiplySubtractScalarBySelectedScalar(minuend, left, right, rightIndex);

            /// <summary>
            /// float32_t vfmss_laneq_f32 (float32_t a, float32_t b, float32x4_t v, const int lane)
            /// A64: FMLS Sd, Sn, Vm.S[lane]
            /// </summary>
            public static Vector64<float> FusedMultiplySubtractScalarBySelectedScalar(Vector64<float> minuend, Vector64<float> left, Vector128<float> right, byte rightIndex) => FusedMultiplySubtractScalarBySelectedScalar(minuend, left, right, rightIndex);

            /// <summary>
            /// uint8x8_t vcopy_lane_u8 (uint8x8_t a, const int lane1, uint8x8_t b, const int lane2)
            /// A64: INS Vd.B[lane1], Vn.B[lane2]
            /// </summary>
            public static Vector64<byte> InsertSelectedScalar(Vector64<byte> result, byte resultIndex, Vector64<byte> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));

            /// <summary>
            /// uint8x8_t vcopy_laneq_u8 (uint8x8_t a, const int lane1, uint8x16_t b, const int lane2)
            /// A64: INS Vd.B[lane1], Vn.B[lane2]
            /// </summary>
            public static Vector64<byte> InsertSelectedScalar(Vector64<byte> result, byte resultIndex, Vector128<byte> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));

            /// <summary>
            /// int16x4_t vcopy_lane_s16 (int16x4_t a, const int lane1, int16x4_t b, const int lane2)
            /// A64: INS Vd.H[lane1], Vn.H[lane2]
            /// </summary>
            public static Vector64<short> InsertSelectedScalar(Vector64<short> result, byte resultIndex, Vector64<short> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));

            /// <summary>
            /// int16x4_t vcopy_laneq_s16 (int16x4_t a, const int lane1, int16x8_t b, const int lane2)
            /// A64: INS Vd.H[lane1], Vn.H[lane2]
            /// </summary>
            public static Vector64<short> InsertSelectedScalar(Vector64<short> result, byte resultIndex, Vector128<short> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));

            /// <summary>
            /// int32x2_t vcopy_lane_s32 (int32x2_t a, const int lane1, int32x2_t b, const int lane2)
            /// A64: INS Vd.S[lane1], Vn.S[lane2]
            /// </summary>
            public static Vector64<int> InsertSelectedScalar(Vector64<int> result, byte resultIndex, Vector64<int> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));

            /// <summary>
            /// int32x2_t vcopy_laneq_s32 (int32x2_t a, const int lane1, int32x4_t b, const int lane2)
            /// A64: INS Vd.S[lane1], Vn.S[lane2]
            /// </summary>
            public static Vector64<int> InsertSelectedScalar(Vector64<int> result, byte resultIndex, Vector128<int> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));

            /// <summary>
            /// int8x8_t vcopy_lane_s8 (int8x8_t a, const int lane1, int8x8_t b, const int lane2)
            /// A64: INS Vd.B[lane1], Vn.B[lane2]
            /// </summary>
            public static Vector64<sbyte> InsertSelectedScalar(Vector64<sbyte> result, byte resultIndex, Vector64<sbyte> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));

            /// <summary>
            /// int8x8_t vcopy_laneq_s8 (int8x8_t a, const int lane1, int8x16_t b, const int lane2)
            /// A64: INS Vd.B[lane1], Vn.B[lane2]
            /// </summary>
            public static Vector64<sbyte> InsertSelectedScalar(Vector64<sbyte> result, byte resultIndex, Vector128<sbyte> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));
            /// <summary>
            /// float32x2_t vcopy_lane_f32 (float32x2_t a, const int lane1, float32x2_t b, const int lane2)
            /// A64: INS Vd.S[lane1], Vn.S[lane2]
            /// </summary>
            public static Vector64<float> InsertSelectedScalar(Vector64<float> result, byte resultIndex, Vector64<float> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));

            /// <summary>
            /// float32x2_t vcopy_laneq_f32 (float32x2_t a, const int lane1, float32x4_t b, const int lane2)
            /// A64: INS Vd.S[lane1], Vn.S[lane2]
            /// </summary>
            public static Vector64<float> InsertSelectedScalar(Vector64<float> result, byte resultIndex, Vector128<float> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));

            /// <summary>
            /// uint16x4_t vcopy_lane_u16 (uint16x4_t a, const int lane1, uint16x4_t b, const int lane2)
            /// A64: INS Vd.H[lane1], Vn.H[lane2]
            /// </summary>
            public static Vector64<ushort> InsertSelectedScalar(Vector64<ushort> result, byte resultIndex, Vector64<ushort> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));

            /// <summary>
            /// uint16x4_t vcopy_laneq_u16 (uint16x4_t a, const int lane1, uint16x8_t b, const int lane2)
            /// A64: INS Vd.H[lane1], Vn.H[lane2]
            /// </summary>
            public static Vector64<ushort> InsertSelectedScalar(Vector64<ushort> result, byte resultIndex, Vector128<ushort> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));

            /// <summary>
            /// uint32x2_t vcopy_lane_u32 (uint32x2_t a, const int lane1, uint32x2_t b, const int lane2)
            /// A64: INS Vd.S[lane1], Vn.S[lane2]
            /// </summary>
            public static Vector64<uint> InsertSelectedScalar(Vector64<uint> result, byte resultIndex, Vector64<uint> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));

            /// <summary>
            /// uint32x2_t vcopy_laneq_u32 (uint32x2_t a, const int lane1, uint32x4_t b, const int lane2)
            /// A64: INS Vd.S[lane1], Vn.S[lane2]
            /// </summary>
            public static Vector64<uint> InsertSelectedScalar(Vector64<uint> result, byte resultIndex, Vector128<uint> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));

            /// <summary>
            /// uint8x16_t vcopyq_lane_u8 (uint8x16_t a, const int lane1, uint8x8_t b, const int lane2)
            /// A64: INS Vd.B[lane1], Vn.B[lane2]
            /// </summary>
            public static Vector128<byte> InsertSelectedScalar(Vector128<byte> result, byte resultIndex, Vector64<byte> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));

            /// <summary>
            /// uint8x16_t vcopyq_laneq_u8 (uint8x16_t a, const int lane1, uint8x16_t b, const int lane2)
            /// A64: INS Vd.B[lane1], Vn.B[lane2]
            /// </summary>
            public static Vector128<byte> InsertSelectedScalar(Vector128<byte> result, byte resultIndex, Vector128<byte> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));

            /// <summary>
            /// float64x2_t vcopyq_laneq_f64 (float64x2_t a, const int lane1, float64x2_t b, const int lane2)
            /// A64: INS Vd.D[lane1], Vn.D[lane2]
            /// </summary>
            public static Vector128<double> InsertSelectedScalar(Vector128<double> result, byte resultIndex, Vector128<double> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));

            /// <summary>
            /// int16x8_t vcopyq_lane_s16 (int16x8_t a, const int lane1, int16x4_t b, const int lane2)
            /// A64: INS Vd.H[lane1], Vn.H[lane2]
            /// </summary>
            public static Vector128<short> InsertSelectedScalar(Vector128<short> result, byte resultIndex, Vector64<short> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));

            /// <summary>
            /// int16x8_t vcopyq_laneq_s16 (int16x8_t a, const int lane1, int16x8_t b, const int lane2)
            /// A64: INS Vd.H[lane1], Vn.H[lane2]
            /// </summary>
            public static Vector128<short> InsertSelectedScalar(Vector128<short> result, byte resultIndex, Vector128<short> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));

            /// <summary>
            /// int32x4_t vcopyq_lane_s32 (int32x4_t a, const int lane1, int32x2_t b, const int lane2)
            /// A64: INS Vd.S[lane1], Vn.S[lane2]
            /// </summary>
            public static Vector128<int> InsertSelectedScalar(Vector128<int> result, byte resultIndex, Vector64<int> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));

            /// <summary>
            /// int32x4_t vcopyq_laneq_s32 (int32x4_t a, const int lane1, int32x4_t b, const int lane2)
            /// A64: INS Vd.S[lane1], Vn.S[lane2]
            /// </summary>
            public static Vector128<int> InsertSelectedScalar(Vector128<int> result, byte resultIndex, Vector128<int> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));

            /// <summary>
            /// int64x2_t vcopyq_laneq_s64 (int64x2_t a, const int lane1, int64x2_t b, const int lane2)
            /// A64: INS Vd.D[lane1], Vn.D[lane2]
            /// </summary>
            public static Vector128<long> InsertSelectedScalar(Vector128<long> result, byte resultIndex, Vector128<long> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));

            /// <summary>
            /// int8x16_t vcopyq_lane_s8 (int8x16_t a, const int lane1, int8x8_t b, const int lane2)
            /// A64: INS Vd.B[lane1], Vn.B[lane2]
            /// </summary>
            public static Vector128<sbyte> InsertSelectedScalar(Vector128<sbyte> result, byte resultIndex, Vector64<sbyte> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));

            /// <summary>
            /// int8x16_t vcopyq_laneq_s8 (int8x16_t a, const int lane1, int8x16_t b, const int lane2)
            /// A64: INS Vd.B[lane1], Vn.B[lane2]
            /// </summary>
            public static Vector128<sbyte> InsertSelectedScalar(Vector128<sbyte> result, byte resultIndex, Vector128<sbyte> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));

            /// <summary>
            /// float32x4_t vcopyq_lane_f32 (float32x4_t a, const int lane1, float32x2_t b, const int lane2)
            /// A64: INS Vd.S[lane1], Vn.S[lane2]
            /// </summary>
            public static Vector128<float> InsertSelectedScalar(Vector128<float> result, byte resultIndex, Vector64<float> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));

            /// <summary>
            /// float32x4_t vcopyq_laneq_f32 (float32x4_t a, const int lane1, float32x4_t b, const int lane2)
            /// A64: INS Vd.S[lane1], Vn.S[lane2]
            /// </summary>
            public static Vector128<float> InsertSelectedScalar(Vector128<float> result, byte resultIndex, Vector128<float> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));

            /// <summary>
            /// uint16x8_t vcopyq_lane_u16 (uint16x8_t a, const int lane1, uint16x4_t b, const int lane2)
            /// A64: INS Vd.H[lane1], Vn.H[lane2]
            /// </summary>
            public static Vector128<ushort> InsertSelectedScalar(Vector128<ushort> result, byte resultIndex, Vector64<ushort> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));

            /// <summary>
            /// uint16x8_t vcopyq_laneq_u16 (uint16x8_t a, const int lane1, uint16x8_t b, const int lane2)
            /// A64: INS Vd.H[lane1], Vn.H[lane2]
            /// </summary>
            public static Vector128<ushort> InsertSelectedScalar(Vector128<ushort> result, byte resultIndex, Vector128<ushort> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));

            /// <summary>
            /// uint32x4_t vcopyq_lane_u32 (uint32x4_t a, const int lane1, uint32x2_t b, const int lane2)
            /// A64: INS Vd.S[lane1], Vn.S[lane2]
            /// </summary>
            public static Vector128<uint> InsertSelectedScalar(Vector128<uint> result, byte resultIndex, Vector64<uint> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));

            /// <summary>
            /// uint32x4_t vcopyq_laneq_u32 (uint32x4_t a, const int lane1, uint32x4_t b, const int lane2)
            /// A64: INS Vd.S[lane1], Vn.S[lane2]
            /// </summary>
            public static Vector128<uint> InsertSelectedScalar(Vector128<uint> result, byte resultIndex, Vector128<uint> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));

            /// <summary>
            /// uint64x2_t vcopyq_laneq_u64 (uint64x2_t a, const int lane1, uint64x2_t b, const int lane2)
            /// A64: INS Vd.D[lane1], Vn.D[lane2]
            /// </summary>
            public static Vector128<ulong> InsertSelectedScalar(Vector128<ulong> result, byte resultIndex, Vector128<ulong> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));
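            // Usage sketch (illustrative only): InsertSelectedScalar copies one lane of 'value' into one
            // lane of 'result' (INS Vd[lane1], Vn[lane2]); the lane indices are expected to be constants:
            //
            //     Vector128<int> dst = Vector128.Create(0, 0, 0, 0);
            //     Vector128<int> src = Vector128.Create(10, 20, 30, 40);
            //     dst = AdvSimd.Arm64.InsertSelectedScalar(dst, 3, src, 1); // <0, 0, 0, 20>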
/// <summary>
/// float64x2_t vld1q_dup_f64 (float64_t const * ptr)
/// A64: LD1R { Vt.2D }, [Xn]
/// </summary>
public static unsafe Vector128<double> LoadAndReplicateToVector128(double* address) => LoadAndReplicateToVector128(address);

/// <summary>
/// int64x2_t vld1q_dup_s64 (int64_t const * ptr)
/// A64: LD1R { Vt.2D }, [Xn]
/// </summary>
public static unsafe Vector128<long> LoadAndReplicateToVector128(long* address) => LoadAndReplicateToVector128(address);

/// <summary>
/// uint64x2_t vld1q_dup_u64 (uint64_t const * ptr)
/// A64: LD1R { Vt.2D }, [Xn]
/// </summary>
public static unsafe Vector128<ulong> LoadAndReplicateToVector128(ulong* address) => LoadAndReplicateToVector128(address);
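// Illustrative sketch (assumed caller context): LoadAndReplicateToVector128 broadcasts
// the scalar at the given address into every lane with a single LD1R load.
//
//   unsafe
//   {
//       double x = 3.5;
//       Vector128<double> v = AdvSimd.Arm64.LoadAndReplicateToVector128(&x); // <3.5, 3.5>
//   }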
/// <summary>
/// A64: LDP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe (Vector64<byte> Value1, Vector64<byte> Value2) LoadPairVector64(byte* address) => LoadPairVector64(address);

/// <summary>
/// A64: LDP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe (Vector64<double> Value1, Vector64<double> Value2) LoadPairVector64(double* address) => LoadPairVector64(address);

/// <summary>
/// A64: LDP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe (Vector64<short> Value1, Vector64<short> Value2) LoadPairVector64(short* address) => LoadPairVector64(address);

/// <summary>
/// A64: LDP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe (Vector64<int> Value1, Vector64<int> Value2) LoadPairVector64(int* address) => LoadPairVector64(address);

/// <summary>
/// A64: LDP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe (Vector64<long> Value1, Vector64<long> Value2) LoadPairVector64(long* address) => LoadPairVector64(address);

/// <summary>
/// A64: LDP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe (Vector64<sbyte> Value1, Vector64<sbyte> Value2) LoadPairVector64(sbyte* address) => LoadPairVector64(address);

/// <summary>
/// A64: LDP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe (Vector64<float> Value1, Vector64<float> Value2) LoadPairVector64(float* address) => LoadPairVector64(address);

/// <summary>
/// A64: LDP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe (Vector64<ushort> Value1, Vector64<ushort> Value2) LoadPairVector64(ushort* address) => LoadPairVector64(address);

/// <summary>
/// A64: LDP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe (Vector64<uint> Value1, Vector64<uint> Value2) LoadPairVector64(uint* address) => LoadPairVector64(address);

/// <summary>
/// A64: LDP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe (Vector64<ulong> Value1, Vector64<ulong> Value2) LoadPairVector64(ulong* address) => LoadPairVector64(address);

/// <summary>
/// A64: LDP St1, St2, [Xn]
/// </summary>
public static unsafe (Vector64<int> Value1, Vector64<int> Value2) LoadPairScalarVector64(int* address) => LoadPairScalarVector64(address);

/// <summary>
/// A64: LDP St1, St2, [Xn]
/// </summary>
public static unsafe (Vector64<float> Value1, Vector64<float> Value2) LoadPairScalarVector64(float* address) => LoadPairScalarVector64(address);

/// <summary>
/// A64: LDP St1, St2, [Xn]
/// </summary>
public static unsafe (Vector64<uint> Value1, Vector64<uint> Value2) LoadPairScalarVector64(uint* address) => LoadPairScalarVector64(address);

/// <summary>
/// A64: LDP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe (Vector128<byte> Value1, Vector128<byte> Value2) LoadPairVector128(byte* address) => LoadPairVector128(address);

/// <summary>
/// A64: LDP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe (Vector128<double> Value1, Vector128<double> Value2) LoadPairVector128(double* address) => LoadPairVector128(address);

/// <summary>
/// A64: LDP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe (Vector128<short> Value1, Vector128<short> Value2) LoadPairVector128(short* address) => LoadPairVector128(address);

/// <summary>
/// A64: LDP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe (Vector128<int> Value1, Vector128<int> Value2) LoadPairVector128(int* address) => LoadPairVector128(address);

/// <summary>
/// A64: LDP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe (Vector128<long> Value1, Vector128<long> Value2) LoadPairVector128(long* address) => LoadPairVector128(address);

/// <summary>
/// A64: LDP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe (Vector128<sbyte> Value1, Vector128<sbyte> Value2) LoadPairVector128(sbyte* address) => LoadPairVector128(address);

/// <summary>
/// A64: LDP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe (Vector128<float> Value1, Vector128<float> Value2) LoadPairVector128(float* address) => LoadPairVector128(address);

/// <summary>
/// A64: LDP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe (Vector128<ushort> Value1, Vector128<ushort> Value2) LoadPairVector128(ushort* address) => LoadPairVector128(address);

/// <summary>
/// A64: LDP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe (Vector128<uint> Value1, Vector128<uint> Value2) LoadPairVector128(uint* address) => LoadPairVector128(address);

/// <summary>
/// A64: LDP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe (Vector128<ulong> Value1, Vector128<ulong> Value2) LoadPairVector128(ulong* address) => LoadPairVector128(address);
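// Illustrative sketch (assumed caller context): LoadPairVector64 fills two registers from
// consecutive memory with one LDP and returns both as a tuple, which is cheaper than two
// separate loads.
//
//   unsafe
//   {
//       int* p = stackalloc int[] { 1, 2, 3, 4 };
//       (Vector64<int> lo, Vector64<int> hi) = AdvSimd.Arm64.LoadPairVector64(p);
//       // lo = <1, 2>, hi = <3, 4>
//   }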
/// <summary>
/// A64: LDNP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe (Vector64<byte> Value1, Vector64<byte> Value2) LoadPairVector64NonTemporal(byte* address) => LoadPairVector64NonTemporal(address);

/// <summary>
/// A64: LDNP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe (Vector64<double> Value1, Vector64<double> Value2) LoadPairVector64NonTemporal(double* address) => LoadPairVector64NonTemporal(address);

/// <summary>
/// A64: LDNP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe (Vector64<short> Value1, Vector64<short> Value2) LoadPairVector64NonTemporal(short* address) => LoadPairVector64NonTemporal(address);

/// <summary>
/// A64: LDNP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe (Vector64<int> Value1, Vector64<int> Value2) LoadPairVector64NonTemporal(int* address) => LoadPairVector64NonTemporal(address);

/// <summary>
/// A64: LDNP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe (Vector64<long> Value1, Vector64<long> Value2) LoadPairVector64NonTemporal(long* address) => LoadPairVector64NonTemporal(address);

/// <summary>
/// A64: LDNP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe (Vector64<sbyte> Value1, Vector64<sbyte> Value2) LoadPairVector64NonTemporal(sbyte* address) => LoadPairVector64NonTemporal(address);

/// <summary>
/// A64: LDNP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe (Vector64<float> Value1, Vector64<float> Value2) LoadPairVector64NonTemporal(float* address) => LoadPairVector64NonTemporal(address);

/// <summary>
/// A64: LDNP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe (Vector64<ushort> Value1, Vector64<ushort> Value2) LoadPairVector64NonTemporal(ushort* address) => LoadPairVector64NonTemporal(address);

/// <summary>
/// A64: LDNP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe (Vector64<uint> Value1, Vector64<uint> Value2) LoadPairVector64NonTemporal(uint* address) => LoadPairVector64NonTemporal(address);

/// <summary>
/// A64: LDNP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe (Vector64<ulong> Value1, Vector64<ulong> Value2) LoadPairVector64NonTemporal(ulong* address) => LoadPairVector64NonTemporal(address);

/// <summary>
/// A64: LDNP St1, St2, [Xn]
/// </summary>
public static unsafe (Vector64<int> Value1, Vector64<int> Value2) LoadPairScalarVector64NonTemporal(int* address) => LoadPairScalarVector64NonTemporal(address);

/// <summary>
/// A64: LDNP St1, St2, [Xn]
/// </summary>
public static unsafe (Vector64<float> Value1, Vector64<float> Value2) LoadPairScalarVector64NonTemporal(float* address) => LoadPairScalarVector64NonTemporal(address);

/// <summary>
/// A64: LDNP St1, St2, [Xn]
/// </summary>
public static unsafe (Vector64<uint> Value1, Vector64<uint> Value2) LoadPairScalarVector64NonTemporal(uint* address) => LoadPairScalarVector64NonTemporal(address);

/// <summary>
/// A64: LDNP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe (Vector128<byte> Value1, Vector128<byte> Value2) LoadPairVector128NonTemporal(byte* address) => LoadPairVector128NonTemporal(address);

/// <summary>
/// A64: LDNP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe (Vector128<double> Value1, Vector128<double> Value2) LoadPairVector128NonTemporal(double* address) => LoadPairVector128NonTemporal(address);

/// <summary>
/// A64: LDNP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe (Vector128<short> Value1, Vector128<short> Value2) LoadPairVector128NonTemporal(short* address) => LoadPairVector128NonTemporal(address);

/// <summary>
/// A64: LDNP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe (Vector128<int> Value1, Vector128<int> Value2) LoadPairVector128NonTemporal(int* address) => LoadPairVector128NonTemporal(address);

/// <summary>
/// A64: LDNP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe (Vector128<long> Value1, Vector128<long> Value2) LoadPairVector128NonTemporal(long* address) => LoadPairVector128NonTemporal(address);

/// <summary>
/// A64: LDNP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe (Vector128<sbyte> Value1, Vector128<sbyte> Value2) LoadPairVector128NonTemporal(sbyte* address) => LoadPairVector128NonTemporal(address);

/// <summary>
/// A64: LDNP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe (Vector128<float> Value1, Vector128<float> Value2) LoadPairVector128NonTemporal(float* address) => LoadPairVector128NonTemporal(address);

/// <summary>
/// A64: LDNP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe (Vector128<ushort> Value1, Vector128<ushort> Value2) LoadPairVector128NonTemporal(ushort* address) => LoadPairVector128NonTemporal(address);

/// <summary>
/// A64: LDNP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe (Vector128<uint> Value1, Vector128<uint> Value2) LoadPairVector128NonTemporal(uint* address) => LoadPairVector128NonTemporal(address);

/// <summary>
/// A64: LDNP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe (Vector128<ulong> Value1, Vector128<ulong> Value2) LoadPairVector128NonTemporal(ulong* address) => LoadPairVector128NonTemporal(address);
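// Note (a descriptive aside, not normative documentation): the NonTemporal variants emit
// LDNP, which hints that the data is unlikely to be reused soon, so the core may avoid
// caching it. The loaded values are identical to the temporal LDP forms; only the hint differs.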
/// <summary>
/// float64x2_t vmaxq_f64 (float64x2_t a, float64x2_t b)
/// A64: FMAX Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<double> Max(Vector128<double> left, Vector128<double> right) => Max(left, right);

/// <summary>
/// uint8_t vmaxv_u8 (uint8x8_t a)
/// A64: UMAXV Bd, Vn.8B
/// </summary>
public static Vector64<byte> MaxAcross(Vector64<byte> value) => MaxAcross(value);

/// <summary>
/// int16_t vmaxv_s16 (int16x4_t a)
/// A64: SMAXV Hd, Vn.4H
/// </summary>
public static Vector64<short> MaxAcross(Vector64<short> value) => MaxAcross(value);

/// <summary>
/// int8_t vmaxv_s8 (int8x8_t a)
/// A64: SMAXV Bd, Vn.8B
/// </summary>
public static Vector64<sbyte> MaxAcross(Vector64<sbyte> value) => MaxAcross(value);

/// <summary>
/// uint16_t vmaxv_u16 (uint16x4_t a)
/// A64: UMAXV Hd, Vn.4H
/// </summary>
public static Vector64<ushort> MaxAcross(Vector64<ushort> value) => MaxAcross(value);

/// <summary>
/// uint8_t vmaxvq_u8 (uint8x16_t a)
/// A64: UMAXV Bd, Vn.16B
/// </summary>
public static Vector64<byte> MaxAcross(Vector128<byte> value) => MaxAcross(value);

/// <summary>
/// int16_t vmaxvq_s16 (int16x8_t a)
/// A64: SMAXV Hd, Vn.8H
/// </summary>
public static Vector64<short> MaxAcross(Vector128<short> value) => MaxAcross(value);

/// <summary>
/// int32_t vmaxvq_s32 (int32x4_t a)
/// A64: SMAXV Sd, Vn.4S
/// </summary>
public static Vector64<int> MaxAcross(Vector128<int> value) => MaxAcross(value);

/// <summary>
/// int8_t vmaxvq_s8 (int8x16_t a)
/// A64: SMAXV Bd, Vn.16B
/// </summary>
public static Vector64<sbyte> MaxAcross(Vector128<sbyte> value) => MaxAcross(value);

/// <summary>
/// float32_t vmaxvq_f32 (float32x4_t a)
/// A64: FMAXV Sd, Vn.4S
/// </summary>
public static Vector64<float> MaxAcross(Vector128<float> value) => MaxAcross(value);

/// <summary>
/// uint16_t vmaxvq_u16 (uint16x8_t a)
/// A64: UMAXV Hd, Vn.8H
/// </summary>
public static Vector64<ushort> MaxAcross(Vector128<ushort> value) => MaxAcross(value);

/// <summary>
/// uint32_t vmaxvq_u32 (uint32x4_t a)
/// A64: UMAXV Sd, Vn.4S
/// </summary>
public static Vector64<uint> MaxAcross(Vector128<uint> value) => MaxAcross(value);

/// <summary>
/// float64x2_t vmaxnmq_f64 (float64x2_t a, float64x2_t b)
/// A64: FMAXNM Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<double> MaxNumber(Vector128<double> left, Vector128<double> right) => MaxNumber(left, right);

/// <summary>
/// float32_t vmaxnmvq_f32 (float32x4_t a)
/// A64: FMAXNMV Sd, Vn.4S
/// </summary>
public static Vector64<float> MaxNumberAcross(Vector128<float> value) => MaxNumberAcross(value);

/// <summary>
/// float32x2_t vpmaxnm_f32 (float32x2_t a, float32x2_t b)
/// A64: FMAXNMP Vd.2S, Vn.2S, Vm.2S
/// </summary>
public static Vector64<float> MaxNumberPairwise(Vector64<float> left, Vector64<float> right) => MaxNumberPairwise(left, right);

/// <summary>
/// float64x2_t vpmaxnmq_f64 (float64x2_t a, float64x2_t b)
/// A64: FMAXNMP Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<double> MaxNumberPairwise(Vector128<double> left, Vector128<double> right) => MaxNumberPairwise(left, right);

/// <summary>
/// float32x4_t vpmaxnmq_f32 (float32x4_t a, float32x4_t b)
/// A64: FMAXNMP Vd.4S, Vn.4S, Vm.4S
/// </summary>
public static Vector128<float> MaxNumberPairwise(Vector128<float> left, Vector128<float> right) => MaxNumberPairwise(left, right);

/// <summary>
/// float32_t vpmaxnms_f32 (float32x2_t a)
/// A64: FMAXNMP Sd, Vn.2S
/// </summary>
public static Vector64<float> MaxNumberPairwiseScalar(Vector64<float> value) => MaxNumberPairwiseScalar(value);

/// <summary>
/// float64_t vpmaxnmqd_f64 (float64x2_t a)
/// A64: FMAXNMP Dd, Vn.2D
/// </summary>
public static Vector64<double> MaxNumberPairwiseScalar(Vector128<double> value) => MaxNumberPairwiseScalar(value);

/// <summary>
/// uint8x16_t vpmaxq_u8 (uint8x16_t a, uint8x16_t b)
/// A64: UMAXP Vd.16B, Vn.16B, Vm.16B
/// </summary>
public static Vector128<byte> MaxPairwise(Vector128<byte> left, Vector128<byte> right) => MaxPairwise(left, right);

/// <summary>
/// float64x2_t vpmaxq_f64 (float64x2_t a, float64x2_t b)
/// A64: FMAXP Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<double> MaxPairwise(Vector128<double> left, Vector128<double> right) => MaxPairwise(left, right);

/// <summary>
/// int16x8_t vpmaxq_s16 (int16x8_t a, int16x8_t b)
/// A64: SMAXP Vd.8H, Vn.8H, Vm.8H
/// </summary>
public static Vector128<short> MaxPairwise(Vector128<short> left, Vector128<short> right) => MaxPairwise(left, right);

/// <summary>
/// int32x4_t vpmaxq_s32 (int32x4_t a, int32x4_t b)
/// A64: SMAXP Vd.4S, Vn.4S, Vm.4S
/// </summary>
public static Vector128<int> MaxPairwise(Vector128<int> left, Vector128<int> right) => MaxPairwise(left, right);

/// <summary>
/// int8x16_t vpmaxq_s8 (int8x16_t a, int8x16_t b)
/// A64: SMAXP Vd.16B, Vn.16B, Vm.16B
/// </summary>
public static Vector128<sbyte> MaxPairwise(Vector128<sbyte> left, Vector128<sbyte> right) => MaxPairwise(left, right);

/// <summary>
/// float32x4_t vpmaxq_f32 (float32x4_t a, float32x4_t b)
/// A64: FMAXP Vd.4S, Vn.4S, Vm.4S
/// </summary>
public static Vector128<float> MaxPairwise(Vector128<float> left, Vector128<float> right) => MaxPairwise(left, right);

/// <summary>
/// uint16x8_t vpmaxq_u16 (uint16x8_t a, uint16x8_t b)
/// A64: UMAXP Vd.8H, Vn.8H, Vm.8H
/// </summary>
public static Vector128<ushort> MaxPairwise(Vector128<ushort> left, Vector128<ushort> right) => MaxPairwise(left, right);

/// <summary>
/// uint32x4_t vpmaxq_u32 (uint32x4_t a, uint32x4_t b)
/// A64: UMAXP Vd.4S, Vn.4S, Vm.4S
/// </summary>
public static Vector128<uint> MaxPairwise(Vector128<uint> left, Vector128<uint> right) => MaxPairwise(left, right);

/// <summary>
/// float32_t vpmaxs_f32 (float32x2_t a)
/// A64: FMAXP Sd, Vn.2S
/// </summary>
public static Vector64<float> MaxPairwiseScalar(Vector64<float> value) => MaxPairwiseScalar(value);

/// <summary>
/// float64_t vpmaxqd_f64 (float64x2_t a)
/// A64: FMAXP Dd, Vn.2D
/// </summary>
public static Vector64<double> MaxPairwiseScalar(Vector128<double> value) => MaxPairwiseScalar(value);

/// <summary>
/// float64x1_t vmax_f64 (float64x1_t a, float64x1_t b)
/// A64: FMAX Dd, Dn, Dm
/// </summary>
public static Vector64<double> MaxScalar(Vector64<double> left, Vector64<double> right) => MaxScalar(left, right);

/// <summary>
/// float32_t vmaxs_f32 (float32_t a, float32_t b)
/// A64: FMAX Sd, Sn, Sm
/// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
/// </summary>
public static Vector64<float> MaxScalar(Vector64<float> left, Vector64<float> right) => MaxScalar(left, right);
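// Illustrative sketch (not part of the API surface): the Across reductions collapse every
// lane into a single value in lane 0 of the result, e.g. a horizontal maximum over sixteen
// bytes in one UMAXV:
//
//   Vector128<byte> v = Vector128.Create((byte)1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16);
//   byte max = AdvSimd.Arm64.MaxAcross(v).ToScalar(); // 16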
/// <summary>
/// float64x2_t vminq_f64 (float64x2_t a, float64x2_t b)
/// A64: FMIN Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<double> Min(Vector128<double> left, Vector128<double> right) => Min(left, right);

/// <summary>
/// uint8_t vminv_u8 (uint8x8_t a)
/// A64: UMINV Bd, Vn.8B
/// </summary>
public static Vector64<byte> MinAcross(Vector64<byte> value) => MinAcross(value);

/// <summary>
/// int16_t vminv_s16 (int16x4_t a)
/// A64: SMINV Hd, Vn.4H
/// </summary>
public static Vector64<short> MinAcross(Vector64<short> value) => MinAcross(value);

/// <summary>
/// int8_t vminv_s8 (int8x8_t a)
/// A64: SMINV Bd, Vn.8B
/// </summary>
public static Vector64<sbyte> MinAcross(Vector64<sbyte> value) => MinAcross(value);

/// <summary>
/// uint16_t vminv_u16 (uint16x4_t a)
/// A64: UMINV Hd, Vn.4H
/// </summary>
public static Vector64<ushort> MinAcross(Vector64<ushort> value) => MinAcross(value);

/// <summary>
/// uint8_t vminvq_u8 (uint8x16_t a)
/// A64: UMINV Bd, Vn.16B
/// </summary>
public static Vector64<byte> MinAcross(Vector128<byte> value) => MinAcross(value);

/// <summary>
/// int16_t vminvq_s16 (int16x8_t a)
/// A64: SMINV Hd, Vn.8H
/// </summary>
public static Vector64<short> MinAcross(Vector128<short> value) => MinAcross(value);

/// <summary>
/// int32_t vminvq_s32 (int32x4_t a)
/// A64: SMINV Sd, Vn.4S
/// </summary>
public static Vector64<int> MinAcross(Vector128<int> value) => MinAcross(value);

/// <summary>
/// int8_t vminvq_s8 (int8x16_t a)
/// A64: SMINV Bd, Vn.16B
/// </summary>
public static Vector64<sbyte> MinAcross(Vector128<sbyte> value) => MinAcross(value);

/// <summary>
/// float32_t vminvq_f32 (float32x4_t a)
/// A64: FMINV Sd, Vn.4S
/// </summary>
public static Vector64<float> MinAcross(Vector128<float> value) => MinAcross(value);

/// <summary>
/// uint16_t vminvq_u16 (uint16x8_t a)
/// A64: UMINV Hd, Vn.8H
/// </summary>
public static Vector64<ushort> MinAcross(Vector128<ushort> value) => MinAcross(value);

/// <summary>
/// uint32_t vminvq_u32 (uint32x4_t a)
/// A64: UMINV Sd, Vn.4S
/// </summary>
public static Vector64<uint> MinAcross(Vector128<uint> value) => MinAcross(value);

/// <summary>
/// float64x2_t vminnmq_f64 (float64x2_t a, float64x2_t b)
/// A64: FMINNM Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<double> MinNumber(Vector128<double> left, Vector128<double> right) => MinNumber(left, right);

/// <summary>
/// float32_t vminnmvq_f32 (float32x4_t a)
/// A64: FMINNMV Sd, Vn.4S
/// </summary>
public static Vector64<float> MinNumberAcross(Vector128<float> value) => MinNumberAcross(value);

/// <summary>
/// float32x2_t vpminnm_f32 (float32x2_t a, float32x2_t b)
/// A64: FMINNMP Vd.2S, Vn.2S, Vm.2S
/// </summary>
public static Vector64<float> MinNumberPairwise(Vector64<float> left, Vector64<float> right) => MinNumberPairwise(left, right);

/// <summary>
/// float64x2_t vpminnmq_f64 (float64x2_t a, float64x2_t b)
/// A64: FMINNMP Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<double> MinNumberPairwise(Vector128<double> left, Vector128<double> right) => MinNumberPairwise(left, right);

/// <summary>
/// float32x4_t vpminnmq_f32 (float32x4_t a, float32x4_t b)
/// A64: FMINNMP Vd.4S, Vn.4S, Vm.4S
/// </summary>
public static Vector128<float> MinNumberPairwise(Vector128<float> left, Vector128<float> right) => MinNumberPairwise(left, right);

/// <summary>
/// float32_t vpminnms_f32 (float32x2_t a)
/// A64: FMINNMP Sd, Vn.2S
/// </summary>
public static Vector64<float> MinNumberPairwiseScalar(Vector64<float> value) => MinNumberPairwiseScalar(value);

/// <summary>
/// float64_t vpminnmqd_f64 (float64x2_t a)
/// A64: FMINNMP Dd, Vn.2D
/// </summary>
public static Vector64<double> MinNumberPairwiseScalar(Vector128<double> value) => MinNumberPairwiseScalar(value);

/// <summary>
/// uint8x16_t vpminq_u8 (uint8x16_t a, uint8x16_t b)
/// A64: UMINP Vd.16B, Vn.16B, Vm.16B
/// </summary>
public static Vector128<byte> MinPairwise(Vector128<byte> left, Vector128<byte> right) => MinPairwise(left, right);

/// <summary>
/// float64x2_t vpminq_f64 (float64x2_t a, float64x2_t b)
/// A64: FMINP Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<double> MinPairwise(Vector128<double> left, Vector128<double> right) => MinPairwise(left, right);

/// <summary>
/// int16x8_t vpminq_s16 (int16x8_t a, int16x8_t b)
/// A64: SMINP Vd.8H, Vn.8H, Vm.8H
/// </summary>
public static Vector128<short> MinPairwise(Vector128<short> left, Vector128<short> right) => MinPairwise(left, right);

/// <summary>
/// int32x4_t vpminq_s32 (int32x4_t a, int32x4_t b)
/// A64: SMINP Vd.4S, Vn.4S, Vm.4S
/// </summary>
public static Vector128<int> MinPairwise(Vector128<int> left, Vector128<int> right) => MinPairwise(left, right);

/// <summary>
/// int8x16_t vpminq_s8 (int8x16_t a, int8x16_t b)
/// A64: SMINP Vd.16B, Vn.16B, Vm.16B
/// </summary>
public static Vector128<sbyte> MinPairwise(Vector128<sbyte> left, Vector128<sbyte> right) => MinPairwise(left, right);

/// <summary>
/// float32x4_t vpminq_f32 (float32x4_t a, float32x4_t b)
/// A64: FMINP Vd.4S, Vn.4S, Vm.4S
/// </summary>
public static Vector128<float> MinPairwise(Vector128<float> left, Vector128<float> right) => MinPairwise(left, right);

/// <summary>
/// uint16x8_t vpminq_u16 (uint16x8_t a, uint16x8_t b)
/// A64: UMINP Vd.8H, Vn.8H, Vm.8H
/// </summary>
public static Vector128<ushort> MinPairwise(Vector128<ushort> left, Vector128<ushort> right) => MinPairwise(left, right);

/// <summary>
/// uint32x4_t vpminq_u32 (uint32x4_t a, uint32x4_t b)
/// A64: UMINP Vd.4S, Vn.4S, Vm.4S
/// </summary>
public static Vector128<uint> MinPairwise(Vector128<uint> left, Vector128<uint> right) => MinPairwise(left, right);

/// <summary>
/// float32_t vpmins_f32 (float32x2_t a)
/// A64: FMINP Sd, Vn.2S
/// </summary>
public static Vector64<float> MinPairwiseScalar(Vector64<float> value) => MinPairwiseScalar(value);

/// <summary>
/// float64_t vpminqd_f64 (float64x2_t a)
/// A64: FMINP Dd, Vn.2D
/// </summary>
public static Vector64<double> MinPairwiseScalar(Vector128<double> value) => MinPairwiseScalar(value);

/// <summary>
/// float64x1_t vmin_f64 (float64x1_t a, float64x1_t b)
/// A64: FMIN Dd, Dn, Dm
/// </summary>
public static Vector64<double> MinScalar(Vector64<double> left, Vector64<double> right) => MinScalar(left, right);

/// <summary>
/// float32_t vmins_f32 (float32_t a, float32_t b)
/// A64: FMIN Sd, Sn, Sm
/// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
/// </summary>
public static Vector64<float> MinScalar(Vector64<float> left, Vector64<float> right) => MinScalar(left, right);
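// Note on NaN handling (a descriptive sketch): Min/Max map to FMIN/FMAX, which propagate
// NaN, while MinNumber/MaxNumber map to FMINNM/FMAXNM, which prefer the numeric operand
// when exactly one input is NaN (IEEE 754 minNum/maxNum semantics).
//
//   Vector128<double> a = Vector128.Create(double.NaN, 1.0);
//   Vector128<double> b = Vector128.Create(2.0, double.NaN);
//   Vector128<double> m  = AdvSimd.Arm64.Min(a, b);       // <NaN, NaN>
//   Vector128<double> mn = AdvSimd.Arm64.MinNumber(a, b); // <2.0, 1.0>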
/// <summary>
/// float64x2_t vmulq_f64 (float64x2_t a, float64x2_t b)
/// A64: FMUL Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<double> Multiply(Vector128<double> left, Vector128<double> right) => Multiply(left, right);

/// <summary>
/// float64x2_t vmulq_n_f64 (float64x2_t a, float64_t b)
/// A64: FMUL Vd.2D, Vn.2D, Vm.D[0]
/// </summary>
public static Vector128<double> MultiplyByScalar(Vector128<double> left, Vector64<double> right) => MultiplyByScalar(left, right);

/// <summary>
/// float64x2_t vmulq_laneq_f64 (float64x2_t a, float64x2_t v, const int lane)
/// A64: FMUL Vd.2D, Vn.2D, Vm.D[lane]
/// </summary>
public static Vector128<double> MultiplyBySelectedScalar(Vector128<double> left, Vector128<double> right, byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);

/// <summary>
/// int16_t vqdmulhh_s16 (int16_t a, int16_t b)
/// A64: SQDMULH Hd, Hn, Hm
/// </summary>
public static Vector64<short> MultiplyDoublingSaturateHighScalar(Vector64<short> left, Vector64<short> right) => MultiplyDoublingSaturateHighScalar(left, right);

/// <summary>
/// int32_t vqdmulhs_s32 (int32_t a, int32_t b)
/// A64: SQDMULH Sd, Sn, Sm
/// </summary>
public static Vector64<int> MultiplyDoublingSaturateHighScalar(Vector64<int> left, Vector64<int> right) => MultiplyDoublingSaturateHighScalar(left, right);

/// <summary>
/// int16_t vqdmulhh_lane_s16 (int16_t a, int16x4_t v, const int lane)
/// A64: SQDMULH Hd, Hn, Vm.H[lane]
/// </summary>
public static Vector64<short> MultiplyDoublingScalarBySelectedScalarSaturateHigh(Vector64<short> left, Vector64<short> right, byte rightIndex) => MultiplyDoublingScalarBySelectedScalarSaturateHigh(left, right, rightIndex);

/// <summary>
/// int16_t vqdmulhh_laneq_s16 (int16_t a, int16x8_t v, const int lane)
/// A64: SQDMULH Hd, Hn, Vm.H[lane]
/// </summary>
public static Vector64<short> MultiplyDoublingScalarBySelectedScalarSaturateHigh(Vector64<short> left, Vector128<short> right, byte rightIndex) => MultiplyDoublingScalarBySelectedScalarSaturateHigh(left, right, rightIndex);

/// <summary>
/// int32_t vqdmulhs_lane_s32 (int32_t a, int32x2_t v, const int lane)
/// A64: SQDMULH Sd, Sn, Vm.S[lane]
/// </summary>
public static Vector64<int> MultiplyDoublingScalarBySelectedScalarSaturateHigh(Vector64<int> left, Vector64<int> right, byte rightIndex) => MultiplyDoublingScalarBySelectedScalarSaturateHigh(left, right, rightIndex);

/// <summary>
/// int32_t vqdmulhs_laneq_s32 (int32_t a, int32x4_t v, const int lane)
/// A64: SQDMULH Sd, Sn, Vm.S[lane]
/// </summary>
public static Vector64<int> MultiplyDoublingScalarBySelectedScalarSaturateHigh(Vector64<int> left, Vector128<int> right, byte rightIndex) => MultiplyDoublingScalarBySelectedScalarSaturateHigh(left, right, rightIndex);

/// <summary>
/// int32_t vqdmlalh_s16 (int32_t a, int16_t b, int16_t c)
/// A64: SQDMLAL Sd, Hn, Hm
/// </summary>
public static Vector64<int> MultiplyDoublingWideningAndAddSaturateScalar(Vector64<int> addend, Vector64<short> left, Vector64<short> right) => MultiplyDoublingWideningAndAddSaturateScalar(addend, left, right);

/// <summary>
/// int64_t vqdmlals_s32 (int64_t a, int32_t b, int32_t c)
/// A64: SQDMLAL Dd, Sn, Sm
/// </summary>
public static Vector64<long> MultiplyDoublingWideningAndAddSaturateScalar(Vector64<long> addend, Vector64<int> left, Vector64<int> right) => MultiplyDoublingWideningAndAddSaturateScalar(addend, left, right);

/// <summary>
/// int32_t vqdmlslh_s16 (int32_t a, int16_t b, int16_t c)
/// A64: SQDMLSL Sd, Hn, Hm
/// </summary>
public static Vector64<int> MultiplyDoublingWideningAndSubtractSaturateScalar(Vector64<int> minuend, Vector64<short> left, Vector64<short> right) => MultiplyDoublingWideningAndSubtractSaturateScalar(minuend, left, right);

/// <summary>
/// int64_t vqdmlsls_s32 (int64_t a, int32_t b, int32_t c)
/// A64: SQDMLSL Dd, Sn, Sm
/// </summary>
public static Vector64<long> MultiplyDoublingWideningAndSubtractSaturateScalar(Vector64<long> minuend, Vector64<int> left, Vector64<int> right) => MultiplyDoublingWideningAndSubtractSaturateScalar(minuend, left, right);

/// <summary>
/// int32_t vqdmullh_s16 (int16_t a, int16_t b)
/// A64: SQDMULL Sd, Hn, Hm
/// </summary>
public static Vector64<int> MultiplyDoublingWideningSaturateScalar(Vector64<short> left, Vector64<short> right) => MultiplyDoublingWideningSaturateScalar(left, right);

/// <summary>
/// int64_t vqdmulls_s32 (int32_t a, int32_t b)
/// A64: SQDMULL Dd, Sn, Sm
/// </summary>
public static Vector64<long> MultiplyDoublingWideningSaturateScalar(Vector64<int> left, Vector64<int> right) => MultiplyDoublingWideningSaturateScalar(left, right);

/// <summary>
/// int32_t vqdmullh_lane_s16 (int16_t a, int16x4_t v, const int lane)
/// A64: SQDMULL Sd, Hn, Vm.H[lane]
/// </summary>
public static Vector64<int> MultiplyDoublingWideningSaturateScalarBySelectedScalar(Vector64<short> left, Vector64<short> right, byte rightIndex) => MultiplyDoublingWideningSaturateScalarBySelectedScalar(left, right, rightIndex);

/// <summary>
/// int32_t vqdmullh_laneq_s16 (int16_t a, int16x8_t v, const int lane)
/// A64: SQDMULL Sd, Hn, Vm.H[lane]
/// </summary>
public static Vector64<int> MultiplyDoublingWideningSaturateScalarBySelectedScalar(Vector64<short> left, Vector128<short> right, byte rightIndex) => MultiplyDoublingWideningSaturateScalarBySelectedScalar(left, right, rightIndex);

/// <summary>
/// int64_t vqdmulls_lane_s32 (int32_t a, int32x2_t v, const int lane)
/// A64: SQDMULL Dd, Sn, Vm.S[lane]
/// </summary>
public static Vector64<long> MultiplyDoublingWideningSaturateScalarBySelectedScalar(Vector64<int> left, Vector64<int> right, byte rightIndex) => MultiplyDoublingWideningSaturateScalarBySelectedScalar(left, right, rightIndex);

/// <summary>
/// int64_t vqdmulls_laneq_s32 (int32_t a, int32x4_t v, const int lane)
/// A64: SQDMULL Dd, Sn, Vm.S[lane]
/// </summary>
public static Vector64<long> MultiplyDoublingWideningSaturateScalarBySelectedScalar(Vector64<int> left, Vector128<int> right, byte rightIndex) => MultiplyDoublingWideningSaturateScalarBySelectedScalar(left, right, rightIndex);
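// Illustrative sketch (not part of the API surface): the saturating doubling multiplies
// compute 2 * a * b in the widened type and clamp on overflow; SQDMULL is the building
// block of many Q15/Q31 fixed-point kernels.
//
//   Vector64<short> a = Vector64.CreateScalar(short.MinValue);
//   Vector64<short> b = Vector64.CreateScalar(short.MinValue);
//   // 2 * (-32768) * (-32768) = 0x8000_0000 overflows Int32, so the result
//   // saturates to int.MaxValue.
//   Vector64<int> r = AdvSimd.Arm64.MultiplyDoublingWideningSaturateScalar(a, b);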
/// <summary>
/// int32_t vqdmlalh_lane_s16 (int32_t a, int16_t b, int16x4_t v, const int lane)
/// A64: SQDMLAL Sd, Hn, Vm.H[lane]
/// </summary>
public static Vector64<int> MultiplyDoublingWideningScalarBySelectedScalarAndAddSaturate(Vector64<int> addend, Vector64<short> left, Vector64<short> right, byte rightIndex) => MultiplyDoublingWideningScalarBySelectedScalarAndAddSaturate(addend, left, right, rightIndex);

/// <summary>
/// int32_t vqdmlalh_laneq_s16 (int32_t a, int16_t b, int16x8_t v, const int lane)
/// A64: SQDMLAL Sd, Hn, Vm.H[lane]
/// </summary>
public static Vector64<int> MultiplyDoublingWideningScalarBySelectedScalarAndAddSaturate(Vector64<int> addend, Vector64<short> left, Vector128<short> right, byte rightIndex) => MultiplyDoublingWideningScalarBySelectedScalarAndAddSaturate(addend, left, right, rightIndex);

/// <summary>
/// int64_t vqdmlals_lane_s32 (int64_t a, int32_t b, int32x2_t v, const int lane)
/// A64: SQDMLAL Dd, Sn, Vm.S[lane]
/// </summary>
public static Vector64<long> MultiplyDoublingWideningScalarBySelectedScalarAndAddSaturate(Vector64<long> addend, Vector64<int> left, Vector64<int> right, byte rightIndex) => MultiplyDoublingWideningScalarBySelectedScalarAndAddSaturate(addend, left, right, rightIndex);

/// <summary>
/// int64_t vqdmlals_laneq_s32 (int64_t a, int32_t b, int32x4_t v, const int lane)
/// A64: SQDMLAL Dd, Sn, Vm.S[lane]
/// </summary>
public static Vector64<long> MultiplyDoublingWideningScalarBySelectedScalarAndAddSaturate(Vector64<long> addend, Vector64<int> left, Vector128<int> right, byte rightIndex) => MultiplyDoublingWideningScalarBySelectedScalarAndAddSaturate(addend, left, right, rightIndex);

/// <summary>
/// int32_t vqdmlslh_lane_s16 (int32_t a, int16_t b, int16x4_t v, const int lane)
/// A64: SQDMLSL Sd, Hn, Vm.H[lane]
/// </summary>
public static Vector64<int> MultiplyDoublingWideningScalarBySelectedScalarAndSubtractSaturate(Vector64<int> minuend, Vector64<short> left, Vector64<short> right, byte rightIndex) => MultiplyDoublingWideningScalarBySelectedScalarAndSubtractSaturate(minuend, left, right, rightIndex);

/// <summary>
/// int32_t vqdmlslh_laneq_s16 (int32_t a, int16_t b, int16x8_t v, const int lane)
/// A64: SQDMLSL Sd, Hn, Vm.H[lane]
/// </summary>
public static Vector64<int> MultiplyDoublingWideningScalarBySelectedScalarAndSubtractSaturate(Vector64<int> minuend, Vector64<short> left, Vector128<short> right, byte rightIndex) => MultiplyDoublingWideningScalarBySelectedScalarAndSubtractSaturate(minuend, left, right, rightIndex);

/// <summary>
/// int64_t vqdmlsls_lane_s32 (int64_t a, int32_t b, int32x2_t v, const int lane)
/// A64: SQDMLSL Dd, Sn, Vm.S[lane]
/// </summary>
public static Vector64<long> MultiplyDoublingWideningScalarBySelectedScalarAndSubtractSaturate(Vector64<long> minuend, Vector64<int> left, Vector64<int> right, byte rightIndex) => MultiplyDoublingWideningScalarBySelectedScalarAndSubtractSaturate(minuend, left, right, rightIndex);

/// <summary>
/// int64_t vqdmlsls_laneq_s32 (int64_t a, int32_t b, int32x4_t v, const int lane)
/// A64: SQDMLSL Dd, Sn, Vm.S[lane]
/// </summary>
public static Vector64<long> MultiplyDoublingWideningScalarBySelectedScalarAndSubtractSaturate(Vector64<long> minuend, Vector64<int> left, Vector128<int> right, byte rightIndex) => MultiplyDoublingWideningScalarBySelectedScalarAndSubtractSaturate(minuend, left, right, rightIndex);

/// <summary>
/// float32x2_t vmulx_f32 (float32x2_t a, float32x2_t b)
/// A64: FMULX Vd.2S, Vn.2S, Vm.2S
/// </summary>
public static Vector64<float> MultiplyExtended(Vector64<float> left, Vector64<float> right) => MultiplyExtended(left, right);

/// <summary>
/// float64x2_t vmulxq_f64 (float64x2_t a, float64x2_t b)
/// A64: FMULX Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<double> MultiplyExtended(Vector128<double> left, Vector128<double> right) => MultiplyExtended(left, right);

/// <summary>
/// float32x4_t vmulxq_f32 (float32x4_t a, float32x4_t b)
/// A64: FMULX Vd.4S, Vn.4S, Vm.4S
/// </summary>
public static Vector128<float> MultiplyExtended(Vector128<float> left, Vector128<float> right) => MultiplyExtended(left, right);

/// <summary>
/// float64x2_t vmulxq_lane_f64 (float64x2_t a, float64x1_t v, const int lane)
/// A64: FMULX Vd.2D, Vn.2D, Vm.D[0]
/// </summary>
public static Vector128<double> MultiplyExtendedByScalar(Vector128<double> left, Vector64<double> right) => MultiplyExtendedByScalar(left, right);

/// <summary>
/// float32x2_t vmulx_lane_f32 (float32x2_t a, float32x2_t v, const int lane)
/// A64: FMULX Vd.2S, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector64<float> MultiplyExtendedBySelectedScalar(Vector64<float> left, Vector64<float> right, byte rightIndex) => MultiplyExtendedBySelectedScalar(left, right, rightIndex);

/// <summary>
/// float32x2_t vmulx_laneq_f32 (float32x2_t a, float32x4_t v, const int lane)
/// A64: FMULX Vd.2S, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector64<float> MultiplyExtendedBySelectedScalar(Vector64<float> left, Vector128<float> right, byte rightIndex) => MultiplyExtendedBySelectedScalar(left, right, rightIndex);

/// <summary>
/// float64x2_t vmulxq_laneq_f64 (float64x2_t a, float64x2_t v, const int lane)
/// A64: FMULX Vd.2D, Vn.2D, Vm.D[lane]
/// </summary>
public static Vector128<double> MultiplyExtendedBySelectedScalar(Vector128<double> left, Vector128<double> right, byte rightIndex) => MultiplyExtendedBySelectedScalar(left, right, rightIndex);

/// <summary>
/// float32x4_t vmulxq_lane_f32 (float32x4_t a, float32x2_t v, const int lane)
/// A64: FMULX Vd.4S, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<float> MultiplyExtendedBySelectedScalar(Vector128<float> left, Vector64<float> right, byte rightIndex) => MultiplyExtendedBySelectedScalar(left, right, rightIndex);

/// <summary>
/// float32x4_t vmulxq_laneq_f32 (float32x4_t a, float32x4_t v, const int lane)
/// A64: FMULX Vd.4S, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<float> MultiplyExtendedBySelectedScalar(Vector128<float> left, Vector128<float> right, byte rightIndex) => MultiplyExtendedBySelectedScalar(left, right, rightIndex);

/// <summary>
/// float64x1_t vmulx_f64 (float64x1_t a, float64x1_t b)
/// A64: FMULX Dd, Dn, Dm
/// </summary>
public static Vector64<double> MultiplyExtendedScalar(Vector64<double> left, Vector64<double> right) => MultiplyExtendedScalar(left, right);

/// <summary>
/// float32_t vmulxs_f32 (float32_t a, float32_t b)
/// A64: FMULX Sd, Sn, Sm
/// </summary>
public static Vector64<float> MultiplyExtendedScalar(Vector64<float> left, Vector64<float> right) => MultiplyExtendedScalar(left, right);

/// <summary>
/// float64_t vmulxd_laneq_f64 (float64_t a, float64x2_t v, const int lane)
/// A64: FMULX Dd, Dn, Vm.D[lane]
/// </summary>
public static Vector64<double> MultiplyExtendedScalarBySelectedScalar(Vector64<double> left, Vector128<double> right, byte rightIndex) => MultiplyExtendedScalarBySelectedScalar(left, right, rightIndex);

/// <summary>
/// float32_t vmulxs_lane_f32 (float32_t a, float32x2_t v, const int lane)
/// A64: FMULX Sd, Sn, Vm.S[lane]
/// </summary>
public static Vector64<float> MultiplyExtendedScalarBySelectedScalar(Vector64<float> left, Vector64<float> right, byte rightIndex) => MultiplyExtendedScalarBySelectedScalar(left, right, rightIndex);

/// <summary>
/// float32_t vmulxs_laneq_f32 (float32_t a, float32x4_t v, const int lane)
/// A64: FMULX Sd, Sn, Vm.S[lane]
/// </summary>
public static Vector64<float> MultiplyExtendedScalarBySelectedScalar(Vector64<float> left, Vector128<float> right, byte rightIndex) => MultiplyExtendedScalarBySelectedScalar(left, right, rightIndex);

/// <summary>
/// int16_t vqrdmulhh_s16 (int16_t a, int16_t b)
/// A64: SQRDMULH Hd, Hn, Hm
/// </summary>
public static Vector64<short> MultiplyRoundedDoublingSaturateHighScalar(Vector64<short> left, Vector64<short> right) => MultiplyRoundedDoublingSaturateHighScalar(left, right);

/// <summary>
/// int32_t vqrdmulhs_s32 (int32_t a, int32_t b)
/// A64: SQRDMULH Sd, Sn, Sm
/// </summary>
public static Vector64<int> MultiplyRoundedDoublingSaturateHighScalar(Vector64<int> left, Vector64<int> right) => MultiplyRoundedDoublingSaturateHighScalar(left, right);

/// <summary>
/// int16_t vqrdmulhh_lane_s16 (int16_t a, int16x4_t v, const int lane)
/// A64: SQRDMULH Hd, Hn, Vm.H[lane]
/// </summary>
public static Vector64<short> MultiplyRoundedDoublingScalarBySelectedScalarSaturateHigh(Vector64<short> left, Vector64<short> right, byte rightIndex) => MultiplyRoundedDoublingScalarBySelectedScalarSaturateHigh(left, right, rightIndex);

/// <summary>
/// int16_t vqrdmulhh_laneq_s16 (int16_t a, int16x8_t v, const int lane)
/// A64: SQRDMULH Hd, Hn, Vm.H[lane]
/// </summary>
public static Vector64<short> MultiplyRoundedDoublingScalarBySelectedScalarSaturateHigh(Vector64<short> left, Vector128<short> right, byte rightIndex) => MultiplyRoundedDoublingScalarBySelectedScalarSaturateHigh(left, right, rightIndex);

/// <summary>
/// int32_t vqrdmulhs_lane_s32 (int32_t a, int32x2_t v, const int lane)
/// A64: SQRDMULH Sd, Sn, Vm.S[lane]
/// </summary>
public static Vector64<int> MultiplyRoundedDoublingScalarBySelectedScalarSaturateHigh(Vector64<int> left, Vector64<int> right, byte rightIndex) => MultiplyRoundedDoublingScalarBySelectedScalarSaturateHigh(left, right, rightIndex);

/// <summary>
/// int32_t vqrdmulhs_laneq_s32 (int32_t a, int32x4_t v, const int lane)
/// A64: SQRDMULH Sd, Sn, Vm.S[lane]
/// </summary>
public static Vector64<int> MultiplyRoundedDoublingScalarBySelectedScalarSaturateHigh(Vector64<int> left, Vector128<int> right, byte rightIndex) => MultiplyRoundedDoublingScalarBySelectedScalarSaturateHigh(left, right, rightIndex);

/// <summary>
/// float64_t vmuld_laneq_f64 (float64_t a, float64x2_t v, const int lane)
/// A64: FMUL Dd, Dn, Vm.D[lane]
/// </summary>
public static Vector64<double> MultiplyScalarBySelectedScalar(Vector64<double> left, Vector128<double> right, byte rightIndex) => MultiplyScalarBySelectedScalar(left, right, rightIndex);

/// <summary>
/// float64x2_t vnegq_f64 (float64x2_t a)
/// A64: FNEG Vd.2D, Vn.2D
/// </summary>
public static Vector128<double> Negate(Vector128<double> value) => Negate(value);

/// <summary>
/// int64x2_t vnegq_s64 (int64x2_t a)
/// A64: NEG Vd.2D, Vn.2D
/// </summary>
public static Vector128<long> Negate(Vector128<long> value) => Negate(value);

/// <summary>
/// int64x2_t vqnegq_s64 (int64x2_t a)
/// A64: SQNEG Vd.2D, Vn.2D
/// </summary>
public static Vector128<long> NegateSaturate(Vector128<long> value) => NegateSaturate(value);

/// <summary>
/// int16_t vqnegh_s16 (int16_t a)
/// A64: SQNEG Hd, Hn
/// </summary>
public static Vector64<short> NegateSaturateScalar(Vector64<short> value) => NegateSaturateScalar(value);

/// <summary>
/// int32_t vqnegs_s32 (int32_t a)
/// A64: SQNEG Sd, Sn
/// </summary>
public static Vector64<int> NegateSaturateScalar(Vector64<int> value) => NegateSaturateScalar(value);

/// <summary>
/// int64_t vqnegd_s64 (int64_t a)
/// A64: SQNEG Dd, Dn
/// </summary>
public static Vector64<long> NegateSaturateScalar(Vector64<long> value) => NegateSaturateScalar(value);

/// <summary>
/// int8_t vqnegb_s8 (int8_t a)
/// A64: SQNEG Bd, Bn
/// </summary>
public static Vector64<sbyte> NegateSaturateScalar(Vector64<sbyte> value) => NegateSaturateScalar(value);

/// <summary>
/// int64x1_t vneg_s64 (int64x1_t a)
/// A64: NEG Dd, Dn
/// </summary>
public static Vector64<long> NegateScalar(Vector64<long> value) => NegateScalar(value);
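// Note (a descriptive aside): the MultiplyExtended family above maps to FMULX, which
// behaves like FMUL except that (+/-0) * (+/-infinity) returns +/-2.0 instead of NaN,
// which is why it appears inside reciprocal and reciprocal-sqrt step sequences.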
/// <summary>
/// float64x2_t vrecpeq_f64 (float64x2_t a)
/// A64: FRECPE Vd.2D, Vn.2D
/// </summary>
public static Vector128<double> ReciprocalEstimate(Vector128<double> value) => ReciprocalEstimate(value);

/// <summary>
/// float64x1_t vrecpe_f64 (float64x1_t a)
/// A64: FRECPE Dd, Dn
/// </summary>
public static Vector64<double> ReciprocalEstimateScalar(Vector64<double> value) => ReciprocalEstimateScalar(value);

/// <summary>
/// float32_t vrecpes_f32 (float32_t a)
/// A64: FRECPE Sd, Sn
/// </summary>
public static Vector64<float> ReciprocalEstimateScalar(Vector64<float> value) => ReciprocalEstimateScalar(value);

/// <summary>
/// float64_t vrecpxd_f64 (float64_t a)
/// A64: FRECPX Dd, Dn
/// </summary>
public static Vector64<double> ReciprocalExponentScalar(Vector64<double> value) => ReciprocalExponentScalar(value);

/// <summary>
/// float32_t vrecpxs_f32 (float32_t a)
/// A64: FRECPX Sd, Sn
/// </summary>
public static Vector64<float> ReciprocalExponentScalar(Vector64<float> value) => ReciprocalExponentScalar(value);

/// <summary>
/// float64x2_t vrsqrteq_f64 (float64x2_t a)
/// A64: FRSQRTE Vd.2D, Vn.2D
/// </summary>
public static Vector128<double> ReciprocalSquareRootEstimate(Vector128<double> value) => ReciprocalSquareRootEstimate(value);

/// <summary>
/// float64x1_t vrsqrte_f64 (float64x1_t a)
/// A64: FRSQRTE Dd, Dn
/// </summary>
public static Vector64<double> ReciprocalSquareRootEstimateScalar(Vector64<double> value) => ReciprocalSquareRootEstimateScalar(value);

/// <summary>
/// float32_t vrsqrtes_f32 (float32_t a)
/// A64: FRSQRTE Sd, Sn
/// </summary>
public static Vector64<float> ReciprocalSquareRootEstimateScalar(Vector64<float> value) => ReciprocalSquareRootEstimateScalar(value);

/// <summary>
/// float64x2_t vrsqrtsq_f64 (float64x2_t a, float64x2_t b)
/// A64: FRSQRTS Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<double> ReciprocalSquareRootStep(Vector128<double> left, Vector128<double> right) => ReciprocalSquareRootStep(left, right);

/// <summary>
/// float64x1_t vrsqrts_f64 (float64x1_t a, float64x1_t b)
/// A64: FRSQRTS Dd, Dn, Dm
/// </summary>
public static Vector64<double> ReciprocalSquareRootStepScalar(Vector64<double> left, Vector64<double> right) => ReciprocalSquareRootStepScalar(left, right);

/// <summary>
/// float32_t vrsqrtss_f32 (float32_t a, float32_t b)
/// A64: FRSQRTS Sd, Sn, Sm
/// </summary>
public static Vector64<float> ReciprocalSquareRootStepScalar(Vector64<float> left, Vector64<float> right) => ReciprocalSquareRootStepScalar(left, right);

/// <summary>
/// float64x2_t vrecpsq_f64 (float64x2_t a, float64x2_t b)
/// A64: FRECPS Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<double> ReciprocalStep(Vector128<double> left, Vector128<double> right) => ReciprocalStep(left, right);

/// <summary>
/// float64x1_t vrecps_f64 (float64x1_t a, float64x1_t b)
/// A64: FRECPS Dd, Dn, Dm
/// </summary>
public static Vector64<double> ReciprocalStepScalar(Vector64<double> left, Vector64<double> right) => ReciprocalStepScalar(left, right);

/// <summary>
/// float32_t vrecpss_f32 (float32_t a, float32_t b)
/// A64: FRECPS Sd, Sn, Sm
/// </summary>
public static Vector64<float> ReciprocalStepScalar(Vector64<float> left, Vector64<float> right) => ReciprocalStepScalar(left, right);
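// Illustrative sketch (not part of the API surface): FRECPE alone yields only a rough
// (~8-bit) reciprocal seed; each FRECPS-based Newton-Raphson step (ReciprocalStep computes
// 2 - a*b) roughly doubles the precision. A typical refinement sequence:
//
//   Vector128<double> x = Vector128.Create(3.0, 7.0);
//   Vector128<double> r = AdvSimd.Arm64.ReciprocalEstimate(x);
//   r = AdvSimd.Arm64.Multiply(r, AdvSimd.Arm64.ReciprocalStep(r, x)); // refine
//   r = AdvSimd.Arm64.Multiply(r, AdvSimd.Arm64.ReciprocalStep(r, x)); // refine again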
/// <summary>
/// float64x2_t vrndaq_f64 (float64x2_t a)
/// A64: FRINTA Vd.2D, Vn.2D
/// </summary>
public static Vector128<double> RoundAwayFromZero(Vector128<double> value) => RoundAwayFromZero(value);

/// <summary>
/// float64x2_t vrndnq_f64 (float64x2_t a)
/// A64: FRINTN Vd.2D, Vn.2D
/// </summary>
public static Vector128<double> RoundToNearest(Vector128<double> value) => RoundToNearest(value);

/// <summary>
/// float64x2_t vrndmq_f64 (float64x2_t a)
/// A64: FRINTM Vd.2D, Vn.2D
/// </summary>
public static Vector128<double> RoundToNegativeInfinity(Vector128<double> value) => RoundToNegativeInfinity(value);

/// <summary>
/// float64x2_t vrndpq_f64 (float64x2_t a)
/// A64: FRINTP Vd.2D, Vn.2D
/// </summary>
public static Vector128<double> RoundToPositiveInfinity(Vector128<double> value) => RoundToPositiveInfinity(value);

/// <summary>
/// float64x2_t vrndq_f64 (float64x2_t a)
/// A64: FRINTZ Vd.2D, Vn.2D
/// </summary>
public static Vector128<double> RoundToZero(Vector128<double> value) => RoundToZero(value);
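// Note (a descriptive aside): RoundToNearest (FRINTN) rounds ties to even, so 0.5 -> 0 and
// 1.5 -> 2, matching the default Math.Round behavior, whereas RoundAwayFromZero (FRINTA)
// takes 0.5 -> 1 and -0.5 -> -1 (MidpointRounding.AwayFromZero); pick accordingly when
// porting scalar rounding code.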
/// <summary>
/// int16_t vqrshlh_s16 (int16_t a, int16_t b)
/// A64: SQRSHL Hd, Hn, Hm
/// </summary>
public static Vector64<short> ShiftArithmeticRoundedSaturateScalar(Vector64<short> value, Vector64<short> count) => ShiftArithmeticRoundedSaturateScalar(value, count);

/// <summary>
/// int32_t vqrshls_s32 (int32_t a, int32_t b)
/// A64: SQRSHL Sd, Sn, Sm
/// </summary>
public static Vector64<int> ShiftArithmeticRoundedSaturateScalar(Vector64<int> value, Vector64<int> count) => ShiftArithmeticRoundedSaturateScalar(value, count);

/// <summary>
/// int8_t vqrshlb_s8 (int8_t a, int8_t b)
/// A64: SQRSHL Bd, Bn, Bm
/// </summary>
public static Vector64<sbyte> ShiftArithmeticRoundedSaturateScalar(Vector64<sbyte> value, Vector64<sbyte> count) => ShiftArithmeticRoundedSaturateScalar(value, count);

/// <summary>
/// int16_t vqshlh_s16 (int16_t a, int16_t b)
/// A64: SQSHL Hd, Hn, Hm
/// </summary>
public static Vector64<short> ShiftArithmeticSaturateScalar(Vector64<short> value, Vector64<short> count) => ShiftArithmeticSaturateScalar(value, count);

/// <summary>
/// int32_t vqshls_s32 (int32_t a, int32_t b)
/// A64: SQSHL Sd, Sn, Sm
/// </summary>
public static Vector64<int> ShiftArithmeticSaturateScalar(Vector64<int> value, Vector64<int> count) => ShiftArithmeticSaturateScalar(value, count);

/// <summary>
/// int8_t vqshlb_s8 (int8_t a, int8_t b)
/// A64: SQSHL Bd, Bn, Bm
/// </summary>
public static Vector64<sbyte> ShiftArithmeticSaturateScalar(Vector64<sbyte> value, Vector64<sbyte> count) => ShiftArithmeticSaturateScalar(value, count);

/// <summary>
/// uint8_t vqshlb_n_u8 (uint8_t a, const int n)
/// A64: UQSHL Bd, Bn, #n
/// </summary>
public static Vector64<byte> ShiftLeftLogicalSaturateScalar(Vector64<byte> value, byte count) => ShiftLeftLogicalSaturateScalar(value, count);

/// <summary>
/// int16_t vqshlh_n_s16 (int16_t a, const int n)
/// A64: SQSHL Hd, Hn, #n
/// </summary>
public static Vector64<short> ShiftLeftLogicalSaturateScalar(Vector64<short> value, byte count) => ShiftLeftLogicalSaturateScalar(value, count);

/// <summary>
/// int32_t vqshls_n_s32 (int32_t a, const int n)
/// A64: SQSHL Sd, Sn, #n
/// </summary>
public static Vector64<int> ShiftLeftLogicalSaturateScalar(Vector64<int> value, byte count) => ShiftLeftLogicalSaturateScalar(value, count);

/// <summary>
/// int8_t vqshlb_n_s8 (int8_t a, const int n)
/// A64: SQSHL Bd, Bn, #n
/// </summary>
public static Vector64<sbyte> ShiftLeftLogicalSaturateScalar(Vector64<sbyte> value, byte count) => ShiftLeftLogicalSaturateScalar(value, count);

/// <summary>
/// uint16_t vqshlh_n_u16 (uint16_t a, const int n)
/// A64: UQSHL Hd, Hn, #n
/// </summary>
public static Vector64<ushort> ShiftLeftLogicalSaturateScalar(Vector64<ushort> value, byte count) => ShiftLeftLogicalSaturateScalar(value, count);

/// <summary>
/// uint32_t vqshls_n_u32 (uint32_t a, const int n)
/// A64: UQSHL Sd, Sn, #n
/// </summary>
public static Vector64<uint> ShiftLeftLogicalSaturateScalar(Vector64<uint> value, byte count) => ShiftLeftLogicalSaturateScalar(value, count);

/// <summary>
/// uint16_t vqshluh_n_s16 (int16_t a, const int n)
/// A64: SQSHLU Hd, Hn, #n
/// </summary>
public static Vector64<ushort> ShiftLeftLogicalSaturateUnsignedScalar(Vector64<short> value, byte count) => ShiftLeftLogicalSaturateUnsignedScalar(value, count);

/// <summary>
/// uint32_t vqshlus_n_s32 (int32_t a, const int n)
/// A64: SQSHLU Sd, Sn, #n
/// </summary>
public static Vector64<uint> ShiftLeftLogicalSaturateUnsignedScalar(Vector64<int> value, byte count) => ShiftLeftLogicalSaturateUnsignedScalar(value, count);

/// <summary>
/// uint8_t vqshlub_n_s8 (int8_t a, const int n)
/// A64: SQSHLU Bd, Bn, #n
/// </summary>
public static Vector64<byte> ShiftLeftLogicalSaturateUnsignedScalar(Vector64<sbyte> value, byte count) => ShiftLeftLogicalSaturateUnsignedScalar(value, count);

/// <summary>
/// uint8_t vqrshlb_u8 (uint8_t a, int8_t b)
/// A64: UQRSHL Bd, Bn, Bm
/// </summary>
public static Vector64<byte> ShiftLogicalRoundedSaturateScalar(Vector64<byte> value, Vector64<sbyte> count) => ShiftLogicalRoundedSaturateScalar(value, count);

/// <summary>
/// uint16_t vqrshlh_u16 (uint16_t a, int16_t b)
/// A64: UQRSHL Hd, Hn, Hm
/// </summary>
public static Vector64<short> ShiftLogicalRoundedSaturateScalar(Vector64<short> value, Vector64<short> count) => ShiftLogicalRoundedSaturateScalar(value, count);

/// <summary>
/// uint32_t vqrshls_u32 (uint32_t a, int32_t b)
/// A64: UQRSHL Sd, Sn, Sm
/// </summary>
public static Vector64<int> ShiftLogicalRoundedSaturateScalar(Vector64<int> value, Vector64<int> count) => ShiftLogicalRoundedSaturateScalar(value, count);

/// <summary>
/// uint8_t vqrshlb_u8 (uint8_t a, int8_t b)
/// A64: UQRSHL Bd, Bn, Bm
/// </summary>
public static Vector64<sbyte> ShiftLogicalRoundedSaturateScalar(Vector64<sbyte> value, Vector64<sbyte> count) => ShiftLogicalRoundedSaturateScalar(value, count);

/// <summary>
/// uint16_t vqrshlh_u16 (uint16_t a, int16_t b)
/// A64: UQRSHL Hd, Hn, Hm
/// </summary>
public static Vector64<ushort> ShiftLogicalRoundedSaturateScalar(Vector64<ushort> value, Vector64<short> count) => ShiftLogicalRoundedSaturateScalar(value, count);

/// <summary>
/// uint32_t vqrshls_u32 (uint32_t a, int32_t b)
/// A64: UQRSHL Sd, Sn, Sm
/// </summary>
public static Vector64<uint> ShiftLogicalRoundedSaturateScalar(Vector64<uint> value, Vector64<int> count) => ShiftLogicalRoundedSaturateScalar(value, count);

/// <summary>
/// uint8_t vqshlb_u8 (uint8_t a, int8_t b)
/// A64: UQSHL Bd, Bn, Bm
/// </summary>
public static Vector64<byte> ShiftLogicalSaturateScalar(Vector64<byte> value, Vector64<sbyte> count) => ShiftLogicalSaturateScalar(value, count);

/// <summary>
/// uint16_t vqshlh_u16 (uint16_t a, int16_t b)
/// A64: UQSHL Hd, Hn, Hm
/// </summary>
public static Vector64<short> ShiftLogicalSaturateScalar(Vector64<short> value, Vector64<short> count) => ShiftLogicalSaturateScalar(value, count);

/// <summary>
/// uint32_t vqshls_u32 (uint32_t a, int32_t b)
/// A64: UQSHL Sd, Sn, Sm
/// </summary>
public static Vector64<int> ShiftLogicalSaturateScalar(Vector64<int> value, Vector64<int> count) => ShiftLogicalSaturateScalar(value, count);

/// <summary>
/// uint8_t vqshlb_u8 (uint8_t a, int8_t b)
/// A64: UQSHL Bd, Bn, Bm
/// </summary>
public static Vector64<sbyte> ShiftLogicalSaturateScalar(Vector64<sbyte> value, Vector64<sbyte> count) => ShiftLogicalSaturateScalar(value, count);

/// <summary>
/// uint16_t vqshlh_u16 (uint16_t a, int16_t b)
/// A64: UQSHL Hd, Hn, Hm
/// </summary>
public static Vector64<ushort> ShiftLogicalSaturateScalar(Vector64<ushort> value, Vector64<short> count) => ShiftLogicalSaturateScalar(value, count);

/// <summary>
/// uint32_t vqshls_u32 (uint32_t a, int32_t b)
/// A64: UQSHL Sd, Sn, Sm
/// </summary>
public static Vector64<uint> ShiftLogicalSaturateScalar(Vector64<uint> value, Vector64<int> count) => ShiftLogicalSaturateScalar(value, count);

/// <summary>
/// int16_t vqshrns_n_s32 (int32_t a, const int n)
/// A64: SQSHRN Hd, Sn, #n
/// </summary>
public static Vector64<short> ShiftRightArithmeticNarrowingSaturateScalar(Vector64<int> value, byte count) => ShiftRightArithmeticNarrowingSaturateScalar(value, count);

/// <summary>
/// int32_t vqshrnd_n_s64 (int64_t a, const int n)
/// A64: SQSHRN Sd, Dn, #n
/// </summary>
public static Vector64<int> ShiftRightArithmeticNarrowingSaturateScalar(Vector64<long> value, byte count) => ShiftRightArithmeticNarrowingSaturateScalar(value, count);

/// <summary>
/// int8_t vqshrnh_n_s16 (int16_t a, const int n)
/// A64: SQSHRN Bd, Hn, #n
/// </summary>
public static Vector64<sbyte> ShiftRightArithmeticNarrowingSaturateScalar(Vector64<short> value, byte count) => ShiftRightArithmeticNarrowingSaturateScalar(value, count);

/// <summary>
/// uint8_t vqshrunh_n_s16 (int16_t a, const int n)
/// A64: SQSHRUN Bd, Hn, #n
/// </summary>
public static Vector64<byte> ShiftRightArithmeticNarrowingSaturateUnsignedScalar(Vector64<short> value, byte count) => ShiftRightArithmeticNarrowingSaturateUnsignedScalar(value, count);

/// <summary>
/// uint16_t vqshruns_n_s32 (int32_t a, const int n)
/// A64: SQSHRUN Hd, Sn, #n
/// </summary>
public static Vector64<ushort> ShiftRightArithmeticNarrowingSaturateUnsignedScalar(Vector64<int> value, byte count) => ShiftRightArithmeticNarrowingSaturateUnsignedScalar(value, count);

/// <summary>
/// uint32_t vqshrund_n_s64 (int64_t a, const int n)
/// A64: SQSHRUN Sd, Dn, #n
/// </summary>
public static Vector64<uint> ShiftRightArithmeticNarrowingSaturateUnsignedScalar(Vector64<long> value, byte count) => ShiftRightArithmeticNarrowingSaturateUnsignedScalar(value, count);

/// <summary>
/// int16_t vqrshrns_n_s32 (int32_t a, const int n)
/// A64: SQRSHRN Hd, Sn, #n
/// </summary>
public static Vector64<short> ShiftRightArithmeticRoundedNarrowingSaturateScalar(Vector64<int> value, byte count) => ShiftRightArithmeticRoundedNarrowingSaturateScalar(value, count);

/// <summary>
/// int32_t vqrshrnd_n_s64 (int64_t a, const int n)
/// A64: SQRSHRN Sd, Dn, #n
/// </summary>
public static Vector64<int> ShiftRightArithmeticRoundedNarrowingSaturateScalar(Vector64<long> value, byte count) => ShiftRightArithmeticRoundedNarrowingSaturateScalar(value, count);

/// <summary>
/// int8_t vqrshrnh_n_s16 (int16_t a, const int n)
/// A64: SQRSHRN Bd, Hn, #n
/// </summary>
public static Vector64<sbyte> ShiftRightArithmeticRoundedNarrowingSaturateScalar(Vector64<short> value, byte count) => ShiftRightArithmeticRoundedNarrowingSaturateScalar(value, count);

/// <summary>
/// uint8_t vqrshrunh_n_s16 (int16_t a, const int n)
/// A64: SQRSHRUN Bd, Hn, #n
/// </summary>
public static Vector64<byte> ShiftRightArithmeticRoundedNarrowingSaturateUnsignedScalar(Vector64<short> value, byte count) => ShiftRightArithmeticRoundedNarrowingSaturateUnsignedScalar(value, count);

/// <summary>
/// uint16_t vqrshruns_n_s32 (int32_t a, const int n)
/// A64: SQRSHRUN Hd, Sn, #n
/// </summary>
public static Vector64<ushort> ShiftRightArithmeticRoundedNarrowingSaturateUnsignedScalar(Vector64<int> value, byte count) => ShiftRightArithmeticRoundedNarrowingSaturateUnsignedScalar(value, count);

/// <summary>
/// uint32_t vqrshrund_n_s64 (int64_t a, const int n)
/// A64: SQRSHRUN Sd, Dn, #n
/// </summary>
public static Vector64<uint> ShiftRightArithmeticRoundedNarrowingSaturateUnsignedScalar(Vector64<long> value, byte count) => ShiftRightArithmeticRoundedNarrowingSaturateUnsignedScalar(value, count);

/// <summary>
/// uint8_t vqshrnh_n_u16 (uint16_t a, const int n)
/// A64: UQSHRN Bd, Hn, #n
/// </summary>
public static Vector64<byte> ShiftRightLogicalNarrowingSaturateScalar(Vector64<ushort> value, byte count) => ShiftRightLogicalNarrowingSaturateScalar(value, count);

/// <summary>
/// uint16_t vqshrns_n_u32 (uint32_t a, const int n)
/// A64: UQSHRN Hd, Sn, #n
/// </summary>
public static Vector64<short> ShiftRightLogicalNarrowingSaturateScalar(Vector64<int> value, byte count) => ShiftRightLogicalNarrowingSaturateScalar(value, count);

/// <summary>
/// uint32_t vqshrnd_n_u64 (uint64_t a, const int n)
/// A64: UQSHRN Sd, Dn, #n
/// </summary>
public static Vector64<int> ShiftRightLogicalNarrowingSaturateScalar(Vector64<long> value, byte count) => ShiftRightLogicalNarrowingSaturateScalar(value, count);

/// <summary>
/// uint8_t vqshrnh_n_u16 (uint16_t a, const int n)
/// A64: UQSHRN Bd, Hn, #n
/// </summary>
public static Vector64<sbyte> ShiftRightLogicalNarrowingSaturateScalar(Vector64<short> value, byte count) => ShiftRightLogicalNarrowingSaturateScalar(value, count);

/// <summary>
/// uint16_t vqshrns_n_u32 (uint32_t a, const int n)
/// A64: UQSHRN Hd, Sn, #n
/// </summary>
public static Vector64<ushort> ShiftRightLogicalNarrowingSaturateScalar(Vector64<uint> value, byte count) => ShiftRightLogicalNarrowingSaturateScalar(value, count);

/// <summary>
/// uint32_t vqshrnd_n_u64 (uint64_t a, const int n)
/// A64: UQSHRN Sd, Dn, #n
/// </summary>
public static Vector64<uint> ShiftRightLogicalNarrowingSaturateScalar(Vector64<ulong> value, byte count) => ShiftRightLogicalNarrowingSaturateScalar(value, count);

/// <summary>
/// uint8_t vqrshrnh_n_u16 (uint16_t a, const int n)
/// A64: UQRSHRN Bd, Hn, #n
/// </summary>
public static Vector64<byte> ShiftRightLogicalRoundedNarrowingSaturateScalar(Vector64<ushort> value, byte count) => ShiftRightLogicalRoundedNarrowingSaturateScalar(value, count);

/// <summary>
/// uint16_t vqrshrns_n_u32 (uint32_t a, const int n)
/// A64: UQRSHRN Hd, Sn, #n
/// </summary>
public static Vector64<short> ShiftRightLogicalRoundedNarrowingSaturateScalar(Vector64<int> value, byte count) => ShiftRightLogicalRoundedNarrowingSaturateScalar(value, count);

/// <summary>
/// uint32_t vqrshrnd_n_u64 (uint64_t a, const int n)
/// A64: UQRSHRN Sd, Dn, #n
/// </summary>
public static Vector64<int> ShiftRightLogicalRoundedNarrowingSaturateScalar(Vector64<long> value, byte count) => ShiftRightLogicalRoundedNarrowingSaturateScalar(value, count);

/// <summary>
/// uint8_t vqrshrnh_n_u16 (uint16_t a, const int n)
/// A64: UQRSHRN Bd, Hn, #n
/// </summary>
public static Vector64<sbyte> ShiftRightLogicalRoundedNarrowingSaturateScalar(Vector64<short> value, byte count) => ShiftRightLogicalRoundedNarrowingSaturateScalar(value, count);

/// <summary>
/// uint16_t vqrshrns_n_u32 (uint32_t a, const int n)
/// A64: UQRSHRN Hd, Sn, #n
/// </summary>
public static Vector64<ushort> ShiftRightLogicalRoundedNarrowingSaturateScalar(Vector64<uint> value, byte count) => ShiftRightLogicalRoundedNarrowingSaturateScalar(value, count);

/// <summary>
/// uint32_t vqrshrnd_n_u64 (uint64_t a, const int n)
/// A64: UQRSHRN Sd, Dn, #n
/// </summary>
public static Vector64<uint> ShiftRightLogicalRoundedNarrowingSaturateScalar(Vector64<ulong> value, byte count) => ShiftRightLogicalRoundedNarrowingSaturateScalar(value, count);
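// Illustrative sketch (not part of the API surface): the narrowing-saturating shifts divide
// by a power of two and clamp to the narrower type's range in a single instruction, e.g. SQSHRN:
//
//   Vector64<int> wide = Vector64.CreateScalar(100_000);
//   // 100_000 >> 1 = 50_000 exceeds short.MaxValue, so the result saturates to 32767.
//   Vector64<short> narrow = AdvSimd.Arm64.ShiftRightArithmeticNarrowingSaturateScalar(wide, 1);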
ShiftRightLogicalRoundedNarrowingSaturateScalar(value, count); /// <summary> /// uint32_t vqrshrnd_n_u64 (uint64_t a, const int n) /// A64: UQRSHRN Sd, Dn, #n /// </summary> public static Vector64<uint> ShiftRightLogicalRoundedNarrowingSaturateScalar(Vector64<ulong> value, byte count) => ShiftRightLogicalRoundedNarrowingSaturateScalar(value, count); /// <summary> /// float32x2_t vsqrt_f32 (float32x2_t a) /// A64: FSQRT Vd.2S, Vn.2S /// </summary> public static Vector64<float> Sqrt(Vector64<float> value) => Sqrt(value); /// <summary> /// float64x2_t vsqrtq_f64 (float64x2_t a) /// A64: FSQRT Vd.2D, Vn.2D /// </summary> public static Vector128<double> Sqrt(Vector128<double> value) => Sqrt(value); /// <summary> /// float32x4_t vsqrtq_f32 (float32x4_t a) /// A64: FSQRT Vd.4S, Vn.4S /// </summary> public static Vector128<float> Sqrt(Vector128<float> value) => Sqrt(value); /// <summary> /// A64: STP Dt1, Dt2, [Xn] /// </summary> public static unsafe void StorePair(byte* address, Vector64<byte> value1, Vector64<byte> value2) => StorePair(address, value1, value2); /// <summary> /// A64: STP Dt1, Dt2, [Xn] /// </summary> public static unsafe void StorePair(double* address, Vector64<double> value1, Vector64<double> value2) => StorePair(address, value1, value2); /// <summary> /// A64: STP Dt1, Dt2, [Xn] /// </summary> public static unsafe void StorePair(short* address, Vector64<short> value1, Vector64<short> value2) => StorePair(address, value1, value2); /// <summary> /// A64: STP Dt1, Dt2, [Xn] /// </summary> public static unsafe void StorePair(int* address, Vector64<int> value1, Vector64<int> value2) => StorePair(address, value1, value2); /// <summary> /// A64: STP Dt1, Dt2, [Xn] /// </summary> public static unsafe void StorePair(long* address, Vector64<long> value1, Vector64<long> value2) => StorePair(address, value1, value2); /// <summary> /// A64: STP Dt1, Dt2, [Xn] /// </summary> public static unsafe void StorePair(sbyte* address, Vector64<sbyte> value1, Vector64<sbyte> value2) => StorePair(address, value1, value2); /// <summary> /// A64: STP Dt1, Dt2, [Xn] /// </summary> public static unsafe void StorePair(float* address, Vector64<float> value1, Vector64<float> value2) => StorePair(address, value1, value2); /// <summary> /// A64: STP Dt1, Dt2, [Xn] /// </summary> public static unsafe void StorePair(ushort* address, Vector64<ushort> value1, Vector64<ushort> value2) => StorePair(address, value1, value2); /// <summary> /// A64: STP Dt1, Dt2, [Xn] /// </summary> public static unsafe void StorePair(uint* address, Vector64<uint> value1, Vector64<uint> value2) => StorePair(address, value1, value2); /// <summary> /// A64: STP Dt1, Dt2, [Xn] /// </summary> public static unsafe void StorePair(ulong* address, Vector64<ulong> value1, Vector64<ulong> value2) => StorePair(address, value1, value2); /// <summary> /// A64: STP Qt1, Qt2, [Xn] /// </summary> public static unsafe void StorePair(byte* address, Vector128<byte> value1, Vector128<byte> value2) => StorePair(address, value1, value2); /// <summary> /// A64: STP Qt1, Qt2, [Xn] /// </summary> public static unsafe void StorePair(double* address, Vector128<double> value1, Vector128<double> value2) => StorePair(address, value1, value2); /// <summary> /// A64: STP Qt1, Qt2, [Xn] /// </summary> public static unsafe void StorePair(short* address, Vector128<short> value1, Vector128<short> value2) => StorePair(address, value1, value2); /// <summary> /// A64: STP Qt1, Qt2, [Xn] /// </summary> public static unsafe void StorePair(int* address, Vector128<int> value1, 
Vector128<int> value2) => StorePair(address, value1, value2); /// <summary> /// A64: STP Qt1, Qt2, [Xn] /// </summary> public static unsafe void StorePair(long* address, Vector128<long> value1, Vector128<long> value2) => StorePair(address, value1, value2); /// <summary> /// A64: STP Qt1, Qt2, [Xn] /// </summary> public static unsafe void StorePair(sbyte* address, Vector128<sbyte> value1, Vector128<sbyte> value2) => StorePair(address, value1, value2); /// <summary> /// A64: STP Qt1, Qt2, [Xn] /// </summary> public static unsafe void StorePair(float* address, Vector128<float> value1, Vector128<float> value2) => StorePair(address, value1, value2); /// <summary> /// A64: STP Qt1, Qt2, [Xn] /// </summary> public static unsafe void StorePair(ushort* address, Vector128<ushort> value1, Vector128<ushort> value2) => StorePair(address, value1, value2); /// <summary> /// A64: STP Qt1, Qt2, [Xn] /// </summary> public static unsafe void StorePair(uint* address, Vector128<uint> value1, Vector128<uint> value2) => StorePair(address, value1, value2); /// <summary> /// A64: STP Qt1, Qt2, [Xn] /// </summary> public static unsafe void StorePair(ulong* address, Vector128<ulong> value1, Vector128<ulong> value2) => StorePair(address, value1, value2); /// <summary> /// A64: STNP Dt1, Dt2, [Xn] /// </summary> public static unsafe void StorePairNonTemporal(byte* address, Vector64<byte> value1, Vector64<byte> value2) => StorePairNonTemporal(address, value1, value2); /// <summary> /// A64: STNP Dt1, Dt2, [Xn] /// </summary> public static unsafe void StorePairNonTemporal(double* address, Vector64<double> value1, Vector64<double> value2) => StorePairNonTemporal(address, value1, value2); /// <summary> /// A64: STNP Dt1, Dt2, [Xn] /// </summary> public static unsafe void StorePairNonTemporal(short* address, Vector64<short> value1, Vector64<short> value2) => StorePairNonTemporal(address, value1, value2); /// <summary> /// A64: STNP Dt1, Dt2, [Xn] /// </summary> public static unsafe void StorePairNonTemporal(int* address, Vector64<int> value1, Vector64<int> value2) => StorePairNonTemporal(address, value1, value2); /// <summary> /// A64: STNP Dt1, Dt2, [Xn] /// </summary> public static unsafe void StorePairNonTemporal(long* address, Vector64<long> value1, Vector64<long> value2) => StorePairNonTemporal(address, value1, value2); /// <summary> /// A64: STNP Dt1, Dt2, [Xn] /// </summary> public static unsafe void StorePairNonTemporal(sbyte* address, Vector64<sbyte> value1, Vector64<sbyte> value2) => StorePairNonTemporal(address, value1, value2); /// <summary> /// A64: STNP Dt1, Dt2, [Xn] /// </summary> public static unsafe void StorePairNonTemporal(float* address, Vector64<float> value1, Vector64<float> value2) => StorePairNonTemporal(address, value1, value2); /// <summary> /// A64: STNP Dt1, Dt2, [Xn] /// </summary> public static unsafe void StorePairNonTemporal(ushort* address, Vector64<ushort> value1, Vector64<ushort> value2) => StorePairNonTemporal(address, value1, value2); /// <summary> /// A64: STNP Dt1, Dt2, [Xn] /// </summary> public static unsafe void StorePairNonTemporal(uint* address, Vector64<uint> value1, Vector64<uint> value2) => StorePairNonTemporal(address, value1, value2); /// <summary> /// A64: STNP Dt1, Dt2, [Xn] /// </summary> public static unsafe void StorePairNonTemporal(ulong* address, Vector64<ulong> value1, Vector64<ulong> value2) => StorePairNonTemporal(address, value1, value2); /// <summary> /// A64: STNP Qt1, Qt2, [Xn] /// </summary> public static unsafe void StorePairNonTemporal(byte* address, 
Vector128<byte> value1, Vector128<byte> value2) => StorePairNonTemporal(address, value1, value2); /// <summary> /// A64: STNP Qt1, Qt2, [Xn] /// </summary> public static unsafe void StorePairNonTemporal(double* address, Vector128<double> value1, Vector128<double> value2) => StorePairNonTemporal(address, value1, value2); /// <summary> /// A64: STNP Qt1, Qt2, [Xn] /// </summary> public static unsafe void StorePairNonTemporal(short* address, Vector128<short> value1, Vector128<short> value2) => StorePairNonTemporal(address, value1, value2); /// <summary> /// A64: STNP Qt1, Qt2, [Xn] /// </summary> public static unsafe void StorePairNonTemporal(int* address, Vector128<int> value1, Vector128<int> value2) => StorePairNonTemporal(address, value1, value2); /// <summary> /// A64: STNP Qt1, Qt2, [Xn] /// </summary> public static unsafe void StorePairNonTemporal(long* address, Vector128<long> value1, Vector128<long> value2) => StorePairNonTemporal(address, value1, value2); /// <summary> /// A64: STNP Qt1, Qt2, [Xn] /// </summary> public static unsafe void StorePairNonTemporal(sbyte* address, Vector128<sbyte> value1, Vector128<sbyte> value2) => StorePairNonTemporal(address, value1, value2); /// <summary> /// A64: STNP Qt1, Qt2, [Xn] /// </summary> public static unsafe void StorePairNonTemporal(float* address, Vector128<float> value1, Vector128<float> value2) => StorePairNonTemporal(address, value1, value2); /// <summary> /// A64: STNP Qt1, Qt2, [Xn] /// </summary> public static unsafe void StorePairNonTemporal(ushort* address, Vector128<ushort> value1, Vector128<ushort> value2) => StorePairNonTemporal(address, value1, value2); /// <summary> /// A64: STNP Qt1, Qt2, [Xn] /// </summary> public static unsafe void StorePairNonTemporal(uint* address, Vector128<uint> value1, Vector128<uint> value2) => StorePairNonTemporal(address, value1, value2); /// <summary> /// A64: STNP Qt1, Qt2, [Xn] /// </summary> public static unsafe void StorePairNonTemporal(ulong* address, Vector128<ulong> value1, Vector128<ulong> value2) => StorePairNonTemporal(address, value1, value2); /// <summary> /// A64: STP St1, St2, [Xn] /// </summary> public static unsafe void StorePairScalar(int* address, Vector64<int> value1, Vector64<int> value2) => StorePairScalar(address, value1, value2); /// <summary> /// A64: STP St1, St2, [Xn] /// </summary> public static unsafe void StorePairScalar(float* address, Vector64<float> value1, Vector64<float> value2) => StorePairScalar(address, value1, value2); /// <summary> /// A64: STP St1, St2, [Xn] /// </summary> public static unsafe void StorePairScalar(uint* address, Vector64<uint> value1, Vector64<uint> value2) => StorePairScalar(address, value1, value2); /// <summary> /// A64: STNP St1, St2, [Xn] /// </summary> public static unsafe void StorePairScalarNonTemporal(int* address, Vector64<int> value1, Vector64<int> value2) => StorePairScalarNonTemporal(address, value1, value2); /// <summary> /// A64: STNP St1, St2, [Xn] /// </summary> public static unsafe void StorePairScalarNonTemporal(float* address, Vector64<float> value1, Vector64<float> value2) => StorePairScalarNonTemporal(address, value1, value2); /// <summary> /// A64: STNP St1, St2, [Xn] /// </summary> public static unsafe void StorePairScalarNonTemporal(uint* address, Vector64<uint> value1, Vector64<uint> value2) => StorePairScalarNonTemporal(address, value1, value2); /// <summary> /// float64x2_t vsubq_f64 (float64x2_t a, float64x2_t b) /// A64: FSUB Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<double> 
Subtract(Vector128<double> left, Vector128<double> right) => Subtract(left, right); /// <summary> /// uint8_t vqsubb_u8 (uint8_t a, uint8_t b) /// A64: UQSUB Bd, Bn, Bm /// </summary> public static Vector64<byte> SubtractSaturateScalar(Vector64<byte> left, Vector64<byte> right) => SubtractSaturateScalar(left, right); /// <summary> /// int16_t vqsubh_s16 (int16_t a, int16_t b) /// A64: SQSUB Hd, Hn, Hm /// </summary> public static Vector64<short> SubtractSaturateScalar(Vector64<short> left, Vector64<short> right) => SubtractSaturateScalar(left, right); /// <summary> /// int32_t vqsubs_s32 (int32_t a, int32_t b) /// A64: SQSUB Sd, Sn, Sm /// </summary> public static Vector64<int> SubtractSaturateScalar(Vector64<int> left, Vector64<int> right) => SubtractSaturateScalar(left, right); /// <summary> /// int8_t vqsubb_s8 (int8_t a, int8_t b) /// A64: SQSUB Bd, Bn, Bm /// </summary> public static Vector64<sbyte> SubtractSaturateScalar(Vector64<sbyte> left, Vector64<sbyte> right) => SubtractSaturateScalar(left, right); /// <summary> /// uint16_t vqsubh_u16 (uint16_t a, uint16_t b) /// A64: UQSUB Hd, Hn, Hm /// </summary> public static Vector64<ushort> SubtractSaturateScalar(Vector64<ushort> left, Vector64<ushort> right) => SubtractSaturateScalar(left, right); /// <summary> /// uint32_t vqsubs_u32 (uint32_t a, uint32_t b) /// A64: UQSUB Sd, Sn, Sm /// </summary> public static Vector64<uint> SubtractSaturateScalar(Vector64<uint> left, Vector64<uint> right) => SubtractSaturateScalar(left, right); /// <summary> /// uint8x8_t vrbit_u8 (uint8x8_t a) /// A64: RBIT Vd.8B, Vn.8B /// </summary> public static Vector64<byte> ReverseElementBits(Vector64<byte> value) => ReverseElementBits(value); /// <summary> /// int8x8_t vrbit_s8 (int8x8_t a) /// A64: RBIT Vd.8B, Vn.8B /// </summary> public static Vector64<sbyte> ReverseElementBits(Vector64<sbyte> value) => ReverseElementBits(value); /// <summary> /// uint8x16_t vrbitq_u8 (uint8x16_t a) /// A64: RBIT Vd.16B, Vn.16B /// </summary> public static Vector128<byte> ReverseElementBits(Vector128<byte> value) => ReverseElementBits(value); /// <summary> /// int8x16_t vrbitq_s8 (int8x16_t a) /// A64: RBIT Vd.16B, Vn.16B /// </summary> public static Vector128<sbyte> ReverseElementBits(Vector128<sbyte> value) => ReverseElementBits(value); /// <summary> /// uint8x8_t vtrn1_u8(uint8x8_t a, uint8x8_t b) /// A64: TRN1 Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> TransposeEven(Vector64<byte> left, Vector64<byte> right) => TransposeEven(left, right); /// <summary> /// int16x4_t vtrn1_s16(int16x4_t a, int16x4_t b) /// A64: TRN1 Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<short> TransposeEven(Vector64<short> left, Vector64<short> right) => TransposeEven(left, right); /// <summary> /// int32x2_t vtrn1_s32(int32x2_t a, int32x2_t b) /// A64: TRN1 Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<int> TransposeEven(Vector64<int> left, Vector64<int> right) => TransposeEven(left, right); /// <summary> /// int8x8_t vtrn1_s8(int8x8_t a, int8x8_t b) /// A64: TRN1 Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> TransposeEven(Vector64<sbyte> left, Vector64<sbyte> right) => TransposeEven(left, right); /// <summary> /// float32x2_t vtrn1_f32(float32x2_t a, float32x2_t b) /// A64: TRN1 Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<float> TransposeEven(Vector64<float> left, Vector64<float> right) => TransposeEven(left, right); /// <summary> /// uint16x4_t vtrn1_u16(uint16x4_t a, uint16x4_t b) /// A64: TRN1 Vd.4H, Vn.4H, Vm.4H 
/// </summary> public static Vector64<ushort> TransposeEven(Vector64<ushort> left, Vector64<ushort> right) => TransposeEven(left, right); /// <summary> /// uint32x2_t vtrn1_u32(uint32x2_t a, uint32x2_t b) /// A64: TRN1 Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<uint> TransposeEven(Vector64<uint> left, Vector64<uint> right) => TransposeEven(left, right); /// <summary> /// uint8x16_t vtrn1q_u8(uint8x16_t a, uint8x16_t b) /// A64: TRN1 Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> TransposeEven(Vector128<byte> left, Vector128<byte> right) => TransposeEven(left, right); /// <summary> /// float64x2_t vtrn1q_f64(float64x2_t a, float64x2_t b) /// A64: TRN1 Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<double> TransposeEven(Vector128<double> left, Vector128<double> right) => TransposeEven(left, right); /// <summary> /// int16x8_t vtrn1q_s16(int16x8_t a, int16x8_t b) /// A64: TRN1 Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<short> TransposeEven(Vector128<short> left, Vector128<short> right) => TransposeEven(left, right); /// <summary> /// int32x4_t vtrn1q_s32(int32x4_t a, int32x4_t b) /// A64: TRN1 Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<int> TransposeEven(Vector128<int> left, Vector128<int> right) => TransposeEven(left, right); /// <summary> /// int64x2_t vtrn1q_s64(int64x2_t a, int64x2_t b) /// A64: TRN1 Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<long> TransposeEven(Vector128<long> left, Vector128<long> right) => TransposeEven(left, right); /// <summary> /// int8x16_t vtrn1q_u8(int8x16_t a, int8x16_t b) /// A64: TRN1 Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> TransposeEven(Vector128<sbyte> left, Vector128<sbyte> right) => TransposeEven(left, right); /// <summary> /// float32x4_t vtrn1q_f32(float32x4_t a, float32x4_t b) /// A64: TRN1 Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<float> TransposeEven(Vector128<float> left, Vector128<float> right) => TransposeEven(left, right); /// <summary> /// uint16x8_t vtrn1q_u16(uint16x8_t a, uint16x8_t b) /// A64: TRN1 Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<ushort> TransposeEven(Vector128<ushort> left, Vector128<ushort> right) => TransposeEven(left, right); /// <summary> /// uint32x4_t vtrn1q_u32(uint32x4_t a, uint32x4_t b) /// A64: TRN1 Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<uint> TransposeEven(Vector128<uint> left, Vector128<uint> right) => TransposeEven(left, right); /// <summary> /// uint64x2_t vtrn1q_u64(uint64x2_t a, uint64x2_t b) /// A64: TRN1 Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<ulong> TransposeEven(Vector128<ulong> left, Vector128<ulong> right) => TransposeEven(left, right); /// <summary> /// uint8x8_t vtrn2_u8(uint8x8_t a, uint8x8_t b) /// A64: TRN2 Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> TransposeOdd(Vector64<byte> left, Vector64<byte> right) => TransposeOdd(left, right); /// <summary> /// int16x4_t vtrn2_s16(int16x4_t a, int16x4_t b) /// A64: TRN2 Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<short> TransposeOdd(Vector64<short> left, Vector64<short> right) => TransposeOdd(left, right); /// <summary> /// int32x2_t vtrn2_s32(int32x2_t a, int32x2_t b) /// A64: TRN2 Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<int> TransposeOdd(Vector64<int> left, Vector64<int> right) => TransposeOdd(left, right); /// <summary> /// int8x8_t vtrn2_s8(int8x8_t a, int8x8_t b) /// A64: TRN2 Vd.8B, Vn.8B, Vm.8B /// </summary> 
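// Example (illustrative sketch, not part of the API surface above): TRN1/TRN2
// interleave the even- and odd-indexed lanes of the two sources, so for the
// hypothetical inputs below:
//
//     Vector64<ushort> l = Vector64.Create((ushort)0, 1, 2, 3);
//     Vector64<ushort> r = Vector64.Create((ushort)4, 5, 6, 7);
//     Vector64<ushort> even = AdvSimd.Arm64.TransposeEven(l, r); // {0, 4, 2, 6} (TRN1)
//     Vector64<ushort> odd  = AdvSimd.Arm64.TransposeOdd(l, r);  // {1, 5, 3, 7} (TRN2)
//
// Viewing the two inputs as the rows of a matrix of 2x2 element blocks, the
// pair of results is the transposed matrix, which gives these helpers their name.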
public static Vector64<sbyte> TransposeOdd(Vector64<sbyte> left, Vector64<sbyte> right) => TransposeOdd(left, right);
/// <summary>
/// float32x2_t vtrn2_f32(float32x2_t a, float32x2_t b)
/// A64: TRN2 Vd.2S, Vn.2S, Vm.2S
/// </summary>
public static Vector64<float> TransposeOdd(Vector64<float> left, Vector64<float> right) => TransposeOdd(left, right);
/// <summary>
/// uint16x4_t vtrn2_u16(uint16x4_t a, uint16x4_t b)
/// A64: TRN2 Vd.4H, Vn.4H, Vm.4H
/// </summary>
public static Vector64<ushort> TransposeOdd(Vector64<ushort> left, Vector64<ushort> right) => TransposeOdd(left, right);
/// <summary>
/// uint32x2_t vtrn2_u32(uint32x2_t a, uint32x2_t b)
/// A64: TRN2 Vd.2S, Vn.2S, Vm.2S
/// </summary>
public static Vector64<uint> TransposeOdd(Vector64<uint> left, Vector64<uint> right) => TransposeOdd(left, right);
/// <summary>
/// uint8x16_t vtrn2q_u8(uint8x16_t a, uint8x16_t b)
/// A64: TRN2 Vd.16B, Vn.16B, Vm.16B
/// </summary>
public static Vector128<byte> TransposeOdd(Vector128<byte> left, Vector128<byte> right) => TransposeOdd(left, right);
/// <summary>
/// float64x2_t vtrn2q_f64(float64x2_t a, float64x2_t b)
/// A64: TRN2 Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<double> TransposeOdd(Vector128<double> left, Vector128<double> right) => TransposeOdd(left, right);
/// <summary>
/// int16x8_t vtrn2q_s16(int16x8_t a, int16x8_t b)
/// A64: TRN2 Vd.8H, Vn.8H, Vm.8H
/// </summary>
public static Vector128<short> TransposeOdd(Vector128<short> left, Vector128<short> right) => TransposeOdd(left, right);
/// <summary>
/// int32x4_t vtrn2q_s32(int32x4_t a, int32x4_t b)
/// A64: TRN2 Vd.4S, Vn.4S, Vm.4S
/// </summary>
public static Vector128<int> TransposeOdd(Vector128<int> left, Vector128<int> right) => TransposeOdd(left, right);
/// <summary>
/// int64x2_t vtrn2q_s64(int64x2_t a, int64x2_t b)
/// A64: TRN2 Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<long> TransposeOdd(Vector128<long> left, Vector128<long> right) => TransposeOdd(left, right);
/// <summary>
/// int8x16_t vtrn2q_s8(int8x16_t a, int8x16_t b)
/// A64: TRN2 Vd.16B, Vn.16B, Vm.16B
/// </summary>
public static Vector128<sbyte> TransposeOdd(Vector128<sbyte> left, Vector128<sbyte> right) => TransposeOdd(left, right);
/// <summary>
/// float32x4_t vtrn2q_f32(float32x4_t a, float32x4_t b)
/// A64: TRN2 Vd.4S, Vn.4S, Vm.4S
/// </summary>
public static Vector128<float> TransposeOdd(Vector128<float> left, Vector128<float> right) => TransposeOdd(left, right);
/// <summary>
/// uint16x8_t vtrn2q_u16(uint16x8_t a, uint16x8_t b)
/// A64: TRN2 Vd.8H, Vn.8H, Vm.8H
/// </summary>
public static Vector128<ushort> TransposeOdd(Vector128<ushort> left, Vector128<ushort> right) => TransposeOdd(left, right);
/// <summary>
/// uint32x4_t vtrn2q_u32(uint32x4_t a, uint32x4_t b)
/// A64: TRN2 Vd.4S, Vn.4S, Vm.4S
/// </summary>
public static Vector128<uint> TransposeOdd(Vector128<uint> left, Vector128<uint> right) => TransposeOdd(left, right);
/// <summary>
/// uint64x2_t vtrn2q_u64(uint64x2_t a, uint64x2_t b)
/// A64: TRN2 Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<ulong> TransposeOdd(Vector128<ulong> left, Vector128<ulong> right) => TransposeOdd(left, right);
/// <summary>
/// uint8x8_t vuzp1_u8(uint8x8_t a, uint8x8_t b)
/// A64: UZP1 Vd.8B, Vn.8B, Vm.8B
/// </summary>
public static Vector64<byte> UnzipEven(Vector64<byte> left, Vector64<byte> right) => UnzipEven(left, right);
/// <summary>
/// int16x4_t vuzp1_s16(int16x4_t a, int16x4_t b)
/// A64: UZP1 Vd.4H, Vn.4H, Vm.4H
/// </summary>
public static
Vector64<short> UnzipEven(Vector64<short> left, Vector64<short> right) => UnzipEven(left, right); /// <summary> /// int32x2_t vuzp1_s32(int32x2_t a, int32x2_t b) /// A64: UZP1 Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<int> UnzipEven(Vector64<int> left, Vector64<int> right) => UnzipEven(left, right); /// <summary> /// int8x8_t vuzp1_s8(int8x8_t a, int8x8_t b) /// A64: UZP1 Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> UnzipEven(Vector64<sbyte> left, Vector64<sbyte> right) => UnzipEven(left, right); /// <summary> /// float32x2_t vuzp1_f32(float32x2_t a, float32x2_t b) /// A64: UZP1 Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<float> UnzipEven(Vector64<float> left, Vector64<float> right) => UnzipEven(left, right); /// <summary> /// uint16x4_t vuzp1_u16(uint16x4_t a, uint16x4_t b) /// A64: UZP1 Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<ushort> UnzipEven(Vector64<ushort> left, Vector64<ushort> right) => UnzipEven(left, right); /// <summary> /// uint32x2_t vuzp1_u32(uint32x2_t a, uint32x2_t b) /// A64: UZP1 Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<uint> UnzipEven(Vector64<uint> left, Vector64<uint> right) => UnzipEven(left, right); /// <summary> /// uint8x16_t vuzp1q_u8(uint8x16_t a, uint8x16_t b) /// A64: UZP1 Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> UnzipEven(Vector128<byte> left, Vector128<byte> right) => UnzipEven(left, right); /// <summary> /// float64x2_t vuzp1q_f64(float64x2_t a, float64x2_t b) /// A64: UZP1 Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<double> UnzipEven(Vector128<double> left, Vector128<double> right) => UnzipEven(left, right); /// <summary> /// int16x8_t vuzp1q_s16(int16x8_t a, int16x8_t b) /// A64: UZP1 Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<short> UnzipEven(Vector128<short> left, Vector128<short> right) => UnzipEven(left, right); /// <summary> /// int32x4_t vuzp1q_s32(int32x4_t a, int32x4_t b) /// A64: UZP1 Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<int> UnzipEven(Vector128<int> left, Vector128<int> right) => UnzipEven(left, right); /// <summary> /// int64x2_t vuzp1q_s64(int64x2_t a, int64x2_t b) /// A64: UZP1 Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<long> UnzipEven(Vector128<long> left, Vector128<long> right) => UnzipEven(left, right); /// <summary> /// int8x16_t vuzp1q_u8(int8x16_t a, int8x16_t b) /// A64: UZP1 Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> UnzipEven(Vector128<sbyte> left, Vector128<sbyte> right) => UnzipEven(left, right); /// <summary> /// float32x4_t vuzp1q_f32(float32x4_t a, float32x4_t b) /// A64: UZP1 Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<float> UnzipEven(Vector128<float> left, Vector128<float> right) => UnzipEven(left, right); /// <summary> /// uint16x8_t vuzp1q_u16(uint16x8_t a, uint16x8_t b) /// A64: UZP1 Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<ushort> UnzipEven(Vector128<ushort> left, Vector128<ushort> right) => UnzipEven(left, right); /// <summary> /// uint32x4_t vuzp1q_u32(uint32x4_t a, uint32x4_t b) /// A64: UZP1 Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<uint> UnzipEven(Vector128<uint> left, Vector128<uint> right) => UnzipEven(left, right); /// <summary> /// uint64x2_t vuzp1q_u64(uint64x2_t a, uint64x2_t b) /// A64: UZP1 Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<ulong> UnzipEven(Vector128<ulong> left, Vector128<ulong> right) => UnzipEven(left, right); /// <summary> /// 
uint8x8_t vuzp2_u8(uint8x8_t a, uint8x8_t b) /// A64: UZP2 Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> UnzipOdd(Vector64<byte> left, Vector64<byte> right) => UnzipOdd(left, right); /// <summary> /// int16x4_t vuzp2_s16(int16x4_t a, int16x4_t b) /// A64: UZP2 Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<short> UnzipOdd(Vector64<short> left, Vector64<short> right) => UnzipOdd(left, right); /// <summary> /// int32x2_t vuzp2_s32(int32x2_t a, int32x2_t b) /// A64: UZP2 Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<int> UnzipOdd(Vector64<int> left, Vector64<int> right) => UnzipOdd(left, right); /// <summary> /// int8x8_t vuzp2_s8(int8x8_t a, int8x8_t b) /// A64: UZP2 Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> UnzipOdd(Vector64<sbyte> left, Vector64<sbyte> right) => UnzipOdd(left, right); /// <summary> /// float32x2_t vuzp2_f32(float32x2_t a, float32x2_t b) /// A64: UZP2 Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<float> UnzipOdd(Vector64<float> left, Vector64<float> right) => UnzipOdd(left, right); /// <summary> /// uint16x4_t vuzp2_u16(uint16x4_t a, uint16x4_t b) /// A64: UZP2 Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<ushort> UnzipOdd(Vector64<ushort> left, Vector64<ushort> right) => UnzipOdd(left, right); /// <summary> /// uint32x2_t vuzp2_u32(uint32x2_t a, uint32x2_t b) /// A64: UZP2 Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<uint> UnzipOdd(Vector64<uint> left, Vector64<uint> right) => UnzipOdd(left, right); /// <summary> /// uint8x16_t vuzp2q_u8(uint8x16_t a, uint8x16_t b) /// A64: UZP2 Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> UnzipOdd(Vector128<byte> left, Vector128<byte> right) => UnzipOdd(left, right); /// <summary> /// float64x2_t vuzp2q_f64(float64x2_t a, float64x2_t b) /// A64: UZP2 Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<double> UnzipOdd(Vector128<double> left, Vector128<double> right) => UnzipOdd(left, right); /// <summary> /// int16x8_t vuzp2q_s16(int16x8_t a, int16x8_t b) /// A64: UZP2 Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<short> UnzipOdd(Vector128<short> left, Vector128<short> right) => UnzipOdd(left, right); /// <summary> /// int32x4_t vuzp2q_s32(int32x4_t a, int32x4_t b) /// A64: UZP2 Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<int> UnzipOdd(Vector128<int> left, Vector128<int> right) => UnzipOdd(left, right); /// <summary> /// int64x2_t vuzp2q_s64(int64x2_t a, int64x2_t b) /// A64: UZP2 Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<long> UnzipOdd(Vector128<long> left, Vector128<long> right) => UnzipOdd(left, right); /// <summary> /// int8x16_t vuzp2q_u8(int8x16_t a, int8x16_t b) /// A64: UZP2 Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> UnzipOdd(Vector128<sbyte> left, Vector128<sbyte> right) => UnzipOdd(left, right); /// <summary> /// float32x4_t vuzp2q_f32(float32x4_t a, float32x4_t b) /// A64: UZP2 Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<float> UnzipOdd(Vector128<float> left, Vector128<float> right) => UnzipOdd(left, right); /// <summary> /// uint16x8_t vuzp2q_u16(uint16x8_t a, uint16x8_t b) /// A64: UZP2 Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<ushort> UnzipOdd(Vector128<ushort> left, Vector128<ushort> right) => UnzipOdd(left, right); /// <summary> /// uint32x4_t vuzp2q_u32(uint32x4_t a, uint32x4_t b) /// A64: UZP2 Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<uint> UnzipOdd(Vector128<uint> left, 
Vector128<uint> right) => UnzipOdd(left, right);
/// <summary>
/// uint64x2_t vuzp2q_u64(uint64x2_t a, uint64x2_t b)
/// A64: UZP2 Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<ulong> UnzipOdd(Vector128<ulong> left, Vector128<ulong> right) => UnzipOdd(left, right);
/// <summary>
/// uint8x16_t vqtbl1q_u8(uint8x16_t t, uint8x16_t idx)
/// A64: TBL Vd.16B, {Vn.16B}, Vm.16B
/// </summary>
public static Vector128<byte> VectorTableLookup(Vector128<byte> table, Vector128<byte> byteIndexes) => VectorTableLookup(table, byteIndexes);
/// <summary>
/// int8x16_t vqtbl1q_s8(int8x16_t t, uint8x16_t idx)
/// A64: TBL Vd.16B, {Vn.16B}, Vm.16B
/// </summary>
public static Vector128<sbyte> VectorTableLookup(Vector128<sbyte> table, Vector128<sbyte> byteIndexes) => VectorTableLookup(table, byteIndexes);
/// <summary>
/// uint8x16_t vqtbx1q_u8(uint8x16_t r, uint8x16_t t, uint8x16_t idx)
/// A64: TBX Vd.16B, {Vn.16B}, Vm.16B
/// </summary>
public static Vector128<byte> VectorTableLookupExtension(Vector128<byte> defaultValues, Vector128<byte> table, Vector128<byte> byteIndexes) => VectorTableLookupExtension(defaultValues, table, byteIndexes);
/// <summary>
/// int8x16_t vqtbx1q_s8(int8x16_t r, int8x16_t t, uint8x16_t idx)
/// A64: TBX Vd.16B, {Vn.16B}, Vm.16B
/// </summary>
public static Vector128<sbyte> VectorTableLookupExtension(Vector128<sbyte> defaultValues, Vector128<sbyte> table, Vector128<sbyte> byteIndexes) => VectorTableLookupExtension(defaultValues, table, byteIndexes);
/// <summary>
/// uint8x8_t vzip2_u8(uint8x8_t a, uint8x8_t b)
/// A64: ZIP2 Vd.8B, Vn.8B, Vm.8B
/// </summary>
public static Vector64<byte> ZipHigh(Vector64<byte> left, Vector64<byte> right) => ZipHigh(left, right);
/// <summary>
/// int16x4_t vzip2_s16(int16x4_t a, int16x4_t b)
/// A64: ZIP2 Vd.4H, Vn.4H, Vm.4H
/// </summary>
public static Vector64<short> ZipHigh(Vector64<short> left, Vector64<short> right) => ZipHigh(left, right);
/// <summary>
/// int32x2_t vzip2_s32(int32x2_t a, int32x2_t b)
/// A64: ZIP2 Vd.2S, Vn.2S, Vm.2S
/// </summary>
public static Vector64<int> ZipHigh(Vector64<int> left, Vector64<int> right) => ZipHigh(left, right);
/// <summary>
/// int8x8_t vzip2_s8(int8x8_t a, int8x8_t b)
/// A64: ZIP2 Vd.8B, Vn.8B, Vm.8B
/// </summary>
public static Vector64<sbyte> ZipHigh(Vector64<sbyte> left, Vector64<sbyte> right) => ZipHigh(left, right);
/// <summary>
/// float32x2_t vzip2_f32(float32x2_t a, float32x2_t b)
/// A64: ZIP2 Vd.2S, Vn.2S, Vm.2S
/// </summary>
public static Vector64<float> ZipHigh(Vector64<float> left, Vector64<float> right) => ZipHigh(left, right);
/// <summary>
/// uint16x4_t vzip2_u16(uint16x4_t a, uint16x4_t b)
/// A64: ZIP2 Vd.4H, Vn.4H, Vm.4H
/// </summary>
public static Vector64<ushort> ZipHigh(Vector64<ushort> left, Vector64<ushort> right) => ZipHigh(left, right);
/// <summary>
/// uint32x2_t vzip2_u32(uint32x2_t a, uint32x2_t b)
/// A64: ZIP2 Vd.2S, Vn.2S, Vm.2S
/// </summary>
public static Vector64<uint> ZipHigh(Vector64<uint> left, Vector64<uint> right) => ZipHigh(left, right);
/// <summary>
/// uint8x16_t vzip2q_u8(uint8x16_t a, uint8x16_t b)
/// A64: ZIP2 Vd.16B, Vn.16B, Vm.16B
/// </summary>
public static Vector128<byte> ZipHigh(Vector128<byte> left, Vector128<byte> right) => ZipHigh(left, right);
/// <summary>
/// float64x2_t vzip2q_f64(float64x2_t a, float64x2_t b)
/// A64: ZIP2 Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<double> ZipHigh(Vector128<double> left, Vector128<double> right) => ZipHigh(left, right);
/// <summary>
/// int16x8_t vzip2q_s16(int16x8_t
a, int16x8_t b) /// A64: ZIP2 Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<short> ZipHigh(Vector128<short> left, Vector128<short> right) => ZipHigh(left, right); /// <summary> /// int32x4_t vzip2q_s32(int32x4_t a, int32x4_t b) /// A64: ZIP2 Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<int> ZipHigh(Vector128<int> left, Vector128<int> right) => ZipHigh(left, right); /// <summary> /// int64x2_t vzip2q_s64(int64x2_t a, int64x2_t b) /// A64: ZIP2 Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<long> ZipHigh(Vector128<long> left, Vector128<long> right) => ZipHigh(left, right); /// <summary> /// int8x16_t vzip2q_u8(int8x16_t a, int8x16_t b) /// A64: ZIP2 Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> ZipHigh(Vector128<sbyte> left, Vector128<sbyte> right) => ZipHigh(left, right); /// <summary> /// float32x4_t vzip2q_f32(float32x4_t a, float32x4_t b) /// A64: ZIP2 Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<float> ZipHigh(Vector128<float> left, Vector128<float> right) => ZipHigh(left, right); /// <summary> /// uint16x8_t vzip2q_u16(uint16x8_t a, uint16x8_t b) /// A64: ZIP2 Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<ushort> ZipHigh(Vector128<ushort> left, Vector128<ushort> right) => ZipHigh(left, right); /// <summary> /// uint32x4_t vzip2q_u32(uint32x4_t a, uint32x4_t b) /// A64: ZIP2 Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<uint> ZipHigh(Vector128<uint> left, Vector128<uint> right) => ZipHigh(left, right); /// <summary> /// uint64x2_t vzip2q_u64(uint64x2_t a, uint64x2_t b) /// A64: ZIP2 Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<ulong> ZipHigh(Vector128<ulong> left, Vector128<ulong> right) => ZipHigh(left, right); /// <summary> /// uint8x8_t vzip1_u8(uint8x8_t a, uint8x8_t b) /// A64: ZIP1 Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> ZipLow(Vector64<byte> left, Vector64<byte> right) => ZipLow(left, right); /// <summary> /// int16x4_t vzip1_s16(int16x4_t a, int16x4_t b) /// A64: ZIP1 Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<short> ZipLow(Vector64<short> left, Vector64<short> right) => ZipLow(left, right); /// <summary> /// int32x2_t vzip1_s32(int32x2_t a, int32x2_t b) /// A64: ZIP1 Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<int> ZipLow(Vector64<int> left, Vector64<int> right) => ZipLow(left, right); /// <summary> /// int8x8_t vzip1_s8(int8x8_t a, int8x8_t b) /// A64: ZIP1 Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> ZipLow(Vector64<sbyte> left, Vector64<sbyte> right) => ZipLow(left, right); /// <summary> /// float32x2_t vzip1_f32(float32x2_t a, float32x2_t b) /// A64: ZIP1 Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<float> ZipLow(Vector64<float> left, Vector64<float> right) => ZipLow(left, right); /// <summary> /// uint16x4_t vzip1_u16(uint16x4_t a, uint16x4_t b) /// A64: ZIP1 Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<ushort> ZipLow(Vector64<ushort> left, Vector64<ushort> right) => ZipLow(left, right); /// <summary> /// uint32x2_t vzip1_u32(uint32x2_t a, uint32x2_t b) /// A64: ZIP1 Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<uint> ZipLow(Vector64<uint> left, Vector64<uint> right) => ZipLow(left, right); /// <summary> /// uint8x16_t vzip1q_u8(uint8x16_t a, uint8x16_t b) /// A64: ZIP1 Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> ZipLow(Vector128<byte> left, Vector128<byte> right) => ZipLow(left, right); /// <summary> /// float64x2_t 
vzip1q_f64(float64x2_t a, float64x2_t b) /// A64: ZIP1 Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<double> ZipLow(Vector128<double> left, Vector128<double> right) => ZipLow(left, right); /// <summary> /// int16x8_t vzip1q_s16(int16x8_t a, int16x8_t b) /// A64: ZIP1 Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<short> ZipLow(Vector128<short> left, Vector128<short> right) => ZipLow(left, right); /// <summary> /// int32x4_t vzip1q_s32(int32x4_t a, int32x4_t b) /// A64: ZIP1 Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<int> ZipLow(Vector128<int> left, Vector128<int> right) => ZipLow(left, right); /// <summary> /// int64x2_t vzip1q_s64(int64x2_t a, int64x2_t b) /// A64: ZIP1 Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<long> ZipLow(Vector128<long> left, Vector128<long> right) => ZipLow(left, right); /// <summary> /// int8x16_t vzip1q_u8(int8x16_t a, int8x16_t b) /// A64: ZIP1 Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> ZipLow(Vector128<sbyte> left, Vector128<sbyte> right) => ZipLow(left, right); /// <summary> /// float32x4_t vzip1q_f32(float32x4_t a, float32x4_t b) /// A64: ZIP1 Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<float> ZipLow(Vector128<float> left, Vector128<float> right) => ZipLow(left, right); /// <summary> /// uint16x8_t vzip1q_u16(uint16x8_t a, uint16x8_t b) /// A64: ZIP1 Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<ushort> ZipLow(Vector128<ushort> left, Vector128<ushort> right) => ZipLow(left, right); /// <summary> /// uint32x4_t vzip1q_u32(uint32x4_t a, uint32x4_t b) /// A64: ZIP1 Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<uint> ZipLow(Vector128<uint> left, Vector128<uint> right) => ZipLow(left, right); /// <summary> /// uint64x2_t vzip1q_u64(uint64x2_t a, uint64x2_t b) /// A64: ZIP1 Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<ulong> ZipLow(Vector128<ulong> left, Vector128<ulong> right) => ZipLow(left, right); } /// <summary> /// int16x4_t vabs_s16 (int16x4_t a) /// A32: VABS.S16 Dd, Dm /// A64: ABS Vd.4H, Vn.4H /// </summary> public static Vector64<ushort> Abs(Vector64<short> value) => Abs(value); /// <summary> /// int32x2_t vabs_s32 (int32x2_t a) /// A32: VABS.S32 Dd, Dm /// A64: ABS Vd.2S, Vn.2S /// </summary> public static Vector64<uint> Abs(Vector64<int> value) => Abs(value); /// <summary> /// int8x8_t vabs_s8 (int8x8_t a) /// A32: VABS.S8 Dd, Dm /// A64: ABS Vd.8B, Vn.8B /// </summary> public static Vector64<byte> Abs(Vector64<sbyte> value) => Abs(value); /// <summary> /// float32x2_t vabs_f32 (float32x2_t a) /// A32: VABS.F32 Dd, Dm /// A64: FABS Vd.2S, Vn.2S /// </summary> public static Vector64<float> Abs(Vector64<float> value) => Abs(value); /// <summary> /// int16x8_t vabsq_s16 (int16x8_t a) /// A32: VABS.S16 Qd, Qm /// A64: ABS Vd.8H, Vn.8H /// </summary> public static Vector128<ushort> Abs(Vector128<short> value) => Abs(value); /// <summary> /// int32x4_t vabsq_s32 (int32x4_t a) /// A32: VABS.S32 Qd, Qm /// A64: ABS Vd.4S, Vn.4S /// </summary> public static Vector128<uint> Abs(Vector128<int> value) => Abs(value); /// <summary> /// int8x16_t vabsq_s8 (int8x16_t a) /// A32: VABS.S8 Qd, Qm /// A64: ABS Vd.16B, Vn.16B /// </summary> public static Vector128<byte> Abs(Vector128<sbyte> value) => Abs(value); /// <summary> /// float32x4_t vabsq_f32 (float32x4_t a) /// A32: VABS.F32 Qd, Qm /// A64: FABS Vd.4S, Vn.4S /// </summary> public static Vector128<float> Abs(Vector128<float> value) => Abs(value); /// <summary> /// int16x4_t 
vqabs_s16 (int16x4_t a) /// A32: VQABS.S16 Dd, Dm /// A64: SQABS Vd.4H, Vn.4H /// </summary> public static Vector64<short> AbsSaturate(Vector64<short> value) => AbsSaturate(value); /// <summary> /// int32x2_t vqabs_s32 (int32x2_t a) /// A32: VQABS.S32 Dd, Dm /// A64: SQABS Vd.2S, Vn.2S /// </summary> public static Vector64<int> AbsSaturate(Vector64<int> value) => AbsSaturate(value); /// <summary> /// int8x8_t vqabs_s8 (int8x8_t a) /// A32: VQABS.S8 Dd, Dm /// A64: SQABS Vd.8B, Vn.8B /// </summary> public static Vector64<sbyte> AbsSaturate(Vector64<sbyte> value) => AbsSaturate(value); /// <summary> /// int16x8_t vqabsq_s16 (int16x8_t a) /// A32: VQABS.S16 Qd, Qm /// A64: SQABS Vd.8H, Vn.8H /// </summary> public static Vector128<short> AbsSaturate(Vector128<short> value) => AbsSaturate(value); /// <summary> /// int32x4_t vqabsq_s32 (int32x4_t a) /// A32: VQABS.S32 Qd, Qm /// A64: SQABS Vd.4S, Vn.4S /// </summary> public static Vector128<int> AbsSaturate(Vector128<int> value) => AbsSaturate(value); /// <summary> /// int8x16_t vqabsq_s8 (int8x16_t a) /// A32: VQABS.S8 Qd, Qm /// A64: SQABS Vd.16B, Vn.16B /// </summary> public static Vector128<sbyte> AbsSaturate(Vector128<sbyte> value) => AbsSaturate(value); /// <summary> /// float64x1_t vabs_f64 (float64x1_t a) /// A32: VABS.F64 Dd, Dm /// A64: FABS Dd, Dn /// </summary> public static Vector64<double> AbsScalar(Vector64<double> value) => AbsScalar(value); /// <summary> /// float32_t vabss_f32 (float32_t a) /// A32: VABS.F32 Sd, Sm /// A64: FABS Sd, Sn /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. /// </summary> public static Vector64<float> AbsScalar(Vector64<float> value) => AbsScalar(value); /// <summary> /// uint32x2_t vcagt_f32 (float32x2_t a, float32x2_t b) /// A32: VACGT.F32 Dd, Dn, Dm /// A64: FACGT Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<float> AbsoluteCompareGreaterThan(Vector64<float> left, Vector64<float> right) => AbsoluteCompareGreaterThan(left, right); /// <summary> /// uint32x4_t vcagtq_f32 (float32x4_t a, float32x4_t b) /// A32: VACGT.F32 Qd, Qn, Qm /// A64: FACGT Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<float> AbsoluteCompareGreaterThan(Vector128<float> left, Vector128<float> right) => AbsoluteCompareGreaterThan(left, right); /// <summary> /// uint32x2_t vcage_f32 (float32x2_t a, float32x2_t b) /// A32: VACGE.F32 Dd, Dn, Dm /// A64: FACGE Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<float> AbsoluteCompareGreaterThanOrEqual(Vector64<float> left, Vector64<float> right) => AbsoluteCompareGreaterThanOrEqual(left, right); /// <summary> /// uint32x4_t vcageq_f32 (float32x4_t a, float32x4_t b) /// A32: VACGE.F32 Qd, Qn, Qm /// A64: FACGE Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<float> AbsoluteCompareGreaterThanOrEqual(Vector128<float> left, Vector128<float> right) => AbsoluteCompareGreaterThanOrEqual(left, right); /// <summary> /// uint32x2_t vcalt_f32 (float32x2_t a, float32x2_t b) /// A32: VACLT.F32 Dd, Dn, Dm /// A64: FACGT Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<float> AbsoluteCompareLessThan(Vector64<float> left, Vector64<float> right) => AbsoluteCompareLessThan(left, right); /// <summary> /// uint32x4_t vcaltq_f32 (float32x4_t a, float32x4_t b) /// A32: VACLT.F32 Qd, Qn, Qm /// A64: FACGT Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<float> AbsoluteCompareLessThan(Vector128<float> left, Vector128<float> right) => AbsoluteCompareLessThan(left, right); 
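// Example (illustrative sketch, not part of the API surface above): like the other
// Neon comparisons, the AbsoluteCompare* helpers return a per-lane mask (all bits
// set where |left| op |right| holds, zero otherwise) carried in a float container,
// so the result composes directly with BitwiseSelect. 'x' and 'threshold' are
// hypothetical locals:
//
//     Vector128<float> mask = AdvSimd.AbsoluteCompareGreaterThan(x, threshold);
//     Vector128<float> kept = AdvSimd.BitwiseSelect(mask, x, Vector128<float>.Zero); // |x| > |threshold| ? x : 0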
/// <summary> /// uint32x2_t vcale_f32 (float32x2_t a, float32x2_t b) /// A32: VACLE.F32 Dd, Dn, Dm /// A64: FACGE Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<float> AbsoluteCompareLessThanOrEqual(Vector64<float> left, Vector64<float> right) => AbsoluteCompareLessThanOrEqual(left, right); /// <summary> /// uint32x4_t vcaleq_f32 (float32x4_t a, float32x4_t b) /// A32: VACLE.F32 Qd, Qn, Qm /// A64: FACGE Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<float> AbsoluteCompareLessThanOrEqual(Vector128<float> left, Vector128<float> right) => AbsoluteCompareLessThanOrEqual(left, right); /// <summary> /// uint8x8_t vabd_u8 (uint8x8_t a, uint8x8_t b) /// A32: VABD.U8 Dd, Dn, Dm /// A64: UABD Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> AbsoluteDifference(Vector64<byte> left, Vector64<byte> right) => AbsoluteDifference(left, right); /// <summary> /// int16x4_t vabd_s16 (int16x4_t a, int16x4_t b) /// A32: VABD.S16 Dd, Dn, Dm /// A64: SABD Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<ushort> AbsoluteDifference(Vector64<short> left, Vector64<short> right) => AbsoluteDifference(left, right); /// <summary> /// int32x2_t vabd_s32 (int32x2_t a, int32x2_t b) /// A32: VABD.S32 Dd, Dn, Dm /// A64: SABD Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<uint> AbsoluteDifference(Vector64<int> left, Vector64<int> right) => AbsoluteDifference(left, right); /// <summary> /// int8x8_t vabd_s8 (int8x8_t a, int8x8_t b) /// A32: VABD.S8 Dd, Dn, Dm /// A64: SABD Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> AbsoluteDifference(Vector64<sbyte> left, Vector64<sbyte> right) => AbsoluteDifference(left, right); /// <summary> /// float32x2_t vabd_f32 (float32x2_t a, float32x2_t b) /// A32: VABD.F32 Dd, Dn, Dm /// A64: FABD Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<float> AbsoluteDifference(Vector64<float> left, Vector64<float> right) => AbsoluteDifference(left, right); /// <summary> /// uint16x4_t vabd_u16 (uint16x4_t a, uint16x4_t b) /// A32: VABD.U16 Dd, Dn, Dm /// A64: UABD Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<ushort> AbsoluteDifference(Vector64<ushort> left, Vector64<ushort> right) => AbsoluteDifference(left, right); /// <summary> /// uint32x2_t vabd_u32 (uint32x2_t a, uint32x2_t b) /// A32: VABD.U32 Dd, Dn, Dm /// A64: UABD Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<uint> AbsoluteDifference(Vector64<uint> left, Vector64<uint> right) => AbsoluteDifference(left, right); /// <summary> /// uint8x16_t vabdq_u8 (uint8x16_t a, uint8x16_t b) /// A32: VABD.U8 Qd, Qn, Qm /// A64: UABD Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> AbsoluteDifference(Vector128<byte> left, Vector128<byte> right) => AbsoluteDifference(left, right); /// <summary> /// int16x8_t vabdq_s16 (int16x8_t a, int16x8_t b) /// A32: VABD.S16 Qd, Qn, Qm /// A64: SABD Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<ushort> AbsoluteDifference(Vector128<short> left, Vector128<short> right) => AbsoluteDifference(left, right); /// <summary> /// int32x4_t vabdq_s32 (int32x4_t a, int32x4_t b) /// A32: VABD.S32 Qd, Qn, Qm /// A64: SABD Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<uint> AbsoluteDifference(Vector128<int> left, Vector128<int> right) => AbsoluteDifference(left, right); /// <summary> /// int8x16_t vabdq_s8 (int8x16_t a, int8x16_t b) /// A32: VABD.S8 Qd, Qn, Qm /// A64: SABD Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> AbsoluteDifference(Vector128<sbyte> left, 
Vector128<sbyte> right) => AbsoluteDifference(left, right); /// <summary> /// float32x4_t vabdq_f32 (float32x4_t a, float32x4_t b) /// A32: VABD.F32 Qd, Qn, Qm /// A64: FABD Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<float> AbsoluteDifference(Vector128<float> left, Vector128<float> right) => AbsoluteDifference(left, right); /// <summary> /// uint16x8_t vabdq_u16 (uint16x8_t a, uint16x8_t b) /// A32: VABD.U16 Qd, Qn, Qm /// A64: UABD Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<ushort> AbsoluteDifference(Vector128<ushort> left, Vector128<ushort> right) => AbsoluteDifference(left, right); /// <summary> /// uint32x4_t vabdq_u32 (uint32x4_t a, uint32x4_t b) /// A32: VABD.U32 Qd, Qn, Qm /// A64: UABD Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<uint> AbsoluteDifference(Vector128<uint> left, Vector128<uint> right) => AbsoluteDifference(left, right); /// <summary> /// uint8x8_t vaba_u8 (uint8x8_t a, uint8x8_t b, uint8x8_t c) /// A32: VABA.U8 Dd, Dn, Dm /// A64: UABA Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> AbsoluteDifferenceAdd(Vector64<byte> addend, Vector64<byte> left, Vector64<byte> right) => AbsoluteDifferenceAdd(addend, left, right); /// <summary> /// int16x4_t vaba_s16 (int16x4_t a, int16x4_t b, int16x4_t c) /// A32: VABA.S16 Dd, Dn, Dm /// A64: SABA Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<short> AbsoluteDifferenceAdd(Vector64<short> addend, Vector64<short> left, Vector64<short> right) => AbsoluteDifferenceAdd(addend, left, right); /// <summary> /// int32x2_t vaba_s32 (int32x2_t a, int32x2_t b, int32x2_t c) /// A32: VABA.S32 Dd, Dn, Dm /// A64: SABA Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<int> AbsoluteDifferenceAdd(Vector64<int> addend, Vector64<int> left, Vector64<int> right) => AbsoluteDifferenceAdd(addend, left, right); /// <summary> /// int8x8_t vaba_s8 (int8x8_t a, int8x8_t b, int8x8_t c) /// A32: VABA.S8 Dd, Dn, Dm /// A64: SABA Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> AbsoluteDifferenceAdd(Vector64<sbyte> addend, Vector64<sbyte> left, Vector64<sbyte> right) => AbsoluteDifferenceAdd(addend, left, right); /// <summary> /// uint16x4_t vaba_u16 (uint16x4_t a, uint16x4_t b, uint16x4_t c) /// A32: VABA.U16 Dd, Dn, Dm /// A64: UABA Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<ushort> AbsoluteDifferenceAdd(Vector64<ushort> addend, Vector64<ushort> left, Vector64<ushort> right) => AbsoluteDifferenceAdd(addend, left, right); /// <summary> /// uint32x2_t vaba_u32 (uint32x2_t a, uint32x2_t b, uint32x2_t c) /// A32: VABA.U32 Dd, Dn, Dm /// A64: UABA Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<uint> AbsoluteDifferenceAdd(Vector64<uint> addend, Vector64<uint> left, Vector64<uint> right) => AbsoluteDifferenceAdd(addend, left, right); /// <summary> /// uint8x16_t vabaq_u8 (uint8x16_t a, uint8x16_t b, uint8x16_t c) /// A32: VABA.U8 Qd, Qn, Qm /// A64: UABA Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> AbsoluteDifferenceAdd(Vector128<byte> addend, Vector128<byte> left, Vector128<byte> right) => AbsoluteDifferenceAdd(addend, left, right); /// <summary> /// int16x8_t vabaq_s16 (int16x8_t a, int16x8_t b, int16x8_t c) /// A32: VABA.S16 Qd, Qn, Qm /// A64: SABA Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<short> AbsoluteDifferenceAdd(Vector128<short> addend, Vector128<short> left, Vector128<short> right) => AbsoluteDifferenceAdd(addend, left, right); /// <summary> /// int32x4_t vabaq_s32 (int32x4_t a, int32x4_t b, 
int32x4_t c) /// A32: VABA.S32 Qd, Qn, Qm /// A64: SABA Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<int> AbsoluteDifferenceAdd(Vector128<int> addend, Vector128<int> left, Vector128<int> right) => AbsoluteDifferenceAdd(addend, left, right); /// <summary> /// int8x16_t vabaq_s8 (int8x16_t a, int8x16_t b, int8x16_t c) /// A32: VABA.S8 Qd, Qn, Qm /// A64: SABA Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> AbsoluteDifferenceAdd(Vector128<sbyte> addend, Vector128<sbyte> left, Vector128<sbyte> right) => AbsoluteDifferenceAdd(addend, left, right); /// <summary> /// uint16x8_t vabaq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t c) /// A32: VABA.U16 Qd, Qn, Qm /// A64: UABA Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<ushort> AbsoluteDifferenceAdd(Vector128<ushort> addend, Vector128<ushort> left, Vector128<ushort> right) => AbsoluteDifferenceAdd(addend, left, right); /// <summary> /// uint32x4_t vabaq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t c) /// A32: VABA.U32 Qd, Qn, Qm /// A64: UABA Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<uint> AbsoluteDifferenceAdd(Vector128<uint> addend, Vector128<uint> left, Vector128<uint> right) => AbsoluteDifferenceAdd(addend, left, right); /// <summary> /// uint16x8_t vabdl_u8 (uint8x8_t a, uint8x8_t b) /// A32: VABDL.U8 Qd, Dn, Dm /// A64: UABDL Vd.8H, Vn.8B, Vm.8B /// </summary> public static Vector128<ushort> AbsoluteDifferenceWideningLower(Vector64<byte> left, Vector64<byte> right) => AbsoluteDifferenceWideningLower(left, right); /// <summary> /// int32x4_t vabdl_s16 (int16x4_t a, int16x4_t b) /// A32: VABDL.S16 Qd, Dn, Dm /// A64: SABDL Vd.4S, Vn.4H, Vm.4H /// </summary> public static Vector128<uint> AbsoluteDifferenceWideningLower(Vector64<short> left, Vector64<short> right) => AbsoluteDifferenceWideningLower(left, right); /// <summary> /// int64x2_t vabdl_s32 (int32x2_t a, int32x2_t b) /// A32: VABDL.S32 Qd, Dn, Dm /// A64: SABDL Vd.2D, Vn.2S, Vm.2S /// </summary> public static Vector128<ulong> AbsoluteDifferenceWideningLower(Vector64<int> left, Vector64<int> right) => AbsoluteDifferenceWideningLower(left, right); /// <summary> /// int16x8_t vabdl_s8 (int8x8_t a, int8x8_t b) /// A32: VABDL.S8 Qd, Dn, Dm /// A64: SABDL Vd.8H, Vn.8B, Vm.8B /// </summary> public static Vector128<ushort> AbsoluteDifferenceWideningLower(Vector64<sbyte> left, Vector64<sbyte> right) => AbsoluteDifferenceWideningLower(left, right); /// <summary> /// uint32x4_t vabdl_u16 (uint16x4_t a, uint16x4_t b) /// A32: VABDL.U16 Qd, Dn, Dm /// A64: UABDL Vd.4S, Vn.4H, Vm.4H /// </summary> public static Vector128<uint> AbsoluteDifferenceWideningLower(Vector64<ushort> left, Vector64<ushort> right) => AbsoluteDifferenceWideningLower(left, right); /// <summary> /// uint64x2_t vabdl_u32 (uint32x2_t a, uint32x2_t b) /// A32: VABDL.U32 Qd, Dn, Dm /// A64: UABDL Vd.2D, Vn.2S, Vm.2S /// </summary> public static Vector128<ulong> AbsoluteDifferenceWideningLower(Vector64<uint> left, Vector64<uint> right) => AbsoluteDifferenceWideningLower(left, right); /// <summary> /// uint16x8_t vabal_u8 (uint16x8_t a, uint8x8_t b, uint8x8_t c) /// A32: VABAL.U8 Qd, Dn, Dm /// A64: UABAL Vd.8H, Vn.8B, Vm.8B /// </summary> public static Vector128<ushort> AbsoluteDifferenceWideningLowerAndAdd(Vector128<ushort> addend, Vector64<byte> left, Vector64<byte> right) => AbsoluteDifferenceWideningLowerAndAdd(addend, left, right); /// <summary> /// int32x4_t vabal_s16 (int32x4_t a, int16x4_t b, int16x4_t c) /// A32: VABAL.S16 Qd, Dn, Dm /// A64: SABAL Vd.4S, Vn.4H, 
Vm.4H /// </summary> public static Vector128<int> AbsoluteDifferenceWideningLowerAndAdd(Vector128<int> addend, Vector64<short> left, Vector64<short> right) => AbsoluteDifferenceWideningLowerAndAdd(addend, left, right); /// <summary> /// int64x2_t vabal_s32 (int64x2_t a, int32x2_t b, int32x2_t c) /// A32: VABAL.S32 Qd, Dn, Dm /// A64: SABAL Vd.2D, Vn.2S, Vm.2S /// </summary> public static Vector128<long> AbsoluteDifferenceWideningLowerAndAdd(Vector128<long> addend, Vector64<int> left, Vector64<int> right) => AbsoluteDifferenceWideningLowerAndAdd(addend, left, right); /// <summary> /// int16x8_t vabal_s8 (int16x8_t a, int8x8_t b, int8x8_t c) /// A32: VABAL.S8 Qd, Dn, Dm /// A64: SABAL Vd.8H, Vn.8B, Vm.8B /// </summary> public static Vector128<short> AbsoluteDifferenceWideningLowerAndAdd(Vector128<short> addend, Vector64<sbyte> left, Vector64<sbyte> right) => AbsoluteDifferenceWideningLowerAndAdd(addend, left, right); /// <summary> /// uint32x4_t vabal_u16 (uint32x4_t a, uint16x4_t b, uint16x4_t c) /// A32: VABAL.U16 Qd, Dn, Dm /// A64: UABAL Vd.4S, Vn.4H, Vm.4H /// </summary> public static Vector128<uint> AbsoluteDifferenceWideningLowerAndAdd(Vector128<uint> addend, Vector64<ushort> left, Vector64<ushort> right) => AbsoluteDifferenceWideningLowerAndAdd(addend, left, right); /// <summary> /// uint64x2_t vabal_u32 (uint64x2_t a, uint32x2_t b, uint32x2_t c) /// A32: VABAL.U32 Qd, Dn, Dm /// A64: UABAL Vd.2D, Vn.2S, Vm.2S /// </summary> public static Vector128<ulong> AbsoluteDifferenceWideningLowerAndAdd(Vector128<ulong> addend, Vector64<uint> left, Vector64<uint> right) => AbsoluteDifferenceWideningLowerAndAdd(addend, left, right); /// <summary> /// uint16x8_t vabdl_high_u8 (uint8x16_t a, uint8x16_t b) /// A32: VABDL.U8 Qd, Dn+1, Dm+1 /// A64: UABDL2 Vd.8H, Vn.16B, Vm.16B /// </summary> public static Vector128<ushort> AbsoluteDifferenceWideningUpper(Vector128<byte> left, Vector128<byte> right) => AbsoluteDifferenceWideningUpper(left, right); /// <summary> /// int32x4_t vabdl_high_s16 (int16x8_t a, int16x8_t b) /// A32: VABDL.S16 Qd, Dn+1, Dm+1 /// A64: SABDL2 Vd.4S, Vn.8H, Vm.8H /// </summary> public static Vector128<uint> AbsoluteDifferenceWideningUpper(Vector128<short> left, Vector128<short> right) => AbsoluteDifferenceWideningUpper(left, right); /// <summary> /// int64x2_t vabdl_high_s32 (int32x4_t a, int32x4_t b) /// A32: VABDL.S32 Qd, Dn+1, Dm+1 /// A64: SABDL2 Vd.2D, Vn.4S, Vm.4S /// </summary> public static Vector128<ulong> AbsoluteDifferenceWideningUpper(Vector128<int> left, Vector128<int> right) => AbsoluteDifferenceWideningUpper(left, right); /// <summary> /// int16x8_t vabdl_high_s8 (int8x16_t a, int8x16_t b) /// A32: VABDL.S8 Qd, Dn+1, Dm+1 /// A64: SABDL2 Vd.8H, Vn.16B, Vm.16B /// </summary> public static Vector128<ushort> AbsoluteDifferenceWideningUpper(Vector128<sbyte> left, Vector128<sbyte> right) => AbsoluteDifferenceWideningUpper(left, right); /// <summary> /// uint32x4_t vabdl_high_u16 (uint16x8_t a, uint16x8_t b) /// A32: VABDL.U16 Qd, Dn+1, Dm+1 /// A64: UABDL2 Vd.4S, Vn.8H, Vm.8H /// </summary> public static Vector128<uint> AbsoluteDifferenceWideningUpper(Vector128<ushort> left, Vector128<ushort> right) => AbsoluteDifferenceWideningUpper(left, right); /// <summary> /// uint64x2_t vabdl_high_u32 (uint32x4_t a, uint32x4_t b) /// A32: VABDL.U32 Qd, Dn+1, Dm+1 /// A64: UABDL2 Vd.2D, Vn.4S, Vm.4S /// </summary> public static Vector128<ulong> AbsoluteDifferenceWideningUpper(Vector128<uint> left, Vector128<uint> right) => AbsoluteDifferenceWideningUpper(left, right); /// 
<summary> /// uint16x8_t vabal_high_u8 (uint16x8_t a, uint8x16_t b, uint8x16_t c) /// A32: VABAL.U8 Qd, Dn+1, Dm+1 /// A64: UABAL2 Vd.8H, Vn.16B, Vm.16B /// </summary> public static Vector128<ushort> AbsoluteDifferenceWideningUpperAndAdd(Vector128<ushort> addend, Vector128<byte> left, Vector128<byte> right) => AbsoluteDifferenceWideningUpperAndAdd(addend, left, right); /// <summary> /// int32x4_t vabal_high_s16 (int32x4_t a, int16x8_t b, int16x8_t c) /// A32: VABAL.S16 Qd, Dn+1, Dm+1 /// A64: SABAL2 Vd.4S, Vn.8H, Vm.8H /// </summary> public static Vector128<int> AbsoluteDifferenceWideningUpperAndAdd(Vector128<int> addend, Vector128<short> left, Vector128<short> right) => AbsoluteDifferenceWideningUpperAndAdd(addend, left, right); /// <summary> /// int64x2_t vabal_high_s32 (int64x2_t a, int32x4_t b, int32x4_t c) /// A32: VABAL.S32 Qd, Dn+1, Dm+1 /// A64: SABAL2 Vd.2D, Vn.4S, Vm.4S /// </summary> public static Vector128<long> AbsoluteDifferenceWideningUpperAndAdd(Vector128<long> addend, Vector128<int> left, Vector128<int> right) => AbsoluteDifferenceWideningUpperAndAdd(addend, left, right); /// <summary> /// int16x8_t vabal_high_s8 (int16x8_t a, int8x16_t b, int8x16_t c) /// A32: VABAL.S8 Qd, Dn+1, Dm+1 /// A64: SABAL2 Vd.8H, Vn.16B, Vm.16B /// </summary> public static Vector128<short> AbsoluteDifferenceWideningUpperAndAdd(Vector128<short> addend, Vector128<sbyte> left, Vector128<sbyte> right) => AbsoluteDifferenceWideningUpperAndAdd(addend, left, right); /// <summary> /// uint32x4_t vabal_high_u16 (uint32x4_t a, uint16x8_t b, uint16x8_t c) /// A32: VABAL.U16 Qd, Dn+1, Dm+1 /// A64: UABAL2 Vd.4S, Vn.8H, Vm.8H /// </summary> public static Vector128<uint> AbsoluteDifferenceWideningUpperAndAdd(Vector128<uint> addend, Vector128<ushort> left, Vector128<ushort> right) => AbsoluteDifferenceWideningUpperAndAdd(addend, left, right); /// <summary> /// uint64x2_t vabal_high_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t c) /// A32: VABAL.U32 Qd, Dn+1, Dm+1 /// A64: UABAL2 Vd.2D, Vn.4S, Vm.4S /// </summary> public static Vector128<ulong> AbsoluteDifferenceWideningUpperAndAdd(Vector128<ulong> addend, Vector128<uint> left, Vector128<uint> right) => AbsoluteDifferenceWideningUpperAndAdd(addend, left, right); /// <summary> /// uint8x8_t vadd_u8 (uint8x8_t a, uint8x8_t b) /// A32: VADD.I8 Dd, Dn, Dm /// A64: ADD Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> Add(Vector64<byte> left, Vector64<byte> right) => Add(left, right); /// <summary> /// int16x4_t vadd_s16 (int16x4_t a, int16x4_t b) /// A32: VADD.I16 Dd, Dn, Dm /// A64: ADD Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<short> Add(Vector64<short> left, Vector64<short> right) => Add(left, right); /// <summary> /// int32x2_t vadd_s32 (int32x2_t a, int32x2_t b) /// A32: VADD.I32 Dd, Dn, Dm /// A64: ADD Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<int> Add(Vector64<int> left, Vector64<int> right) => Add(left, right); /// <summary> /// int8x8_t vadd_s8 (int8x8_t a, int8x8_t b) /// A32: VADD.I8 Dd, Dn, Dm /// A64: ADD Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> Add(Vector64<sbyte> left, Vector64<sbyte> right) => Add(left, right); /// <summary> /// float32x2_t vadd_f32 (float32x2_t a, float32x2_t b) /// A32: VADD.F32 Dd, Dn, Dm /// A64: FADD Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<float> Add(Vector64<float> left, Vector64<float> right) => Add(left, right); /// <summary> /// uint16x4_t vadd_u16 (uint16x4_t a, uint16x4_t b) /// A32: VADD.I16 Dd, Dn, Dm /// A64: ADD Vd.4H, Vn.4H, Vm.4H 
/// </summary> public static Vector64<ushort> Add(Vector64<ushort> left, Vector64<ushort> right) => Add(left, right); /// <summary> /// uint32x2_t vadd_u32 (uint32x2_t a, uint32x2_t b) /// A32: VADD.I32 Dd, Dn, Dm /// A64: ADD Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<uint> Add(Vector64<uint> left, Vector64<uint> right) => Add(left, right); /// <summary> /// uint8x16_t vaddq_u8 (uint8x16_t a, uint8x16_t b) /// A32: VADD.I8 Qd, Qn, Qm /// A64: ADD Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> Add(Vector128<byte> left, Vector128<byte> right) => Add(left, right); /// <summary> /// int16x8_t vaddq_s16 (int16x8_t a, int16x8_t b) /// A32: VADD.I16 Qd, Qn, Qm /// A64: ADD Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<short> Add(Vector128<short> left, Vector128<short> right) => Add(left, right); /// <summary> /// int32x4_t vaddq_s32 (int32x4_t a, int32x4_t b) /// A32: VADD.I32 Qd, Qn, Qm /// A64: ADD Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<int> Add(Vector128<int> left, Vector128<int> right) => Add(left, right); /// <summary> /// int64x2_t vaddq_s64 (int64x2_t a, int64x2_t b) /// A32: VADD.I64 Qd, Qn, Qm /// A64: ADD Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<long> Add(Vector128<long> left, Vector128<long> right) => Add(left, right); /// <summary> /// int8x16_t vaddq_s8 (int8x16_t a, int8x16_t b) /// A32: VADD.I8 Qd, Qn, Qm /// A64: ADD Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> Add(Vector128<sbyte> left, Vector128<sbyte> right) => Add(left, right); /// <summary> /// float32x4_t vaddq_f32 (float32x4_t a, float32x4_t b) /// A32: VADD.F32 Qd, Qn, Qm /// A64: FADD Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<float> Add(Vector128<float> left, Vector128<float> right) => Add(left, right); /// <summary> /// uint16x8_t vaddq_u16 (uint16x8_t a, uint16x8_t b) /// A32: VADD.I16 Qd, Qn, Qm /// A64: ADD Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<ushort> Add(Vector128<ushort> left, Vector128<ushort> right) => Add(left, right); /// <summary> /// uint32x4_t vaddq_u32 (uint32x4_t a, uint32x4_t b) /// A32: VADD.I32 Qd, Qn, Qm /// A64: ADD Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<uint> Add(Vector128<uint> left, Vector128<uint> right) => Add(left, right); /// <summary> /// uint64x2_t vaddq_u64 (uint64x2_t a, uint64x2_t b) /// A32: VADD.I64 Qd, Qn, Qm /// A64: ADD Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<ulong> Add(Vector128<ulong> left, Vector128<ulong> right) => Add(left, right); /// <summary> /// uint8x8_t vaddhn_u16 (uint16x8_t a, uint16x8_t b) /// A32: VADDHN.I16 Dd, Qn, Qm /// A64: ADDHN Vd.8B, Vn.8H, Vm.8H /// </summary> public static Vector64<byte> AddHighNarrowingLower(Vector128<ushort> left, Vector128<ushort> right) => AddHighNarrowingLower(left, right); /// <summary> /// int16x4_t vaddhn_s32 (int32x4_t a, int32x4_t b) /// A32: VADDHN.I32 Dd, Qn, Qm /// A64: ADDHN Vd.4H, Vn.4S, Vm.4S /// </summary> public static Vector64<short> AddHighNarrowingLower(Vector128<int> left, Vector128<int> right) => AddHighNarrowingLower(left, right); /// <summary> /// int32x2_t vaddhn_s64 (int64x2_t a, int64x2_t b) /// A32: VADDHN.I64 Dd, Qn, Qm /// A64: ADDHN Vd.2S, Vn.2D, Vm.2D /// </summary> public static Vector64<int> AddHighNarrowingLower(Vector128<long> left, Vector128<long> right) => AddHighNarrowingLower(left, right); /// <summary> /// int8x8_t vaddhn_s16 (int16x8_t a, int16x8_t b) /// A32: VADDHN.I16 Dd, Qn, Qm /// A64: ADDHN Vd.8B, Vn.8H, Vm.8H 
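
// Usage sketch (made-up inputs): AddHighNarrowingLower keeps only the most significant
// half of each full-width sum, so each byte lane below is (ushort)(left + right) >> 8.
Vector128<ushort> hi1 = Vector128.Create((ushort)0x1234);
Vector128<ushort> hi2 = Vector128.Create((ushort)0x0101);
Vector64<byte> high = AdvSimd.AddHighNarrowingLower(hi1, hi2); // each lane: 0x13 (from 0x1335)
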
/// </summary> public static Vector64<sbyte> AddHighNarrowingLower(Vector128<short> left, Vector128<short> right) => AddHighNarrowingLower(left, right); /// <summary> /// uint16x4_t vaddhn_u32 (uint32x4_t a, uint32x4_t b) /// A32: VADDHN.I32 Dd, Qn, Qm /// A64: ADDHN Vd.4H, Vn.4S, Vm.4S /// </summary> public static Vector64<ushort> AddHighNarrowingLower(Vector128<uint> left, Vector128<uint> right) => AddHighNarrowingLower(left, right); /// <summary> /// uint32x2_t vaddhn_u64 (uint64x2_t a, uint64x2_t b) /// A32: VADDHN.I64 Dd, Qn, Qm /// A64: ADDHN Vd.2S, Vn.2D, Vm.2D /// </summary> public static Vector64<uint> AddHighNarrowingLower(Vector128<ulong> left, Vector128<ulong> right) => AddHighNarrowingLower(left, right); /// <summary> /// uint8x16_t vaddhn_high_u16 (uint8x8_t r, uint16x8_t a, uint16x8_t b) /// A32: VADDHN.I16 Dd+1, Qn, Qm /// A64: ADDHN2 Vd.16B, Vn.8H, Vm.8H /// </summary> public static Vector128<byte> AddHighNarrowingUpper(Vector64<byte> lower, Vector128<ushort> left, Vector128<ushort> right) => AddHighNarrowingUpper(lower, left, right); /// <summary> /// int16x8_t vaddhn_high_s32 (int16x4_t r, int32x4_t a, int32x4_t b) /// A32: VADDHN.I32 Dd+1, Qn, Qm /// A64: ADDHN2 Vd.8H, Vn.4S, Vm.4S /// </summary> public static Vector128<short> AddHighNarrowingUpper(Vector64<short> lower, Vector128<int> left, Vector128<int> right) => AddHighNarrowingUpper(lower, left, right); /// <summary> /// int32x4_t vaddhn_high_s64 (int32x2_t r, int64x2_t a, int64x2_t b) /// A32: VADDHN.I64 Dd+1, Qn, Qm /// A64: ADDHN2 Vd.4S, Vn.2D, Vm.2D /// </summary> public static Vector128<int> AddHighNarrowingUpper(Vector64<int> lower, Vector128<long> left, Vector128<long> right) => AddHighNarrowingUpper(lower, left, right); /// <summary> /// int8x16_t vaddhn_high_s16 (int8x8_t r, int16x8_t a, int16x8_t b) /// A32: VADDHN.I16 Dd+1, Qn, Qm /// A64: ADDHN2 Vd.16B, Vn.8H, Vm.8H /// </summary> public static Vector128<sbyte> AddHighNarrowingUpper(Vector64<sbyte> lower, Vector128<short> left, Vector128<short> right) => AddHighNarrowingUpper(lower, left, right); /// <summary> /// uint16x8_t vaddhn_high_u32 (uint16x4_t r, uint32x4_t a, uint32x4_t b) /// A32: VADDHN.I32 Dd+1, Qn, Qm /// A64: ADDHN2 Vd.8H, Vn.4S, Vm.4S /// </summary> public static Vector128<ushort> AddHighNarrowingUpper(Vector64<ushort> lower, Vector128<uint> left, Vector128<uint> right) => AddHighNarrowingUpper(lower, left, right); /// <summary> /// uint32x4_t vaddhn_high_u64 (uint32x2_t r, uint64x2_t a, uint64x2_t b) /// A32: VADDHN.I64 Dd+1, Qn, Qm /// A64: ADDHN2 Vd.4S, Vn.2D, Vm.2D /// </summary> public static Vector128<uint> AddHighNarrowingUpper(Vector64<uint> lower, Vector128<ulong> left, Vector128<ulong> right) => AddHighNarrowingUpper(lower, left, right); /// <summary> /// uint8x8_t vpadd_u8 (uint8x8_t a, uint8x8_t b) /// A32: VPADD.I8 Dd, Dn, Dm /// A64: ADDP Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> AddPairwise(Vector64<byte> left, Vector64<byte> right) => AddPairwise(left, right); /// <summary> /// int16x4_t vpadd_s16 (int16x4_t a, int16x4_t b) /// A32: VPADD.I16 Dd, Dn, Dm /// A64: ADDP Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<short> AddPairwise(Vector64<short> left, Vector64<short> right) => AddPairwise(left, right); /// <summary> /// int32x2_t vpadd_s32 (int32x2_t a, int32x2_t b) /// A32: VPADD.I32 Dd, Dn, Dm /// A64: ADDP Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<int> AddPairwise(Vector64<int> left, Vector64<int> right) => AddPairwise(left, right); /// <summary> /// int8x8_t vpadd_s8 
(int8x8_t a, int8x8_t b) /// A32: VPADD.I8 Dd, Dn, Dm /// A64: ADDP Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> AddPairwise(Vector64<sbyte> left, Vector64<sbyte> right) => AddPairwise(left, right); /// <summary> /// float32x2_t vpadd_f32 (float32x2_t a, float32x2_t b) /// A32: VPADD.F32 Dd, Dn, Dm /// A64: FADDP Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<float> AddPairwise(Vector64<float> left, Vector64<float> right) => AddPairwise(left, right); /// <summary> /// uint16x4_t vpadd_u16 (uint16x4_t a, uint16x4_t b) /// A32: VPADD.I16 Dd, Dn, Dm /// A64: ADDP Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<ushort> AddPairwise(Vector64<ushort> left, Vector64<ushort> right) => AddPairwise(left, right); /// <summary> /// uint32x2_t vpadd_u32 (uint32x2_t a, uint32x2_t b) /// A32: VPADD.I32 Dd, Dn, Dm /// A64: ADDP Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<uint> AddPairwise(Vector64<uint> left, Vector64<uint> right) => AddPairwise(left, right); /// <summary> /// uint16x4_t vpaddl_u8 (uint8x8_t a) /// A32: VPADDL.U8 Dd, Dm /// A64: UADDLP Vd.4H, Vn.8B /// </summary> public static Vector64<ushort> AddPairwiseWidening(Vector64<byte> value) => AddPairwiseWidening(value); /// <summary> /// int32x2_t vpaddl_s16 (int16x4_t a) /// A32: VPADDL.S16 Dd, Dm /// A64: SADDLP Vd.2S, Vn.4H /// </summary> public static Vector64<int> AddPairwiseWidening(Vector64<short> value) => AddPairwiseWidening(value); /// <summary> /// int16x4_t vpaddl_s8 (int8x8_t a) /// A32: VPADDL.S8 Dd, Dm /// A64: SADDLP Vd.4H, Vn.8B /// </summary> public static Vector64<short> AddPairwiseWidening(Vector64<sbyte> value) => AddPairwiseWidening(value); /// <summary> /// uint32x2_t vpaddl_u16 (uint16x4_t a) /// A32: VPADDL.U16 Dd, Dm /// A64: UADDLP Vd.2S, Vn.4H /// </summary> public static Vector64<uint> AddPairwiseWidening(Vector64<ushort> value) => AddPairwiseWidening(value); /// <summary> /// uint16x8_t vpaddlq_u8 (uint8x16_t a) /// A32: VPADDL.U8 Qd, Qm /// A64: UADDLP Vd.8H, Vn.16B /// </summary> public static Vector128<ushort> AddPairwiseWidening(Vector128<byte> value) => AddPairwiseWidening(value); /// <summary> /// int32x4_t vpaddlq_s16 (int16x8_t a) /// A32: VPADDL.S16 Qd, Qm /// A64: SADDLP Vd.4S, Vn.8H /// </summary> public static Vector128<int> AddPairwiseWidening(Vector128<short> value) => AddPairwiseWidening(value); /// <summary> /// int64x2_t vpaddlq_s32 (int32x4_t a) /// A32: VPADDL.S32 Qd, Qm /// A64: SADDLP Vd.2D, Vn.4S /// </summary> public static Vector128<long> AddPairwiseWidening(Vector128<int> value) => AddPairwiseWidening(value); /// <summary> /// int16x8_t vpaddlq_s8 (int8x16_t a) /// A32: VPADDL.S8 Qd, Qm /// A64: SADDLP Vd.8H, Vn.16B /// </summary> public static Vector128<short> AddPairwiseWidening(Vector128<sbyte> value) => AddPairwiseWidening(value); /// <summary> /// uint32x4_t vpaddlq_u16 (uint16x8_t a) /// A32: VPADDL.U16 Qd, Qm /// A64: UADDLP Vd.4S, Vn.8H /// </summary> public static Vector128<uint> AddPairwiseWidening(Vector128<ushort> value) => AddPairwiseWidening(value); /// <summary> /// uint64x2_t vpaddlq_u32 (uint32x4_t a) /// A32: VPADDL.U32 Qd, Qm /// A64: UADDLP Vd.2D, Vn.4S /// </summary> public static Vector128<ulong> AddPairwiseWidening(Vector128<uint> value) => AddPairwiseWidening(value); /// <summary> /// uint16x4_t vpadal_u8 (uint16x4_t a, uint8x8_t b) /// A32: VPADAL.U8 Dd, Dm /// A64: UADALP Vd.4H, Vn.8B /// </summary> public static Vector64<ushort> AddPairwiseWideningAndAdd(Vector64<ushort> addend, Vector64<byte> value) => 
AddPairwiseWideningAndAdd(addend, value); /// <summary> /// int32x2_t vpadal_s16 (int32x2_t a, int16x4_t b) /// A32: VPADAL.S16 Dd, Dm /// A64: SADALP Vd.2S, Vn.4H /// </summary> public static Vector64<int> AddPairwiseWideningAndAdd(Vector64<int> addend, Vector64<short> value) => AddPairwiseWideningAndAdd(addend, value); /// <summary> /// int16x4_t vpadal_s8 (int16x4_t a, int8x8_t b) /// A32: VPADAL.S8 Dd, Dm /// A64: SADALP Vd.4H, Vn.8B /// </summary> public static Vector64<short> AddPairwiseWideningAndAdd(Vector64<short> addend, Vector64<sbyte> value) => AddPairwiseWideningAndAdd(addend, value); /// <summary> /// uint32x2_t vpadal_u16 (uint32x2_t a, uint16x4_t b) /// A32: VPADAL.U16 Dd, Dm /// A64: UADALP Vd.2S, Vn.4H /// </summary> public static Vector64<uint> AddPairwiseWideningAndAdd(Vector64<uint> addend, Vector64<ushort> value) => AddPairwiseWideningAndAdd(addend, value); /// <summary> /// uint16x8_t vpadalq_u8 (uint16x8_t a, uint8x16_t b) /// A32: VPADAL.U8 Qd, Qm /// A64: UADALP Vd.8H, Vn.16B /// </summary> public static Vector128<ushort> AddPairwiseWideningAndAdd(Vector128<ushort> addend, Vector128<byte> value) => AddPairwiseWideningAndAdd(addend, value); /// <summary> /// int32x4_t vpadalq_s16 (int32x4_t a, int16x8_t b) /// A32: VPADAL.S16 Qd, Qm /// A64: SADALP Vd.4S, Vn.8H /// </summary> public static Vector128<int> AddPairwiseWideningAndAdd(Vector128<int> addend, Vector128<short> value) => AddPairwiseWideningAndAdd(addend, value); /// <summary> /// int64x2_t vpadalq_s32 (int64x2_t a, int32x4_t b) /// A32: VPADAL.S32 Qd, Qm /// A64: SADALP Vd.2D, Vn.4S /// </summary> public static Vector128<long> AddPairwiseWideningAndAdd(Vector128<long> addend, Vector128<int> value) => AddPairwiseWideningAndAdd(addend, value); /// <summary> /// int16x8_t vpadalq_s8 (int16x8_t a, int8x16_t b) /// A32: VPADAL.S8 Qd, Qm /// A64: SADALP Vd.8H, Vn.16B /// </summary> public static Vector128<short> AddPairwiseWideningAndAdd(Vector128<short> addend, Vector128<sbyte> value) => AddPairwiseWideningAndAdd(addend, value); /// <summary> /// uint32x4_t vpadalq_u16 (uint32x4_t a, uint16x8_t b) /// A32: VPADAL.U16 Qd, Qm /// A64: UADALP Vd.4S, Vn.8H /// </summary> public static Vector128<uint> AddPairwiseWideningAndAdd(Vector128<uint> addend, Vector128<ushort> value) => AddPairwiseWideningAndAdd(addend, value); /// <summary> /// uint64x2_t vpadalq_u32 (uint64x2_t a, uint32x4_t b) /// A32: VPADAL.U32 Qd, Qm /// A64: UADALP Vd.2D, Vn.4S /// </summary> public static Vector128<ulong> AddPairwiseWideningAndAdd(Vector128<ulong> addend, Vector128<uint> value) => AddPairwiseWideningAndAdd(addend, value); /// <summary> /// int64x1_t vpadal_s32 (int64x1_t a, int32x2_t b) /// A32: VPADAL.S32 Dd, Dm /// A64: SADALP Vd.1D, Vn.2S /// </summary> public static Vector64<long> AddPairwiseWideningAndAddScalar(Vector64<long> addend, Vector64<int> value) => AddPairwiseWideningAndAddScalar(addend, value); /// <summary> /// uint64x1_t vpadal_u32 (uint64x1_t a, uint32x2_t b) /// A32: VPADAL.U32 Dd, Dm /// A64: UADALP Vd.1D, Vn.2S /// </summary> public static Vector64<ulong> AddPairwiseWideningAndAddScalar(Vector64<ulong> addend, Vector64<uint> value) => AddPairwiseWideningAndAddScalar(addend, value); /// <summary> /// int64x1_t vpaddl_s32 (int32x2_t a) /// A32: VPADDL.S32 Dd, Dm /// A64: SADDLP Dd, Vn.2S /// </summary> public static Vector64<long> AddPairwiseWideningScalar(Vector64<int> value) => AddPairwiseWideningScalar(value); /// <summary> /// uint64x1_t vpaddl_u32 (uint32x2_t a) /// A32: VPADDL.U32 Dd, Dm /// A64: UADDLP Dd, 
Vn.2S /// </summary> public static Vector64<ulong> AddPairwiseWideningScalar(Vector64<uint> value) => AddPairwiseWideningScalar(value); /// <summary> /// uint8x8_t vraddhn_u16 (uint16x8_t a, uint16x8_t b) /// A32: VRADDHN.I16 Dd, Qn, Qm /// A64: RADDHN Vd.8B, Vn.8H, Vm.8H /// </summary> public static Vector64<byte> AddRoundedHighNarrowingLower(Vector128<ushort> left, Vector128<ushort> right) => AddRoundedHighNarrowingLower(left, right); /// <summary> /// int16x4_t vraddhn_s32 (int32x4_t a, int32x4_t b) /// A32: VRADDHN.I32 Dd, Qn, Qm /// A64: RADDHN Vd.4H, Vn.4S, Vm.4S /// </summary> public static Vector64<short> AddRoundedHighNarrowingLower(Vector128<int> left, Vector128<int> right) => AddRoundedHighNarrowingLower(left, right); /// <summary> /// int32x2_t vraddhn_s64 (int64x2_t a, int64x2_t b) /// A32: VRADDHN.I64 Dd, Qn, Qm /// A64: RADDHN Vd.2S, Vn.2D, Vm.2D /// </summary> public static Vector64<int> AddRoundedHighNarrowingLower(Vector128<long> left, Vector128<long> right) => AddRoundedHighNarrowingLower(left, right); /// <summary> /// int8x8_t vraddhn_s16 (int16x8_t a, int16x8_t b) /// A32: VRADDHN.I16 Dd, Qn, Qm /// A64: RADDHN Vd.8B, Vn.8H, Vm.8H /// </summary> public static Vector64<sbyte> AddRoundedHighNarrowingLower(Vector128<short> left, Vector128<short> right) => AddRoundedHighNarrowingLower(left, right); /// <summary> /// uint16x4_t vraddhn_u32 (uint32x4_t a, uint32x4_t b) /// A32: VRADDHN.I32 Dd, Qn, Qm /// A64: RADDHN Vd.4H, Vn.4S, Vm.4S /// </summary> public static Vector64<ushort> AddRoundedHighNarrowingLower(Vector128<uint> left, Vector128<uint> right) => AddRoundedHighNarrowingLower(left, right); /// <summary> /// uint32x2_t vraddhn_u64 (uint64x2_t a, uint64x2_t b) /// A32: VRADDHN.I64 Dd, Qn, Qm /// A64: RADDHN Vd.2S, Vn.2D, Vm.2D /// </summary> public static Vector64<uint> AddRoundedHighNarrowingLower(Vector128<ulong> left, Vector128<ulong> right) => AddRoundedHighNarrowingLower(left, right); /// <summary> /// uint8x16_t vraddhn_high_u16 (uint8x8_t r, uint16x8_t a, uint16x8_t b) /// A32: VRADDHN.I16 Dd+1, Qn, Qm /// A64: RADDHN2 Vd.16B, Vn.8H, Vm.8H /// </summary> public static Vector128<byte> AddRoundedHighNarrowingUpper(Vector64<byte> lower, Vector128<ushort> left, Vector128<ushort> right) => AddRoundedHighNarrowingUpper(lower, left, right); /// <summary> /// int16x8_t vraddhn_high_s32 (int16x4_t r, int32x4_t a, int32x4_t b) /// A32: VRADDHN.I32 Dd+1, Qn, Qm /// A64: RADDHN2 Vd.8H, Vn.4S, Vm.4S /// </summary> public static Vector128<short> AddRoundedHighNarrowingUpper(Vector64<short> lower, Vector128<int> left, Vector128<int> right) => AddRoundedHighNarrowingUpper(lower, left, right); /// <summary> /// int32x4_t vraddhn_high_s64 (int32x2_t r, int64x2_t a, int64x2_t b) /// A32: VRADDHN.I64 Dd+1, Qn, Qm /// A64: RADDHN2 Vd.4S, Vn.2D, Vm.2D /// </summary> public static Vector128<int> AddRoundedHighNarrowingUpper(Vector64<int> lower, Vector128<long> left, Vector128<long> right) => AddRoundedHighNarrowingUpper(lower, left, right); /// <summary> /// int8x16_t vraddhn_high_s16 (int8x8_t r, int16x8_t a, int16x8_t b) /// A32: VRADDHN.I16 Dd+1, Qn, Qm /// A64: RADDHN2 Vd.16B, Vn.8H, Vm.8H /// </summary> public static Vector128<sbyte> AddRoundedHighNarrowingUpper(Vector64<sbyte> lower, Vector128<short> left, Vector128<short> right) => AddRoundedHighNarrowingUpper(lower, left, right); /// <summary> /// uint16x8_t vraddhn_high_u32 (uint16x4_t r, uint32x4_t a, uint32x4_t b) /// A32: VRADDHN.I32 Dd+1, Qn, Qm /// A64: RADDHN2 Vd.8H, Vn.4S, Vm.4S /// </summary> public static 
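
// Usage sketch (hypothetical input `data`): a full horizontal byte sum via successive
// pairwise widening - 16 bytes -> 8 ushorts -> 4 uints -> 2 ulongs - with the final two
// lanes folded in scalar code.
Vector128<byte> data = Vector128.Create((byte)3);
Vector128<ulong> partial =
    AdvSimd.AddPairwiseWidening(
        AdvSimd.AddPairwiseWidening(
            AdvSimd.AddPairwiseWidening(data)));
ulong total = partial.GetElement(0) + partial.GetElement(1); // 16 * 3 = 48
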
Vector128<ushort> AddRoundedHighNarrowingUpper(Vector64<ushort> lower, Vector128<uint> left, Vector128<uint> right) => AddRoundedHighNarrowingUpper(lower, left, right); /// <summary> /// uint32x4_t vraddhn_high_u64 (uint32x2_t r, uint64x2_t a, uint64x2_t b) /// A32: VRADDHN.I64 Dd+1, Qn, Qm /// A64: RADDHN2 Vd.4S, Vn.2D, Vm.2D /// </summary> public static Vector128<uint> AddRoundedHighNarrowingUpper(Vector64<uint> lower, Vector128<ulong> left, Vector128<ulong> right) => AddRoundedHighNarrowingUpper(lower, left, right); /// <summary> /// uint8x8_t vqadd_u8 (uint8x8_t a, uint8x8_t b) /// A32: VQADD.U8 Dd, Dn, Dm /// A64: UQADD Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> AddSaturate(Vector64<byte> left, Vector64<byte> right) => AddSaturate(left, right); /// <summary> /// int16x4_t vqadd_s16 (int16x4_t a, int16x4_t b) /// A32: VQADD.S16 Dd, Dn, Dm /// A64: SQADD Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<short> AddSaturate(Vector64<short> left, Vector64<short> right) => AddSaturate(left, right); /// <summary> /// int32x2_t vqadd_s32 (int32x2_t a, int32x2_t b) /// A32: VQADD.S32 Dd, Dn, Dm /// A64: SQADD Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<int> AddSaturate(Vector64<int> left, Vector64<int> right) => AddSaturate(left, right); /// <summary> /// int8x8_t vqadd_s8 (int8x8_t a, int8x8_t b) /// A32: VQADD.S8 Dd, Dn, Dm /// A64: SQADD Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> AddSaturate(Vector64<sbyte> left, Vector64<sbyte> right) => AddSaturate(left, right); /// <summary> /// uint16x4_t vqadd_u16 (uint16x4_t a, uint16x4_t b) /// A32: VQADD.U16 Dd, Dn, Dm /// A64: UQADD Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<ushort> AddSaturate(Vector64<ushort> left, Vector64<ushort> right) => AddSaturate(left, right); /// <summary> /// uint32x2_t vqadd_u32 (uint32x2_t a, uint32x2_t b) /// A32: VQADD.U32 Dd, Dn, Dm /// A64: UQADD Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<uint> AddSaturate(Vector64<uint> left, Vector64<uint> right) => AddSaturate(left, right); /// <summary> /// uint8x16_t vqaddq_u8 (uint8x16_t a, uint8x16_t b) /// A32: VQADD.U8 Qd, Qn, Qm /// A64: UQADD Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> AddSaturate(Vector128<byte> left, Vector128<byte> right) => AddSaturate(left, right); /// <summary> /// int16x8_t vqaddq_s16 (int16x8_t a, int16x8_t b) /// A32: VQADD.S16 Qd, Qn, Qm /// A64: SQADD Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<short> AddSaturate(Vector128<short> left, Vector128<short> right) => AddSaturate(left, right); /// <summary> /// int32x4_t vqaddq_s32 (int32x4_t a, int32x4_t b) /// A32: VQADD.S32 Qd, Qn, Qm /// A64: SQADD Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<int> AddSaturate(Vector128<int> left, Vector128<int> right) => AddSaturate(left, right); /// <summary> /// int64x2_t vqaddq_s64 (int64x2_t a, int64x2_t b) /// A32: VQADD.S64 Qd, Qn, Qm /// A64: SQADD Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<long> AddSaturate(Vector128<long> left, Vector128<long> right) => AddSaturate(left, right); /// <summary> /// int8x16_t vqaddq_s8 (int8x16_t a, int8x16_t b) /// A32: VQADD.S8 Qd, Qn, Qm /// A64: SQADD Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> AddSaturate(Vector128<sbyte> left, Vector128<sbyte> right) => AddSaturate(left, right); /// <summary> /// uint16x8_t vqaddq_u16 (uint16x8_t a, uint16x8_t b) /// A32: VQADD.U16 Qd, Qn, Qm /// A64: UQADD Vd.8H, Vn.8H, Vm.8H /// </summary> public 
static Vector128<ushort> AddSaturate(Vector128<ushort> left, Vector128<ushort> right) => AddSaturate(left, right); /// <summary> /// uint32x4_t vqaddq_u32 (uint32x4_t a, uint32x4_t b) /// A32: VQADD.U32 Qd, Qn, Qm /// A64: UQADD Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<uint> AddSaturate(Vector128<uint> left, Vector128<uint> right) => AddSaturate(left, right); /// <summary> /// uint64x2_t vqaddq_u64 (uint64x2_t a, uint64x2_t b) /// A32: VQADD.U64 Qd, Qn, Qm /// A64: UQADD Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<ulong> AddSaturate(Vector128<ulong> left, Vector128<ulong> right) => AddSaturate(left, right); /// <summary> /// int64x1_t vqadd_s64 (int64x1_t a, int64x1_t b) /// A32: VQADD.S64 Dd, Dn, Dm /// A64: SQADD Dd, Dn, Dm /// </summary> public static Vector64<long> AddSaturateScalar(Vector64<long> left, Vector64<long> right) => AddSaturateScalar(left, right); /// <summary> /// uint64x1_t vqadd_u64 (uint64x1_t a, uint64x1_t b) /// A32: VQADD.U64 Dd, Dn, Dm /// A64: UQADD Dd, Dn, Dm /// </summary> public static Vector64<ulong> AddSaturateScalar(Vector64<ulong> left, Vector64<ulong> right) => AddSaturateScalar(left, right); /// <summary> /// float64x1_t vadd_f64 (float64x1_t a, float64x1_t b) /// A32: VADD.F64 Dd, Dn, Dm /// A64: FADD Dd, Dn, Dm /// </summary> public static Vector64<double> AddScalar(Vector64<double> left, Vector64<double> right) => AddScalar(left, right); /// <summary> /// int64x1_t vadd_s64 (int64x1_t a, int64x1_t b) /// A32: VADD.I64 Dd, Dn, Dm /// A64: ADD Dd, Dn, Dm /// </summary> public static Vector64<long> AddScalar(Vector64<long> left, Vector64<long> right) => AddScalar(left, right); /// <summary> /// float32_t vadds_f32 (float32_t a, float32_t b) /// A32: VADD.F32 Sd, Sn, Sm /// A64: FADD Sd, Sn, Sm /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
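
// Usage sketch (made-up inputs) contrasting wrapping Add with AddSaturate on bytes:
Vector64<byte> big = Vector64.Create((byte)250);
Vector64<byte> ten = Vector64.Create((byte)10);
Vector64<byte> wrapped = AdvSimd.Add(big, ten);         // 250 + 10 wraps to 4
Vector64<byte> clamped = AdvSimd.AddSaturate(big, ten); // saturates at 255
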
/// </summary> public static Vector64<float> AddScalar(Vector64<float> left, Vector64<float> right) => AddScalar(left, right); /// <summary> /// uint64x1_t vadd_u64 (uint64x1_t a, uint64x1_t b) /// A32: VADD.I64 Dd, Dn, Dm /// A64: ADD Dd, Dn, Dm /// </summary> public static Vector64<ulong> AddScalar(Vector64<ulong> left, Vector64<ulong> right) => AddScalar(left, right); /// <summary> /// uint16x8_t vaddl_u8 (uint8x8_t a, uint8x8_t b) /// A32: VADDL.U8 Qd, Dn, Dm /// A64: UADDL Vd.8H, Vn.8B, Vm.8B /// </summary> public static Vector128<ushort> AddWideningLower(Vector64<byte> left, Vector64<byte> right) => AddWideningLower(left, right); /// <summary> /// int32x4_t vaddl_s16 (int16x4_t a, int16x4_t b) /// A32: VADDL.S16 Qd, Dn, Dm /// A64: SADDL Vd.4S, Vn.4H, Vm.4H /// </summary> public static Vector128<int> AddWideningLower(Vector64<short> left, Vector64<short> right) => AddWideningLower(left, right); /// <summary> /// int64x2_t vaddl_s32 (int32x2_t a, int32x2_t b) /// A32: VADDL.S32 Qd, Dn, Dm /// A64: SADDL Vd.2D, Vn.2S, Vm.2S /// </summary> public static Vector128<long> AddWideningLower(Vector64<int> left, Vector64<int> right) => AddWideningLower(left, right); /// <summary> /// int16x8_t vaddl_s8 (int8x8_t a, int8x8_t b) /// A32: VADDL.S8 Qd, Dn, Dm /// A64: SADDL Vd.8H, Vn.8B, Vm.8B /// </summary> public static Vector128<short> AddWideningLower(Vector64<sbyte> left, Vector64<sbyte> right) => AddWideningLower(left, right); /// <summary> /// uint32x4_t vaddl_u16 (uint16x4_t a, uint16x4_t b) /// A32: VADDL.U16 Qd, Dn, Dm /// A64: UADDL Vd.4S, Vn.4H, Vm.4H /// </summary> public static Vector128<uint> AddWideningLower(Vector64<ushort> left, Vector64<ushort> right) => AddWideningLower(left, right); /// <summary> /// uint64x2_t vaddl_u32 (uint32x2_t a, uint32x2_t b) /// A32: VADDL.U32 Qd, Dn, Dm /// A64: UADDL Vd.2D, Vn.2S, Vm.2S /// </summary> public static Vector128<ulong> AddWideningLower(Vector64<uint> left, Vector64<uint> right) => AddWideningLower(left, right); /// <summary> /// int16x8_t vaddw_s8 (int16x8_t a, int8x8_t b) /// A32: VADDW.S8 Qd, Qn, Dm /// A64: SADDW Vd.8H, Vn.8H, Vm.8B /// </summary> public static Vector128<short> AddWideningLower(Vector128<short> left, Vector64<sbyte> right) => AddWideningLower(left, right); /// <summary> /// int32x4_t vaddw_s16 (int32x4_t a, int16x4_t b) /// A32: VADDW.S16 Qd, Qn, Dm /// A64: SADDW Vd.4S, Vn.4S, Vm.4H /// </summary> public static Vector128<int> AddWideningLower(Vector128<int> left, Vector64<short> right) => AddWideningLower(left, right); /// <summary> /// int64x2_t vaddw_s32 (int64x2_t a, int32x2_t b) /// A32: VADDW.S32 Qd, Qn, Dm /// A64: SADDW Vd.2D, Vn.2D, Vm.2S /// </summary> public static Vector128<long> AddWideningLower(Vector128<long> left, Vector64<int> right) => AddWideningLower(left, right); /// <summary> /// uint16x8_t vaddw_u8 (uint16x8_t a, uint8x8_t b) /// A32: VADDW.U8 Qd, Qn, Dm /// A64: UADDW Vd.8H, Vn.8H, Vm.8B /// </summary> public static Vector128<ushort> AddWideningLower(Vector128<ushort> left, Vector64<byte> right) => AddWideningLower(left, right); /// <summary> /// uint32x4_t vaddw_u16 (uint32x4_t a, uint16x4_t b) /// A32: VADDW.U16 Qd, Qn, Dm /// A64: UADDW Vd.4S, Vn.4S, Vm.4H /// </summary> public static Vector128<uint> AddWideningLower(Vector128<uint> left, Vector64<ushort> right) => AddWideningLower(left, right); /// <summary> /// uint64x2_t vaddw_u32 (uint64x2_t a, uint32x2_t b) /// A32: VADDW.U32 Qd, Qn, Dm /// A64: UADDW Vd.2D, Vn.2D, Vm.2S /// </summary> public static Vector128<ulong> 
AddWideningLower(Vector128<ulong> left, Vector64<uint> right) => AddWideningLower(left, right); /// <summary> /// uint16x8_t vaddl_high_u8 (uint8x16_t a, uint8x16_t b) /// A32: VADDL.U8 Qd, Dn+1, Dm+1 /// A64: UADDL2 Vd.8H, Vn.16B, Vm.16B /// </summary> public static Vector128<ushort> AddWideningUpper(Vector128<byte> left, Vector128<byte> right) => AddWideningUpper(left, right); /// <summary> /// int32x4_t vaddl_high_s16 (int16x8_t a, int16x8_t b) /// A32: VADDL.S16 Qd, Dn+1, Dm+1 /// A64: SADDL2 Vd.4S, Vn.8H, Vm.8H /// </summary> public static Vector128<int> AddWideningUpper(Vector128<short> left, Vector128<short> right) => AddWideningUpper(left, right); /// <summary> /// int16x8_t vaddw_high_s8 (int16x8_t a, int8x16_t b) /// A32: VADDW.S8 Qd, Qn, Dm+1 /// A64: SADDW2 Vd.8H, Vn.8H, Vm.16B /// </summary> public static Vector128<short> AddWideningUpper(Vector128<short> left, Vector128<sbyte> right) => AddWideningUpper(left, right); /// <summary> /// int32x4_t vaddw_high_s16 (int32x4_t a, int16x8_t b) /// A32: VADDW.S16 Qd, Qn, Dm+1 /// A64: SADDW2 Vd.4S, Vn.4S, Vm.8H /// </summary> public static Vector128<int> AddWideningUpper(Vector128<int> left, Vector128<short> right) => AddWideningUpper(left, right); /// <summary> /// int64x2_t vaddl_high_s32 (int32x4_t a, int32x4_t b) /// A32: VADDL.S32 Qd, Dn+1, Dm+1 /// A64: SADDL2 Vd.2D, Vn.4S, Vm.4S /// </summary> public static Vector128<long> AddWideningUpper(Vector128<int> left, Vector128<int> right) => AddWideningUpper(left, right); /// <summary> /// int64x2_t vaddw_high_s32 (int64x2_t a, int32x4_t b) /// A32: VADDW.S32 Qd, Qn, Dm+1 /// A64: SADDW2 Vd.2D, Vn.2D, Vm.4S /// </summary> public static Vector128<long> AddWideningUpper(Vector128<long> left, Vector128<int> right) => AddWideningUpper(left, right); /// <summary> /// int16x8_t vaddl_high_s8 (int8x16_t a, int8x16_t b) /// A32: VADDL.S8 Qd, Dn+1, Dm+1 /// A64: SADDL2 Vd.8H, Vn.16B, Vm.16B /// </summary> public static Vector128<short> AddWideningUpper(Vector128<sbyte> left, Vector128<sbyte> right) => AddWideningUpper(left, right); /// <summary> /// uint16x8_t vaddw_high_u8 (uint16x8_t a, uint8x16_t b) /// A32: VADDW.U8 Qd, Qn, Dm+1 /// A64: UADDW2 Vd.8H, Vn.8H, Vm.16B /// </summary> public static Vector128<ushort> AddWideningUpper(Vector128<ushort> left, Vector128<byte> right) => AddWideningUpper(left, right); /// <summary> /// uint32x4_t vaddl_high_u16 (uint16x8_t a, uint16x8_t b) /// A32: VADDL.U16 Qd, Dn+1, Dm+1 /// A64: UADDL2 Vd.4S, Vn.8H, Vm.8H /// </summary> public static Vector128<uint> AddWideningUpper(Vector128<ushort> left, Vector128<ushort> right) => AddWideningUpper(left, right); /// <summary> /// uint32x4_t vaddw_high_u16 (uint32x4_t a, uint16x8_t b) /// A32: VADDW.U16 Qd, Qn, Dm+1 /// A64: UADDW2 Vd.4S, Vn.4S, Vm.8H /// </summary> public static Vector128<uint> AddWideningUpper(Vector128<uint> left, Vector128<ushort> right) => AddWideningUpper(left, right); /// <summary> /// uint64x2_t vaddl_high_u32 (uint32x4_t a, uint32x4_t b) /// A32: VADDL.U32 Qd, Dn+1, Dm+1 /// A64: UADDL2 Vd.2D, Vn.4S, Vm.4S /// </summary> public static Vector128<ulong> AddWideningUpper(Vector128<uint> left, Vector128<uint> right) => AddWideningUpper(left, right); /// <summary> /// uint64x2_t vaddw_high_u32 (uint64x2_t a, uint32x4_t b) /// A32: VADDW.U32 Qd, Qn, Dm+1 /// A64: UADDW2 Vd.2D, Vn.2D, Vm.4S /// </summary> public static Vector128<ulong> AddWideningUpper(Vector128<ulong> left, Vector128<uint> right) => AddWideningUpper(left, right); /// <summary> /// uint8x8_t vand_u8 (uint8x8_t a, uint8x8_t b) 
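
// Usage sketch (hypothetical input `v`): widening adds avoid per-lane overflow by
// producing double-width result lanes; here the 16 byte lanes are folded into eight
// ushort lanes by adding the lower and upper halves.
Vector128<byte> v = Vector128.Create((byte)255);
Vector128<ushort> wide = AdvSimd.AddWideningLower(v.GetLower(), v.GetUpper()); // 255 + 255 = 510 per lane
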
/// A32: VAND Dd, Dn, Dm /// A64: AND Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> And(Vector64<byte> left, Vector64<byte> right) => And(left, right); /// <summary> /// float64x1_t vand_f64 (float64x1_t a, float64x1_t b) /// A32: VAND Dd, Dn, Dm /// A64: AND Vd.8B, Vn.8B, Vm.8B /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. /// </summary> public static Vector64<double> And(Vector64<double> left, Vector64<double> right) => And(left, right); /// <summary> /// int16x4_t vand_s16 (int16x4_t a, int16x4_t b) /// A32: VAND Dd, Dn, Dm /// A64: AND Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<short> And(Vector64<short> left, Vector64<short> right) => And(left, right); /// <summary> /// int32x2_t vand_s32 (int32x2_t a, int32x2_t b) /// A32: VAND Dd, Dn, Dm /// A64: AND Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<int> And(Vector64<int> left, Vector64<int> right) => And(left, right); /// <summary> /// int64x1_t vand_s64 (int64x1_t a, int64x1_t b) /// A32: VAND Dd, Dn, Dm /// A64: AND Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<long> And(Vector64<long> left, Vector64<long> right) => And(left, right); /// <summary> /// int8x8_t vand_s8 (int8x8_t a, int8x8_t b) /// A32: VAND Dd, Dn, Dm /// A64: AND Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> And(Vector64<sbyte> left, Vector64<sbyte> right) => And(left, right); /// <summary> /// float32x2_t vand_f32 (float32x2_t a, float32x2_t b) /// A32: VAND Dd, Dn, Dm /// A64: AND Vd.8B, Vn.8B, Vm.8B /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. /// </summary> public static Vector64<float> And(Vector64<float> left, Vector64<float> right) => And(left, right); /// <summary> /// uint16x4_t vand_u16 (uint16x4_t a, uint16x4_t b) /// A32: VAND Dd, Dn, Dm /// A64: AND Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<ushort> And(Vector64<ushort> left, Vector64<ushort> right) => And(left, right); /// <summary> /// uint32x2_t vand_u32 (uint32x2_t a, uint32x2_t b) /// A32: VAND Dd, Dn, Dm /// A64: AND Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<uint> And(Vector64<uint> left, Vector64<uint> right) => And(left, right); /// <summary> /// uint64x1_t vand_u64 (uint64x1_t a, uint64x1_t b) /// A32: VAND Dd, Dn, Dm /// A64: AND Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<ulong> And(Vector64<ulong> left, Vector64<ulong> right) => And(left, right); /// <summary> /// uint8x16_t vandq_u8 (uint8x16_t a, uint8x16_t b) /// A32: VAND Qd, Qn, Qm /// A64: AND Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> And(Vector128<byte> left, Vector128<byte> right) => And(left, right); /// <summary> /// float64x2_t vandq_f64 (float64x2_t a, float64x2_t b) /// A32: VAND Qd, Qn, Qm /// A64: AND Vd.16B, Vn.16B, Vm.16B /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
/// </summary> public static Vector128<double> And(Vector128<double> left, Vector128<double> right) => And(left, right); /// <summary> /// int16x8_t vandq_s16 (int16x8_t a, int16x8_t b) /// A32: VAND Qd, Qn, Qm /// A64: AND Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<short> And(Vector128<short> left, Vector128<short> right) => And(left, right); /// <summary> /// int32x4_t vandq_s32 (int32x4_t a, int32x4_t b) /// A32: VAND Qd, Qn, Qm /// A64: AND Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<int> And(Vector128<int> left, Vector128<int> right) => And(left, right); /// <summary> /// int64x2_t vandq_s64 (int64x2_t a, int64x2_t b) /// A32: VAND Qd, Qn, Qm /// A64: AND Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<long> And(Vector128<long> left, Vector128<long> right) => And(left, right); /// <summary> /// int8x16_t vandq_s8 (int8x16_t a, int8x16_t b) /// A32: VAND Qd, Qn, Qm /// A64: AND Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> And(Vector128<sbyte> left, Vector128<sbyte> right) => And(left, right); /// <summary> /// float32x4_t vandq_f32 (float32x4_t a, float32x4_t b) /// A32: VAND Qd, Qn, Qm /// A64: AND Vd.16B, Vn.16B, Vm.16B /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. /// </summary> public static Vector128<float> And(Vector128<float> left, Vector128<float> right) => And(left, right); /// <summary> /// uint16x8_t vandq_u16 (uint16x8_t a, uint16x8_t b) /// A32: VAND Qd, Qn, Qm /// A64: AND Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<ushort> And(Vector128<ushort> left, Vector128<ushort> right) => And(left, right); /// <summary> /// uint32x4_t vandq_u32 (uint32x4_t a, uint32x4_t b) /// A32: VAND Qd, Qn, Qm /// A64: AND Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<uint> And(Vector128<uint> left, Vector128<uint> right) => And(left, right); /// <summary> /// uint64x2_t vandq_u64 (uint64x2_t a, uint64x2_t b) /// A32: VAND Qd, Qn, Qm /// A64: AND Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<ulong> And(Vector128<ulong> left, Vector128<ulong> right) => And(left, right); /// <summary> /// uint8x8_t vbic_u8 (uint8x8_t a, uint8x8_t b) /// A32: VBIC Dd, Dn, Dm /// A64: BIC Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> BitwiseClear(Vector64<byte> value, Vector64<byte> mask) => BitwiseClear(value, mask); /// <summary> /// float64x1_t vbic_f64 (float64x1_t a, float64x1_t b) /// A32: VBIC Dd, Dn, Dm /// A64: BIC Vd.8B, Vn.8B, Vm.8B /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
/// </summary> public static Vector64<double> BitwiseClear(Vector64<double> value, Vector64<double> mask) => BitwiseClear(value, mask); /// <summary> /// int16x4_t vbic_s16 (int16x4_t a, int16x4_t b) /// A32: VBIC Dd, Dn, Dm /// A64: BIC Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<short> BitwiseClear(Vector64<short> value, Vector64<short> mask) => BitwiseClear(value, mask); /// <summary> /// int32x2_t vbic_s32 (int32x2_t a, int32x2_t b) /// A32: VBIC Dd, Dn, Dm /// A64: BIC Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<int> BitwiseClear(Vector64<int> value, Vector64<int> mask) => BitwiseClear(value, mask); /// <summary> /// int64x1_t vbic_s64 (int64x1_t a, int64x1_t b) /// A32: VBIC Dd, Dn, Dm /// A64: BIC Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<long> BitwiseClear(Vector64<long> value, Vector64<long> mask) => BitwiseClear(value, mask); /// <summary> /// int8x8_t vbic_s8 (int8x8_t a, int8x8_t b) /// A32: VBIC Dd, Dn, Dm /// A64: BIC Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> BitwiseClear(Vector64<sbyte> value, Vector64<sbyte> mask) => BitwiseClear(value, mask); /// <summary> /// float32x2_t vbic_f32 (float32x2_t a, float32x2_t b) /// A32: VBIC Dd, Dn, Dm /// A64: BIC Vd.8B, Vn.8B, Vm.8B /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. /// </summary> public static Vector64<float> BitwiseClear(Vector64<float> value, Vector64<float> mask) => BitwiseClear(value, mask); /// <summary> /// uint16x4_t vbic_u16 (uint16x4_t a, uint16x4_t b) /// A32: VBIC Dd, Dn, Dm /// A64: BIC Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<ushort> BitwiseClear(Vector64<ushort> value, Vector64<ushort> mask) => BitwiseClear(value, mask); /// <summary> /// uint32x2_t vbic_u32 (uint32x2_t a, uint32x2_t b) /// A32: VBIC Dd, Dn, Dm /// A64: BIC Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<uint> BitwiseClear(Vector64<uint> value, Vector64<uint> mask) => BitwiseClear(value, mask); /// <summary> /// uint64x1_t vbic_u64 (uint64x1_t a, uint64x1_t b) /// A32: VBIC Dd, Dn, Dm /// A64: BIC Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<ulong> BitwiseClear(Vector64<ulong> value, Vector64<ulong> mask) => BitwiseClear(value, mask); /// <summary> /// uint8x16_t vbicq_u8 (uint8x16_t a, uint8x16_t b) /// A32: VBIC Qd, Qn, Qm /// A64: BIC Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> BitwiseClear(Vector128<byte> value, Vector128<byte> mask) => BitwiseClear(value, mask); /// <summary> /// float64x2_t vbicq_f64 (float64x2_t a, float64x2_t b) /// A32: VBIC Qd, Qn, Qm /// A64: BIC Vd.16B, Vn.16B, Vm.16B /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
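
// Usage sketch (made-up bit patterns): BitwiseClear(value, mask) computes value & ~mask,
// clearing exactly the bits set in the mask, while And keeps them.
Vector64<byte> value = Vector64.Create((byte)0b1111_0101);
Vector64<byte> mask  = Vector64.Create((byte)0b0000_1111);
Vector64<byte> kept    = AdvSimd.And(value, mask);          // 0b0000_0101
Vector64<byte> cleared = AdvSimd.BitwiseClear(value, mask); // 0b1111_0000
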
/// </summary> public static Vector128<double> BitwiseClear(Vector128<double> value, Vector128<double> mask) => BitwiseClear(value, mask); /// <summary> /// int16x8_t vbicq_s16 (int16x8_t a, int16x8_t b) /// A32: VBIC Qd, Qn, Qm /// A64: BIC Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<short> BitwiseClear(Vector128<short> value, Vector128<short> mask) => BitwiseClear(value, mask); /// <summary> /// int32x4_t vbicq_s32 (int32x4_t a, int32x4_t b) /// A32: VBIC Qd, Qn, Qm /// A64: BIC Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<int> BitwiseClear(Vector128<int> value, Vector128<int> mask) => BitwiseClear(value, mask); /// <summary> /// int64x2_t vbicq_s64 (int64x2_t a, int64x2_t b) /// A32: VBIC Qd, Qn, Qm /// A64: BIC Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<long> BitwiseClear(Vector128<long> value, Vector128<long> mask) => BitwiseClear(value, mask); /// <summary> /// int8x16_t vbicq_s8 (int8x16_t a, int8x16_t b) /// A32: VBIC Qd, Qn, Qm /// A64: BIC Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> BitwiseClear(Vector128<sbyte> value, Vector128<sbyte> mask) => BitwiseClear(value, mask); /// <summary> /// float32x4_t vbicq_f32 (float32x4_t a, float32x4_t b) /// A32: VBIC Qd, Qn, Qm /// A64: BIC Vd.16B, Vn.16B, Vm.16B /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. /// </summary> public static Vector128<float> BitwiseClear(Vector128<float> value, Vector128<float> mask) => BitwiseClear(value, mask); /// <summary> /// uint16x8_t vbicq_u16 (uint16x8_t a, uint16x8_t b) /// A32: VBIC Qd, Qn, Qm /// A64: BIC Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<ushort> BitwiseClear(Vector128<ushort> value, Vector128<ushort> mask) => BitwiseClear(value, mask); /// <summary> /// uint32x4_t vbicq_u32 (uint32x4_t a, uint32x4_t b) /// A32: VBIC Qd, Qn, Qm /// A64: BIC Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<uint> BitwiseClear(Vector128<uint> value, Vector128<uint> mask) => BitwiseClear(value, mask); /// <summary> /// uint64x2_t vbicq_u64 (uint64x2_t a, uint64x2_t b) /// A32: VBIC Qd, Qn, Qm /// A64: BIC Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<ulong> BitwiseClear(Vector128<ulong> value, Vector128<ulong> mask) => BitwiseClear(value, mask); /// <summary> /// uint8x8_t vbsl_u8 (uint8x8_t a, uint8x8_t b, uint8x8_t c) /// A32: VBSL Dd, Dn, Dm /// A64: BSL Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> BitwiseSelect(Vector64<byte> select, Vector64<byte> left, Vector64<byte> right) => BitwiseSelect(select, left, right); /// <summary> /// float64x1_t vbsl_f64 (uint64x1_t a, float64x1_t b, float64x1_t c) /// A32: VBSL Dd, Dn, Dm /// A64: BSL Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<double> BitwiseSelect(Vector64<double> select, Vector64<double> left, Vector64<double> right) => BitwiseSelect(select, left, right); /// <summary> /// int16x4_t vbsl_s16 (uint16x4_t a, int16x4_t b, int16x4_t c) /// A32: VBSL Dd, Dn, Dm /// A64: BSL Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<short> BitwiseSelect(Vector64<short> select, Vector64<short> left, Vector64<short> right) => BitwiseSelect(select, left, right); /// <summary> /// int32x2_t vbsl_s32 (uint32x2_t a, int32x2_t b, int32x2_t c) /// A32: VBSL Dd, Dn, Dm /// A64: BSL Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<int> BitwiseSelect(Vector64<int> select, Vector64<int> left, Vector64<int> right) => 
BitwiseSelect(select, left, right); /// <summary> /// int64x1_t vbsl_s64 (uint64x1_t a, int64x1_t b, int64x1_t c) /// A32: VBSL Dd, Dn, Dm /// A64: BSL Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<long> BitwiseSelect(Vector64<long> select, Vector64<long> left, Vector64<long> right) => BitwiseSelect(select, left, right); /// <summary> /// int8x8_t vbsl_s8 (uint8x8_t a, int8x8_t b, int8x8_t c) /// A32: VBSL Dd, Dn, Dm /// A64: BSL Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> BitwiseSelect(Vector64<sbyte> select, Vector64<sbyte> left, Vector64<sbyte> right) => BitwiseSelect(select, left, right); /// <summary> /// float32x2_t vbsl_f32 (uint32x2_t a, float32x2_t b, float32x2_t c) /// A32: VBSL Dd, Dn, Dm /// A64: BSL Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<float> BitwiseSelect(Vector64<float> select, Vector64<float> left, Vector64<float> right) => BitwiseSelect(select, left, right); /// <summary> /// uint16x4_t vbsl_u16 (uint16x4_t a, uint16x4_t b, uint16x4_t c) /// A32: VBSL Dd, Dn, Dm /// A64: BSL Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<ushort> BitwiseSelect(Vector64<ushort> select, Vector64<ushort> left, Vector64<ushort> right) => BitwiseSelect(select, left, right); /// <summary> /// uint32x2_t vbsl_u32 (uint32x2_t a, uint32x2_t b, uint32x2_t c) /// A32: VBSL Dd, Dn, Dm /// A64: BSL Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<uint> BitwiseSelect(Vector64<uint> select, Vector64<uint> left, Vector64<uint> right) => BitwiseSelect(select, left, right); /// <summary> /// uint64x1_t vbsl_u64 (uint64x1_t a, uint64x1_t b, uint64x1_t c) /// A32: VBSL Dd, Dn, Dm /// A64: BSL Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<ulong> BitwiseSelect(Vector64<ulong> select, Vector64<ulong> left, Vector64<ulong> right) => BitwiseSelect(select, left, right); /// <summary> /// uint8x16_t vbslq_u8 (uint8x16_t a, uint8x16_t b, uint8x16_t c) /// A32: VBSL Qd, Qn, Qm /// A64: BSL Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> BitwiseSelect(Vector128<byte> select, Vector128<byte> left, Vector128<byte> right) => BitwiseSelect(select, left, right); /// <summary> /// float64x2_t vbslq_f64 (uint64x2_t a, float64x2_t b, float64x2_t c) /// A32: VBSL Qd, Qn, Qm /// A64: BSL Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<double> BitwiseSelect(Vector128<double> select, Vector128<double> left, Vector128<double> right) => BitwiseSelect(select, left, right); /// <summary> /// int16x8_t vbslq_s16 (uint16x8_t a, int16x8_t b, int16x8_t c) /// A32: VBSL Qd, Qn, Qm /// A64: BSL Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<short> BitwiseSelect(Vector128<short> select, Vector128<short> left, Vector128<short> right) => BitwiseSelect(select, left, right); /// <summary> /// int32x4_t vbslq_s32 (uint32x4_t a, int32x4_t b, int32x4_t c) /// A32: VBSL Qd, Qn, Qm /// A64: BSL Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<int> BitwiseSelect(Vector128<int> select, Vector128<int> left, Vector128<int> right) => BitwiseSelect(select, left, right); /// <summary> /// int64x2_t vbslq_s64 (uint64x2_t a, int64x2_t b, int64x2_t c) /// A32: VBSL Qd, Qn, Qm /// A64: BSL Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<long> BitwiseSelect(Vector128<long> select, Vector128<long> left, Vector128<long> right) => BitwiseSelect(select, left, right); /// <summary> /// int8x16_t vbslq_s8 (uint8x16_t a, int8x16_t b, int8x16_t c) /// A32: VBSL Qd, Qn, Qm /// A64: BSL Vd.16B, Vn.16B, Vm.16B /// 
</summary> public static Vector128<sbyte> BitwiseSelect(Vector128<sbyte> select, Vector128<sbyte> left, Vector128<sbyte> right) => BitwiseSelect(select, left, right); /// <summary> /// float32x4_t vbslq_f32 (uint32x4_t a, float32x4_t b, float32x4_t c) /// A32: VBSL Qd, Qn, Qm /// A64: BSL Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<float> BitwiseSelect(Vector128<float> select, Vector128<float> left, Vector128<float> right) => BitwiseSelect(select, left, right); /// <summary> /// uint16x8_t vbslq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t c) /// A32: VBSL Qd, Qn, Qm /// A64: BSL Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<ushort> BitwiseSelect(Vector128<ushort> select, Vector128<ushort> left, Vector128<ushort> right) => BitwiseSelect(select, left, right); /// <summary> /// uint32x4_t vbslq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t c) /// A32: VBSL Qd, Qn, Qm /// A64: BSL Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<uint> BitwiseSelect(Vector128<uint> select, Vector128<uint> left, Vector128<uint> right) => BitwiseSelect(select, left, right); /// <summary> /// uint64x2_t vbslq_u64 (uint64x2_t a, uint64x2_t b, uint64x2_t c) /// A32: VBSL Qd, Qn, Qm /// A64: BSL Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<ulong> BitwiseSelect(Vector128<ulong> select, Vector128<ulong> left, Vector128<ulong> right) => BitwiseSelect(select, left, right); /// <summary> /// float32x2_t vrndp_f32 (float32x2_t a) /// A32: VRINTP.F32 Dd, Dm /// A64: FRINTP Vd.2S, Vn.2S /// </summary> public static Vector64<float> Ceiling(Vector64<float> value) => Ceiling(value); /// <summary> /// float32x4_t vrndpq_f32 (float32x4_t a) /// A32: VRINTP.F32 Qd, Qm /// A64: FRINTP Vd.4S, Vn.4S /// </summary> public static Vector128<float> Ceiling(Vector128<float> value) => Ceiling(value); /// <summary> /// float64x1_t vrndp_f64 (float64x1_t a) /// A32: VRINTP.F64 Dd, Dm /// A64: FRINTP Dd, Dn /// </summary> public static Vector64<double> CeilingScalar(Vector64<double> value) => CeilingScalar(value); /// <summary> /// float32_t vrndps_f32 (float32_t a) /// A32: VRINTP.F32 Sd, Sm /// A64: FRINTP Sd, Sn /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
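
// Usage sketch (illustrative mask and inputs): BitwiseSelect takes bits from `left`
// where the select mask bit is 1 and from `right` where it is 0 - a branchless
// per-bit blend. All-ones / all-zeros lanes give a per-lane select.
Vector128<int> mask  = Vector128.Create(-1, 0, -1, 0);
Vector128<int> left  = Vector128.Create(1, 2, 3, 4);
Vector128<int> right = Vector128.Create(10, 20, 30, 40);
Vector128<int> blend = AdvSimd.BitwiseSelect(mask, left, right); // (1, 20, 3, 40)
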
/// </summary> public static Vector64<float> CeilingScalar(Vector64<float> value) => CeilingScalar(value); /// <summary> /// uint8x8_t vceq_u8 (uint8x8_t a, uint8x8_t b) /// A32: VCEQ.I8 Dd, Dn, Dm /// A64: CMEQ Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> CompareEqual(Vector64<byte> left, Vector64<byte> right) => CompareEqual(left, right); /// <summary> /// uint16x4_t vceq_s16 (int16x4_t a, int16x4_t b) /// A32: VCEQ.I16 Dd, Dn, Dm /// A64: CMEQ Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<short> CompareEqual(Vector64<short> left, Vector64<short> right) => CompareEqual(left, right); /// <summary> /// uint32x2_t vceq_s32 (int32x2_t a, int32x2_t b) /// A32: VCEQ.I32 Dd, Dn, Dm /// A64: CMEQ Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<int> CompareEqual(Vector64<int> left, Vector64<int> right) => CompareEqual(left, right); /// <summary> /// uint8x8_t vceq_s8 (int8x8_t a, int8x8_t b) /// A32: VCEQ.I8 Dd, Dn, Dm /// A64: CMEQ Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> CompareEqual(Vector64<sbyte> left, Vector64<sbyte> right) => CompareEqual(left, right); /// <summary> /// uint32x2_t vceq_f32 (float32x2_t a, float32x2_t b) /// A32: VCEQ.F32 Dd, Dn, Dm /// A64: FCMEQ Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<float> CompareEqual(Vector64<float> left, Vector64<float> right) => CompareEqual(left, right); /// <summary> /// uint16x4_t vceq_u16 (uint16x4_t a, uint16x4_t b) /// A32: VCEQ.I16 Dd, Dn, Dm /// A64: CMEQ Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<ushort> CompareEqual(Vector64<ushort> left, Vector64<ushort> right) => CompareEqual(left, right); /// <summary> /// uint32x2_t vceq_u32 (uint32x2_t a, uint32x2_t b) /// A32: VCEQ.I32 Dd, Dn, Dm /// A64: CMEQ Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<uint> CompareEqual(Vector64<uint> left, Vector64<uint> right) => CompareEqual(left, right); /// <summary> /// uint8x16_t vceqq_u8 (uint8x16_t a, uint8x16_t b) /// A32: VCEQ.I8 Qd, Qn, Qm /// A64: CMEQ Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> CompareEqual(Vector128<byte> left, Vector128<byte> right) => CompareEqual(left, right); /// <summary> /// uint16x8_t vceqq_s16 (int16x8_t a, int16x8_t b) /// A32: VCEQ.I16 Qd, Qn, Qm /// A64: CMEQ Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<short> CompareEqual(Vector128<short> left, Vector128<short> right) => CompareEqual(left, right); /// <summary> /// uint32x4_t vceqq_s32 (int32x4_t a, int32x4_t b) /// A32: VCEQ.I32 Qd, Qn, Qm /// A64: CMEQ Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<int> CompareEqual(Vector128<int> left, Vector128<int> right) => CompareEqual(left, right); /// <summary> /// uint8x16_t vceqq_s8 (int8x16_t a, int8x16_t b) /// A32: VCEQ.I8 Qd, Qn, Qm /// A64: CMEQ Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> CompareEqual(Vector128<sbyte> left, Vector128<sbyte> right) => CompareEqual(left, right); /// <summary> /// uint32x4_t vceqq_f32 (float32x4_t a, float32x4_t b) /// A32: VCEQ.F32 Qd, Qn, Qm /// A64: FCMEQ Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<float> CompareEqual(Vector128<float> left, Vector128<float> right) => CompareEqual(left, right); /// <summary> /// uint16x8_t vceqq_u16 (uint16x8_t a, uint16x8_t b) /// A32: VCEQ.I16 Qd, Qn, Qm /// A64: CMEQ Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<ushort> CompareEqual(Vector128<ushort> left, Vector128<ushort> right) => CompareEqual(left, right); /// <summary> /// uint32x4_t 
vceqq_u32 (uint32x4_t a, uint32x4_t b) /// A32: VCEQ.I32 Qd, Qn, Qm /// A64: CMEQ Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<uint> CompareEqual(Vector128<uint> left, Vector128<uint> right) => CompareEqual(left, right); /// <summary> /// uint8x8_t vcgt_u8 (uint8x8_t a, uint8x8_t b) /// A32: VCGT.U8 Dd, Dn, Dm /// A64: CMHI Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> CompareGreaterThan(Vector64<byte> left, Vector64<byte> right) => CompareGreaterThan(left, right); /// <summary> /// uint16x4_t vcgt_s16 (int16x4_t a, int16x4_t b) /// A32: VCGT.S16 Dd, Dn, Dm /// A64: CMGT Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<short> CompareGreaterThan(Vector64<short> left, Vector64<short> right) => CompareGreaterThan(left, right); /// <summary> /// uint32x2_t vcgt_s32 (int32x2_t a, int32x2_t b) /// A32: VCGT.S32 Dd, Dn, Dm /// A64: CMGT Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<int> CompareGreaterThan(Vector64<int> left, Vector64<int> right) => CompareGreaterThan(left, right); /// <summary> /// uint8x8_t vcgt_s8 (int8x8_t a, int8x8_t b) /// A32: VCGT.S8 Dd, Dn, Dm /// A64: CMGT Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> CompareGreaterThan(Vector64<sbyte> left, Vector64<sbyte> right) => CompareGreaterThan(left, right); /// <summary> /// uint32x2_t vcgt_f32 (float32x2_t a, float32x2_t b) /// A32: VCGT.F32 Dd, Dn, Dm /// A64: FCMGT Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<float> CompareGreaterThan(Vector64<float> left, Vector64<float> right) => CompareGreaterThan(left, right); /// <summary> /// uint16x4_t vcgt_u16 (uint16x4_t a, uint16x4_t b) /// A32: VCGT.U16 Dd, Dn, Dm /// A64: CMHI Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<ushort> CompareGreaterThan(Vector64<ushort> left, Vector64<ushort> right) => CompareGreaterThan(left, right); /// <summary> /// uint32x2_t vcgt_u32 (uint32x2_t a, uint32x2_t b) /// A32: VCGT.U32 Dd, Dn, Dm /// A64: CMHI Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<uint> CompareGreaterThan(Vector64<uint> left, Vector64<uint> right) => CompareGreaterThan(left, right); /// <summary> /// uint8x16_t vcgtq_u8 (uint8x16_t a, uint8x16_t b) /// A32: VCGT.U8 Qd, Qn, Qm /// A64: CMHI Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> CompareGreaterThan(Vector128<byte> left, Vector128<byte> right) => CompareGreaterThan(left, right); /// <summary> /// uint16x8_t vcgtq_s16 (int16x8_t a, int16x8_t b) /// A32: VCGT.S16 Qd, Qn, Qm /// A64: CMGT Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<short> CompareGreaterThan(Vector128<short> left, Vector128<short> right) => CompareGreaterThan(left, right); /// <summary> /// uint32x4_t vcgtq_s32 (int32x4_t a, int32x4_t b) /// A32: VCGT.S32 Qd, Qn, Qm /// A64: CMGT Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<int> CompareGreaterThan(Vector128<int> left, Vector128<int> right) => CompareGreaterThan(left, right); /// <summary> /// uint8x16_t vcgtq_s8 (int8x16_t a, int8x16_t b) /// A32: VCGT.S8 Qd, Qn, Qm /// A64: CMGT Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> CompareGreaterThan(Vector128<sbyte> left, Vector128<sbyte> right) => CompareGreaterThan(left, right); /// <summary> /// uint32x4_t vcgtq_f32 (float32x4_t a, float32x4_t b) /// A32: VCGT.F32 Qd, Qn, Qm /// A64: FCMGT Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<float> CompareGreaterThan(Vector128<float> left, Vector128<float> right) => CompareGreaterThan(left, right); /// <summary> /// uint16x8_t 
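
// Usage sketch (made-up data): the comparison intrinsics return all-ones lanes where
// the predicate holds and zero lanes elsewhere; a scalar fold over the mask lanes is
// one simple way to test "any lane equal".
Vector128<uint> eq = AdvSimd.CompareEqual(Vector128.Create(1u, 2u, 3u, 4u),
                                          Vector128.Create(9u, 2u, 9u, 9u));
bool anyEqual = eq.GetElement(0) != 0 || eq.GetElement(1) != 0
             || eq.GetElement(2) != 0 || eq.GetElement(3) != 0; // true (lane 1 matches)
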
vcgtq_u16 (uint16x8_t a, uint16x8_t b) /// A32: VCGT.U16 Qd, Qn, Qm /// A64: CMHI Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<ushort> CompareGreaterThan(Vector128<ushort> left, Vector128<ushort> right) => CompareGreaterThan(left, right); /// <summary> /// uint32x4_t vcgtq_u32 (uint32x4_t a, uint32x4_t b) /// A32: VCGT.U32 Qd, Qn, Qm /// A64: CMHI Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<uint> CompareGreaterThan(Vector128<uint> left, Vector128<uint> right) => CompareGreaterThan(left, right); /// <summary> /// uint8x8_t vcge_u8 (uint8x8_t a, uint8x8_t b) /// A32: VCGE.U8 Dd, Dn, Dm /// A64: CMHS Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> CompareGreaterThanOrEqual(Vector64<byte> left, Vector64<byte> right) => CompareGreaterThanOrEqual(left, right); /// <summary> /// uint16x4_t vcge_s16 (int16x4_t a, int16x4_t b) /// A32: VCGE.S16 Dd, Dn, Dm /// A64: CMGE Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<short> CompareGreaterThanOrEqual(Vector64<short> left, Vector64<short> right) => CompareGreaterThanOrEqual(left, right); /// <summary> /// uint32x2_t vcge_s32 (int32x2_t a, int32x2_t b) /// A32: VCGE.S32 Dd, Dn, Dm /// A64: CMGE Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<int> CompareGreaterThanOrEqual(Vector64<int> left, Vector64<int> right) => CompareGreaterThanOrEqual(left, right); /// <summary> /// uint8x8_t vcge_s8 (int8x8_t a, int8x8_t b) /// A32: VCGE.S8 Dd, Dn, Dm /// A64: CMGE Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> CompareGreaterThanOrEqual(Vector64<sbyte> left, Vector64<sbyte> right) => CompareGreaterThanOrEqual(left, right); /// <summary> /// uint32x2_t vcge_f32 (float32x2_t a, float32x2_t b) /// A32: VCGE.F32 Dd, Dn, Dm /// A64: FCMGE Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<float> CompareGreaterThanOrEqual(Vector64<float> left, Vector64<float> right) => CompareGreaterThanOrEqual(left, right); /// <summary> /// uint16x4_t vcge_u16 (uint16x4_t a, uint16x4_t b) /// A32: VCGE.U16 Dd, Dn, Dm /// A64: CMHS Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<ushort> CompareGreaterThanOrEqual(Vector64<ushort> left, Vector64<ushort> right) => CompareGreaterThanOrEqual(left, right); /// <summary> /// uint32x2_t vcge_u32 (uint32x2_t a, uint32x2_t b) /// A32: VCGE.U32 Dd, Dn, Dm /// A64: CMHS Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<uint> CompareGreaterThanOrEqual(Vector64<uint> left, Vector64<uint> right) => CompareGreaterThanOrEqual(left, right); /// <summary> /// uint8x16_t vcgeq_u8 (uint8x16_t a, uint8x16_t b) /// A32: VCGE.U8 Qd, Qn, Qm /// A64: CMHS Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> CompareGreaterThanOrEqual(Vector128<byte> left, Vector128<byte> right) => CompareGreaterThanOrEqual(left, right); /// <summary> /// uint16x8_t vcgeq_s16 (int16x8_t a, int16x8_t b) /// A32: VCGE.S16 Qd, Qn, Qm /// A64: CMGE Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<short> CompareGreaterThanOrEqual(Vector128<short> left, Vector128<short> right) => CompareGreaterThanOrEqual(left, right); /// <summary> /// uint32x4_t vcgeq_s32 (int32x4_t a, int32x4_t b) /// A32: VCGE.S32 Qd, Qn, Qm /// A64: CMGE Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<int> CompareGreaterThanOrEqual(Vector128<int> left, Vector128<int> right) => CompareGreaterThanOrEqual(left, right); /// <summary> /// uint8x16_t vcgeq_s8 (int8x16_t a, int8x16_t b) /// A32: VCGE.S8 Qd, Qn, Qm /// A64: CMGE Vd.16B, Vn.16B, Vm.16B /// </summary> public 
static Vector128<sbyte> CompareGreaterThanOrEqual(Vector128<sbyte> left, Vector128<sbyte> right) => CompareGreaterThanOrEqual(left, right); /// <summary> /// uint32x4_t vcgeq_f32 (float32x4_t a, float32x4_t b) /// A32: VCGE.F32 Qd, Qn, Qm /// A64: FCMGE Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<float> CompareGreaterThanOrEqual(Vector128<float> left, Vector128<float> right) => CompareGreaterThanOrEqual(left, right); /// <summary> /// uint16x8_t vcgeq_u16 (uint16x8_t a, uint16x8_t b) /// A32: VCGE.U16 Qd, Qn, Qm /// A64: CMHS Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<ushort> CompareGreaterThanOrEqual(Vector128<ushort> left, Vector128<ushort> right) => CompareGreaterThanOrEqual(left, right); /// <summary> /// uint32x4_t vcgeq_u32 (uint32x4_t a, uint32x4_t b) /// A32: VCGE.U32 Qd, Qn, Qm /// A64: CMHS Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<uint> CompareGreaterThanOrEqual(Vector128<uint> left, Vector128<uint> right) => CompareGreaterThanOrEqual(left, right); /// <summary> /// uint8x8_t vclt_u8 (uint8x8_t a, uint8x8_t b) /// A32: VCLT.U8 Dd, Dn, Dm /// A64: CMHI Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> CompareLessThan(Vector64<byte> left, Vector64<byte> right) => CompareLessThan(left, right); /// <summary> /// uint16x4_t vclt_s16 (int16x4_t a, int16x4_t b) /// A32: VCLT.S16 Dd, Dn, Dm /// A64: CMGT Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<short> CompareLessThan(Vector64<short> left, Vector64<short> right) => CompareLessThan(left, right); /// <summary> /// uint32x2_t vclt_s32 (int32x2_t a, int32x2_t b) /// A32: VCLT.S32 Dd, Dn, Dm /// A64: CMGT Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<int> CompareLessThan(Vector64<int> left, Vector64<int> right) => CompareLessThan(left, right); /// <summary> /// uint8x8_t vclt_s8 (int8x8_t a, int8x8_t b) /// A32: VCLT.S8 Dd, Dn, Dm /// A64: CMGT Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> CompareLessThan(Vector64<sbyte> left, Vector64<sbyte> right) => CompareLessThan(left, right); /// <summary> /// uint32x2_t vclt_f32 (float32x2_t a, float32x2_t b) /// A32: VCLT.F32 Dd, Dn, Dm /// A64: FCMGT Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<float> CompareLessThan(Vector64<float> left, Vector64<float> right) => CompareLessThan(left, right); /// <summary> /// uint16x4_t vclt_u16 (uint16x4_t a, uint16x4_t b) /// A32: VCLT.U16 Dd, Dn, Dm /// A64: CMHI Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<ushort> CompareLessThan(Vector64<ushort> left, Vector64<ushort> right) => CompareLessThan(left, right); /// <summary> /// uint32x2_t vclt_u32 (uint32x2_t a, uint32x2_t b) /// A32: VCLT.U32 Dd, Dn, Dm /// A64: CMHI Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<uint> CompareLessThan(Vector64<uint> left, Vector64<uint> right) => CompareLessThan(left, right); /// <summary> /// uint8x16_t vcltq_u8 (uint8x16_t a, uint8x16_t b) /// A32: VCLT.U8 Qd, Qn, Qm /// A64: CMHI Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> CompareLessThan(Vector128<byte> left, Vector128<byte> right) => CompareLessThan(left, right); /// <summary> /// uint16x8_t vcltq_s16 (int16x8_t a, int16x8_t b) /// A32: VCLT.S16 Qd, Qn, Qm /// A64: CMGT Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<short> CompareLessThan(Vector128<short> left, Vector128<short> right) => CompareLessThan(left, right); /// <summary> /// uint32x4_t vcltq_s32 (int32x4_t a, int32x4_t b) /// A32: VCLT.S32 Qd, Qn, Qm /// A64: CMGT Vd.4S, Vn.4S, Vm.4S 
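        // Illustrative sketch (not part of the API surface): every Compare* method
        // returns a per-lane mask of all ones (match) or all zeros (no match), so the
        // result pairs naturally with BitwiseSelect for branchless per-lane choices.
        // `left` and `right` below are assumed Vector128<int> locals:
        //
        //     Vector128<int> mask = AdvSimd.CompareGreaterThan(left, right);
        //     Vector128<int> max  = AdvSimd.BitwiseSelect(mask, left, right);
        //
        // AdvSimd.Max covers this particular case directly; the mask pattern
        // generalizes to arbitrary per-lane selections.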
        /// <summary>
        /// uint8x8_t vclt_u8 (uint8x8_t a, uint8x8_t b)
        ///   A32: VCLT.U8 Dd, Dn, Dm
        ///   A64: CMHI Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<byte> CompareLessThan(Vector64<byte> left, Vector64<byte> right) => CompareLessThan(left, right);

        /// <summary>
        /// uint16x4_t vclt_s16 (int16x4_t a, int16x4_t b)
        ///   A32: VCLT.S16 Dd, Dn, Dm
        ///   A64: CMGT Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<short> CompareLessThan(Vector64<short> left, Vector64<short> right) => CompareLessThan(left, right);

        /// <summary>
        /// uint32x2_t vclt_s32 (int32x2_t a, int32x2_t b)
        ///   A32: VCLT.S32 Dd, Dn, Dm
        ///   A64: CMGT Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<int> CompareLessThan(Vector64<int> left, Vector64<int> right) => CompareLessThan(left, right);

        /// <summary>
        /// uint8x8_t vclt_s8 (int8x8_t a, int8x8_t b)
        ///   A32: VCLT.S8 Dd, Dn, Dm
        ///   A64: CMGT Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<sbyte> CompareLessThan(Vector64<sbyte> left, Vector64<sbyte> right) => CompareLessThan(left, right);

        /// <summary>
        /// uint32x2_t vclt_f32 (float32x2_t a, float32x2_t b)
        ///   A32: VCLT.F32 Dd, Dn, Dm
        ///   A64: FCMGT Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<float> CompareLessThan(Vector64<float> left, Vector64<float> right) => CompareLessThan(left, right);

        /// <summary>
        /// uint16x4_t vclt_u16 (uint16x4_t a, uint16x4_t b)
        ///   A32: VCLT.U16 Dd, Dn, Dm
        ///   A64: CMHI Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<ushort> CompareLessThan(Vector64<ushort> left, Vector64<ushort> right) => CompareLessThan(left, right);

        /// <summary>
        /// uint32x2_t vclt_u32 (uint32x2_t a, uint32x2_t b)
        ///   A32: VCLT.U32 Dd, Dn, Dm
        ///   A64: CMHI Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<uint> CompareLessThan(Vector64<uint> left, Vector64<uint> right) => CompareLessThan(left, right);

        /// <summary>
        /// uint8x16_t vcltq_u8 (uint8x16_t a, uint8x16_t b)
        ///   A32: VCLT.U8 Qd, Qn, Qm
        ///   A64: CMHI Vd.16B, Vn.16B, Vm.16B
        /// </summary>
        public static Vector128<byte> CompareLessThan(Vector128<byte> left, Vector128<byte> right) => CompareLessThan(left, right);

        /// <summary>
        /// uint16x8_t vcltq_s16 (int16x8_t a, int16x8_t b)
        ///   A32: VCLT.S16 Qd, Qn, Qm
        ///   A64: CMGT Vd.8H, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<short> CompareLessThan(Vector128<short> left, Vector128<short> right) => CompareLessThan(left, right);

        /// <summary>
        /// uint32x4_t vcltq_s32 (int32x4_t a, int32x4_t b)
        ///   A32: VCLT.S32 Qd, Qn, Qm
        ///   A64: CMGT Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<int> CompareLessThan(Vector128<int> left, Vector128<int> right) => CompareLessThan(left, right);

        /// <summary>
        /// uint8x16_t vcltq_s8 (int8x16_t a, int8x16_t b)
        ///   A32: VCLT.S8 Qd, Qn, Qm
        ///   A64: CMGT Vd.16B, Vn.16B, Vm.16B
        /// </summary>
        public static Vector128<sbyte> CompareLessThan(Vector128<sbyte> left, Vector128<sbyte> right) => CompareLessThan(left, right);

        /// <summary>
        /// uint32x4_t vcltq_f32 (float32x4_t a, float32x4_t b)
        ///   A32: VCLT.F32 Qd, Qn, Qm
        ///   A64: FCMGT Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<float> CompareLessThan(Vector128<float> left, Vector128<float> right) => CompareLessThan(left, right);

        /// <summary>
        /// uint16x8_t vcltq_u16 (uint16x8_t a, uint16x8_t b)
        ///   A32: VCLT.U16 Qd, Qn, Qm
        ///   A64: CMHI Vd.8H, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<ushort> CompareLessThan(Vector128<ushort> left, Vector128<ushort> right) => CompareLessThan(left, right);

        /// <summary>
        /// uint32x4_t vcltq_u32 (uint32x4_t a, uint32x4_t b)
        ///   A32: VCLT.U32 Qd, Qn, Qm
        ///   A64: CMHI Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<uint> CompareLessThan(Vector128<uint> left, Vector128<uint> right) => CompareLessThan(left, right);

        /// <summary>
        /// uint8x8_t vcle_u8 (uint8x8_t a, uint8x8_t b)
        ///   A32: VCLE.U8 Dd, Dn, Dm
        ///   A64: CMHS Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<byte> CompareLessThanOrEqual(Vector64<byte> left, Vector64<byte> right) => CompareLessThanOrEqual(left, right);

        /// <summary>
        /// uint16x4_t vcle_s16 (int16x4_t a, int16x4_t b)
        ///   A32: VCLE.S16 Dd, Dn, Dm
        ///   A64: CMGE Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<short> CompareLessThanOrEqual(Vector64<short> left, Vector64<short> right) => CompareLessThanOrEqual(left, right);

        /// <summary>
        /// uint32x2_t vcle_s32 (int32x2_t a, int32x2_t b)
        ///   A32: VCLE.S32 Dd, Dn, Dm
        ///   A64: CMGE Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<int> CompareLessThanOrEqual(Vector64<int> left, Vector64<int> right) => CompareLessThanOrEqual(left, right);

        /// <summary>
        /// uint8x8_t vcle_s8 (int8x8_t a, int8x8_t b)
        ///   A32: VCLE.S8 Dd, Dn, Dm
        ///   A64: CMGE Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<sbyte> CompareLessThanOrEqual(Vector64<sbyte> left, Vector64<sbyte> right) => CompareLessThanOrEqual(left, right);

        /// <summary>
        /// uint32x2_t vcle_f32 (float32x2_t a, float32x2_t b)
        ///   A32: VCLE.F32 Dd, Dn, Dm
        ///   A64: FCMGE Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<float> CompareLessThanOrEqual(Vector64<float> left, Vector64<float> right) => CompareLessThanOrEqual(left, right);

        /// <summary>
        /// uint16x4_t vcle_u16 (uint16x4_t a, uint16x4_t b)
        ///   A32: VCLE.U16 Dd, Dn, Dm
        ///   A64: CMHS Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<ushort> CompareLessThanOrEqual(Vector64<ushort> left, Vector64<ushort> right) => CompareLessThanOrEqual(left, right);

        /// <summary>
        /// uint32x2_t vcle_u32 (uint32x2_t a, uint32x2_t b)
        ///   A32: VCLE.U32 Dd, Dn, Dm
        ///   A64: CMHS Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<uint> CompareLessThanOrEqual(Vector64<uint> left, Vector64<uint> right) => CompareLessThanOrEqual(left, right);

        /// <summary>
        /// uint8x16_t vcleq_u8 (uint8x16_t a, uint8x16_t b)
        ///   A32: VCLE.U8 Qd, Qn, Qm
        ///   A64: CMHS Vd.16B, Vn.16B, Vm.16B
        /// </summary>
        public static Vector128<byte> CompareLessThanOrEqual(Vector128<byte> left, Vector128<byte> right) => CompareLessThanOrEqual(left, right);

        /// <summary>
        /// uint16x8_t vcleq_s16 (int16x8_t a, int16x8_t b)
        ///   A32: VCLE.S16 Qd, Qn, Qm
        ///   A64: CMGE Vd.8H, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<short> CompareLessThanOrEqual(Vector128<short> left, Vector128<short> right) => CompareLessThanOrEqual(left, right);

        /// <summary>
        /// uint32x4_t vcleq_s32 (int32x4_t a, int32x4_t b)
        ///   A32: VCLE.S32 Qd, Qn, Qm
        ///   A64: CMGE Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<int> CompareLessThanOrEqual(Vector128<int> left, Vector128<int> right) => CompareLessThanOrEqual(left, right);

        /// <summary>
        /// uint8x16_t vcleq_s8 (int8x16_t a, int8x16_t b)
        ///   A32: VCLE.S8 Qd, Qn, Qm
        ///   A64: CMGE Vd.16B, Vn.16B, Vm.16B
        /// </summary>
        public static Vector128<sbyte> CompareLessThanOrEqual(Vector128<sbyte> left, Vector128<sbyte> right) => CompareLessThanOrEqual(left, right);

        /// <summary>
        /// uint32x4_t vcleq_f32 (float32x4_t a, float32x4_t b)
        ///   A32: VCLE.F32 Qd, Qn, Qm
        ///   A64: FCMGE Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<float> CompareLessThanOrEqual(Vector128<float> left, Vector128<float> right) => CompareLessThanOrEqual(left, right);

        /// <summary>
        /// uint16x8_t vcleq_u16 (uint16x8_t a, uint16x8_t b)
        ///   A32: VCLE.U16 Qd, Qn, Qm
        ///   A64: CMHS Vd.8H, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<ushort> CompareLessThanOrEqual(Vector128<ushort> left, Vector128<ushort> right) => CompareLessThanOrEqual(left, right);

        /// <summary>
        /// uint32x4_t vcleq_u32 (uint32x4_t a, uint32x4_t b)
        ///   A32: VCLE.U32 Qd, Qn, Qm
        ///   A64: CMHS Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<uint> CompareLessThanOrEqual(Vector128<uint> left, Vector128<uint> right) => CompareLessThanOrEqual(left, right);
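        // Illustrative sketch: because the comparison masks are plain bit patterns,
        // they compose with the bitwise ops. A hypothetical per-lane range test
        // (lo <= x <= hi, all Vector128<int> locals) can be built as:
        //
        //     Vector128<int> inRange = AdvSimd.And(
        //         AdvSimd.CompareGreaterThanOrEqual(x, lo),
        //         AdvSimd.CompareLessThanOrEqual(x, hi));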
        /// <summary>
        /// uint8x8_t vtst_u8 (uint8x8_t a, uint8x8_t b)
        ///   A32: VTST.8 Dd, Dn, Dm
        ///   A64: CMTST Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<byte> CompareTest(Vector64<byte> left, Vector64<byte> right) => CompareTest(left, right);

        /// <summary>
        /// uint16x4_t vtst_s16 (int16x4_t a, int16x4_t b)
        ///   A32: VTST.16 Dd, Dn, Dm
        ///   A64: CMTST Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<short> CompareTest(Vector64<short> left, Vector64<short> right) => CompareTest(left, right);

        /// <summary>
        /// uint32x2_t vtst_s32 (int32x2_t a, int32x2_t b)
        ///   A32: VTST.32 Dd, Dn, Dm
        ///   A64: CMTST Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<int> CompareTest(Vector64<int> left, Vector64<int> right) => CompareTest(left, right);

        /// <summary>
        /// uint8x8_t vtst_s8 (int8x8_t a, int8x8_t b)
        ///   A32: VTST.8 Dd, Dn, Dm
        ///   A64: CMTST Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<sbyte> CompareTest(Vector64<sbyte> left, Vector64<sbyte> right) => CompareTest(left, right);

        /// <summary>
        /// uint32x2_t vtst_f32 (float32x2_t a, float32x2_t b)
        ///   A32: VTST.32 Dd, Dn, Dm
        ///   A64: CMTST Vd.2S, Vn.2S, Vm.2S
        /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
        /// </summary>
        public static Vector64<float> CompareTest(Vector64<float> left, Vector64<float> right) => CompareTest(left, right);

        /// <summary>
        /// uint16x4_t vtst_u16 (uint16x4_t a, uint16x4_t b)
        ///   A32: VTST.16 Dd, Dn, Dm
        ///   A64: CMTST Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<ushort> CompareTest(Vector64<ushort> left, Vector64<ushort> right) => CompareTest(left, right);

        /// <summary>
        /// uint32x2_t vtst_u32 (uint32x2_t a, uint32x2_t b)
        ///   A32: VTST.32 Dd, Dn, Dm
        ///   A64: CMTST Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<uint> CompareTest(Vector64<uint> left, Vector64<uint> right) => CompareTest(left, right);

        /// <summary>
        /// uint8x16_t vtstq_u8 (uint8x16_t a, uint8x16_t b)
        ///   A32: VTST.8 Qd, Qn, Qm
        ///   A64: CMTST Vd.16B, Vn.16B, Vm.16B
        /// </summary>
        public static Vector128<byte> CompareTest(Vector128<byte> left, Vector128<byte> right) => CompareTest(left, right);

        /// <summary>
        /// uint16x8_t vtstq_s16 (int16x8_t a, int16x8_t b)
        ///   A32: VTST.16 Qd, Qn, Qm
        ///   A64: CMTST Vd.8H, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<short> CompareTest(Vector128<short> left, Vector128<short> right) => CompareTest(left, right);

        /// <summary>
        /// uint32x4_t vtstq_s32 (int32x4_t a, int32x4_t b)
        ///   A32: VTST.32 Qd, Qn, Qm
        ///   A64: CMTST Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<int> CompareTest(Vector128<int> left, Vector128<int> right) => CompareTest(left, right);

        /// <summary>
        /// uint8x16_t vtstq_s8 (int8x16_t a, int8x16_t b)
        ///   A32: VTST.8 Qd, Qn, Qm
        ///   A64: CMTST Vd.16B, Vn.16B, Vm.16B
        /// </summary>
        public static Vector128<sbyte> CompareTest(Vector128<sbyte> left, Vector128<sbyte> right) => CompareTest(left, right);

        /// <summary>
        /// uint32x4_t vtstq_f32 (float32x4_t a, float32x4_t b)
        ///   A32: VTST.32 Qd, Qn, Qm
        ///   A64: CMTST Vd.4S, Vn.4S, Vm.4S
        /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
        /// </summary>
        public static Vector128<float> CompareTest(Vector128<float> left, Vector128<float> right) => CompareTest(left, right);

        /// <summary>
        /// uint16x8_t vtstq_u16 (uint16x8_t a, uint16x8_t b)
        ///   A32: VTST.16 Qd, Qn, Qm
        ///   A64: CMTST Vd.8H, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<ushort> CompareTest(Vector128<ushort> left, Vector128<ushort> right) => CompareTest(left, right);

        /// <summary>
        /// uint32x4_t vtstq_u32 (uint32x4_t a, uint32x4_t b)
        ///   A32: VTST.32 Qd, Qn, Qm
        ///   A64: CMTST Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<uint> CompareTest(Vector128<uint> left, Vector128<uint> right) => CompareTest(left, right);
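        // Illustrative sketch: CompareTest sets a lane to all ones when the bitwise
        // AND of the operands is non-zero, which makes it a per-lane flag test. For
        // a hypothetical Vector128<byte> local named values:
        //
        //     Vector128<byte> highBitSet = AdvSimd.CompareTest(values, Vector128.Create((byte)0x80));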
        /// <summary>
        /// int32x2_t vcvta_s32_f32 (float32x2_t a)
        ///   A32: VCVTA.S32.F32 Dd, Dm
        ///   A64: FCVTAS Vd.2S, Vn.2S
        /// </summary>
        public static Vector64<int> ConvertToInt32RoundAwayFromZero(Vector64<float> value) => ConvertToInt32RoundAwayFromZero(value);

        /// <summary>
        /// int32x4_t vcvtaq_s32_f32 (float32x4_t a)
        ///   A32: VCVTA.S32.F32 Qd, Qm
        ///   A64: FCVTAS Vd.4S, Vn.4S
        /// </summary>
        public static Vector128<int> ConvertToInt32RoundAwayFromZero(Vector128<float> value) => ConvertToInt32RoundAwayFromZero(value);

        /// <summary>
        /// int32_t vcvtas_s32_f32 (float32_t a)
        ///   A32: VCVTA.S32.F32 Sd, Sm
        ///   A64: FCVTAS Sd, Sn
        /// </summary>
        public static Vector64<int> ConvertToInt32RoundAwayFromZeroScalar(Vector64<float> value) => ConvertToInt32RoundAwayFromZeroScalar(value);

        /// <summary>
        /// int32x2_t vcvtn_s32_f32 (float32x2_t a)
        ///   A32: VCVTN.S32.F32 Dd, Dm
        ///   A64: FCVTNS Vd.2S, Vn.2S
        /// </summary>
        public static Vector64<int> ConvertToInt32RoundToEven(Vector64<float> value) => ConvertToInt32RoundToEven(value);

        /// <summary>
        /// int32x4_t vcvtnq_s32_f32 (float32x4_t a)
        ///   A32: VCVTN.S32.F32 Qd, Qm
        ///   A64: FCVTNS Vd.4S, Vn.4S
        /// </summary>
        public static Vector128<int> ConvertToInt32RoundToEven(Vector128<float> value) => ConvertToInt32RoundToEven(value);

        /// <summary>
        /// int32_t vcvtns_s32_f32 (float32_t a)
        ///   A32: VCVTN.S32.F32 Sd, Sm
        ///   A64: FCVTNS Sd, Sn
        /// </summary>
        public static Vector64<int> ConvertToInt32RoundToEvenScalar(Vector64<float> value) => ConvertToInt32RoundToEvenScalar(value);

        /// <summary>
        /// int32x2_t vcvtm_s32_f32 (float32x2_t a)
        ///   A32: VCVTM.S32.F32 Dd, Dm
        ///   A64: FCVTMS Vd.2S, Vn.2S
        /// </summary>
        public static Vector64<int> ConvertToInt32RoundToNegativeInfinity(Vector64<float> value) => ConvertToInt32RoundToNegativeInfinity(value);

        /// <summary>
        /// int32x4_t vcvtmq_s32_f32 (float32x4_t a)
        ///   A32: VCVTM.S32.F32 Qd, Qm
        ///   A64: FCVTMS Vd.4S, Vn.4S
        /// </summary>
        public static Vector128<int> ConvertToInt32RoundToNegativeInfinity(Vector128<float> value) => ConvertToInt32RoundToNegativeInfinity(value);

        /// <summary>
        /// int32_t vcvtms_s32_f32 (float32_t a)
        ///   A32: VCVTM.S32.F32 Sd, Sm
        ///   A64: FCVTMS Sd, Sn
        /// </summary>
        public static Vector64<int> ConvertToInt32RoundToNegativeInfinityScalar(Vector64<float> value) => ConvertToInt32RoundToNegativeInfinityScalar(value);

        /// <summary>
        /// int32x2_t vcvtp_s32_f32 (float32x2_t a)
        ///   A32: VCVTP.S32.F32 Dd, Dm
        ///   A64: FCVTPS Vd.2S, Vn.2S
        /// </summary>
        public static Vector64<int> ConvertToInt32RoundToPositiveInfinity(Vector64<float> value) => ConvertToInt32RoundToPositiveInfinity(value);

        /// <summary>
        /// int32x4_t vcvtpq_s32_f32 (float32x4_t a)
        ///   A32: VCVTP.S32.F32 Qd, Qm
        ///   A64: FCVTPS Vd.4S, Vn.4S
        /// </summary>
        public static Vector128<int> ConvertToInt32RoundToPositiveInfinity(Vector128<float> value) => ConvertToInt32RoundToPositiveInfinity(value);

        /// <summary>
        /// int32_t vcvtps_s32_f32 (float32_t a)
        ///   A32: VCVTP.S32.F32 Sd, Sm
        ///   A64: FCVTPS Sd, Sn
        /// </summary>
        public static Vector64<int> ConvertToInt32RoundToPositiveInfinityScalar(Vector64<float> value) => ConvertToInt32RoundToPositiveInfinityScalar(value);

        /// <summary>
        /// int32x2_t vcvt_s32_f32 (float32x2_t a)
        ///   A32: VCVT.S32.F32 Dd, Dm
        ///   A64: FCVTZS Vd.2S, Vn.2S
        /// </summary>
        public static Vector64<int> ConvertToInt32RoundToZero(Vector64<float> value) => ConvertToInt32RoundToZero(value);

        /// <summary>
        /// int32x4_t vcvtq_s32_f32 (float32x4_t a)
        ///   A32: VCVT.S32.F32 Qd, Qm
        ///   A64: FCVTZS Vd.4S, Vn.4S
        /// </summary>
        public static Vector128<int> ConvertToInt32RoundToZero(Vector128<float> value) => ConvertToInt32RoundToZero(value);

        /// <summary>
        /// int32_t vcvts_s32_f32 (float32_t a)
        ///   A32: VCVT.S32.F32 Sd, Sm
        ///   A64: FCVTZS Sd, Sn
        /// </summary>
        public static Vector64<int> ConvertToInt32RoundToZeroScalar(Vector64<float> value) => ConvertToInt32RoundToZeroScalar(value);

        /// <summary>
        /// float32x2_t vcvt_f32_s32 (int32x2_t a)
        ///   A32: VCVT.F32.S32 Dd, Dm
        ///   A64: SCVTF Vd.2S, Vn.2S
        /// </summary>
        public static Vector64<float> ConvertToSingle(Vector64<int> value) => ConvertToSingle(value);

        /// <summary>
        /// float32x2_t vcvt_f32_u32 (uint32x2_t a)
        ///   A32: VCVT.F32.U32 Dd, Dm
        ///   A64: UCVTF Vd.2S, Vn.2S
        /// </summary>
        public static Vector64<float> ConvertToSingle(Vector64<uint> value) => ConvertToSingle(value);

        /// <summary>
        /// float32x4_t vcvtq_f32_s32 (int32x4_t a)
        ///   A32: VCVT.F32.S32 Qd, Qm
        ///   A64: SCVTF Vd.4S, Vn.4S
        /// </summary>
        public static Vector128<float> ConvertToSingle(Vector128<int> value) => ConvertToSingle(value);

        /// <summary>
        /// float32x4_t vcvtq_f32_u32 (uint32x4_t a)
        ///   A32: VCVT.F32.U32 Qd, Qm
        ///   A64: UCVTF Vd.4S, Vn.4S
        /// </summary>
        public static Vector128<float> ConvertToSingle(Vector128<uint> value) => ConvertToSingle(value);

        /// <summary>
        /// float32_t vcvts_f32_s32 (int32_t a)
        ///   A32: VCVT.F32.S32 Sd, Sm
        ///   A64: SCVTF Sd, Sn
        /// </summary>
        public static Vector64<float> ConvertToSingleScalar(Vector64<int> value) => ConvertToSingleScalar(value);

        /// <summary>
        /// float32_t vcvts_f32_u32 (uint32_t a)
        ///   A32: VCVT.F32.U32 Sd, Sm
        ///   A64: UCVTF Sd, Sn
        /// </summary>
        public static Vector64<float> ConvertToSingleScalar(Vector64<uint> value) => ConvertToSingleScalar(value);

        /// <summary>
        /// uint32x2_t vcvta_u32_f32 (float32x2_t a)
        ///   A32: VCVTA.U32.F32 Dd, Dm
        ///   A64: FCVTAU Vd.2S, Vn.2S
        /// </summary>
        public static Vector64<uint> ConvertToUInt32RoundAwayFromZero(Vector64<float> value) => ConvertToUInt32RoundAwayFromZero(value);

        /// <summary>
        /// uint32x4_t vcvtaq_u32_f32 (float32x4_t a)
        ///   A32: VCVTA.U32.F32 Qd, Qm
        ///   A64: FCVTAU Vd.4S, Vn.4S
        /// </summary>
        public static Vector128<uint> ConvertToUInt32RoundAwayFromZero(Vector128<float> value) => ConvertToUInt32RoundAwayFromZero(value);

        /// <summary>
        /// uint32_t vcvtas_u32_f32 (float32_t a)
        ///   A32: VCVTA.U32.F32 Sd, Sm
        ///   A64: FCVTAU Sd, Sn
        /// </summary>
        public static Vector64<uint> ConvertToUInt32RoundAwayFromZeroScalar(Vector64<float> value) => ConvertToUInt32RoundAwayFromZeroScalar(value);

        /// <summary>
        /// uint32x2_t vcvtn_u32_f32 (float32x2_t a)
        ///   A32: VCVTN.U32.F32 Dd, Dm
        ///   A64: FCVTNU Vd.2S, Vn.2S
        /// </summary>
        public static Vector64<uint> ConvertToUInt32RoundToEven(Vector64<float> value) => ConvertToUInt32RoundToEven(value);

        /// <summary>
        /// uint32x4_t vcvtnq_u32_f32 (float32x4_t a)
        ///   A32: VCVTN.U32.F32 Qd, Qm
        ///   A64: FCVTNU Vd.4S, Vn.4S
        /// </summary>
        public static Vector128<uint> ConvertToUInt32RoundToEven(Vector128<float> value) => ConvertToUInt32RoundToEven(value);

        /// <summary>
        /// uint32_t vcvtns_u32_f32 (float32_t a)
        ///   A32: VCVTN.U32.F32 Sd, Sm
        ///   A64: FCVTNU Sd, Sn
        /// </summary>
        public static Vector64<uint> ConvertToUInt32RoundToEvenScalar(Vector64<float> value) => ConvertToUInt32RoundToEvenScalar(value);

        /// <summary>
        /// uint32x2_t vcvtm_u32_f32 (float32x2_t a)
        ///   A32: VCVTM.U32.F32 Dd, Dm
        ///   A64: FCVTMU Vd.2S, Vn.2S
        /// </summary>
        public static Vector64<uint> ConvertToUInt32RoundToNegativeInfinity(Vector64<float> value) => ConvertToUInt32RoundToNegativeInfinity(value);

        /// <summary>
        /// uint32x4_t vcvtmq_u32_f32 (float32x4_t a)
        ///   A32: VCVTM.U32.F32 Qd, Qm
        ///   A64: FCVTMU Vd.4S, Vn.4S
        /// </summary>
        public static Vector128<uint> ConvertToUInt32RoundToNegativeInfinity(Vector128<float> value) => ConvertToUInt32RoundToNegativeInfinity(value);

        /// <summary>
        /// uint32_t vcvtms_u32_f32 (float32_t a)
        ///   A32: VCVTM.U32.F32 Sd, Sm
        ///   A64: FCVTMU Sd, Sn
        /// </summary>
        public static Vector64<uint> ConvertToUInt32RoundToNegativeInfinityScalar(Vector64<float> value) => ConvertToUInt32RoundToNegativeInfinityScalar(value);

        /// <summary>
        /// uint32x2_t vcvtp_u32_f32 (float32x2_t a)
        ///   A32: VCVTP.U32.F32 Dd, Dm
        ///   A64: FCVTPU Vd.2S, Vn.2S
        /// </summary>
        public static Vector64<uint> ConvertToUInt32RoundToPositiveInfinity(Vector64<float> value) => ConvertToUInt32RoundToPositiveInfinity(value);

        /// <summary>
        /// uint32x4_t vcvtpq_u32_f32 (float32x4_t a)
        ///   A32: VCVTP.U32.F32 Qd, Qm
        ///   A64: FCVTPU Vd.4S, Vn.4S
        /// </summary>
        public static Vector128<uint> ConvertToUInt32RoundToPositiveInfinity(Vector128<float> value) => ConvertToUInt32RoundToPositiveInfinity(value);

        /// <summary>
        /// uint32_t vcvtps_u32_f32 (float32_t a)
        ///   A32: VCVTP.U32.F32 Sd, Sm
        ///   A64: FCVTPU Sd, Sn
        /// </summary>
        public static Vector64<uint> ConvertToUInt32RoundToPositiveInfinityScalar(Vector64<float> value) => ConvertToUInt32RoundToPositiveInfinityScalar(value);

        /// <summary>
        /// uint32x2_t vcvt_u32_f32 (float32x2_t a)
        ///   A32: VCVT.U32.F32 Dd, Dm
        ///   A64: FCVTZU Vd.2S, Vn.2S
        /// </summary>
        public static Vector64<uint> ConvertToUInt32RoundToZero(Vector64<float> value) => ConvertToUInt32RoundToZero(value);

        /// <summary>
        /// uint32x4_t vcvtq_u32_f32 (float32x4_t a)
        ///   A32: VCVT.U32.F32 Qd, Qm
        ///   A64: FCVTZU Vd.4S, Vn.4S
        /// </summary>
        public static Vector128<uint> ConvertToUInt32RoundToZero(Vector128<float> value) => ConvertToUInt32RoundToZero(value);

        /// <summary>
        /// uint32_t vcvts_u32_f32 (float32_t a)
        ///   A32: VCVT.U32.F32 Sd, Sm
        ///   A64: FCVTZU Sd, Sn
        /// </summary>
        public static Vector64<uint> ConvertToUInt32RoundToZeroScalar(Vector64<float> value) => ConvertToUInt32RoundToZeroScalar(value);
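        // Illustrative sketch of how the rounding modes differ on ties; the expected
        // per-lane results are shown in the trailing comments:
        //
        //     Vector128<float> v = Vector128.Create(2.5f, -2.5f, 1.5f, -1.5f);
        //     AdvSimd.ConvertToInt32RoundAwayFromZero(v); //  3, -3,  2, -2
        //     AdvSimd.ConvertToInt32RoundToEven(v);       //  2, -2,  2, -2
        //     AdvSimd.ConvertToInt32RoundToZero(v);       //  2, -2,  1, -1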
        /// <summary>
        /// float64x1_t vdiv_f64 (float64x1_t a, float64x1_t b)
        ///   A32: VDIV.F64 Dd, Dn, Dm
        ///   A64: FDIV Dd, Dn, Dm
        /// </summary>
        public static Vector64<double> DivideScalar(Vector64<double> left, Vector64<double> right) => DivideScalar(left, right);

        /// <summary>
        /// float32_t vdivs_f32 (float32_t a, float32_t b)
        ///   A32: VDIV.F32 Sd, Sn, Sm
        ///   A64: FDIV Sd, Sn, Sm
        /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
        /// </summary>
        public static Vector64<float> DivideScalar(Vector64<float> left, Vector64<float> right) => DivideScalar(left, right);
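        // Illustrative sketch: this class exposes no full-vector single-precision
        // divide (AdvSimd.Arm64.Divide provides one on A64 only). A common substitute
        // is the reciprocal estimate refined by one Newton-Raphson step; the result is
        // approximate, so this is only appropriate when exact division is not needed.
        // `n` and `d` are assumed Vector128<float> locals:
        //
        //     Vector128<float> est = AdvSimd.ReciprocalEstimate(d);
        //     est = AdvSimd.Multiply(est, AdvSimd.ReciprocalStep(d, est)); // est *= (2 - d * est)
        //     Vector128<float> quotient = AdvSimd.Multiply(n, est);        // ~ n / d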
        /// <summary>
        /// uint8x8_t vdup_lane_u8 (uint8x8_t vec, const int lane)
        ///   A32: VDUP.8 Dd, Dm[index]
        ///   A64: DUP Vd.8B, Vn.B[index]
        /// </summary>
        public static Vector64<byte> DuplicateSelectedScalarToVector64(Vector64<byte> value, byte index) => DuplicateSelectedScalarToVector64(value, index);

        /// <summary>
        /// int16x4_t vdup_lane_s16 (int16x4_t vec, const int lane)
        ///   A32: VDUP.16 Dd, Dm[index]
        ///   A64: DUP Vd.4H, Vn.H[index]
        /// </summary>
        public static Vector64<short> DuplicateSelectedScalarToVector64(Vector64<short> value, byte index) => DuplicateSelectedScalarToVector64(value, index);

        /// <summary>
        /// int32x2_t vdup_lane_s32 (int32x2_t vec, const int lane)
        ///   A32: VDUP.32 Dd, Dm[index]
        ///   A64: DUP Vd.2S, Vn.S[index]
        /// </summary>
        public static Vector64<int> DuplicateSelectedScalarToVector64(Vector64<int> value, byte index) => DuplicateSelectedScalarToVector64(value, index);

        /// <summary>
        /// float32x2_t vdup_lane_f32 (float32x2_t vec, const int lane)
        ///   A32: VDUP.32 Dd, Dm[index]
        ///   A64: DUP Vd.2S, Vn.S[index]
        /// </summary>
        public static Vector64<float> DuplicateSelectedScalarToVector64(Vector64<float> value, byte index) => DuplicateSelectedScalarToVector64(value, index);

        /// <summary>
        /// int8x8_t vdup_lane_s8 (int8x8_t vec, const int lane)
        ///   A32: VDUP.8 Dd, Dm[index]
        ///   A64: DUP Vd.8B, Vn.B[index]
        /// </summary>
        public static Vector64<sbyte> DuplicateSelectedScalarToVector64(Vector64<sbyte> value, byte index) => DuplicateSelectedScalarToVector64(value, index);

        /// <summary>
        /// uint16x4_t vdup_lane_u16 (uint16x4_t vec, const int lane)
        ///   A32: VDUP.16 Dd, Dm[index]
        ///   A64: DUP Vd.4H, Vn.H[index]
        /// </summary>
        public static Vector64<ushort> DuplicateSelectedScalarToVector64(Vector64<ushort> value, byte index) => DuplicateSelectedScalarToVector64(value, index);

        /// <summary>
        /// uint32x2_t vdup_lane_u32 (uint32x2_t vec, const int lane)
        ///   A32: VDUP.32 Dd, Dm[index]
        ///   A64: DUP Vd.2S, Vn.S[index]
        /// </summary>
        public static Vector64<uint> DuplicateSelectedScalarToVector64(Vector64<uint> value, byte index) => DuplicateSelectedScalarToVector64(value, index);

        /// <summary>
        /// uint8x8_t vdup_laneq_u8 (uint8x16_t vec, const int lane)
        ///   A32: VDUP.8 Dd, Dm[index]
        ///   A64: DUP Vd.8B, Vn.B[index]
        /// </summary>
        public static Vector64<byte> DuplicateSelectedScalarToVector64(Vector128<byte> value, byte index) => DuplicateSelectedScalarToVector64(value, index);

        /// <summary>
        /// int16x4_t vdup_laneq_s16 (int16x8_t vec, const int lane)
        ///   A32: VDUP.16 Dd, Dm[index]
        ///   A64: DUP Vd.4H, Vn.H[index]
        /// </summary>
        public static Vector64<short> DuplicateSelectedScalarToVector64(Vector128<short> value, byte index) => DuplicateSelectedScalarToVector64(value, index);

        /// <summary>
        /// int32x2_t vdup_laneq_s32 (int32x4_t vec, const int lane)
        ///   A32: VDUP.32 Dd, Dm[index]
        ///   A64: DUP Vd.2S, Vn.S[index]
        /// </summary>
        public static Vector64<int> DuplicateSelectedScalarToVector64(Vector128<int> value, byte index) => DuplicateSelectedScalarToVector64(value, index);

        /// <summary>
        /// float32x2_t vdup_laneq_f32 (float32x4_t vec, const int lane)
        ///   A32: VDUP.32 Dd, Dm[index]
        ///   A64: DUP Vd.2S, Vn.S[index]
        /// </summary>
        public static Vector64<float> DuplicateSelectedScalarToVector64(Vector128<float> value, byte index) => DuplicateSelectedScalarToVector64(value, index);

        /// <summary>
        /// int8x8_t vdup_laneq_s8 (int8x16_t vec, const int lane)
        ///   A32: VDUP.8 Dd, Dm[index]
        ///   A64: DUP Vd.8B, Vn.B[index]
        /// </summary>
        public static Vector64<sbyte> DuplicateSelectedScalarToVector64(Vector128<sbyte> value, byte index) => DuplicateSelectedScalarToVector64(value, index);

        /// <summary>
        /// uint16x4_t vdup_laneq_u16 (uint16x8_t vec, const int lane)
        ///   A32: VDUP.16 Dd, Dm[index]
        ///   A64: DUP Vd.4H, Vn.H[index]
        /// </summary>
        public static Vector64<ushort> DuplicateSelectedScalarToVector64(Vector128<ushort> value, byte index) => DuplicateSelectedScalarToVector64(value, index);

        /// <summary>
        /// uint32x2_t vdup_laneq_u32 (uint32x4_t vec, const int lane)
        ///   A32: VDUP.32 Dd, Dm[index]
        ///   A64: DUP Vd.2S, Vn.S[index]
        /// </summary>
        public static Vector64<uint> DuplicateSelectedScalarToVector64(Vector128<uint> value, byte index) => DuplicateSelectedScalarToVector64(value, index);

        /// <summary>
        /// uint8x16_t vdupq_lane_u8 (uint8x8_t vec, const int lane)
        ///   A32: VDUP.8 Qd, Dm[index]
        ///   A64: DUP Vd.16B, Vn.B[index]
        /// </summary>
        public static Vector128<byte> DuplicateSelectedScalarToVector128(Vector64<byte> value, byte index) => DuplicateSelectedScalarToVector128(value, index);

        /// <summary>
        /// int16x8_t vdupq_lane_s16 (int16x4_t vec, const int lane)
        ///   A32: VDUP.16 Qd, Dm[index]
        ///   A64: DUP Vd.8H, Vn.H[index]
        /// </summary>
        public static Vector128<short> DuplicateSelectedScalarToVector128(Vector64<short> value, byte index) => DuplicateSelectedScalarToVector128(value, index);

        /// <summary>
        /// int32x4_t vdupq_lane_s32 (int32x2_t vec, const int lane)
        ///   A32: VDUP.32 Qd, Dm[index]
        ///   A64: DUP Vd.4S, Vn.S[index]
        /// </summary>
        public static Vector128<int> DuplicateSelectedScalarToVector128(Vector64<int> value, byte index) => DuplicateSelectedScalarToVector128(value, index);

        /// <summary>
        /// float32x4_t vdupq_lane_f32 (float32x2_t vec, const int lane)
        ///   A32: VDUP.32 Qd, Dm[index]
        ///   A64: DUP Vd.4S, Vn.S[index]
        /// </summary>
        public static Vector128<float> DuplicateSelectedScalarToVector128(Vector64<float> value, byte index) => DuplicateSelectedScalarToVector128(value, index);

        /// <summary>
        /// int8x16_t vdupq_lane_s8 (int8x8_t vec, const int lane)
        ///   A32: VDUP.8 Qd, Dm[index]
        ///   A64: DUP Vd.16B, Vn.B[index]
        /// </summary>
        public static Vector128<sbyte> DuplicateSelectedScalarToVector128(Vector64<sbyte> value, byte index) => DuplicateSelectedScalarToVector128(value, index);

        /// <summary>
        /// uint16x8_t vdupq_lane_u16 (uint16x4_t vec, const int lane)
        ///   A32: VDUP.16 Qd, Dm[index]
        ///   A64: DUP Vd.8H, Vn.H[index]
        /// </summary>
        public static Vector128<ushort> DuplicateSelectedScalarToVector128(Vector64<ushort> value, byte index) => DuplicateSelectedScalarToVector128(value, index);

        /// <summary>
        /// uint32x4_t vdupq_lane_u32 (uint32x2_t vec, const int lane)
        ///   A32: VDUP.32 Qd, Dm[index]
        ///   A64: DUP Vd.4S, Vn.S[index]
        /// </summary>
        public static Vector128<uint> DuplicateSelectedScalarToVector128(Vector64<uint> value, byte index) => DuplicateSelectedScalarToVector128(value, index);

        /// <summary>
        /// uint8x16_t vdupq_laneq_u8 (uint8x16_t vec, const int lane)
        ///   A32: VDUP.8 Qd, Dm[index]
        ///   A64: DUP Vd.16B, Vn.B[index]
        /// </summary>
        public static Vector128<byte> DuplicateSelectedScalarToVector128(Vector128<byte> value, byte index) => DuplicateSelectedScalarToVector128(value, index);

        /// <summary>
        /// int16x8_t vdupq_laneq_s16 (int16x8_t vec, const int lane)
        ///   A32: VDUP.16 Qd, Dm[index]
        ///   A64: DUP Vd.8H, Vn.H[index]
        /// </summary>
        public static Vector128<short> DuplicateSelectedScalarToVector128(Vector128<short> value, byte index) => DuplicateSelectedScalarToVector128(value, index);

        /// <summary>
        /// int32x4_t vdupq_laneq_s32 (int32x4_t vec, const int lane)
        ///   A32: VDUP.32 Qd, Dm[index]
        ///   A64: DUP Vd.4S, Vn.S[index]
        /// </summary>
        public static Vector128<int> DuplicateSelectedScalarToVector128(Vector128<int> value, byte index) => DuplicateSelectedScalarToVector128(value, index);

        /// <summary>
        /// float32x4_t vdupq_laneq_f32 (float32x4_t vec, const int lane)
        ///   A32: VDUP.32 Qd, Dm[index]
        ///   A64: DUP Vd.4S, Vn.S[index]
        /// </summary>
        public static Vector128<float> DuplicateSelectedScalarToVector128(Vector128<float> value, byte index) => DuplicateSelectedScalarToVector128(value, index);

        /// <summary>
        /// int8x16_t vdupq_laneq_s8 (int8x16_t vec, const int lane)
        ///   A32: VDUP.8 Qd, Dm[index]
        ///   A64: DUP Vd.16B, Vn.B[index]
        /// </summary>
        public static Vector128<sbyte> DuplicateSelectedScalarToVector128(Vector128<sbyte> value, byte index) => DuplicateSelectedScalarToVector128(value, index);

        /// <summary>
        /// uint16x8_t vdupq_laneq_u16 (uint16x8_t vec, const int lane)
        ///   A32: VDUP.16 Qd, Dm[index]
        ///   A64: DUP Vd.8H, Vn.H[index]
        /// </summary>
        public static Vector128<ushort> DuplicateSelectedScalarToVector128(Vector128<ushort> value, byte index) => DuplicateSelectedScalarToVector128(value, index);

        /// <summary>
        /// uint32x4_t vdupq_laneq_u32 (uint32x4_t vec, const int lane)
        ///   A32: VDUP.32 Qd, Dm[index]
        ///   A64: DUP Vd.4S, Vn.S[index]
        /// </summary>
        public static Vector128<uint> DuplicateSelectedScalarToVector128(Vector128<uint> value, byte index) => DuplicateSelectedScalarToVector128(value, index);
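        // Illustrative sketch: DuplicateSelectedScalarToVector128 broadcasts one lane
        // across a whole vector, e.g. to multiply every element by a single matrix
        // coefficient. `row` is an assumed Vector128<float> local:
        //
        //     Vector128<float> lane2 = AdvSimd.DuplicateSelectedScalarToVector128(row, 2);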
        /// <summary>
        /// uint8x8_t vdup_n_u8 (uint8_t value)
        ///   A32: VDUP.8 Dd, Rt
        ///   A64: DUP Vd.8B, Rn
        /// </summary>
        public static Vector64<byte> DuplicateToVector64(byte value) => DuplicateToVector64(value);

        /// <summary>
        /// int16x4_t vdup_n_s16 (int16_t value)
        ///   A32: VDUP.16 Dd, Rt
        ///   A64: DUP Vd.4H, Rn
        /// </summary>
        public static Vector64<short> DuplicateToVector64(short value) => DuplicateToVector64(value);

        /// <summary>
        /// int32x2_t vdup_n_s32 (int32_t value)
        ///   A32: VDUP.32 Dd, Rt
        ///   A64: DUP Vd.2S, Rn
        /// </summary>
        public static Vector64<int> DuplicateToVector64(int value) => DuplicateToVector64(value);

        /// <summary>
        /// int8x8_t vdup_n_s8 (int8_t value)
        ///   A32: VDUP.8 Dd, Rt
        ///   A64: DUP Vd.8B, Rn
        /// </summary>
        public static Vector64<sbyte> DuplicateToVector64(sbyte value) => DuplicateToVector64(value);

        /// <summary>
        /// float32x2_t vdup_n_f32 (float32_t value)
        ///   A32: VDUP Dd, Dm[0]
        ///   A64: DUP Vd.2S, Vn.S[0]
        /// </summary>
        public static Vector64<float> DuplicateToVector64(float value) => DuplicateToVector64(value);

        /// <summary>
        /// uint16x4_t vdup_n_u16 (uint16_t value)
        ///   A32: VDUP.16 Dd, Rt
        ///   A64: DUP Vd.4H, Rn
        /// </summary>
        public static Vector64<ushort> DuplicateToVector64(ushort value) => DuplicateToVector64(value);

        /// <summary>
        /// uint32x2_t vdup_n_u32 (uint32_t value)
        ///   A32: VDUP.32 Dd, Rt
        ///   A64: DUP Vd.2S, Rn
        /// </summary>
        public static Vector64<uint> DuplicateToVector64(uint value) => DuplicateToVector64(value);

        /// <summary>
        /// uint8x16_t vdupq_n_u8 (uint8_t value)
        ///   A32: VDUP.8 Qd, Rt
        ///   A64: DUP Vd.16B, Rn
        /// </summary>
        public static Vector128<byte> DuplicateToVector128(byte value) => DuplicateToVector128(value);

        /// <summary>
        /// int16x8_t vdupq_n_s16 (int16_t value)
        ///   A32: VDUP.16 Qd, Rt
        ///   A64: DUP Vd.8H, Rn
        /// </summary>
        public static Vector128<short> DuplicateToVector128(short value) => DuplicateToVector128(value);

        /// <summary>
        /// int32x4_t vdupq_n_s32 (int32_t value)
        ///   A32: VDUP.32 Qd, Rt
        ///   A64: DUP Vd.4S, Rn
        /// </summary>
        public static Vector128<int> DuplicateToVector128(int value) => DuplicateToVector128(value);

        /// <summary>
        /// int8x16_t vdupq_n_s8 (int8_t value)
        ///   A32: VDUP.8 Qd, Rt
        ///   A64: DUP Vd.16B, Rn
        /// </summary>
        public static Vector128<sbyte> DuplicateToVector128(sbyte value) => DuplicateToVector128(value);

        /// <summary>
        /// float32x4_t vdupq_n_f32 (float32_t value)
        ///   A32: VDUP Qd, Dm[0]
        ///   A64: DUP Vd.4S, Vn.S[0]
        /// </summary>
        public static Vector128<float> DuplicateToVector128(float value) => DuplicateToVector128(value);

        /// <summary>
        /// uint16x8_t vdupq_n_u16 (uint16_t value)
        ///   A32: VDUP.16 Qd, Rt
        ///   A64: DUP Vd.8H, Rn
        /// </summary>
        public static Vector128<ushort> DuplicateToVector128(ushort value) => DuplicateToVector128(value);

        /// <summary>
        /// uint32x4_t vdupq_n_u32 (uint32_t value)
        ///   A32: VDUP.32 Qd, Rt
        ///   A64: DUP Vd.4S, Rn
        /// </summary>
        public static Vector128<uint> DuplicateToVector128(uint value) => DuplicateToVector128(value);

        /// <summary>
        /// uint8_t vget_lane_u8 (uint8x8_t v, const int lane)
        ///   A32: VMOV.U8 Rt, Dn[lane]
        ///   A64: UMOV Wd, Vn.B[lane]
        /// </summary>
        public static byte Extract(Vector64<byte> vector, byte index) => Extract(vector, index);

        /// <summary>
        /// int16_t vget_lane_s16 (int16x4_t v, const int lane)
        ///   A32: VMOV.S16 Rt, Dn[lane]
        ///   A64: SMOV Wd, Vn.H[lane]
        /// </summary>
        public static short Extract(Vector64<short> vector, byte index) => Extract(vector, index);

        /// <summary>
        /// int32_t vget_lane_s32 (int32x2_t v, const int lane)
        ///   A32: VMOV.32 Rt, Dn[lane]
        ///   A64: SMOV Wd, Vn.S[lane]
        /// </summary>
        public static int Extract(Vector64<int> vector, byte index) => Extract(vector, index);

        /// <summary>
        /// int8_t vget_lane_s8 (int8x8_t v, const int lane)
        ///   A32: VMOV.S8 Rt, Dn[lane]
        ///   A64: SMOV Wd, Vn.B[lane]
        /// </summary>
        public static sbyte Extract(Vector64<sbyte> vector, byte index) => Extract(vector, index);

        /// <summary>
        /// float32_t vget_lane_f32 (float32x2_t v, const int lane)
        ///   A32: VMOV.F32 Sd, Sm
        ///   A64: DUP Sd, Vn.S[lane]
        /// </summary>
        public static float Extract(Vector64<float> vector, byte index) => Extract(vector, index);

        /// <summary>
        /// uint16_t vget_lane_u16 (uint16x4_t v, const int lane)
        ///   A32: VMOV.U16 Rt, Dn[lane]
        ///   A64: UMOV Wd, Vn.H[lane]
        /// </summary>
        public static ushort Extract(Vector64<ushort> vector, byte index) => Extract(vector, index);

        /// <summary>
        /// uint32_t vget_lane_u32 (uint32x2_t v, const int lane)
        ///   A32: VMOV.32 Rt, Dn[lane]
        ///   A64: UMOV Wd, Vn.S[lane]
        /// </summary>
        public static uint Extract(Vector64<uint> vector, byte index) => Extract(vector, index);

        /// <summary>
        /// uint8_t vgetq_lane_u8 (uint8x16_t v, const int lane)
        ///   A32: VMOV.U8 Rt, Dn[lane]
        ///   A64: UMOV Wd, Vn.B[lane]
        /// </summary>
        public static byte Extract(Vector128<byte> vector, byte index) => Extract(vector, index);

        /// <summary>
        /// float64_t vgetq_lane_f64 (float64x2_t v, const int lane)
        ///   A32: VMOV.F64 Dd, Dm
        ///   A64: DUP Dd, Vn.D[lane]
        /// </summary>
        public static double Extract(Vector128<double> vector, byte index) => Extract(vector, index);

        /// <summary>
        /// int16_t vgetq_lane_s16 (int16x8_t v, const int lane)
        ///   A32: VMOV.S16 Rt, Dn[lane]
        ///   A64: SMOV Wd, Vn.H[lane]
        /// </summary>
        public static short Extract(Vector128<short> vector, byte index) => Extract(vector, index);

        /// <summary>
        /// int32_t vgetq_lane_s32 (int32x4_t v, const int lane)
        ///   A32: VMOV.32 Rt, Dn[lane]
        ///   A64: SMOV Wd, Vn.S[lane]
        /// </summary>
        public static int Extract(Vector128<int> vector, byte index) => Extract(vector, index);

        /// <summary>
        /// int64_t vgetq_lane_s64 (int64x2_t v, const int lane)
        ///   A32: VMOV Rt, Rt2, Dm
        ///   A64: UMOV Xd, Vn.D[lane]
        /// </summary>
        public static long Extract(Vector128<long> vector, byte index) => Extract(vector, index);

        /// <summary>
        /// int8_t vgetq_lane_s8 (int8x16_t v, const int lane)
        ///   A32: VMOV.S8 Rt, Dn[lane]
        ///   A64: SMOV Wd, Vn.B[lane]
        /// </summary>
        public static sbyte Extract(Vector128<sbyte> vector, byte index) => Extract(vector, index);

        /// <summary>
        /// float32_t vgetq_lane_f32 (float32x4_t v, const int lane)
        ///   A32: VMOV.F32 Sd, Sm
        ///   A64: DUP Sd, Vn.S[lane]
        /// </summary>
        public static float Extract(Vector128<float> vector, byte index) => Extract(vector, index);

        /// <summary>
        /// uint16_t vgetq_lane_u16 (uint16x8_t v, const int lane)
        ///   A32: VMOV.U16 Rt, Dn[lane]
        ///   A64: UMOV Wd, Vn.H[lane]
        /// </summary>
        public static ushort Extract(Vector128<ushort> vector, byte index) => Extract(vector, index);

        /// <summary>
        /// uint32_t vgetq_lane_u32 (uint32x4_t v, const int lane)
        ///   A32: VMOV.32 Rt, Dn[lane]
        ///   A64: UMOV Wd, Vn.S[lane]
        /// </summary>
        public static uint Extract(Vector128<uint> vector, byte index) => Extract(vector, index);

        /// <summary>
        /// uint64_t vgetq_lane_u64 (uint64x2_t v, const int lane)
        ///   A32: VMOV Rt, Rt2, Dm
        ///   A64: UMOV Xd, Vn.D[lane]
        /// </summary>
        public static ulong Extract(Vector128<ulong> vector, byte index) => Extract(vector, index);
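        // Illustrative sketch: Extract moves a single lane into a general-purpose
        // register; the index is validated against the lane count and selects which
        // element comes back. `v` is an assumed Vector128<int> local:
        //
        //     int thirdLane = AdvSimd.Extract(v, 2);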
        /// <summary>
        /// uint8x8_t vmovn_u16 (uint16x8_t a)
        ///   A32: VMOVN.I16 Dd, Qm
        ///   A64: XTN Vd.8B, Vn.8H
        /// </summary>
        public static Vector64<byte> ExtractNarrowingLower(Vector128<ushort> value) => ExtractNarrowingLower(value);

        /// <summary>
        /// int16x4_t vmovn_s32 (int32x4_t a)
        ///   A32: VMOVN.I32 Dd, Qm
        ///   A64: XTN Vd.4H, Vn.4S
        /// </summary>
        public static Vector64<short> ExtractNarrowingLower(Vector128<int> value) => ExtractNarrowingLower(value);

        /// <summary>
        /// int32x2_t vmovn_s64 (int64x2_t a)
        ///   A32: VMOVN.I64 Dd, Qm
        ///   A64: XTN Vd.2S, Vn.2D
        /// </summary>
        public static Vector64<int> ExtractNarrowingLower(Vector128<long> value) => ExtractNarrowingLower(value);

        /// <summary>
        /// int8x8_t vmovn_s16 (int16x8_t a)
        ///   A32: VMOVN.I16 Dd, Qm
        ///   A64: XTN Vd.8B, Vn.8H
        /// </summary>
        public static Vector64<sbyte> ExtractNarrowingLower(Vector128<short> value) => ExtractNarrowingLower(value);

        /// <summary>
        /// uint16x4_t vmovn_u32 (uint32x4_t a)
        ///   A32: VMOVN.I32 Dd, Qm
        ///   A64: XTN Vd.4H, Vn.4S
        /// </summary>
        public static Vector64<ushort> ExtractNarrowingLower(Vector128<uint> value) => ExtractNarrowingLower(value);

        /// <summary>
        /// uint32x2_t vmovn_u64 (uint64x2_t a)
        ///   A32: VMOVN.I64 Dd, Qm
        ///   A64: XTN Vd.2S, Vn.2D
        /// </summary>
        public static Vector64<uint> ExtractNarrowingLower(Vector128<ulong> value) => ExtractNarrowingLower(value);

        /// <summary>
        /// uint8x8_t vqmovn_u16 (uint16x8_t a)
        ///   A32: VQMOVN.U16 Dd, Qm
        ///   A64: UQXTN Vd.8B, Vn.8H
        /// </summary>
        public static Vector64<byte> ExtractNarrowingSaturateLower(Vector128<ushort> value) => ExtractNarrowingSaturateLower(value);

        /// <summary>
        /// int16x4_t vqmovn_s32 (int32x4_t a)
        ///   A32: VQMOVN.S32 Dd, Qm
        ///   A64: SQXTN Vd.4H, Vn.4S
        /// </summary>
        public static Vector64<short> ExtractNarrowingSaturateLower(Vector128<int> value) => ExtractNarrowingSaturateLower(value);

        /// <summary>
        /// int32x2_t vqmovn_s64 (int64x2_t a)
        ///   A32: VQMOVN.S64 Dd, Qm
        ///   A64: SQXTN Vd.2S, Vn.2D
        /// </summary>
        public static Vector64<int> ExtractNarrowingSaturateLower(Vector128<long> value) => ExtractNarrowingSaturateLower(value);

        /// <summary>
        /// int8x8_t vqmovn_s16 (int16x8_t a)
        ///   A32: VQMOVN.S16 Dd, Qm
        ///   A64: SQXTN Vd.8B, Vn.8H
        /// </summary>
        public static Vector64<sbyte> ExtractNarrowingSaturateLower(Vector128<short> value) => ExtractNarrowingSaturateLower(value);

        /// <summary>
        /// uint16x4_t vqmovn_u32 (uint32x4_t a)
        ///   A32: VQMOVN.U32 Dd, Qm
        ///   A64: UQXTN Vd.4H, Vn.4S
        /// </summary>
        public static Vector64<ushort> ExtractNarrowingSaturateLower(Vector128<uint> value) => ExtractNarrowingSaturateLower(value);

        /// <summary>
        /// uint32x2_t vqmovn_u64 (uint64x2_t a)
        ///   A32: VQMOVN.U64 Dd, Qm
        ///   A64: UQXTN Vd.2S, Vn.2D
        /// </summary>
        public static Vector64<uint> ExtractNarrowingSaturateLower(Vector128<ulong> value) => ExtractNarrowingSaturateLower(value);

        /// <summary>
        /// uint8x8_t vqmovun_s16 (int16x8_t a)
        ///   A32: VQMOVUN.S16 Dd, Qm
        ///   A64: SQXTUN Vd.8B, Vn.8H
        /// </summary>
        public static Vector64<byte> ExtractNarrowingSaturateUnsignedLower(Vector128<short> value) => ExtractNarrowingSaturateUnsignedLower(value);

        /// <summary>
        /// uint16x4_t vqmovun_s32 (int32x4_t a)
        ///   A32: VQMOVUN.S32 Dd, Qm
        ///   A64: SQXTUN Vd.4H, Vn.4S
        /// </summary>
        public static Vector64<ushort> ExtractNarrowingSaturateUnsignedLower(Vector128<int> value) => ExtractNarrowingSaturateUnsignedLower(value);

        /// <summary>
        /// uint32x2_t vqmovun_s64 (int64x2_t a)
        ///   A32: VQMOVUN.S64 Dd, Qm
        ///   A64: SQXTUN Vd.2S, Vn.2D
        /// </summary>
        public static Vector64<uint> ExtractNarrowingSaturateUnsignedLower(Vector128<long> value) => ExtractNarrowingSaturateUnsignedLower(value);

        /// <summary>
        /// uint8x16_t vqmovun_high_s16 (uint8x8_t r, int16x8_t a)
        ///   A32: VQMOVUN.S16 Dd+1, Qm
        ///   A64: SQXTUN2 Vd.16B, Vn.8H
        /// </summary>
        public static Vector128<byte> ExtractNarrowingSaturateUnsignedUpper(Vector64<byte> lower, Vector128<short> value) => ExtractNarrowingSaturateUnsignedUpper(lower, value);

        /// <summary>
        /// uint16x8_t vqmovun_high_s32 (uint16x4_t r, int32x4_t a)
        ///   A32: VQMOVUN.S32 Dd+1, Qm
        ///   A64: SQXTUN2 Vd.8H, Vn.4S
        /// </summary>
        public static Vector128<ushort> ExtractNarrowingSaturateUnsignedUpper(Vector64<ushort> lower, Vector128<int> value) => ExtractNarrowingSaturateUnsignedUpper(lower, value);

        /// <summary>
        /// uint32x4_t vqmovun_high_s64 (uint32x2_t r, int64x2_t a)
        ///   A32: VQMOVUN.S64 Dd+1, Qm
        ///   A64: SQXTUN2 Vd.4S, Vn.2D
        /// </summary>
        public static Vector128<uint> ExtractNarrowingSaturateUnsignedUpper(Vector64<uint> lower, Vector128<long> value) => ExtractNarrowingSaturateUnsignedUpper(lower, value);

        /// <summary>
        /// uint8x16_t vqmovn_high_u16 (uint8x8_t r, uint16x8_t a)
        ///   A32: VQMOVN.U16 Dd+1, Qm
        ///   A64: UQXTN2 Vd.16B, Vn.8H
        /// </summary>
        public static Vector128<byte> ExtractNarrowingSaturateUpper(Vector64<byte> lower, Vector128<ushort> value) => ExtractNarrowingSaturateUpper(lower, value);

        /// <summary>
        /// int16x8_t vqmovn_high_s32 (int16x4_t r, int32x4_t a)
        ///   A32: VQMOVN.S32 Dd+1, Qm
        ///   A64: SQXTN2 Vd.8H, Vn.4S
        /// </summary>
        public static Vector128<short> ExtractNarrowingSaturateUpper(Vector64<short> lower, Vector128<int> value) => ExtractNarrowingSaturateUpper(lower, value);

        /// <summary>
        /// int32x4_t vqmovn_high_s64 (int32x2_t r, int64x2_t a)
        ///   A32: VQMOVN.S64 Dd+1, Qm
        ///   A64: SQXTN2 Vd.4S, Vn.2D
        /// </summary>
        public static Vector128<int> ExtractNarrowingSaturateUpper(Vector64<int> lower, Vector128<long> value) => ExtractNarrowingSaturateUpper(lower, value);

        /// <summary>
        /// int8x16_t vqmovn_high_s16 (int8x8_t r, int16x8_t a)
        ///   A32: VQMOVN.S16 Dd+1, Qm
        ///   A64: SQXTN2 Vd.16B, Vn.8H
        /// </summary>
        public static Vector128<sbyte> ExtractNarrowingSaturateUpper(Vector64<sbyte> lower, Vector128<short> value) => ExtractNarrowingSaturateUpper(lower, value);

        /// <summary>
        /// uint16x8_t vqmovn_high_u32 (uint16x4_t r, uint32x4_t a)
        ///   A32: VQMOVN.U32 Dd+1, Qm
        ///   A64: UQXTN2 Vd.8H, Vn.4S
        /// </summary>
        public static Vector128<ushort> ExtractNarrowingSaturateUpper(Vector64<ushort> lower, Vector128<uint> value) => ExtractNarrowingSaturateUpper(lower, value);

        /// <summary>
        /// uint32x4_t vqmovn_high_u64 (uint32x2_t r, uint64x2_t a)
        ///   A32: VQMOVN.U64 Dd+1, Qm
        ///   A64: UQXTN2 Vd.4S, Vn.2D
        /// </summary>
        public static Vector128<uint> ExtractNarrowingSaturateUpper(Vector64<uint> lower, Vector128<ulong> value) => ExtractNarrowingSaturateUpper(lower, value);

        /// <summary>
        /// uint8x16_t vmovn_high_u16 (uint8x8_t r, uint16x8_t a)
        ///   A32: VMOVN.I16 Dd+1, Qm
        ///   A64: XTN2 Vd.16B, Vn.8H
        /// </summary>
        public static Vector128<byte> ExtractNarrowingUpper(Vector64<byte> lower, Vector128<ushort> value) => ExtractNarrowingUpper(lower, value);

        /// <summary>
        /// int16x8_t vmovn_high_s32 (int16x4_t r, int32x4_t a)
        ///   A32: VMOVN.I32 Dd+1, Qm
        ///   A64: XTN2 Vd.8H, Vn.4S
        /// </summary>
        public static Vector128<short> ExtractNarrowingUpper(Vector64<short> lower, Vector128<int> value) => ExtractNarrowingUpper(lower, value);

        /// <summary>
        /// int32x4_t vmovn_high_s64 (int32x2_t r, int64x2_t a)
        ///   A32: VMOVN.I64 Dd+1, Qm
        ///   A64: XTN2 Vd.4S, Vn.2D
        /// </summary>
        public static Vector128<int> ExtractNarrowingUpper(Vector64<int> lower, Vector128<long> value) => ExtractNarrowingUpper(lower, value);

        /// <summary>
        /// int8x16_t vmovn_high_s16 (int8x8_t r, int16x8_t a)
        ///   A32: VMOVN.I16 Dd+1, Qm
        ///   A64: XTN2 Vd.16B, Vn.8H
        /// </summary>
        public static Vector128<sbyte> ExtractNarrowingUpper(Vector64<sbyte> lower, Vector128<short> value) => ExtractNarrowingUpper(lower, value);

        /// <summary>
        /// uint16x8_t vmovn_high_u32 (uint16x4_t r, uint32x4_t a)
        ///   A32: VMOVN.I32 Dd+1, Qm
        ///   A64: XTN2 Vd.8H, Vn.4S
        /// </summary>
        public static Vector128<ushort> ExtractNarrowingUpper(Vector64<ushort> lower, Vector128<uint> value) => ExtractNarrowingUpper(lower, value);

        /// <summary>
        /// uint32x4_t vmovn_high_u64 (uint32x2_t r, uint64x2_t a)
        ///   A32: VMOVN.I64 Dd+1, Qm
        ///   A64: XTN2 Vd.4S, Vn.2D
        /// </summary>
        public static Vector128<uint> ExtractNarrowingUpper(Vector64<uint> lower, Vector128<ulong> value) => ExtractNarrowingUpper(lower, value);
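        // Illustrative sketch: the saturating-unsigned narrows implement the classic
        // "pack with clamping" step (e.g. 16-bit intermediates back to 8-bit pixels):
        // lanes below 0 clamp to 0 and lanes above 255 clamp to 255. `s0` and `s1`
        // are assumed Vector128<short> locals holding sixteen intermediates:
        //
        //     Vector64<byte> lo = AdvSimd.ExtractNarrowingSaturateUnsignedLower(s0);
        //     Vector128<byte> packed = AdvSimd.ExtractNarrowingSaturateUnsignedUpper(lo, s1);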
        /// <summary>
        /// uint8x8_t vext_u8 (uint8x8_t a, uint8x8_t b, const int n)
        ///   A32: VEXT.8 Dd, Dn, Dm, #n
        ///   A64: EXT Vd.8B, Vn.8B, Vm.8B, #n
        /// </summary>
        public static Vector64<byte> ExtractVector64(Vector64<byte> upper, Vector64<byte> lower, byte index) => ExtractVector64(upper, lower, index);

        /// <summary>
        /// int16x4_t vext_s16 (int16x4_t a, int16x4_t b, const int n)
        ///   A32: VEXT.8 Dd, Dn, Dm, #(n*2)
        ///   A64: EXT Vd.8B, Vn.8B, Vm.8B, #(n*2)
        /// </summary>
        public static Vector64<short> ExtractVector64(Vector64<short> upper, Vector64<short> lower, byte index) => ExtractVector64(upper, lower, index);

        /// <summary>
        /// int32x2_t vext_s32 (int32x2_t a, int32x2_t b, const int n)
        ///   A32: VEXT.8 Dd, Dn, Dm, #(n*4)
        ///   A64: EXT Vd.8B, Vn.8B, Vm.8B, #(n*4)
        /// </summary>
        public static Vector64<int> ExtractVector64(Vector64<int> upper, Vector64<int> lower, byte index) => ExtractVector64(upper, lower, index);

        /// <summary>
        /// int8x8_t vext_s8 (int8x8_t a, int8x8_t b, const int n)
        ///   A32: VEXT.8 Dd, Dn, Dm, #n
        ///   A64: EXT Vd.8B, Vn.8B, Vm.8B, #n
        /// </summary>
        public static Vector64<sbyte> ExtractVector64(Vector64<sbyte> upper, Vector64<sbyte> lower, byte index) => ExtractVector64(upper, lower, index);

        /// <summary>
        /// float32x2_t vext_f32 (float32x2_t a, float32x2_t b, const int n)
        ///   A32: VEXT.8 Dd, Dn, Dm, #(n*4)
        ///   A64: EXT Vd.8B, Vn.8B, Vm.8B, #(n*4)
        /// </summary>
        public static Vector64<float> ExtractVector64(Vector64<float> upper, Vector64<float> lower, byte index) => ExtractVector64(upper, lower, index);

        /// <summary>
        /// uint16x4_t vext_u16 (uint16x4_t a, uint16x4_t b, const int n)
        ///   A32: VEXT.8 Dd, Dn, Dm, #(n*2)
        ///   A64: EXT Vd.8B, Vn.8B, Vm.8B, #(n*2)
        /// </summary>
        public static Vector64<ushort> ExtractVector64(Vector64<ushort> upper, Vector64<ushort> lower, byte index) => ExtractVector64(upper, lower, index);

        /// <summary>
        /// uint32x2_t vext_u32 (uint32x2_t a, uint32x2_t b, const int n)
        ///   A32: VEXT.8 Dd, Dn, Dm, #(n*4)
        ///   A64: EXT Vd.8B, Vn.8B, Vm.8B, #(n*4)
        /// </summary>
        public static Vector64<uint> ExtractVector64(Vector64<uint> upper, Vector64<uint> lower, byte index) => ExtractVector64(upper, lower, index);

        /// <summary>
        /// uint8x16_t vextq_u8 (uint8x16_t a, uint8x16_t b, const int n)
        ///   A32: VEXT.8 Qd, Qn, Qm, #n
        ///   A64: EXT Vd.16B, Vn.16B, Vm.16B, #n
        /// </summary>
        public static Vector128<byte> ExtractVector128(Vector128<byte> upper, Vector128<byte> lower, byte index) => ExtractVector128(upper, lower, index);

        /// <summary>
        /// float64x2_t vextq_f64 (float64x2_t a, float64x2_t b, const int n)
        ///   A32: VEXT.8 Qd, Qn, Qm, #(n*8)
        ///   A64: EXT Vd.16B, Vn.16B, Vm.16B, #(n*8)
        /// </summary>
        public static Vector128<double> ExtractVector128(Vector128<double> upper, Vector128<double> lower, byte index) => ExtractVector128(upper, lower, index);

        /// <summary>
        /// int16x8_t vextq_s16 (int16x8_t a, int16x8_t b, const int n)
        ///   A32: VEXT.8 Qd, Qn, Qm, #(n*2)
        ///   A64: EXT Vd.16B, Vn.16B, Vm.16B, #(n*2)
        /// </summary>
        public static Vector128<short> ExtractVector128(Vector128<short> upper, Vector128<short> lower, byte index) => ExtractVector128(upper, lower, index);

        /// <summary>
        /// int32x4_t vextq_s32 (int32x4_t a, int32x4_t b, const int n)
        ///   A32: VEXT.8 Qd, Qn, Qm, #(n*4)
        ///   A64: EXT Vd.16B, Vn.16B, Vm.16B, #(n*4)
        /// </summary>
        public static Vector128<int> ExtractVector128(Vector128<int> upper, Vector128<int> lower, byte index) => ExtractVector128(upper, lower, index);

        /// <summary>
        /// int64x2_t vextq_s64 (int64x2_t a, int64x2_t b, const int n)
        ///   A32: VEXT.8 Qd, Qn, Qm, #(n*8)
        ///   A64: EXT Vd.16B, Vn.16B, Vm.16B, #(n*8)
        /// </summary>
        public static Vector128<long> ExtractVector128(Vector128<long> upper, Vector128<long> lower, byte index) => ExtractVector128(upper, lower, index);

        /// <summary>
        /// int8x16_t vextq_s8 (int8x16_t a, int8x16_t b, const int n)
        ///   A32: VEXT.8 Qd, Qn, Qm, #n
        ///   A64: EXT Vd.16B, Vn.16B, Vm.16B, #n
        /// </summary>
        public static Vector128<sbyte> ExtractVector128(Vector128<sbyte> upper, Vector128<sbyte> lower, byte index) => ExtractVector128(upper, lower, index);

        /// <summary>
        /// float32x4_t vextq_f32 (float32x4_t a, float32x4_t b, const int n)
        ///   A32: VEXT.8 Qd, Qn, Qm, #(n*4)
        ///   A64: EXT Vd.16B, Vn.16B, Vm.16B, #(n*4)
        /// </summary>
        public static Vector128<float> ExtractVector128(Vector128<float> upper, Vector128<float> lower, byte index) => ExtractVector128(upper, lower, index);

        /// <summary>
        /// uint16x8_t vextq_u16 (uint16x8_t a, uint16x8_t b, const int n)
        ///   A32: VEXT.8 Qd, Qn, Qm, #(n*2)
        ///   A64: EXT Vd.16B, Vn.16B, Vm.16B, #(n*2)
        /// </summary>
        public static Vector128<ushort> ExtractVector128(Vector128<ushort> upper, Vector128<ushort> lower, byte index) => ExtractVector128(upper, lower, index);

        /// <summary>
        /// uint32x4_t vextq_u32 (uint32x4_t a, uint32x4_t b, const int n)
        ///   A32: VEXT.8 Qd, Qn, Qm, #(n*4)
        ///   A64: EXT Vd.16B, Vn.16B, Vm.16B, #(n*4)
        /// </summary>
        public static Vector128<uint> ExtractVector128(Vector128<uint> upper, Vector128<uint> lower, byte index) => ExtractVector128(upper, lower, index);

        /// <summary>
        /// uint64x2_t vextq_u64 (uint64x2_t a, uint64x2_t b, const int n)
        ///   A32: VEXT.8 Qd, Qn, Qm, #(n*8)
        ///   A64: EXT Vd.16B, Vn.16B, Vm.16B, #(n*8)
        /// </summary>
        public static Vector128<ulong> ExtractVector128(Vector128<ulong> upper, Vector128<ulong> lower, byte index) => ExtractVector128(upper, lower, index);
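        // Illustrative sketch (my reading of the EXT lane ordering): ExtractVector128
        // forms a window across two vectors, taking the lanes of `upper` from `index`
        // onward and filling the remainder from the bottom lanes of `lower`. With
        // hypothetical adjacent 16-byte blocks b0 and b1, the 16 bytes starting
        // 3 lanes into b0 would be:
        //
        //     Vector128<byte> window = AdvSimd.ExtractVector128(b0, b1, 3);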
        /// <summary>
        /// float32x2_t vrndm_f32 (float32x2_t a)
        ///   A32: VRINTM.F32 Dd, Dm
        ///   A64: FRINTM Vd.2S, Vn.2S
        /// </summary>
        public static Vector64<float> Floor(Vector64<float> value) => Floor(value);

        /// <summary>
        /// float32x4_t vrndmq_f32 (float32x4_t a)
        ///   A32: VRINTM.F32 Qd, Qm
        ///   A64: FRINTM Vd.4S, Vn.4S
        /// </summary>
        public static Vector128<float> Floor(Vector128<float> value) => Floor(value);

        /// <summary>
        /// float64x1_t vrndm_f64 (float64x1_t a)
        ///   A32: VRINTM.F64 Dd, Dm
        ///   A64: FRINTM Dd, Dn
        /// </summary>
        public static Vector64<double> FloorScalar(Vector64<double> value) => FloorScalar(value);

        /// <summary>
        /// float32_t vrndms_f32 (float32_t a)
        ///   A32: VRINTM.F32 Sd, Sm
        ///   A64: FRINTM Sd, Sn
        /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
        /// </summary>
        public static Vector64<float> FloorScalar(Vector64<float> value) => FloorScalar(value);

        /// <summary>
        /// uint8x8_t vhadd_u8 (uint8x8_t a, uint8x8_t b)
        ///   A32: VHADD.U8 Dd, Dn, Dm
        ///   A64: UHADD Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<byte> FusedAddHalving(Vector64<byte> left, Vector64<byte> right) => FusedAddHalving(left, right);

        /// <summary>
        /// int16x4_t vhadd_s16 (int16x4_t a, int16x4_t b)
        ///   A32: VHADD.S16 Dd, Dn, Dm
        ///   A64: SHADD Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<short> FusedAddHalving(Vector64<short> left, Vector64<short> right) => FusedAddHalving(left, right);

        /// <summary>
        /// int32x2_t vhadd_s32 (int32x2_t a, int32x2_t b)
        ///   A32: VHADD.S32 Dd, Dn, Dm
        ///   A64: SHADD Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<int> FusedAddHalving(Vector64<int> left, Vector64<int> right) => FusedAddHalving(left, right);

        /// <summary>
        /// int8x8_t vhadd_s8 (int8x8_t a, int8x8_t b)
        ///   A32: VHADD.S8 Dd, Dn, Dm
        ///   A64: SHADD Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<sbyte> FusedAddHalving(Vector64<sbyte> left, Vector64<sbyte> right) => FusedAddHalving(left, right);

        /// <summary>
        /// uint16x4_t vhadd_u16 (uint16x4_t a, uint16x4_t b)
        ///   A32: VHADD.U16 Dd, Dn, Dm
        ///   A64: UHADD Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<ushort> FusedAddHalving(Vector64<ushort> left, Vector64<ushort> right) => FusedAddHalving(left, right);

        /// <summary>
        /// uint32x2_t vhadd_u32 (uint32x2_t a, uint32x2_t b)
        ///   A32: VHADD.U32 Dd, Dn, Dm
        ///   A64: UHADD Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<uint> FusedAddHalving(Vector64<uint> left, Vector64<uint> right) => FusedAddHalving(left, right);

        /// <summary>
        /// uint8x16_t vhaddq_u8 (uint8x16_t a, uint8x16_t b)
        ///   A32: VHADD.U8 Qd, Qn, Qm
        ///   A64: UHADD Vd.16B, Vn.16B, Vm.16B
        /// </summary>
        public static Vector128<byte> FusedAddHalving(Vector128<byte> left, Vector128<byte> right) => FusedAddHalving(left, right);

        /// <summary>
        /// int16x8_t vhaddq_s16 (int16x8_t a, int16x8_t b)
        ///   A32: VHADD.S16 Qd, Qn, Qm
        ///   A64: SHADD Vd.8H, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<short> FusedAddHalving(Vector128<short> left, Vector128<short> right) => FusedAddHalving(left, right);

        /// <summary>
        /// int32x4_t vhaddq_s32 (int32x4_t a, int32x4_t b)
        ///   A32: VHADD.S32 Qd, Qn, Qm
        ///   A64: SHADD Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<int> FusedAddHalving(Vector128<int> left, Vector128<int> right) => FusedAddHalving(left, right);

        /// <summary>
        /// int8x16_t vhaddq_s8 (int8x16_t a, int8x16_t b)
        ///   A32: VHADD.S8 Qd, Qn, Qm
        ///   A64: SHADD Vd.16B, Vn.16B, Vm.16B
        /// </summary>
        public static Vector128<sbyte> FusedAddHalving(Vector128<sbyte> left, Vector128<sbyte> right) => FusedAddHalving(left, right);

        /// <summary>
        /// uint16x8_t vhaddq_u16 (uint16x8_t a, uint16x8_t b)
        ///   A32: VHADD.U16 Qd, Qn, Qm
        ///   A64: UHADD Vd.8H, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<ushort> FusedAddHalving(Vector128<ushort> left, Vector128<ushort> right) => FusedAddHalving(left, right);

        /// <summary>
        /// uint32x4_t vhaddq_u32 (uint32x4_t a, uint32x4_t b)
        ///   A32: VHADD.U32 Qd, Qn, Qm
        ///   A64: UHADD Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<uint> FusedAddHalving(Vector128<uint> left, Vector128<uint> right) => FusedAddHalving(left, right);

        /// <summary>
        /// uint8x8_t vrhadd_u8 (uint8x8_t a, uint8x8_t b)
        ///   A32: VRHADD.U8 Dd, Dn, Dm
        ///   A64: URHADD Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<byte> FusedAddRoundedHalving(Vector64<byte> left, Vector64<byte> right) => FusedAddRoundedHalving(left, right);

        /// <summary>
        /// int16x4_t vrhadd_s16 (int16x4_t a, int16x4_t b)
        ///   A32: VRHADD.S16 Dd, Dn, Dm
        ///   A64: SRHADD Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<short> FusedAddRoundedHalving(Vector64<short> left, Vector64<short> right) => FusedAddRoundedHalving(left, right);

        /// <summary>
        /// int32x2_t vrhadd_s32 (int32x2_t a, int32x2_t b)
        ///   A32: VRHADD.S32 Dd, Dn, Dm
        ///   A64: SRHADD Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<int> FusedAddRoundedHalving(Vector64<int> left, Vector64<int> right) => FusedAddRoundedHalving(left, right);

        /// <summary>
        /// int8x8_t vrhadd_s8 (int8x8_t a, int8x8_t b)
        ///   A32: VRHADD.S8 Dd, Dn, Dm
        ///   A64: SRHADD Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<sbyte> FusedAddRoundedHalving(Vector64<sbyte> left, Vector64<sbyte> right) => FusedAddRoundedHalving(left, right);

        /// <summary>
        /// uint16x4_t vrhadd_u16 (uint16x4_t a, uint16x4_t b)
        ///   A32: VRHADD.U16 Dd, Dn, Dm
        ///   A64: URHADD Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<ushort> FusedAddRoundedHalving(Vector64<ushort> left, Vector64<ushort> right) => FusedAddRoundedHalving(left, right);

        /// <summary>
        /// uint32x2_t vrhadd_u32 (uint32x2_t a, uint32x2_t b)
        ///   A32: VRHADD.U32 Dd, Dn, Dm
        ///   A64: URHADD Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<uint> FusedAddRoundedHalving(Vector64<uint> left, Vector64<uint> right) => FusedAddRoundedHalving(left, right);

        /// <summary>
        /// uint8x16_t vrhaddq_u8 (uint8x16_t a, uint8x16_t b)
        ///   A32: VRHADD.U8 Qd, Qn, Qm
        ///   A64: URHADD Vd.16B, Vn.16B, Vm.16B
        /// </summary>
        public static Vector128<byte> FusedAddRoundedHalving(Vector128<byte> left, Vector128<byte> right) => FusedAddRoundedHalving(left, right);

        /// <summary>
        /// int16x8_t vrhaddq_s16 (int16x8_t a, int16x8_t b)
        ///   A32: VRHADD.S16 Qd, Qn, Qm
        ///   A64: SRHADD Vd.8H, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<short> FusedAddRoundedHalving(Vector128<short> left, Vector128<short> right) => FusedAddRoundedHalving(left, right);

        /// <summary>
        /// int32x4_t vrhaddq_s32 (int32x4_t a, int32x4_t b)
        ///   A32: VRHADD.S32 Qd, Qn, Qm
        ///   A64: SRHADD Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<int> FusedAddRoundedHalving(Vector128<int> left, Vector128<int> right) => FusedAddRoundedHalving(left, right);

        /// <summary>
        /// int8x16_t vrhaddq_s8 (int8x16_t a, int8x16_t b)
        ///   A32: VRHADD.S8 Qd, Qn, Qm
        ///   A64: SRHADD Vd.16B, Vn.16B, Vm.16B
        /// </summary>
        public static Vector128<sbyte> FusedAddRoundedHalving(Vector128<sbyte> left, Vector128<sbyte> right) => FusedAddRoundedHalving(left, right);

        /// <summary>
        /// uint16x8_t vrhaddq_u16 (uint16x8_t a, uint16x8_t b)
        ///   A32: VRHADD.U16 Qd, Qn, Qm
        ///   A64: URHADD Vd.8H, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<ushort> FusedAddRoundedHalving(Vector128<ushort> left, Vector128<ushort> right) => FusedAddRoundedHalving(left, right);

        /// <summary>
        /// uint32x4_t vrhaddq_u32 (uint32x4_t a, uint32x4_t b)
        ///   A32: VRHADD.U32 Qd, Qn, Qm
        ///   A64: URHADD Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<uint> FusedAddRoundedHalving(Vector128<uint> left, Vector128<uint> right) => FusedAddRoundedHalving(left, right);
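        // Illustrative sketch: FusedAddRoundedHalving computes (a + b + 1) >> 1 per
        // lane without intermediate overflow, i.e. a rounded average, which is the
        // usual building block for pixel blending. `a` and `b` are assumed
        // Vector128<byte> locals:
        //
        //     Vector128<byte> average = AdvSimd.FusedAddRoundedHalving(a, b);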
/// </summary> public static Vector128<short> FusedAddRoundedHalving(Vector128<short> left, Vector128<short> right) => FusedAddRoundedHalving(left, right); /// <summary> /// int32x4_t vrhaddq_s32 (int32x4_t a, int32x4_t b) /// A32: VRHADD.S32 Qd, Qn, Qm /// A64: SRHADD Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<int> FusedAddRoundedHalving(Vector128<int> left, Vector128<int> right) => FusedAddRoundedHalving(left, right); /// <summary> /// int8x16_t vrhaddq_s8 (int8x16_t a, int8x16_t b) /// A32: VRHADD.S8 Qd, Qn, Qm /// A64: SRHADD Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> FusedAddRoundedHalving(Vector128<sbyte> left, Vector128<sbyte> right) => FusedAddRoundedHalving(left, right); /// <summary> /// uint16x8_t vrhaddq_u16 (uint16x8_t a, uint16x8_t b) /// A32: VRHADD.U16 Qd, Qn, Qm /// A64: URHADD Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<ushort> FusedAddRoundedHalving(Vector128<ushort> left, Vector128<ushort> right) => FusedAddRoundedHalving(left, right); /// <summary> /// uint32x4_t vrhaddq_u32 (uint32x4_t a, uint32x4_t b) /// A32: VRHADD.U32 Qd, Qn, Qm /// A64: URHADD Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<uint> FusedAddRoundedHalving(Vector128<uint> left, Vector128<uint> right) => FusedAddRoundedHalving(left, right); /// <summary> /// float32x2_t vfma_f32 (float32x2_t a, float32x2_t b, float32x2_t c) /// A32: VFMA.F32 Dd, Dn, Dm /// A64: FMLA Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<float> FusedMultiplyAdd(Vector64<float> addend, Vector64<float> left, Vector64<float> right) => FusedMultiplyAdd(addend, left, right); /// <summary> /// float32x4_t vfmaq_f32 (float32x4_t a, float32x4_t b, float32x4_t c) /// A32: VFMA.F32 Qd, Qn, Qm /// A64: FMLA Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<float> FusedMultiplyAdd(Vector128<float> addend, Vector128<float> left, Vector128<float> right) => FusedMultiplyAdd(addend, left, right); /// <summary> /// float64x1_t vfnma_f64 (float64x1_t a, float64x1_t b, float64x1_t c) /// A32: VFNMA.F64 Dd, Dn, Dm /// A64: FNMADD Dd, Dn, Dm, Da /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. /// </summary> public static Vector64<double> FusedMultiplyAddNegatedScalar(Vector64<double> addend, Vector64<double> left, Vector64<double> right) => FusedMultiplyAddNegatedScalar(addend, left, right); /// <summary> /// float32_t vfnmas_f32 (float32_t a, float32_t b, float32_t c) /// A32: VFNMA.F32 Sd, Sn, Sm /// A64: FNMADD Sd, Sn, Sm, Sa /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. /// </summary> public static Vector64<float> FusedMultiplyAddNegatedScalar(Vector64<float> addend, Vector64<float> left, Vector64<float> right) => FusedMultiplyAddNegatedScalar(addend, left, right); /// <summary> /// float64x1_t vfma_f64 (float64x1_t a, float64x1_t b, float64x1_t c) /// A32: VFMA.F64 Dd, Dn, Dm /// A64: FMADD Dd, Dn, Dm, Da /// </summary> public static Vector64<double> FusedMultiplyAddScalar(Vector64<double> addend, Vector64<double> left, Vector64<double> right) => FusedMultiplyAddScalar(addend, left, right); /// <summary> /// float32_t vfmas_f32 (float32_t a, float32_t b, float32_t c) /// A32: VFMA.F32 Sd, Sn, Sm /// A64: FMADD Sd, Sn, Sm, Sa /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
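
        // Editor's sketch (illustrative only, not part of the API surface):
        // FusedMultiplyAdd returns addend + (left * right) with a single rounding
        // step (FMLA on A64), so chained accumulation keeps more precision than a
        // separate Multiply followed by Add.
        private static Vector128<float> DotAccumulateExample(Vector128<float> acc, Vector128<float> x, Vector128<float> y)
        {
            return AdvSimd.FusedMultiplyAdd(acc, x, y); // acc + (x * y), fused
        }
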
/// </summary> public static Vector64<float> FusedMultiplyAddScalar(Vector64<float> addend, Vector64<float> left, Vector64<float> right) => FusedMultiplyAddScalar(addend, left, right); /// <summary> /// float32x2_t vfms_f32 (float32x2_t a, float32x2_t b, float32x2_t c) /// A32: VFMS.F32 Dd, Dn, Dm /// A64: FMLS Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<float> FusedMultiplySubtract(Vector64<float> minuend, Vector64<float> left, Vector64<float> right) => FusedMultiplySubtract(minuend, left, right); /// <summary> /// float32x4_t vfmsq_f32 (float32x4_t a, float32x4_t b, float32x4_t c) /// A32: VFMS.F32 Qd, Qn, Qm /// A64: FMLS Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<float> FusedMultiplySubtract(Vector128<float> minuend, Vector128<float> left, Vector128<float> right) => FusedMultiplySubtract(minuend, left, right); /// <summary> /// float64x1_t vfnms_f64 (float64x1_t a, float64x1_t b, float64x1_t c) /// A32: VFNMS.F64 Dd, Dn, Dm /// A64: FNMSUB Dd, Dn, Dm, Da /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. /// </summary> public static Vector64<double> FusedMultiplySubtractNegatedScalar(Vector64<double> minuend, Vector64<double> left, Vector64<double> right) => FusedMultiplySubtractNegatedScalar(minuend, left, right); /// <summary> /// float32_t vfnmss_f32 (float32_t a, float32_t b, float32_t c) /// A32: VFNMS.F32 Sd, Sn, Sm /// A64: FNMSUB Sd, Sn, Sm, Sa /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. /// </summary> public static Vector64<float> FusedMultiplySubtractNegatedScalar(Vector64<float> minuend, Vector64<float> left, Vector64<float> right) => FusedMultiplySubtractNegatedScalar(minuend, left, right); /// <summary> /// float64x1_t vfms_f64 (float64x1_t a, float64x1_t b, float64x1_t c) /// A32: VFMS.F64 Dd, Dn, Dm /// A64: FMSUB Dd, Dn, Dm, Da /// </summary> public static Vector64<double> FusedMultiplySubtractScalar(Vector64<double> minuend, Vector64<double> left, Vector64<double> right) => FusedMultiplySubtractScalar(minuend, left, right); /// <summary> /// float32_t vfmss_f32 (float32_t a, float32_t b, float32_t c) /// A32: VFMS.F32 Sd, Sn, Sm /// A64: FMSUB Sd, Sn, Sm, Sa /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
/// </summary> public static Vector64<float> FusedMultiplySubtractScalar(Vector64<float> minuend, Vector64<float> left, Vector64<float> right) => FusedMultiplySubtractScalar(minuend, left, right); /// <summary> /// uint8x8_t vhsub_u8 (uint8x8_t a, uint8x8_t b) /// A32: VHSUB.U8 Dd, Dn, Dm /// A64: UHSUB Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> FusedSubtractHalving(Vector64<byte> left, Vector64<byte> right) => FusedSubtractHalving(left, right); /// <summary> /// int16x4_t vhsub_s16 (int16x4_t a, int16x4_t b) /// A32: VHSUB.S16 Dd, Dn, Dm /// A64: SHSUB Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<short> FusedSubtractHalving(Vector64<short> left, Vector64<short> right) => FusedSubtractHalving(left, right); /// <summary> /// int32x2_t vhsub_s32 (int32x2_t a, int32x2_t b) /// A32: VHSUB.S32 Dd, Dn, Dm /// A64: SHSUB Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<int> FusedSubtractHalving(Vector64<int> left, Vector64<int> right) => FusedSubtractHalving(left, right); /// <summary> /// int8x8_t vhsub_s8 (int8x8_t a, int8x8_t b) /// A32: VHSUB.S8 Dd, Dn, Dm /// A64: SHSUB Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> FusedSubtractHalving(Vector64<sbyte> left, Vector64<sbyte> right) => FusedSubtractHalving(left, right); /// <summary> /// uint16x4_t vhsub_u16 (uint16x4_t a, uint16x4_t b) /// A32: VHSUB.U16 Dd, Dn, Dm /// A64: UHSUB Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<ushort> FusedSubtractHalving(Vector64<ushort> left, Vector64<ushort> right) => FusedSubtractHalving(left, right); /// <summary> /// uint32x2_t vhsub_u32 (uint32x2_t a, uint32x2_t b) /// A32: VHSUB.U32 Dd, Dn, Dm /// A64: UHSUB Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<uint> FusedSubtractHalving(Vector64<uint> left, Vector64<uint> right) => FusedSubtractHalving(left, right); /// <summary> /// uint8x16_t vhsubq_u8 (uint8x16_t a, uint8x16_t b) /// A32: VHSUB.U8 Qd, Qn, Qm /// A64: UHSUB Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> FusedSubtractHalving(Vector128<byte> left, Vector128<byte> right) => FusedSubtractHalving(left, right); /// <summary> /// int16x8_t vhsubq_s16 (int16x8_t a, int16x8_t b) /// A32: VHSUB.S16 Qd, Qn, Qm /// A64: SHSUB Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<short> FusedSubtractHalving(Vector128<short> left, Vector128<short> right) => FusedSubtractHalving(left, right); /// <summary> /// int32x4_t vhsubq_s32 (int32x4_t a, int32x4_t b) /// A32: VHSUB.S32 Qd, Qn, Qm /// A64: SHSUB Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<int> FusedSubtractHalving(Vector128<int> left, Vector128<int> right) => FusedSubtractHalving(left, right); /// <summary> /// int8x16_t vhsubq_s8 (int8x16_t a, int8x16_t b) /// A32: VHSUB.S8 Qd, Qn, Qm /// A64: SHSUB Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> FusedSubtractHalving(Vector128<sbyte> left, Vector128<sbyte> right) => FusedSubtractHalving(left, right); /// <summary> /// uint16x8_t vhsubq_u16 (uint16x8_t a, uint16x8_t b) /// A32: VHSUB.U16 Qd, Qn, Qm /// A64: UHSUB Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<ushort> FusedSubtractHalving(Vector128<ushort> left, Vector128<ushort> right) => FusedSubtractHalving(left, right); /// <summary> /// uint32x4_t vhsubq_u32 (uint32x4_t a, uint32x4_t b) /// A32: VHSUB.U32 Qd, Qn, Qm /// A64: UHSUB Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<uint> FusedSubtractHalving(Vector128<uint> left, Vector128<uint> right) => FusedSubtractHalving(left, 
right); /// <summary> /// uint8x8_t vset_lane_u8 (uint8_t a, uint8x8_t v, const int lane) /// A32: VMOV.8 Dd[lane], Rt /// A64: INS Vd.B[lane], Wn /// </summary> public static Vector64<byte> Insert(Vector64<byte> vector, byte index, byte data) => Insert(vector, index, data); /// <summary> /// int16x4_t vset_lane_s16 (int16_t a, int16x4_t v, const int lane) /// A32: VMOV.16 Dd[lane], Rt /// A64: INS Vd.H[lane], Wn /// </summary> public static Vector64<short> Insert(Vector64<short> vector, byte index, short data) => Insert(vector, index, data); /// <summary> /// int32x2_t vset_lane_s32 (int32_t a, int32x2_t v, const int lane) /// A32: VMOV.32 Dd[lane], Rt /// A64: INS Vd.S[lane], Wn /// </summary> public static Vector64<int> Insert(Vector64<int> vector, byte index, int data) => Insert(vector, index, data); /// <summary> /// int8x8_t vset_lane_s8 (int8_t a, int8x8_t v, const int lane) /// A32: VMOV.8 Dd[lane], Rt /// A64: INS Vd.B[lane], Wn /// </summary> public static Vector64<sbyte> Insert(Vector64<sbyte> vector, byte index, sbyte data) => Insert(vector, index, data); /// <summary> /// float32x2_t vset_lane_f32 (float32_t a, float32x2_t v, const int lane) /// A32: VMOV.F32 Sd, Sm /// A64: INS Vd.S[lane], Vn.S[0] /// </summary> public static Vector64<float> Insert(Vector64<float> vector, byte index, float data) => Insert(vector, index, data); /// <summary> /// uint16x4_t vset_lane_u16 (uint16_t a, uint16x4_t v, const int lane) /// A32: VMOV.16 Dd[lane], Rt /// A64: INS Vd.H[lane], Wn /// </summary> public static Vector64<ushort> Insert(Vector64<ushort> vector, byte index, ushort data) => Insert(vector, index, data); /// <summary> /// uint32x2_t vset_lane_u32 (uint32_t a, uint32x2_t v, const int lane) /// A32: VMOV.32 Dd[lane], Rt /// A64: INS Vd.S[lane], Wn /// </summary> public static Vector64<uint> Insert(Vector64<uint> vector, byte index, uint data) => Insert(vector, index, data); /// <summary> /// uint8x16_t vsetq_lane_u8 (uint8_t a, uint8x16_t v, const int lane) /// A32: VMOV.8 Dd[lane], Rt /// A64: INS Vd.B[lane], Wn /// </summary> public static Vector128<byte> Insert(Vector128<byte> vector, byte index, byte data) => Insert(vector, index, data); /// <summary> /// float64x2_t vsetq_lane_f64 (float64_t a, float64x2_t v, const int lane) /// A32: VMOV.F64 Dd, Dm /// A64: INS Vd.D[lane], Vn.D[0] /// </summary> public static Vector128<double> Insert(Vector128<double> vector, byte index, double data) => Insert(vector, index, data); /// <summary> /// int16x8_t vsetq_lane_s16 (int16_t a, int16x8_t v, const int lane) /// A32: VMOV.16 Dd[lane], Rt /// A64: INS Vd.H[lane], Wn /// </summary> public static Vector128<short> Insert(Vector128<short> vector, byte index, short data) => Insert(vector, index, data); /// <summary> /// int32x4_t vsetq_lane_s32 (int32_t a, int32x4_t v, const int lane) /// A32: VMOV.32 Dd[lane], Rt /// A64: INS Vd.S[lane], Wn /// </summary> public static Vector128<int> Insert(Vector128<int> vector, byte index, int data) => Insert(vector, index, data); /// <summary> /// int64x2_t vsetq_lane_s64 (int64_t a, int64x2_t v, const int lane) /// A32: VMOV.64 Dd, Rt, Rt2 /// A64: INS Vd.D[lane], Xn /// </summary> public static Vector128<long> Insert(Vector128<long> vector, byte index, long data) => Insert(vector, index, data); /// <summary> /// int8x16_t vsetq_lane_s8 (int8_t a, int8x16_t v, const int lane) /// A32: VMOV.8 Dd[lane], Rt /// A64: INS Vd.B[lane], Wn /// </summary> public static Vector128<sbyte> Insert(Vector128<sbyte> vector, byte index, sbyte data) => Insert(vector, 
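
        // Editor's sketch (illustrative only, not part of the API surface):
        // Insert writes a single lane (INS Vd.S[lane], Wn) and leaves the rest of
        // the vector untouched; the lane index must be a constant in range.
        private static Vector128<int> PatchLaneExample(Vector128<int> v, int replacement)
        {
            return AdvSimd.Insert(v, 2, replacement); // replace lane 2 only
        }
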
index, data); /// <summary> /// float32x4_t vsetq_lane_f32 (float32_t a, float32x4_t v, const int lane) /// A32: VMOV.F32 Sd, Sm /// A64: INS Vd.S[lane], Vn.S[0] /// </summary> public static Vector128<float> Insert(Vector128<float> vector, byte index, float data) => Insert(vector, index, data); /// <summary> /// uint16x8_t vsetq_lane_u16 (uint16_t a, uint16x8_t v, const int lane) /// A32: VMOV.16 Dd[lane], Rt /// A64: INS Vd.H[lane], Wn /// </summary> public static Vector128<ushort> Insert(Vector128<ushort> vector, byte index, ushort data) => Insert(vector, index, data); /// <summary> /// uint32x4_t vsetq_lane_u32 (uint32_t a, uint32x4_t v, const int lane) /// A32: VMOV.32 Dd[lane], Rt /// A64: INS Vd.S[lane], Wn /// </summary> public static Vector128<uint> Insert(Vector128<uint> vector, byte index, uint data) => Insert(vector, index, data); /// <summary> /// uint64x2_t vsetq_lane_u64 (uint64_t a, uint64x2_t v, const int lane) /// A32: VMOV.64 Dd, Rt, Rt2 /// A64: INS Vd.D[lane], Xn /// </summary> public static Vector128<ulong> Insert(Vector128<ulong> vector, byte index, ulong data) => Insert(vector, index, data); /// <summary> /// float64x2_t vcopyq_lane_f64 (float64x2_t a, const int lane1, float64x1_t b, const int lane2) /// A32: VMOV.F64 Dd, Dm /// A64: INS Vd.D[lane1], Vn.D[0] /// </summary> public static Vector128<double> InsertScalar(Vector128<double> result, byte resultIndex, Vector64<double> value) => InsertScalar(result, resultIndex, value); /// <summary> /// int64x2_t vcopyq_lane_s64 (int64x2_t a, const int lane1, int64x1_t b, const int lane2) /// A32: VMOV Dd, Dm /// A64: INS Vd.D[lane1], Vn.D[0] /// </summary> public static Vector128<long> InsertScalar(Vector128<long> result, byte resultIndex, Vector64<long> value) => InsertScalar(result, resultIndex, value); /// <summary> /// uint64x2_t vcopyq_lane_u64 (uint64x2_t a, const int lane1, uint64x1_t b, const int lane2) /// A32: VMOV Dd, Dm /// A64: INS Vd.D[lane1], Vn.D[0] /// </summary> public static Vector128<ulong> InsertScalar(Vector128<ulong> result, byte resultIndex, Vector64<ulong> value) => InsertScalar(result, resultIndex, value); /// <summary> /// int16x4_t vcls_s16 (int16x4_t a) /// A32: VCLS.S16 Dd, Dm /// A64: CLS Vd.4H, Vn.4H /// </summary> public static Vector64<short> LeadingSignCount(Vector64<short> value) => LeadingSignCount(value); /// <summary> /// int32x2_t vcls_s32 (int32x2_t a) /// A32: VCLS.S32 Dd, Dm /// A64: CLS Vd.2S, Vn.2S /// </summary> public static Vector64<int> LeadingSignCount(Vector64<int> value) => LeadingSignCount(value); /// <summary> /// int8x8_t vcls_s8 (int8x8_t a) /// A32: VCLS.S8 Dd, Dm /// A64: CLS Vd.8B, Vn.8B /// </summary> public static Vector64<sbyte> LeadingSignCount(Vector64<sbyte> value) => LeadingSignCount(value); /// <summary> /// int16x8_t vclsq_s16 (int16x8_t a) /// A32: VCLS.S16 Qd, Qm /// A64: CLS Vd.8H, Vn.8H /// </summary> public static Vector128<short> LeadingSignCount(Vector128<short> value) => LeadingSignCount(value); /// <summary> /// int32x4_t vclsq_s32 (int32x4_t a) /// A32: VCLS.S32 Qd, Qm /// A64: CLS Vd.4S, Vn.4S /// </summary> public static Vector128<int> LeadingSignCount(Vector128<int> value) => LeadingSignCount(value); /// <summary> /// int8x16_t vclsq_s8 (int8x16_t a) /// A32: VCLS.S8 Qd, Qm /// A64: CLS Vd.16B, Vn.16B /// </summary> public static Vector128<sbyte> LeadingSignCount(Vector128<sbyte> value) => LeadingSignCount(value); /// <summary> /// uint8x8_t vclz_u8 (uint8x8_t a) /// A32: VCLZ.I8 Dd, Dm /// A64: CLZ Vd.8B, Vn.8B /// </summary> public static 
Vector64<byte> LeadingZeroCount(Vector64<byte> value) => LeadingZeroCount(value); /// <summary> /// int16x4_t vclz_s16 (int16x4_t a) /// A32: VCLZ.I16 Dd, Dm /// A64: CLZ Vd.4H, Vn.4H /// </summary> public static Vector64<short> LeadingZeroCount(Vector64<short> value) => LeadingZeroCount(value); /// <summary> /// int32x2_t vclz_s32 (int32x2_t a) /// A32: VCLZ.I32 Dd, Dm /// A64: CLZ Vd.2S, Vn.2S /// </summary> public static Vector64<int> LeadingZeroCount(Vector64<int> value) => LeadingZeroCount(value); /// <summary> /// int8x8_t vclz_s8 (int8x8_t a) /// A32: VCLZ.I8 Dd, Dm /// A64: CLZ Vd.8B, Vn.8B /// </summary> public static Vector64<sbyte> LeadingZeroCount(Vector64<sbyte> value) => LeadingZeroCount(value); /// <summary> /// uint16x4_t vclz_u16 (uint16x4_t a) /// A32: VCLZ.I16 Dd, Dm /// A64: CLZ Vd.4H, Vn.4H /// </summary> public static Vector64<ushort> LeadingZeroCount(Vector64<ushort> value) => LeadingZeroCount(value); /// <summary> /// uint32x2_t vclz_u32 (uint32x2_t a) /// A32: VCLZ.I32 Dd, Dm /// A64: CLZ Vd.2S, Vn.2S /// </summary> public static Vector64<uint> LeadingZeroCount(Vector64<uint> value) => LeadingZeroCount(value); /// <summary> /// uint8x16_t vclzq_u8 (uint8x16_t a) /// A32: VCLZ.I8 Qd, Qm /// A64: CLZ Vd.16B, Vn.16B /// </summary> public static Vector128<byte> LeadingZeroCount(Vector128<byte> value) => LeadingZeroCount(value); /// <summary> /// int16x8_t vclzq_s16 (int16x8_t a) /// A32: VCLZ.I16 Qd, Qm /// A64: CLZ Vd.8H, Vn.8H /// </summary> public static Vector128<short> LeadingZeroCount(Vector128<short> value) => LeadingZeroCount(value); /// <summary> /// int32x4_t vclzq_s32 (int32x4_t a) /// A32: VCLZ.I32 Qd, Qm /// A64: CLZ Vd.4S, Vn.4S /// </summary> public static Vector128<int> LeadingZeroCount(Vector128<int> value) => LeadingZeroCount(value); /// <summary> /// int8x16_t vclzq_s8 (int8x16_t a) /// A32: VCLZ.I8 Qd, Qm /// A64: CLZ Vd.16B, Vn.16B /// </summary> public static Vector128<sbyte> LeadingZeroCount(Vector128<sbyte> value) => LeadingZeroCount(value); /// <summary> /// uint16x8_t vclzq_u16 (uint16x8_t a) /// A32: VCLZ.I16 Qd, Qm /// A64: CLZ Vd.8H, Vn.8H /// </summary> public static Vector128<ushort> LeadingZeroCount(Vector128<ushort> value) => LeadingZeroCount(value); /// <summary> /// uint32x4_t vclzq_u32 (uint32x4_t a) /// A32: VCLZ.I32 Qd, Qm /// A64: CLZ Vd.4S, Vn.4S /// </summary> public static Vector128<uint> LeadingZeroCount(Vector128<uint> value) => LeadingZeroCount(value); /// <summary> /// uint8x8_t vld1_lane_u8 (uint8_t const * ptr, uint8x8_t src, const int lane) /// A32: VLD1.8 { Dd[index] }, [Rn] /// A64: LD1 { Vt.B }[index], [Xn] /// </summary> public static unsafe Vector64<byte> LoadAndInsertScalar(Vector64<byte> value, byte index, byte* address) => LoadAndInsertScalar(value, index, address); /// <summary> /// int16x4_t vld1_lane_s16 (int16_t const * ptr, int16x4_t src, const int lane) /// A32: VLD1.16 { Dd[index] }, [Rn] /// A64: LD1 { Vt.H }[index], [Xn] /// </summary> public static unsafe Vector64<short> LoadAndInsertScalar(Vector64<short> value, byte index, short* address) => LoadAndInsertScalar(value, index, address); /// <summary> /// int32x2_t vld1_lane_s32 (int32_t const * ptr, int32x2_t src, const int lane) /// A32: VLD1.32 { Dd[index] }, [Rn] /// A64: LD1 { Vt.S }[index], [Xn] /// </summary> public static unsafe Vector64<int> LoadAndInsertScalar(Vector64<int> value, byte index, int* address) => LoadAndInsertScalar(value, index, address); /// <summary> /// int8x8_t vld1_lane_s8 (int8_t const * ptr, int8x8_t src, const int lane) 
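
        // Editor's sketch (illustrative only, not part of the API surface):
        // LeadingZeroCount (CLZ) yields per-lane leading-zero counts; for non-zero
        // lanes, 31 - clz(x) is floor(log2(x)). Zero lanes come out as -1 here.
        private static Vector128<int> FloorLog2Example(Vector128<uint> values)
        {
            Vector128<uint> clz = AdvSimd.LeadingZeroCount(values);
            return AdvSimd.Subtract(Vector128.Create(31u), clz).AsInt32();
        }
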
/// A32: VLD1.8 { Dd[index] }, [Rn] /// A64: LD1 { Vt.B }[index], [Xn] /// </summary> public static unsafe Vector64<sbyte> LoadAndInsertScalar(Vector64<sbyte> value, byte index, sbyte* address) => LoadAndInsertScalar(value, index, address); /// <summary> /// float32x2_t vld1_lane_f32 (float32_t const * ptr, float32x2_t src, const int lane) /// A32: VLD1.32 { Dd[index] }, [Rn] /// A64: LD1 { Vt.S }[index], [Xn] /// </summary> public static unsafe Vector64<float> LoadAndInsertScalar(Vector64<float> value, byte index, float* address) => LoadAndInsertScalar(value, index, address); /// <summary> /// uint16x4_t vld1_lane_u16 (uint16_t const * ptr, uint16x4_t src, const int lane) /// A32: VLD1.16 { Dd[index] }, [Rn] /// A64: LD1 { Vt.H }[index], [Xn] /// </summary> public static unsafe Vector64<ushort> LoadAndInsertScalar(Vector64<ushort> value, byte index, ushort* address) => LoadAndInsertScalar(value, index, address); /// <summary> /// uint32x2_t vld1_lane_u32 (uint32_t const * ptr, uint32x2_t src, const int lane) /// A32: VLD1.32 { Dd[index] }, [Rn] /// A64: LD1 { Vt.S }[index], [Xn] /// </summary> public static unsafe Vector64<uint> LoadAndInsertScalar(Vector64<uint> value, byte index, uint* address) => LoadAndInsertScalar(value, index, address); /// <summary> /// uint8x16_t vld1q_lane_u8 (uint8_t const * ptr, uint8x16_t src, const int lane) /// A32: VLD1.8 { Dd[index] }, [Rn] /// A64: LD1 { Vt.B }[index], [Xn] /// </summary> public static unsafe Vector128<byte> LoadAndInsertScalar(Vector128<byte> value, byte index, byte* address) => LoadAndInsertScalar(value, index, address); /// <summary> /// float64x2_t vld1q_lane_f64 (float64_t const * ptr, float64x2_t src, const int lane) /// A32: VLDR.64 Dd, [Rn] /// A64: LD1 { Vt.D }[index], [Xn] /// </summary> public static unsafe Vector128<double> LoadAndInsertScalar(Vector128<double> value, byte index, double* address) => LoadAndInsertScalar(value, index, address); /// <summary> /// int16x8_t vld1q_lane_s16 (int16_t const * ptr, int16x8_t src, const int lane) /// A32: VLD1.16 { Dd[index] }, [Rn] /// A64: LD1 { Vt.H }[index], [Xn] /// </summary> public static unsafe Vector128<short> LoadAndInsertScalar(Vector128<short> value, byte index, short* address) => LoadAndInsertScalar(value, index, address); /// <summary> /// int32x4_t vld1q_lane_s32 (int32_t const * ptr, int32x4_t src, const int lane) /// A32: VLD1.32 { Dd[index] }, [Rn] /// A64: LD1 { Vt.S }[index], [Xn] /// </summary> public static unsafe Vector128<int> LoadAndInsertScalar(Vector128<int> value, byte index, int* address) => LoadAndInsertScalar(value, index, address); /// <summary> /// int64x2_t vld1q_lane_s64 (int64_t const * ptr, int64x2_t src, const int lane) /// A32: VLDR.64 Dd, [Rn] /// A64: LD1 { Vt.D }[index], [Xn] /// </summary> public static unsafe Vector128<long> LoadAndInsertScalar(Vector128<long> value, byte index, long* address) => LoadAndInsertScalar(value, index, address); /// <summary> /// int8x16_t vld1q_lane_s8 (int8_t const * ptr, int8x16_t src, const int lane) /// A32: VLD1.8 { Dd[index] }, [Rn] /// A64: LD1 { Vt.B }[index], [Xn] /// </summary> public static unsafe Vector128<sbyte> LoadAndInsertScalar(Vector128<sbyte> value, byte index, sbyte* address) => LoadAndInsertScalar(value, index, address); /// <summary> /// float32x4_t vld1q_lane_f32 (float32_t const * ptr, float32x4_t src, const int lane) /// A32: VLD1.32 { Dd[index] }, [Rn] /// A64: LD1 { Vt.S }[index], [Xn] /// </summary> public static unsafe Vector128<float> LoadAndInsertScalar(Vector128<float> value, byte 
index, float* address) => LoadAndInsertScalar(value, index, address); /// <summary> /// uint16x8_t vld1q_lane_u16 (uint16_t const * ptr, uint16x8_t src, const int lane) /// A32: VLD1.16 { Dd[index] }, [Rn] /// A64: LD1 { Vt.H }[index], [Xn] /// </summary> public static unsafe Vector128<ushort> LoadAndInsertScalar(Vector128<ushort> value, byte index, ushort* address) => LoadAndInsertScalar(value, index, address); /// <summary> /// uint32x4_t vld1q_lane_u32 (uint32_t const * ptr, uint32x4_t src, const int lane) /// A32: VLD1.32 { Dd[index] }, [Rn] /// A64: LD1 { Vt.S }[index], [Xn] /// </summary> public static unsafe Vector128<uint> LoadAndInsertScalar(Vector128<uint> value, byte index, uint* address) => LoadAndInsertScalar(value, index, address); /// <summary> /// uint64x2_t vld1q_lane_u64 (uint64_t const * ptr, uint64x2_t src, const int lane) /// A32: VLDR.64 Dd, [Rn] /// A64: LD1 { Vt.D }[index], [Xn] /// </summary> public static unsafe Vector128<ulong> LoadAndInsertScalar(Vector128<ulong> value, byte index, ulong* address) => LoadAndInsertScalar(value, index, address); /// <summary> /// uint8x8_t vld1_dup_u8 (uint8_t const * ptr) /// A32: VLD1.8 { Dd[] }, [Rn] /// A64: LD1R { Vt.8B }, [Xn] /// </summary> public static unsafe Vector64<byte> LoadAndReplicateToVector64(byte* address) => LoadAndReplicateToVector64(address); /// <summary> /// int16x4_t vld1_dup_s16 (int16_t const * ptr) /// A32: VLD1.16 { Dd[] }, [Rn] /// A64: LD1R { Vt.4H }, [Xn] /// </summary> public static unsafe Vector64<short> LoadAndReplicateToVector64(short* address) => LoadAndReplicateToVector64(address); /// <summary> /// int32x2_t vld1_dup_s32 (int32_t const * ptr) /// A32: VLD1.32 { Dd[] }, [Rn] /// A64: LD1R { Vt.2S }, [Xn] /// </summary> public static unsafe Vector64<int> LoadAndReplicateToVector64(int* address) => LoadAndReplicateToVector64(address); /// <summary> /// int8x8_t vld1_dup_s8 (int8_t const * ptr) /// A32: VLD1.8 { Dd[] }, [Rn] /// A64: LD1R { Vt.8B }, [Xn] /// </summary> public static unsafe Vector64<sbyte> LoadAndReplicateToVector64(sbyte* address) => LoadAndReplicateToVector64(address); /// <summary> /// float32x2_t vld1_dup_f32 (float32_t const * ptr) /// A32: VLD1.32 { Dd[] }, [Rn] /// A64: LD1R { Vt.2S }, [Xn] /// </summary> public static unsafe Vector64<float> LoadAndReplicateToVector64(float* address) => LoadAndReplicateToVector64(address); /// <summary> /// uint16x4_t vld1_dup_u16 (uint16_t const * ptr) /// A32: VLD1.16 { Dd[] }, [Rn] /// A64: LD1R { Vt.4H }, [Xn] /// </summary> public static unsafe Vector64<ushort> LoadAndReplicateToVector64(ushort* address) => LoadAndReplicateToVector64(address); /// <summary> /// uint32x2_t vld1_dup_u32 (uint32_t const * ptr) /// A32: VLD1.32 { Dd[] }, [Rn] /// A64: LD1R { Vt.2S }, [Xn] /// </summary> public static unsafe Vector64<uint> LoadAndReplicateToVector64(uint* address) => LoadAndReplicateToVector64(address); /// <summary> /// uint8x16_t vld1q_dup_u8 (uint8_t const * ptr) /// A32: VLD1.8 { Dd[], Dd+1[] }, [Rn] /// A64: LD1R { Vt.16B }, [Xn] /// </summary> public static unsafe Vector128<byte> LoadAndReplicateToVector128(byte* address) => LoadAndReplicateToVector128(address); /// <summary> /// int16x8_t vld1q_dup_s16 (int16_t const * ptr) /// A32: VLD1.16 { Dd[], Dd+1[] }, [Rn] /// A64: LD1R { Vt.8H }, [Xn] /// </summary> public static unsafe Vector128<short> LoadAndReplicateToVector128(short* address) => LoadAndReplicateToVector128(address); /// <summary> /// int32x4_t vld1q_dup_s32 (int32_t const * ptr) /// A32: VLD1.32 { Dd[], Dd+1[] }, [Rn] /// 
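
        // Editor's sketch (illustrative only, not part of the API surface):
        // LoadAndReplicateToVector64/128 (LD1R) reads one element and broadcasts
        // it to every lane, e.g. to splat a kernel coefficient from memory.
        private static unsafe Vector128<float> SplatCoefficientExample(float* coefficients, int index)
        {
            return AdvSimd.LoadAndReplicateToVector128(coefficients + index);
        }
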
A64: LD1R { Vt.4S }, [Xn] /// </summary> public static unsafe Vector128<int> LoadAndReplicateToVector128(int* address) => LoadAndReplicateToVector128(address); /// <summary> /// int8x16_t vld1q_dup_s8 (int8_t const * ptr) /// A32: VLD1.8 { Dd[], Dd+1[] }, [Rn] /// A64: LD1R { Vt.16B }, [Xn] /// </summary> public static unsafe Vector128<sbyte> LoadAndReplicateToVector128(sbyte* address) => LoadAndReplicateToVector128(address); /// <summary> /// float32x4_t vld1q_dup_f32 (float32_t const * ptr) /// A32: VLD1.32 { Dd[], Dd+1[] }, [Rn] /// A64: LD1R { Vt.4S }, [Xn] /// </summary> public static unsafe Vector128<float> LoadAndReplicateToVector128(float* address) => LoadAndReplicateToVector128(address); /// <summary> /// uint16x8_t vld1q_dup_u16 (uint16_t const * ptr) /// A32: VLD1.16 { Dd[], Dd+1[] }, [Rn] /// A64: LD1R { Vt.8H }, [Xn] /// </summary> public static unsafe Vector128<ushort> LoadAndReplicateToVector128(ushort* address) => LoadAndReplicateToVector128(address); /// <summary> /// uint32x4_t vld1q_dup_u32 (uint32_t const * ptr) /// A32: VLD1.32 { Dd[], Dd+1[] }, [Rn] /// A64: LD1R { Vt.4S }, [Xn] /// </summary> public static unsafe Vector128<uint> LoadAndReplicateToVector128(uint* address) => LoadAndReplicateToVector128(address); /// <summary> /// uint8x8_t vld1_u8 (uint8_t const * ptr) /// A32: VLD1.8 Dd, [Rn] /// A64: LD1 Vt.8B, [Xn] /// </summary> public static unsafe Vector64<byte> LoadVector64(byte* address) => LoadVector64(address); /// <summary> /// float64x1_t vld1_f64 (float64_t const * ptr) /// A32: VLD1.64 Dd, [Rn] /// A64: LD1 Vt.1D, [Xn] /// </summary> public static unsafe Vector64<double> LoadVector64(double* address) => LoadVector64(address); /// <summary> /// int16x4_t vld1_s16 (int16_t const * ptr) /// A32: VLD1.16 Dd, [Rn] /// A64: LD1 Vt.4H, [Xn] /// </summary> public static unsafe Vector64<short> LoadVector64(short* address) => LoadVector64(address); /// <summary> /// int32x2_t vld1_s32 (int32_t const * ptr) /// A32: VLD1.32 Dd, [Rn] /// A64: LD1 Vt.2S, [Xn] /// </summary> public static unsafe Vector64<int> LoadVector64(int* address) => LoadVector64(address); /// <summary> /// int64x1_t vld1_s64 (int64_t const * ptr) /// A32: VLD1.64 Dd, [Rn] /// A64: LD1 Vt.1D, [Xn] /// </summary> public static unsafe Vector64<long> LoadVector64(long* address) => LoadVector64(address); /// <summary> /// int8x8_t vld1_s8 (int8_t const * ptr) /// A32: VLD1.8 Dd, [Rn] /// A64: LD1 Vt.8B, [Xn] /// </summary> public static unsafe Vector64<sbyte> LoadVector64(sbyte* address) => LoadVector64(address); /// <summary> /// float32x2_t vld1_f32 (float32_t const * ptr) /// A32: VLD1.32 Dd, [Rn] /// A64: LD1 Vt.2S, [Xn] /// </summary> public static unsafe Vector64<float> LoadVector64(float* address) => LoadVector64(address); /// <summary> /// uint16x4_t vld1_u16 (uint16_t const * ptr) /// A32: VLD1.16 Dd, [Rn] /// A64: LD1 Vt.4H, [Xn] /// </summary> public static unsafe Vector64<ushort> LoadVector64(ushort* address) => LoadVector64(address); /// <summary> /// uint32x2_t vld1_u32 (uint32_t const * ptr) /// A32: VLD1.32 Dd, [Rn] /// A64: LD1 Vt.2S, [Xn] /// </summary> public static unsafe Vector64<uint> LoadVector64(uint* address) => LoadVector64(address); /// <summary> /// uint64x1_t vld1_u64 (uint64_t const * ptr) /// A32: VLD1.64 Dd, [Rn] /// A64: LD1 Vt.1D, [Xn] /// </summary> public static unsafe Vector64<ulong> LoadVector64(ulong* address) => LoadVector64(address); /// <summary> /// uint8x16_t vld1q_u8 (uint8_t const * ptr) /// A32: VLD1.8 Dd, Dd+1, [Rn] /// A64: LD1 Vt.16B, [Xn] /// 
        /// </summary>
        public static unsafe Vector128<byte> LoadVector128(byte* address) => LoadVector128(address);

        /// <summary>
        /// float64x2_t vld1q_f64 (float64_t const * ptr)
        /// A32: VLD1.64 Dd, Dd+1, [Rn]
        /// A64: LD1 Vt.2D, [Xn]
        /// </summary>
        public static unsafe Vector128<double> LoadVector128(double* address) => LoadVector128(address);

        /// <summary>
        /// int16x8_t vld1q_s16 (int16_t const * ptr)
        /// A32: VLD1.16 Dd, Dd+1, [Rn]
        /// A64: LD1 Vt.8H, [Xn]
        /// </summary>
        public static unsafe Vector128<short> LoadVector128(short* address) => LoadVector128(address);

        /// <summary>
        /// int32x4_t vld1q_s32 (int32_t const * ptr)
        /// A32: VLD1.32 Dd, Dd+1, [Rn]
        /// A64: LD1 Vt.4S, [Xn]
        /// </summary>
        public static unsafe Vector128<int> LoadVector128(int* address) => LoadVector128(address);

        /// <summary>
        /// int64x2_t vld1q_s64 (int64_t const * ptr)
        /// A32: VLD1.64 Dd, Dd+1, [Rn]
        /// A64: LD1 Vt.2D, [Xn]
        /// </summary>
        public static unsafe Vector128<long> LoadVector128(long* address) => LoadVector128(address);

        /// <summary>
        /// int8x16_t vld1q_s8 (int8_t const * ptr)
        /// A32: VLD1.8 Dd, Dd+1, [Rn]
        /// A64: LD1 Vt.16B, [Xn]
        /// </summary>
        public static unsafe Vector128<sbyte> LoadVector128(sbyte* address) => LoadVector128(address);

        /// <summary>
        /// float32x4_t vld1q_f32 (float32_t const * ptr)
        /// A32: VLD1.32 Dd, Dd+1, [Rn]
        /// A64: LD1 Vt.4S, [Xn]
        /// </summary>
        public static unsafe Vector128<float> LoadVector128(float* address) => LoadVector128(address);

        /// <summary>
        /// uint16x8_t vld1q_u16 (uint16_t const * ptr)
        /// A32: VLD1.16 Dd, Dd+1, [Rn]
        /// A64: LD1 Vt.8H, [Xn]
        /// </summary>
        public static unsafe Vector128<ushort> LoadVector128(ushort* address) => LoadVector128(address);

        /// <summary>
        /// uint32x4_t vld1q_u32 (uint32_t const * ptr)
        /// A32: VLD1.32 Dd, Dd+1, [Rn]
        /// A64: LD1 Vt.4S, [Xn]
        /// </summary>
        public static unsafe Vector128<uint> LoadVector128(uint* address) => LoadVector128(address);

        /// <summary>
        /// uint64x2_t vld1q_u64 (uint64_t const * ptr)
        /// A32: VLD1.64 Dd, Dd+1, [Rn]
        /// A64: LD1 Vt.2D, [Xn]
        /// </summary>
        public static unsafe Vector128<ulong> LoadVector128(ulong* address) => LoadVector128(address);

        /// <summary>
        /// uint8x8_t vmax_u8 (uint8x8_t a, uint8x8_t b)
        /// A32: VMAX.U8 Dd, Dn, Dm
        /// A64: UMAX Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<byte> Max(Vector64<byte> left, Vector64<byte> right) => Max(left, right);

        /// <summary>
        /// int16x4_t vmax_s16 (int16x4_t a, int16x4_t b)
        /// A32: VMAX.S16 Dd, Dn, Dm
        /// A64: SMAX Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<short> Max(Vector64<short> left, Vector64<short> right) => Max(left, right);

        /// <summary>
        /// int32x2_t vmax_s32 (int32x2_t a, int32x2_t b)
        /// A32: VMAX.S32 Dd, Dn, Dm
        /// A64: SMAX Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<int> Max(Vector64<int> left, Vector64<int> right) => Max(left, right);

        /// <summary>
        /// int8x8_t vmax_s8 (int8x8_t a, int8x8_t b)
        /// A32: VMAX.S8 Dd, Dn, Dm
        /// A64: SMAX Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<sbyte> Max(Vector64<sbyte> left, Vector64<sbyte> right) => Max(left, right);

        /// <summary>
        /// float32x2_t vmax_f32 (float32x2_t a, float32x2_t b)
        /// A32: VMAX.F32 Dd, Dn, Dm
        /// A64: FMAX Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<float> Max(Vector64<float> left, Vector64<float> right) => Max(left, right);

        /// <summary>
        /// uint16x4_t vmax_u16 (uint16x4_t a, uint16x4_t b)
        /// A32: VMAX.U16 Dd, Dn, Dm
        /// A64: UMAX Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<ushort> Max(Vector64<ushort>
left, Vector64<ushort> right) => Max(left, right); /// <summary> /// uint32x2_t vmax_u32 (uint32x2_t a, uint32x2_t b) /// A32: VMAX.U32 Dd, Dn, Dm /// A64: UMAX Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<uint> Max(Vector64<uint> left, Vector64<uint> right) => Max(left, right); /// <summary> /// uint8x16_t vmaxq_u8 (uint8x16_t a, uint8x16_t b) /// A32: VMAX.U8 Qd, Qn, Qm /// A64: UMAX Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> Max(Vector128<byte> left, Vector128<byte> right) => Max(left, right); /// <summary> /// int16x8_t vmaxq_s16 (int16x8_t a, int16x8_t b) /// A32: VMAX.S16 Qd, Qn, Qm /// A64: SMAX Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<short> Max(Vector128<short> left, Vector128<short> right) => Max(left, right); /// <summary> /// int32x4_t vmaxq_s32 (int32x4_t a, int32x4_t b) /// A32: VMAX.S32 Qd, Qn, Qm /// A64: SMAX Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<int> Max(Vector128<int> left, Vector128<int> right) => Max(left, right); /// <summary> /// int8x16_t vmaxq_s8 (int8x16_t a, int8x16_t b) /// A32: VMAX.S8 Qd, Qn, Qm /// A64: SMAX Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> Max(Vector128<sbyte> left, Vector128<sbyte> right) => Max(left, right); /// <summary> /// float32x4_t vmaxq_f32 (float32x4_t a, float32x4_t b) /// A32: VMAX.F32 Qd, Qn, Qm /// A64: FMAX Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<float> Max(Vector128<float> left, Vector128<float> right) => Max(left, right); /// <summary> /// uint16x8_t vmaxq_u16 (uint16x8_t a, uint16x8_t b) /// A32: VMAX.U16 Qd, Qn, Qm /// A64: UMAX Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<ushort> Max(Vector128<ushort> left, Vector128<ushort> right) => Max(left, right); /// <summary> /// uint32x4_t vmaxq_u32 (uint32x4_t a, uint32x4_t b) /// A32: VMAX.U32 Qd, Qn, Qm /// A64: UMAX Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<uint> Max(Vector128<uint> left, Vector128<uint> right) => Max(left, right); /// <summary> /// float32x2_t vmaxnm_f32 (float32x2_t a, float32x2_t b) /// A32: VMAXNM.F32 Dd, Dn, Dm /// A64: FMAXNM Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<float> MaxNumber(Vector64<float> left, Vector64<float> right) => MaxNumber(left, right); /// <summary> /// float32x4_t vmaxnmq_f32 (float32x4_t a, float32x4_t b) /// A32: VMAXNM.F32 Qd, Qn, Qm /// A64: FMAXNM Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<float> MaxNumber(Vector128<float> left, Vector128<float> right) => MaxNumber(left, right); /// <summary> /// float64x1_t vmaxnm_f64 (float64x1_t a, float64x1_t b) /// A32: VMAXNM.F64 Dd, Dn, Dm /// A64: FMAXNM Dd, Dn, Dm /// </summary> public static Vector64<double> MaxNumberScalar(Vector64<double> left, Vector64<double> right) => MaxNumberScalar(left, right); /// <summary> /// float32_t vmaxnms_f32 (float32_t a, float32_t b) /// A32: VMAXNM.F32 Sd, Sn, Sm /// A64: FMAXNM Sd, Sn, Sm /// </summary> public static Vector64<float> MaxNumberScalar(Vector64<float> left, Vector64<float> right) => MaxNumberScalar(left, right); /// <summary> /// uint8x8_t vpmax_u8 (uint8x8_t a, uint8x8_t b) /// A32: VPMAX.U8 Dd, Dn, Dm /// A64: UMAXP Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> MaxPairwise(Vector64<byte> left, Vector64<byte> right) => MaxPairwise(left, right); /// <summary> /// int16x4_t vpmax_s16 (int16x4_t a, int16x4_t b) /// A32: VPMAX.S16 Dd, Dn, Dm /// A64: SMAXP Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<short> MaxPairwise(Vector64<short> 
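
        // Editor's sketch (illustrative only, not part of the API surface):
        // The usual shape of a kernel built on these APIs: LoadVector128 both
        // inputs, combine (here with Max), then Store. Assumes count is a multiple
        // of 16 and that the caller checked AdvSimd.IsSupported.
        private static unsafe void ElementwiseMaxExample(byte* a, byte* b, byte* destination, int count)
        {
            for (int i = 0; i < count; i += 16)
            {
                Vector128<byte> va = AdvSimd.LoadVector128(a + i);
                Vector128<byte> vb = AdvSimd.LoadVector128(b + i);
                AdvSimd.Store(destination + i, AdvSimd.Max(va, vb));
            }
        }
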
left, Vector64<short> right) => MaxPairwise(left, right); /// <summary> /// int32x2_t vpmax_s32 (int32x2_t a, int32x2_t b) /// A32: VPMAX.S32 Dd, Dn, Dm /// A64: SMAXP Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<int> MaxPairwise(Vector64<int> left, Vector64<int> right) => MaxPairwise(left, right); /// <summary> /// int8x8_t vpmax_s8 (int8x8_t a, int8x8_t b) /// A32: VPMAX.S8 Dd, Dn, Dm /// A64: SMAXP Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> MaxPairwise(Vector64<sbyte> left, Vector64<sbyte> right) => MaxPairwise(left, right); /// <summary> /// float32x2_t vpmax_f32 (float32x2_t a, float32x2_t b) /// A32: VPMAX.F32 Dd, Dn, Dm /// A64: FMAXP Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<float> MaxPairwise(Vector64<float> left, Vector64<float> right) => MaxPairwise(left, right); /// <summary> /// uint16x4_t vpmax_u16 (uint16x4_t a, uint16x4_t b) /// A32: VPMAX.U16 Dd, Dn, Dm /// A64: UMAXP Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<ushort> MaxPairwise(Vector64<ushort> left, Vector64<ushort> right) => MaxPairwise(left, right); /// <summary> /// uint32x2_t vpmax_u32 (uint32x2_t a, uint32x2_t b) /// A32: VPMAX.U32 Dd, Dn, Dm /// A64: UMAXP Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<uint> MaxPairwise(Vector64<uint> left, Vector64<uint> right) => MaxPairwise(left, right); /// <summary> /// uint8x8_t vmin_u8 (uint8x8_t a, uint8x8_t b) /// A32: VMIN.U8 Dd, Dn, Dm /// A64: UMIN Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> Min(Vector64<byte> left, Vector64<byte> right) => Min(left, right); /// <summary> /// int16x4_t vmin_s16 (int16x4_t a, int16x4_t b) /// A32: VMIN.S16 Dd, Dn, Dm /// A64: SMIN Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<short> Min(Vector64<short> left, Vector64<short> right) => Min(left, right); /// <summary> /// int32x2_t vmin_s32 (int32x2_t a, int32x2_t b) /// A32: VMIN.S32 Dd, Dn, Dm /// A64: SMIN Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<int> Min(Vector64<int> left, Vector64<int> right) => Min(left, right); /// <summary> /// int8x8_t vmin_s8 (int8x8_t a, int8x8_t b) /// A32: VMIN.S8 Dd, Dn, Dm /// A64: SMIN Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> Min(Vector64<sbyte> left, Vector64<sbyte> right) => Min(left, right); /// <summary> /// float32x2_t vmin_f32 (float32x2_t a, float32x2_t b) /// A32: VMIN.F32 Dd, Dn, Dm /// A64: FMIN Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<float> Min(Vector64<float> left, Vector64<float> right) => Min(left, right); /// <summary> /// uint16x4_t vmin_u16 (uint16x4_t a, uint16x4_t b) /// A32: VMIN.U16 Dd, Dn, Dm /// A64: UMIN Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<ushort> Min(Vector64<ushort> left, Vector64<ushort> right) => Min(left, right); /// <summary> /// uint32x2_t vmin_u32 (uint32x2_t a, uint32x2_t b) /// A32: VMIN.U32 Dd, Dn, Dm /// A64: UMIN Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<uint> Min(Vector64<uint> left, Vector64<uint> right) => Min(left, right); /// <summary> /// uint8x16_t vminq_u8 (uint8x16_t a, uint8x16_t b) /// A32: VMIN.U8 Qd, Qn, Qm /// A64: UMIN Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> Min(Vector128<byte> left, Vector128<byte> right) => Min(left, right); /// <summary> /// int16x8_t vminq_s16 (int16x8_t a, int16x8_t b) /// A32: VMIN.S16 Qd, Qn, Qm /// A64: SMIN Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<short> Min(Vector128<short> left, Vector128<short> right) => Min(left, right); /// 
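
        // Editor's sketch (illustrative only, not part of the API surface):
        // MaxPairwise (VPMAX) takes the max of adjacent pairs, so log2(lanes)
        // applications funnel a Vector64 down to its horizontal maximum.
        private static byte HorizontalMaxExample(Vector64<byte> v)
        {
            Vector64<byte> m = AdvSimd.MaxPairwise(v, v); // 8 lanes -> 4 pair maxes
            m = AdvSimd.MaxPairwise(m, m);
            m = AdvSimd.MaxPairwise(m, m);                // max now in lane 0
            return m.ToScalar();
        }
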
<summary> /// int32x4_t vminq_s32 (int32x4_t a, int32x4_t b) /// A32: VMIN.S32 Qd, Qn, Qm /// A64: SMIN Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<int> Min(Vector128<int> left, Vector128<int> right) => Min(left, right); /// <summary> /// int8x16_t vminq_s8 (int8x16_t a, int8x16_t b) /// A32: VMIN.S8 Qd, Qn, Qm /// A64: SMIN Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> Min(Vector128<sbyte> left, Vector128<sbyte> right) => Min(left, right); /// <summary> /// float32x4_t vminq_f32 (float32x4_t a, float32x4_t b) /// A32: VMIN.F32 Qd, Qn, Qm /// A64: FMIN Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<float> Min(Vector128<float> left, Vector128<float> right) => Min(left, right); /// <summary> /// uint16x8_t vminq_u16 (uint16x8_t a, uint16x8_t b) /// A32: VMIN.U16 Qd, Qn, Qm /// A64: UMIN Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<ushort> Min(Vector128<ushort> left, Vector128<ushort> right) => Min(left, right); /// <summary> /// uint32x4_t vminq_u32 (uint32x4_t a, uint32x4_t b) /// A32: VMIN.U32 Qd, Qn, Qm /// A64: UMIN Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<uint> Min(Vector128<uint> left, Vector128<uint> right) => Min(left, right); /// <summary> /// float32x2_t vminnm_f32 (float32x2_t a, float32x2_t b) /// A32: VMINNM.F32 Dd, Dn, Dm /// A64: FMINNM Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<float> MinNumber(Vector64<float> left, Vector64<float> right) => MinNumber(left, right); /// <summary> /// float32x4_t vminnmq_f32 (float32x4_t a, float32x4_t b) /// A32: VMINNM.F32 Qd, Qn, Qm /// A64: FMINNM Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<float> MinNumber(Vector128<float> left, Vector128<float> right) => MinNumber(left, right); /// <summary> /// float64x1_t vminnm_f64 (float64x1_t a, float64x1_t b) /// A32: VMINNM.F64 Dd, Dn, Dm /// A64: FMINNM Dd, Dn, Dm /// </summary> public static Vector64<double> MinNumberScalar(Vector64<double> left, Vector64<double> right) => MinNumberScalar(left, right); /// <summary> /// float32_t vminnms_f32 (float32_t a, float32_t b) /// A32: VMINNM.F32 Sd, Sn, Sm /// A64: FMINNM Sd, Sn, Sm /// </summary> public static Vector64<float> MinNumberScalar(Vector64<float> left, Vector64<float> right) => MinNumberScalar(left, right); /// <summary> /// uint8x8_t vpmin_u8 (uint8x8_t a, uint8x8_t b) /// A32: VPMIN.U8 Dd, Dn, Dm /// A64: UMINP Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> MinPairwise(Vector64<byte> left, Vector64<byte> right) => MinPairwise(left, right); /// <summary> /// int16x4_t vpmin_s16 (int16x4_t a, int16x4_t b) /// A32: VPMIN.S16 Dd, Dn, Dm /// A64: SMINP Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<short> MinPairwise(Vector64<short> left, Vector64<short> right) => MinPairwise(left, right); /// <summary> /// int32x2_t vpmin_s32 (int32x2_t a, int32x2_t b) /// A32: VPMIN.S32 Dd, Dn, Dm /// A64: SMINP Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<int> MinPairwise(Vector64<int> left, Vector64<int> right) => MinPairwise(left, right); /// <summary> /// int8x8_t vpmin_s8 (int8x8_t a, int8x8_t b) /// A32: VPMIN.S8 Dd, Dn, Dm /// A64: SMINP Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> MinPairwise(Vector64<sbyte> left, Vector64<sbyte> right) => MinPairwise(left, right); /// <summary> /// float32x2_t vpmin_f32 (float32x2_t a, float32x2_t b) /// A32: VPMIN.F32 Dd, Dn, Dm /// A64: FMINP Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<float> MinPairwise(Vector64<float> left, 
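
        // Editor's note (illustrative only): Min follows IEEE FMIN semantics and
        // propagates NaN, whereas MinNumber (FMINNM) returns the numeric operand
        // when exactly one input lane is NaN, which suits data with gaps.
        private static Vector64<float> MinIgnoringNaNExample(Vector64<float> a, Vector64<float> b)
        {
            return AdvSimd.MinNumber(a, b); // NaN lanes lose to numeric lanes
        }
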
Vector64<float> right) => MinPairwise(left, right); /// <summary> /// uint16x4_t vpmin_u16 (uint16x4_t a, uint16x4_t b) /// A32: VPMIN.U16 Dd, Dn, Dm /// A64: UMINP Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<ushort> MinPairwise(Vector64<ushort> left, Vector64<ushort> right) => MinPairwise(left, right); /// <summary> /// uint32x2_t vpmin_u32 (uint32x2_t a, uint32x2_t b) /// A32: VPMIN.U32 Dd, Dn, Dm /// A64: UMINP Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<uint> MinPairwise(Vector64<uint> left, Vector64<uint> right) => MinPairwise(left, right); /// <summary> /// uint8x8_t vmul_u8 (uint8x8_t a, uint8x8_t b) /// A32: VMUL.I8 Dd, Dn, Dm /// A64: MUL Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> Multiply(Vector64<byte> left, Vector64<byte> right) => Multiply(left, right); /// <summary> /// int16x4_t vmul_s16 (int16x4_t a, int16x4_t b) /// A32: VMUL.I16 Dd, Dn, Dm /// A64: MUL Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<short> Multiply(Vector64<short> left, Vector64<short> right) => Multiply(left, right); /// <summary> /// int32x2_t vmul_s32 (int32x2_t a, int32x2_t b) /// A32: VMUL.I32 Dd, Dn, Dm /// A64: MUL Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<int> Multiply(Vector64<int> left, Vector64<int> right) => Multiply(left, right); /// <summary> /// int8x8_t vmul_s8 (int8x8_t a, int8x8_t b) /// A32: VMUL.I8 Dd, Dn, Dm /// A64: MUL Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> Multiply(Vector64<sbyte> left, Vector64<sbyte> right) => Multiply(left, right); /// <summary> /// float32x2_t vmul_f32 (float32x2_t a, float32x2_t b) /// A32: VMUL.F32 Dd, Dn, Dm /// A64: FMUL Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<float> Multiply(Vector64<float> left, Vector64<float> right) => Multiply(left, right); /// <summary> /// uint16x4_t vmul_u16 (uint16x4_t a, uint16x4_t b) /// A32: VMUL.I16 Dd, Dn, Dm /// A64: MUL Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<ushort> Multiply(Vector64<ushort> left, Vector64<ushort> right) => Multiply(left, right); /// <summary> /// uint32x2_t vmul_u32 (uint32x2_t a, uint32x2_t b) /// A32: VMUL.I32 Dd, Dn, Dm /// A64: MUL Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<uint> Multiply(Vector64<uint> left, Vector64<uint> right) => Multiply(left, right); /// <summary> /// uint8x16_t vmulq_u8 (uint8x16_t a, uint8x16_t b) /// A32: VMUL.I8 Qd, Qn, Qm /// A64: MUL Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> Multiply(Vector128<byte> left, Vector128<byte> right) => Multiply(left, right); /// <summary> /// int16x8_t vmulq_s16 (int16x8_t a, int16x8_t b) /// A32: VMUL.I16 Qd, Qn, Qm /// A64: MUL Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<short> Multiply(Vector128<short> left, Vector128<short> right) => Multiply(left, right); /// <summary> /// int32x4_t vmulq_s32 (int32x4_t a, int32x4_t b) /// A32: VMUL.I32 Qd, Qn, Qm /// A64: MUL Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<int> Multiply(Vector128<int> left, Vector128<int> right) => Multiply(left, right); /// <summary> /// int8x16_t vmulq_s8 (int8x16_t a, int8x16_t b) /// A32: VMUL.I8 Qd, Qn, Qm /// A64: MUL Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> Multiply(Vector128<sbyte> left, Vector128<sbyte> right) => Multiply(left, right); /// <summary> /// float32x4_t vmulq_f32 (float32x4_t a, float32x4_t b) /// A32: VMUL.F32 Qd, Qn, Qm /// A64: FMUL Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<float> 
Multiply(Vector128<float> left, Vector128<float> right) => Multiply(left, right); /// <summary> /// uint16x8_t vmulq_u16 (uint16x8_t a, uint16x8_t b) /// A32: VMUL.I16 Qd, Qn, Qm /// A64: MUL Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<ushort> Multiply(Vector128<ushort> left, Vector128<ushort> right) => Multiply(left, right); /// <summary> /// uint32x4_t vmulq_u32 (uint32x4_t a, uint32x4_t b) /// A32: VMUL.I32 Qd, Qn, Qm /// A64: MUL Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<uint> Multiply(Vector128<uint> left, Vector128<uint> right) => Multiply(left, right); /// <summary> /// uint8x8_t vmla_u8 (uint8x8_t a, uint8x8_t b, uint8x8_t c) /// A32: VMLA.I8 Dd, Dn, Dm /// A64: MLA Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> MultiplyAdd(Vector64<byte> addend, Vector64<byte> left, Vector64<byte> right) => MultiplyAdd(addend, left, right); /// <summary> /// int16x4_t vmla_s16 (int16x4_t a, int16x4_t b, int16x4_t c) /// A32: VMLA.I16 Dd, Dn, Dm /// A64: MLA Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<short> MultiplyAdd(Vector64<short> addend, Vector64<short> left, Vector64<short> right) => MultiplyAdd(addend, left, right); /// <summary> /// int32x2_t vmla_s32 (int32x2_t a, int32x2_t b, int32x2_t c) /// A32: VMLA.I32 Dd, Dn, Dm /// A64: MLA Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<int> MultiplyAdd(Vector64<int> addend, Vector64<int> left, Vector64<int> right) => MultiplyAdd(addend, left, right); /// <summary> /// int8x8_t vmla_s8 (int8x8_t a, int8x8_t b, int8x8_t c) /// A32: VMLA.I8 Dd, Dn, Dm /// A64: MLA Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> MultiplyAdd(Vector64<sbyte> addend, Vector64<sbyte> left, Vector64<sbyte> right) => MultiplyAdd(addend, left, right); /// <summary> /// uint16x4_t vmla_u16 (uint16x4_t a, uint16x4_t b, uint16x4_t c) /// A32: VMLA.I16 Dd, Dn, Dm /// A64: MLA Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<ushort> MultiplyAdd(Vector64<ushort> addend, Vector64<ushort> left, Vector64<ushort> right) => MultiplyAdd(addend, left, right); /// <summary> /// uint32x2_t vmla_u32 (uint32x2_t a, uint32x2_t b, uint32x2_t c) /// A32: VMLA.I32 Dd, Dn, Dm /// A64: MLA Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<uint> MultiplyAdd(Vector64<uint> addend, Vector64<uint> left, Vector64<uint> right) => MultiplyAdd(addend, left, right); /// <summary> /// uint8x16_t vmlaq_u8 (uint8x16_t a, uint8x16_t b, uint8x16_t c) /// A32: VMLA.I8 Qd, Qn, Qm /// A64: MLA Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> MultiplyAdd(Vector128<byte> addend, Vector128<byte> left, Vector128<byte> right) => MultiplyAdd(addend, left, right); /// <summary> /// int16x8_t vmlaq_s16 (int16x8_t a, int16x8_t b, int16x8_t c) /// A32: VMLA.I16 Qd, Qn, Qm /// A64: MLA Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<short> MultiplyAdd(Vector128<short> addend, Vector128<short> left, Vector128<short> right) => MultiplyAdd(addend, left, right); /// <summary> /// int32x4_t vmlaq_s32 (int32x4_t a, int32x4_t b, int32x4_t c) /// A32: VMLA.I32 Qd, Qn, Qm /// A64: MLA Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<int> MultiplyAdd(Vector128<int> addend, Vector128<int> left, Vector128<int> right) => MultiplyAdd(addend, left, right); /// <summary> /// int8x16_t vmlaq_s8 (int8x16_t a, int8x16_t b, int8x16_t c) /// A32: VMLA.I8 Qd, Qn, Qm /// A64: MLA Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> MultiplyAdd(Vector128<sbyte> addend, Vector128<sbyte> 
left, Vector128<sbyte> right) => MultiplyAdd(addend, left, right); /// <summary> /// uint16x8_t vmlaq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t c) /// A32: VMLA.I16 Qd, Qn, Qm /// A64: MLA Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<ushort> MultiplyAdd(Vector128<ushort> addend, Vector128<ushort> left, Vector128<ushort> right) => MultiplyAdd(addend, left, right); /// <summary> /// uint32x4_t vmlaq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t c) /// A32: VMLA.I32 Qd, Qn, Qm /// A64: MLA Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<uint> MultiplyAdd(Vector128<uint> addend, Vector128<uint> left, Vector128<uint> right) => MultiplyAdd(addend, left, right); /// <summary> /// int16x4_t vmla_n_s16 (int16x4_t a, int16x4_t b, int16_t c) /// A32: VMLA.I16 Dd, Dn, Dm[0] /// A64: MLA Vd.4H, Vn.4H, Vm.H[0] /// </summary> public static Vector64<short> MultiplyAddByScalar(Vector64<short> addend, Vector64<short> left, Vector64<short> right) => MultiplyAddByScalar(addend, left, right); /// <summary> /// int32x2_t vmla_n_s32 (int32x2_t a, int32x2_t b, int32_t c) /// A32: VMLA.I32 Dd, Dn, Dm[0] /// A64: MLA Vd.2S, Vn.2S, Vm.S[0] /// </summary> public static Vector64<int> MultiplyAddByScalar(Vector64<int> addend, Vector64<int> left, Vector64<int> right) => MultiplyAddByScalar(addend, left, right); /// <summary> /// uint16x4_t vmla_n_u16 (uint16x4_t a, uint16x4_t b, uint16_t c) /// A32: VMLA.I16 Dd, Dn, Dm[0] /// A64: MLA Vd.4H, Vn.4H, Vm.H[0] /// </summary> public static Vector64<ushort> MultiplyAddByScalar(Vector64<ushort> addend, Vector64<ushort> left, Vector64<ushort> right) => MultiplyAddByScalar(addend, left, right); /// <summary> /// uint32x2_t vmla_n_u32 (uint32x2_t a, uint32x2_t b, uint32_t c) /// A32: VMLA.I32 Dd, Dn, Dm[0] /// A64: MLA Vd.2S, Vn.2S, Vm.S[0] /// </summary> public static Vector64<uint> MultiplyAddByScalar(Vector64<uint> addend, Vector64<uint> left, Vector64<uint> right) => MultiplyAddByScalar(addend, left, right); /// <summary> /// int16x8_t vmlaq_n_s16 (int16x8_t a, int16x8_t b, int16_t c) /// A32: VMLA.I16 Qd, Qn, Dm[0] /// A64: MLA Vd.8H, Vn.8H, Vm.H[0] /// </summary> public static Vector128<short> MultiplyAddByScalar(Vector128<short> addend, Vector128<short> left, Vector64<short> right) => MultiplyAddByScalar(addend, left, right); /// <summary> /// int32x4_t vmlaq_n_s32 (int32x4_t a, int32x4_t b, int32_t c) /// A32: VMLA.I32 Qd, Qn, Dm[0] /// A64: MLA Vd.4S, Vn.4S, Vm.S[0] /// </summary> public static Vector128<int> MultiplyAddByScalar(Vector128<int> addend, Vector128<int> left, Vector64<int> right) => MultiplyAddByScalar(addend, left, right); /// <summary> /// uint16x8_t vmlaq_n_u16 (uint16x8_t a, uint16x8_t b, uint16_t c) /// A32: VMLA.I16 Qd, Qn, Dm[0] /// A64: MLA Vd.8H, Vn.8H, Vm.H[0] /// </summary> public static Vector128<ushort> MultiplyAddByScalar(Vector128<ushort> addend, Vector128<ushort> left, Vector64<ushort> right) => MultiplyAddByScalar(addend, left, right); /// <summary> /// uint32x4_t vmlaq_n_u32 (uint32x4_t a, uint32x4_t b, uint32_t c) /// A32: VMLA.I32 Qd, Qn, Dm[0] /// A64: MLA Vd.4S, Vn.4S, Vm.S[0] /// </summary> public static Vector128<uint> MultiplyAddByScalar(Vector128<uint> addend, Vector128<uint> left, Vector64<uint> right) => MultiplyAddByScalar(addend, left, right); /// <summary> /// int16x4_t vmla_lane_s16 (int16x4_t a, int16x4_t b, int16x4_t v, const int lane) /// A32: VMLA.I16 Dd, Dn, Dm[lane] /// A64: MLA Vd.4H, Vn.4H, Vm.H[lane] /// </summary> public static Vector64<short> MultiplyAddBySelectedScalar(Vector64<short> 
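
        // Editor's sketch (illustrative only, not part of the API surface):
        // MultiplyAddByScalar (VMLA ..., Dm[0]) multiplies every lane of `left` by
        // lane 0 of `right` and accumulates, avoiding an explicit broadcast vector.
        private static Vector128<int> ScaleAccumulateExample(Vector128<int> acc, Vector128<int> x, int scale)
        {
            return AdvSimd.MultiplyAddByScalar(acc, x, Vector64.CreateScalar(scale));
        }
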
/// <summary>
/// int16x4_t vmla_lane_s16 (int16x4_t a, int16x4_t b, int16x4_t v, const int lane)
///   A32: VMLA.I16 Dd, Dn, Dm[lane]
///   A64: MLA Vd.4H, Vn.4H, Vm.H[lane]
/// </summary>
public static Vector64<short> MultiplyAddBySelectedScalar(Vector64<short> addend, Vector64<short> left, Vector64<short> right, byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex);

/// <summary>
/// int16x4_t vmla_laneq_s16 (int16x4_t a, int16x4_t b, int16x8_t v, const int lane)
///   A32: VMLA.I16 Dd, Dn, Dm[lane]
///   A64: MLA Vd.4H, Vn.4H, Vm.H[lane]
/// </summary>
public static Vector64<short> MultiplyAddBySelectedScalar(Vector64<short> addend, Vector64<short> left, Vector128<short> right, byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex);

/// <summary>
/// int32x2_t vmla_lane_s32 (int32x2_t a, int32x2_t b, int32x2_t v, const int lane)
///   A32: VMLA.I32 Dd, Dn, Dm[lane]
///   A64: MLA Vd.2S, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector64<int> MultiplyAddBySelectedScalar(Vector64<int> addend, Vector64<int> left, Vector64<int> right, byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex);

/// <summary>
/// int32x2_t vmla_laneq_s32 (int32x2_t a, int32x2_t b, int32x4_t v, const int lane)
///   A32: VMLA.I32 Dd, Dn, Dm[lane]
///   A64: MLA Vd.2S, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector64<int> MultiplyAddBySelectedScalar(Vector64<int> addend, Vector64<int> left, Vector128<int> right, byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex);

/// <summary>
/// uint16x4_t vmla_lane_u16 (uint16x4_t a, uint16x4_t b, uint16x4_t v, const int lane)
///   A32: VMLA.I16 Dd, Dn, Dm[lane]
///   A64: MLA Vd.4H, Vn.4H, Vm.H[lane]
/// </summary>
public static Vector64<ushort> MultiplyAddBySelectedScalar(Vector64<ushort> addend, Vector64<ushort> left, Vector64<ushort> right, byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex);

/// <summary>
/// uint16x4_t vmla_laneq_u16 (uint16x4_t a, uint16x4_t b, uint16x8_t v, const int lane)
///   A32: VMLA.I16 Dd, Dn, Dm[lane]
///   A64: MLA Vd.4H, Vn.4H, Vm.H[lane]
/// </summary>
public static Vector64<ushort> MultiplyAddBySelectedScalar(Vector64<ushort> addend, Vector64<ushort> left, Vector128<ushort> right, byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex);

/// <summary>
/// uint32x2_t vmla_lane_u32 (uint32x2_t a, uint32x2_t b, uint32x2_t v, const int lane)
///   A32: VMLA.I32 Dd, Dn, Dm[lane]
///   A64: MLA Vd.2S, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector64<uint> MultiplyAddBySelectedScalar(Vector64<uint> addend, Vector64<uint> left, Vector64<uint> right, byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex);

/// <summary>
/// uint32x2_t vmla_laneq_u32 (uint32x2_t a, uint32x2_t b, uint32x4_t v, const int lane)
///   A32: VMLA.I32 Dd, Dn, Dm[lane]
///   A64: MLA Vd.2S, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector64<uint> MultiplyAddBySelectedScalar(Vector64<uint> addend, Vector64<uint> left, Vector128<uint> right, byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex);

/// <summary>
/// int16x8_t vmlaq_lane_s16 (int16x8_t a, int16x8_t b, int16x4_t v, const int lane)
///   A32: VMLA.I16 Qd, Qn, Dm[lane]
///   A64: MLA Vd.8H, Vn.8H, Vm.H[lane]
/// </summary>
public static Vector128<short> MultiplyAddBySelectedScalar(Vector128<short> addend, Vector128<short> left, Vector64<short> right, byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex);

/// <summary>
/// int16x8_t vmlaq_laneq_s16 (int16x8_t a, int16x8_t b, int16x8_t v, const int lane)
///   A32: VMLA.I16 Qd, Qn, Dm[lane]
///   A64: MLA Vd.8H, Vn.8H, Vm.H[lane]
/// </summary>
public static Vector128<short> MultiplyAddBySelectedScalar(Vector128<short> addend, Vector128<short> left, Vector128<short> right, byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex);

/// <summary>
/// int32x4_t vmlaq_lane_s32 (int32x4_t a, int32x4_t b, int32x2_t v, const int lane)
///   A32: VMLA.I32 Qd, Qn, Dm[lane]
///   A64: MLA Vd.4S, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<int> MultiplyAddBySelectedScalar(Vector128<int> addend, Vector128<int> left, Vector64<int> right, byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex);

/// <summary>
/// int32x4_t vmlaq_laneq_s32 (int32x4_t a, int32x4_t b, int32x4_t v, const int lane)
///   A32: VMLA.I32 Qd, Qn, Dm[lane]
///   A64: MLA Vd.4S, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<int> MultiplyAddBySelectedScalar(Vector128<int> addend, Vector128<int> left, Vector128<int> right, byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex);

/// <summary>
/// uint16x8_t vmlaq_lane_u16 (uint16x8_t a, uint16x8_t b, uint16x4_t v, const int lane)
///   A32: VMLA.I16 Qd, Qn, Dm[lane]
///   A64: MLA Vd.8H, Vn.8H, Vm.H[lane]
/// </summary>
public static Vector128<ushort> MultiplyAddBySelectedScalar(Vector128<ushort> addend, Vector128<ushort> left, Vector64<ushort> right, byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex);

/// <summary>
/// uint16x8_t vmlaq_laneq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t v, const int lane)
///   A32: VMLA.I16 Qd, Qn, Dm[lane]
///   A64: MLA Vd.8H, Vn.8H, Vm.H[lane]
/// </summary>
public static Vector128<ushort> MultiplyAddBySelectedScalar(Vector128<ushort> addend, Vector128<ushort> left, Vector128<ushort> right, byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex);

/// <summary>
/// uint32x4_t vmlaq_lane_u32 (uint32x4_t a, uint32x4_t b, uint32x2_t v, const int lane)
///   A32: VMLA.I32 Qd, Qn, Dm[lane]
///   A64: MLA Vd.4S, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<uint> MultiplyAddBySelectedScalar(Vector128<uint> addend, Vector128<uint> left, Vector64<uint> right, byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex);

/// <summary>
/// uint32x4_t vmlaq_laneq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t v, const int lane)
///   A32: VMLA.I32 Qd, Qn, Dm[lane]
///   A64: MLA Vd.4S, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<uint> MultiplyAddBySelectedScalar(Vector128<uint> addend, Vector128<uint> left, Vector128<uint> right, byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex);
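// Illustrative sketch: the `rightIndex` argument of MultiplyAddBySelectedScalar selects
// which lane of `right` is broadcast, i.e. result[i] = addend[i] + left[i] * right[rightIndex].
// Hypothetical values:
//
//   Vector64<short> acc = Vector64.Create((short)0);
//   Vector64<short> a   = Vector64.Create((short)1, (short)2, (short)3, (short)4);
//   Vector64<short> v   = Vector64.Create((short)7, (short)8, (short)9, (short)10);
//   Vector64<short> r   = AdvSimd.MultiplyAddBySelectedScalar(acc, a, v, 2);
//   // r = { 9, 18, 27, 36 }  (every lane multiplied by v[2] == 9)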
/// <summary>
/// int16x4_t vmul_n_s16 (int16x4_t a, int16_t b)
///   A32: VMUL.I16 Dd, Dn, Dm[0]
///   A64: MUL Vd.4H, Vn.4H, Vm.H[0]
/// </summary>
public static Vector64<short> MultiplyByScalar(Vector64<short> left, Vector64<short> right) => MultiplyByScalar(left, right);

/// <summary>
/// int32x2_t vmul_n_s32 (int32x2_t a, int32_t b)
///   A32: VMUL.I32 Dd, Dn, Dm[0]
///   A64: MUL Vd.2S, Vn.2S, Vm.S[0]
/// </summary>
public static Vector64<int> MultiplyByScalar(Vector64<int> left, Vector64<int> right) => MultiplyByScalar(left, right);

/// <summary>
/// float32x2_t vmul_n_f32 (float32x2_t a, float32_t b)
///   A32: VMUL.F32 Dd, Dn, Dm[0]
///   A64: FMUL Vd.2S, Vn.2S, Vm.S[0]
/// </summary>
public static Vector64<float> MultiplyByScalar(Vector64<float> left, Vector64<float> right) => MultiplyByScalar(left, right);

/// <summary>
/// uint16x4_t vmul_n_u16 (uint16x4_t a, uint16_t b)
///   A32: VMUL.I16 Dd, Dn, Dm[0]
///   A64: MUL Vd.4H, Vn.4H, Vm.H[0]
/// </summary>
public static Vector64<ushort> MultiplyByScalar(Vector64<ushort> left, Vector64<ushort> right) => MultiplyByScalar(left, right);

/// <summary>
/// uint32x2_t vmul_n_u32 (uint32x2_t a, uint32_t b)
///   A32: VMUL.I32 Dd, Dn, Dm[0]
///   A64: MUL Vd.2S, Vn.2S, Vm.S[0]
/// </summary>
public static Vector64<uint> MultiplyByScalar(Vector64<uint> left, Vector64<uint> right) => MultiplyByScalar(left, right);

/// <summary>
/// int16x8_t vmulq_n_s16 (int16x8_t a, int16_t b)
///   A32: VMUL.I16 Qd, Qn, Dm[0]
///   A64: MUL Vd.8H, Vn.8H, Vm.H[0]
/// </summary>
public static Vector128<short> MultiplyByScalar(Vector128<short> left, Vector64<short> right) => MultiplyByScalar(left, right);

/// <summary>
/// int32x4_t vmulq_n_s32 (int32x4_t a, int32_t b)
///   A32: VMUL.I32 Qd, Qn, Dm[0]
///   A64: MUL Vd.4S, Vn.4S, Vm.S[0]
/// </summary>
public static Vector128<int> MultiplyByScalar(Vector128<int> left, Vector64<int> right) => MultiplyByScalar(left, right);

/// <summary>
/// float32x4_t vmulq_n_f32 (float32x4_t a, float32_t b)
///   A32: VMUL.F32 Qd, Qn, Dm[0]
///   A64: FMUL Vd.4S, Vn.4S, Vm.S[0]
/// </summary>
public static Vector128<float> MultiplyByScalar(Vector128<float> left, Vector64<float> right) => MultiplyByScalar(left, right);

/// <summary>
/// uint16x8_t vmulq_n_u16 (uint16x8_t a, uint16_t b)
///   A32: VMUL.I16 Qd, Qn, Dm[0]
///   A64: MUL Vd.8H, Vn.8H, Vm.H[0]
/// </summary>
public static Vector128<ushort> MultiplyByScalar(Vector128<ushort> left, Vector64<ushort> right) => MultiplyByScalar(left, right);

/// <summary>
/// uint32x4_t vmulq_n_u32 (uint32x4_t a, uint32_t b)
///   A32: VMUL.I32 Qd, Qn, Dm[0]
///   A64: MUL Vd.4S, Vn.4S, Vm.S[0]
/// </summary>
public static Vector128<uint> MultiplyByScalar(Vector128<uint> left, Vector64<uint> right) => MultiplyByScalar(left, right);

/// <summary>
/// int16x4_t vmul_lane_s16 (int16x4_t a, int16x4_t v, const int lane)
///   A32: VMUL.I16 Dd, Dn, Dm[lane]
///   A64: MUL Vd.4H, Vn.4H, Vm.H[lane]
/// </summary>
public static Vector64<short> MultiplyBySelectedScalar(Vector64<short> left, Vector64<short> right, byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);

/// <summary>
/// int16x4_t vmul_laneq_s16 (int16x4_t a, int16x8_t v, const int lane)
///   A32: VMUL.I16 Dd, Dn, Dm[lane]
///   A64: MUL Vd.4H, Vn.4H, Vm.H[lane]
/// </summary>
public static Vector64<short> MultiplyBySelectedScalar(Vector64<short> left, Vector128<short> right, byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);

/// <summary>
/// int32x2_t vmul_lane_s32 (int32x2_t a, int32x2_t v, const int lane)
///   A32: VMUL.I32 Dd, Dn, Dm[lane]
///   A64: MUL Vd.2S, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector64<int> MultiplyBySelectedScalar(Vector64<int> left, Vector64<int> right, byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);

/// <summary>
/// int32x2_t vmul_laneq_s32 (int32x2_t a, int32x4_t v, const int lane)
///   A32: VMUL.I32 Dd, Dn, Dm[lane]
///   A64: MUL Vd.2S, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector64<int> MultiplyBySelectedScalar(Vector64<int> left, Vector128<int> right, byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);

/// <summary>
/// float32x2_t vmul_lane_f32 (float32x2_t a, float32x2_t v, const int lane)
///   A32: VMUL.F32 Dd, Dn, Dm[lane]
///   A64: FMUL Vd.2S, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector64<float> MultiplyBySelectedScalar(Vector64<float> left, Vector64<float> right, byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);

/// <summary>
/// float32x2_t vmul_laneq_f32 (float32x2_t a, float32x4_t v, const int lane)
///   A32: VMUL.F32 Dd, Dn, Dm[lane]
///   A64: FMUL Vd.2S, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector64<float> MultiplyBySelectedScalar(Vector64<float> left, Vector128<float> right, byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);

/// <summary>
/// uint16x4_t vmul_lane_u16 (uint16x4_t a, uint16x4_t v, const int lane)
///   A32: VMUL.I16 Dd, Dn, Dm[lane]
///   A64: MUL Vd.4H, Vn.4H, Vm.H[lane]
/// </summary>
public static Vector64<ushort> MultiplyBySelectedScalar(Vector64<ushort> left, Vector64<ushort> right, byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);

/// <summary>
/// uint16x4_t vmul_laneq_u16 (uint16x4_t a, uint16x8_t v, const int lane)
///   A32: VMUL.I16 Dd, Dn, Dm[lane]
///   A64: MUL Vd.4H, Vn.4H, Vm.H[lane]
/// </summary>
public static Vector64<ushort> MultiplyBySelectedScalar(Vector64<ushort> left, Vector128<ushort> right, byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);

/// <summary>
/// uint32x2_t vmul_lane_u32 (uint32x2_t a, uint32x2_t v, const int lane)
///   A32: VMUL.I32 Dd, Dn, Dm[lane]
///   A64: MUL Vd.2S, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector64<uint> MultiplyBySelectedScalar(Vector64<uint> left, Vector64<uint> right, byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);

/// <summary>
/// uint32x2_t vmul_laneq_u32 (uint32x2_t a, uint32x4_t v, const int lane)
///   A32: VMUL.I32 Dd, Dn, Dm[lane]
///   A64: MUL Vd.2S, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector64<uint> MultiplyBySelectedScalar(Vector64<uint> left, Vector128<uint> right, byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);

/// <summary>
/// int16x8_t vmulq_lane_s16 (int16x8_t a, int16x4_t v, const int lane)
///   A32: VMUL.I16 Qd, Qn, Dm[lane]
///   A64: MUL Vd.8H, Vn.8H, Vm.H[lane]
/// </summary>
public static Vector128<short> MultiplyBySelectedScalar(Vector128<short> left, Vector64<short> right, byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);

/// <summary>
/// int16x8_t vmulq_laneq_s16 (int16x8_t a, int16x8_t v, const int lane)
///   A32: VMUL.I16 Qd, Qn, Dm[lane]
///   A64: MUL Vd.8H, Vn.8H, Vm.H[lane]
/// </summary>
public static Vector128<short> MultiplyBySelectedScalar(Vector128<short> left, Vector128<short> right, byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);

/// <summary>
/// int32x4_t vmulq_lane_s32 (int32x4_t a, int32x2_t v, const int lane)
///   A32: VMUL.I32 Qd, Qn, Dm[lane]
///   A64: MUL Vd.4S, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<int> MultiplyBySelectedScalar(Vector128<int> left, Vector64<int> right, byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);

/// <summary>
/// int32x4_t vmulq_laneq_s32 (int32x4_t a, int32x4_t v, const int lane)
///   A32: VMUL.I32 Qd, Qn, Dm[lane]
///   A64: MUL Vd.4S, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<int> MultiplyBySelectedScalar(Vector128<int> left, Vector128<int> right, byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);

/// <summary>
/// float32x4_t vmulq_lane_f32 (float32x4_t a, float32x2_t v, const int lane)
///   A32: VMUL.F32 Qd, Qn, Dm[lane]
///   A64: FMUL Vd.4S, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<float> MultiplyBySelectedScalar(Vector128<float> left, Vector64<float> right, byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);

/// <summary>
/// float32x4_t vmulq_laneq_f32 (float32x4_t a, float32x4_t v, const int lane)
///   A32: VMUL.F32 Qd, Qn, Dm[lane]
///   A64: FMUL Vd.4S, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<float> MultiplyBySelectedScalar(Vector128<float> left, Vector128<float> right, byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);

/// <summary>
/// uint16x8_t vmulq_lane_u16 (uint16x8_t a, uint16x4_t v, const int lane)
///   A32: VMUL.I16 Qd, Qn, Dm[lane]
///   A64: MUL Vd.8H, Vn.8H, Vm.H[lane]
/// </summary>
public static Vector128<ushort> MultiplyBySelectedScalar(Vector128<ushort> left, Vector64<ushort> right, byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);

/// <summary>
/// uint16x8_t vmulq_laneq_u16 (uint16x8_t a, uint16x8_t v, const int lane)
///   A32: VMUL.I16 Qd, Qn, Dm[lane]
///   A64: MUL Vd.8H, Vn.8H, Vm.H[lane]
/// </summary>
public static Vector128<ushort> MultiplyBySelectedScalar(Vector128<ushort> left, Vector128<ushort> right, byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);

/// <summary>
/// uint32x4_t vmulq_lane_u32 (uint32x4_t a, uint32x2_t v, const int lane)
///   A32: VMUL.I32 Qd, Qn, Dm[lane]
///   A64: MUL Vd.4S, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<uint> MultiplyBySelectedScalar(Vector128<uint> left, Vector64<uint> right, byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);

/// <summary>
/// uint32x4_t vmulq_laneq_u32 (uint32x4_t a, uint32x4_t v, const int lane)
///   A32: VMUL.I32 Qd, Qn, Dm[lane]
///   A64: MUL Vd.4S, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<uint> MultiplyBySelectedScalar(Vector128<uint> left, Vector128<uint> right, byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);
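// Illustrative sketch: MultiplyBySelectedScalar is the multiply-only counterpart of the
// group above; result[i] = left[i] * right[rightIndex]. Hypothetical values:
//
//   Vector128<float> a = Vector128.Create(1f, 2f, 3f, 4f);
//   Vector64<float>  v = Vector64.Create(10f, 20f);
//   Vector128<float> r = AdvSimd.MultiplyBySelectedScalar(a, v, 1);
//   // r = { 20, 40, 60, 80 }  (every lane multiplied by v[1] == 20)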
/// <summary>
/// int32x4_t vmull_lane_s16 (int16x4_t a, int16x4_t v, const int lane)
///   A32: VMULL.S16 Qd, Dn, Dm[lane]
///   A64: SMULL Vd.4S, Vn.4H, Vm.H[lane]
/// </summary>
public static Vector128<int> MultiplyBySelectedScalarWideningLower(Vector64<short> left, Vector64<short> right, byte rightIndex) => MultiplyBySelectedScalarWideningLower(left, right, rightIndex);

/// <summary>
/// int32x4_t vmull_laneq_s16 (int16x4_t a, int16x8_t v, const int lane)
///   A32: VMULL.S16 Qd, Dn, Dm[lane]
///   A64: SMULL Vd.4S, Vn.4H, Vm.H[lane]
/// </summary>
public static Vector128<int> MultiplyBySelectedScalarWideningLower(Vector64<short> left, Vector128<short> right, byte rightIndex) => MultiplyBySelectedScalarWideningLower(left, right, rightIndex);

/// <summary>
/// int64x2_t vmull_lane_s32 (int32x2_t a, int32x2_t v, const int lane)
///   A32: VMULL.S32 Qd, Dn, Dm[lane]
///   A64: SMULL Vd.2D, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector128<long> MultiplyBySelectedScalarWideningLower(Vector64<int> left, Vector64<int> right, byte rightIndex) => MultiplyBySelectedScalarWideningLower(left, right, rightIndex);

/// <summary>
/// int64x2_t vmull_laneq_s32 (int32x2_t a, int32x4_t v, const int lane)
///   A32: VMULL.S32 Qd, Dn, Dm[lane]
///   A64: SMULL Vd.2D, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector128<long> MultiplyBySelectedScalarWideningLower(Vector64<int> left, Vector128<int> right, byte rightIndex) => MultiplyBySelectedScalarWideningLower(left, right, rightIndex);

/// <summary>
/// uint32x4_t vmull_lane_u16 (uint16x4_t a, uint16x4_t v, const int lane)
///   A32: VMULL.U16 Qd, Dn, Dm[lane]
///   A64: UMULL Vd.4S, Vn.4H, Vm.H[lane]
/// </summary>
public static Vector128<uint> MultiplyBySelectedScalarWideningLower(Vector64<ushort> left, Vector64<ushort> right, byte rightIndex) => MultiplyBySelectedScalarWideningLower(left, right, rightIndex);

/// <summary>
/// uint32x4_t vmull_laneq_u16 (uint16x4_t a, uint16x8_t v, const int lane)
///   A32: VMULL.U16 Qd, Dn, Dm[lane]
///   A64: UMULL Vd.4S, Vn.4H, Vm.H[lane]
/// </summary>
public static Vector128<uint> MultiplyBySelectedScalarWideningLower(Vector64<ushort> left, Vector128<ushort> right, byte rightIndex) => MultiplyBySelectedScalarWideningLower(left, right, rightIndex);

/// <summary>
/// uint64x2_t vmull_lane_u32 (uint32x2_t a, uint32x2_t v, const int lane)
///   A32: VMULL.U32 Qd, Dn, Dm[lane]
///   A64: UMULL Vd.2D, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector128<ulong> MultiplyBySelectedScalarWideningLower(Vector64<uint> left, Vector64<uint> right, byte rightIndex) => MultiplyBySelectedScalarWideningLower(left, right, rightIndex);

/// <summary>
/// uint64x2_t vmull_laneq_u32 (uint32x2_t a, uint32x4_t v, const int lane)
///   A32: VMULL.U32 Qd, Dn, Dm[lane]
///   A64: UMULL Vd.2D, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector128<ulong> MultiplyBySelectedScalarWideningLower(Vector64<uint> left, Vector128<uint> right, byte rightIndex) => MultiplyBySelectedScalarWideningLower(left, right, rightIndex);

/// <summary>
/// int32x4_t vmlal_lane_s16 (int32x4_t a, int16x4_t b, int16x4_t v, const int lane)
///   A32: VMLAL.S16 Qd, Dn, Dm[lane]
///   A64: SMLAL Vd.4S, Vn.4H, Vm.H[lane]
/// </summary>
public static Vector128<int> MultiplyBySelectedScalarWideningLowerAndAdd(Vector128<int> addend, Vector64<short> left, Vector64<short> right, byte rightIndex) => MultiplyBySelectedScalarWideningLowerAndAdd(addend, left, right, rightIndex);

/// <summary>
/// int32x4_t vmlal_laneq_s16 (int32x4_t a, int16x4_t b, int16x8_t v, const int lane)
///   A32: VMLAL.S16 Qd, Dn, Dm[lane]
///   A64: SMLAL Vd.4S, Vn.4H, Vm.H[lane]
/// </summary>
public static Vector128<int> MultiplyBySelectedScalarWideningLowerAndAdd(Vector128<int> addend, Vector64<short> left, Vector128<short> right, byte rightIndex) => MultiplyBySelectedScalarWideningLowerAndAdd(addend, left, right, rightIndex);

/// <summary>
/// int64x2_t vmlal_lane_s32 (int64x2_t a, int32x2_t b, int32x2_t v, const int lane)
///   A32: VMLAL.S32 Qd, Dn, Dm[lane]
///   A64: SMLAL Vd.2D, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector128<long> MultiplyBySelectedScalarWideningLowerAndAdd(Vector128<long> addend, Vector64<int> left, Vector64<int> right, byte rightIndex) => MultiplyBySelectedScalarWideningLowerAndAdd(addend, left, right, rightIndex);

/// <summary>
/// int64x2_t vmlal_laneq_s32 (int64x2_t a, int32x2_t b, int32x4_t v, const int lane)
///   A32: VMLAL.S32 Qd, Dn, Dm[lane]
///   A64: SMLAL Vd.2D, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector128<long> MultiplyBySelectedScalarWideningLowerAndAdd(Vector128<long> addend, Vector64<int> left, Vector128<int> right, byte rightIndex) => MultiplyBySelectedScalarWideningLowerAndAdd(addend, left, right, rightIndex);

/// <summary>
/// uint32x4_t vmlal_lane_u16 (uint32x4_t a, uint16x4_t b, uint16x4_t v, const int lane)
///   A32: VMLAL.U16 Qd, Dn, Dm[lane]
///   A64: UMLAL Vd.4S, Vn.4H, Vm.H[lane]
/// </summary>
public static Vector128<uint> MultiplyBySelectedScalarWideningLowerAndAdd(Vector128<uint> addend, Vector64<ushort> left, Vector64<ushort> right, byte rightIndex) => MultiplyBySelectedScalarWideningLowerAndAdd(addend, left, right, rightIndex);

/// <summary>
/// uint32x4_t vmlal_laneq_u16 (uint32x4_t a, uint16x4_t b, uint16x8_t v, const int lane)
///   A32: VMLAL.U16 Qd, Dn, Dm[lane]
///   A64: UMLAL Vd.4S, Vn.4H, Vm.H[lane]
/// </summary>
public static Vector128<uint> MultiplyBySelectedScalarWideningLowerAndAdd(Vector128<uint> addend, Vector64<ushort> left, Vector128<ushort> right, byte rightIndex) => MultiplyBySelectedScalarWideningLowerAndAdd(addend, left, right, rightIndex);

/// <summary>
/// uint64x2_t vmlal_lane_u32 (uint64x2_t a, uint32x2_t b, uint32x2_t v, const int lane)
///   A32: VMLAL.U32 Qd, Dn, Dm[lane]
///   A64: UMLAL Vd.2D, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector128<ulong> MultiplyBySelectedScalarWideningLowerAndAdd(Vector128<ulong> addend, Vector64<uint> left, Vector64<uint> right, byte rightIndex) => MultiplyBySelectedScalarWideningLowerAndAdd(addend, left, right, rightIndex);

/// <summary>
/// uint64x2_t vmlal_laneq_u32 (uint64x2_t a, uint32x2_t b, uint32x4_t v, const int lane)
///   A32: VMLAL.U32 Qd, Dn, Dm[lane]
///   A64: UMLAL Vd.2D, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector128<ulong> MultiplyBySelectedScalarWideningLowerAndAdd(Vector128<ulong> addend, Vector64<uint> left, Vector128<uint> right, byte rightIndex) => MultiplyBySelectedScalarWideningLowerAndAdd(addend, left, right, rightIndex);
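// Illustrative sketch: the WideningLowerAndAdd family (vmlal) widens each 16-bit product
// to 32 bits before accumulating, so the product cannot overflow the element type.
// Hypothetical values:
//
//   Vector128<int>  acc = Vector128<int>.Zero;
//   Vector64<short> a   = Vector64.Create((short)1000, (short)2000, (short)3000, (short)4000);
//   Vector64<short> v   = Vector64.Create((short)100, (short)200, (short)300, (short)400);
//   Vector128<int>  r   = AdvSimd.MultiplyBySelectedScalarWideningLowerAndAdd(acc, a, v, 0);
//   // r = { 100000, 200000, 300000, 400000 }  (32-bit results; each would overflow Int16)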
/// <summary>
/// int32x4_t vmlsl_lane_s16 (int32x4_t a, int16x4_t b, int16x4_t v, const int lane)
///   A32: VMLSL.S16 Qd, Dn, Dm[lane]
///   A64: SMLSL Vd.4S, Vn.4H, Vm.H[lane]
/// </summary>
public static Vector128<int> MultiplyBySelectedScalarWideningLowerAndSubtract(Vector128<int> minuend, Vector64<short> left, Vector64<short> right, byte rightIndex) => MultiplyBySelectedScalarWideningLowerAndSubtract(minuend, left, right, rightIndex);

/// <summary>
/// int32x4_t vmlsl_laneq_s16 (int32x4_t a, int16x4_t b, int16x8_t v, const int lane)
///   A32: VMLSL.S16 Qd, Dn, Dm[lane]
///   A64: SMLSL Vd.4S, Vn.4H, Vm.H[lane]
/// </summary>
public static Vector128<int> MultiplyBySelectedScalarWideningLowerAndSubtract(Vector128<int> minuend, Vector64<short> left, Vector128<short> right, byte rightIndex) => MultiplyBySelectedScalarWideningLowerAndSubtract(minuend, left, right, rightIndex);

/// <summary>
/// int64x2_t vmlsl_lane_s32 (int64x2_t a, int32x2_t b, int32x2_t v, const int lane)
///   A32: VMLSL.S32 Qd, Dn, Dm[lane]
///   A64: SMLSL Vd.2D, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector128<long> MultiplyBySelectedScalarWideningLowerAndSubtract(Vector128<long> minuend, Vector64<int> left, Vector64<int> right, byte rightIndex) => MultiplyBySelectedScalarWideningLowerAndSubtract(minuend, left, right, rightIndex);

/// <summary>
/// int64x2_t vmlsl_laneq_s32 (int64x2_t a, int32x2_t b, int32x4_t v, const int lane)
///   A32: VMLSL.S32 Qd, Dn, Dm[lane]
///   A64: SMLSL Vd.2D, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector128<long> MultiplyBySelectedScalarWideningLowerAndSubtract(Vector128<long> minuend, Vector64<int> left, Vector128<int> right, byte rightIndex) => MultiplyBySelectedScalarWideningLowerAndSubtract(minuend, left, right, rightIndex);

/// <summary>
/// uint32x4_t vmlsl_lane_u16 (uint32x4_t a, uint16x4_t b, uint16x4_t v, const int lane)
///   A32: VMLSL.U16 Qd, Dn, Dm[lane]
///   A64: UMLSL Vd.4S, Vn.4H, Vm.H[lane]
/// </summary>
public static Vector128<uint> MultiplyBySelectedScalarWideningLowerAndSubtract(Vector128<uint> minuend, Vector64<ushort> left, Vector64<ushort> right, byte rightIndex) => MultiplyBySelectedScalarWideningLowerAndSubtract(minuend, left, right, rightIndex);

/// <summary>
/// uint32x4_t vmlsl_laneq_u16 (uint32x4_t a, uint16x4_t b, uint16x8_t v, const int lane)
///   A32: VMLSL.U16 Qd, Dn, Dm[lane]
///   A64: UMLSL Vd.4S, Vn.4H, Vm.H[lane]
/// </summary>
public static Vector128<uint> MultiplyBySelectedScalarWideningLowerAndSubtract(Vector128<uint> minuend, Vector64<ushort> left, Vector128<ushort> right, byte rightIndex) => MultiplyBySelectedScalarWideningLowerAndSubtract(minuend, left, right, rightIndex);

/// <summary>
/// uint64x2_t vmlsl_lane_u32 (uint64x2_t a, uint32x2_t b, uint32x2_t v, const int lane)
///   A32: VMLSL.U32 Qd, Dn, Dm[lane]
///   A64: UMLSL Vd.2D, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector128<ulong> MultiplyBySelectedScalarWideningLowerAndSubtract(Vector128<ulong> minuend, Vector64<uint> left, Vector64<uint> right, byte rightIndex) => MultiplyBySelectedScalarWideningLowerAndSubtract(minuend, left, right, rightIndex);

/// <summary>
/// uint64x2_t vmlsl_laneq_u32 (uint64x2_t a, uint32x2_t b, uint32x4_t v, const int lane)
///   A32: VMLSL.U32 Qd, Dn, Dm[lane]
///   A64: UMLSL Vd.2D, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector128<ulong> MultiplyBySelectedScalarWideningLowerAndSubtract(Vector128<ulong> minuend, Vector64<uint> left, Vector128<uint> right, byte rightIndex) => MultiplyBySelectedScalarWideningLowerAndSubtract(minuend, left, right, rightIndex);

/// <summary>
/// int32x4_t vmull_high_lane_s16 (int16x8_t a, int16x4_t v, const int lane)
///   A32: VMULL.S16 Qd, Dn+1, Dm[lane]
///   A64: SMULL2 Vd.4S, Vn.8H, Vm.H[lane]
/// </summary>
public static Vector128<int> MultiplyBySelectedScalarWideningUpper(Vector128<short> left, Vector64<short> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpper(left, right, rightIndex);

/// <summary>
/// int32x4_t vmull_high_laneq_s16 (int16x8_t a, int16x8_t v, const int lane)
///   A32: VMULL.S16 Qd, Dn+1, Dm[lane]
///   A64: SMULL2 Vd.4S, Vn.8H, Vm.H[lane]
/// </summary>
public static Vector128<int> MultiplyBySelectedScalarWideningUpper(Vector128<short> left, Vector128<short> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpper(left, right, rightIndex);

/// <summary>
/// int64x2_t vmull_high_lane_s32 (int32x4_t a, int32x2_t v, const int lane)
///   A32: VMULL.S32 Qd, Dn+1, Dm[lane]
///   A64: SMULL2 Vd.2D, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<long> MultiplyBySelectedScalarWideningUpper(Vector128<int> left, Vector64<int> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpper(left, right, rightIndex);

/// <summary>
/// int64x2_t vmull_high_laneq_s32 (int32x4_t a, int32x4_t v, const int lane)
///   A32: VMULL.S32 Qd, Dn+1, Dm[lane]
///   A64: SMULL2 Vd.2D, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<long> MultiplyBySelectedScalarWideningUpper(Vector128<int> left, Vector128<int> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpper(left, right, rightIndex);

/// <summary>
/// uint32x4_t vmull_high_lane_u16 (uint16x8_t a, uint16x4_t v, const int lane)
///   A32: VMULL.U16 Qd, Dn+1, Dm[lane]
///   A64: UMULL2 Vd.4S, Vn.8H, Vm.H[lane]
/// </summary>
public static Vector128<uint> MultiplyBySelectedScalarWideningUpper(Vector128<ushort> left, Vector64<ushort> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpper(left, right, rightIndex);

/// <summary>
/// uint32x4_t vmull_high_laneq_u16 (uint16x8_t a, uint16x8_t v, const int lane)
///   A32: VMULL.U16 Qd, Dn+1, Dm[lane]
///   A64: UMULL2 Vd.4S, Vn.8H, Vm.H[lane]
/// </summary>
public static Vector128<uint> MultiplyBySelectedScalarWideningUpper(Vector128<ushort> left, Vector128<ushort> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpper(left, right, rightIndex);

/// <summary>
/// uint64x2_t vmull_high_lane_u32 (uint32x4_t a, uint32x2_t v, const int lane)
///   A32: VMULL.U32 Qd, Dn+1, Dm[lane]
///   A64: UMULL2 Vd.2D, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<ulong> MultiplyBySelectedScalarWideningUpper(Vector128<uint> left, Vector64<uint> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpper(left, right, rightIndex);

/// <summary>
/// uint64x2_t vmull_high_laneq_u32 (uint32x4_t a, uint32x4_t v, const int lane)
///   A32: VMULL.U32 Qd, Dn+1, Dm[lane]
///   A64: UMULL2 Vd.2D, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<ulong> MultiplyBySelectedScalarWideningUpper(Vector128<uint> left, Vector128<uint> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpper(left, right, rightIndex);

/// <summary>
/// int32x4_t vmlal_high_lane_s16 (int32x4_t a, int16x8_t b, int16x4_t v, const int lane)
///   A32: VMLAL.S16 Qd, Dn+1, Dm[lane]
///   A64: SMLAL2 Vd.4S, Vn.8H, Vm.H[lane]
/// </summary>
public static Vector128<int> MultiplyBySelectedScalarWideningUpperAndAdd(Vector128<int> addend, Vector128<short> left, Vector64<short> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpperAndAdd(addend, left, right, rightIndex);

/// <summary>
/// int32x4_t vmlal_high_laneq_s16 (int32x4_t a, int16x8_t b, int16x8_t v, const int lane)
///   A32: VMLAL.S16 Qd, Dn+1, Dm[lane]
///   A64: SMLAL2 Vd.4S, Vn.8H, Vm.H[lane]
/// </summary>
public static Vector128<int> MultiplyBySelectedScalarWideningUpperAndAdd(Vector128<int> addend, Vector128<short> left, Vector128<short> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpperAndAdd(addend, left, right, rightIndex);

/// <summary>
/// int64x2_t vmlal_high_lane_s32 (int64x2_t a, int32x4_t b, int32x2_t v, const int lane)
///   A32: VMLAL.S32 Qd, Dn+1, Dm[lane]
///   A64: SMLAL2 Vd.2D, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<long> MultiplyBySelectedScalarWideningUpperAndAdd(Vector128<long> addend, Vector128<int> left, Vector64<int> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpperAndAdd(addend, left, right, rightIndex);

/// <summary>
/// int64x2_t vmlal_high_laneq_s32 (int64x2_t a, int32x4_t b, int32x4_t v, const int lane)
///   A32: VMLAL.S32 Qd, Dn+1, Dm[lane]
///   A64: SMLAL2 Vd.2D, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<long> MultiplyBySelectedScalarWideningUpperAndAdd(Vector128<long> addend, Vector128<int> left, Vector128<int> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpperAndAdd(addend, left, right, rightIndex);

/// <summary>
/// uint32x4_t vmlal_high_lane_u16 (uint32x4_t a, uint16x8_t b, uint16x4_t v, const int lane)
///   A32: VMLAL.U16 Qd, Dn+1, Dm[lane]
///   A64: UMLAL2 Vd.4S, Vn.8H, Vm.H[lane]
/// </summary>
public static Vector128<uint> MultiplyBySelectedScalarWideningUpperAndAdd(Vector128<uint> addend, Vector128<ushort> left, Vector64<ushort> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpperAndAdd(addend, left, right, rightIndex);

/// <summary>
/// uint32x4_t vmlal_high_laneq_u16 (uint32x4_t a, uint16x8_t b, uint16x8_t v, const int lane)
///   A32: VMLAL.U16 Qd, Dn+1, Dm[lane]
///   A64: UMLAL2 Vd.4S, Vn.8H, Vm.H[lane]
/// </summary>
public static Vector128<uint> MultiplyBySelectedScalarWideningUpperAndAdd(Vector128<uint> addend, Vector128<ushort> left, Vector128<ushort> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpperAndAdd(addend, left, right, rightIndex);

/// <summary>
/// uint64x2_t vmlal_high_lane_u32 (uint64x2_t a, uint32x4_t b, uint32x2_t v, const int lane)
///   A32: VMLAL.U32 Qd, Dn+1, Dm[lane]
///   A64: UMLAL2 Vd.2D, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<ulong> MultiplyBySelectedScalarWideningUpperAndAdd(Vector128<ulong> addend, Vector128<uint> left, Vector64<uint> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpperAndAdd(addend, left, right, rightIndex);

/// <summary>
/// uint64x2_t vmlal_high_laneq_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t v, const int lane)
///   A32: VMLAL.U32 Qd, Dn+1, Dm[lane]
///   A64: UMLAL2 Vd.2D, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<ulong> MultiplyBySelectedScalarWideningUpperAndAdd(Vector128<ulong> addend, Vector128<uint> left, Vector128<uint> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpperAndAdd(addend, left, right, rightIndex);
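// Illustrative note: the *Upper variants (vmull_high/vmlal_high/vmlsl_high) read the upper
// half of the 128-bit `left` operand, so widening a full Vector128<short> takes one Lower
// call plus one Upper call. Sketch with hypothetical names (`data`, `v`):
//
//   Vector128<int> lo = AdvSimd.MultiplyBySelectedScalarWideningLower(data.GetLower(), v, 0);
//   Vector128<int> hi = AdvSimd.MultiplyBySelectedScalarWideningUpper(data, v, 0);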
/// <summary>
/// int32x4_t vmlsl_high_lane_s16 (int32x4_t a, int16x8_t b, int16x4_t v, const int lane)
///   A32: VMLSL.S16 Qd, Dn+1, Dm[lane]
///   A64: SMLSL2 Vd.4S, Vn.8H, Vm.H[lane]
/// </summary>
public static Vector128<int> MultiplyBySelectedScalarWideningUpperAndSubtract(Vector128<int> minuend, Vector128<short> left, Vector64<short> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpperAndSubtract(minuend, left, right, rightIndex);

/// <summary>
/// int32x4_t vmlsl_high_laneq_s16 (int32x4_t a, int16x8_t b, int16x8_t v, const int lane)
///   A32: VMLSL.S16 Qd, Dn+1, Dm[lane]
///   A64: SMLSL2 Vd.4S, Vn.8H, Vm.H[lane]
/// </summary>
public static Vector128<int> MultiplyBySelectedScalarWideningUpperAndSubtract(Vector128<int> minuend, Vector128<short> left, Vector128<short> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpperAndSubtract(minuend, left, right, rightIndex);

/// <summary>
/// int64x2_t vmlsl_high_lane_s32 (int64x2_t a, int32x4_t b, int32x2_t v, const int lane)
///   A32: VMLSL.S32 Qd, Dn+1, Dm[lane]
///   A64: SMLSL2 Vd.2D, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<long> MultiplyBySelectedScalarWideningUpperAndSubtract(Vector128<long> minuend, Vector128<int> left, Vector64<int> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpperAndSubtract(minuend, left, right, rightIndex);

/// <summary>
/// int64x2_t vmlsl_high_laneq_s32 (int64x2_t a, int32x4_t b, int32x4_t v, const int lane)
///   A32: VMLSL.S32 Qd, Dn+1, Dm[lane]
///   A64: SMLSL2 Vd.2D, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<long> MultiplyBySelectedScalarWideningUpperAndSubtract(Vector128<long> minuend, Vector128<int> left, Vector128<int> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpperAndSubtract(minuend, left, right, rightIndex);

/// <summary>
/// uint32x4_t vmlsl_high_lane_u16 (uint32x4_t a, uint16x8_t b, uint16x4_t v, const int lane)
///   A32: VMLSL.U16 Qd, Dn+1, Dm[lane]
///   A64: UMLSL2 Vd.4S, Vn.8H, Vm.H[lane]
/// </summary>
public static Vector128<uint> MultiplyBySelectedScalarWideningUpperAndSubtract(Vector128<uint> minuend, Vector128<ushort> left, Vector64<ushort> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpperAndSubtract(minuend, left, right, rightIndex);

/// <summary>
/// uint32x4_t vmlsl_high_laneq_u16 (uint32x4_t a, uint16x8_t b, uint16x8_t v, const int lane)
///   A32: VMLSL.U16 Qd, Dn+1, Dm[lane]
///   A64: UMLSL2 Vd.4S, Vn.8H, Vm.H[lane]
/// </summary>
public static Vector128<uint> MultiplyBySelectedScalarWideningUpperAndSubtract(Vector128<uint> minuend, Vector128<ushort> left, Vector128<ushort> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpperAndSubtract(minuend, left, right, rightIndex);

/// <summary>
/// uint64x2_t vmlsl_high_lane_u32 (uint64x2_t a, uint32x4_t b, uint32x2_t v, const int lane)
///   A32: VMLSL.U32 Qd, Dn+1, Dm[lane]
///   A64: UMLSL2 Vd.2D, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<ulong> MultiplyBySelectedScalarWideningUpperAndSubtract(Vector128<ulong> minuend, Vector128<uint> left, Vector64<uint> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpperAndSubtract(minuend, left, right, rightIndex);

/// <summary>
/// uint64x2_t vmlsl_high_laneq_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t v, const int lane)
///   A32: VMLSL.U32 Qd, Dn+1, Dm[lane]
///   A64: UMLSL2 Vd.2D, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<ulong> MultiplyBySelectedScalarWideningUpperAndSubtract(Vector128<ulong> minuend, Vector128<uint> left, Vector128<uint> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpperAndSubtract(minuend, left, right, rightIndex);

/// <summary>
/// int16x4_t vqdmulh_n_s16 (int16x4_t a, int16_t b)
///   A32: VQDMULH.S16 Dd, Dn, Dm[0]
///   A64: SQDMULH Vd.4H, Vn.4H, Vm.H[0]
/// </summary>
public static Vector64<short> MultiplyDoublingByScalarSaturateHigh(Vector64<short> left, Vector64<short> right) => MultiplyDoublingByScalarSaturateHigh(left, right);

/// <summary>
/// int32x2_t vqdmulh_n_s32 (int32x2_t a, int32_t b)
///   A32: VQDMULH.S32 Dd, Dn, Dm[0]
///   A64: SQDMULH Vd.2S, Vn.2S, Vm.S[0]
/// </summary>
public static Vector64<int> MultiplyDoublingByScalarSaturateHigh(Vector64<int> left, Vector64<int> right) => MultiplyDoublingByScalarSaturateHigh(left, right);

/// <summary>
/// int16x8_t vqdmulhq_n_s16 (int16x8_t a, int16_t b)
///   A32: VQDMULH.S16 Qd, Qn, Dm[0]
///   A64: SQDMULH Vd.8H, Vn.8H, Vm.H[0]
/// </summary>
public static Vector128<short> MultiplyDoublingByScalarSaturateHigh(Vector128<short> left, Vector64<short> right) => MultiplyDoublingByScalarSaturateHigh(left, right);

/// <summary>
/// int32x4_t vqdmulhq_n_s32 (int32x4_t a, int32_t b)
///   A32: VQDMULH.S32 Qd, Qn, Dm[0]
///   A64: SQDMULH Vd.4S, Vn.4S, Vm.S[0]
/// </summary>
public static Vector128<int> MultiplyDoublingByScalarSaturateHigh(Vector128<int> left, Vector64<int> right) => MultiplyDoublingByScalarSaturateHigh(left, right);

/// <summary>
/// int16x4_t vqdmulh_lane_s16 (int16x4_t a, int16x4_t v, const int lane)
///   A32: VQDMULH.S16 Dd, Dn, Dm[lane]
///   A64: SQDMULH Vd.4H, Vn.4H, Vm.H[lane]
/// </summary>
public static Vector64<short> MultiplyDoublingBySelectedScalarSaturateHigh(Vector64<short> left, Vector64<short> right, byte rightIndex) => MultiplyDoublingBySelectedScalarSaturateHigh(left, right, rightIndex);

/// <summary>
/// int16x4_t vqdmulh_laneq_s16 (int16x4_t a, int16x8_t v, const int lane)
///   A32: VQDMULH.S16 Dd, Dn, Dm[lane]
///   A64: SQDMULH Vd.4H, Vn.4H, Vm.H[lane]
/// </summary>
public static Vector64<short> MultiplyDoublingBySelectedScalarSaturateHigh(Vector64<short> left, Vector128<short> right, byte rightIndex) => MultiplyDoublingBySelectedScalarSaturateHigh(left, right, rightIndex);

/// <summary>
/// int32x2_t vqdmulh_lane_s32 (int32x2_t a, int32x2_t v, const int lane)
///   A32: VQDMULH.S32 Dd, Dn, Dm[lane]
///   A64: SQDMULH Vd.2S, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector64<int> MultiplyDoublingBySelectedScalarSaturateHigh(Vector64<int> left, Vector64<int> right, byte rightIndex) => MultiplyDoublingBySelectedScalarSaturateHigh(left, right, rightIndex);

/// <summary>
/// int32x2_t vqdmulh_laneq_s32 (int32x2_t a, int32x4_t v, const int lane)
///   A32: VQDMULH.S32 Dd, Dn, Dm[lane]
///   A64: SQDMULH Vd.2S, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector64<int> MultiplyDoublingBySelectedScalarSaturateHigh(Vector64<int> left, Vector128<int> right, byte rightIndex) => MultiplyDoublingBySelectedScalarSaturateHigh(left, right, rightIndex);

/// <summary>
/// int16x8_t vqdmulhq_lane_s16 (int16x8_t a, int16x4_t v, const int lane)
///   A32: VQDMULH.S16 Qd, Qn, Dm[lane]
///   A64: SQDMULH Vd.8H, Vn.8H, Vm.H[lane]
/// </summary>
public static Vector128<short> MultiplyDoublingBySelectedScalarSaturateHigh(Vector128<short> left, Vector64<short> right, byte rightIndex) => MultiplyDoublingBySelectedScalarSaturateHigh(left, right, rightIndex);

/// <summary>
/// int16x8_t vqdmulhq_laneq_s16 (int16x8_t a, int16x8_t v, const int lane)
///   A32: VQDMULH.S16 Qd, Qn, Dm[lane]
///   A64: SQDMULH Vd.8H, Vn.8H, Vm.H[lane]
/// </summary>
public static Vector128<short> MultiplyDoublingBySelectedScalarSaturateHigh(Vector128<short> left, Vector128<short> right, byte rightIndex) => MultiplyDoublingBySelectedScalarSaturateHigh(left, right, rightIndex);

/// <summary>
/// int32x4_t vqdmulhq_lane_s32 (int32x4_t a, int32x2_t v, const int lane)
///   A32: VQDMULH.S32 Qd, Qn, Dm[lane]
///   A64: SQDMULH Vd.4S, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<int> MultiplyDoublingBySelectedScalarSaturateHigh(Vector128<int> left, Vector64<int> right, byte rightIndex) => MultiplyDoublingBySelectedScalarSaturateHigh(left, right, rightIndex);

/// <summary>
/// int32x4_t vqdmulhq_laneq_s32 (int32x4_t a, int32x4_t v, const int lane)
///   A32: VQDMULH.S32 Qd, Qn, Dm[lane]
///   A64: SQDMULH Vd.4S, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<int> MultiplyDoublingBySelectedScalarSaturateHigh(Vector128<int> left, Vector128<int> right, byte rightIndex) => MultiplyDoublingBySelectedScalarSaturateHigh(left, right, rightIndex);

/// <summary>
/// int16x4_t vqdmulh_s16 (int16x4_t a, int16x4_t b)
///   A32: VQDMULH.S16 Dd, Dn, Dm
///   A64: SQDMULH Vd.4H, Vn.4H, Vm.4H
/// </summary>
public static Vector64<short> MultiplyDoublingSaturateHigh(Vector64<short> left, Vector64<short> right) => MultiplyDoublingSaturateHigh(left, right);

/// <summary>
/// int32x2_t vqdmulh_s32 (int32x2_t a, int32x2_t b)
///   A32: VQDMULH.S32 Dd, Dn, Dm
///   A64: SQDMULH Vd.2S, Vn.2S, Vm.2S
/// </summary>
public static Vector64<int> MultiplyDoublingSaturateHigh(Vector64<int> left, Vector64<int> right) => MultiplyDoublingSaturateHigh(left, right);

/// <summary>
/// int16x8_t vqdmulhq_s16 (int16x8_t a, int16x8_t b)
///   A32: VQDMULH.S16 Qd, Qn, Qm
///   A64: SQDMULH Vd.8H, Vn.8H, Vm.8H
/// </summary>
public static Vector128<short> MultiplyDoublingSaturateHigh(Vector128<short> left, Vector128<short> right) => MultiplyDoublingSaturateHigh(left, right);

/// <summary>
/// int32x4_t vqdmulhq_s32 (int32x4_t a, int32x4_t b)
///   A32: VQDMULH.S32 Qd, Qn, Qm
///   A64: SQDMULH Vd.4S, Vn.4S, Vm.4S
/// </summary>
public static Vector128<int> MultiplyDoublingSaturateHigh(Vector128<int> left, Vector128<int> right) => MultiplyDoublingSaturateHigh(left, right);

/// <summary>
/// int32x4_t vqdmlal_s16 (int32x4_t a, int16x4_t b, int16x4_t c)
///   A32: VQDMLAL.S16 Qd, Dn, Dm
///   A64: SQDMLAL Vd.4S, Vn.4H, Vm.4H
/// </summary>
public static Vector128<int> MultiplyDoublingWideningLowerAndAddSaturate(Vector128<int> addend, Vector64<short> left, Vector64<short> right) => MultiplyDoublingWideningLowerAndAddSaturate(addend, left, right);

/// <summary>
/// int64x2_t vqdmlal_s32 (int64x2_t a, int32x2_t b, int32x2_t c)
///   A32: VQDMLAL.S32 Qd, Dn, Dm
///   A64: SQDMLAL Vd.2D, Vn.2S, Vm.2S
/// </summary>
public static Vector128<long> MultiplyDoublingWideningLowerAndAddSaturate(Vector128<long> addend, Vector64<int> left, Vector64<int> right) => MultiplyDoublingWideningLowerAndAddSaturate(addend, left, right);
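// Illustrative note: the MultiplyDoubling*SaturateHigh family above (vqdmulh) computes the
// high half of 2 * a * b with saturation, the classic Q15/Q31 fixed-point multiply. For
// Int16 lanes the per-lane behavior is roughly the following sketch (hypothetical helper):
//
//   short QDMulH16(short a, short b)
//   {
//       if (a == short.MinValue && b == short.MinValue)
//           return short.MaxValue;         // the only input pair whose doubled product saturates
//       int product2 = 2 * a * b;          // doubling product, fits in 32 bits otherwise
//       return (short)(product2 >> 16);    // keep the high 16 bits (truncating)
//   }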
/// <summary>
/// int32x4_t vqdmlsl_s16 (int32x4_t a, int16x4_t b, int16x4_t c)
///   A32: VQDMLSL.S16 Qd, Dn, Dm
///   A64: SQDMLSL Vd.4S, Vn.4H, Vm.4H
/// </summary>
public static Vector128<int> MultiplyDoublingWideningLowerAndSubtractSaturate(Vector128<int> minuend, Vector64<short> left, Vector64<short> right) => MultiplyDoublingWideningLowerAndSubtractSaturate(minuend, left, right);

/// <summary>
/// int64x2_t vqdmlsl_s32 (int64x2_t a, int32x2_t b, int32x2_t c)
///   A32: VQDMLSL.S32 Qd, Dn, Dm
///   A64: SQDMLSL Vd.2D, Vn.2S, Vm.2S
/// </summary>
public static Vector128<long> MultiplyDoublingWideningLowerAndSubtractSaturate(Vector128<long> minuend, Vector64<int> left, Vector64<int> right) => MultiplyDoublingWideningLowerAndSubtractSaturate(minuend, left, right);

/// <summary>
/// int32x4_t vqdmlal_n_s16 (int32x4_t a, int16x4_t b, int16_t c)
///   A32: VQDMLAL.S16 Qd, Dn, Dm[0]
///   A64: SQDMLAL Vd.4S, Vn.4H, Vm.H[0]
/// </summary>
public static Vector128<int> MultiplyDoublingWideningLowerByScalarAndAddSaturate(Vector128<int> addend, Vector64<short> left, Vector64<short> right) => MultiplyDoublingWideningLowerByScalarAndAddSaturate(addend, left, right);

/// <summary>
/// int64x2_t vqdmlal_n_s32 (int64x2_t a, int32x2_t b, int32_t c)
///   A32: VQDMLAL.S32 Qd, Dn, Dm[0]
///   A64: SQDMLAL Vd.2D, Vn.2S, Vm.S[0]
/// </summary>
public static Vector128<long> MultiplyDoublingWideningLowerByScalarAndAddSaturate(Vector128<long> addend, Vector64<int> left, Vector64<int> right) => MultiplyDoublingWideningLowerByScalarAndAddSaturate(addend, left, right);

/// <summary>
/// int32x4_t vqdmlsl_n_s16 (int32x4_t a, int16x4_t b, int16_t c)
///   A32: VQDMLSL.S16 Qd, Dn, Dm[0]
///   A64: SQDMLSL Vd.4S, Vn.4H, Vm.H[0]
/// </summary>
public static Vector128<int> MultiplyDoublingWideningLowerByScalarAndSubtractSaturate(Vector128<int> minuend, Vector64<short> left, Vector64<short> right) => MultiplyDoublingWideningLowerByScalarAndSubtractSaturate(minuend, left, right);

/// <summary>
/// int64x2_t vqdmlsl_n_s32 (int64x2_t a, int32x2_t b, int32_t c)
///   A32: VQDMLSL.S32 Qd, Dn, Dm[0]
///   A64: SQDMLSL Vd.2D, Vn.2S, Vm.S[0]
/// </summary>
public static Vector128<long> MultiplyDoublingWideningLowerByScalarAndSubtractSaturate(Vector128<long> minuend, Vector64<int> left, Vector64<int> right) => MultiplyDoublingWideningLowerByScalarAndSubtractSaturate(minuend, left, right);

/// <summary>
/// int32x4_t vqdmlal_lane_s16 (int32x4_t a, int16x4_t b, int16x4_t v, const int lane)
///   A32: VQDMLAL.S16 Qd, Dn, Dm[lane]
///   A64: SQDMLAL Vd.4S, Vn.4H, Vm.H[lane]
/// </summary>
public static Vector128<int> MultiplyDoublingWideningLowerBySelectedScalarAndAddSaturate(Vector128<int> addend, Vector64<short> left, Vector64<short> right, byte rightIndex) => MultiplyDoublingWideningLowerBySelectedScalarAndAddSaturate(addend, left, right, rightIndex);

/// <summary>
/// int32x4_t vqdmlal_laneq_s16 (int32x4_t a, int16x4_t b, int16x8_t v, const int lane)
///   A32: VQDMLAL.S16 Qd, Dn, Dm[lane]
///   A64: SQDMLAL Vd.4S, Vn.4H, Vm.H[lane]
/// </summary>
public static Vector128<int> MultiplyDoublingWideningLowerBySelectedScalarAndAddSaturate(Vector128<int> addend, Vector64<short> left, Vector128<short> right, byte rightIndex) => MultiplyDoublingWideningLowerBySelectedScalarAndAddSaturate(addend, left, right, rightIndex);

/// <summary>
/// int64x2_t vqdmlal_lane_s32 (int64x2_t a, int32x2_t b, int32x2_t v, const int lane)
///   A32: VQDMLAL.S32 Qd, Dn, Dm[lane]
///   A64: SQDMLAL Vd.2D, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector128<long> MultiplyDoublingWideningLowerBySelectedScalarAndAddSaturate(Vector128<long> addend, Vector64<int> left, Vector64<int> right, byte rightIndex) => MultiplyDoublingWideningLowerBySelectedScalarAndAddSaturate(addend, left, right, rightIndex);

/// <summary>
/// int64x2_t vqdmlal_laneq_s32 (int64x2_t a, int32x2_t b, int32x4_t v, const int lane)
///   A32: VQDMLAL.S32 Qd, Dn, Dm[lane]
///   A64: SQDMLAL Vd.2D, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector128<long> MultiplyDoublingWideningLowerBySelectedScalarAndAddSaturate(Vector128<long> addend, Vector64<int> left, Vector128<int> right, byte rightIndex) => MultiplyDoublingWideningLowerBySelectedScalarAndAddSaturate(addend, left, right, rightIndex);

/// <summary>
/// int32x4_t vqdmlsl_lane_s16 (int32x4_t a, int16x4_t b, int16x4_t v, const int lane)
///   A32: VQDMLSL.S16 Qd, Dn, Dm[lane]
///   A64: SQDMLSL Vd.4S, Vn.4H, Vm.H[lane]
/// </summary>
public static Vector128<int> MultiplyDoublingWideningLowerBySelectedScalarAndSubtractSaturate(Vector128<int> minuend, Vector64<short> left, Vector64<short> right, byte rightIndex) => MultiplyDoublingWideningLowerBySelectedScalarAndSubtractSaturate(minuend, left, right, rightIndex);

/// <summary>
/// int32x4_t vqdmlsl_laneq_s16 (int32x4_t a, int16x4_t b, int16x8_t v, const int lane)
///   A32: VQDMLSL.S16 Qd, Dn, Dm[lane]
///   A64: SQDMLSL Vd.4S, Vn.4H, Vm.H[lane]
/// </summary>
public static Vector128<int> MultiplyDoublingWideningLowerBySelectedScalarAndSubtractSaturate(Vector128<int> minuend, Vector64<short> left, Vector128<short> right, byte rightIndex) => MultiplyDoublingWideningLowerBySelectedScalarAndSubtractSaturate(minuend, left, right, rightIndex);

/// <summary>
/// int64x2_t vqdmlsl_lane_s32 (int64x2_t a, int32x2_t b, int32x2_t v, const int lane)
///   A32: VQDMLSL.S32 Qd, Dn, Dm[lane]
///   A64: SQDMLSL Vd.2D, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector128<long> MultiplyDoublingWideningLowerBySelectedScalarAndSubtractSaturate(Vector128<long> minuend, Vector64<int> left, Vector64<int> right, byte rightIndex) => MultiplyDoublingWideningLowerBySelectedScalarAndSubtractSaturate(minuend, left, right, rightIndex);

/// <summary>
/// int64x2_t vqdmlsl_laneq_s32 (int64x2_t a, int32x2_t b, int32x4_t v, const int lane)
///   A32: VQDMLSL.S32 Qd, Dn, Dm[lane]
///   A64: SQDMLSL Vd.2D, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector128<long> MultiplyDoublingWideningLowerBySelectedScalarAndSubtractSaturate(Vector128<long> minuend, Vector64<int> left, Vector128<int> right, byte rightIndex) => MultiplyDoublingWideningLowerBySelectedScalarAndSubtractSaturate(minuend, left, right, rightIndex);

/// <summary>
/// int32x4_t vqdmull_s16 (int16x4_t a, int16x4_t b)
///   A32: VQDMULL.S16 Qd, Dn, Dm
///   A64: SQDMULL Vd.4S, Vn.4H, Vm.4H
/// </summary>
public static Vector128<int> MultiplyDoublingWideningSaturateLower(Vector64<short> left, Vector64<short> right) => MultiplyDoublingWideningSaturateLower(left, right);

/// <summary>
/// int64x2_t vqdmull_s32 (int32x2_t a, int32x2_t b)
///   A32: VQDMULL.S32 Qd, Dn, Dm
///   A64: SQDMULL Vd.2D, Vn.2S, Vm.2S
/// </summary>
public static Vector128<long> MultiplyDoublingWideningSaturateLower(Vector64<int> left, Vector64<int> right) => MultiplyDoublingWideningSaturateLower(left, right);
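// Illustrative note: MultiplyDoublingWideningSaturateLower (vqdmull) widens before doubling,
// so for Int16 inputs only the single pair (Int16.MinValue, Int16.MinValue) can saturate the
// Int32 result. Rough per-lane sketch (hypothetical helper):
//
//   int QDMull16(short a, short b)
//   {
//       long product2 = 2L * a * b;        // doubling product, computed in 64 bits to be safe
//       return product2 > int.MaxValue ? int.MaxValue : (int)product2;
//   }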
/// <summary>
/// int32x4_t vqdmull_n_s16 (int16x4_t a, int16_t b)
///   A32: VQDMULL.S16 Qd, Dn, Dm[0]
///   A64: SQDMULL Vd.4S, Vn.4H, Vm.H[0]
/// </summary>
public static Vector128<int> MultiplyDoublingWideningSaturateLowerByScalar(Vector64<short> left, Vector64<short> right) => MultiplyDoublingWideningSaturateLowerByScalar(left, right);

/// <summary>
/// int64x2_t vqdmull_n_s32 (int32x2_t a, int32_t b)
///   A32: VQDMULL.S32 Qd, Dn, Dm[0]
///   A64: SQDMULL Vd.2D, Vn.2S, Vm.S[0]
/// </summary>
public static Vector128<long> MultiplyDoublingWideningSaturateLowerByScalar(Vector64<int> left, Vector64<int> right) => MultiplyDoublingWideningSaturateLowerByScalar(left, right);

/// <summary>
/// int32x4_t vqdmull_lane_s16 (int16x4_t a, int16x4_t v, const int lane)
///   A32: VQDMULL.S16 Qd, Dn, Dm[lane]
///   A64: SQDMULL Vd.4S, Vn.4H, Vm.H[lane]
/// </summary>
public static Vector128<int> MultiplyDoublingWideningSaturateLowerBySelectedScalar(Vector64<short> left, Vector64<short> right, byte rightIndex) => MultiplyDoublingWideningSaturateLowerBySelectedScalar(left, right, rightIndex);

/// <summary>
/// int32x4_t vqdmull_laneq_s16 (int16x4_t a, int16x8_t v, const int lane)
///   A32: VQDMULL.S16 Qd, Dn, Dm[lane]
///   A64: SQDMULL Vd.4S, Vn.4H, Vm.H[lane]
/// </summary>
public static Vector128<int> MultiplyDoublingWideningSaturateLowerBySelectedScalar(Vector64<short> left, Vector128<short> right, byte rightIndex) => MultiplyDoublingWideningSaturateLowerBySelectedScalar(left, right, rightIndex);

/// <summary>
/// int64x2_t vqdmull_lane_s32 (int32x2_t a, int32x2_t v, const int lane)
///   A32: VQDMULL.S32 Qd, Dn, Dm[lane]
///   A64: SQDMULL Vd.2D, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector128<long> MultiplyDoublingWideningSaturateLowerBySelectedScalar(Vector64<int> left, Vector64<int> right, byte rightIndex) => MultiplyDoublingWideningSaturateLowerBySelectedScalar(left, right, rightIndex);

/// <summary>
/// int64x2_t vqdmull_laneq_s32 (int32x2_t a, int32x4_t v, const int lane)
///   A32: VQDMULL.S32 Qd, Dn, Dm[lane]
///   A64: SQDMULL Vd.2D, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector128<long> MultiplyDoublingWideningSaturateLowerBySelectedScalar(Vector64<int> left, Vector128<int> right, byte rightIndex) => MultiplyDoublingWideningSaturateLowerBySelectedScalar(left, right, rightIndex);

/// <summary>
/// int32x4_t vqdmull_high_s16 (int16x8_t a, int16x8_t b)
///   A32: VQDMULL.S16 Qd, Dn+1, Dm+1
///   A64: SQDMULL2 Vd.4S, Vn.8H, Vm.8H
/// </summary>
public static Vector128<int> MultiplyDoublingWideningSaturateUpper(Vector128<short> left, Vector128<short> right) => MultiplyDoublingWideningSaturateUpper(left, right);

/// <summary>
/// int64x2_t vqdmull_high_s32 (int32x4_t a, int32x4_t b)
///   A32: VQDMULL.S32 Qd, Dn+1, Dm+1
///   A64: SQDMULL2 Vd.2D, Vn.4S, Vm.4S
/// </summary>
public static Vector128<long> MultiplyDoublingWideningSaturateUpper(Vector128<int> left, Vector128<int> right) => MultiplyDoublingWideningSaturateUpper(left, right);

/// <summary>
/// int32x4_t vqdmull_high_n_s16 (int16x8_t a, int16_t b)
///   A32: VQDMULL.S16 Qd, Dn+1, Dm[0]
///   A64: SQDMULL2 Vd.4S, Vn.8H, Vm.H[0]
/// </summary>
public static Vector128<int> MultiplyDoublingWideningSaturateUpperByScalar(Vector128<short> left, Vector64<short> right) => MultiplyDoublingWideningSaturateUpperByScalar(left, right);

/// <summary>
/// int64x2_t vqdmull_high_n_s32 (int32x4_t a, int32_t b)
///   A32: VQDMULL.S32 Qd, Dn+1, Dm[0]
///   A64: SQDMULL2 Vd.2D, Vn.4S, Vm.S[0]
/// </summary>
public static Vector128<long> MultiplyDoublingWideningSaturateUpperByScalar(Vector128<int> left, Vector64<int> right) => MultiplyDoublingWideningSaturateUpperByScalar(left, right);

/// <summary>
/// int32x4_t vqdmull_high_lane_s16 (int16x8_t a, int16x4_t v, const int lane)
///   A32: VQDMULL.S16 Qd, Dn+1, Dm[lane]
///   A64: SQDMULL2 Vd.4S, Vn.8H, Vm.H[lane]
/// </summary>
public static Vector128<int> MultiplyDoublingWideningSaturateUpperBySelectedScalar(Vector128<short> left, Vector64<short> right, byte rightIndex) => MultiplyDoublingWideningSaturateUpperBySelectedScalar(left, right, rightIndex);

/// <summary>
/// int32x4_t vqdmull_high_laneq_s16 (int16x8_t a, int16x8_t v, const int lane)
///   A32: VQDMULL.S16 Qd, Dn+1, Dm[lane]
///   A64: SQDMULL2 Vd.4S, Vn.8H, Vm.H[lane]
/// </summary>
public static Vector128<int> MultiplyDoublingWideningSaturateUpperBySelectedScalar(Vector128<short> left, Vector128<short> right, byte rightIndex) => MultiplyDoublingWideningSaturateUpperBySelectedScalar(left, right, rightIndex);

/// <summary>
/// int64x2_t vqdmull_high_lane_s32 (int32x4_t a, int32x2_t v, const int lane)
///   A32: VQDMULL.S32 Qd, Dn+1, Dm[lane]
///   A64: SQDMULL2 Vd.2D, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<long> MultiplyDoublingWideningSaturateUpperBySelectedScalar(Vector128<int> left, Vector64<int> right, byte rightIndex) => MultiplyDoublingWideningSaturateUpperBySelectedScalar(left, right, rightIndex);

/// <summary>
/// int64x2_t vqdmull_high_laneq_s32 (int32x4_t a, int32x4_t v, const int lane)
///   A32: VQDMULL.S32 Qd, Dn+1, Dm[lane]
///   A64: SQDMULL2 Vd.2D, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<long> MultiplyDoublingWideningSaturateUpperBySelectedScalar(Vector128<int> left, Vector128<int> right, byte rightIndex) => MultiplyDoublingWideningSaturateUpperBySelectedScalar(left, right, rightIndex);

/// <summary>
/// int32x4_t vqdmlal_high_s16 (int32x4_t a, int16x8_t b, int16x8_t c)
///   A32: VQDMLAL.S16 Qd, Dn+1, Dm+1
///   A64: SQDMLAL2 Vd.4S, Vn.8H, Vm.8H
/// </summary>
public static Vector128<int> MultiplyDoublingWideningUpperAndAddSaturate(Vector128<int> addend, Vector128<short> left, Vector128<short> right) => MultiplyDoublingWideningUpperAndAddSaturate(addend, left, right);

/// <summary>
/// int64x2_t vqdmlal_high_s32 (int64x2_t a, int32x4_t b, int32x4_t c)
///   A32: VQDMLAL.S32 Qd, Dn+1, Dm+1
///   A64: SQDMLAL2 Vd.2D, Vn.4S, Vm.4S
/// </summary>
public static Vector128<long> MultiplyDoublingWideningUpperAndAddSaturate(Vector128<long> addend, Vector128<int> left, Vector128<int> right) => MultiplyDoublingWideningUpperAndAddSaturate(addend, left, right);

/// <summary>
/// int32x4_t vqdmlsl_high_s16 (int32x4_t a, int16x8_t b, int16x8_t c)
///   A32: VQDMLSL.S16 Qd, Dn+1, Dm+1
///   A64: SQDMLSL2 Vd.4S, Vn.8H, Vm.8H
/// </summary>
public static Vector128<int> MultiplyDoublingWideningUpperAndSubtractSaturate(Vector128<int> minuend, Vector128<short> left, Vector128<short> right) => MultiplyDoublingWideningUpperAndSubtractSaturate(minuend, left, right);

/// <summary>
/// int64x2_t vqdmlsl_high_s32 (int64x2_t a, int32x4_t b, int32x4_t c)
///   A32: VQDMLSL.S32 Qd, Dn+1, Dm+1
///   A64: SQDMLSL2 Vd.2D, Vn.4S, Vm.4S
/// </summary>
public static Vector128<long> MultiplyDoublingWideningUpperAndSubtractSaturate(Vector128<long> minuend, Vector128<int> left, Vector128<int> right) => MultiplyDoublingWideningUpperAndSubtractSaturate(minuend, left, right);

/// <summary>
/// int32x4_t vqdmlal_high_n_s16 (int32x4_t a, int16x8_t b, int16_t c)
///   A32: VQDMLAL.S16 Qd, Dn+1, Dm[0]
///   A64: SQDMLAL2 Vd.4S, Vn.8H, Vm.H[0]
/// </summary>
public static Vector128<int> MultiplyDoublingWideningUpperByScalarAndAddSaturate(Vector128<int> addend, Vector128<short> left, Vector64<short> right) => MultiplyDoublingWideningUpperByScalarAndAddSaturate(addend, left, right);

/// <summary>
/// int64x2_t vqdmlal_high_n_s32 (int64x2_t a, int32x4_t b, int32_t c)
///   A32: VQDMLAL.S32 Qd, Dn+1, Dm[0]
///   A64: SQDMLAL2 Vd.2D, Vn.4S, Vm.S[0]
/// </summary>
public static Vector128<long> MultiplyDoublingWideningUpperByScalarAndAddSaturate(Vector128<long> addend, Vector128<int> left, Vector64<int> right) => MultiplyDoublingWideningUpperByScalarAndAddSaturate(addend, left, right);

/// <summary>
/// int32x4_t vqdmlsl_high_n_s16 (int32x4_t a, int16x8_t b, int16_t c)
///   A32: VQDMLSL.S16 Qd, Dn+1, Dm[0]
///   A64: SQDMLSL2 Vd.4S, Vn.8H, Vm.H[0]
/// </summary>
public static Vector128<int> MultiplyDoublingWideningUpperByScalarAndSubtractSaturate(Vector128<int> minuend, Vector128<short> left, Vector64<short> right) => MultiplyDoublingWideningUpperByScalarAndSubtractSaturate(minuend, left, right);

/// <summary>
/// int64x2_t vqdmlsl_high_n_s32 (int64x2_t a, int32x4_t b, int32_t c)
///   A32: VQDMLSL.S32 Qd, Dn+1, Dm[0]
///   A64: SQDMLSL2 Vd.2D, Vn.4S, Vm.S[0]
/// </summary>
public static Vector128<long> MultiplyDoublingWideningUpperByScalarAndSubtractSaturate(Vector128<long> minuend, Vector128<int> left, Vector64<int> right) => MultiplyDoublingWideningUpperByScalarAndSubtractSaturate(minuend, left, right);

/// <summary>
/// int32x4_t vqdmlal_high_lane_s16 (int32x4_t a, int16x8_t b, int16x4_t v, const int lane)
///   A32: VQDMLAL.S16 Qd, Dn+1, Dm[lane]
///   A64: SQDMLAL2 Vd.4S, Vn.8H, Vm.H[lane]
/// </summary>
public static Vector128<int> MultiplyDoublingWideningUpperBySelectedScalarAndAddSaturate(Vector128<int> addend, Vector128<short> left, Vector64<short> right, byte rightIndex) => MultiplyDoublingWideningUpperBySelectedScalarAndAddSaturate(addend, left, right, rightIndex);

/// <summary>
/// int32x4_t vqdmlal_high_laneq_s16 (int32x4_t a, int16x8_t b, int16x8_t v, const int lane)
///   A32: VQDMLAL.S16 Qd, Dn+1, Dm[lane]
///   A64: SQDMLAL2 Vd.4S, Vn.8H, Vm.H[lane]
/// </summary>
public static Vector128<int> MultiplyDoublingWideningUpperBySelectedScalarAndAddSaturate(Vector128<int> addend, Vector128<short> left, Vector128<short> right, byte rightIndex) => MultiplyDoublingWideningUpperBySelectedScalarAndAddSaturate(addend, left, right, rightIndex);

/// <summary>
/// int64x2_t vqdmlal_high_lane_s32 (int64x2_t a, int32x4_t b, int32x2_t v, const int lane)
///   A32: VQDMLAL.S32 Qd, Dn+1, Dm[lane]
///   A64: SQDMLAL2 Vd.2D, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<long> MultiplyDoublingWideningUpperBySelectedScalarAndAddSaturate(Vector128<long> addend, Vector128<int> left, Vector64<int> right, byte rightIndex) => MultiplyDoublingWideningUpperBySelectedScalarAndAddSaturate(addend, left, right, rightIndex);

/// <summary>
/// int64x2_t vqdmlal_high_laneq_s32 (int64x2_t a, int32x4_t b, int32x4_t v, const int lane)
///   A32: VQDMLAL.S32 Qd, Dn+1, Dm[lane]
///   A64: SQDMLAL2 Vd.2D, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<long> MultiplyDoublingWideningUpperBySelectedScalarAndAddSaturate(Vector128<long> addend, Vector128<int> left, Vector128<int> right, byte rightIndex) => MultiplyDoublingWideningUpperBySelectedScalarAndAddSaturate(addend, left, right, rightIndex);
/// <summary>
/// int32x4_t vqdmlsl_high_lane_s16 (int32x4_t a, int16x8_t b, int16x4_t v, const int lane)
///   A32: VQDMLSL.S16 Qd, Dn+1, Dm[lane]
///   A64: SQDMLSL2 Vd.4S, Vn.8H, Vm.H[lane]
/// </summary>
public static Vector128<int> MultiplyDoublingWideningUpperBySelectedScalarAndSubtractSaturate(Vector128<int> minuend, Vector128<short> left, Vector64<short> right, byte rightIndex) => MultiplyDoublingWideningUpperBySelectedScalarAndSubtractSaturate(minuend, left, right, rightIndex);

/// <summary>
/// int32x4_t vqdmlsl_high_laneq_s16 (int32x4_t a, int16x8_t b, int16x8_t v, const int lane)
///   A32: VQDMLSL.S16 Qd, Dn+1, Dm[lane]
///   A64: SQDMLSL2 Vd.4S, Vn.8H, Vm.H[lane]
/// </summary>
public static Vector128<int> MultiplyDoublingWideningUpperBySelectedScalarAndSubtractSaturate(Vector128<int> minuend, Vector128<short> left, Vector128<short> right, byte rightIndex) => MultiplyDoublingWideningUpperBySelectedScalarAndSubtractSaturate(minuend, left, right, rightIndex);

/// <summary>
/// int64x2_t vqdmlsl_high_lane_s32 (int64x2_t a, int32x4_t b, int32x2_t v, const int lane)
///   A32: VQDMLSL.S32 Qd, Dn+1, Dm[lane]
///   A64: SQDMLSL2 Vd.2D, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<long> MultiplyDoublingWideningUpperBySelectedScalarAndSubtractSaturate(Vector128<long> minuend, Vector128<int> left, Vector64<int> right, byte rightIndex) => MultiplyDoublingWideningUpperBySelectedScalarAndSubtractSaturate(minuend, left, right, rightIndex);

/// <summary>
/// int64x2_t vqdmlsl_high_laneq_s32 (int64x2_t a, int32x4_t b, int32x4_t v, const int lane)
///   A32: VQDMLSL.S32 Qd, Dn+1, Dm[lane]
///   A64: SQDMLSL2 Vd.2D, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<long> MultiplyDoublingWideningUpperBySelectedScalarAndSubtractSaturate(Vector128<long> minuend, Vector128<int> left, Vector128<int> right, byte rightIndex) => MultiplyDoublingWideningUpperBySelectedScalarAndSubtractSaturate(minuend, left, right, rightIndex);

/// <summary>
/// int16x4_t vqrdmulh_n_s16 (int16x4_t a, int16_t b)
///   A32: VQRDMULH.S16 Dd, Dn, Dm[0]
///   A64: SQRDMULH Vd.4H, Vn.4H, Vm.H[0]
/// </summary>
public static Vector64<short> MultiplyRoundedDoublingByScalarSaturateHigh(Vector64<short> left, Vector64<short> right) => MultiplyRoundedDoublingByScalarSaturateHigh(left, right);

/// <summary>
/// int32x2_t vqrdmulh_n_s32 (int32x2_t a, int32_t b)
///   A32: VQRDMULH.S32 Dd, Dn, Dm[0]
///   A64: SQRDMULH Vd.2S, Vn.2S, Vm.S[0]
/// </summary>
public static Vector64<int> MultiplyRoundedDoublingByScalarSaturateHigh(Vector64<int> left, Vector64<int> right) => MultiplyRoundedDoublingByScalarSaturateHigh(left, right);

/// <summary>
/// int16x8_t vqrdmulhq_n_s16 (int16x8_t a, int16_t b)
///   A32: VQRDMULH.S16 Qd, Qn, Dm[0]
///   A64: SQRDMULH Vd.8H, Vn.8H, Vm.H[0]
/// </summary>
public static Vector128<short> MultiplyRoundedDoublingByScalarSaturateHigh(Vector128<short> left, Vector64<short> right) => MultiplyRoundedDoublingByScalarSaturateHigh(left, right);

/// <summary>
/// int32x4_t vqrdmulhq_n_s32 (int32x4_t a, int32_t b)
///   A32: VQRDMULH.S32 Qd, Qn, Dm[0]
///   A64: SQRDMULH Vd.4S, Vn.4S, Vm.S[0]
/// </summary>
public static Vector128<int> MultiplyRoundedDoublingByScalarSaturateHigh(Vector128<int> left, Vector64<int> right) => MultiplyRoundedDoublingByScalarSaturateHigh(left, right);

/// <summary>
/// int16x4_t vqrdmulh_lane_s16 (int16x4_t a, int16x4_t v, const int lane)
///   A32: VQRDMULH.S16 Dd, Dn, Dm[lane]
///   A64: SQRDMULH Vd.4H, Vn.4H, Vm.H[lane]
/// </summary>
public static Vector64<short> MultiplyRoundedDoublingBySelectedScalarSaturateHigh(Vector64<short> left, Vector64<short> right, byte rightIndex) => MultiplyRoundedDoublingBySelectedScalarSaturateHigh(left, right, rightIndex);

/// <summary>
/// int16x4_t vqrdmulh_laneq_s16 (int16x4_t a, int16x8_t v, const int lane)
///   A32: VQRDMULH.S16 Dd, Dn, Dm[lane]
///   A64: SQRDMULH Vd.4H, Vn.4H, Vm.H[lane]
/// </summary>
public static Vector64<short> MultiplyRoundedDoublingBySelectedScalarSaturateHigh(Vector64<short> left, Vector128<short> right, byte rightIndex) => MultiplyRoundedDoublingBySelectedScalarSaturateHigh(left, right, rightIndex);

/// <summary>
/// int32x2_t vqrdmulh_lane_s32 (int32x2_t a, int32x2_t v, const int lane)
///   A32: VQRDMULH.S32 Dd, Dn, Dm[lane]
///   A64: SQRDMULH Vd.2S, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector64<int> MultiplyRoundedDoublingBySelectedScalarSaturateHigh(Vector64<int> left, Vector64<int> right, byte rightIndex) => MultiplyRoundedDoublingBySelectedScalarSaturateHigh(left, right, rightIndex);

/// <summary>
/// int32x2_t vqrdmulh_laneq_s32 (int32x2_t a, int32x4_t v, const int lane)
///   A32: VQRDMULH.S32 Dd, Dn, Dm[lane]
///   A64: SQRDMULH Vd.2S, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector64<int> MultiplyRoundedDoublingBySelectedScalarSaturateHigh(Vector64<int> left, Vector128<int> right, byte rightIndex) => MultiplyRoundedDoublingBySelectedScalarSaturateHigh(left, right, rightIndex);

/// <summary>
/// int16x8_t vqrdmulhq_lane_s16 (int16x8_t a, int16x4_t v, const int lane)
///   A32: VQRDMULH.S16 Qd, Qn, Dm[lane]
///   A64: SQRDMULH Vd.8H, Vn.8H, Vm.H[lane]
/// </summary>
public static Vector128<short> MultiplyRoundedDoublingBySelectedScalarSaturateHigh(Vector128<short> left, Vector64<short> right, byte rightIndex) => MultiplyRoundedDoublingBySelectedScalarSaturateHigh(left, right, rightIndex);

/// <summary>
/// int16x8_t vqrdmulhq_laneq_s16 (int16x8_t a, int16x8_t v, const int lane)
///   A32: VQRDMULH.S16 Qd, Qn, Dm[lane]
///   A64: SQRDMULH Vd.8H, Vn.8H, Vm.H[lane]
/// </summary>
public static Vector128<short> MultiplyRoundedDoublingBySelectedScalarSaturateHigh(Vector128<short> left, Vector128<short> right, byte rightIndex) => MultiplyRoundedDoublingBySelectedScalarSaturateHigh(left, right, rightIndex);

/// <summary>
/// int32x4_t vqrdmulhq_lane_s32 (int32x4_t a, int32x2_t v, const int lane)
///   A32: VQRDMULH.S32 Qd, Qn, Dm[lane]
///   A64: SQRDMULH Vd.4S, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<int> MultiplyRoundedDoublingBySelectedScalarSaturateHigh(Vector128<int> left, Vector64<int> right, byte rightIndex) => MultiplyRoundedDoublingBySelectedScalarSaturateHigh(left, right, rightIndex);

/// <summary>
/// int32x4_t vqrdmulhq_laneq_s32 (int32x4_t a, int32x4_t v, const int lane)
///   A32: VQRDMULH.S32 Qd, Qn, Dm[lane]
///   A64: SQRDMULH Vd.4S, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<int> MultiplyRoundedDoublingBySelectedScalarSaturateHigh(Vector128<int> left, Vector128<int> right, byte rightIndex) => MultiplyRoundedDoublingBySelectedScalarSaturateHigh(left, right, rightIndex);
int32x4_t b) /// A32: VQRDMULH.S32 Qd, Qn, Qm /// A64: SQRDMULH Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<int> MultiplyRoundedDoublingSaturateHigh(Vector128<int> left, Vector128<int> right) => MultiplyRoundedDoublingSaturateHigh(left, right); /// <summary> /// float64x1_t vmul_f64 (float64x1_t a, float64x1_t b) /// A32: VMUL.F64 Dd, Dn, Dm /// A64: FMUL Dd, Dn, Dm /// </summary> public static Vector64<double> MultiplyScalar(Vector64<double> left, Vector64<double> right) => MultiplyScalar(left, right); /// <summary> /// float32_t vmuls_f32 (float32_t a, float32_t b) /// A32: VMUL.F32 Sd, Sn, Sm /// A64: FMUL Sd, Sn, Sm /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. /// </summary> public static Vector64<float> MultiplyScalar(Vector64<float> left, Vector64<float> right) => MultiplyScalar(left, right); /// <summary> /// float32_t vmuls_lane_f32 (float32_t a, float32x2_t v, const int lane) /// A32: VMUL.F32 Sd, Sn, Dm[lane] /// A64: FMUL Sd, Sn, Vm.S[lane] /// </summary> public static Vector64<float> MultiplyScalarBySelectedScalar(Vector64<float> left, Vector64<float> right, byte rightIndex) => MultiplyScalarBySelectedScalar(left, right, rightIndex); /// <summary> /// float32_t vmuls_laneq_f32 (float32_t a, float32x4_t v, const int lane) /// A32: VMUL.F32 Sd, Sn, Dm[lane] /// A64: FMUL Sd, Sn, Vm.S[lane] /// </summary> public static Vector64<float> MultiplyScalarBySelectedScalar(Vector64<float> left, Vector128<float> right, byte rightIndex) => MultiplyScalarBySelectedScalar(left, right, rightIndex); /// <summary> /// uint8x8_t vmls_u8 (uint8x8_t a, uint8x8_t b, uint8x8_t c) /// A32: VMLS.I8 Dd, Dn, Dm /// A64: MLS Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> MultiplySubtract(Vector64<byte> minuend, Vector64<byte> left, Vector64<byte> right) => MultiplySubtract(minuend, left, right); /// <summary> /// int16x4_t vmls_s16 (int16x4_t a, int16x4_t b, int16x4_t c) /// A32: VMLS.I16 Dd, Dn, Dm /// A64: MLS Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<short> MultiplySubtract(Vector64<short> minuend, Vector64<short> left, Vector64<short> right) => MultiplySubtract(minuend, left, right); /// <summary> /// int32x2_t vmls_s32 (int32x2_t a, int32x2_t b, int32x2_t c) /// A32: VMLS.I32 Dd, Dn, Dm /// A64: MLS Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<int> MultiplySubtract(Vector64<int> minuend, Vector64<int> left, Vector64<int> right) => MultiplySubtract(minuend, left, right); /// <summary> /// int8x8_t vmls_s8 (int8x8_t a, int8x8_t b, int8x8_t c) /// A32: VMLS.I8 Dd, Dn, Dm /// A64: MLS Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> MultiplySubtract(Vector64<sbyte> minuend, Vector64<sbyte> left, Vector64<sbyte> right) => MultiplySubtract(minuend, left, right); /// <summary> /// uint16x4_t vmls_u16 (uint16x4_t a, uint16x4_t b, uint16x4_t c) /// A32: VMLS.I16 Dd, Dn, Dm /// A64: MLS Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<ushort> MultiplySubtract(Vector64<ushort> minuend, Vector64<ushort> left, Vector64<ushort> right) => MultiplySubtract(minuend, left, right); /// <summary> /// uint32x2_t vmls_u32 (uint32x2_t a, uint32x2_t b, uint32x2_t c) /// A32: VMLS.I32 Dd, Dn, Dm /// A64: MLS Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<uint> MultiplySubtract(Vector64<uint> minuend, Vector64<uint> left, Vector64<uint> right) => MultiplySubtract(minuend, left, right); /// <summary> /// uint8x16_t vmlsq_u8 (uint8x16_t a, uint8x16_t 
b, uint8x16_t c) /// A32: VMLS.I8 Qd, Qn, Qm /// A64: MLS Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> MultiplySubtract(Vector128<byte> minuend, Vector128<byte> left, Vector128<byte> right) => MultiplySubtract(minuend, left, right); /// <summary> /// int16x8_t vmlsq_s16 (int16x8_t a, int16x8_t b, int16x8_t c) /// A32: VMLS.I16 Qd, Qn, Qm /// A64: MLS Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<short> MultiplySubtract(Vector128<short> minuend, Vector128<short> left, Vector128<short> right) => MultiplySubtract(minuend, left, right); /// <summary> /// int32x4_t vmlsq_s32 (int32x4_t a, int32x4_t b, int32x4_t c) /// A32: VMLS.I32 Qd, Qn, Qm /// A64: MLS Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<int> MultiplySubtract(Vector128<int> minuend, Vector128<int> left, Vector128<int> right) => MultiplySubtract(minuend, left, right); /// <summary> /// int8x16_t vmlsq_s8 (int8x16_t a, int8x16_t b, int8x16_t c) /// A32: VMLS.I8 Qd, Qn, Qm /// A64: MLS Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> MultiplySubtract(Vector128<sbyte> minuend, Vector128<sbyte> left, Vector128<sbyte> right) => MultiplySubtract(minuend, left, right); /// <summary> /// uint16x8_t vmlsq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t c) /// A32: VMLS.I16 Qd, Qn, Qm /// A64: MLS Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<ushort> MultiplySubtract(Vector128<ushort> minuend, Vector128<ushort> left, Vector128<ushort> right) => MultiplySubtract(minuend, left, right); /// <summary> /// uint32x4_t vmlsq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t c) /// A32: VMLS.I32 Qd, Qn, Qm /// A64: MLS Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<uint> MultiplySubtract(Vector128<uint> minuend, Vector128<uint> left, Vector128<uint> right) => MultiplySubtract(minuend, left, right); /// <summary> /// int16x4_t vmls_n_s16 (int16x4_t a, int16x4_t b, int16_t c) /// A32: VMLS.I16 Dd, Dn, Dm[0] /// A64: MLS Vd.4H, Vn.4H, Vm.H[0] /// </summary> public static Vector64<short> MultiplySubtractByScalar(Vector64<short> minuend, Vector64<short> left, Vector64<short> right) => MultiplySubtractByScalar(minuend, left, right); /// <summary> /// int32x2_t vmls_n_s32 (int32x2_t a, int32x2_t b, int32_t c) /// A32: VMLS.I32 Dd, Dn, Dm[0] /// A64: MLS Vd.2S, Vn.2S, Vm.S[0] /// </summary> public static Vector64<int> MultiplySubtractByScalar(Vector64<int> minuend, Vector64<int> left, Vector64<int> right) => MultiplySubtractByScalar(minuend, left, right); /// <summary> /// uint16x4_t vmls_n_u16 (uint16x4_t a, uint16x4_t b, uint16_t c) /// A32: VMLS.I16 Dd, Dn, Dm[0] /// A64: MLS Vd.4H, Vn.4H, Vm.H[0] /// </summary> public static Vector64<ushort> MultiplySubtractByScalar(Vector64<ushort> minuend, Vector64<ushort> left, Vector64<ushort> right) => MultiplySubtractByScalar(minuend, left, right); /// <summary> /// uint32x2_t vmls_n_u32 (uint32x2_t a, uint32x2_t b, uint32_t c) /// A32: VMLS.I32 Dd, Dn, Dm[0] /// A64: MLS Vd.2S, Vn.2S, Vm.S[0] /// </summary> public static Vector64<uint> MultiplySubtractByScalar(Vector64<uint> minuend, Vector64<uint> left, Vector64<uint> right) => MultiplySubtractByScalar(minuend, left, right); /// <summary> /// int16x8_t vmlsq_n_s16 (int16x8_t a, int16x8_t b, int16_t c) /// A32: VMLS.I16 Qd, Qn, Dm[0] /// A64: MLS Vd.8H, Vn.8H, Vm.H[0] /// </summary> public static Vector128<short> MultiplySubtractByScalar(Vector128<short> minuend, Vector128<short> left, Vector64<short> right) => MultiplySubtractByScalar(minuend, left, right); /// <summary> /// 
int32x4_t vmlsq_n_s32 (int32x4_t a, int32x4_t b, int32_t c) /// A32: VMLS.I32 Qd, Qn, Dm[0] /// A64: MLS Vd.4S, Vn.4S, Vm.S[0] /// </summary> public static Vector128<int> MultiplySubtractByScalar(Vector128<int> minuend, Vector128<int> left, Vector64<int> right) => MultiplySubtractByScalar(minuend, left, right); /// <summary> /// uint16x8_t vmlsq_n_u16 (uint16x8_t a, uint16x8_t b, uint16_t c) /// A32: VMLS.I16 Qd, Qn, Dm[0] /// A64: MLS Vd.8H, Vn.8H, Vm.H[0] /// </summary> public static Vector128<ushort> MultiplySubtractByScalar(Vector128<ushort> minuend, Vector128<ushort> left, Vector64<ushort> right) => MultiplySubtractByScalar(minuend, left, right); /// <summary> /// uint32x4_t vmlsq_n_u32 (uint32x4_t a, uint32x4_t b, uint32_t c) /// A32: VMLS.I32 Qd, Qn, Dm[0] /// A64: MLS Vd.4S, Vn.4S, Vm.S[0] /// </summary> public static Vector128<uint> MultiplySubtractByScalar(Vector128<uint> minuend, Vector128<uint> left, Vector64<uint> right) => MultiplySubtractByScalar(minuend, left, right); /// <summary> /// int16x4_t vmls_lane_s16 (int16x4_t a, int16x4_t b, int16x4_t v, const int lane) /// A32: VMLS.I16 Dd, Dn, Dm[lane] /// A64: MLS Vd.4H, Vn.4H, Vm.H[lane] /// </summary> public static Vector64<short> MultiplySubtractBySelectedScalar(Vector64<short> minuend, Vector64<short> left, Vector64<short> right, byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); /// <summary> /// int16x4_t vmls_laneq_s16 (int16x4_t a, int16x4_t b, int16x8_t v, const int lane) /// A32: VMLS.I16 Dd, Dn, Dm[lane] /// A64: MLS Vd.4H, Vn.4H, Vm.H[lane] /// </summary> public static Vector64<short> MultiplySubtractBySelectedScalar(Vector64<short> minuend, Vector64<short> left, Vector128<short> right, byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); /// <summary> /// int32x2_t vmls_lane_s32 (int32x2_t a, int32x2_t b, int32x2_t v, const int lane) /// A32: VMLS.I32 Dd, Dn, Dm[lane] /// A64: MLS Vd.2S, Vn.2S, Vm.S[lane] /// </summary> public static Vector64<int> MultiplySubtractBySelectedScalar(Vector64<int> minuend, Vector64<int> left, Vector64<int> right, byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); /// <summary> /// int32x2_t vmls_laneq_s32 (int32x2_t a, int32x2_t b, int32x4_t v, const int lane) /// A32: VMLS.I32 Dd, Dn, Dm[lane] /// A64: MLS Vd.2S, Vn.2S, Vm.S[lane] /// </summary> public static Vector64<int> MultiplySubtractBySelectedScalar(Vector64<int> minuend, Vector64<int> left, Vector128<int> right, byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); /// <summary> /// uint16x4_t vmls_lane_u16 (uint16x4_t a, uint16x4_t b, uint16x4_t v, const int lane) /// A32: VMLS.I16 Dd, Dn, Dm[lane] /// A64: MLS Vd.4H, Vn.4H, Vm.H[lane] /// </summary> public static Vector64<ushort> MultiplySubtractBySelectedScalar(Vector64<ushort> minuend, Vector64<ushort> left, Vector64<ushort> right, byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); /// <summary> /// uint16x4_t vmls_laneq_u16 (uint16x4_t a, uint16x4_t b, uint16x8_t v, const int lane) /// A32: VMLS.I16 Dd, Dn, Dm[lane] /// A64: MLS Vd.4H, Vn.4H, Vm.H[lane] /// </summary> public static Vector64<ushort> MultiplySubtractBySelectedScalar(Vector64<ushort> minuend, Vector64<ushort> left, Vector128<ushort> right, byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); /// <summary> /// uint32x2_t vmls_lane_u32 (uint32x2_t a, uint32x2_t b, uint32x2_t v, const int lane) /// 
A32: VMLS.I32 Dd, Dn, Dm[lane] /// A64: MLS Vd.2S, Vn.2S, Vm.S[lane] /// </summary> public static Vector64<uint> MultiplySubtractBySelectedScalar(Vector64<uint> minuend, Vector64<uint> left, Vector64<uint> right, byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); /// <summary> /// uint32x2_t vmls_laneq_u32 (uint32x2_t a, uint32x2_t b, uint32x4_t v, const int lane) /// A32: VMLS.I32 Dd, Dn, Dm[lane] /// A64: MLS Vd.2S, Vn.2S, Vm.S[lane] /// </summary> public static Vector64<uint> MultiplySubtractBySelectedScalar(Vector64<uint> minuend, Vector64<uint> left, Vector128<uint> right, byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); /// <summary> /// int16x8_t vmlsq_lane_s16 (int16x8_t a, int16x8_t b, int16x4_t v, const int lane) /// A32: VMLS.I16 Qd, Qn, Dm[lane] /// A64: MLS Vd.8H, Vn.8H, Vm.H[lane] /// </summary> public static Vector128<short> MultiplySubtractBySelectedScalar(Vector128<short> minuend, Vector128<short> left, Vector64<short> right, byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); /// <summary> /// int16x8_t vmlsq_laneq_s16 (int16x8_t a, int16x8_t b, int16x8_t v, const int lane) /// A32: VMLS.I16 Qd, Qn, Dm[lane] /// A64: MLS Vd.8H, Vn.8H, Vm.H[lane] /// </summary> public static Vector128<short> MultiplySubtractBySelectedScalar(Vector128<short> minuend, Vector128<short> left, Vector128<short> right, byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); /// <summary> /// int32x4_t vmlsq_lane_s32 (int32x4_t a, int32x4_t b, int32x2_t v, const int lane) /// A32: VMLS.I32 Qd, Qn, Dm[lane] /// A64: MLS Vd.4S, Vn.4S, Vm.S[lane] /// </summary> public static Vector128<int> MultiplySubtractBySelectedScalar(Vector128<int> minuend, Vector128<int> left, Vector64<int> right, byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); /// <summary> /// int32x4_t vmlsq_laneq_s32 (int32x4_t a, int32x4_t b, int32x4_t v, const int lane) /// A32: VMLS.I32 Qd, Qn, Dm[lane] /// A64: MLS Vd.4S, Vn.4S, Vm.S[lane] /// </summary> public static Vector128<int> MultiplySubtractBySelectedScalar(Vector128<int> minuend, Vector128<int> left, Vector128<int> right, byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); /// <summary> /// uint16x8_t vmlsq_lane_u16 (uint16x8_t a, uint16x8_t b, uint16x4_t v, const int lane) /// A32: VMLS.I16 Qd, Qn, Dm[lane] /// A64: MLS Vd.8H, Vn.8H, Vm.H[lane] /// </summary> public static Vector128<ushort> MultiplySubtractBySelectedScalar(Vector128<ushort> minuend, Vector128<ushort> left, Vector64<ushort> right, byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); /// <summary> /// uint16x8_t vmlsq_laneq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t v, const int lane) /// A32: VMLS.I16 Qd, Qn, Dm[lane] /// A64: MLS Vd.8H, Vn.8H, Vm.H[lane] /// </summary> public static Vector128<ushort> MultiplySubtractBySelectedScalar(Vector128<ushort> minuend, Vector128<ushort> left, Vector128<ushort> right, byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); /// <summary> /// uint32x4_t vmlsq_lane_u32 (uint32x4_t a, uint32x4_t b, uint32x2_t v, const int lane) /// A32: VMLS.I32 Qd, Qn, Dm[lane] /// A64: MLS Vd.4S, Vn.4S, Vm.S[lane] /// </summary> public static Vector128<uint> MultiplySubtractBySelectedScalar(Vector128<uint> minuend, Vector128<uint> left, Vector64<uint> right, byte rightIndex) => 
MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); /// <summary> /// uint32x4_t vmlsq_laneq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t v, const int lane) /// A32: VMLS.I32 Qd, Qn, Dm[lane] /// A64: MLS Vd.4S, Vn.4S, Vm.S[lane] /// </summary> public static Vector128<uint> MultiplySubtractBySelectedScalar(Vector128<uint> minuend, Vector128<uint> left, Vector128<uint> right, byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); /// <summary> /// uint16x8_t vmull_u8 (uint8x8_t a, uint8x8_t b) /// A32: VMULL.U8 Qd, Dn, Dm /// A64: UMULL Vd.8H, Vn.8B, Vm.8B /// </summary> public static Vector128<ushort> MultiplyWideningLower(Vector64<byte> left, Vector64<byte> right) => MultiplyWideningLower(left, right); /// <summary> /// int32x4_t vmull_s16 (int16x4_t a, int16x4_t b) /// A32: VMULL.S16 Qd, Dn, Dm /// A64: SMULL Vd.4S, Vn.4H, Vm.4H /// </summary> public static Vector128<int> MultiplyWideningLower(Vector64<short> left, Vector64<short> right) => MultiplyWideningLower(left, right); /// <summary> /// int64x2_t vmull_s32 (int32x2_t a, int32x2_t b) /// A32: VMULL.S32 Qd, Dn, Dm /// A64: SMULL Vd.2D, Vn.2S, Vm.2S /// </summary> public static Vector128<long> MultiplyWideningLower(Vector64<int> left, Vector64<int> right) => MultiplyWideningLower(left, right); /// <summary> /// int16x8_t vmull_s8 (int8x8_t a, int8x8_t b) /// A32: VMULL.S8 Qd, Dn, Dm /// A64: SMULL Vd.8H, Vn.8B, Vm.8B /// </summary> public static Vector128<short> MultiplyWideningLower(Vector64<sbyte> left, Vector64<sbyte> right) => MultiplyWideningLower(left, right); /// <summary> /// uint32x4_t vmull_u16 (uint16x4_t a, uint16x4_t b) /// A32: VMULL.U16 Qd, Dn, Dm /// A64: UMULL Vd.4S, Vn.4H, Vm.4H /// </summary> public static Vector128<uint> MultiplyWideningLower(Vector64<ushort> left, Vector64<ushort> right) => MultiplyWideningLower(left, right); /// <summary> /// uint64x2_t vmull_u32 (uint32x2_t a, uint32x2_t b) /// A32: VMULL.U32 Qd, Dn, Dm /// A64: UMULL Vd.2D, Vn.2S, Vm.2S /// </summary> public static Vector128<ulong> MultiplyWideningLower(Vector64<uint> left, Vector64<uint> right) => MultiplyWideningLower(left, right); /// <summary> /// uint16x8_t vmlal_u8 (uint16x8_t a, uint8x8_t b, uint8x8_t c) /// A32: VMLAL.U8 Qd, Dn, Dm /// A64: UMLAL Vd.8H, Vn.8B, Vm.8B /// </summary> public static Vector128<ushort> MultiplyWideningLowerAndAdd(Vector128<ushort> addend, Vector64<byte> left, Vector64<byte> right) => MultiplyWideningLowerAndAdd(addend, left, right); /// <summary> /// int32x4_t vmlal_s16 (int32x4_t a, int16x4_t b, int16x4_t c) /// A32: VMLAL.S16 Qd, Dn, Dm /// A64: SMLAL Vd.4S, Vn.4H, Vm.4H /// </summary> public static Vector128<int> MultiplyWideningLowerAndAdd(Vector128<int> addend, Vector64<short> left, Vector64<short> right) => MultiplyWideningLowerAndAdd(addend, left, right); /// <summary> /// int64x2_t vmlal_s32 (int64x2_t a, int32x2_t b, int32x2_t c) /// A32: VMLAL.S32 Qd, Dn, Dm /// A64: SMLAL Vd.2D, Vn.2S, Vm.2S /// </summary> public static Vector128<long> MultiplyWideningLowerAndAdd(Vector128<long> addend, Vector64<int> left, Vector64<int> right) => MultiplyWideningLowerAndAdd(addend, left, right); /// <summary> /// int16x8_t vmlal_s8 (int16x8_t a, int8x8_t b, int8x8_t c) /// A32: VMLAL.S8 Qd, Dn, Dm /// A64: SMLAL Vd.8H, Vn.8B, Vm.8B /// </summary> public static Vector128<short> MultiplyWideningLowerAndAdd(Vector128<short> addend, Vector64<sbyte> left, Vector64<sbyte> right) => MultiplyWideningLowerAndAdd(addend, left, right); /// <summary> /// uint32x4_t vmlal_u16 
(uint32x4_t a, uint16x4_t b, uint16x4_t c) /// A32: VMLAL.U16 Qd, Dn, Dm /// A64: UMLAL Vd.4S, Vn.4H, Vm.4H /// </summary> public static Vector128<uint> MultiplyWideningLowerAndAdd(Vector128<uint> addend, Vector64<ushort> left, Vector64<ushort> right) => MultiplyWideningLowerAndAdd(addend, left, right); /// <summary> /// uint64x2_t vmlal_u32 (uint64x2_t a, uint32x2_t b, uint32x2_t c) /// A32: VMLAL.U32 Qd, Dn, Dm /// A64: UMLAL Vd.2D, Vn.2S, Vm.2S /// </summary> public static Vector128<ulong> MultiplyWideningLowerAndAdd(Vector128<ulong> addend, Vector64<uint> left, Vector64<uint> right) => MultiplyWideningLowerAndAdd(addend, left, right); /// <summary> /// uint16x8_t vmlsl_u8 (uint16x8_t a, uint8x8_t b, uint8x8_t c) /// A32: VMLSL.U8 Qd, Dn, Dm /// A64: UMLSL Vd.8H, Vn.8B, Vm.8B /// </summary> public static Vector128<ushort> MultiplyWideningLowerAndSubtract(Vector128<ushort> minuend, Vector64<byte> left, Vector64<byte> right) => MultiplyWideningLowerAndSubtract(minuend, left, right); /// <summary> /// int32x4_t vmlsl_s16 (int32x4_t a, int16x4_t b, int16x4_t c) /// A32: VMLSL.S16 Qd, Dn, Dm /// A64: SMLSL Vd.4S, Vn.4H, Vm.4H /// </summary> public static Vector128<int> MultiplyWideningLowerAndSubtract(Vector128<int> minuend, Vector64<short> left, Vector64<short> right) => MultiplyWideningLowerAndSubtract(minuend, left, right); /// <summary> /// int64x2_t vmlsl_s32 (int64x2_t a, int32x2_t b, int32x2_t c) /// A32: VMLSL.S32 Qd, Dn, Dm /// A64: SMLSL Vd.2D, Vn.2S, Vm.2S /// </summary> public static Vector128<long> MultiplyWideningLowerAndSubtract(Vector128<long> minuend, Vector64<int> left, Vector64<int> right) => MultiplyWideningLowerAndSubtract(minuend, left, right); /// <summary> /// int16x8_t vmlsl_s8 (int16x8_t a, int8x8_t b, int8x8_t c) /// A32: VMLSL.S8 Qd, Dn, Dm /// A64: SMLSL Vd.8H, Vn.8B, Vm.8B /// </summary> public static Vector128<short> MultiplyWideningLowerAndSubtract(Vector128<short> minuend, Vector64<sbyte> left, Vector64<sbyte> right) => MultiplyWideningLowerAndSubtract(minuend, left, right); /// <summary> /// uint32x4_t vmlsl_u16 (uint32x4_t a, uint16x4_t b, uint16x4_t c) /// A32: VMLSL.U16 Qd, Dn, Dm /// A64: UMLSL Vd.4S, Vn.4H, Vm.4H /// </summary> public static Vector128<uint> MultiplyWideningLowerAndSubtract(Vector128<uint> minuend, Vector64<ushort> left, Vector64<ushort> right) => MultiplyWideningLowerAndSubtract(minuend, left, right); /// <summary> /// uint64x2_t vmlsl_u32 (uint64x2_t a, uint32x2_t b, uint32x2_t c) /// A32: VMLSL.U32 Qd, Dn, Dm /// A64: UMLSL Vd.2D, Vn.2S, Vm.2S /// </summary> public static Vector128<ulong> MultiplyWideningLowerAndSubtract(Vector128<ulong> minuend, Vector64<uint> left, Vector64<uint> right) => MultiplyWideningLowerAndSubtract(minuend, left, right); /// <summary> /// uint16x8_t vmull_high_u8 (uint8x16_t a, uint8x16_t b) /// A32: VMULL.U8 Qd, Dn+1, Dm+1 /// A64: UMULL2 Vd.8H, Vn.16B, Vm.16B /// </summary> public static Vector128<ushort> MultiplyWideningUpper(Vector128<byte> left, Vector128<byte> right) => MultiplyWideningUpper(left, right); /// <summary> /// int32x4_t vmull_high_s16 (int16x8_t a, int16x8_t b) /// A32: VMULL.S16 Qd, Dn+1, Dm+1 /// A64: SMULL2 Vd.4S, Vn.8H, Vm.8H /// </summary> public static Vector128<int> MultiplyWideningUpper(Vector128<short> left, Vector128<short> right) => MultiplyWideningUpper(left, right); /// <summary> /// int64x2_t vmull_high_s32 (int32x4_t a, int32x4_t b) /// A32: VMULL.S32 Qd, Dn+1, Dm+1 /// A64: SMULL2 Vd.2D, Vn.4S, Vm.4S /// </summary> public static Vector128<long> 
MultiplyWideningUpper(Vector128<int> left, Vector128<int> right) => MultiplyWideningUpper(left, right); /// <summary> /// int16x8_t vmull_high_s8 (int8x16_t a, int8x16_t b) /// A32: VMULL.S8 Qd, Dn+1, Dm+1 /// A64: SMULL2 Vd.8H, Vn.16B, Vm.16B /// </summary> public static Vector128<short> MultiplyWideningUpper(Vector128<sbyte> left, Vector128<sbyte> right) => MultiplyWideningUpper(left, right); /// <summary> /// uint32x4_t vmull_high_u16 (uint16x8_t a, uint16x8_t b) /// A32: VMULL.U16 Qd, Dn+1, Dm+1 /// A64: UMULL2 Vd.4S, Vn.8H, Vm.8H /// </summary> public static Vector128<uint> MultiplyWideningUpper(Vector128<ushort> left, Vector128<ushort> right) => MultiplyWideningUpper(left, right); /// <summary> /// uint64x2_t vmull_high_u32 (uint32x4_t a, uint32x4_t b) /// A32: VMULL.U32 Qd, Dn+1, Dm+1 /// A64: UMULL2 Vd.2D, Vn.4S, Vm.4S /// </summary> public static Vector128<ulong> MultiplyWideningUpper(Vector128<uint> left, Vector128<uint> right) => MultiplyWideningUpper(left, right); /// <summary> /// uint16x8_t vmlal_high_u8 (uint16x8_t a, uint8x16_t b, uint8x16_t c) /// A32: VMLAL.U8 Qd, Dn+1, Dm+1 /// A64: UMLAL2 Vd.8H, Vn.16B, Vm.16B /// </summary> public static Vector128<ushort> MultiplyWideningUpperAndAdd(Vector128<ushort> addend, Vector128<byte> left, Vector128<byte> right) => MultiplyWideningUpperAndAdd(addend, left, right); /// <summary> /// int32x4_t vmlal_high_s16 (int32x4_t a, int16x8_t b, int16x8_t c) /// A32: VMLAL.S16 Qd, Dn+1, Dm+1 /// A64: SMLAL2 Vd.4S, Vn.8H, Vm.8H /// </summary> public static Vector128<int> MultiplyWideningUpperAndAdd(Vector128<int> addend, Vector128<short> left, Vector128<short> right) => MultiplyWideningUpperAndAdd(addend, left, right); /// <summary> /// int64x2_t vmlal_high_s32 (int64x2_t a, int32x4_t b, int32x4_t c) /// A32: VMLAL.S32 Qd, Dn+1, Dm+1 /// A64: SMLAL2 Vd.2D, Vn.4S, Vm.4S /// </summary> public static Vector128<long> MultiplyWideningUpperAndAdd(Vector128<long> addend, Vector128<int> left, Vector128<int> right) => MultiplyWideningUpperAndAdd(addend, left, right); /// <summary> /// int16x8_t vmlal_high_s8 (int16x8_t a, int8x16_t b, int8x16_t c) /// A32: VMLAL.S8 Qd, Dn+1, Dm+1 /// A64: SMLAL2 Vd.8H, Vn.16B, Vm.16B /// </summary> public static Vector128<short> MultiplyWideningUpperAndAdd(Vector128<short> addend, Vector128<sbyte> left, Vector128<sbyte> right) => MultiplyWideningUpperAndAdd(addend, left, right); /// <summary> /// uint32x4_t vmlal_high_u16 (uint32x4_t a, uint16x8_t b, uint16x8_t c) /// A32: VMLAL.U16 Qd, Dn+1, Dm+1 /// A64: UMLAL2 Vd.4S, Vn.8H, Vm.8H /// </summary> public static Vector128<uint> MultiplyWideningUpperAndAdd(Vector128<uint> addend, Vector128<ushort> left, Vector128<ushort> right) => MultiplyWideningUpperAndAdd(addend, left, right); /// <summary> /// uint64x2_t vmlal_high_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t c) /// A32: VMLAL.U32 Qd, Dn+1, Dm+1 /// A64: UMLAL2 Vd.2D, Vn.4S, Vm.4S /// </summary> public static Vector128<ulong> MultiplyWideningUpperAndAdd(Vector128<ulong> addend, Vector128<uint> left, Vector128<uint> right) => MultiplyWideningUpperAndAdd(addend, left, right); /// <summary> /// uint16x8_t vmlsl_high_u8 (uint16x8_t a, uint8x16_t b, uint8x16_t c) /// A32: VMLSL.U8 Qd, Dn+1, Dm+1 /// A64: UMLSL2 Vd.8H, Vn.16B, Vm.16B /// </summary> public static Vector128<ushort> MultiplyWideningUpperAndSubtract(Vector128<ushort> minuend, Vector128<byte> left, Vector128<byte> right) => MultiplyWideningUpperAndSubtract(minuend, left, right); /// <summary> /// int32x4_t vmlsl_high_s16 (int32x4_t a, int16x8_t b, int16x8_t c) 
/// A32: VMLSL.S16 Qd, Dn+1, Dm+1 /// A64: SMLSL2 Vd.4S, Vn.8H, Vm.8H /// </summary> public static Vector128<int> MultiplyWideningUpperAndSubtract(Vector128<int> minuend, Vector128<short> left, Vector128<short> right) => MultiplyWideningUpperAndSubtract(minuend, left, right); /// <summary> /// int64x2_t vmlsl_high_s32 (int64x2_t a, int32x4_t b, int32x4_t c) /// A32: VMLSL.S32 Qd, Dn+1, Dm+1 /// A64: SMLSL2 Vd.2D, Vn.4S, Vm.4S /// </summary> public static Vector128<long> MultiplyWideningUpperAndSubtract(Vector128<long> minuend, Vector128<int> left, Vector128<int> right) => MultiplyWideningUpperAndSubtract(minuend, left, right); /// <summary> /// int16x8_t vmlsl_high_s8 (int16x8_t a, int8x16_t b, int8x16_t c) /// A32: VMLSL.S8 Qd, Dn+1, Dm+1 /// A64: SMLSL2 Vd.8H, Vn.16B, Vm.16B /// </summary> public static Vector128<short> MultiplyWideningUpperAndSubtract(Vector128<short> minuend, Vector128<sbyte> left, Vector128<sbyte> right) => MultiplyWideningUpperAndSubtract(minuend, left, right); /// <summary> /// uint32x4_t vmlsl_high_u16 (uint32x4_t a, uint16x8_t b, uint16x8_t c) /// A32: VMLSL.U16 Qd, Dn+1, Dm+1 /// A64: UMLSL2 Vd.4S, Vn.8H, Vm.8H /// </summary> public static Vector128<uint> MultiplyWideningUpperAndSubtract(Vector128<uint> minuend, Vector128<ushort> left, Vector128<ushort> right) => MultiplyWideningUpperAndSubtract(minuend, left, right); /// <summary> /// uint64x2_t vmlsl_high_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t c) /// A32: VMLSL.U32 Qd, Dn+1, Dm+1 /// A64: UMLSL2 Vd.2D, Vn.4S, Vm.4S /// </summary> public static Vector128<ulong> MultiplyWideningUpperAndSubtract(Vector128<ulong> minuend, Vector128<uint> left, Vector128<uint> right) => MultiplyWideningUpperAndSubtract(minuend, left, right); /// <summary> /// int16x4_t vneg_s16 (int16x4_t a) /// A32: VNEG.S16 Dd, Dm /// A64: NEG Vd.4H, Vn.4H /// </summary> public static Vector64<short> Negate(Vector64<short> value) => Negate(value); /// <summary> /// int32x2_t vneg_s32 (int32x2_t a) /// A32: VNEG.S32 Dd, Dm /// A64: NEG Vd.2S, Vn.2S /// </summary> public static Vector64<int> Negate(Vector64<int> value) => Negate(value); /// <summary> /// int8x8_t vneg_s8 (int8x8_t a) /// A32: VNEG.S8 Dd, Dm /// A64: NEG Vd.8B, Vn.8B /// </summary> public static Vector64<sbyte> Negate(Vector64<sbyte> value) => Negate(value); /// <summary> /// float32x2_t vneg_f32 (float32x2_t a) /// A32: VNEG.F32 Dd, Dm /// A64: FNEG Vd.2S, Vn.2S /// </summary> public static Vector64<float> Negate(Vector64<float> value) => Negate(value); /// <summary> /// int16x8_t vnegq_s16 (int16x8_t a) /// A32: VNEG.S16 Qd, Qm /// A64: NEG Vd.8H, Vn.8H /// </summary> public static Vector128<short> Negate(Vector128<short> value) => Negate(value); /// <summary> /// int32x4_t vnegq_s32 (int32x4_t a) /// A32: VNEG.S32 Qd, Qm /// A64: NEG Vd.4S, Vn.4S /// </summary> public static Vector128<int> Negate(Vector128<int> value) => Negate(value); /// <summary> /// int8x16_t vnegq_s8 (int8x16_t a) /// A32: VNEG.S8 Qd, Qm /// A64: NEG Vd.16B, Vn.16B /// </summary> public static Vector128<sbyte> Negate(Vector128<sbyte> value) => Negate(value); /// <summary> /// float32x4_t vnegq_f32 (float32x4_t a) /// A32: VNEG.F32 Qd, Qm /// A64: FNEG Vd.4S, Vn.4S /// </summary> public static Vector128<float> Negate(Vector128<float> value) => Negate(value); /// <summary> /// int16x4_t vqneg_s16 (int16x4_t a) /// A32: VQNEG.S16 Dd, Dm /// A64: SQNEG Vd.4H, Vn.4H /// </summary> public static Vector64<short> NegateSaturate(Vector64<short> value) => NegateSaturate(value); /// <summary> /// int32x2_t 
vqneg_s32 (int32x2_t a) /// A32: VQNEG.S32 Dd, Dm /// A64: SQNEG Vd.2S, Vn.2S /// </summary> public static Vector64<int> NegateSaturate(Vector64<int> value) => NegateSaturate(value); /// <summary> /// int8x8_t vqneg_s8 (int8x8_t a) /// A32: VQNEG.S8 Dd, Dm /// A64: SQNEG Vd.8B, Vn.8B /// </summary> public static Vector64<sbyte> NegateSaturate(Vector64<sbyte> value) => NegateSaturate(value); /// <summary> /// int16x8_t vqnegq_s16 (int16x8_t a) /// A32: VQNEG.S16 Qd, Qm /// A64: SQNEG Vd.8H, Vn.8H /// </summary> public static Vector128<short> NegateSaturate(Vector128<short> value) => NegateSaturate(value); /// <summary> /// int32x4_t vqnegq_s32 (int32x4_t a) /// A32: VQNEG.S32 Qd, Qm /// A64: SQNEG Vd.4S, Vn.4S /// </summary> public static Vector128<int> NegateSaturate(Vector128<int> value) => NegateSaturate(value); /// <summary> /// int8x16_t vqnegq_s8 (int8x16_t a) /// A32: VQNEG.S8 Qd, Qm /// A64: SQNEG Vd.16B, Vn.16B /// </summary> public static Vector128<sbyte> NegateSaturate(Vector128<sbyte> value) => NegateSaturate(value); /// <summary> /// float64x1_t vneg_f64 (float64x1_t a) /// A32: VNEG.F64 Dd, Dm /// A64: FNEG Dd, Dn /// </summary> public static Vector64<double> NegateScalar(Vector64<double> value) => NegateScalar(value); /// <summary> /// float32_t vnegs_f32 (float32_t a) /// A32: VNEG.F32 Sd, Sm /// A64: FNEG Sd, Sn /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. /// </summary> public static Vector64<float> NegateScalar(Vector64<float> value) => NegateScalar(value); /// <summary> /// uint8x8_t vmvn_u8 (uint8x8_t a) /// A32: VMVN Dd, Dm /// A64: MVN Vd.8B, Vn.8B /// </summary> public static Vector64<byte> Not(Vector64<byte> value) => Not(value); /// <summary> /// float64x1_t vmvn_f64 (float64x1_t a) /// A32: VMVN Dd, Dm /// A64: MVN Vd.8B, Vn.8B /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. /// </summary> public static Vector64<double> Not(Vector64<double> value) => Not(value); /// <summary> /// int16x4_t vmvn_s16 (int16x4_t a) /// A32: VMVN Dd, Dm /// A64: MVN Vd.8B, Vn.8B /// </summary> public static Vector64<short> Not(Vector64<short> value) => Not(value); /// <summary> /// int32x2_t vmvn_s32 (int32x2_t a) /// A32: VMVN Dd, Dm /// A64: MVN Vd.8B, Vn.8B /// </summary> public static Vector64<int> Not(Vector64<int> value) => Not(value); /// <summary> /// int64x1_t vmvn_s64 (int64x1_t a) /// A32: VMVN Dd, Dm /// A64: MVN Vd.8B, Vn.8B /// </summary> public static Vector64<long> Not(Vector64<long> value) => Not(value); /// <summary> /// int8x8_t vmvn_s8 (int8x8_t a) /// A32: VMVN Dd, Dm /// A64: MVN Vd.8B, Vn.8B /// </summary> public static Vector64<sbyte> Not(Vector64<sbyte> value) => Not(value); /// <summary> /// float32x2_t vmvn_f32 (float32x2_t a) /// A32: VMVN Dd, Dm /// A64: MVN Vd.8B, Vn.8B /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
/// </summary> public static Vector64<float> Not(Vector64<float> value) => Not(value); /// <summary> /// uint16x4_t vmvn_u16 (uint16x4_t a) /// A32: VMVN Dd, Dm /// A64: MVN Vd.8B, Vn.8B /// </summary> public static Vector64<ushort> Not(Vector64<ushort> value) => Not(value); /// <summary> /// uint32x2_t vmvn_u32 (uint32x2_t a) /// A32: VMVN Dd, Dm /// A64: MVN Vd.8B, Vn.8B /// </summary> public static Vector64<uint> Not(Vector64<uint> value) => Not(value); /// <summary> /// uint64x1_t vmvn_u64 (uint64x1_t a) /// A32: VMVN Dd, Dm /// A64: MVN Vd.8B, Vn.8B /// </summary> public static Vector64<ulong> Not(Vector64<ulong> value) => Not(value); /// <summary> /// uint8x16_t vmvnq_u8 (uint8x16_t a) /// A32: VMVN Qd, Qm /// A64: MVN Vd.16B, Vn.16B /// </summary> public static Vector128<byte> Not(Vector128<byte> value) => Not(value); /// <summary> /// float64x2_t vmvnq_f64 (float64x2_t a) /// A32: VMVN Qd, Qm /// A64: MVN Vd.16B, Vn.16B /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. /// </summary> public static Vector128<double> Not(Vector128<double> value) => Not(value); /// <summary> /// int16x8_t vmvnq_s16 (int16x8_t a) /// A32: VMVN Qd, Qm /// A64: MVN Vd.16B, Vn.16B /// </summary> public static Vector128<short> Not(Vector128<short> value) => Not(value); /// <summary> /// int32x4_t vmvnq_s32 (int32x4_t a) /// A32: VMVN Qd, Qm /// A64: MVN Vd.16B, Vn.16B /// </summary> public static Vector128<int> Not(Vector128<int> value) => Not(value); /// <summary> /// int64x2_t vmvnq_s64 (int64x2_t a) /// A32: VMVN Qd, Qm /// A64: MVN Vd.16B, Vn.16B /// </summary> public static Vector128<long> Not(Vector128<long> value) => Not(value); /// <summary> /// int8x16_t vmvnq_s8 (int8x16_t a) /// A32: VMVN Qd, Qm /// A64: MVN Vd.16B, Vn.16B /// </summary> public static Vector128<sbyte> Not(Vector128<sbyte> value) => Not(value); /// <summary> /// float32x4_t vmvnq_f32 (float32x4_t a) /// A32: VMVN Qd, Qm /// A64: MVN Vd.16B, Vn.16B /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. /// </summary> public static Vector128<float> Not(Vector128<float> value) => Not(value); /// <summary> /// uint16x8_t vmvnq_u16 (uint16x8_t a) /// A32: VMVN Qd, Qm /// A64: MVN Vd.16B, Vn.16B /// </summary> public static Vector128<ushort> Not(Vector128<ushort> value) => Not(value); /// <summary> /// uint32x4_t vmvnq_u32 (uint32x4_t a) /// A32: VMVN Qd, Qm /// A64: MVN Vd.16B, Vn.16B /// </summary> public static Vector128<uint> Not(Vector128<uint> value) => Not(value); /// <summary> /// uint64x2_t vmvnq_u64 (uint64x2_t a) /// A32: VMVN Qd, Qm /// A64: MVN Vd.16B, Vn.16B /// </summary> public static Vector128<ulong> Not(Vector128<ulong> value) => Not(value); /// <summary> /// uint8x8_t vorr_u8 (uint8x8_t a, uint8x8_t b) /// A32: VORR Dd, Dn, Dm /// A64: ORR Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> Or(Vector64<byte> left, Vector64<byte> right) => Or(left, right); /// <summary> /// float64x1_t vorr_f64 (float64x1_t a, float64x1_t b) /// A32: VORR Dd, Dn, Dm /// A64: ORR Vd.8B, Vn.8B, Vm.8B /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
/// </summary> public static Vector64<double> Or(Vector64<double> left, Vector64<double> right) => Or(left, right); /// <summary> /// int16x4_t vorr_s16 (int16x4_t a, int16x4_t b) /// A32: VORR Dd, Dn, Dm /// A64: ORR Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<short> Or(Vector64<short> left, Vector64<short> right) => Or(left, right); /// <summary> /// int32x2_t vorr_s32 (int32x2_t a, int32x2_t b) /// A32: VORR Dd, Dn, Dm /// A64: ORR Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<int> Or(Vector64<int> left, Vector64<int> right) => Or(left, right); /// <summary> /// int64x1_t vorr_s64 (int64x1_t a, int64x1_t b) /// A32: VORR Dd, Dn, Dm /// A64: ORR Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<long> Or(Vector64<long> left, Vector64<long> right) => Or(left, right); /// <summary> /// int8x8_t vorr_s8 (int8x8_t a, int8x8_t b) /// A32: VORR Dd, Dn, Dm /// A64: ORR Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> Or(Vector64<sbyte> left, Vector64<sbyte> right) => Or(left, right); /// <summary> /// float32x2_t vorr_f32 (float32x2_t a, float32x2_t b) /// A32: VORR Dd, Dn, Dm /// A64: ORR Vd.8B, Vn.8B, Vm.8B /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. /// </summary> public static Vector64<float> Or(Vector64<float> left, Vector64<float> right) => Or(left, right); /// <summary> /// uint16x4_t vorr_u16 (uint16x4_t a, uint16x4_t b) /// A32: VORR Dd, Dn, Dm /// A64: ORR Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<ushort> Or(Vector64<ushort> left, Vector64<ushort> right) => Or(left, right); /// <summary> /// uint32x2_t vorr_u32 (uint32x2_t a, uint32x2_t b) /// A32: VORR Dd, Dn, Dm /// A64: ORR Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<uint> Or(Vector64<uint> left, Vector64<uint> right) => Or(left, right); /// <summary> /// uint64x1_t vorr_u64 (uint64x1_t a, uint64x1_t b) /// A32: VORR Dd, Dn, Dm /// A64: ORR Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<ulong> Or(Vector64<ulong> left, Vector64<ulong> right) => Or(left, right); /// <summary> /// uint8x16_t vorrq_u8 (uint8x16_t a, uint8x16_t b) /// A32: VORR Qd, Qn, Qm /// A64: ORR Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> Or(Vector128<byte> left, Vector128<byte> right) => Or(left, right); /// <summary> /// float64x2_t vorrq_f64 (float64x2_t a, float64x2_t b) /// A32: VORR Qd, Qn, Qm /// A64: ORR Vd.16B, Vn.16B, Vm.16B /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
/// </summary> public static Vector128<double> Or(Vector128<double> left, Vector128<double> right) => Or(left, right); /// <summary> /// int16x8_t vorrq_s16 (int16x8_t a, int16x8_t b) /// A32: VORR Qd, Qn, Qm /// A64: ORR Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<short> Or(Vector128<short> left, Vector128<short> right) => Or(left, right); /// <summary> /// int32x4_t vorrq_s32 (int32x4_t a, int32x4_t b) /// A32: VORR Qd, Qn, Qm /// A64: ORR Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<int> Or(Vector128<int> left, Vector128<int> right) => Or(left, right); /// <summary> /// int64x2_t vorrq_s64 (int64x2_t a, int64x2_t b) /// A32: VORR Qd, Qn, Qm /// A64: ORR Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<long> Or(Vector128<long> left, Vector128<long> right) => Or(left, right); /// <summary> /// int8x16_t vorrq_s8 (int8x16_t a, int8x16_t b) /// A32: VORR Qd, Qn, Qm /// A64: ORR Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> Or(Vector128<sbyte> left, Vector128<sbyte> right) => Or(left, right); /// <summary> /// float32x4_t vorrq_f32 (float32x4_t a, float32x4_t b) /// A32: VORR Qd, Qn, Qm /// A64: ORR Vd.16B, Vn.16B, Vm.16B /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. /// </summary> public static Vector128<float> Or(Vector128<float> left, Vector128<float> right) => Or(left, right); /// <summary> /// uint16x8_t vorrq_u16 (uint16x8_t a, uint16x8_t b) /// A32: VORR Qd, Qn, Qm /// A64: ORR Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<ushort> Or(Vector128<ushort> left, Vector128<ushort> right) => Or(left, right); /// <summary> /// uint32x4_t vorrq_u32 (uint32x4_t a, uint32x4_t b) /// A32: VORR Qd, Qn, Qm /// A64: ORR Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<uint> Or(Vector128<uint> left, Vector128<uint> right) => Or(left, right); /// <summary> /// uint64x2_t vorrq_u64 (uint64x2_t a, uint64x2_t b) /// A32: VORR Qd, Qn, Qm /// A64: ORR Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<ulong> Or(Vector128<ulong> left, Vector128<ulong> right) => Or(left, right); /// <summary> /// uint8x8_t vorn_u8 (uint8x8_t a, uint8x8_t b) /// A32: VORN Dd, Dn, Dm /// A64: ORN Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> OrNot(Vector64<byte> left, Vector64<byte> right) => OrNot(left, right); /// <summary> /// float64x1_t vorn_f64 (float64x1_t a, float64x1_t b) /// A32: VORN Dd, Dn, Dm /// A64: ORN Vd.8B, Vn.8B, Vm.8B /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
/// </summary> public static Vector64<double> OrNot(Vector64<double> left, Vector64<double> right) => OrNot(left, right); /// <summary> /// int16x4_t vorn_s16 (int16x4_t a, int16x4_t b) /// A32: VORN Dd, Dn, Dm /// A64: ORN Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<short> OrNot(Vector64<short> left, Vector64<short> right) => OrNot(left, right); /// <summary> /// int32x2_t vorn_s32 (int32x2_t a, int32x2_t b) /// A32: VORN Dd, Dn, Dm /// A64: ORN Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<int> OrNot(Vector64<int> left, Vector64<int> right) => OrNot(left, right); /// <summary> /// int64x1_t vorn_s64 (int64x1_t a, int64x1_t b) /// A32: VORN Dd, Dn, Dm /// A64: ORN Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<long> OrNot(Vector64<long> left, Vector64<long> right) => OrNot(left, right); /// <summary> /// int8x8_t vorn_s8 (int8x8_t a, int8x8_t b) /// A32: VORN Dd, Dn, Dm /// A64: ORN Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> OrNot(Vector64<sbyte> left, Vector64<sbyte> right) => OrNot(left, right); /// <summary> /// float32x2_t vorn_f32 (float32x2_t a, float32x2_t b) /// A32: VORN Dd, Dn, Dm /// A64: ORN Vd.8B, Vn.8B, Vm.8B /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. /// </summary> public static Vector64<float> OrNot(Vector64<float> left, Vector64<float> right) => OrNot(left, right); /// <summary> /// uint16x4_t vorn_u16 (uint16x4_t a, uint16x4_t b) /// A32: VORN Dd, Dn, Dm /// A64: ORN Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<ushort> OrNot(Vector64<ushort> left, Vector64<ushort> right) => OrNot(left, right); /// <summary> /// uint32x2_t vorn_u32 (uint32x2_t a, uint32x2_t b) /// A32: VORN Dd, Dn, Dm /// A64: ORN Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<uint> OrNot(Vector64<uint> left, Vector64<uint> right) => OrNot(left, right); /// <summary> /// uint64x1_t vorn_u64 (uint64x1_t a, uint64x1_t b) /// A32: VORN Dd, Dn, Dm /// A64: ORN Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<ulong> OrNot(Vector64<ulong> left, Vector64<ulong> right) => OrNot(left, right); /// <summary> /// uint8x16_t vornq_u8 (uint8x16_t a, uint8x16_t b) /// A32: VORN Qd, Qn, Qm /// A64: ORN Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> OrNot(Vector128<byte> left, Vector128<byte> right) => OrNot(left, right); /// <summary> /// float64x2_t vornq_f64 (float64x2_t a, float64x2_t b) /// A32: VORN Qd, Qn, Qm /// A64: ORN Vd.16B, Vn.16B, Vm.16B /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
/// </summary> public static Vector128<double> OrNot(Vector128<double> left, Vector128<double> right) => OrNot(left, right); /// <summary> /// int16x8_t vornq_s16 (int16x8_t a, int16x8_t b) /// A32: VORN Qd, Qn, Qm /// A64: ORN Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<short> OrNot(Vector128<short> left, Vector128<short> right) => OrNot(left, right); /// <summary> /// int32x4_t vornq_s32 (int32x4_t a, int32x4_t b) /// A32: VORN Qd, Qn, Qm /// A64: ORN Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<int> OrNot(Vector128<int> left, Vector128<int> right) => OrNot(left, right); /// <summary> /// int64x2_t vornq_s64 (int64x2_t a, int64x2_t b) /// A32: VORN Qd, Qn, Qm /// A64: ORN Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<long> OrNot(Vector128<long> left, Vector128<long> right) => OrNot(left, right); /// <summary> /// int8x16_t vornq_s8 (int8x16_t a, int8x16_t b) /// A32: VORN Qd, Qn, Qm /// A64: ORN Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> OrNot(Vector128<sbyte> left, Vector128<sbyte> right) => OrNot(left, right); /// <summary> /// float32x4_t vornq_f32 (float32x4_t a, float32x4_t b) /// A32: VORN Qd, Qn, Qm /// A64: ORN Vd.16B, Vn.16B, Vm.16B /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. /// </summary> public static Vector128<float> OrNot(Vector128<float> left, Vector128<float> right) => OrNot(left, right); /// <summary> /// uint16x8_t vornq_u16 (uint16x8_t a, uint16x8_t b) /// A32: VORN Qd, Qn, Qm /// A64: ORN Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<ushort> OrNot(Vector128<ushort> left, Vector128<ushort> right) => OrNot(left, right); /// <summary> /// uint32x4_t vornq_u32 (uint32x4_t a, uint32x4_t b) /// A32: VORN Qd, Qn, Qm /// A64: ORN Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<uint> OrNot(Vector128<uint> left, Vector128<uint> right) => OrNot(left, right); /// <summary> /// uint64x2_t vornq_u64 (uint64x2_t a, uint64x2_t b) /// A32: VORN Qd, Qn, Qm /// A64: ORN Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<ulong> OrNot(Vector128<ulong> left, Vector128<ulong> right) => OrNot(left, right); /// <summary> /// poly8x8_t vmul_p8 (poly8x8_t a, poly8x8_t b) /// A32: VMUL.P8 Dd, Dn, Dm /// A64: PMUL Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> PolynomialMultiply(Vector64<byte> left, Vector64<byte> right) => PolynomialMultiply(left, right); /// <summary> /// poly8x8_t vmul_p8 (poly8x8_t a, poly8x8_t b) /// A32: VMUL.P8 Dd, Dn, Dm /// A64: PMUL Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> PolynomialMultiply(Vector64<sbyte> left, Vector64<sbyte> right) => PolynomialMultiply(left, right); /// <summary> /// poly8x16_t vmulq_p8 (poly8x16_t a, poly8x16_t b) /// A32: VMUL.P8 Qd, Qn, Qm /// A64: PMUL Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> PolynomialMultiply(Vector128<byte> left, Vector128<byte> right) => PolynomialMultiply(left, right); /// <summary> /// poly8x16_t vmulq_p8 (poly8x16_t a, poly8x16_t b) /// A32: VMUL.P8 Qd, Qn, Qm /// A64: PMUL Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> PolynomialMultiply(Vector128<sbyte> left, Vector128<sbyte> right) => PolynomialMultiply(left, right); /// <summary> /// poly16x8_t vmull_p8 (poly8x8_t a, poly8x8_t b) /// A32: VMULL.P8 Qd, Dn, Dm /// A64: PMULL Vd.16B, Vn.8B, Vm.8B /// </summary> public static Vector128<ushort> 
PolynomialMultiplyWideningLower(Vector64<byte> left, Vector64<byte> right) => PolynomialMultiplyWideningLower(left, right); /// <summary> /// poly16x8_t vmull_p8 (poly8x8_t a, poly8x8_t b) /// A32: VMULL.P8 Qd, Dn, Dm /// A64: PMULL Vd.16B, Vn.8B, Vm.8B /// </summary> public static Vector128<short> PolynomialMultiplyWideningLower(Vector64<sbyte> left, Vector64<sbyte> right) => PolynomialMultiplyWideningLower(left, right); /// <summary> /// poly16x8_t vmull_high_p8 (poly8x16_t a, poly8x16_t b) /// A32: VMULL.P8 Qd, Dn+1, Dm+1 /// A64: PMULL2 Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<ushort> PolynomialMultiplyWideningUpper(Vector128<byte> left, Vector128<byte> right) => PolynomialMultiplyWideningUpper(left, right); /// <summary> /// poly16x8_t vmull_high_p8 (poly8x16_t a, poly8x16_t b) /// A32: VMULL.P8 Qd, Dn+1, Dm+1 /// A64: PMULL2 Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<short> PolynomialMultiplyWideningUpper(Vector128<sbyte> left, Vector128<sbyte> right) => PolynomialMultiplyWideningUpper(left, right); /// <summary> /// uint8x8_t vcnt_u8 (uint8x8_t a) /// A32: VCNT.I8 Dd, Dm /// A64: CNT Vd.8B, Vn.8B /// </summary> public static Vector64<byte> PopCount(Vector64<byte> value) => PopCount(value); /// <summary> /// int8x8_t vcnt_s8 (int8x8_t a) /// A32: VCNT.I8 Dd, Dm /// A64: CNT Vd.8B, Vn.8B /// </summary> public static Vector64<sbyte> PopCount(Vector64<sbyte> value) => PopCount(value); /// <summary> /// uint8x16_t vcntq_u8 (uint8x16_t a) /// A32: VCNT.I8 Qd, Qm /// A64: CNT Vd.16B, Vn.16B /// </summary> public static Vector128<byte> PopCount(Vector128<byte> value) => PopCount(value); /// <summary> /// int8x16_t vcntq_s8 (int8x16_t a) /// A32: VCNT.I8 Qd, Qm /// A64: CNT Vd.16B, Vn.16B /// </summary> public static Vector128<sbyte> PopCount(Vector128<sbyte> value) => PopCount(value); /// <summary> /// float32x2_t vrecpe_f32 (float32x2_t a) /// A32: VRECPE.F32 Dd, Dm /// A64: FRECPE Vd.2S, Vn.2S /// </summary> public static Vector64<float> ReciprocalEstimate(Vector64<float> value) => ReciprocalEstimate(value); /// <summary> /// uint32x2_t vrecpe_u32 (uint32x2_t a) /// A32: VRECPE.U32 Dd, Dm /// A64: URECPE Vd.2S, Vn.2S /// </summary> public static Vector64<uint> ReciprocalEstimate(Vector64<uint> value) => ReciprocalEstimate(value); /// <summary> /// float32x4_t vrecpeq_f32 (float32x4_t a) /// A32: VRECPE.F32 Qd, Qm /// A64: FRECPE Vd.4S, Vn.4S /// </summary> public static Vector128<float> ReciprocalEstimate(Vector128<float> value) => ReciprocalEstimate(value); /// <summary> /// uint32x4_t vrecpeq_u32 (uint32x4_t a) /// A32: VRECPE.U32 Qd, Qm /// A64: URECPE Vd.4S, Vn.4S /// </summary> public static Vector128<uint> ReciprocalEstimate(Vector128<uint> value) => ReciprocalEstimate(value); /// <summary> /// float32x2_t vrsqrte_f32 (float32x2_t a) /// A32: VRSQRTE.F32 Dd, Dm /// A64: FRSQRTE Vd.2S, Vn.2S /// </summary> public static Vector64<float> ReciprocalSquareRootEstimate(Vector64<float> value) => ReciprocalSquareRootEstimate(value); /// <summary> /// uint32x2_t vrsqrte_u32 (uint32x2_t a) /// A32: VRSQRTE.U32 Dd, Dm /// A64: URSQRTE Vd.2S, Vn.2S /// </summary> public static Vector64<uint> ReciprocalSquareRootEstimate(Vector64<uint> value) => ReciprocalSquareRootEstimate(value); /// <summary> /// float32x4_t vrsqrteq_f32 (float32x4_t a) /// A32: VRSQRTE.F32 Qd, Qm /// A64: FRSQRTE Vd.4S, Vn.4S /// </summary> public static Vector128<float> ReciprocalSquareRootEstimate(Vector128<float> value) => ReciprocalSquareRootEstimate(value); /// <summary> /// 
uint32x4_t vrsqrteq_u32 (uint32x4_t a) /// A32: VRSQRTE.U32 Qd, Qm /// A64: URSQRTE Vd.4S, Vn.4S /// </summary> public static Vector128<uint> ReciprocalSquareRootEstimate(Vector128<uint> value) => ReciprocalSquareRootEstimate(value); /// <summary> /// float32x2_t vrsqrts_f32 (float32x2_t a, float32x2_t b) /// A32: VRSQRTS.F32 Dd, Dn, Dm /// A64: FRSQRTS Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<float> ReciprocalSquareRootStep(Vector64<float> left, Vector64<float> right) => ReciprocalSquareRootStep(left, right); /// <summary> /// float32x4_t vrsqrtsq_f32 (float32x4_t a, float32x4_t b) /// A32: VRSQRTS.F32 Qd, Qn, Qm /// A64: FRSQRTS Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<float> ReciprocalSquareRootStep(Vector128<float> left, Vector128<float> right) => ReciprocalSquareRootStep(left, right); /// <summary> /// float32x2_t vrecps_f32 (float32x2_t a, float32x2_t b) /// A32: VRECPS.F32 Dd, Dn, Dm /// A64: FRECPS Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<float> ReciprocalStep(Vector64<float> left, Vector64<float> right) => ReciprocalStep(left, right); /// <summary> /// float32x4_t vrecpsq_f32 (float32x4_t a, float32x4_t b) /// A32: VRECPS.F32 Qd, Qn, Qm /// A64: FRECPS Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<float> ReciprocalStep(Vector128<float> left, Vector128<float> right) => ReciprocalStep(left, right); /// <summary> /// int16x4_t vrev32_s16 (int16x4_t vec) /// A32: VREV32.16 Dd, Dm /// A64: REV32 Vd.4H, Vn.4H /// </summary> public static Vector64<int> ReverseElement16(Vector64<int> value) => ReverseElement16(value); /// <summary> /// int16x4_t vrev64_s16 (int16x4_t vec) /// A32: VREV64.16 Dd, Dm /// A64: REV64 Vd.4H, Vn.4H /// </summary> public static Vector64<long> ReverseElement16(Vector64<long> value) => ReverseElement16(value); /// <summary> /// uint16x4_t vrev32_u16 (uint16x4_t vec) /// A32: VREV32.16 Dd, Dm /// A64: REV32 Vd.4H, Vn.4H /// </summary> public static Vector64<uint> ReverseElement16(Vector64<uint> value) => ReverseElement16(value); /// <summary> /// uint16x4_t vrev64_u16 (uint16x4_t vec) /// A32: VREV64.16 Dd, Dm /// A64: REV64 Vd.4H, Vn.4H /// </summary> public static Vector64<ulong> ReverseElement16(Vector64<ulong> value) => ReverseElement16(value); /// <summary> /// int16x8_t vrev32q_s16 (int16x8_t vec) /// A32: VREV32.16 Qd, Qm /// A64: REV32 Vd.8H, Vn.8H /// </summary> public static Vector128<int> ReverseElement16(Vector128<int> value) => ReverseElement16(value); /// <summary> /// int16x8_t vrev64q_s16 (int16x8_t vec) /// A32: VREV64.16 Qd, Qm /// A64: REV64 Vd.8H, Vn.8H /// </summary> public static Vector128<long> ReverseElement16(Vector128<long> value) => ReverseElement16(value); /// <summary> /// uint16x8_t vrev32q_u16 (uint16x8_t vec) /// A32: VREV32.16 Qd, Qm /// A64: REV32 Vd.8H, Vn.8H /// </summary> public static Vector128<uint> ReverseElement16(Vector128<uint> value) => ReverseElement16(value); /// <summary> /// uint16x8_t vrev64q_u16 (uint16x8_t vec) /// A32: VREV64.16 Qd, Qm /// A64: REV64 Vd.8H, Vn.8H /// </summary> public static Vector128<ulong> ReverseElement16(Vector128<ulong> value) => ReverseElement16(value); /// <summary> /// int32x2_t vrev64_s32 (int32x2_t vec) /// A32: VREV64.32 Dd, Dm /// A64: REV64 Vd.2S, Vn.2S /// </summary> public static Vector64<long> ReverseElement32(Vector64<long> value) => ReverseElement32(value); /// <summary> /// uint32x2_t vrev64_u32 (uint32x2_t vec) /// A32: VREV64.32 Dd, Dm /// A64: REV64 Vd.2S, Vn.2S /// </summary> public static Vector64<ulong> 
ReverseElement32(Vector64<ulong> value) => ReverseElement32(value); /// <summary> /// int32x4_t vrev64q_s32 (int32x4_t vec) /// A32: VREV64.32 Qd, Qm /// A64: REV64 Vd.4S, Vn.4S /// </summary> public static Vector128<long> ReverseElement32(Vector128<long> value) => ReverseElement32(value); /// <summary> /// uint32x4_t vrev64q_u32 (uint32x4_t vec) /// A32: VREV64.32 Qd, Qm /// A64: REV64 Vd.4S, Vn.4S /// </summary> public static Vector128<ulong> ReverseElement32(Vector128<ulong> value) => ReverseElement32(value); /// <summary> /// int8x8_t vrev16_s8 (int8x8_t vec) /// A32: VREV16.8 Dd, Dm /// A64: REV16 Vd.8B, Vn.8B /// </summary> public static Vector64<short> ReverseElement8(Vector64<short> value) => ReverseElement8(value); /// <summary> /// int8x8_t vrev32_s8 (int8x8_t vec) /// A32: VREV32.8 Dd, Dm /// A64: REV32 Vd.8B, Vn.8B /// </summary> public static Vector64<int> ReverseElement8(Vector64<int> value) => ReverseElement8(value); /// <summary> /// int8x8_t vrev64_s8 (int8x8_t vec) /// A32: VREV64.8 Dd, Dm /// A64: REV64 Vd.8B, Vn.8B /// </summary> public static Vector64<long> ReverseElement8(Vector64<long> value) => ReverseElement8(value); /// <summary> /// uint8x8_t vrev16_u8 (uint8x8_t vec) /// A32: VREV16.8 Dd, Dm /// A64: REV16 Vd.8B, Vn.8B /// </summary> public static Vector64<ushort> ReverseElement8(Vector64<ushort> value) => ReverseElement8(value); /// <summary> /// uint8x8_t vrev32_u8 (uint8x8_t vec) /// A32: VREV32.8 Dd, Dm /// A64: REV32 Vd.8B, Vn.8B /// </summary> public static Vector64<uint> ReverseElement8(Vector64<uint> value) => ReverseElement8(value); /// <summary> /// uint8x8_t vrev64_u8 (uint8x8_t vec) /// A32: VREV64.8 Dd, Dm /// A64: REV64 Vd.8B, Vn.8B /// </summary> public static Vector64<ulong> ReverseElement8(Vector64<ulong> value) => ReverseElement8(value); /// <summary> /// int8x16_t vrev16q_s8 (int8x16_t vec) /// A32: VREV16.8 Qd, Qm /// A64: REV16 Vd.16B, Vn.16B /// </summary> public static Vector128<short> ReverseElement8(Vector128<short> value) => ReverseElement8(value); /// <summary> /// int8x16_t vrev32q_s8 (int8x16_t vec) /// A32: VREV32.8 Qd, Qm /// A64: REV32 Vd.16B, Vn.16B /// </summary> public static Vector128<int> ReverseElement8(Vector128<int> value) => ReverseElement8(value); /// <summary> /// int8x16_t vrev64q_s8 (int8x16_t vec) /// A32: VREV64.8 Qd, Qm /// A64: REV64 Vd.16B, Vn.16B /// </summary> public static Vector128<long> ReverseElement8(Vector128<long> value) => ReverseElement8(value); /// <summary> /// uint8x16_t vrev16q_u8 (uint8x16_t vec) /// A32: VREV16.8 Qd, Qm /// A64: REV16 Vd.16B, Vn.16B /// </summary> public static Vector128<ushort> ReverseElement8(Vector128<ushort> value) => ReverseElement8(value); /// <summary> /// uint8x16_t vrev32q_u8 (uint8x16_t vec) /// A32: VREV32.8 Qd, Qm /// A64: REV32 Vd.16B, Vn.16B /// </summary> public static Vector128<uint> ReverseElement8(Vector128<uint> value) => ReverseElement8(value); /// <summary> /// uint8x16_t vrev64q_u8 (uint8x16_t vec) /// A32: VREV64.8 Qd, Qm /// A64: REV64 Vd.16B, Vn.16B /// </summary> public static Vector128<ulong> ReverseElement8(Vector128<ulong> value) => ReverseElement8(value); /// <summary> /// float32x2_t vrnda_f32 (float32x2_t a) /// A32: VRINTA.F32 Dd, Dm /// A64: FRINTA Vd.2S, Vn.2S /// </summary> public static Vector64<float> RoundAwayFromZero(Vector64<float> value) => RoundAwayFromZero(value); /// <summary> /// float32x4_t vrndaq_f32 (float32x4_t a) /// A32: VRINTA.F32 Qd, Qm /// A64: FRINTA Vd.4S, Vn.4S /// </summary> public static Vector128<float> 
RoundAwayFromZero(Vector128<float> value) => RoundAwayFromZero(value); /// <summary> /// float64x1_t vrnda_f64 (float64x1_t a) /// A32: VRINTA.F64 Dd, Dm /// A64: FRINTA Dd, Dn /// </summary> public static Vector64<double> RoundAwayFromZeroScalar(Vector64<double> value) => RoundAwayFromZeroScalar(value); /// <summary> /// float32_t vrndas_f32 (float32_t a) /// A32: VRINTA.F32 Sd, Sm /// A64: FRINTA Sd, Sn /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. /// </summary> public static Vector64<float> RoundAwayFromZeroScalar(Vector64<float> value) => RoundAwayFromZeroScalar(value); /// <summary> /// float32x2_t vrndn_f32 (float32x2_t a) /// A32: VRINTN.F32 Dd, Dm /// A64: FRINTN Vd.2S, Vn.2S /// </summary> public static Vector64<float> RoundToNearest(Vector64<float> value) => RoundToNearest(value); /// <summary> /// float32x4_t vrndnq_f32 (float32x4_t a) /// A32: VRINTN.F32 Qd, Qm /// A64: FRINTN Vd.4S, Vn.4S /// </summary> public static Vector128<float> RoundToNearest(Vector128<float> value) => RoundToNearest(value); /// <summary> /// float64x1_t vrndn_f64 (float64x1_t a) /// A32: VRINTN.F64 Dd, Dm /// A64: FRINTN Dd, Dn /// </summary> public static Vector64<double> RoundToNearestScalar(Vector64<double> value) => RoundToNearestScalar(value); /// <summary> /// float32_t vrndns_f32 (float32_t a) /// A32: VRINTN.F32 Sd, Sm /// A64: FRINTN Sd, Sn /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. /// </summary> public static Vector64<float> RoundToNearestScalar(Vector64<float> value) => RoundToNearestScalar(value); /// <summary> /// float32x2_t vrndm_f32 (float32x2_t a) /// A32: VRINTM.F32 Dd, Dm /// A64: FRINTM Vd.2S, Vn.2S /// </summary> public static Vector64<float> RoundToNegativeInfinity(Vector64<float> value) => RoundToNegativeInfinity(value); /// <summary> /// float32x4_t vrndmq_f32 (float32x4_t a) /// A32: VRINTM.F32 Qd, Qm /// A64: FRINTM Vd.4S, Vn.4S /// </summary> public static Vector128<float> RoundToNegativeInfinity(Vector128<float> value) => RoundToNegativeInfinity(value); /// <summary> /// float64x1_t vrndm_f64 (float64x1_t a) /// A32: VRINTM.F64 Dd, Dm /// A64: FRINTM Dd, Dn /// </summary> public static Vector64<double> RoundToNegativeInfinityScalar(Vector64<double> value) => RoundToNegativeInfinityScalar(value); /// <summary> /// float32_t vrndms_f32 (float32_t a) /// A32: VRINTM.F32 Sd, Sm /// A64: FRINTM Sd, Sn /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
/// </summary> public static Vector64<float> RoundToNegativeInfinityScalar(Vector64<float> value) => RoundToNegativeInfinityScalar(value); /// <summary> /// float32x2_t vrndp_f32 (float32x2_t a) /// A32: VRINTP.F32 Dd, Dm /// A64: FRINTP Vd.2S, Vn.2S /// </summary> public static Vector64<float> RoundToPositiveInfinity(Vector64<float> value) => RoundToPositiveInfinity(value); /// <summary> /// float32x4_t vrndpq_f32 (float32x4_t a) /// A32: VRINTP.F32 Qd, Qm /// A64: FRINTP Vd.4S, Vn.4S /// </summary> public static Vector128<float> RoundToPositiveInfinity(Vector128<float> value) => RoundToPositiveInfinity(value); /// <summary> /// float64x1_t vrndp_f64 (float64x1_t a) /// A32: VRINTP.F64 Dd, Dm /// A64: FRINTP Dd, Dn /// </summary> public static Vector64<double> RoundToPositiveInfinityScalar(Vector64<double> value) => RoundToPositiveInfinityScalar(value); /// <summary> /// float32_t vrndps_f32 (float32_t a) /// A32: VRINTP.F32 Sd, Sm /// A64: FRINTP Sd, Sn /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. /// </summary> public static Vector64<float> RoundToPositiveInfinityScalar(Vector64<float> value) => RoundToPositiveInfinityScalar(value); /// <summary> /// float32x2_t vrnd_f32 (float32x2_t a) /// A32: VRINTZ.F32 Dd, Dm /// A64: FRINTZ Vd.2S, Vn.2S /// </summary> public static Vector64<float> RoundToZero(Vector64<float> value) => RoundToZero(value); /// <summary> /// float32x4_t vrndq_f32 (float32x4_t a) /// A32: VRINTZ.F32 Qd, Qm /// A64: FRINTZ Vd.4S, Vn.4S /// </summary> public static Vector128<float> RoundToZero(Vector128<float> value) => RoundToZero(value); /// <summary> /// float64x1_t vrnd_f64 (float64x1_t a) /// A32: VRINTZ.F64 Dd, Dm /// A64: FRINTZ Dd, Dn /// </summary> public static Vector64<double> RoundToZeroScalar(Vector64<double> value) => RoundToZeroScalar(value); /// <summary> /// float32_t vrnds_f32 (float32_t a) /// A32: VRINTZ.F32 Sd, Sm /// A64: FRINTZ Sd, Sn /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
/// </summary> public static Vector64<float> RoundToZeroScalar(Vector64<float> value) => RoundToZeroScalar(value); /// <summary> /// int16x4_t vshl_s16 (int16x4_t a, int16x4_t b) /// A32: VSHL.S16 Dd, Dn, Dm /// A64: SSHL Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<short> ShiftArithmetic(Vector64<short> value, Vector64<short> count) => ShiftArithmetic(value, count); /// <summary> /// int32x2_t vshl_s32 (int32x2_t a, int32x2_t b) /// A32: VSHL.S32 Dd, Dn, Dm /// A64: SSHL Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<int> ShiftArithmetic(Vector64<int> value, Vector64<int> count) => ShiftArithmetic(value, count); /// <summary> /// int8x8_t vshl_s8 (int8x8_t a, int8x8_t b) /// A32: VSHL.S8 Dd, Dn, Dm /// A64: SSHL Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> ShiftArithmetic(Vector64<sbyte> value, Vector64<sbyte> count) => ShiftArithmetic(value, count); /// <summary> /// int16x8_t vshlq_s16 (int16x8_t a, int16x8_t b) /// A32: VSHL.S16 Qd, Qn, Qm /// A64: SSHL Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<short> ShiftArithmetic(Vector128<short> value, Vector128<short> count) => ShiftArithmetic(value, count); /// <summary> /// int32x4_t vshlq_s32 (int32x4_t a, int32x4_t b) /// A32: VSHL.S32 Qd, Qn, Qm /// A64: SSHL Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<int> ShiftArithmetic(Vector128<int> value, Vector128<int> count) => ShiftArithmetic(value, count); /// <summary> /// int64x2_t vshlq_s64 (int64x2_t a, int64x2_t b) /// A32: VSHL.S64 Qd, Qn, Qm /// A64: SSHL Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<long> ShiftArithmetic(Vector128<long> value, Vector128<long> count) => ShiftArithmetic(value, count); /// <summary> /// int8x16_t vshlq_s8 (int8x16_t a, int8x16_t b) /// A32: VSHL.S8 Qd, Qn, Qm /// A64: SSHL Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> ShiftArithmetic(Vector128<sbyte> value, Vector128<sbyte> count) => ShiftArithmetic(value, count); /// <summary> /// int16x4_t vrshl_s16 (int16x4_t a, int16x4_t b) /// A32: VRSHL.S16 Dd, Dn, Dm /// A64: SRSHL Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<short> ShiftArithmeticRounded(Vector64<short> value, Vector64<short> count) => ShiftArithmeticRounded(value, count); /// <summary> /// int32x2_t vrshl_s32 (int32x2_t a, int32x2_t b) /// A32: VRSHL.S32 Dd, Dn, Dm /// A64: SRSHL Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<int> ShiftArithmeticRounded(Vector64<int> value, Vector64<int> count) => ShiftArithmeticRounded(value, count); /// <summary> /// int8x8_t vrshl_s8 (int8x8_t a, int8x8_t b) /// A32: VRSHL.S8 Dd, Dn, Dm /// A64: SRSHL Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> ShiftArithmeticRounded(Vector64<sbyte> value, Vector64<sbyte> count) => ShiftArithmeticRounded(value, count); /// <summary> /// int16x8_t vrshlq_s16 (int16x8_t a, int16x8_t b) /// A32: VRSHL.S16 Qd, Qn, Qm /// A64: SRSHL Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<short> ShiftArithmeticRounded(Vector128<short> value, Vector128<short> count) => ShiftArithmeticRounded(value, count); /// <summary> /// int32x4_t vrshlq_s32 (int32x4_t a, int32x4_t b) /// A32: VRSHL.S32 Qd, Qn, Qm /// A64: SRSHL Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<int> ShiftArithmeticRounded(Vector128<int> value, Vector128<int> count) => ShiftArithmeticRounded(value, count); /// <summary> /// int64x2_t vrshlq_s64 (int64x2_t a, int64x2_t b) /// A32: VRSHL.S64 Qd, Qn, Qm /// A64: SRSHL Vd.2D, Vn.2D, Vm.2D /// </summary> 
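// Usage sketch (illustrative only, not an API member; variable names are hypothetical):
// ShiftArithmetic and its Rounded/Saturate variants take a per-element *signed* count,
// where a positive count shifts left and a negative count shifts right with sign-fill.
//
//     Vector64<short> v = Vector64.Create((short)8, 8, 8, 8);
//     Vector64<short> c = Vector64.Create((short)1, -1, 2, -2);
//     Vector64<short> r = AdvSimd.ShiftArithmetic(v, c);   // { 16, 4, 32, 2 }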
public static Vector128<long> ShiftArithmeticRounded(Vector128<long> value, Vector128<long> count) => ShiftArithmeticRounded(value, count); /// <summary> /// int8x16_t vrshlq_s8 (int8x16_t a, int8x16_t b) /// A32: VRSHL.S8 Qd, Qn, Qm /// A64: SRSHL Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> ShiftArithmeticRounded(Vector128<sbyte> value, Vector128<sbyte> count) => ShiftArithmeticRounded(value, count); /// <summary> /// int16x4_t vqrshl_s16 (int16x4_t a, int16x4_t b) /// A32: VQRSHL.S16 Dd, Dn, Dm /// A64: SQRSHL Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<short> ShiftArithmeticRoundedSaturate(Vector64<short> value, Vector64<short> count) => ShiftArithmeticRoundedSaturate(value, count); /// <summary> /// int32x2_t vqrshl_s32 (int32x2_t a, int32x2_t b) /// A32: VQRSHL.S32 Dd, Dn, Dm /// A64: SQRSHL Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<int> ShiftArithmeticRoundedSaturate(Vector64<int> value, Vector64<int> count) => ShiftArithmeticRoundedSaturate(value, count); /// <summary> /// int8x8_t vqrshl_s8 (int8x8_t a, int8x8_t b) /// A32: VQRSHL.S8 Dd, Dn, Dm /// A64: SQRSHL Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> ShiftArithmeticRoundedSaturate(Vector64<sbyte> value, Vector64<sbyte> count) => ShiftArithmeticRoundedSaturate(value, count); /// <summary> /// int16x8_t vqrshlq_s16 (int16x8_t a, int16x8_t b) /// A32: VQRSHL.S16 Qd, Qn, Qm /// A64: SQRSHL Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<short> ShiftArithmeticRoundedSaturate(Vector128<short> value, Vector128<short> count) => ShiftArithmeticRoundedSaturate(value, count); /// <summary> /// int32x4_t vqrshlq_s32 (int32x4_t a, int32x4_t b) /// A32: VQRSHL.S32 Qd, Qn, Qm /// A64: SQRSHL Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<int> ShiftArithmeticRoundedSaturate(Vector128<int> value, Vector128<int> count) => ShiftArithmeticRoundedSaturate(value, count); /// <summary> /// int64x2_t vqrshlq_s64 (int64x2_t a, int64x2_t b) /// A32: VQRSHL.S64 Qd, Qn, Qm /// A64: SQRSHL Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<long> ShiftArithmeticRoundedSaturate(Vector128<long> value, Vector128<long> count) => ShiftArithmeticRoundedSaturate(value, count); /// <summary> /// int8x16_t vqrshlq_s8 (int8x16_t a, int8x16_t b) /// A32: VQRSHL.S8 Qd, Qn, Qm /// A64: SQRSHL Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> ShiftArithmeticRoundedSaturate(Vector128<sbyte> value, Vector128<sbyte> count) => ShiftArithmeticRoundedSaturate(value, count); /// <summary> /// int64x1_t vqrshl_s64 (int64x1_t a, int64x1_t b) /// A32: VQRSHL.S64 Dd, Dn, Dm /// A64: SQRSHL Dd, Dn, Dm /// </summary> public static Vector64<long> ShiftArithmeticRoundedSaturateScalar(Vector64<long> value, Vector64<long> count) => ShiftArithmeticRoundedSaturateScalar(value, count); /// <summary> /// int64x1_t vrshl_s64 (int64x1_t a, int64x1_t b) /// A32: VRSHL.S64 Dd, Dn, Dm /// A64: SRSHL Dd, Dn, Dm /// </summary> public static Vector64<long> ShiftArithmeticRoundedScalar(Vector64<long> value, Vector64<long> count) => ShiftArithmeticRoundedScalar(value, count); /// <summary> /// int16x4_t vqshl_s16 (int16x4_t a, int16x4_t b) /// A32: VQSHL.S16 Dd, Dn, Dm /// A64: SQSHL Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<short> ShiftArithmeticSaturate(Vector64<short> value, Vector64<short> count) => ShiftArithmeticSaturate(value, count); /// <summary> /// int32x2_t vqshl_s32 (int32x2_t a, int32x2_t b) /// A32: VQSHL.S32 Dd, Dn, Dm /// A64: SQSHL Vd.2S, Vn.2S, 
Vm.2S /// </summary> public static Vector64<int> ShiftArithmeticSaturate(Vector64<int> value, Vector64<int> count) => ShiftArithmeticSaturate(value, count); /// <summary> /// int8x8_t vqshl_s8 (int8x8_t a, int8x8_t b) /// A32: VQSHL.S8 Dd, Dn, Dm /// A64: SQSHL Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> ShiftArithmeticSaturate(Vector64<sbyte> value, Vector64<sbyte> count) => ShiftArithmeticSaturate(value, count); /// <summary> /// int16x8_t vqshlq_s16 (int16x8_t a, int16x8_t b) /// A32: VQSHL.S16 Qd, Qn, Qm /// A64: SQSHL Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<short> ShiftArithmeticSaturate(Vector128<short> value, Vector128<short> count) => ShiftArithmeticSaturate(value, count); /// <summary> /// int32x4_t vqshlq_s32 (int32x4_t a, int32x4_t b) /// A32: VQSHL.S32 Qd, Qn, Qm /// A64: SQSHL Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<int> ShiftArithmeticSaturate(Vector128<int> value, Vector128<int> count) => ShiftArithmeticSaturate(value, count); /// <summary> /// int64x2_t vqshlq_s64 (int64x2_t a, int64x2_t b) /// A32: VQSHL.S64 Qd, Qn, Qm /// A64: SQSHL Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<long> ShiftArithmeticSaturate(Vector128<long> value, Vector128<long> count) => ShiftArithmeticSaturate(value, count); /// <summary> /// int8x16_t vqshlq_s8 (int8x16_t a, int8x16_t b) /// A32: VQSHL.S8 Qd, Qn, Qm /// A64: SQSHL Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> ShiftArithmeticSaturate(Vector128<sbyte> value, Vector128<sbyte> count) => ShiftArithmeticSaturate(value, count); /// <summary> /// int64x1_t vqshl_s64 (int64x1_t a, int64x1_t b) /// A32: VQSHL.S64 Dd, Dn, Dm /// A64: SQSHL Dd, Dn, Dm /// </summary> public static Vector64<long> ShiftArithmeticSaturateScalar(Vector64<long> value, Vector64<long> count) => ShiftArithmeticSaturateScalar(value, count); /// <summary> /// int64x1_t vshl_s64 (int64x1_t a, int64x1_t b) /// A32: VSHL.S64 Dd, Dn, Dm /// A64: SSHL Dd, Dn, Dm /// </summary> public static Vector64<long> ShiftArithmeticScalar(Vector64<long> value, Vector64<long> count) => ShiftArithmeticScalar(value, count); /// <summary> /// uint8x8_t vsli_n_u8(uint8x8_t a, uint8x8_t b, __builtin_constant_p(n)) /// A32: VSLI.8 Dd, Dm, #n /// A64: SLI Vd.8B, Vn.8B, #n /// </summary> public static Vector64<byte> ShiftLeftAndInsert(Vector64<byte> left, Vector64<byte> right, byte shift) => ShiftLeftAndInsert(left, right, shift); /// <summary> /// int16x4_t vsli_n_s16(int16x4_t a, int16x4_t b, __builtin_constant_p(n)) /// A32: VSLI.16 Dd, Dm, #n /// A64: SLI Vd.4H, Vn.4H, #n /// </summary> public static Vector64<short> ShiftLeftAndInsert(Vector64<short> left, Vector64<short> right, byte shift) => ShiftLeftAndInsert(left, right, shift); /// <summary> /// int32x2_t vsli_n_s32(int32x2_t a, int32x2_t b, __builtin_constant_p(n)) /// A32: VSLI.32 Dd, Dm, #n /// A64: SLI Vd.2S, Vn.2S, #n /// </summary> public static Vector64<int> ShiftLeftAndInsert(Vector64<int> left, Vector64<int> right, byte shift) => ShiftLeftAndInsert(left, right, shift); /// <summary> /// int8x8_t vsli_n_s8(int8x8_t a, int8x8_t b, __builtin_constant_p(n)) /// A32: VSLI.8 Dd, Dm, #n /// A64: SLI Vd.8B, Vn.8B, #n /// </summary> public static Vector64<sbyte> ShiftLeftAndInsert(Vector64<sbyte> left, Vector64<sbyte> right, byte shift) => ShiftLeftAndInsert(left, right, shift); /// <summary> /// uint16x4_t vsli_n_u16(uint16x4_t a, uint16x4_t b, __builtin_constant_p(n)) /// A32: VSLI.16 Dd, Dm, #n /// A64: SLI Vd.4H, Vn.4H, #n /// </summary> public 
static Vector64<ushort> ShiftLeftAndInsert(Vector64<ushort> left, Vector64<ushort> right, byte shift) => ShiftLeftAndInsert(left, right, shift); /// <summary> /// uint32x2_t vsli_n_u32(uint32x2_t a, uint32x2_t b, __builtin_constant_p(n)) /// A32: VSLI.32 Dd, Dm, #n /// A64: SLI Vd.2S, Vn.2S, #n /// </summary> public static Vector64<uint> ShiftLeftAndInsert(Vector64<uint> left, Vector64<uint> right, byte shift) => ShiftLeftAndInsert(left, right, shift); /// <summary> /// uint8x16_t vsliq_n_u8(uint8x16_t a, uint8x16_t b, __builtin_constant_p(n)) /// A32: VSLI.8 Qd, Qm, #n /// A64: SLI Vd.16B, Vn.16B, #n /// </summary> public static Vector128<byte> ShiftLeftAndInsert(Vector128<byte> left, Vector128<byte> right, byte shift) => ShiftLeftAndInsert(left, right, shift); /// <summary> /// int16x8_t vsliq_n_s16(int16x8_t a, int16x8_t b, __builtin_constant_p(n)) /// A32: VSLI.16 Qd, Qm, #n /// A64: SLI Vd.8H, Vn.8H, #n /// </summary> public static Vector128<short> ShiftLeftAndInsert(Vector128<short> left, Vector128<short> right, byte shift) => ShiftLeftAndInsert(left, right, shift); /// <summary> /// int32x4_t vsliq_n_s32(int32x4_t a, int32x4_t b, __builtin_constant_p(n)) /// A32: VSLI.32 Qd, Qm, #n /// A64: SLI Vd.4S, Vn.4S, #n /// </summary> public static Vector128<int> ShiftLeftAndInsert(Vector128<int> left, Vector128<int> right, byte shift) => ShiftLeftAndInsert(left, right, shift); /// <summary> /// int64x2_t vsliq_n_s64(int64x2_t a, int64x2_t b, __builtin_constant_p(n)) /// A32: VSLI.64 Qd, Qm, #n /// A64: SLI Vd.2D, Vn.2D, #n /// </summary> public static Vector128<long> ShiftLeftAndInsert(Vector128<long> left, Vector128<long> right, byte shift) => ShiftLeftAndInsert(left, right, shift); /// <summary> /// int8x16_t vsliq_n_s8(int8x16_t a, int8x16_t b, __builtin_constant_p(n)) /// A32: VSLI.8 Qd, Qm, #n /// A64: SLI Vd.16B, Vn.16B, #n /// </summary> public static Vector128<sbyte> ShiftLeftAndInsert(Vector128<sbyte> left, Vector128<sbyte> right, byte shift) => ShiftLeftAndInsert(left, right, shift); /// <summary> /// uint16x8_t vsliq_n_u16(uint16x8_t a, uint16x8_t b, __builtin_constant_p(n)) /// A32: VSLI.16 Qd, Qm, #n /// A64: SLI Vd.8H, Vn.8H, #n /// </summary> public static Vector128<ushort> ShiftLeftAndInsert(Vector128<ushort> left, Vector128<ushort> right, byte shift) => ShiftLeftAndInsert(left, right, shift); /// <summary> /// uint32x4_t vsliq_n_u32(uint32x4_t a, uint32x4_t b, __builtin_constant_p(n)) /// A32: VSLI.32 Qd, Qm, #n /// A64: SLI Vd.4S, Vn.4S, #n /// </summary> public static Vector128<uint> ShiftLeftAndInsert(Vector128<uint> left, Vector128<uint> right, byte shift) => ShiftLeftAndInsert(left, right, shift); /// <summary> /// uint64x2_t vsliq_n_u64(uint64x2_t a, uint64x2_t b, __builtin_constant_p(n)) /// A32: VSLI.64 Qd, Qm, #n /// A64: SLI Vd.2D, Vn.2D, #n /// </summary> public static Vector128<ulong> ShiftLeftAndInsert(Vector128<ulong> left, Vector128<ulong> right, byte shift) => ShiftLeftAndInsert(left, right, shift); /// <summary> /// int64_t vslid_n_s64(int64_t a, int64_t b, __builtin_constant_p(n)) /// A32: VSLI.64 Dd, Dm, #n /// A64: SLI Dd, Dn, #n /// </summary> public static Vector64<long> ShiftLeftAndInsertScalar(Vector64<long> left, Vector64<long> right, byte shift) => ShiftLeftAndInsertScalar(left, right, shift); /// <summary> /// uint64_t vslid_n_u64(uint64_t a, uint64_t b, __builtin_constant_p(n)) /// A32: VSLI.64 Dd, Dm, #n /// A64: SLI Dd, Dn, #n /// </summary> public static Vector64<ulong> ShiftLeftAndInsertScalar(Vector64<ulong> left, Vector64<ulong> right, byte 
shift) => ShiftLeftAndInsertScalar(left, right, shift); /// <summary> /// uint8x8_t vshl_n_u8 (uint8x8_t a, const int n) /// A32: VSHL.I8 Dd, Dm, #n /// A64: SHL Vd.8B, Vn.8B, #n /// </summary> public static Vector64<byte> ShiftLeftLogical(Vector64<byte> value, byte count) => ShiftLeftLogical(value, count); /// <summary> /// int16x4_t vshl_n_s16 (int16x4_t a, const int n) /// A32: VSHL.I16 Dd, Dm, #n /// A64: SHL Vd.4H, Vn.4H, #n /// </summary> public static Vector64<short> ShiftLeftLogical(Vector64<short> value, byte count) => ShiftLeftLogical(value, count); /// <summary> /// int32x2_t vshl_n_s32 (int32x2_t a, const int n) /// A32: VSHL.I32 Dd, Dm, #n /// A64: SHL Vd.2S, Vn.2S, #n /// </summary> public static Vector64<int> ShiftLeftLogical(Vector64<int> value, byte count) => ShiftLeftLogical(value, count); /// <summary> /// int8x8_t vshl_n_s8 (int8x8_t a, const int n) /// A32: VSHL.I8 Dd, Dm, #n /// A64: SHL Vd.8B, Vn.8B, #n /// </summary> public static Vector64<sbyte> ShiftLeftLogical(Vector64<sbyte> value, byte count) => ShiftLeftLogical(value, count); /// <summary> /// uint16x4_t vshl_n_u16 (uint16x4_t a, const int n) /// A32: VSHL.I16 Dd, Dm, #n /// A64: SHL Vd.4H, Vn.4H, #n /// </summary> public static Vector64<ushort> ShiftLeftLogical(Vector64<ushort> value, byte count) => ShiftLeftLogical(value, count); /// <summary> /// uint32x2_t vshl_n_u32 (uint32x2_t a, const int n) /// A32: VSHL.I32 Dd, Dm, #n /// A64: SHL Vd.2S, Vn.2S, #n /// </summary> public static Vector64<uint> ShiftLeftLogical(Vector64<uint> value, byte count) => ShiftLeftLogical(value, count); /// <summary> /// uint8x16_t vshlq_n_u8 (uint8x16_t a, const int n) /// A32: VSHL.I8 Qd, Qm, #n /// A64: SHL Vd.16B, Vn.16B, #n /// </summary> public static Vector128<byte> ShiftLeftLogical(Vector128<byte> value, byte count) => ShiftLeftLogical(value, count); /// <summary> /// int16x8_t vshlq_n_s16 (int16x8_t a, const int n) /// A32: VSHL.I16 Qd, Qm, #n /// A64: SHL Vd.8H, Vn.8H, #n /// </summary> public static Vector128<short> ShiftLeftLogical(Vector128<short> value, byte count) => ShiftLeftLogical(value, count); /// <summary> /// int32x4_t vshlq_n_s32 (int32x4_t a, const int n) /// A32: VSHL.I32 Qd, Qm, #n /// A64: SHL Vd.4S, Vn.4S, #n /// </summary> public static Vector128<int> ShiftLeftLogical(Vector128<int> value, byte count) => ShiftLeftLogical(value, count); /// <summary> /// int64x2_t vshlq_n_s64 (int64x2_t a, const int n) /// A32: VSHL.I64 Qd, Qm, #n /// A64: SHL Vd.2D, Vn.2D, #n /// </summary> public static Vector128<long> ShiftLeftLogical(Vector128<long> value, byte count) => ShiftLeftLogical(value, count); /// <summary> /// int8x16_t vshlq_n_s8 (int8x16_t a, const int n) /// A32: VSHL.I8 Qd, Qm, #n /// A64: SHL Vd.16B, Vn.16B, #n /// </summary> public static Vector128<sbyte> ShiftLeftLogical(Vector128<sbyte> value, byte count) => ShiftLeftLogical(value, count); /// <summary> /// uint16x8_t vshlq_n_u16 (uint16x8_t a, const int n) /// A32: VSHL.I16 Qd, Qm, #n /// A64: SHL Vd.8H, Vn.8H, #n /// </summary> public static Vector128<ushort> ShiftLeftLogical(Vector128<ushort> value, byte count) => ShiftLeftLogical(value, count); /// <summary> /// uint32x4_t vshlq_n_u32 (uint32x4_t a, const int n) /// A32: VSHL.I32 Qd, Qm, #n /// A64: SHL Vd.4S, Vn.4S, #n /// </summary> public static Vector128<uint> ShiftLeftLogical(Vector128<uint> value, byte count) => ShiftLeftLogical(value, count); /// <summary> /// uint64x2_t vshlq_n_u64 (uint64x2_t a, const int n) /// A32: VSHL.I64 Qd, Qm, #n /// A64: SHL Vd.2D, Vn.2D, #n /// </summary> public static Vector128<ulong> ShiftLeftLogical(Vector128<ulong> value, byte count) => ShiftLeftLogical(value, count); /// <summary> /// uint8x8_t vqshl_n_u8 (uint8x8_t a, const int n) /// A32: VQSHL.U8 Dd, Dm, #n /// A64: UQSHL Vd.8B, Vn.8B, #n ///
</summary> public static Vector64<byte> ShiftLeftLogicalSaturate(Vector64<byte> value, byte count) => ShiftLeftLogicalSaturate(value, count); /// <summary> /// int16x4_t vqshl_n_s16 (int16x4_t a, const int n) /// A32: VQSHL.S16 Dd, Dm, #n /// A64: SQSHL Vd.4H, Vn.4H, #n /// </summary> public static Vector64<short> ShiftLeftLogicalSaturate(Vector64<short> value, byte count) => ShiftLeftLogicalSaturate(value, count); /// <summary> /// int32x2_t vqshl_n_s32 (int32x2_t a, const int n) /// A32: VQSHL.S32 Dd, Dm, #n /// A64: SQSHL Vd.2S, Vn.2S, #n /// </summary> public static Vector64<int> ShiftLeftLogicalSaturate(Vector64<int> value, byte count) => ShiftLeftLogicalSaturate(value, count); /// <summary> /// int8x8_t vqshl_n_s8 (int8x8_t a, const int n) /// A32: VQSHL.S8 Dd, Dm, #n /// A64: SQSHL Vd.8B, Vn.8B, #n /// </summary> public static Vector64<sbyte> ShiftLeftLogicalSaturate(Vector64<sbyte> value, byte count) => ShiftLeftLogicalSaturate(value, count); /// <summary> /// uint16x4_t vqshl_n_u16 (uint16x4_t a, const int n) /// A32: VQSHL.U16 Dd, Dm, #n /// A64: UQSHL Vd.4H, Vn.4H, #n /// </summary> public static Vector64<ushort> ShiftLeftLogicalSaturate(Vector64<ushort> value, byte count) => ShiftLeftLogicalSaturate(value, count); /// <summary> /// uint32x2_t vqshl_n_u32 (uint32x2_t a, const int n) /// A32: VQSHL.U32 Dd, Dm, #n /// A64: UQSHL Vd.2S, Vn.2S, #n /// </summary> public static Vector64<uint> ShiftLeftLogicalSaturate(Vector64<uint> value, byte count) => ShiftLeftLogicalSaturate(value, count); /// <summary> /// uint8x16_t vqshlq_n_u8 (uint8x16_t a, const int n) /// A32: VQSHL.U8 Qd, Qm, #n /// A64: UQSHL Vd.16B, Vn.16B, #n /// </summary> public static Vector128<byte> ShiftLeftLogicalSaturate(Vector128<byte> value, byte count) => ShiftLeftLogicalSaturate(value, count); /// <summary> /// int16x8_t vqshlq_n_s16 (int16x8_t a, const int n) /// A32: VQSHL.S16 Qd, Qm, #n /// A64: SQSHL Vd.8H, Vn.8H, #n /// </summary> public static Vector128<short> ShiftLeftLogicalSaturate(Vector128<short> value, byte count) => ShiftLeftLogicalSaturate(value, count); /// <summary> /// int32x4_t vqshlq_n_s32 (int32x4_t a, const int n) /// A32: VQSHL.S32 Qd, Qm, #n /// A64: SQSHL Vd.4S, Vn.4S, #n /// </summary> public static Vector128<int> ShiftLeftLogicalSaturate(Vector128<int> value, byte count) => ShiftLeftLogicalSaturate(value, count); /// <summary> /// int64x2_t vqshlq_n_s64 (int64x2_t a, const int n) /// A32: VQSHL.S64 Qd, Qm, #n /// A64: SQSHL Vd.2D, Vn.2D, #n /// </summary> public static Vector128<long> ShiftLeftLogicalSaturate(Vector128<long> value, byte count) => ShiftLeftLogicalSaturate(value, count); /// <summary> /// int8x16_t vqshlq_n_s8 (int8x16_t a, const int n) /// A32: VQSHL.S8 Qd, Qm, #n /// A64: SQSHL Vd.16B, Vn.16B, #n /// </summary> public static Vector128<sbyte> ShiftLeftLogicalSaturate(Vector128<sbyte> value, byte count) => ShiftLeftLogicalSaturate(value, count); /// <summary> /// uint16x8_t vqshlq_n_u16 (uint16x8_t a, const int n) /// A32: VQSHL.U16 Qd, Qm, #n /// A64: UQSHL Vd.8H, Vn.8H, #n /// </summary> public static Vector128<ushort> ShiftLeftLogicalSaturate(Vector128<ushort> value, byte count) => ShiftLeftLogicalSaturate(value, count); /// <summary> /// uint32x4_t vqshlq_n_u32 (uint32x4_t a, const int n) /// A32: VQSHL.U32 Qd, Qm, #n /// A64: UQSHL Vd.4S, Vn.4S, #n /// </summary> public static Vector128<uint> ShiftLeftLogicalSaturate(Vector128<uint> value, byte count) => ShiftLeftLogicalSaturate(value, count); /// <summary> /// uint64x2_t vqshlq_n_u64 (uint64x2_t a, const int n) /// 
A32: VQSHL.U64 Qd, Qm, #n /// A64: UQSHL Vd.2D, Vn.2D, #n /// </summary> public static Vector128<ulong> ShiftLeftLogicalSaturate(Vector128<ulong> value, byte count) => ShiftLeftLogicalSaturate(value, count); /// <summary> /// int64x1_t vqshl_n_s64 (int64x1_t a, const int n) /// A32: VQSHL.S64 Dd, Dm, #n /// A64: SQSHL Dd, Dn, #n /// </summary> public static Vector64<long> ShiftLeftLogicalSaturateScalar(Vector64<long> value, byte count) => ShiftLeftLogicalSaturateScalar(value, count); /// <summary> /// uint64x1_t vqshl_n_u64 (uint64x1_t a, const int n) /// A32: VQSHL.U64 Dd, Dm, #n /// A64: UQSHL Dd, Dn, #n /// </summary> public static Vector64<ulong> ShiftLeftLogicalSaturateScalar(Vector64<ulong> value, byte count) => ShiftLeftLogicalSaturateScalar(value, count); /// <summary> /// uint16x4_t vqshlu_n_s16 (int16x4_t a, const int n) /// A32: VQSHLU.S16 Dd, Dm, #n /// A64: SQSHLU Vd.4H, Vn.4H, #n /// </summary> public static Vector64<ushort> ShiftLeftLogicalSaturateUnsigned(Vector64<short> value, byte count) => ShiftLeftLogicalSaturateUnsigned(value, count); /// <summary> /// uint32x2_t vqshlu_n_s32 (int32x2_t a, const int n) /// A32: VQSHLU.S32 Dd, Dm, #n /// A64: SQSHLU Vd.2S, Vn.2S, #n /// </summary> public static Vector64<uint> ShiftLeftLogicalSaturateUnsigned(Vector64<int> value, byte count) => ShiftLeftLogicalSaturateUnsigned(value, count); /// <summary> /// uint8x8_t vqshlu_n_s8 (int8x8_t a, const int n) /// A32: VQSHLU.S8 Dd, Dm, #n /// A64: SQSHLU Vd.8B, Vn.8B, #n /// </summary> public static Vector64<byte> ShiftLeftLogicalSaturateUnsigned(Vector64<sbyte> value, byte count) => ShiftLeftLogicalSaturateUnsigned(value, count); /// <summary> /// uint16x8_t vqshluq_n_s16 (int16x8_t a, const int n) /// A32: VQSHLU.S16 Qd, Qm, #n /// A64: SQSHLU Vd.8H, Vn.8H, #n /// </summary> public static Vector128<ushort> ShiftLeftLogicalSaturateUnsigned(Vector128<short> value, byte count) => ShiftLeftLogicalSaturateUnsigned(value, count); /// <summary> /// uint32x4_t vqshluq_n_s32 (int32x4_t a, const int n) /// A32: VQSHLU.S32 Qd, Qm, #n /// A64: SQSHLU Vd.4S, Vn.4S, #n /// </summary> public static Vector128<uint> ShiftLeftLogicalSaturateUnsigned(Vector128<int> value, byte count) => ShiftLeftLogicalSaturateUnsigned(value, count); /// <summary> /// uint64x2_t vqshluq_n_s64 (int64x2_t a, const int n) /// A32: VQSHLU.S64 Qd, Qm, #n /// A64: SQSHLU Vd.2D, Vn.2D, #n /// </summary> public static Vector128<ulong> ShiftLeftLogicalSaturateUnsigned(Vector128<long> value, byte count) => ShiftLeftLogicalSaturateUnsigned(value, count); /// <summary> /// uint8x16_t vqshluq_n_s8 (int8x16_t a, const int n) /// A32: VQSHLU.S8 Qd, Qm, #n /// A64: SQSHLU Vd.16B, Vn.16B, #n /// </summary> public static Vector128<byte> ShiftLeftLogicalSaturateUnsigned(Vector128<sbyte> value, byte count) => ShiftLeftLogicalSaturateUnsigned(value, count); /// <summary> /// uint64x1_t vqshlu_n_s64 (int64x1_t a, const int n) /// A32: VQSHLU.S64 Dd, Dm, #n /// A64: SQSHLU Dd, Dn, #n /// </summary> public static Vector64<ulong> ShiftLeftLogicalSaturateUnsignedScalar(Vector64<long> value, byte count) => ShiftLeftLogicalSaturateUnsignedScalar(value, count); /// <summary> /// int64x1_t vshl_n_s64 (int64x1_t a, const int n) /// A32: VSHL.I64 Dd, Dm, #n /// A64: SHL Dd, Dn, #n /// </summary> public static Vector64<long> ShiftLeftLogicalScalar(Vector64<long> value, byte count) => ShiftLeftLogicalScalar(value, count); /// <summary> /// uint64x1_t vshl_n_u64 (uint64x1_t a, const int n) /// A32: VSHL.I64 Dd, Dm, #n /// A64: SHL Dd, Dn, #n /// </summary> 
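// Usage sketch (illustrative only; values and names below are hypothetical): unlike
// ShiftLeftLogical, the Saturate variants clamp to the element type's range instead of
// discarding overflowed bits, and SaturateUnsigned additionally clamps negative inputs
// to zero while producing the unsigned element type.
//
//     Vector64<sbyte> v = Vector64.Create((sbyte)100, -100, 1, 0, 0, 0, 0, 0);
//     AdvSimd.ShiftLeftLogical(v, 1);                  // wraps:  { -56, 56, 2, 0, ... }
//     AdvSimd.ShiftLeftLogicalSaturate(v, 1);          // clamps: { 127, -128, 2, 0, ... }
//     AdvSimd.ShiftLeftLogicalSaturateUnsigned(v, 1);  // bytes:  { 200, 0, 2, 0, ... }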
public static Vector64<ulong> ShiftLeftLogicalScalar(Vector64<ulong> value, byte count) => ShiftLeftLogicalScalar(value, count); /// <summary> /// uint16x8_t vshll_n_u8 (uint8x8_t a, const int n) /// A32: VSHLL.U8 Qd, Dm, #n /// A64: USHLL Vd.8H, Vn.8B, #n /// </summary> public static Vector128<ushort> ShiftLeftLogicalWideningLower(Vector64<byte> value, byte count) => ShiftLeftLogicalWideningLower(value, count); /// <summary> /// int32x4_t vshll_n_s16 (int16x4_t a, const int n) /// A32: VSHLL.S16 Qd, Dm, #n /// A64: SSHLL Vd.4S, Vn.4H, #n /// </summary> public static Vector128<int> ShiftLeftLogicalWideningLower(Vector64<short> value, byte count) => ShiftLeftLogicalWideningLower(value, count); /// <summary> /// int64x2_t vshll_n_s32 (int32x2_t a, const int n) /// A32: VSHLL.S32 Qd, Dm, #n /// A64: SSHLL Vd.2D, Vn.2S, #n /// </summary> public static Vector128<long> ShiftLeftLogicalWideningLower(Vector64<int> value, byte count) => ShiftLeftLogicalWideningLower(value, count); /// <summary> /// int16x8_t vshll_n_s8 (int8x8_t a, const int n) /// A32: VSHLL.S8 Qd, Dm, #n /// A64: SSHLL Vd.8H, Vn.8B, #n /// </summary> public static Vector128<short> ShiftLeftLogicalWideningLower(Vector64<sbyte> value, byte count) => ShiftLeftLogicalWideningLower(value, count); /// <summary> /// uint32x4_t vshll_n_u16 (uint16x4_t a, const int n) /// A32: VSHLL.U16 Qd, Dm, #n /// A64: USHLL Vd.4S, Vn.4H, #n /// </summary> public static Vector128<uint> ShiftLeftLogicalWideningLower(Vector64<ushort> value, byte count) => ShiftLeftLogicalWideningLower(value, count); /// <summary> /// uint64x2_t vshll_n_u32 (uint32x2_t a, const int n) /// A32: VSHLL.U32 Qd, Dm, #n /// A64: USHLL Vd.2D, Vn.2S, #n /// </summary> public static Vector128<ulong> ShiftLeftLogicalWideningLower(Vector64<uint> value, byte count) => ShiftLeftLogicalWideningLower(value, count); /// <summary> /// uint16x8_t vshll_high_n_u8 (uint8x16_t a, const int n) /// A32: VSHLL.U8 Qd, Dm+1, #n /// A64: USHLL2 Vd.8H, Vn.16B, #n /// </summary> public static Vector128<ushort> ShiftLeftLogicalWideningUpper(Vector128<byte> value, byte count) => ShiftLeftLogicalWideningUpper(value, count); /// <summary> /// int32x4_t vshll_high_n_s16 (int16x8_t a, const int n) /// A32: VSHLL.S16 Qd, Dm+1, #n /// A64: SSHLL2 Vd.4S, Vn.8H, #n /// </summary> public static Vector128<int> ShiftLeftLogicalWideningUpper(Vector128<short> value, byte count) => ShiftLeftLogicalWideningUpper(value, count); /// <summary> /// int64x2_t vshll_high_n_s32 (int32x4_t a, const int n) /// A32: VSHLL.S32 Qd, Dm+1, #n /// A64: SSHLL2 Vd.2D, Vn.4S, #n /// </summary> public static Vector128<long> ShiftLeftLogicalWideningUpper(Vector128<int> value, byte count) => ShiftLeftLogicalWideningUpper(value, count); /// <summary> /// int16x8_t vshll_high_n_s8 (int8x16_t a, const int n) /// A32: VSHLL.S8 Qd, Dm+1, #n /// A64: SSHLL2 Vd.8H, Vn.16B, #n /// </summary> public static Vector128<short> ShiftLeftLogicalWideningUpper(Vector128<sbyte> value, byte count) => ShiftLeftLogicalWideningUpper(value, count); /// <summary> /// uint32x4_t vshll_high_n_u16 (uint16x8_t a, const int n) /// A32: VSHLL.U16 Qd, Dm+1, #n /// A64: USHLL2 Vd.4S, Vn.8H, #n /// </summary> public static Vector128<uint> ShiftLeftLogicalWideningUpper(Vector128<ushort> value, byte count) => ShiftLeftLogicalWideningUpper(value, count); /// <summary> /// uint64x2_t vshll_high_n_u32 (uint32x4_t a, const int n) /// A32: VSHLL.U32 Qd, Dm+1, #n /// A64: USHLL2 Vd.2D, Vn.4S, #n /// </summary> public static Vector128<ulong> 
ShiftLeftLogicalWideningUpper(Vector128<uint> value, byte count) => ShiftLeftLogicalWideningUpper(value, count); /// <summary> /// uint8x8_t vshl_u8 (uint8x8_t a, int8x8_t b) /// A32: VSHL.U8 Dd, Dn, Dm /// A64: USHL Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> ShiftLogical(Vector64<byte> value, Vector64<sbyte> count) => ShiftLogical(value, count); /// <summary> /// uint16x4_t vshl_u16 (uint16x4_t a, int16x4_t b) /// A32: VSHL.U16 Dd, Dn, Dm /// A64: USHL Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<short> ShiftLogical(Vector64<short> value, Vector64<short> count) => ShiftLogical(value, count); /// <summary> /// uint32x2_t vshl_u32 (uint32x2_t a, int32x2_t b) /// A32: VSHL.U32 Dd, Dn, Dm /// A64: USHL Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<int> ShiftLogical(Vector64<int> value, Vector64<int> count) => ShiftLogical(value, count); /// <summary> /// uint8x8_t vshl_u8 (uint8x8_t a, int8x8_t b) /// A32: VSHL.U8 Dd, Dn, Dm /// A64: USHL Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> ShiftLogical(Vector64<sbyte> value, Vector64<sbyte> count) => ShiftLogical(value, count); /// <summary> /// uint16x4_t vshl_u16 (uint16x4_t a, int16x4_t b) /// A32: VSHL.U16 Dd, Dn, Dm /// A64: USHL Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<ushort> ShiftLogical(Vector64<ushort> value, Vector64<short> count) => ShiftLogical(value, count); /// <summary> /// uint32x2_t vshl_u32 (uint32x2_t a, int32x2_t b) /// A32: VSHL.U32 Dd, Dn, Dm /// A64: USHL Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<uint> ShiftLogical(Vector64<uint> value, Vector64<int> count) => ShiftLogical(value, count); /// <summary> /// uint8x16_t vshlq_u8 (uint8x16_t a, int8x16_t b) /// A32: VSHL.U8 Qd, Qn, Qm /// A64: USHL Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> ShiftLogical(Vector128<byte> value, Vector128<sbyte> count) => ShiftLogical(value, count); /// <summary> /// uint16x8_t vshlq_u16 (uint16x8_t a, int16x8_t b) /// A32: VSHL.U16 Qd, Qn, Qm /// A64: USHL Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<short> ShiftLogical(Vector128<short> value, Vector128<short> count) => ShiftLogical(value, count); /// <summary> /// uint32x4_t vshlq_u32 (uint32x4_t a, int32x4_t b) /// A32: VSHL.U32 Qd, Qn, Qm /// A64: USHL Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<int> ShiftLogical(Vector128<int> value, Vector128<int> count) => ShiftLogical(value, count); /// <summary> /// uint64x2_t vshlq_u64 (uint64x2_t a, int64x2_t b) /// A32: VSHL.U64 Qd, Qn, Qm /// A64: USHL Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<long> ShiftLogical(Vector128<long> value, Vector128<long> count) => ShiftLogical(value, count); /// <summary> /// uint8x16_t vshlq_u8 (uint8x16_t a, int8x16_t b) /// A32: VSHL.U8 Qd, Qn, Qm /// A64: USHL Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> ShiftLogical(Vector128<sbyte> value, Vector128<sbyte> count) => ShiftLogical(value, count); /// <summary> /// uint16x8_t vshlq_u16 (uint16x8_t a, int16x8_t b) /// A32: VSHL.U16 Qd, Qn, Qm /// A64: USHL Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<ushort> ShiftLogical(Vector128<ushort> value, Vector128<short> count) => ShiftLogical(value, count); /// <summary> /// uint32x4_t vshlq_u32 (uint32x4_t a, int32x4_t b) /// A32: VSHL.U32 Qd, Qn, Qm /// A64: USHL Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<uint> ShiftLogical(Vector128<uint> value, Vector128<int> count) => ShiftLogical(value, count); /// 
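// Usage sketch (illustrative only; hypothetical values): with a negative per-element
// count, ShiftLogical shifts right with zero-fill, whereas ShiftArithmetic shifts right
// with sign-fill -- the difference between USHL and SSHL when the count is negative.
//
//     Vector64<sbyte> v = Vector64.Create((sbyte)-64);   // all lanes 0xC0
//     Vector64<sbyte> c = Vector64.Create((sbyte)-1);    // shift right by one
//     AdvSimd.ShiftLogical(v, c);     // zero-fill: 0x60 ( 96) per lane
//     AdvSimd.ShiftArithmetic(v, c);  // sign-fill: 0xE0 (-32) per lane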
<summary> /// uint64x2_t vshlq_u64 (uint64x2_t a, int64x2_t b) /// A32: VSHL.U64 Qd, Qn, Qm /// A64: USHL Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<ulong> ShiftLogical(Vector128<ulong> value, Vector128<long> count) => ShiftLogical(value, count); /// <summary> /// uint8x8_t vrshl_u8 (uint8x8_t a, int8x8_t b) /// A32: VRSHL.U8 Dd, Dn, Dm /// A64: URSHL Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> ShiftLogicalRounded(Vector64<byte> value, Vector64<sbyte> count) => ShiftLogicalRounded(value, count); /// <summary> /// uint16x4_t vrshl_u16 (uint16x4_t a, int16x4_t b) /// A32: VRSHL.U16 Dd, Dn, Dm /// A64: URSHL Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<short> ShiftLogicalRounded(Vector64<short> value, Vector64<short> count) => ShiftLogicalRounded(value, count); /// <summary> /// uint32x2_t vrshl_u32 (uint32x2_t a, int32x2_t b) /// A32: VRSHL.U32 Dd, Dn, Dm /// A64: URSHL Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<int> ShiftLogicalRounded(Vector64<int> value, Vector64<int> count) => ShiftLogicalRounded(value, count); /// <summary> /// uint8x8_t vrshl_u8 (uint8x8_t a, int8x8_t b) /// A32: VRSHL.U8 Dd, Dn, Dm /// A64: URSHL Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> ShiftLogicalRounded(Vector64<sbyte> value, Vector64<sbyte> count) => ShiftLogicalRounded(value, count); /// <summary> /// uint16x4_t vrshl_u16 (uint16x4_t a, int16x4_t b) /// A32: VRSHL.U16 Dd, Dn, Dm /// A64: URSHL Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<ushort> ShiftLogicalRounded(Vector64<ushort> value, Vector64<short> count) => ShiftLogicalRounded(value, count); /// <summary> /// uint32x2_t vrshl_u32 (uint32x2_t a, int32x2_t b) /// A32: VRSHL.U32 Dd, Dn, Dm /// A64: URSHL Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<uint> ShiftLogicalRounded(Vector64<uint> value, Vector64<int> count) => ShiftLogicalRounded(value, count); /// <summary> /// uint8x16_t vrshlq_u8 (uint8x16_t a, int8x16_t b) /// A32: VRSHL.U8 Qd, Qn, Qm /// A64: URSHL Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> ShiftLogicalRounded(Vector128<byte> value, Vector128<sbyte> count) => ShiftLogicalRounded(value, count); /// <summary> /// uint16x8_t vrshlq_u16 (uint16x8_t a, int16x8_t b) /// A32: VRSHL.U16 Qd, Qn, Qm /// A64: URSHL Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<short> ShiftLogicalRounded(Vector128<short> value, Vector128<short> count) => ShiftLogicalRounded(value, count); /// <summary> /// uint32x4_t vrshlq_u32 (uint32x4_t a, int32x4_t b) /// A32: VRSHL.U32 Qd, Qn, Qm /// A64: URSHL Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<int> ShiftLogicalRounded(Vector128<int> value, Vector128<int> count) => ShiftLogicalRounded(value, count); /// <summary> /// uint64x2_t vrshlq_u64 (uint64x2_t a, int64x2_t b) /// A32: VRSHL.U64 Qd, Qn, Qm /// A64: URSHL Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<long> ShiftLogicalRounded(Vector128<long> value, Vector128<long> count) => ShiftLogicalRounded(value, count); /// <summary> /// uint8x16_t vrshlq_u8 (uint8x16_t a, int8x16_t b) /// A32: VRSHL.U8 Qd, Qn, Qm /// A64: URSHL Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> ShiftLogicalRounded(Vector128<sbyte> value, Vector128<sbyte> count) => ShiftLogicalRounded(value, count); /// <summary> /// uint16x8_t vrshlq_u16 (uint16x8_t a, int16x8_t b) /// A32: VRSHL.U16 Qd, Qn, Qm /// A64: URSHL Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<ushort> 
ShiftLogicalRounded(Vector128<ushort> value, Vector128<short> count) => ShiftLogicalRounded(value, count); /// <summary> /// uint32x4_t vrshlq_u32 (uint32x4_t a, int32x4_t b) /// A32: VRSHL.U32 Qd, Qn, Qm /// A64: URSHL Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<uint> ShiftLogicalRounded(Vector128<uint> value, Vector128<int> count) => ShiftLogicalRounded(value, count); /// <summary> /// uint64x2_t vrshlq_u64 (uint64x2_t a, int64x2_t b) /// A32: VRSHL.U64 Qd, Qn, Qm /// A64: URSHL Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<ulong> ShiftLogicalRounded(Vector128<ulong> value, Vector128<long> count) => ShiftLogicalRounded(value, count); /// <summary> /// uint8x8_t vqrshl_u8 (uint8x8_t a, int8x8_t b) /// A32: VQRSHL.U8 Dd, Dn, Dm /// A64: UQRSHL Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> ShiftLogicalRoundedSaturate(Vector64<byte> value, Vector64<sbyte> count) => ShiftLogicalRoundedSaturate(value, count); /// <summary> /// uint16x4_t vqrshl_u16 (uint16x4_t a, int16x4_t b) /// A32: VQRSHL.U16 Dd, Dn, Dm /// A64: UQRSHL Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<short> ShiftLogicalRoundedSaturate(Vector64<short> value, Vector64<short> count) => ShiftLogicalRoundedSaturate(value, count); /// <summary> /// uint32x2_t vqrshl_u32 (uint32x2_t a, int32x2_t b) /// A32: VQRSHL.U32 Dd, Dn, Dm /// A64: UQRSHL Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<int> ShiftLogicalRoundedSaturate(Vector64<int> value, Vector64<int> count) => ShiftLogicalRoundedSaturate(value, count); /// <summary> /// uint8x8_t vqrshl_u8 (uint8x8_t a, int8x8_t b) /// A32: VQRSHL.U8 Dd, Dn, Dm /// A64: UQRSHL Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> ShiftLogicalRoundedSaturate(Vector64<sbyte> value, Vector64<sbyte> count) => ShiftLogicalRoundedSaturate(value, count); /// <summary> /// uint16x4_t vqrshl_u16 (uint16x4_t a, int16x4_t b) /// A32: VQRSHL.U16 Dd, Dn, Dm /// A64: UQRSHL Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<ushort> ShiftLogicalRoundedSaturate(Vector64<ushort> value, Vector64<short> count) => ShiftLogicalRoundedSaturate(value, count); /// <summary> /// uint32x2_t vqrshl_u32 (uint32x2_t a, int32x2_t b) /// A32: VQRSHL.U32 Dd, Dn, Dm /// A64: UQRSHL Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<uint> ShiftLogicalRoundedSaturate(Vector64<uint> value, Vector64<int> count) => ShiftLogicalRoundedSaturate(value, count); /// <summary> /// uint8x16_t vqrshlq_u8 (uint8x16_t a, int8x16_t b) /// A32: VQRSHL.U8 Qd, Qn, Qm /// A64: UQRSHL Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> ShiftLogicalRoundedSaturate(Vector128<byte> value, Vector128<sbyte> count) => ShiftLogicalRoundedSaturate(value, count); /// <summary> /// uint16x8_t vqrshlq_u16 (uint16x8_t a, int16x8_t b) /// A32: VQRSHL.U16 Qd, Qn, Qm /// A64: UQRSHL Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<short> ShiftLogicalRoundedSaturate(Vector128<short> value, Vector128<short> count) => ShiftLogicalRoundedSaturate(value, count); /// <summary> /// uint32x4_t vqrshlq_u32 (uint32x4_t a, int32x4_t b) /// A32: VQRSHL.U32 Qd, Qn, Qm /// A64: UQRSHL Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<int> ShiftLogicalRoundedSaturate(Vector128<int> value, Vector128<int> count) => ShiftLogicalRoundedSaturate(value, count); /// <summary> /// uint64x2_t vqrshlq_u64 (uint64x2_t a, int64x2_t b) /// A32: VQRSHL.U64 Qd, Qn, Qm /// A64: UQRSHL Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<long> 
ShiftLogicalRoundedSaturate(Vector128<long> value, Vector128<long> count) => ShiftLogicalRoundedSaturate(value, count); /// <summary> /// uint8x16_t vqrshlq_u8 (uint8x16_t a, int8x16_t b) /// A32: VQRSHL.U8 Qd, Qn, Qm /// A64: UQRSHL Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> ShiftLogicalRoundedSaturate(Vector128<sbyte> value, Vector128<sbyte> count) => ShiftLogicalRoundedSaturate(value, count); /// <summary> /// uint16x8_t vqrshlq_u16 (uint16x8_t a, int16x8_t b) /// A32: VQRSHL.U16 Qd, Qn, Qm /// A64: UQRSHL Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<ushort> ShiftLogicalRoundedSaturate(Vector128<ushort> value, Vector128<short> count) => ShiftLogicalRoundedSaturate(value, count); /// <summary> /// uint32x4_t vqrshlq_u32 (uint32x4_t a, int32x4_t b) /// A32: VQRSHL.U32 Qd, Qn, Qm /// A64: UQRSHL Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<uint> ShiftLogicalRoundedSaturate(Vector128<uint> value, Vector128<int> count) => ShiftLogicalRoundedSaturate(value, count); /// <summary> /// uint64x2_t vqrshlq_u64 (uint64x2_t a, int64x2_t b) /// A32: VQRSHL.U64 Qd, Qn, Qm /// A64: UQRSHL Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<ulong> ShiftLogicalRoundedSaturate(Vector128<ulong> value, Vector128<long> count) => ShiftLogicalRoundedSaturate(value, count); /// <summary> /// uint64x1_t vqrshl_u64 (uint64x1_t a, int64x1_t b) /// A32: VQRSHL.U64 Dd, Dn, Dm /// A64: UQRSHL Dd, Dn, Dm /// </summary> public static Vector64<long> ShiftLogicalRoundedSaturateScalar(Vector64<long> value, Vector64<long> count) => ShiftLogicalRoundedSaturateScalar(value, count); /// <summary> /// uint64x1_t vqrshl_u64 (uint64x1_t a, int64x1_t b) /// A32: VQRSHL.U64 Dd, Dn, Dm /// A64: UQRSHL Dd, Dn, Dm /// </summary> public static Vector64<ulong> ShiftLogicalRoundedSaturateScalar(Vector64<ulong> value, Vector64<long> count) => ShiftLogicalRoundedSaturateScalar(value, count); /// <summary> /// uint64x1_t vrshl_u64 (uint64x1_t a, int64x1_t b) /// A32: VRSHL.U64 Dd, Dn, Dm /// A64: URSHL Dd, Dn, Dm /// </summary> public static Vector64<long> ShiftLogicalRoundedScalar(Vector64<long> value, Vector64<long> count) => ShiftLogicalRoundedScalar(value, count); /// <summary> /// uint64x1_t vrshl_u64 (uint64x1_t a, int64x1_t b) /// A32: VRSHL.U64 Dd, Dn, Dm /// A64: URSHL Dd, Dn, Dm /// </summary> public static Vector64<ulong> ShiftLogicalRoundedScalar(Vector64<ulong> value, Vector64<long> count) => ShiftLogicalRoundedScalar(value, count); /// <summary> /// uint8x8_t vqshl_u8 (uint8x8_t a, int8x8_t b) /// A32: VQSHL.U8 Dd, Dn, Dm /// A64: UQSHL Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> ShiftLogicalSaturate(Vector64<byte> value, Vector64<sbyte> count) => ShiftLogicalSaturate(value, count); /// <summary> /// uint16x4_t vqshl_u16 (uint16x4_t a, int16x4_t b) /// A32: VQSHL.U16 Dd, Dn, Dm /// A64: UQSHL Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<short> ShiftLogicalSaturate(Vector64<short> value, Vector64<short> count) => ShiftLogicalSaturate(value, count); /// <summary> /// uint32x2_t vqshl_u32 (uint32x2_t a, int32x2_t b) /// A32: VQSHL.U32 Dd, Dn, Dm /// A64: UQSHL Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<int> ShiftLogicalSaturate(Vector64<int> value, Vector64<int> count) => ShiftLogicalSaturate(value, count); /// <summary> /// uint8x8_t vqshl_u8 (uint8x8_t a, int8x8_t b) /// A32: VQSHL.U8 Dd, Dn, Dm /// A64: UQSHL Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> ShiftLogicalSaturate(Vector64<sbyte> value, 
Vector64<sbyte> count) => ShiftLogicalSaturate(value, count); /// <summary> /// uint16x4_t vqshl_u16 (uint16x4_t a, int16x4_t b) /// A32: VQSHL.U16 Dd, Dn, Dm /// A64: UQSHL Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<ushort> ShiftLogicalSaturate(Vector64<ushort> value, Vector64<short> count) => ShiftLogicalSaturate(value, count); /// <summary> /// uint32x2_t vqshl_u32 (uint32x2_t a, int32x2_t b) /// A32: VQSHL.U32 Dd, Dn, Dm /// A64: UQSHL Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<uint> ShiftLogicalSaturate(Vector64<uint> value, Vector64<int> count) => ShiftLogicalSaturate(value, count); /// <summary> /// uint8x16_t vqshlq_u8 (uint8x16_t a, int8x16_t b) /// A32: VQSHL.U8 Qd, Qn, Qm /// A64: UQSHL Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> ShiftLogicalSaturate(Vector128<byte> value, Vector128<sbyte> count) => ShiftLogicalSaturate(value, count); /// <summary> /// uint16x8_t vqshlq_u16 (uint16x8_t a, int16x8_t b) /// A32: VQSHL.U16 Qd, Qn, Qm /// A64: UQSHL Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<short> ShiftLogicalSaturate(Vector128<short> value, Vector128<short> count) => ShiftLogicalSaturate(value, count); /// <summary> /// uint32x4_t vqshlq_u32 (uint32x4_t a, int32x4_t b) /// A32: VQSHL.U32 Qd, Qn, Qm /// A64: UQSHL Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<int> ShiftLogicalSaturate(Vector128<int> value, Vector128<int> count) => ShiftLogicalSaturate(value, count); /// <summary> /// uint64x2_t vqshlq_u64 (uint64x2_t a, int64x2_t b) /// A32: VQSHL.U64 Qd, Qn, Qm /// A64: UQSHL Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<long> ShiftLogicalSaturate(Vector128<long> value, Vector128<long> count) => ShiftLogicalSaturate(value, count); /// <summary> /// uint8x16_t vqshlq_u8 (uint8x16_t a, int8x16_t b) /// A32: VQSHL.U8 Qd, Qn, Qm /// A64: UQSHL Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> ShiftLogicalSaturate(Vector128<sbyte> value, Vector128<sbyte> count) => ShiftLogicalSaturate(value, count); /// <summary> /// uint16x8_t vqshlq_u16 (uint16x8_t a, int16x8_t b) /// A32: VQSHL.U16 Qd, Qn, Qm /// A64: UQSHL Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<ushort> ShiftLogicalSaturate(Vector128<ushort> value, Vector128<short> count) => ShiftLogicalSaturate(value, count); /// <summary> /// uint32x4_t vqshlq_u32 (uint32x4_t a, int32x4_t b) /// A32: VQSHL.U32 Qd, Qn, Qm /// A64: UQSHL Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<uint> ShiftLogicalSaturate(Vector128<uint> value, Vector128<int> count) => ShiftLogicalSaturate(value, count); /// <summary> /// uint64x2_t vqshlq_u64 (uint64x2_t a, int64x2_t b) /// A32: VQSHL.U64 Qd, Qn, Qm /// A64: UQSHL Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<ulong> ShiftLogicalSaturate(Vector128<ulong> value, Vector128<long> count) => ShiftLogicalSaturate(value, count); /// <summary> /// uint64x1_t vqshl_u64 (uint64x1_t a, int64x1_t b) /// A32: VQSHL.U64 Dd, Dn, Dm /// A64: UQSHL Dd, Dn, Dm /// </summary> public static Vector64<long> ShiftLogicalSaturateScalar(Vector64<long> value, Vector64<long> count) => ShiftLogicalSaturateScalar(value, count); /// <summary> /// uint64x1_t vqshl_u64 (uint64x1_t a, int64x1_t b) /// A32: VQSHL.U64 Dd, Dn, Dm /// A64: UQSHL Dd, Dn, Dm /// </summary> public static Vector64<ulong> ShiftLogicalSaturateScalar(Vector64<ulong> value, Vector64<long> count) => ShiftLogicalSaturateScalar(value, count); /// <summary> /// uint64x1_t vshl_u64 (uint64x1_t a, int64x1_t b) /// A32: 
VSHL.U64 Dd, Dn, Dm /// A64: USHL Dd, Dn, Dm /// </summary> public static Vector64<long> ShiftLogicalScalar(Vector64<long> value, Vector64<long> count) => ShiftLogicalScalar(value, count); /// <summary> /// uint64x1_t vshl_u64 (uint64x1_t a, int64x1_t b) /// A32: VSHL.U64 Dd, Dn, Dm /// A64: USHL Dd, Dn, Dm /// </summary> public static Vector64<ulong> ShiftLogicalScalar(Vector64<ulong> value, Vector64<long> count) => ShiftLogicalScalar(value, count); /// <summary> /// uint8x8_t vsri_n_u8(uint8x8_t a, uint8x8_t b, __builtin_constant_p(n)) /// A32: VSRI.8 Dd, Dm, #n /// A64: SRI Vd.8B, Vn.8B, #n /// </summary> public static Vector64<byte> ShiftRightAndInsert(Vector64<byte> left, Vector64<byte> right, byte shift) => ShiftRightAndInsert(left, right, shift); /// <summary> /// int16x4_t vsri_n_s16(int16x4_t a, int16x4_t b, __builtin_constant_p(n)) /// A32: VSRI.16 Dd, Dm, #n /// A64: SRI Vd.4H, Vn.4H, #n /// </summary> public static Vector64<short> ShiftRightAndInsert(Vector64<short> left, Vector64<short> right, byte shift) => ShiftRightAndInsert(left, right, shift); /// <summary> /// int32x2_t vsri_n_s32(int32x2_t a, int32x2_t b, __builtin_constant_p(n)) /// A32: VSRI.32 Dd, Dm, #n /// A64: SRI Vd.2S, Vn.2S, #n /// </summary> public static Vector64<int> ShiftRightAndInsert(Vector64<int> left, Vector64<int> right, byte shift) => ShiftRightAndInsert(left, right, shift); /// <summary> /// int8x8_t vsri_n_s8(int8x8_t a, int8x8_t b, __builtin_constant_p(n)) /// A32: VSRI.8 Dd, Dm, #n /// A64: SRI Vd.8B, Vn.8B, #n /// </summary> public static Vector64<sbyte> ShiftRightAndInsert(Vector64<sbyte> left, Vector64<sbyte> right, byte shift) => ShiftRightAndInsert(left, right, shift); /// <summary> /// uint16x4_t vsri_n_u16(uint16x4_t a, uint16x4_t b, __builtin_constant_p(n)) /// A32: VSRI.16 Dd, Dm, #n /// A64: SRI Vd.4H, Vn.4H, #n /// </summary> public static Vector64<ushort> ShiftRightAndInsert(Vector64<ushort> left, Vector64<ushort> right, byte shift) => ShiftRightAndInsert(left, right, shift); /// <summary> /// uint32x2_t vsri_n_u32(uint32x2_t a, uint32x2_t b, __builtin_constant_p(n)) /// A32: VSRI.32 Dd, Dm, #n /// A64: SRI Vd.2S, Vn.2S, #n /// </summary> public static Vector64<uint> ShiftRightAndInsert(Vector64<uint> left, Vector64<uint> right, byte shift) => ShiftRightAndInsert(left, right, shift); /// <summary> /// uint8x16_t vsriq_n_u8(uint8x16_t a, uint8x16_t b, __builtin_constant_p(n)) /// A32: VSRI.8 Qd, Qm, #n /// A64: SRI Vd.16B, Vn.16B, #n /// </summary> public static Vector128<byte> ShiftRightAndInsert(Vector128<byte> left, Vector128<byte> right, byte shift) => ShiftRightAndInsert(left, right, shift); /// <summary> /// int16x8_t vsriq_n_s16(int16x8_t a, int16x8_t b, __builtin_constant_p(n)) /// A32: VSRI.16 Qd, Qm, #n /// A64: SRI Vd.8H, Vn.8H, #n /// </summary> public static Vector128<short> ShiftRightAndInsert(Vector128<short> left, Vector128<short> right, byte shift) => ShiftRightAndInsert(left, right, shift); /// <summary> /// int32x4_t vsriq_n_s32(int32x4_t a, int32x4_t b, __builtin_constant_p(n)) /// A32: VSRI.32 Qd, Qm, #n /// A64: SRI Vd.4S, Vn.4S, #n /// </summary> public static Vector128<int> ShiftRightAndInsert(Vector128<int> left, Vector128<int> right, byte shift) => ShiftRightAndInsert(left, right, shift); /// <summary> /// int64x2_t vsriq_n_s64(int64x2_t a, int64x2_t b, __builtin_constant_p(n)) /// A32: VSRI.64 Qd, Qm, #n /// A64: SRI Vd.2D, Vn.2D, #n /// </summary> public static Vector128<long> ShiftRightAndInsert(Vector128<long> left, Vector128<long> right, byte shift) => 
ShiftRightAndInsert(left, right, shift); /// <summary> /// int8x16_t vsriq_n_s8(int8x16_t a, int8x16_t b, __builtin_constant_p(n)) /// A32: VSRI.8 Qd, Qm, #n /// A64: SRI Vd.16B, Vn.16B, #n /// </summary> public static Vector128<sbyte> ShiftRightAndInsert(Vector128<sbyte> left, Vector128<sbyte> right, byte shift) => ShiftRightAndInsert(left, right, shift); /// <summary> /// uint16x8_t vsriq_n_u16(uint16x8_t a, uint16x8_t b, __builtin_constant_p(n)) /// A32: VSRI.16 Qd, Qm, #n /// A64: SRI Vd.8H, Vn.8H, #n /// </summary> public static Vector128<ushort> ShiftRightAndInsert(Vector128<ushort> left, Vector128<ushort> right, byte shift) => ShiftRightAndInsert(left, right, shift); /// <summary> /// uint32x4_t vsriq_n_u32(uint32x4_t a, uint32x4_t b, __builtin_constant_p(n)) /// A32: VSRI.32 Qd, Qm, #n /// A64: SRI Vd.4S, Vn.4S, #n /// </summary> public static Vector128<uint> ShiftRightAndInsert(Vector128<uint> left, Vector128<uint> right, byte shift) => ShiftRightAndInsert(left, right, shift); /// <summary> /// uint64x2_t vsriq_n_u64(uint64x2_t a, uint64x2_t b, __builtin_constant_p(n)) /// A32: VSRI.64 Qd, Qm, #n /// A64: SRI Vd.2D, Vn.2D, #n /// </summary> public static Vector128<ulong> ShiftRightAndInsert(Vector128<ulong> left, Vector128<ulong> right, byte shift) => ShiftRightAndInsert(left, right, shift); /// <summary> /// int64_t vsrid_n_s64(int64_t a, int64_t b, __builtin_constant_p(n)) /// A32: VSRI.64 Dd, Dm, #n /// A64: SRI Dd, Dn, #n /// </summary> public static Vector64<long> ShiftRightAndInsertScalar(Vector64<long> left, Vector64<long> right, byte shift) => ShiftRightAndInsertScalar(left, right, shift); /// <summary> /// uint64_t vsrid_n_u64(uint64_t a, uint64_t b, __builtin_constant_p(n)) /// A32: VSRI.64 Dd, Dm, #n /// A64: SRI Dd, Dn, #n /// </summary> public static Vector64<ulong> ShiftRightAndInsertScalar(Vector64<ulong> left, Vector64<ulong> right, byte shift) => ShiftRightAndInsertScalar(left, right, shift); /// <summary> /// int16x4_t vshr_n_s16 (int16x4_t a, const int n) /// A32: VSHR.S16 Dd, Dm, #n /// A64: SSHR Vd.4H, Vn.4H, #n /// </summary> public static Vector64<short> ShiftRightArithmetic(Vector64<short> value, byte count) => ShiftRightArithmetic(value, count); /// <summary> /// int32x2_t vshr_n_s32 (int32x2_t a, const int n) /// A32: VSHR.S32 Dd, Dm, #n /// A64: SSHR Vd.2S, Vn.2S, #n /// </summary> public static Vector64<int> ShiftRightArithmetic(Vector64<int> value, byte count) => ShiftRightArithmetic(value, count); /// <summary> /// int8x8_t vshr_n_s8 (int8x8_t a, const int n) /// A32: VSHR.S8 Dd, Dm, #n /// A64: SSHR Vd.8B, Vn.8B, #n /// </summary> public static Vector64<sbyte> ShiftRightArithmetic(Vector64<sbyte> value, byte count) => ShiftRightArithmetic(value, count); /// <summary> /// int16x8_t vshrq_n_s16 (int16x8_t a, const int n) /// A32: VSHR.S16 Qd, Qm, #n /// A64: SSHR Vd.8H, Vn.8H, #n /// </summary> public static Vector128<short> ShiftRightArithmetic(Vector128<short> value, byte count) => ShiftRightArithmetic(value, count); /// <summary> /// int32x4_t vshrq_n_s32 (int32x4_t a, const int n) /// A32: VSHR.S32 Qd, Qm, #n /// A64: SSHR Vd.4S, Vn.4S, #n /// </summary> public static Vector128<int> ShiftRightArithmetic(Vector128<int> value, byte count) => ShiftRightArithmetic(value, count); /// <summary> /// int64x2_t vshrq_n_s64 (int64x2_t a, const int n) /// A32: VSHR.S64 Qd, Qm, #n /// A64: SSHR Vd.2D, Vn.2D, #n /// </summary> public static Vector128<long> ShiftRightArithmetic(Vector128<long> value, byte count) => ShiftRightArithmetic(value, count); /// 
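// Usage sketch (illustrative only; hypothetical values): ShiftRightAndInsert shifts each
// element of `right` right by `shift` and inserts the result into `left`, preserving the
// top `shift` bits of each `left` element -- useful for packing bit-fields without masking.
//
//     Vector64<byte> hi = Vector64.Create((byte)0xF0);
//     Vector64<byte> lo = Vector64.Create((byte)0xAB);
//     AdvSimd.ShiftRightAndInsert(hi, lo, 4);   // each lane: 0xFA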
<summary> /// int8x16_t vshrq_n_s8 (int8x16_t a, const int n) /// A32: VSHR.S8 Qd, Qm, #n /// A64: SSHR Vd.16B, Vn.16B, #n /// </summary> public static Vector128<sbyte> ShiftRightArithmetic(Vector128<sbyte> value, byte count) => ShiftRightArithmetic(value, count); /// <summary> /// int16x4_t vsra_n_s16 (int16x4_t a, int16x4_t b, const int n) /// A32: VSRA.S16 Dd, Dm, #n /// A64: SSRA Vd.4H, Vn.4H, #n /// </summary> public static Vector64<short> ShiftRightArithmeticAdd(Vector64<short> addend, Vector64<short> value, byte count) => ShiftRightArithmeticAdd(addend, value, count); /// <summary> /// int32x2_t vsra_n_s32 (int32x2_t a, int32x2_t b, const int n) /// A32: VSRA.S32 Dd, Dm, #n /// A64: SSRA Vd.2S, Vn.2S, #n /// </summary> public static Vector64<int> ShiftRightArithmeticAdd(Vector64<int> addend, Vector64<int> value, byte count) => ShiftRightArithmeticAdd(addend, value, count); /// <summary> /// int8x8_t vsra_n_s8 (int8x8_t a, int8x8_t b, const int n) /// A32: VSRA.S8 Dd, Dm, #n /// A64: SSRA Vd.8B, Vn.8B, #n /// </summary> public static Vector64<sbyte> ShiftRightArithmeticAdd(Vector64<sbyte> addend, Vector64<sbyte> value, byte count) => ShiftRightArithmeticAdd(addend, value, count); /// <summary> /// int16x8_t vsraq_n_s16 (int16x8_t a, int16x8_t b, const int n) /// A32: VSRA.S16 Qd, Qm, #n /// A64: SSRA Vd.8H, Vn.8H, #n /// </summary> public static Vector128<short> ShiftRightArithmeticAdd(Vector128<short> addend, Vector128<short> value, byte count) => ShiftRightArithmeticAdd(addend, value, count); /// <summary> /// int32x4_t vsraq_n_s32 (int32x4_t a, int32x4_t b, const int n) /// A32: VSRA.S32 Qd, Qm, #n /// A64: SSRA Vd.4S, Vn.4S, #n /// </summary> public static Vector128<int> ShiftRightArithmeticAdd(Vector128<int> addend, Vector128<int> value, byte count) => ShiftRightArithmeticAdd(addend, value, count); /// <summary> /// int64x2_t vsraq_n_s64 (int64x2_t a, int64x2_t b, const int n) /// A32: VSRA.S64 Qd, Qm, #n /// A64: SSRA Vd.2D, Vn.2D, #n /// </summary> public static Vector128<long> ShiftRightArithmeticAdd(Vector128<long> addend, Vector128<long> value, byte count) => ShiftRightArithmeticAdd(addend, value, count); /// <summary> /// int8x16_t vsraq_n_s8 (int8x16_t a, int8x16_t b, const int n) /// A32: VSRA.S8 Qd, Qm, #n /// A64: SSRA Vd.16B, Vn.16B, #n /// </summary> public static Vector128<sbyte> ShiftRightArithmeticAdd(Vector128<sbyte> addend, Vector128<sbyte> value, byte count) => ShiftRightArithmeticAdd(addend, value, count); /// <summary> /// int64x1_t vsra_n_s64 (int64x1_t a, int64x1_t b, const int n) /// A32: VSRA.S64 Dd, Dm, #n /// A64: SSRA Dd, Dn, #n /// </summary> public static Vector64<long> ShiftRightArithmeticAddScalar(Vector64<long> addend, Vector64<long> value, byte count) => ShiftRightArithmeticAddScalar(addend, value, count); /// <summary> /// int16x4_t vqshrn_n_s32 (int32x4_t a, const int n) /// A32: VQSHRN.S32 Dd, Qm, #n /// A64: SQSHRN Vd.4H, Vn.4S, #n /// </summary> public static Vector64<short> ShiftRightArithmeticNarrowingSaturateLower(Vector128<int> value, byte count) => ShiftRightArithmeticNarrowingSaturateLower(value, count); /// <summary> /// int32x2_t vqshrn_n_s64 (int64x2_t a, const int n) /// A32: VQSHRN.S64 Dd, Qm, #n /// A64: SQSHRN Vd.2S, Vn.2D, #n /// </summary> public static Vector64<int> ShiftRightArithmeticNarrowingSaturateLower(Vector128<long> value, byte count) => ShiftRightArithmeticNarrowingSaturateLower(value, count); /// <summary> /// int8x8_t vqshrn_n_s16 (int16x8_t a, const int n) /// A32: VQSHRN.S16 Dd, Qm, #n /// A64: SQSHRN Vd.8B, 
Vn.8H, #n /// </summary> public static Vector64<sbyte> ShiftRightArithmeticNarrowingSaturateLower(Vector128<short> value, byte count) => ShiftRightArithmeticNarrowingSaturateLower(value, count); /// <summary> /// uint8x8_t vqshrun_n_s16 (int16x8_t a, const int n) /// A32: VQSHRUN.S16 Dd, Qm, #n /// A64: SQSHRUN Vd.8B, Vn.8H, #n /// </summary> public static Vector64<byte> ShiftRightArithmeticNarrowingSaturateUnsignedLower(Vector128<short> value, byte count) => ShiftRightArithmeticNarrowingSaturateUnsignedLower(value, count); /// <summary> /// uint16x4_t vqshrun_n_s32 (int32x4_t a, const int n) /// A32: VQSHRUN.S32 Dd, Qm, #n /// A64: SQSHRUN Vd.4H, Vn.4S, #n /// </summary> public static Vector64<ushort> ShiftRightArithmeticNarrowingSaturateUnsignedLower(Vector128<int> value, byte count) => ShiftRightArithmeticNarrowingSaturateUnsignedLower(value, count); /// <summary> /// uint32x2_t vqshrun_n_s64 (int64x2_t a, const int n) /// A32: VQSHRUN.S64 Dd, Qm, #n /// A64: SQSHRUN Vd.2S, Vn.2D, #n /// </summary> public static Vector64<uint> ShiftRightArithmeticNarrowingSaturateUnsignedLower(Vector128<long> value, byte count) => ShiftRightArithmeticNarrowingSaturateUnsignedLower(value, count); /// <summary> /// uint8x16_t vqshrun_high_n_s16 (uint8x8_t r, int16x8_t a, const int n) /// A32: VQSHRUN.S16 Dd+1, Qm, #n /// A64: SQSHRUN2 Vd.16B, Vn.8H, #n /// </summary> public static Vector128<byte> ShiftRightArithmeticNarrowingSaturateUnsignedUpper(Vector64<byte> lower, Vector128<short> value, byte count) => ShiftRightArithmeticNarrowingSaturateUnsignedUpper(lower, value, count); /// <summary> /// uint16x8_t vqshrun_high_n_s32 (uint16x4_t r, int32x4_t a, const int n) /// A32: VQSHRUN.S32 Dd+1, Qm, #n /// A64: SQSHRUN2 Vd.8H, Vn.4S, #n /// </summary> public static Vector128<ushort> ShiftRightArithmeticNarrowingSaturateUnsignedUpper(Vector64<ushort> lower, Vector128<int> value, byte count) => ShiftRightArithmeticNarrowingSaturateUnsignedUpper(lower, value, count); /// <summary> /// uint32x4_t vqshrun_high_n_s64 (uint32x2_t r, int64x2_t a, const int n) /// A32: VQSHRUN.S64 Dd+1, Qm, #n /// A64: SQSHRUN2 Vd.4S, Vn.2D, #n /// </summary> public static Vector128<uint> ShiftRightArithmeticNarrowingSaturateUnsignedUpper(Vector64<uint> lower, Vector128<long> value, byte count) => ShiftRightArithmeticNarrowingSaturateUnsignedUpper(lower, value, count); /// <summary> /// int16x8_t vqshrn_high_n_s32 (int16x4_t r, int32x4_t a, const int n) /// A32: VQSHRN.S32 Dd+1, Qm, #n /// A64: SQSHRN2 Vd.8H, Vn.4S, #n /// </summary> public static Vector128<short> ShiftRightArithmeticNarrowingSaturateUpper(Vector64<short> lower, Vector128<int> value, byte count) => ShiftRightArithmeticNarrowingSaturateUpper(lower, value, count); /// <summary> /// int32x4_t vqshrn_high_n_s64 (int32x2_t r, int64x2_t a, const int n) /// A32: VQSHRN.S64 Dd+1, Qm, #n /// A64: SQSHRN2 Vd.4S, Vn.2D, #n /// </summary> public static Vector128<int> ShiftRightArithmeticNarrowingSaturateUpper(Vector64<int> lower, Vector128<long> value, byte count) => ShiftRightArithmeticNarrowingSaturateUpper(lower, value, count); /// <summary> /// int8x16_t vqshrn_high_n_s16 (int8x8_t r, int16x8_t a, const int n) /// A32: VQSHRN.S16 Dd+1, Qm, #n /// A64: SQSHRN2 Vd.16B, Vn.8H, #n /// </summary> public static Vector128<sbyte> ShiftRightArithmeticNarrowingSaturateUpper(Vector64<sbyte> lower, Vector128<short> value, byte count) => ShiftRightArithmeticNarrowingSaturateUpper(lower, value, count);
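        // Illustrative sketch (editorial addition, not part of the original API surface):
        // the Lower/Upper narrowing pairs above are designed to be chained, SQSHRN filling
        // the low 64 bits of the destination and SQSHRN2 the high 64 bits. The helper name
        // below is hypothetical and assumes the caller has already checked AdvSimd.IsSupported.
        private static Vector128<short> NarrowQ24_8PairExample(Vector128<int> low, Vector128<int> high)
        {
            const byte FractionBits = 8; // immediate shift counts should be compile-time constants for best codegen
            Vector64<short> lowHalf = ShiftRightArithmeticNarrowingSaturateLower(low, FractionBits); // SQSHRN
            return ShiftRightArithmeticNarrowingSaturateUpper(lowHalf, high, FractionBits);          // SQSHRN2
        }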
/// <summary> /// int16x4_t vrshr_n_s16 (int16x4_t a, const int n) /// A32: VRSHR.S16 Dd, Dm, #n /// A64: SRSHR Vd.4H, Vn.4H, #n /// </summary> public static Vector64<short> ShiftRightArithmeticRounded(Vector64<short> value, byte count) => ShiftRightArithmeticRounded(value, count); /// <summary> /// int32x2_t vrshr_n_s32 (int32x2_t a, const int n) /// A32: VRSHR.S32 Dd, Dm, #n /// A64: SRSHR Vd.2S, Vn.2S, #n /// </summary> public static Vector64<int> ShiftRightArithmeticRounded(Vector64<int> value, byte count) => ShiftRightArithmeticRounded(value, count); /// <summary> /// int8x8_t vrshr_n_s8 (int8x8_t a, const int n) /// A32: VRSHR.S8 Dd, Dm, #n /// A64: SRSHR Vd.8B, Vn.8B, #n /// </summary> public static Vector64<sbyte> ShiftRightArithmeticRounded(Vector64<sbyte> value, byte count) => ShiftRightArithmeticRounded(value, count); /// <summary> /// int16x8_t vrshrq_n_s16 (int16x8_t a, const int n) /// A32: VRSHR.S16 Qd, Qm, #n /// A64: SRSHR Vd.8H, Vn.8H, #n /// </summary> public static Vector128<short> ShiftRightArithmeticRounded(Vector128<short> value, byte count) => ShiftRightArithmeticRounded(value, count); /// <summary> /// int32x4_t vrshrq_n_s32 (int32x4_t a, const int n) /// A32: VRSHR.S32 Qd, Qm, #n /// A64: SRSHR Vd.4S, Vn.4S, #n /// </summary> public static Vector128<int> ShiftRightArithmeticRounded(Vector128<int> value, byte count) => ShiftRightArithmeticRounded(value, count); /// <summary> /// int64x2_t vrshrq_n_s64 (int64x2_t a, const int n) /// A32: VRSHR.S64 Qd, Qm, #n /// A64: SRSHR Vd.2D, Vn.2D, #n /// </summary> public static Vector128<long> ShiftRightArithmeticRounded(Vector128<long> value, byte count) => ShiftRightArithmeticRounded(value, count); /// <summary> /// int8x16_t vrshrq_n_s8 (int8x16_t a, const int n) /// A32: VRSHR.S8 Qd, Qm, #n /// A64: SRSHR Vd.16B, Vn.16B, #n /// </summary> public static Vector128<sbyte> ShiftRightArithmeticRounded(Vector128<sbyte> value, byte count) => ShiftRightArithmeticRounded(value, count); /// <summary> /// int16x4_t vrsra_n_s16 (int16x4_t a, int16x4_t b, const int n) /// A32: VRSRA.S16 Dd, Dm, #n /// A64: SRSRA Vd.4H, Vn.4H, #n /// </summary> public static Vector64<short> ShiftRightArithmeticRoundedAdd(Vector64<short> addend, Vector64<short> value, byte count) => ShiftRightArithmeticRoundedAdd(addend, value, count); /// <summary> /// int32x2_t vrsra_n_s32 (int32x2_t a, int32x2_t b, const int n) /// A32: VRSRA.S32 Dd, Dm, #n /// A64: SRSRA Vd.2S, Vn.2S, #n /// </summary> public static Vector64<int> ShiftRightArithmeticRoundedAdd(Vector64<int> addend, Vector64<int> value, byte count) => ShiftRightArithmeticRoundedAdd(addend, value, count); /// <summary> /// int8x8_t vrsra_n_s8 (int8x8_t a, int8x8_t b, const int n) /// A32: VRSRA.S8 Dd, Dm, #n /// A64: SRSRA Vd.8B, Vn.8B, #n /// </summary> public static Vector64<sbyte> ShiftRightArithmeticRoundedAdd(Vector64<sbyte> addend, Vector64<sbyte> value, byte count) => ShiftRightArithmeticRoundedAdd(addend, value, count); /// <summary> /// int16x8_t vrsraq_n_s16 (int16x8_t a, int16x8_t b, const int n) /// A32: VRSRA.S16 Qd, Qm, #n /// A64: SRSRA Vd.8H, Vn.8H, #n /// </summary> public static Vector128<short> ShiftRightArithmeticRoundedAdd(Vector128<short> addend, Vector128<short> value, byte count) => ShiftRightArithmeticRoundedAdd(addend, value, count); /// <summary> /// int32x4_t vrsraq_n_s32 (int32x4_t a, int32x4_t b, const int n) /// A32: VRSRA.S32 Qd, Qm, #n /// A64: SRSRA Vd.4S, Vn.4S, #n /// </summary> public static Vector128<int> ShiftRightArithmeticRoundedAdd(Vector128<int> addend, Vector128<int> value, byte count) => ShiftRightArithmeticRoundedAdd(addend, value, count);
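        // Illustrative sketch (editorial addition): the Rounded forms add (1 << (count - 1))
        // to each lane before shifting, rounding to nearest rather than truncating toward
        // negative infinity like the plain arithmetic shifts. Hypothetical smoothing step
        // computing acc += round(delta / 16) with a single SRSRA per vector:
        private static Vector128<short> RoundedAccumulateExample(Vector128<short> acc, Vector128<short> delta)
        {
            const byte Log2Divisor = 4;
            return ShiftRightArithmeticRoundedAdd(acc, delta, Log2Divisor); // SRSRA Vd.8H, Vn.8H, #4
        }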
/// <summary> /// int64x2_t vrsraq_n_s64 (int64x2_t a, int64x2_t b, const int n) /// A32: VRSRA.S64 Qd, Qm, #n /// A64: SRSRA Vd.2D, Vn.2D, #n /// </summary> public static Vector128<long> ShiftRightArithmeticRoundedAdd(Vector128<long> addend, Vector128<long> value, byte count) => ShiftRightArithmeticRoundedAdd(addend, value, count); /// <summary> /// int8x16_t vrsraq_n_s8 (int8x16_t a, int8x16_t b, const int n) /// A32: VRSRA.S8 Qd, Qm, #n /// A64: SRSRA Vd.16B, Vn.16B, #n /// </summary> public static Vector128<sbyte> ShiftRightArithmeticRoundedAdd(Vector128<sbyte> addend, Vector128<sbyte> value, byte count) => ShiftRightArithmeticRoundedAdd(addend, value, count); /// <summary> /// int64x1_t vrsra_n_s64 (int64x1_t a, int64x1_t b, const int n) /// A32: VRSRA.S64 Dd, Dm, #n /// A64: SRSRA Dd, Dn, #n /// </summary> public static Vector64<long> ShiftRightArithmeticRoundedAddScalar(Vector64<long> addend, Vector64<long> value, byte count) => ShiftRightArithmeticRoundedAddScalar(addend, value, count); /// <summary> /// int16x4_t vqrshrn_n_s32 (int32x4_t a, const int n) /// A32: VQRSHRN.S32 Dd, Qm, #n /// A64: SQRSHRN Vd.4H, Vn.4S, #n /// </summary> public static Vector64<short> ShiftRightArithmeticRoundedNarrowingSaturateLower(Vector128<int> value, byte count) => ShiftRightArithmeticRoundedNarrowingSaturateLower(value, count); /// <summary> /// int32x2_t vqrshrn_n_s64 (int64x2_t a, const int n) /// A32: VQRSHRN.S64 Dd, Qm, #n /// A64: SQRSHRN Vd.2S, Vn.2D, #n /// </summary> public static Vector64<int> ShiftRightArithmeticRoundedNarrowingSaturateLower(Vector128<long> value, byte count) => ShiftRightArithmeticRoundedNarrowingSaturateLower(value, count); /// <summary> /// int8x8_t vqrshrn_n_s16 (int16x8_t a, const int n) /// A32: VQRSHRN.S16 Dd, Qm, #n /// A64: SQRSHRN Vd.8B, Vn.8H, #n /// </summary> public static Vector64<sbyte> ShiftRightArithmeticRoundedNarrowingSaturateLower(Vector128<short> value, byte count) => ShiftRightArithmeticRoundedNarrowingSaturateLower(value, count); /// <summary> /// uint8x8_t vqrshrun_n_s16 (int16x8_t a, const int n) /// A32: VQRSHRUN.S16 Dd, Qm, #n /// A64: SQRSHRUN Vd.8B, Vn.8H, #n /// </summary> public static Vector64<byte> ShiftRightArithmeticRoundedNarrowingSaturateUnsignedLower(Vector128<short> value, byte count) => ShiftRightArithmeticRoundedNarrowingSaturateUnsignedLower(value, count); /// <summary> /// uint16x4_t vqrshrun_n_s32 (int32x4_t a, const int n) /// A32: VQRSHRUN.S32 Dd, Qm, #n /// A64: SQRSHRUN Vd.4H, Vn.4S, #n /// </summary> public static Vector64<ushort> ShiftRightArithmeticRoundedNarrowingSaturateUnsignedLower(Vector128<int> value, byte count) => ShiftRightArithmeticRoundedNarrowingSaturateUnsignedLower(value, count); /// <summary> /// uint32x2_t vqrshrun_n_s64 (int64x2_t a, const int n) /// A32: VQRSHRUN.S64 Dd, Qm, #n /// A64: SQRSHRUN Vd.2S, Vn.2D, #n /// </summary> public static Vector64<uint> ShiftRightArithmeticRoundedNarrowingSaturateUnsignedLower(Vector128<long> value, byte count) => ShiftRightArithmeticRoundedNarrowingSaturateUnsignedLower(value, count);
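        // Illustrative sketch (editorial addition): SQRSHRUN is the usual last step of a
        // signed fixed-point filter that produces unsigned 8-bit output. It drops the
        // fraction bits with rounding, clamps negative lanes to 0, and clamps overflowing
        // lanes to 255. Hypothetical Q8.8-to-byte conversion:
        private static Vector64<byte> PackPixelsExample(Vector128<short> q8_8)
        {
            const byte FractionBits = 8;
            return ShiftRightArithmeticRoundedNarrowingSaturateUnsignedLower(q8_8, FractionBits); // SQRSHRUN
        }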
/// <summary> /// uint8x16_t vqrshrun_high_n_s16 (uint8x8_t r, int16x8_t a, const int n) /// A32: VQRSHRUN.S16 Dd+1, Qm, #n /// A64: SQRSHRUN2 Vd.16B, Vn.8H, #n /// </summary> public static Vector128<byte> ShiftRightArithmeticRoundedNarrowingSaturateUnsignedUpper(Vector64<byte> lower, Vector128<short> value, byte count) => ShiftRightArithmeticRoundedNarrowingSaturateUnsignedUpper(lower, value, count); /// <summary> /// uint16x8_t vqrshrun_high_n_s32 (uint16x4_t r, int32x4_t a, const int n) /// A32: VQRSHRUN.S32 Dd+1, Qm, #n /// A64: SQRSHRUN2 Vd.8H, Vn.4S, #n /// </summary> public static Vector128<ushort> ShiftRightArithmeticRoundedNarrowingSaturateUnsignedUpper(Vector64<ushort> lower, Vector128<int> value, byte count) => ShiftRightArithmeticRoundedNarrowingSaturateUnsignedUpper(lower, value, count); /// <summary> /// uint32x4_t vqrshrun_high_n_s64 (uint32x2_t r, int64x2_t a, const int n) /// A32: VQRSHRUN.S64 Dd+1, Qm, #n /// A64: SQRSHRUN2 Vd.4S, Vn.2D, #n /// </summary> public static Vector128<uint> ShiftRightArithmeticRoundedNarrowingSaturateUnsignedUpper(Vector64<uint> lower, Vector128<long> value, byte count) => ShiftRightArithmeticRoundedNarrowingSaturateUnsignedUpper(lower, value, count); /// <summary> /// int16x8_t vqrshrn_high_n_s32 (int16x4_t r, int32x4_t a, const int n) /// A32: VQRSHRN.S32 Dd+1, Qm, #n /// A64: SQRSHRN2 Vd.8H, Vn.4S, #n /// </summary> public static Vector128<short> ShiftRightArithmeticRoundedNarrowingSaturateUpper(Vector64<short> lower, Vector128<int> value, byte count) => ShiftRightArithmeticRoundedNarrowingSaturateUpper(lower, value, count); /// <summary> /// int32x4_t vqrshrn_high_n_s64 (int32x2_t r, int64x2_t a, const int n) /// A32: VQRSHRN.S64 Dd+1, Qm, #n /// A64: SQRSHRN2 Vd.4S, Vn.2D, #n /// </summary> public static Vector128<int> ShiftRightArithmeticRoundedNarrowingSaturateUpper(Vector64<int> lower, Vector128<long> value, byte count) => ShiftRightArithmeticRoundedNarrowingSaturateUpper(lower, value, count); /// <summary> /// int8x16_t vqrshrn_high_n_s16 (int8x8_t r, int16x8_t a, const int n) /// A32: VQRSHRN.S16 Dd+1, Qm, #n /// A64: SQRSHRN2 Vd.16B, Vn.8H, #n /// </summary> public static Vector128<sbyte> ShiftRightArithmeticRoundedNarrowingSaturateUpper(Vector64<sbyte> lower, Vector128<short> value, byte count) => ShiftRightArithmeticRoundedNarrowingSaturateUpper(lower, value, count); /// <summary> /// int64x1_t vrshr_n_s64 (int64x1_t a, const int n) /// A32: VRSHR.S64 Dd, Dm, #n /// A64: SRSHR Dd, Dn, #n /// </summary> public static Vector64<long> ShiftRightArithmeticRoundedScalar(Vector64<long> value, byte count) => ShiftRightArithmeticRoundedScalar(value, count); /// <summary> /// int64x1_t vshr_n_s64 (int64x1_t a, const int n) /// A32: VSHR.S64 Dd, Dm, #n /// A64: SSHR Dd, Dn, #n /// </summary> public static Vector64<long> ShiftRightArithmeticScalar(Vector64<long> value, byte count) => ShiftRightArithmeticScalar(value, count);
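        // Illustrative sketch (editorial addition): from here on, the Logical forms shift in
        // zero bits while the Arithmetic forms above replicate the sign bit, so unsigned data
        // should use the Logical family. Hypothetical division of packed unsigned counters by 8:
        private static Vector128<byte> DivideCountersByEightExample(Vector128<byte> counters) =>
            ShiftRightLogical(counters, 3); // USHR Vd.16B, Vn.16B, #3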
/// <summary> /// uint8x8_t vshr_n_u8 (uint8x8_t a, const int n) /// A32: VSHR.U8 Dd, Dm, #n /// A64: USHR Vd.8B, Vn.8B, #n /// </summary> public static Vector64<byte> ShiftRightLogical(Vector64<byte> value, byte count) => ShiftRightLogical(value, count); /// <summary> /// uint16x4_t vshr_n_u16 (uint16x4_t a, const int n) /// A32: VSHR.U16 Dd, Dm, #n /// A64: USHR Vd.4H, Vn.4H, #n /// </summary> public static Vector64<short> ShiftRightLogical(Vector64<short> value, byte count) => ShiftRightLogical(value, count); /// <summary> /// uint32x2_t vshr_n_u32 (uint32x2_t a, const int n) /// A32: VSHR.U32 Dd, Dm, #n /// A64: USHR Vd.2S, Vn.2S, #n /// </summary> public static Vector64<int> ShiftRightLogical(Vector64<int> value, byte count) => ShiftRightLogical(value, count); /// <summary> /// uint8x8_t vshr_n_u8 (uint8x8_t a, const int n) /// A32: VSHR.U8 Dd, Dm, #n /// A64: USHR Vd.8B, Vn.8B, #n /// </summary> public static Vector64<sbyte> ShiftRightLogical(Vector64<sbyte> value, byte count) => ShiftRightLogical(value, count); /// <summary> /// uint16x4_t vshr_n_u16 (uint16x4_t a, const int n) /// A32: VSHR.U16 Dd, Dm, #n /// A64: USHR Vd.4H, Vn.4H, #n /// </summary> public static Vector64<ushort> ShiftRightLogical(Vector64<ushort> value, byte count) => ShiftRightLogical(value, count); /// <summary> /// uint32x2_t vshr_n_u32 (uint32x2_t a, const int n) /// A32: VSHR.U32 Dd, Dm, #n /// A64: USHR Vd.2S, Vn.2S, #n /// </summary> public static Vector64<uint> ShiftRightLogical(Vector64<uint> value, byte count) => ShiftRightLogical(value, count); /// <summary> /// uint8x16_t vshrq_n_u8 (uint8x16_t a, const int n) /// A32: VSHR.U8 Qd, Qm, #n /// A64: USHR Vd.16B, Vn.16B, #n /// </summary> public static Vector128<byte> ShiftRightLogical(Vector128<byte> value, byte count) => ShiftRightLogical(value, count); /// <summary> /// uint16x8_t vshrq_n_u16 (uint16x8_t a, const int n) /// A32: VSHR.U16 Qd, Qm, #n /// A64: USHR Vd.8H, Vn.8H, #n /// </summary> public static Vector128<short> ShiftRightLogical(Vector128<short> value, byte count) => ShiftRightLogical(value, count); /// <summary> /// uint32x4_t vshrq_n_u32 (uint32x4_t a, const int n) /// A32: VSHR.U32 Qd, Qm, #n /// A64: USHR Vd.4S, Vn.4S, #n /// </summary> public static Vector128<int> ShiftRightLogical(Vector128<int> value, byte count) => ShiftRightLogical(value, count); /// <summary> /// uint64x2_t vshrq_n_u64 (uint64x2_t a, const int n) /// A32: VSHR.U64 Qd, Qm, #n /// A64: USHR Vd.2D, Vn.2D, #n /// </summary> public static Vector128<long> ShiftRightLogical(Vector128<long> value, byte count) => ShiftRightLogical(value, count); /// <summary> /// uint8x16_t vshrq_n_u8 (uint8x16_t a, const int n) /// A32: VSHR.U8 Qd, Qm, #n /// A64: USHR Vd.16B, Vn.16B, #n /// </summary> public static Vector128<sbyte> ShiftRightLogical(Vector128<sbyte> value, byte count) => ShiftRightLogical(value, count); /// <summary> /// uint16x8_t vshrq_n_u16 (uint16x8_t a, const int n) /// A32: VSHR.U16 Qd, Qm, #n /// A64: USHR Vd.8H, Vn.8H, #n /// </summary> public static Vector128<ushort> ShiftRightLogical(Vector128<ushort> value, byte count) => ShiftRightLogical(value, count); /// <summary> /// uint32x4_t vshrq_n_u32 (uint32x4_t a, const int n) /// A32: VSHR.U32 Qd, Qm, #n /// A64: USHR Vd.4S, Vn.4S, #n /// </summary> public static Vector128<uint> ShiftRightLogical(Vector128<uint> value, byte count) => ShiftRightLogical(value, count); /// <summary> /// uint64x2_t vshrq_n_u64 (uint64x2_t a, const int n) /// A32: VSHR.U64 Qd, Qm, #n /// A64: USHR Vd.2D, Vn.2D, #n /// </summary> public static Vector128<ulong> ShiftRightLogical(Vector128<ulong> value, byte count) => ShiftRightLogical(value, count); /// <summary> /// uint8x8_t vsra_n_u8 (uint8x8_t a, uint8x8_t b, const int n) /// A32: VSRA.U8 Dd, Dm, #n /// A64: USRA Vd.8B, Vn.8B, #n /// </summary> public static Vector64<byte> ShiftRightLogicalAdd(Vector64<byte> addend, Vector64<byte> value, byte count) => ShiftRightLogicalAdd(addend, value, count); /// <summary> /// uint16x4_t vsra_n_u16 (uint16x4_t a, uint16x4_t b, const int n) /// A32: VSRA.U16 Dd, Dm, #n /// A64: USRA Vd.4H, Vn.4H, #n /// </summary> public static Vector64<short> ShiftRightLogicalAdd(Vector64<short> addend, Vector64<short> value, byte count) => ShiftRightLogicalAdd(addend, value, count); /// <summary> /// uint32x2_t vsra_n_u32 (uint32x2_t a, uint32x2_t b, const int n) /// A32: VSRA.U32 Dd, Dm, #n /// A64: USRA Vd.2S, Vn.2S, #n /// </summary> public static Vector64<int> ShiftRightLogicalAdd(Vector64<int> addend, Vector64<int> value, byte count) => ShiftRightLogicalAdd(addend, value, count); /// <summary> /// uint8x8_t vsra_n_u8 (uint8x8_t a, uint8x8_t b, const int 
n) /// A32: VSRA.U8 Dd, Dm, #n /// A64: USRA Vd.8B, Vn.8B, #n /// </summary> public static Vector64<sbyte> ShiftRightLogicalAdd(Vector64<sbyte> addend, Vector64<sbyte> value, byte count) => ShiftRightLogicalAdd(addend, value, count); /// <summary> /// uint16x4_t vsra_n_u16 (uint16x4_t a, uint16x4_t b, const int n) /// A32: VSRA.U16 Dd, Dm, #n /// A64: USRA Vd.4H, Vn.4H, #n /// </summary> public static Vector64<ushort> ShiftRightLogicalAdd(Vector64<ushort> addend, Vector64<ushort> value, byte count) => ShiftRightLogicalAdd(addend, value, count); /// <summary> /// uint32x2_t vsra_n_u32 (uint32x2_t a, uint32x2_t b, const int n) /// A32: VSRA.U32 Dd, Dm, #n /// A64: USRA Vd.2S, Vn.2S, #n /// </summary> public static Vector64<uint> ShiftRightLogicalAdd(Vector64<uint> addend, Vector64<uint> value, byte count) => ShiftRightLogicalAdd(addend, value, count); /// <summary> /// uint8x16_t vsraq_n_u8 (uint8x16_t a, uint8x16_t b, const int n) /// A32: VSRA.U8 Qd, Qm, #n /// A64: USRA Vd.16B, Vn.16B, #n /// </summary> public static Vector128<byte> ShiftRightLogicalAdd(Vector128<byte> addend, Vector128<byte> value, byte count) => ShiftRightLogicalAdd(addend, value, count); /// <summary> /// uint16x8_t vsraq_n_u16 (uint16x8_t a, uint16x8_t b, const int n) /// A32: VSRA.U16 Qd, Qm, #n /// A64: USRA Vd.8H, Vn.8H, #n /// </summary> public static Vector128<short> ShiftRightLogicalAdd(Vector128<short> addend, Vector128<short> value, byte count) => ShiftRightLogicalAdd(addend, value, count); /// <summary> /// uint32x4_t vsraq_n_u32 (uint32x4_t a, uint32x4_t b, const int n) /// A32: VSRA.U32 Qd, Qm, #n /// A64: USRA Vd.4S, Vn.4S, #n /// </summary> public static Vector128<int> ShiftRightLogicalAdd(Vector128<int> addend, Vector128<int> value, byte count) => ShiftRightLogicalAdd(addend, value, count); /// <summary> /// uint64x2_t vsraq_n_u64 (uint64x2_t a, uint64x2_t b, const int n) /// A32: VSRA.U64 Qd, Qm, #n /// A64: USRA Vd.2D, Vn.2D, #n /// </summary> public static Vector128<long> ShiftRightLogicalAdd(Vector128<long> addend, Vector128<long> value, byte count) => ShiftRightLogicalAdd(addend, value, count); /// <summary> /// uint8x16_t vsraq_n_u8 (uint8x16_t a, uint8x16_t b, const int n) /// A32: VSRA.U8 Qd, Qm, #n /// A64: USRA Vd.16B, Vn.16B, #n /// </summary> public static Vector128<sbyte> ShiftRightLogicalAdd(Vector128<sbyte> addend, Vector128<sbyte> value, byte count) => ShiftRightLogicalAdd(addend, value, count); /// <summary> /// uint16x8_t vsraq_n_u16 (uint16x8_t a, uint16x8_t b, const int n) /// A32: VSRA.U16 Qd, Qm, #n /// A64: USRA Vd.8H, Vn.8H, #n /// </summary> public static Vector128<ushort> ShiftRightLogicalAdd(Vector128<ushort> addend, Vector128<ushort> value, byte count) => ShiftRightLogicalAdd(addend, value, count); /// <summary> /// uint32x4_t vsraq_n_u32 (uint32x4_t a, uint32x4_t b, const int n) /// A32: VSRA.U32 Qd, Qm, #n /// A64: USRA Vd.4S, Vn.4S, #n /// </summary> public static Vector128<uint> ShiftRightLogicalAdd(Vector128<uint> addend, Vector128<uint> value, byte count) => ShiftRightLogicalAdd(addend, value, count); /// <summary> /// uint64x2_t vsraq_n_u64 (uint64x2_t a, uint64x2_t b, const int n) /// A32: VSRA.U64 Qd, Qm, #n /// A64: USRA Vd.2D, Vn.2D, #n /// </summary> public static Vector128<ulong> ShiftRightLogicalAdd(Vector128<ulong> addend, Vector128<ulong> value, byte count) => ShiftRightLogicalAdd(addend, value, count); /// <summary> /// uint64x1_t vsra_n_u64 (uint64x1_t a, uint64x1_t b, const int n) /// A32: VSRA.U64 Dd, Dm, #n /// A64: USRA Dd, Dn, #n /// </summary> public 
static Vector64<long> ShiftRightLogicalAddScalar(Vector64<long> addend, Vector64<long> value, byte count) => ShiftRightLogicalAddScalar(addend, value, count); /// <summary> /// uint64x1_t vsra_n_u64 (uint64x1_t a, uint64x1_t b, const int n) /// A32: VSRA.U64 Dd, Dm, #n /// A64: USRA Dd, Dn, #n /// </summary> public static Vector64<ulong> ShiftRightLogicalAddScalar(Vector64<ulong> addend, Vector64<ulong> value, byte count) => ShiftRightLogicalAddScalar(addend, value, count); /// <summary> /// uint8x8_t vshrn_n_u16 (uint16x8_t a, const int n) /// A32: VSHRN.I16 Dd, Qm, #n /// A64: SHRN Vd.8B, Vn.8H, #n /// </summary> public static Vector64<byte> ShiftRightLogicalNarrowingLower(Vector128<ushort> value, byte count) => ShiftRightLogicalNarrowingLower(value, count); /// <summary> /// int16x4_t vshrn_n_s32 (int32x4_t a, const int n) /// A32: VSHRN.I32 Dd, Qm, #n /// A64: SHRN Vd.4H, Vn.4S, #n /// </summary> public static Vector64<short> ShiftRightLogicalNarrowingLower(Vector128<int> value, byte count) => ShiftRightLogicalNarrowingLower(value, count); /// <summary> /// int32x2_t vshrn_n_s64 (int64x2_t a, const int n) /// A32: VSHRN.I64 Dd, Qm, #n /// A64: SHRN Vd.2S, Vn.2D, #n /// </summary> public static Vector64<int> ShiftRightLogicalNarrowingLower(Vector128<long> value, byte count) => ShiftRightLogicalNarrowingLower(value, count); /// <summary> /// int8x8_t vshrn_n_s16 (int16x8_t a, const int n) /// A32: VSHRN.I16 Dd, Qm, #n /// A64: SHRN Vd.8B, Vn.8H, #n /// </summary> public static Vector64<sbyte> ShiftRightLogicalNarrowingLower(Vector128<short> value, byte count) => ShiftRightLogicalNarrowingLower(value, count); /// <summary> /// uint16x4_t vshrn_n_u32 (uint32x4_t a, const int n) /// A32: VSHRN.I32 Dd, Qm, #n /// A64: SHRN Vd.4H, Vn.4S, #n /// </summary> public static Vector64<ushort> ShiftRightLogicalNarrowingLower(Vector128<uint> value, byte count) => ShiftRightLogicalNarrowingLower(value, count); /// <summary> /// uint32x2_t vshrn_n_u64 (uint64x2_t a, const int n) /// A32: VSHRN.I64 Dd, Qm, #n /// A64: SHRN Vd.2S, Vn.2D, #n /// </summary> public static Vector64<uint> ShiftRightLogicalNarrowingLower(Vector128<ulong> value, byte count) => ShiftRightLogicalNarrowingLower(value, count); /// <summary> /// uint8x8_t vqshrn_n_u16 (uint16x8_t a, const int n) /// A32: VQSHRN.U16 Dd, Qm, #n /// A64: UQSHRN Vd.8B, Vn.8H, #n /// </summary> public static Vector64<byte> ShiftRightLogicalNarrowingSaturateLower(Vector128<ushort> value, byte count) => ShiftRightLogicalNarrowingSaturateLower(value, count); /// <summary> /// uint16x4_t vqshrn_n_u32 (uint32x4_t a, const int n) /// A32: VQSHRN.U32 Dd, Qm, #n /// A64: UQSHRN Vd.4H, Vn.4S, #n /// </summary> public static Vector64<short> ShiftRightLogicalNarrowingSaturateLower(Vector128<int> value, byte count) => ShiftRightLogicalNarrowingSaturateLower(value, count); /// <summary> /// uint32x2_t vqshrn_n_u64 (uint64x2_t a, const int n) /// A32: VQSHRN.U64 Dd, Qm, #n /// A64: UQSHRN Vd.2S, Vn.2D, #n /// </summary> public static Vector64<int> ShiftRightLogicalNarrowingSaturateLower(Vector128<long> value, byte count) => ShiftRightLogicalNarrowingSaturateLower(value, count); /// <summary> /// uint8x8_t vqshrn_n_u16 (uint16x8_t a, const int n) /// A32: VQSHRN.U16 Dd, Qm, #n /// A64: UQSHRN Vd.8B, Vn.8H, #n /// </summary> public static Vector64<sbyte> ShiftRightLogicalNarrowingSaturateLower(Vector128<short> value, byte count) => ShiftRightLogicalNarrowingSaturateLower(value, count); /// <summary> /// uint16x4_t vqshrn_n_u32 (uint32x4_t a, const int n) /// A32: 
VQSHRN.U32 Dd, Qm, #n /// A64: UQSHRN Vd.4H, Vn.4S, #n /// </summary> public static Vector64<ushort> ShiftRightLogicalNarrowingSaturateLower(Vector128<uint> value, byte count) => ShiftRightLogicalNarrowingSaturateLower(value, count); /// <summary> /// uint32x2_t vqshrn_n_u64 (uint64x2_t a, const int n) /// A32: VQSHRN.U64 Dd, Qm, #n /// A64: UQSHRN Vd.2S, Vn.2D, #n /// </summary> public static Vector64<uint> ShiftRightLogicalNarrowingSaturateLower(Vector128<ulong> value, byte count) => ShiftRightLogicalNarrowingSaturateLower(value, count); /// <summary> /// uint8x16_t vqshrn_high_n_u16 (uint8x8_t r, uint16x8_t a, const int n) /// A32: VQSHRN.U16 Dd+1, Qm, #n /// A64: UQSHRN2 Vd.16B, Vn.8H, #n /// </summary> public static Vector128<byte> ShiftRightLogicalNarrowingSaturateUpper(Vector64<byte> lower, Vector128<ushort> value, byte count) => ShiftRightLogicalNarrowingSaturateUpper(lower, value, count); /// <summary> /// uint16x8_t vqshrn_high_n_u32 (uint16x4_t r, uint32x4_t a, const int n) /// A32: VQSHRN.U32 Dd+1, Qm, #n /// A64: UQSHRN2 Vd.8H, Vn.4S, #n /// </summary> public static Vector128<short> ShiftRightLogicalNarrowingSaturateUpper(Vector64<short> lower, Vector128<int> value, byte count) => ShiftRightLogicalNarrowingSaturateUpper(lower, value, count); /// <summary> /// uint32x4_t vqshrn_high_n_u64 (uint32x2_t r, uint64x2_t a, const int n) /// A32: VQSHRN.U64 Dd+1, Qm, #n /// A64: UQSHRN2 Vd.4S, Vn.2D, #n /// </summary> public static Vector128<int> ShiftRightLogicalNarrowingSaturateUpper(Vector64<int> lower, Vector128<long> value, byte count) => ShiftRightLogicalNarrowingSaturateUpper(lower, value, count); /// <summary> /// uint8x16_t vqshrn_high_n_u16 (uint8x8_t r, uint16x8_t a, const int n) /// A32: VQSHRN.U16 Dd+1, Qm, #n /// A64: UQSHRN2 Vd.16B, Vn.8H, #n /// </summary> public static Vector128<sbyte> ShiftRightLogicalNarrowingSaturateUpper(Vector64<sbyte> lower, Vector128<short> value, byte count) => ShiftRightLogicalNarrowingSaturateUpper(lower, value, count); /// <summary> /// uint16x8_t vqshrn_high_n_u32 (uint16x4_t r, uint32x4_t a, const int n) /// A32: VQSHRN.U32 Dd+1, Qm, #n /// A64: UQSHRN2 Vd.8H, Vn.4S, #n /// </summary> public static Vector128<ushort> ShiftRightLogicalNarrowingSaturateUpper(Vector64<ushort> lower, Vector128<uint> value, byte count) => ShiftRightLogicalNarrowingSaturateUpper(lower, value, count); /// <summary> /// uint32x4_t vqshrn_high_n_u64 (uint32x2_t r, uint64x2_t a, const int n) /// A32: VQSHRN.U64 Dd+1, Qm, #n /// A64: UQSHRN2 Vd.4S, Vn.2D, #n /// </summary> public static Vector128<uint> ShiftRightLogicalNarrowingSaturateUpper(Vector64<uint> lower, Vector128<ulong> value, byte count) => ShiftRightLogicalNarrowingSaturateUpper(lower, value, count); /// <summary> /// uint8x16_t vshrn_high_n_u16 (uint8x8_t r, uint16x8_t a, const int n) /// A32: VSHRN.I16 Dd+1, Qm, #n /// A64: SHRN2 Vd.16B, Vn.8H, #n /// </summary> public static Vector128<byte> ShiftRightLogicalNarrowingUpper(Vector64<byte> lower, Vector128<ushort> value, byte count) => ShiftRightLogicalNarrowingUpper(lower, value, count); /// <summary> /// int16x8_t vshrn_high_n_s32 (int16x4_t r, int32x4_t a, const int n) /// A32: VSHRN.I32 Dd+1, Qm, #n /// A64: SHRN2 Vd.8H, Vn.4S, #n /// </summary> public static Vector128<short> ShiftRightLogicalNarrowingUpper(Vector64<short> lower, Vector128<int> value, byte count) => ShiftRightLogicalNarrowingUpper(lower, value, count); /// <summary> /// int32x4_t vshrn_high_n_s64 (int32x2_t r, int64x2_t a, const int n) /// A32: VSHRN.I64 Dd+1, Qm, #n /// A64: SHRN2 
Vd.4S, Vn.2D, #n /// </summary> public static Vector128<int> ShiftRightLogicalNarrowingUpper(Vector64<int> lower, Vector128<long> value, byte count) => ShiftRightLogicalNarrowingUpper(lower, value, count); /// <summary> /// int8x16_t vshrn_high_n_s16 (int8x8_t r, int16x8_t a, const int n) /// A32: VSHRN.I16 Dd+1, Qm, #n /// A64: SHRN2 Vd.16B, Vn.8H, #n /// </summary> public static Vector128<sbyte> ShiftRightLogicalNarrowingUpper(Vector64<sbyte> lower, Vector128<short> value, byte count) => ShiftRightLogicalNarrowingUpper(lower, value, count); /// <summary> /// uint16x8_t vshrn_high_n_u32 (uint16x4_t r, uint32x4_t a, const int n) /// A32: VSHRN.I32 Dd+1, Qm, #n /// A64: SHRN2 Vd.8H, Vn.4S, #n /// </summary> public static Vector128<ushort> ShiftRightLogicalNarrowingUpper(Vector64<ushort> lower, Vector128<uint> value, byte count) => ShiftRightLogicalNarrowingUpper(lower, value, count); /// <summary> /// uint32x4_t vshrn_high_n_u64 (uint32x2_t r, uint64x2_t a, const int n) /// A32: VSHRN.I64 Dd+1, Qm, #n /// A64: SHRN2 Vd.4S, Vn.2D, #n /// </summary> public static Vector128<uint> ShiftRightLogicalNarrowingUpper(Vector64<uint> lower, Vector128<ulong> value, byte count) => ShiftRightLogicalNarrowingUpper(lower, value, count); /// <summary> /// uint8x8_t vrshr_n_u8 (uint8x8_t a, const int n) /// A32: VRSHR.U8 Dd, Dm, #n /// A64: URSHR Vd.8B, Vn.8B, #n /// </summary> public static Vector64<byte> ShiftRightLogicalRounded(Vector64<byte> value, byte count) => ShiftRightLogicalRounded(value, count); /// <summary> /// uint16x4_t vrshr_n_u16 (uint16x4_t a, const int n) /// A32: VRSHR.U16 Dd, Dm, #n /// A64: URSHR Vd.4H, Vn.4H, #n /// </summary> public static Vector64<short> ShiftRightLogicalRounded(Vector64<short> value, byte count) => ShiftRightLogicalRounded(value, count); /// <summary> /// uint32x2_t vrshr_n_u32 (uint32x2_t a, const int n) /// A32: VRSHR.U32 Dd, Dm, #n /// A64: URSHR Vd.2S, Vn.2S, #n /// </summary> public static Vector64<int> ShiftRightLogicalRounded(Vector64<int> value, byte count) => ShiftRightLogicalRounded(value, count); /// <summary> /// uint8x8_t vrshr_n_u8 (uint8x8_t a, const int n) /// A32: VRSHR.U8 Dd, Dm, #n /// A64: URSHR Vd.8B, Vn.8B, #n /// </summary> public static Vector64<sbyte> ShiftRightLogicalRounded(Vector64<sbyte> value, byte count) => ShiftRightLogicalRounded(value, count); /// <summary> /// uint16x4_t vrshr_n_u16 (uint16x4_t a, const int n) /// A32: VRSHR.U16 Dd, Dm, #n /// A64: URSHR Vd.4H, Vn.4H, #n /// </summary> public static Vector64<ushort> ShiftRightLogicalRounded(Vector64<ushort> value, byte count) => ShiftRightLogicalRounded(value, count); /// <summary> /// uint32x2_t vrshr_n_u32 (uint32x2_t a, const int n) /// A32: VRSHR.U32 Dd, Dm, #n /// A64: URSHR Vd.2S, Vn.2S, #n /// </summary> public static Vector64<uint> ShiftRightLogicalRounded(Vector64<uint> value, byte count) => ShiftRightLogicalRounded(value, count); /// <summary> /// uint8x16_t vrshrq_n_u8 (uint8x16_t a, const int n) /// A32: VRSHR.U8 Qd, Qm, #n /// A64: URSHR Vd.16B, Vn.16B, #n /// </summary> public static Vector128<byte> ShiftRightLogicalRounded(Vector128<byte> value, byte count) => ShiftRightLogicalRounded(value, count); /// <summary> /// uint16x8_t vrshrq_n_u16 (uint16x8_t a, const int n) /// A32: VRSHR.U16 Qd, Qm, #n /// A64: URSHR Vd.8H, Vn.8H, #n /// </summary> public static Vector128<short> ShiftRightLogicalRounded(Vector128<short> value, byte count) => ShiftRightLogicalRounded(value, count); /// <summary> /// uint32x4_t vrshrq_n_u32 (uint32x4_t a, const int n) /// A32: VRSHR.U32 Qd, 
Qm, #n /// A64: URSHR Vd.4S, Vn.4S, #n /// </summary> public static Vector128<int> ShiftRightLogicalRounded(Vector128<int> value, byte count) => ShiftRightLogicalRounded(value, count); /// <summary> /// uint64x2_t vrshrq_n_u64 (uint64x2_t a, const int n) /// A32: VRSHR.U64 Qd, Qm, #n /// A64: URSHR Vd.2D, Vn.2D, #n /// </summary> public static Vector128<long> ShiftRightLogicalRounded(Vector128<long> value, byte count) => ShiftRightLogicalRounded(value, count); /// <summary> /// uint8x16_t vrshrq_n_u8 (uint8x16_t a, const int n) /// A32: VRSHR.U8 Qd, Qm, #n /// A64: URSHR Vd.16B, Vn.16B, #n /// </summary> public static Vector128<sbyte> ShiftRightLogicalRounded(Vector128<sbyte> value, byte count) => ShiftRightLogicalRounded(value, count); /// <summary> /// uint16x8_t vrshrq_n_u16 (uint16x8_t a, const int n) /// A32: VRSHR.U16 Qd, Qm, #n /// A64: URSHR Vd.8H, Vn.8H, #n /// </summary> public static Vector128<ushort> ShiftRightLogicalRounded(Vector128<ushort> value, byte count) => ShiftRightLogicalRounded(value, count); /// <summary> /// uint32x4_t vrshrq_n_u32 (uint32x4_t a, const int n) /// A32: VRSHR.U32 Qd, Qm, #n /// A64: URSHR Vd.4S, Vn.4S, #n /// </summary> public static Vector128<uint> ShiftRightLogicalRounded(Vector128<uint> value, byte count) => ShiftRightLogicalRounded(value, count); /// <summary> /// uint64x2_t vrshrq_n_u64 (uint64x2_t a, const int n) /// A32: VRSHR.U64 Qd, Qm, #n /// A64: URSHR Vd.2D, Vn.2D, #n /// </summary> public static Vector128<ulong> ShiftRightLogicalRounded(Vector128<ulong> value, byte count) => ShiftRightLogicalRounded(value, count); /// <summary> /// uint8x8_t vrsra_n_u8 (uint8x8_t a, uint8x8_t b, const int n) /// A32: VRSRA.U8 Dd, Dm, #n /// A64: URSRA Vd.8B, Vn.8B, #n /// </summary> public static Vector64<byte> ShiftRightLogicalRoundedAdd(Vector64<byte> addend, Vector64<byte> value, byte count) => ShiftRightLogicalRoundedAdd(addend, value, count); /// <summary> /// uint16x4_t vrsra_n_u16 (uint16x4_t a, uint16x4_t b, const int n) /// A32: VRSRA.U16 Dd, Dm, #n /// A64: URSRA Vd.4H, Vn.4H, #n /// </summary> public static Vector64<short> ShiftRightLogicalRoundedAdd(Vector64<short> addend, Vector64<short> value, byte count) => ShiftRightLogicalRoundedAdd(addend, value, count); /// <summary> /// uint32x2_t vrsra_n_u32 (uint32x2_t a, uint32x2_t b, const int n) /// A32: VRSRA.U32 Dd, Dm, #n /// A64: URSRA Vd.2S, Vn.2S, #n /// </summary> public static Vector64<int> ShiftRightLogicalRoundedAdd(Vector64<int> addend, Vector64<int> value, byte count) => ShiftRightLogicalRoundedAdd(addend, value, count); /// <summary> /// uint8x8_t vrsra_n_u8 (uint8x8_t a, uint8x8_t b, const int n) /// A32: VRSRA.U8 Dd, Dm, #n /// A64: URSRA Vd.8B, Vn.8B, #n /// </summary> public static Vector64<sbyte> ShiftRightLogicalRoundedAdd(Vector64<sbyte> addend, Vector64<sbyte> value, byte count) => ShiftRightLogicalRoundedAdd(addend, value, count); /// <summary> /// uint16x4_t vrsra_n_u16 (uint16x4_t a, uint16x4_t b, const int n) /// A32: VRSRA.U16 Dd, Dm, #n /// A64: URSRA Vd.4H, Vn.4H, #n /// </summary> public static Vector64<ushort> ShiftRightLogicalRoundedAdd(Vector64<ushort> addend, Vector64<ushort> value, byte count) => ShiftRightLogicalRoundedAdd(addend, value, count); /// <summary> /// uint32x2_t vrsra_n_u32 (uint32x2_t a, uint32x2_t b, const int n) /// A32: VRSRA.U32 Dd, Dm, #n /// A64: URSRA Vd.2S, Vn.2S, #n /// </summary> public static Vector64<uint> ShiftRightLogicalRoundedAdd(Vector64<uint> addend, Vector64<uint> value, byte count) => ShiftRightLogicalRoundedAdd(addend, value, 
count); /// <summary> /// uint8x16_t vrsraq_n_u8 (uint8x16_t a, uint8x16_t b, const int n) /// A32: VRSRA.U8 Qd, Qm, #n /// A64: URSRA Vd.16B, Vn.16B, #n /// </summary> public static Vector128<byte> ShiftRightLogicalRoundedAdd(Vector128<byte> addend, Vector128<byte> value, byte count) => ShiftRightLogicalRoundedAdd(addend, value, count); /// <summary> /// uint16x8_t vrsraq_n_u16 (uint16x8_t a, uint16x8_t b, const int n) /// A32: VRSRA.U16 Qd, Qm, #n /// A64: URSRA Vd.8H, Vn.8H, #n /// </summary> public static Vector128<short> ShiftRightLogicalRoundedAdd(Vector128<short> addend, Vector128<short> value, byte count) => ShiftRightLogicalRoundedAdd(addend, value, count); /// <summary> /// uint32x4_t vrsraq_n_u32 (uint32x4_t a, uint32x4_t b, const int n) /// A32: VRSRA.U32 Qd, Qm, #n /// A64: URSRA Vd.4S, Vn.4S, #n /// </summary> public static Vector128<int> ShiftRightLogicalRoundedAdd(Vector128<int> addend, Vector128<int> value, byte count) => ShiftRightLogicalRoundedAdd(addend, value, count); /// <summary> /// uint64x2_t vrsraq_n_u64 (uint64x2_t a, uint64x2_t b, const int n) /// A32: VRSRA.U64 Qd, Qm, #n /// A64: URSRA Vd.2D, Vn.2D, #n /// </summary> public static Vector128<long> ShiftRightLogicalRoundedAdd(Vector128<long> addend, Vector128<long> value, byte count) => ShiftRightLogicalRoundedAdd(addend, value, count); /// <summary> /// uint8x16_t vrsraq_n_u8 (uint8x16_t a, uint8x16_t b, const int n) /// A32: VRSRA.U8 Qd, Qm, #n /// A64: URSRA Vd.16B, Vn.16B, #n /// </summary> public static Vector128<sbyte> ShiftRightLogicalRoundedAdd(Vector128<sbyte> addend, Vector128<sbyte> value, byte count) => ShiftRightLogicalRoundedAdd(addend, value, count); /// <summary> /// uint16x8_t vrsraq_n_u16 (uint16x8_t a, uint16x8_t b, const int n) /// A32: VRSRA.U16 Qd, Qm, #n /// A64: URSRA Vd.8H, Vn.8H, #n /// </summary> public static Vector128<ushort> ShiftRightLogicalRoundedAdd(Vector128<ushort> addend, Vector128<ushort> value, byte count) => ShiftRightLogicalRoundedAdd(addend, value, count); /// <summary> /// uint32x4_t vrsraq_n_u32 (uint32x4_t a, uint32x4_t b, const int n) /// A32: VRSRA.U32 Qd, Qm, #n /// A64: URSRA Vd.4S, Vn.4S, #n /// </summary> public static Vector128<uint> ShiftRightLogicalRoundedAdd(Vector128<uint> addend, Vector128<uint> value, byte count) => ShiftRightLogicalRoundedAdd(addend, value, count); /// <summary> /// uint64x2_t vrsraq_n_u64 (uint64x2_t a, uint64x2_t b, const int n) /// A32: VRSRA.U64 Qd, Qm, #n /// A64: URSRA Vd.2D, Vn.2D, #n /// </summary> public static Vector128<ulong> ShiftRightLogicalRoundedAdd(Vector128<ulong> addend, Vector128<ulong> value, byte count) => ShiftRightLogicalRoundedAdd(addend, value, count); /// <summary> /// uint64x1_t vrsra_n_u64 (uint64x1_t a, uint64x1_t b, const int n) /// A32: VRSRA.U64 Dd, Dm, #n /// A64: URSRA Dd, Dn, #n /// </summary> public static Vector64<long> ShiftRightLogicalRoundedAddScalar(Vector64<long> addend, Vector64<long> value, byte count) => ShiftRightLogicalRoundedAddScalar(addend, value, count); /// <summary> /// uint64x1_t vrsra_n_u64 (uint64x1_t a, uint64x1_t b, const int n) /// A32: VRSRA.U64 Dd, Dm, #n /// A64: URSRA Dd, Dn, #n /// </summary> public static Vector64<ulong> ShiftRightLogicalRoundedAddScalar(Vector64<ulong> addend, Vector64<ulong> value, byte count) => ShiftRightLogicalRoundedAddScalar(addend, value, count); /// <summary> /// uint8x8_t vrshrn_n_u16 (uint16x8_t a, const int n) /// A32: VRSHRN.I16 Dd, Qm, #n /// A64: RSHRN Vd.8B, Vn.8H, #n /// </summary> public static Vector64<byte> 
ShiftRightLogicalRoundedNarrowingLower(Vector128<ushort> value, byte count) => ShiftRightLogicalRoundedNarrowingLower(value, count); /// <summary> /// int16x4_t vrshrn_n_s32 (int32x4_t a, const int n) /// A32: VRSHRN.I32 Dd, Qm, #n /// A64: RSHRN Vd.4H, Vn.4S, #n /// </summary> public static Vector64<short> ShiftRightLogicalRoundedNarrowingLower(Vector128<int> value, byte count) => ShiftRightLogicalRoundedNarrowingLower(value, count); /// <summary> /// int32x2_t vrshrn_n_s64 (int64x2_t a, const int n) /// A32: VRSHRN.I64 Dd, Qm, #n /// A64: RSHRN Vd.2S, Vn.2D, #n /// </summary> public static Vector64<int> ShiftRightLogicalRoundedNarrowingLower(Vector128<long> value, byte count) => ShiftRightLogicalRoundedNarrowingLower(value, count); /// <summary> /// int8x8_t vrshrn_n_s16 (int16x8_t a, const int n) /// A32: VRSHRN.I16 Dd, Qm, #n /// A64: RSHRN Vd.8B, Vn.8H, #n /// </summary> public static Vector64<sbyte> ShiftRightLogicalRoundedNarrowingLower(Vector128<short> value, byte count) => ShiftRightLogicalRoundedNarrowingLower(value, count); /// <summary> /// uint16x4_t vrshrn_n_u32 (uint32x4_t a, const int n) /// A32: VRSHRN.I32 Dd, Qm, #n /// A64: RSHRN Vd.4H, Vn.4S, #n /// </summary> public static Vector64<ushort> ShiftRightLogicalRoundedNarrowingLower(Vector128<uint> value, byte count) => ShiftRightLogicalRoundedNarrowingLower(value, count); /// <summary> /// uint32x2_t vrshrn_n_u64 (uint64x2_t a, const int n) /// A32: VRSHRN.I64 Dd, Qm, #n /// A64: RSHRN Vd.2S, Vn.2D, #n /// </summary> public static Vector64<uint> ShiftRightLogicalRoundedNarrowingLower(Vector128<ulong> value, byte count) => ShiftRightLogicalRoundedNarrowingLower(value, count); /// <summary> /// uint8x8_t vqrshrn_n_u16 (uint16x8_t a, const int n) /// A32: VQRSHRN.U16 Dd, Qm, #n /// A64: UQRSHRN Vd.8B, Vn.8H, #n /// </summary> public static Vector64<byte> ShiftRightLogicalRoundedNarrowingSaturateLower(Vector128<ushort> value, byte count) => ShiftRightLogicalRoundedNarrowingSaturateLower(value, count); /// <summary> /// uint16x4_t vqrshrn_n_u32 (uint32x4_t a, const int n) /// A32: VQRSHRN.U32 Dd, Qm, #n /// A64: UQRSHRN Vd.4H, Vn.4S, #n /// </summary> public static Vector64<short> ShiftRightLogicalRoundedNarrowingSaturateLower(Vector128<int> value, byte count) => ShiftRightLogicalRoundedNarrowingSaturateLower(value, count); /// <summary> /// uint32x2_t vqrshrn_n_u64 (uint64x2_t a, const int n) /// A32: VQRSHRN.U64 Dd, Qm, #n /// A64: UQRSHRN Vd.2S, Vn.2D, #n /// </summary> public static Vector64<int> ShiftRightLogicalRoundedNarrowingSaturateLower(Vector128<long> value, byte count) => ShiftRightLogicalRoundedNarrowingSaturateLower(value, count); /// <summary> /// uint8x8_t vqrshrn_n_u16 (uint16x8_t a, const int n) /// A32: VQRSHRN.U16 Dd, Qm, #n /// A64: UQRSHRN Vd.8B, Vn.8H, #n /// </summary> public static Vector64<sbyte> ShiftRightLogicalRoundedNarrowingSaturateLower(Vector128<short> value, byte count) => ShiftRightLogicalRoundedNarrowingSaturateLower(value, count); /// <summary> /// uint16x4_t vqrshrn_n_u32 (uint32x4_t a, const int n) /// A32: VQRSHRN.U32 Dd, Qm, #n /// A64: UQRSHRN Vd.4H, Vn.4S, #n /// </summary> public static Vector64<ushort> ShiftRightLogicalRoundedNarrowingSaturateLower(Vector128<uint> value, byte count) => ShiftRightLogicalRoundedNarrowingSaturateLower(value, count); /// <summary> /// uint32x2_t vqrshrn_n_u64 (uint64x2_t a, const int n) /// A32: VQRSHRN.U64 Dd, Qm, #n /// A64: UQRSHRN Vd.2S, Vn.2D, #n /// </summary> public static Vector64<uint> 
ShiftRightLogicalRoundedNarrowingSaturateLower(Vector128<ulong> value, byte count) => ShiftRightLogicalRoundedNarrowingSaturateLower(value, count); /// <summary> /// uint8x16_t vqrshrn_high_n_u16 (uint8x8_t r, uint16x8_t a, const int n) /// A32: VQRSHRN.U16 Dd+1, Qm, #n /// A64: UQRSHRN2 Vd.16B, Vn.8H, #n /// </summary> public static Vector128<byte> ShiftRightLogicalRoundedNarrowingSaturateUpper(Vector64<byte> lower, Vector128<ushort> value, byte count) => ShiftRightLogicalRoundedNarrowingSaturateUpper(lower, value, count); /// <summary> /// uint16x8_t vqrshrn_high_n_u32 (uint16x4_t r, uint32x4_t a, const int n) /// A32: VQRSHRN.U32 Dd+1, Qm, #n /// A64: UQRSHRN2 Vd.8H, Vn.4S, #n /// </summary> public static Vector128<short> ShiftRightLogicalRoundedNarrowingSaturateUpper(Vector64<short> lower, Vector128<int> value, byte count) => ShiftRightLogicalRoundedNarrowingSaturateUpper(lower, value, count); /// <summary> /// uint32x4_t vqrshrn_high_n_u64 (uint32x2_t r, uint64x2_t a, const int n) /// A32: VQRSHRN.U64 Dd+1, Qm, #n /// A64: UQRSHRN2 Vd.4S, Vn.2D, #n /// </summary> public static Vector128<int> ShiftRightLogicalRoundedNarrowingSaturateUpper(Vector64<int> lower, Vector128<long> value, byte count) => ShiftRightLogicalRoundedNarrowingSaturateUpper(lower, value, count); /// <summary> /// uint8x16_t vqrshrn_high_n_u16 (uint8x8_t r, uint16x8_t a, const int n) /// A32: VQRSHRN.U16 Dd+1, Qm, #n /// A64: UQRSHRN2 Vd.16B, Vn.8H, #n /// </summary> public static Vector128<sbyte> ShiftRightLogicalRoundedNarrowingSaturateUpper(Vector64<sbyte> lower, Vector128<short> value, byte count) => ShiftRightLogicalRoundedNarrowingSaturateUpper(lower, value, count); /// <summary> /// uint16x8_t vqrshrn_high_n_u32 (uint16x4_t r, uint32x4_t a, const int n) /// A32: VQRSHRN.U32 Dd+1, Qm, #n /// A64: UQRSHRN2 Vd.8H, Vn.4S, #n /// </summary> public static Vector128<ushort> ShiftRightLogicalRoundedNarrowingSaturateUpper(Vector64<ushort> lower, Vector128<uint> value, byte count) => ShiftRightLogicalRoundedNarrowingSaturateUpper(lower, value, count); /// <summary> /// uint32x4_t vqrshrn_high_n_u64 (uint32x2_t r, uint64x2_t a, const int n) /// A32: VQRSHRN.U64 Dd+1, Qm, #n /// A64: UQRSHRN2 Vd.4S, Vn.2D, #n /// </summary> public static Vector128<uint> ShiftRightLogicalRoundedNarrowingSaturateUpper(Vector64<uint> lower, Vector128<ulong> value, byte count) => ShiftRightLogicalRoundedNarrowingSaturateUpper(lower, value, count); /// <summary> /// uint8x16_t vrshrn_high_n_u16 (uint8x8_t r, uint16x8_t a, const int n) /// A32: VRSHRN.I16 Dd+1, Qm, #n /// A64: RSHRN2 Vd.16B, Vn.8H, #n /// </summary> public static Vector128<byte> ShiftRightLogicalRoundedNarrowingUpper(Vector64<byte> lower, Vector128<ushort> value, byte count) => ShiftRightLogicalRoundedNarrowingUpper(lower, value, count); /// <summary> /// int16x8_t vrshrn_high_n_s32 (int16x4_t r, int32x4_t a, const int n) /// A32: VRSHRN.I32 Dd+1, Qm, #n /// A64: RSHRN2 Vd.8H, Vn.4S, #n /// </summary> public static Vector128<short> ShiftRightLogicalRoundedNarrowingUpper(Vector64<short> lower, Vector128<int> value, byte count) => ShiftRightLogicalRoundedNarrowingUpper(lower, value, count); /// <summary> /// int32x4_t vrshrn_high_n_s64 (int32x2_t r, int64x2_t a, const int n) /// A32: VRSHRN.I64 Dd+1, Qm, #n /// A64: RSHRN2 Vd.4S, Vn.2D, #n /// </summary> public static Vector128<int> ShiftRightLogicalRoundedNarrowingUpper(Vector64<int> lower, Vector128<long> value, byte count) => ShiftRightLogicalRoundedNarrowingUpper(lower, value, count);
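        // Illustrative sketch (editorial addition): like the other narrowing families, the
        // rounded RSHRN/RSHRN2 pair narrows 256 bits of input into a single 128-bit result.
        // The hypothetical helper below keeps the rounded high byte of each ushort lane; note
        // that RSHRN truncates rather than saturates, so lanes at or above 0xFF80 wrap.
        private static Vector128<byte> RoundedHighBytesExample(Vector128<ushort> low, Vector128<ushort> high)
        {
            Vector64<byte> lowHalf = ShiftRightLogicalRoundedNarrowingLower(low, 8); // RSHRN
            return ShiftRightLogicalRoundedNarrowingUpper(lowHalf, high, 8);         // RSHRN2
        }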
/// <summary> /// int8x16_t vrshrn_high_n_s16 (int8x8_t r, int16x8_t a, const int n) /// A32: VRSHRN.I16 Dd+1, Qm, #n /// A64: RSHRN2 Vd.16B, Vn.8H, #n /// </summary> public static Vector128<sbyte> ShiftRightLogicalRoundedNarrowingUpper(Vector64<sbyte> lower, Vector128<short> value, byte count) => ShiftRightLogicalRoundedNarrowingUpper(lower, value, count); /// <summary> /// uint16x8_t vrshrn_high_n_u32 (uint16x4_t r, uint32x4_t a, const int n) /// A32: VRSHRN.I32 Dd+1, Qm, #n /// A64: RSHRN2 Vd.8H, Vn.4S, #n /// </summary> public static Vector128<ushort> ShiftRightLogicalRoundedNarrowingUpper(Vector64<ushort> lower, Vector128<uint> value, byte count) => ShiftRightLogicalRoundedNarrowingUpper(lower, value, count); /// <summary> /// uint32x4_t vrshrn_high_n_u64 (uint32x2_t r, uint64x2_t a, const int n) /// A32: VRSHRN.I64 Dd+1, Qm, #n /// A64: RSHRN2 Vd.4S, Vn.2D, #n /// </summary> public static Vector128<uint> ShiftRightLogicalRoundedNarrowingUpper(Vector64<uint> lower, Vector128<ulong> value, byte count) => ShiftRightLogicalRoundedNarrowingUpper(lower, value, count); /// <summary> /// uint64x1_t vrshr_n_u64 (uint64x1_t a, const int n) /// A32: VRSHR.U64 Dd, Dm, #n /// A64: URSHR Dd, Dn, #n /// </summary> public static Vector64<long> ShiftRightLogicalRoundedScalar(Vector64<long> value, byte count) => ShiftRightLogicalRoundedScalar(value, count); /// <summary> /// uint64x1_t vrshr_n_u64 (uint64x1_t a, const int n) /// A32: VRSHR.U64 Dd, Dm, #n /// A64: URSHR Dd, Dn, #n /// </summary> public static Vector64<ulong> ShiftRightLogicalRoundedScalar(Vector64<ulong> value, byte count) => ShiftRightLogicalRoundedScalar(value, count); /// <summary> /// uint64x1_t vshr_n_u64 (uint64x1_t a, const int n) /// A32: VSHR.U64 Dd, Dm, #n /// A64: USHR Dd, Dn, #n /// </summary> public static Vector64<long> ShiftRightLogicalScalar(Vector64<long> value, byte count) => ShiftRightLogicalScalar(value, count); /// <summary> /// uint64x1_t vshr_n_u64 (uint64x1_t a, const int n) /// A32: VSHR.U64 Dd, Dm, #n /// A64: USHR Dd, Dn, #n /// </summary> public static Vector64<ulong> ShiftRightLogicalScalar(Vector64<ulong> value, byte count) => ShiftRightLogicalScalar(value, count); /// <summary> /// int32x4_t vmovl_s16 (int16x4_t a) /// A32: VMOVL.S16 Qd, Dm /// A64: SXTL Vd.4S, Vn.4H /// </summary> public static Vector128<int> SignExtendWideningLower(Vector64<short> value) => SignExtendWideningLower(value); /// <summary> /// int64x2_t vmovl_s32 (int32x2_t a) /// A32: VMOVL.S32 Qd, Dm /// A64: SXTL Vd.2D, Vn.2S /// </summary> public static Vector128<long> SignExtendWideningLower(Vector64<int> value) => SignExtendWideningLower(value); /// <summary> /// int16x8_t vmovl_s8 (int8x8_t a) /// A32: VMOVL.S8 Qd, Dm /// A64: SXTL Vd.8H, Vn.8B /// </summary> public static Vector128<short> SignExtendWideningLower(Vector64<sbyte> value) => SignExtendWideningLower(value); /// <summary> /// int32x4_t vmovl_high_s16 (int16x8_t a) /// A32: VMOVL.S16 Qd, Dm+1 /// A64: SXTL2 Vd.4S, Vn.8H /// </summary> public static Vector128<int> SignExtendWideningUpper(Vector128<short> value) => SignExtendWideningUpper(value); /// <summary> /// int64x2_t vmovl_high_s32 (int32x4_t a) /// A32: VMOVL.S32 Qd, Dm+1 /// A64: SXTL2 Vd.2D, Vn.4S /// </summary> public static Vector128<long> SignExtendWideningUpper(Vector128<int> value) => SignExtendWideningUpper(value); /// <summary> /// int16x8_t vmovl_high_s8 (int8x16_t a) /// A32: VMOVL.S8 Qd, Dm+1 /// A64: SXTL2 Vd.8H, Vn.16B /// </summary> public static Vector128<short> SignExtendWideningUpper(Vector128<sbyte> value) => SignExtendWideningUpper(value);
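        // Illustrative sketch (editorial addition): SXTL/SXTL2 split one 128-bit vector of
        // sbytes into two vectors of shorts, the usual prelude to doing 8-bit math at 16-bit
        // precision. GetLower is the Vector128 extension method from System.Runtime.Intrinsics.
        private static (Vector128<short> Low, Vector128<short> High) WidenExample(Vector128<sbyte> value) =>
            (SignExtendWideningLower(value.GetLower()), // SXTL
             SignExtendWideningUpper(value));           // SXTL2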
/// <summary> /// float64x1_t vsqrt_f64 (float64x1_t a) /// A32: VSQRT.F64 Dd, Dm /// A64: FSQRT Dd, Dn /// </summary> public static Vector64<double> SqrtScalar(Vector64<double> value) => SqrtScalar(value); /// <summary> /// float32_t vsqrts_f32 (float32_t a) /// A32: VSQRT.F32 Sd, Sm /// A64: FSQRT Sd, Sn /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. /// </summary> public static Vector64<float> SqrtScalar(Vector64<float> value) => SqrtScalar(value); /// <summary> /// void vst1_u8 (uint8_t * ptr, uint8x8_t val) /// A32: VST1.8 { Dd }, [Rn] /// A64: ST1 { Vt.8B }, [Xn] /// </summary> public static unsafe void Store(byte* address, Vector64<byte> source) => Store(address, source); /// <summary> /// void vst1_f64 (float64_t * ptr, float64x1_t val) /// A32: VST1.64 { Dd }, [Rn] /// A64: ST1 { Vt.1D }, [Xn] /// </summary> public static unsafe void Store(double* address, Vector64<double> source) => Store(address, source); /// <summary> /// void vst1_s16 (int16_t * ptr, int16x4_t val) /// A32: VST1.16 { Dd }, [Rn] /// A64: ST1 { Vt.4H }, [Xn] /// </summary> public static unsafe void Store(short* address, Vector64<short> source) => Store(address, source); /// <summary> /// void vst1_s32 (int32_t * ptr, int32x2_t val) /// A32: VST1.32 { Dd }, [Rn] /// A64: ST1 { Vt.2S }, [Xn] /// </summary> public static unsafe void Store(int* address, Vector64<int> source) => Store(address, source); /// <summary> /// void vst1_s64 (int64_t * ptr, int64x1_t val) /// A32: VST1.64 { Dd }, [Rn] /// A64: ST1 { Vt.1D }, [Xn] /// </summary> public static unsafe void Store(long* address, Vector64<long> source) => Store(address, source); /// <summary> /// void vst1_s8 (int8_t * ptr, int8x8_t val) /// A32: VST1.8 { Dd }, [Rn] /// A64: ST1 { Vt.8B }, [Xn] /// </summary> public static unsafe void Store(sbyte* address, Vector64<sbyte> source) => Store(address, source); /// <summary> /// void vst1_f32 (float32_t * ptr, float32x2_t val) /// A32: VST1.32 { Dd }, [Rn] /// A64: ST1 { Vt.2S }, [Xn] /// </summary> public static unsafe void Store(float* address, Vector64<float> source) => Store(address, source); /// <summary> /// void vst1_u16 (uint16_t * ptr, uint16x4_t val) /// A32: VST1.16 { Dd }, [Rn] /// A64: ST1 { Vt.4H }, [Xn] /// </summary> public static unsafe void Store(ushort* address, Vector64<ushort> source) => Store(address, source); /// <summary> /// void vst1_u32 (uint32_t * ptr, uint32x2_t val) /// A32: VST1.32 { Dd }, [Rn] /// A64: ST1 { Vt.2S }, [Xn] /// </summary> public static unsafe void Store(uint* address, Vector64<uint> source) => Store(address, source); /// <summary> /// void vst1_u64 (uint64_t * ptr, uint64x1_t val) /// A32: VST1.64 { Dd }, [Rn] /// A64: ST1 { Vt.1D }, [Xn] /// </summary> public static unsafe void Store(ulong* address, Vector64<ulong> source) => Store(address, source);
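        // Illustrative sketch (editorial addition): Store writes every vector byte to an
        // arbitrarily aligned address. Hypothetical copy of one Vector64<byte> into a span
        // the caller guarantees holds at least 8 bytes:
        private static unsafe void StoreExample(Span<byte> destination, Vector64<byte> source)
        {
            fixed (byte* p = destination)
            {
                Store(p, source); // VST1.8 { Dd }, [Rn] / ST1 { Vt.8B }, [Xn]
            }
        }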
/// <summary> /// void vst1q_u8 (uint8_t * ptr, uint8x16_t val) /// A32: VST1.8 { Dd, Dd+1 }, [Rn] /// A64: ST1 { Vt.16B }, [Xn] /// </summary> public static unsafe void Store(byte* address, Vector128<byte> source) => Store(address, source); /// <summary> /// void vst1q_f64 (float64_t * ptr, float64x2_t val) /// A32: VST1.64 { Dd, Dd+1 }, [Rn] /// A64: ST1 { Vt.2D }, [Xn] /// </summary> public static unsafe void Store(double* address, Vector128<double> source) => Store(address, source); /// <summary> /// void vst1q_s16 (int16_t * ptr, int16x8_t val) /// A32: VST1.16 { Dd, Dd+1 }, [Rn] /// A64: ST1 { Vt.8H }, [Xn] /// </summary> public static unsafe void Store(short* address, Vector128<short> source) => Store(address, source); /// <summary> /// void vst1q_s32 (int32_t * ptr, int32x4_t val) /// A32: VST1.32 { Dd, Dd+1 }, [Rn] /// A64: ST1 { Vt.4S }, [Xn] /// </summary> public static unsafe void Store(int* address, Vector128<int> source) => Store(address, source); /// <summary> /// void vst1q_s64 (int64_t * ptr, int64x2_t val) /// A32: VST1.64 { Dd, Dd+1 }, [Rn] /// A64: ST1 { Vt.2D }, [Xn] /// </summary> public static unsafe void Store(long* address, Vector128<long> source) => Store(address, source); /// <summary> /// void vst1q_s8 (int8_t * ptr, int8x16_t val) /// A32: VST1.8 { Dd, Dd+1 }, [Rn] /// A64: ST1 { Vt.16B }, [Xn] /// </summary> public static unsafe void Store(sbyte* address, Vector128<sbyte> source) => Store(address, source); /// <summary> /// void vst1q_f32 (float32_t * ptr, float32x4_t val) /// A32: VST1.32 { Dd, Dd+1 }, [Rn] /// A64: ST1 { Vt.4S }, [Xn] /// </summary> public static unsafe void Store(float* address, Vector128<float> source) => Store(address, source); /// <summary> /// void vst1q_u16 (uint16_t * ptr, uint16x8_t val) /// A32: VST1.16 { Dd, Dd+1 }, [Rn] /// A64: ST1 { Vt.8H }, [Xn] /// </summary> public static unsafe void Store(ushort* address, Vector128<ushort> source) => Store(address, source); /// <summary> /// void vst1q_u32 (uint32_t * ptr, uint32x4_t val) /// A32: VST1.32 { Dd, Dd+1 }, [Rn] /// A64: ST1 { Vt.4S }, [Xn] /// </summary> public static unsafe void Store(uint* address, Vector128<uint> source) => Store(address, source); /// <summary> /// void vst1q_u64 (uint64_t * ptr, uint64x2_t val) /// A32: VST1.64 { Dd, Dd+1 }, [Rn] /// A64: ST1 { Vt.2D }, [Xn] /// </summary> public static unsafe void Store(ulong* address, Vector128<ulong> source) => Store(address, source); /// <summary> /// void vst1_lane_u8 (uint8_t * ptr, uint8x8_t val, const int lane) /// A32: VST1.8 { Dd[index] }, [Rn] /// A64: ST1 { Vt.B }[index], [Xn] /// </summary> public static unsafe void StoreSelectedScalar(byte* address, Vector64<byte> value, byte index) => StoreSelectedScalar(address, value, index); /// <summary> /// void vst1_lane_s16 (int16_t * ptr, int16x4_t val, const int lane) /// A32: VST1.16 { Dd[index] }, [Rn] /// A64: ST1 { Vt.H }[index], [Xn] /// </summary> public static unsafe void StoreSelectedScalar(short* address, Vector64<short> value, byte index) => StoreSelectedScalar(address, value, index); /// <summary> /// void vst1_lane_s32 (int32_t * ptr, int32x2_t val, const int lane) /// A32: VST1.32 { Dd[index] }, [Rn] /// A64: ST1 { Vt.S }[index], [Xn] /// </summary> public static unsafe void StoreSelectedScalar(int* address, Vector64<int> value, byte index) => StoreSelectedScalar(address, value, index); /// <summary> /// void vst1_lane_s8 (int8_t * ptr, int8x8_t val, const int lane) /// A32: VST1.8 { Dd[index] }, [Rn] /// A64: ST1 { Vt.B }[index], [Xn] /// </summary> public static unsafe void StoreSelectedScalar(sbyte* address, Vector64<sbyte> value, byte index) => StoreSelectedScalar(address, value, index); /// <summary> /// void vst1_lane_f32 (float32_t * ptr, float32x2_t val, const int lane) /// A32: VST1.32 { Dd[index] }, [Rn] /// A64: ST1 { Vt.S }[index], [Xn] /// </summary> public static unsafe void StoreSelectedScalar(float* address, Vector64<float> value, byte index) => StoreSelectedScalar(address, value, index); /// <summary> /// void vst1_lane_u16 (uint16_t * ptr, uint16x4_t val, const int lane) /// A32: VST1.16 { Dd[index] }, [Rn] /// A64: ST1 { Vt.H }[index], [Xn] /// 
</summary> public static unsafe void StoreSelectedScalar(ushort* address, Vector64<ushort> value, byte index) => StoreSelectedScalar(address, value, index); /// <summary> /// void vst1_lane_u32 (uint32_t * ptr, uint32x2_t val, const int lane) /// A32: VST1.32 { Dd[index] }, [Rn] /// A64: ST1 { Vt.S }[index], [Xn] /// </summary> public static unsafe void StoreSelectedScalar(uint* address, Vector64<uint> value, byte index) => StoreSelectedScalar(address, value, index); /// <summary> /// void vst1q_lane_u8 (uint8_t * ptr, uint8x16_t val, const int lane) /// A32: VST1.8 { Dd[index] }, [Rn] /// A64: ST1 { Vt.B }[index], [Xn] /// </summary> public static unsafe void StoreSelectedScalar(byte* address, Vector128<byte> value, byte index) => StoreSelectedScalar(address, value, index); /// <summary> /// void vst1q_lane_f64 (float64_t * ptr, float64x2_t val, const int lane) /// A32: VSTR.64 Dd, [Rn] /// A64: ST1 { Vt.D }[index], [Xn] /// </summary> public static unsafe void StoreSelectedScalar(double* address, Vector128<double> value, byte index) => StoreSelectedScalar(address, value, index); /// <summary> /// void vst1q_lane_s16 (int16_t * ptr, int16x8_t val, const int lane) /// A32: VST1.16 { Dd[index] }, [Rn] /// A64: ST1 { Vt.H }[index], [Xn] /// </summary> public static unsafe void StoreSelectedScalar(short* address, Vector128<short> value, byte index) => StoreSelectedScalar(address, value, index); /// <summary> /// void vst1q_lane_s32 (int32_t * ptr, int32x4_t val, const int lane) /// A32: VST1.32 { Dd[index] }, [Rn] /// A64: ST1 { Vt.S }[index], [Xn] /// </summary> public static unsafe void StoreSelectedScalar(int* address, Vector128<int> value, byte index) => StoreSelectedScalar(address, value, index); /// <summary> /// void vst1q_lane_s64 (int64_t * ptr, int64x2_t val, const int lane) /// A32: VSTR.64 Dd, [Rn] /// A64: ST1 { Vt.D }[index], [Xn] /// </summary> public static unsafe void StoreSelectedScalar(long* address, Vector128<long> value, byte index) => StoreSelectedScalar(address, value, index); /// <summary> /// void vst1q_lane_s8 (int8_t * ptr, int8x16_t val, const int lane) /// A32: VST1.8 { Dd[index] }, [Rn] /// A64: ST1 { Vt.B }[index], [Xn] /// </summary> public static unsafe void StoreSelectedScalar(sbyte* address, Vector128<sbyte> value, byte index) => StoreSelectedScalar(address, value, index); /// <summary> /// void vst1q_lane_f32 (float32_t * ptr, float32x4_t val, const int lane) /// A32: VST1.32 { Dd[index] }, [Rn] /// A64: ST1 { Vt.S }[index], [Xn] /// </summary> public static unsafe void StoreSelectedScalar(float* address, Vector128<float> value, byte index) => StoreSelectedScalar(address, value, index); /// <summary> /// void vst1q_lane_u16 (uint16_t * ptr, uint16x8_t val, const int lane) /// A32: VST1.16 { Dd[index] }, [Rn] /// A64: ST1 { Vt.H }[index], [Xn] /// </summary> public static unsafe void StoreSelectedScalar(ushort* address, Vector128<ushort> value, byte index) => StoreSelectedScalar(address, value, index); /// <summary> /// void vst1q_lane_u32 (uint32_t * ptr, uint32x4_t val, const int lane) /// A32: VST1.32 { Dd[index] }, [Rn] /// A64: ST1 { Vt.S }[index], [Xn] /// </summary> public static unsafe void StoreSelectedScalar(uint* address, Vector128<uint> value, byte index) => StoreSelectedScalar(address, value, index); /// <summary> /// void vst1q_lane_u64 (uint64_t * ptr, uint64x2_t val, const int lane) /// A32: VSTR.64 Dd, [Rn] /// A64: ST1 { Vt.D }[index], [Xn] /// </summary> public static unsafe void StoreSelectedScalar(ulong* address, Vector128<ulong> value, 
byte index) => StoreSelectedScalar(address, value, index); /// <summary> /// uint8x8_t vsub_u8 (uint8x8_t a, uint8x8_t b) /// A32: VSUB.I8 Dd, Dn, Dm /// A64: SUB Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> Subtract(Vector64<byte> left, Vector64<byte> right) => Subtract(left, right); /// <summary> /// int16x4_t vsub_s16 (int16x4_t a, int16x4_t b) /// A32: VSUB.I16 Dd, Dn, Dm /// A64: SUB Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<short> Subtract(Vector64<short> left, Vector64<short> right) => Subtract(left, right); /// <summary> /// int32x2_t vsub_s32 (int32x2_t a, int32x2_t b) /// A32: VSUB.I32 Dd, Dn, Dm /// A64: SUB Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<int> Subtract(Vector64<int> left, Vector64<int> right) => Subtract(left, right); /// <summary> /// int8x8_t vsub_s8 (int8x8_t a, int8x8_t b) /// A32: VSUB.I8 Dd, Dn, Dm /// A64: SUB Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> Subtract(Vector64<sbyte> left, Vector64<sbyte> right) => Subtract(left, right); /// <summary> /// float32x2_t vsub_f32 (float32x2_t a, float32x2_t b) /// A32: VSUB.F32 Dd, Dn, Dm /// A64: FSUB Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<float> Subtract(Vector64<float> left, Vector64<float> right) => Subtract(left, right); /// <summary> /// uint16x4_t vsub_u16 (uint16x4_t a, uint16x4_t b) /// A32: VSUB.I16 Dd, Dn, Dm /// A64: SUB Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<ushort> Subtract(Vector64<ushort> left, Vector64<ushort> right) => Subtract(left, right); /// <summary> /// uint32x2_t vsub_u32 (uint32x2_t a, uint32x2_t b) /// A32: VSUB.I32 Dd, Dn, Dm /// A64: SUB Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<uint> Subtract(Vector64<uint> left, Vector64<uint> right) => Subtract(left, right); /// <summary> /// uint8x16_t vsubq_u8 (uint8x16_t a, uint8x16_t b) /// A32: VSUB.I8 Qd, Qn, Qm /// A64: SUB Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> Subtract(Vector128<byte> left, Vector128<byte> right) => Subtract(left, right); /// <summary> /// int16x8_t vsubq_s16 (int16x8_t a, int16x8_t b) /// A32: VSUB.I16 Qd, Qn, Qm /// A64: SUB Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<short> Subtract(Vector128<short> left, Vector128<short> right) => Subtract(left, right); /// <summary> /// int32x4_t vsubq_s32 (int32x4_t a, int32x4_t b) /// A32: VSUB.I32 Qd, Qn, Qm /// A64: SUB Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<int> Subtract(Vector128<int> left, Vector128<int> right) => Subtract(left, right); /// <summary> /// int64x2_t vsubq_s64 (int64x2_t a, int64x2_t b) /// A32: VSUB.I64 Qd, Qn, Qm /// A64: SUB Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<long> Subtract(Vector128<long> left, Vector128<long> right) => Subtract(left, right); /// <summary> /// int8x16_t vsubq_s8 (int8x16_t a, int8x16_t b) /// A32: VSUB.I8 Qd, Qn, Qm /// A64: SUB Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> Subtract(Vector128<sbyte> left, Vector128<sbyte> right) => Subtract(left, right); /// <summary> /// float32x4_t vsubq_f32 (float32x4_t a, float32x4_t b) /// A32: VSUB.F32 Qd, Qn, Qm /// A64: FSUB Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<float> Subtract(Vector128<float> left, Vector128<float> right) => Subtract(left, right); /// <summary> /// uint16x8_t vsubq_u16 (uint16x8_t a, uint16x8_t b) /// A32: VSUB.I16 Qd, Qn, Qm /// A64: SUB Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<ushort> Subtract(Vector128<ushort> left, 
Vector128<ushort> right) => Subtract(left, right); /// <summary> /// uint32x4_t vsubq_u32 (uint32x4_t a, uint32x4_t b) /// A32: VSUB.I32 Qd, Qn, Qm /// A64: SUB Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<uint> Subtract(Vector128<uint> left, Vector128<uint> right) => Subtract(left, right); /// <summary> /// uint64x2_t vsubq_u64 (uint64x2_t a, uint64x2_t b) /// A32: VSUB.I64 Qd, Qn, Qm /// A64: SUB Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<ulong> Subtract(Vector128<ulong> left, Vector128<ulong> right) => Subtract(left, right); /// <summary> /// uint8x8_t vsubhn_u16 (uint16x8_t a, uint16x8_t b) /// A32: VSUBHN.I16 Dd, Qn, Qm /// A64: SUBHN Vd.8B, Vn.8H, Vm.8H /// </summary> public static Vector64<byte> SubtractHighNarrowingLower(Vector128<ushort> left, Vector128<ushort> right) => SubtractHighNarrowingLower(left, right); /// <summary> /// int16x4_t vsubhn_s32 (int32x4_t a, int32x4_t b) /// A32: VSUBHN.I32 Dd, Qn, Qm /// A64: SUBHN Vd.4H, Vn.4S, Vm.4S /// </summary> public static Vector64<short> SubtractHighNarrowingLower(Vector128<int> left, Vector128<int> right) => SubtractHighNarrowingLower(left, right); /// <summary> /// int32x2_t vsubhn_s64 (int64x2_t a, int64x2_t b) /// A32: VSUBHN.I64 Dd, Qn, Qm /// A64: SUBHN Vd.2S, Vn.2D, Vm.2D /// </summary> public static Vector64<int> SubtractHighNarrowingLower(Vector128<long> left, Vector128<long> right) => SubtractHighNarrowingLower(left, right); /// <summary> /// int8x8_t vsubhn_s16 (int16x8_t a, int16x8_t b) /// A32: VSUBHN.I16 Dd, Qn, Qm /// A64: SUBHN Vd.8B, Vn.8H, Vm.8H /// </summary> public static Vector64<sbyte> SubtractHighNarrowingLower(Vector128<short> left, Vector128<short> right) => SubtractHighNarrowingLower(left, right); /// <summary> /// uint16x4_t vsubhn_u32 (uint32x4_t a, uint32x4_t b) /// A32: VSUBHN.I32 Dd, Qn, Qm /// A64: SUBHN Vd.4H, Vn.4S, Vm.4S /// </summary> public static Vector64<ushort> SubtractHighNarrowingLower(Vector128<uint> left, Vector128<uint> right) => SubtractHighNarrowingLower(left, right); /// <summary> /// uint32x2_t vsubhn_u64 (uint64x2_t a, uint64x2_t b) /// A32: VSUBHN.I64 Dd, Qn, Qm /// A64: SUBHN Vd.2S, Vn.2D, Vm.2D /// </summary> public static Vector64<uint> SubtractHighNarrowingLower(Vector128<ulong> left, Vector128<ulong> right) => SubtractHighNarrowingLower(left, right); /// <summary> /// uint8x16_t vsubhn_high_u16 (uint8x8_t r, uint16x8_t a, uint16x8_t b) /// A32: VSUBHN.I16 Dd+1, Qn, Qm /// A64: SUBHN2 Vd.16B, Vn.8H, Vm.8H /// </summary> public static Vector128<byte> SubtractHighNarrowingUpper(Vector64<byte> lower, Vector128<ushort> left, Vector128<ushort> right) => SubtractHighNarrowingUpper(lower, left, right); /// <summary> /// int16x8_t vsubhn_high_s32 (int16x4_t r, int32x4_t a, int32x4_t b) /// A32: VSUBHN.I32 Dd+1, Qn, Qm /// A64: SUBHN2 Vd.8H, Vn.4S, Vm.4S /// </summary> public static Vector128<short> SubtractHighNarrowingUpper(Vector64<short> lower, Vector128<int> left, Vector128<int> right) => SubtractHighNarrowingUpper(lower, left, right); /// <summary> /// int32x4_t vsubhn_high_s64 (int32x2_t r, int64x2_t a, int64x2_t b) /// A32: VSUBHN.I64 Dd+1, Qn, Qm /// A64: SUBHN2 Vd.4S, Vn.2D, Vm.2D /// </summary> public static Vector128<int> SubtractHighNarrowingUpper(Vector64<int> lower, Vector128<long> left, Vector128<long> right) => SubtractHighNarrowingUpper(lower, left, right); /// <summary> /// int8x16_t vsubhn_high_s16 (int8x8_t r, int16x8_t a, int16x8_t b) /// A32: VSUBHN.I16 Dd+1, Qn, Qm /// A64: SUBHN2 Vd.16B, Vn.8H, Vm.8H /// </summary> public static 
Vector128<sbyte> SubtractHighNarrowingUpper(Vector64<sbyte> lower, Vector128<short> left, Vector128<short> right) => SubtractHighNarrowingUpper(lower, left, right); /// <summary> /// uint16x8_t vsubhn_high_u32 (uint16x4_t r, uint32x4_t a, uint32x4_t b) /// A32: VSUBHN.I32 Dd+1, Qn, Qm /// A64: SUBHN2 Vd.8H, Vn.4S, Vm.4S /// </summary> public static Vector128<ushort> SubtractHighNarrowingUpper(Vector64<ushort> lower, Vector128<uint> left, Vector128<uint> right) => SubtractHighNarrowingUpper(lower, left, right); /// <summary> /// uint32x4_t vsubhn_high_u64 (uint32x2_t r, uint64x2_t a, uint64x2_t b) /// A32: VSUBHN.I64 Dd+1, Qn, Qm /// A64: SUBHN2 Vd.4S, Vn.2D, Vm.2D /// </summary> public static Vector128<uint> SubtractHighNarrowingUpper(Vector64<uint> lower, Vector128<ulong> left, Vector128<ulong> right) => SubtractHighNarrowingUpper(lower, left, right); /// <summary> /// uint8x8_t vrsubhn_u16 (uint16x8_t a, uint16x8_t b) /// A32: VRSUBHN.I16 Dd, Qn, Qm /// A64: RSUBHN Vd.8B, Vn.8H, Vm.8H /// </summary> public static Vector64<byte> SubtractRoundedHighNarrowingLower(Vector128<ushort> left, Vector128<ushort> right) => SubtractRoundedHighNarrowingLower(left, right); /// <summary> /// int16x4_t vrsubhn_s32 (int32x4_t a, int32x4_t b) /// A32: VRSUBHN.I32 Dd, Qn, Qm /// A64: RSUBHN Vd.4H, Vn.4S, Vm.4S /// </summary> public static Vector64<short> SubtractRoundedHighNarrowingLower(Vector128<int> left, Vector128<int> right) => SubtractRoundedHighNarrowingLower(left, right); /// <summary> /// int32x2_t vrsubhn_s64 (int64x2_t a, int64x2_t b) /// A32: VRSUBHN.I64 Dd, Qn, Qm /// A64: RSUBHN Vd.2S, Vn.2D, Vm.2D /// </summary> public static Vector64<int> SubtractRoundedHighNarrowingLower(Vector128<long> left, Vector128<long> right) => SubtractRoundedHighNarrowingLower(left, right); /// <summary> /// int8x8_t vrsubhn_s16 (int16x8_t a, int16x8_t b) /// A32: VRSUBHN.I16 Dd, Qn, Qm /// A64: RSUBHN Vd.8B, Vn.8H, Vm.8H /// </summary> public static Vector64<sbyte> SubtractRoundedHighNarrowingLower(Vector128<short> left, Vector128<short> right) => SubtractRoundedHighNarrowingLower(left, right); /// <summary> /// uint16x4_t vrsubhn_u32 (uint32x4_t a, uint32x4_t b) /// A32: VRSUBHN.I32 Dd, Qn, Qm /// A64: RSUBHN Vd.4H, Vn.4S, Vm.4S /// </summary> public static Vector64<ushort> SubtractRoundedHighNarrowingLower(Vector128<uint> left, Vector128<uint> right) => SubtractRoundedHighNarrowingLower(left, right); /// <summary> /// uint32x2_t vrsubhn_u64 (uint64x2_t a, uint64x2_t b) /// A32: VRSUBHN.I64 Dd, Qn, Qm /// A64: RSUBHN Vd.2S, Vn.2D, Vm.2D /// </summary> public static Vector64<uint> SubtractRoundedHighNarrowingLower(Vector128<ulong> left, Vector128<ulong> right) => SubtractRoundedHighNarrowingLower(left, right); /// <summary> /// uint8x16_t vrsubhn_high_u16 (uint8x8_t r, uint16x8_t a, uint16x8_t b) /// A32: VRSUBHN.I16 Dd+1, Qn, Qm /// A64: RSUBHN2 Vd.16B, Vn.8H, Vm.8H /// </summary> public static Vector128<byte> SubtractRoundedHighNarrowingUpper(Vector64<byte> lower, Vector128<ushort> left, Vector128<ushort> right) => SubtractRoundedHighNarrowingUpper(lower, left, right); /// <summary> /// int16x8_t vrsubhn_high_s32 (int16x4_t r, int32x4_t a, int32x4_t b) /// A32: VRSUBHN.I32 Dd+1, Qn, Qm /// A64: RSUBHN2 Vd.8H, Vn.4S, Vm.4S /// </summary> public static Vector128<short> SubtractRoundedHighNarrowingUpper(Vector64<short> lower, Vector128<int> left, Vector128<int> right) => SubtractRoundedHighNarrowingUpper(lower, left, right); /// <summary> /// int32x4_t vrsubhn_high_s64 (int32x2_t r, int64x2_t a, int64x2_t b) /// 
A32: VRSUBHN.I64 Dd+1, Qn, Qm /// A64: RSUBHN2 Vd.4S, Vn.2D, Vm.2D /// </summary> public static Vector128<int> SubtractRoundedHighNarrowingUpper(Vector64<int> lower, Vector128<long> left, Vector128<long> right) => SubtractRoundedHighNarrowingUpper(lower, left, right); /// <summary> /// int8x16_t vrsubhn_high_s16 (int8x8_t r, int16x8_t a, int16x8_t b) /// A32: VRSUBHN.I16 Dd+1, Qn, Qm /// A64: RSUBHN2 Vd.16B, Vn.8H, Vm.8H /// </summary> public static Vector128<sbyte> SubtractRoundedHighNarrowingUpper(Vector64<sbyte> lower, Vector128<short> left, Vector128<short> right) => SubtractRoundedHighNarrowingUpper(lower, left, right); /// <summary> /// uint16x8_t vrsubhn_high_u32 (uint16x4_t r, uint32x4_t a, uint32x4_t b) /// A32: VRSUBHN.I32 Dd+1, Qn, Qm /// A64: RSUBHN2 Vd.8H, Vn.4S, Vm.4S /// </summary> public static Vector128<ushort> SubtractRoundedHighNarrowingUpper(Vector64<ushort> lower, Vector128<uint> left, Vector128<uint> right) => SubtractRoundedHighNarrowingUpper(lower, left, right); /// <summary> /// uint32x4_t vrsubhn_high_u64 (uint32x2_t r, uint64x2_t a, uint64x2_t b) /// A32: VRSUBHN.I64 Dd+1, Qn, Qm /// A64: RSUBHN2 Vd.4S, Vn.2D, Vm.2D /// </summary> public static Vector128<uint> SubtractRoundedHighNarrowingUpper(Vector64<uint> lower, Vector128<ulong> left, Vector128<ulong> right) => SubtractRoundedHighNarrowingUpper(lower, left, right); /// <summary> /// uint8x8_t vqsub_u8 (uint8x8_t a, uint8x8_t b) /// A32: VQSUB.U8 Dd, Dn, Dm /// A64: UQSUB Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> SubtractSaturate(Vector64<byte> left, Vector64<byte> right) => SubtractSaturate(left, right); /// <summary> /// int16x4_t vqsub_s16 (int16x4_t a, int16x4_t b) /// A32: VQSUB.S16 Dd, Dn, Dm /// A64: SQSUB Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<short> SubtractSaturate(Vector64<short> left, Vector64<short> right) => SubtractSaturate(left, right); /// <summary> /// int32x2_t vqsub_s32 (int32x2_t a, int32x2_t b) /// A32: VQSUB.S32 Dd, Dn, Dm /// A64: SQSUB Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<int> SubtractSaturate(Vector64<int> left, Vector64<int> right) => SubtractSaturate(left, right); /// <summary> /// int8x8_t vqsub_s8 (int8x8_t a, int8x8_t b) /// A32: VQSUB.S8 Dd, Dn, Dm /// A64: SQSUB Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> SubtractSaturate(Vector64<sbyte> left, Vector64<sbyte> right) => SubtractSaturate(left, right); /// <summary> /// uint16x4_t vqsub_u16 (uint16x4_t a, uint16x4_t b) /// A32: VQSUB.U16 Dd, Dn, Dm /// A64: UQSUB Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<ushort> SubtractSaturate(Vector64<ushort> left, Vector64<ushort> right) => SubtractSaturate(left, right); /// <summary> /// uint32x2_t vqsub_u32 (uint32x2_t a, uint32x2_t b) /// A32: VQSUB.U32 Dd, Dn, Dm /// A64: UQSUB Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<uint> SubtractSaturate(Vector64<uint> left, Vector64<uint> right) => SubtractSaturate(left, right); /// <summary> /// uint8x16_t vqsubq_u8 (uint8x16_t a, uint8x16_t b) /// A32: VQSUB.U8 Qd, Qn, Qm /// A64: UQSUB Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> SubtractSaturate(Vector128<byte> left, Vector128<byte> right) => SubtractSaturate(left, right); /// <summary> /// int16x8_t vqsubq_s16 (int16x8_t a, int16x8_t b) /// A32: VQSUB.S16 Qd, Qn, Qm /// A64: SQSUB Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<short> SubtractSaturate(Vector128<short> left, Vector128<short> right) => SubtractSaturate(left, right); /// <summary> /// 
int32x4_t vqsubq_s32 (int32x4_t a, int32x4_t b) /// A32: VQSUB.S32 Qd, Qn, Qm /// A64: SQSUB Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<int> SubtractSaturate(Vector128<int> left, Vector128<int> right) => SubtractSaturate(left, right); /// <summary> /// int64x2_t vqsubq_s64 (int64x2_t a, int64x2_t b) /// A32: VQSUB.S64 Qd, Qn, Qm /// A64: SQSUB Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<long> SubtractSaturate(Vector128<long> left, Vector128<long> right) => SubtractSaturate(left, right); /// <summary> /// int8x16_t vqsubq_s8 (int8x16_t a, int8x16_t b) /// A32: VQSUB.S8 Qd, Qn, Qm /// A64: SQSUB Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> SubtractSaturate(Vector128<sbyte> left, Vector128<sbyte> right) => SubtractSaturate(left, right); /// <summary> /// uint16x8_t vqsubq_u16 (uint16x8_t a, uint16x8_t b) /// A32: VQSUB.U16 Qd, Qn, Qm /// A64: UQSUB Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<ushort> SubtractSaturate(Vector128<ushort> left, Vector128<ushort> right) => SubtractSaturate(left, right); /// <summary> /// uint32x4_t vqsubq_u32 (uint32x4_t a, uint32x4_t b) /// A32: VQSUB.U32 Qd, Qn, Qm /// A64: UQSUB Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<uint> SubtractSaturate(Vector128<uint> left, Vector128<uint> right) => SubtractSaturate(left, right); /// <summary> /// uint64x2_t vqsubq_u64 (uint64x2_t a, uint64x2_t b) /// A32: VQSUB.U64 Qd, Qn, Qm /// A64: UQSUB Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<ulong> SubtractSaturate(Vector128<ulong> left, Vector128<ulong> right) => SubtractSaturate(left, right); /// <summary> /// int64x1_t vqsub_s64 (int64x1_t a, int64x1_t b) /// A32: VQSUB.S64 Dd, Dn, Dm /// A64: SQSUB Dd, Dn, Dm /// </summary> public static Vector64<long> SubtractSaturateScalar(Vector64<long> left, Vector64<long> right) => SubtractSaturateScalar(left, right); /// <summary> /// uint64x1_t vqsub_u64 (uint64x1_t a, uint64x1_t b) /// A32: VQSUB.U64 Dd, Dn, Dm /// A64: UQSUB Dd, Dn, Dm /// </summary> public static Vector64<ulong> SubtractSaturateScalar(Vector64<ulong> left, Vector64<ulong> right) => SubtractSaturateScalar(left, right); /// <summary> /// float64x1_t vsub_f64 (float64x1_t a, float64x1_t b) /// A32: VSUB.F64 Dd, Dn, Dm /// A64: FSUB Dd, Dn, Dm /// </summary> public static Vector64<double> SubtractScalar(Vector64<double> left, Vector64<double> right) => SubtractScalar(left, right); /// <summary> /// int64x1_t vsub_s64 (int64x1_t a, int64x1_t b) /// A32: VSUB.I64 Dd, Dn, Dm /// A64: SUB Dd, Dn, Dm /// </summary> public static Vector64<long> SubtractScalar(Vector64<long> left, Vector64<long> right) => SubtractScalar(left, right); /// <summary> /// float32_t vsubs_f32 (float32_t a, float32_t b) /// A32: VSUB.F32 Sd, Sn, Sm /// A64: FSUB Sd, Sn, Sm /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
/// </summary> public static Vector64<float> SubtractScalar(Vector64<float> left, Vector64<float> right) => SubtractScalar(left, right); /// <summary> /// uint64x1_t vsub_u64 (uint64x1_t a, uint64x1_t b) /// A32: VSUB.I64 Dd, Dn, Dm /// A64: SUB Dd, Dn, Dm /// </summary> public static Vector64<ulong> SubtractScalar(Vector64<ulong> left, Vector64<ulong> right) => SubtractScalar(left, right); /// <summary> /// uint16x8_t vsubl_u8 (uint8x8_t a, uint8x8_t b) /// A32: VSUBL.U8 Qd, Dn, Dm /// A64: USUBL Vd.8H, Vn.8B, Vm.8B /// </summary> public static Vector128<ushort> SubtractWideningLower(Vector64<byte> left, Vector64<byte> right) => SubtractWideningLower(left, right); /// <summary> /// int32x4_t vsubl_s16 (int16x4_t a, int16x4_t b) /// A32: VSUBL.S16 Qd, Dn, Dm /// A64: SSUBL Vd.4S, Vn.4H, Vm.4H /// </summary> public static Vector128<int> SubtractWideningLower(Vector64<short> left, Vector64<short> right) => SubtractWideningLower(left, right); /// <summary> /// int64x2_t vsubl_s32 (int32x2_t a, int32x2_t b) /// A32: VSUBL.S32 Qd, Dn, Dm /// A64: SSUBL Vd.2D, Vn.2S, Vm.2S /// </summary> public static Vector128<long> SubtractWideningLower(Vector64<int> left, Vector64<int> right) => SubtractWideningLower(left, right); /// <summary> /// int16x8_t vsubl_s8 (int8x8_t a, int8x8_t b) /// A32: VSUBL.S8 Qd, Dn, Dm /// A64: SSUBL Vd.8H, Vn.8B, Vm.8B /// </summary> public static Vector128<short> SubtractWideningLower(Vector64<sbyte> left, Vector64<sbyte> right) => SubtractWideningLower(left, right); /// <summary> /// uint32x4_t vsubl_u16 (uint16x4_t a, uint16x4_t b) /// A32: VSUBL.U16 Qd, Dn, Dm /// A64: USUBL Vd.4S, Vn.4H, Vm.4H /// </summary> public static Vector128<uint> SubtractWideningLower(Vector64<ushort> left, Vector64<ushort> right) => SubtractWideningLower(left, right); /// <summary> /// uint64x2_t vsubl_u32 (uint32x2_t a, uint32x2_t b) /// A32: VSUBL.U32 Qd, Dn, Dm /// A64: USUBL Vd.2D, Vn.2S, Vm.2S /// </summary> public static Vector128<ulong> SubtractWideningLower(Vector64<uint> left, Vector64<uint> right) => SubtractWideningLower(left, right); /// <summary> /// int16x8_t vsubw_s8 (int16x8_t a, int8x8_t b) /// A32: VSUBW.S8 Qd, Qn, Dm /// A64: SSUBW Vd.8H, Vn.8H, Vm.8B /// </summary> public static Vector128<short> SubtractWideningLower(Vector128<short> left, Vector64<sbyte> right) => SubtractWideningLower(left, right); /// <summary> /// int32x4_t vsubw_s16 (int32x4_t a, int16x4_t b) /// A32: VSUBW.S16 Qd, Qn, Dm /// A64: SSUBW Vd.4S, Vn.4S, Vm.4H /// </summary> public static Vector128<int> SubtractWideningLower(Vector128<int> left, Vector64<short> right) => SubtractWideningLower(left, right); /// <summary> /// int64x2_t vsubw_s32 (int64x2_t a, int32x2_t b) /// A32: VSUBW.S32 Qd, Qn, Dm /// A64: SSUBW Vd.2D, Vn.2D, Vm.2S /// </summary> public static Vector128<long> SubtractWideningLower(Vector128<long> left, Vector64<int> right) => SubtractWideningLower(left, right); /// <summary> /// uint16x8_t vsubw_u8 (uint16x8_t a, uint8x8_t b) /// A32: VSUBW.U8 Qd, Qn, Dm /// A64: USUBW Vd.8H, Vn.8H, Vm.8B /// </summary> public static Vector128<ushort> SubtractWideningLower(Vector128<ushort> left, Vector64<byte> right) => SubtractWideningLower(left, right); /// <summary> /// uint32x4_t vsubw_u16 (uint32x4_t a, uint16x4_t b) /// A32: VSUBW.U16 Qd, Qn, Dm /// A64: USUBW Vd.4S, Vn.4S, Vm.4H /// </summary> public static Vector128<uint> SubtractWideningLower(Vector128<uint> left, Vector64<ushort> right) => SubtractWideningLower(left, right); /// <summary> /// uint64x2_t vsubw_u32 (uint64x2_t a, uint32x2_t b) 
/// A32: VSUBW.U32 Qd, Qn, Dm /// A64: USUBW Vd.2D, Vn.2D, Vm.2S /// </summary> public static Vector128<ulong> SubtractWideningLower(Vector128<ulong> left, Vector64<uint> right) => SubtractWideningLower(left, right); /// <summary> /// uint16x8_t vsubl_high_u8 (uint8x16_t a, uint8x16_t b) /// A32: VSUBL.U8 Qd, Dn+1, Dm+1 /// A64: USUBL2 Vd.8H, Vn.16B, Vm.16B /// </summary> public static Vector128<ushort> SubtractWideningUpper(Vector128<byte> left, Vector128<byte> right) => SubtractWideningUpper(left, right); /// <summary> /// int32x4_t vsubl_high_s16 (int16x8_t a, int16x8_t b) /// A32: VSUBL.S16 Qd, Dn+1, Dm+1 /// A64: SSUBL2 Vd.4S, Vn.8H, Vm.8H /// </summary> public static Vector128<int> SubtractWideningUpper(Vector128<short> left, Vector128<short> right) => SubtractWideningUpper(left, right); /// <summary> /// int16x8_t vsubw_high_s8 (int16x8_t a, int8x16_t b) /// A32: VSUBW.S8 Qd, Qn, Dm+1 /// A64: SSUBW2 Vd.8H, Vn.8H, Vm.16B /// </summary> public static Vector128<short> SubtractWideningUpper(Vector128<short> left, Vector128<sbyte> right) => SubtractWideningUpper(left, right); /// <summary> /// int32x4_t vsubw_high_s16 (int32x4_t a, int16x8_t b) /// A32: VSUBW.S16 Qd, Qn, Dm+1 /// A64: SSUBW2 Vd.4S, Vn.4S, Vm.8H /// </summary> public static Vector128<int> SubtractWideningUpper(Vector128<int> left, Vector128<short> right) => SubtractWideningUpper(left, right); /// <summary> /// int64x2_t vsubl_high_s32 (int32x4_t a, int32x4_t b) /// A32: VSUBL.S32 Qd, Dn+1, Dm+1 /// A64: SSUBL2 Vd.2D, Vn.4S, Vm.4S /// </summary> public static Vector128<long> SubtractWideningUpper(Vector128<int> left, Vector128<int> right) => SubtractWideningUpper(left, right); /// <summary> /// int64x2_t vsubw_high_s32 (int64x2_t a, int32x4_t b) /// A32: VSUBW.S32 Qd, Qn, Dm+1 /// A64: SSUBW2 Vd.2D, Vn.2D, Vm.4S /// </summary> public static Vector128<long> SubtractWideningUpper(Vector128<long> left, Vector128<int> right) => SubtractWideningUpper(left, right); /// <summary> /// int16x8_t vsubl_high_s8 (int8x16_t a, int8x16_t b) /// A32: VSUBL.S8 Qd, Dn+1, Dm+1 /// A64: SSUBL2 Vd.8H, Vn.16B, Vm.16B /// </summary> public static Vector128<short> SubtractWideningUpper(Vector128<sbyte> left, Vector128<sbyte> right) => SubtractWideningUpper(left, right); /// <summary> /// uint16x8_t vsubw_high_u8 (uint16x8_t a, uint8x16_t b) /// A32: VSUBW.U8 Qd, Qn, Dm+1 /// A64: USUBW2 Vd.8H, Vn.8H, Vm.16B /// </summary> public static Vector128<ushort> SubtractWideningUpper(Vector128<ushort> left, Vector128<byte> right) => SubtractWideningUpper(left, right); /// <summary> /// uint32x4_t vsubl_high_u16 (uint16x8_t a, uint16x8_t b) /// A32: VSUBL.U16 Qd, Dn+1, Dm+1 /// A64: USUBL2 Vd.4S, Vn.8H, Vm.8H /// </summary> public static Vector128<uint> SubtractWideningUpper(Vector128<ushort> left, Vector128<ushort> right) => SubtractWideningUpper(left, right); /// <summary> /// uint32x4_t vsubw_high_u16 (uint32x4_t a, uint16x8_t b) /// A32: VSUBW.U16 Qd, Qn, Dm+1 /// A64: USUBW2 Vd.4S, Vn.4S, Vm.8H /// </summary> public static Vector128<uint> SubtractWideningUpper(Vector128<uint> left, Vector128<ushort> right) => SubtractWideningUpper(left, right); /// <summary> /// uint64x2_t vsubl_high_u32 (uint32x4_t a, uint32x4_t b) /// A32: VSUBL.U32 Qd, Dn+1, Dm+1 /// A64: USUBL2 Vd.2D, Vn.4S, Vm.4S /// </summary> public static Vector128<ulong> SubtractWideningUpper(Vector128<uint> left, Vector128<uint> right) => SubtractWideningUpper(left, right); /// <summary> /// uint64x2_t vsubw_high_u32 (uint64x2_t a, uint32x4_t b) /// A32: VSUBW.U32 Qd, Qn, Dm+1 /// A64: 
USUBW2 Vd.2D, Vn.2D, Vm.4S /// </summary> public static Vector128<ulong> SubtractWideningUpper(Vector128<ulong> left, Vector128<uint> right) => SubtractWideningUpper(left, right); /// <summary> /// uint8x8_t vqtbl1_u8(uint8x16_t t, uint8x8_t idx) /// A32: VTBL Dd, {Dn, Dn+1}, Dm /// A64: TBL Vd.8B, {Vn.16B}, Vm.8B /// </summary> public static Vector64<byte> VectorTableLookup(Vector128<byte> table, Vector64<byte> byteIndexes) => VectorTableLookup(table, byteIndexes); /// <summary> /// int8x8_t vqtbl1_s8(int8x16_t t, uint8x8_t idx) /// A32: VTBL Dd, {Dn, Dn+1}, Dm /// A64: TBL Vd.8B, {Vn.16B}, Vm.8B /// </summary> public static Vector64<sbyte> VectorTableLookup(Vector128<sbyte> table, Vector64<sbyte> byteIndexes) => VectorTableLookup(table, byteIndexes); /// <summary> /// uint8x8_t vqtbx1_u8(uint8x8_t r, uint8x16_t t, uint8x8_t idx) /// A32: VTBX Dd, {Dn, Dn+1}, Dm /// A64: TBX Vd.8B, {Vn.16B}, Vm.8B /// </summary> public static Vector64<byte> VectorTableLookupExtension(Vector64<byte> defaultValues, Vector128<byte> table, Vector64<byte> byteIndexes) => VectorTableLookupExtension(defaultValues, table, byteIndexes); /// <summary> /// int8x8_t vqtbx1_s8(int8x8_t r, int8x16_t t, uint8x8_t idx) /// A32: VTBX Dd, {Dn, Dn+1}, Dm /// A64: TBX Vd.8B, {Vn.16B}, Vm.8B /// </summary> public static Vector64<sbyte> VectorTableLookupExtension(Vector64<sbyte> defaultValues, Vector128<sbyte> table, Vector64<sbyte> byteIndexes) => VectorTableLookupExtension(defaultValues, table, byteIndexes); /// <summary> /// uint8x8_t veor_u8 (uint8x8_t a, uint8x8_t b) /// A32: VEOR Dd, Dn, Dm /// A64: EOR Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> Xor(Vector64<byte> left, Vector64<byte> right) => Xor(left, right); /// <summary> /// float64x1_t veor_f64 (float64x1_t a, float64x1_t b) /// A32: VEOR Dd, Dn, Dm /// A64: EOR Vd.8B, Vn.8B, Vm.8B /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. /// </summary> public static Vector64<double> Xor(Vector64<double> left, Vector64<double> right) => Xor(left, right); /// <summary> /// int16x4_t veor_s16 (int16x4_t a, int16x4_t b) /// A32: VEOR Dd, Dn, Dm /// A64: EOR Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<short> Xor(Vector64<short> left, Vector64<short> right) => Xor(left, right); /// <summary> /// int32x2_t veor_s32 (int32x2_t a, int32x2_t b) /// A32: VEOR Dd, Dn, Dm /// A64: EOR Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<int> Xor(Vector64<int> left, Vector64<int> right) => Xor(left, right); /// <summary> /// int64x1_t veor_s64 (int64x1_t a, int64x1_t b) /// A32: VEOR Dd, Dn, Dm /// A64: EOR Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<long> Xor(Vector64<long> left, Vector64<long> right) => Xor(left, right); /// <summary> /// int8x8_t veor_s8 (int8x8_t a, int8x8_t b) /// A32: VEOR Dd, Dn, Dm /// A64: EOR Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> Xor(Vector64<sbyte> left, Vector64<sbyte> right) => Xor(left, right); /// <summary> /// float32x2_t veor_f32 (float32x2_t a, float32x2_t b) /// A32: VEOR Dd, Dn, Dm /// A64: EOR Vd.8B, Vn.8B, Vm.8B /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
/// </summary> public static Vector64<float> Xor(Vector64<float> left, Vector64<float> right) => Xor(left, right); /// <summary> /// uint16x4_t veor_u16 (uint16x4_t a, uint16x4_t b) /// A32: VEOR Dd, Dn, Dm /// A64: EOR Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<ushort> Xor(Vector64<ushort> left, Vector64<ushort> right) => Xor(left, right); /// <summary> /// uint32x2_t veor_u32 (uint32x2_t a, uint32x2_t b) /// A32: VEOR Dd, Dn, Dm /// A64: EOR Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<uint> Xor(Vector64<uint> left, Vector64<uint> right) => Xor(left, right); /// <summary> /// uint64x1_t veor_u64 (uint64x1_t a, uint64x1_t b) /// A32: VEOR Dd, Dn, Dm /// A64: EOR Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<ulong> Xor(Vector64<ulong> left, Vector64<ulong> right) => Xor(left, right); /// <summary> /// uint8x16_t veorq_u8 (uint8x16_t a, uint8x16_t b) /// A32: VEOR Qd, Qn, Qm /// A64: EOR Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> Xor(Vector128<byte> left, Vector128<byte> right) => Xor(left, right); /// <summary> /// float64x2_t veorq_f64 (float64x2_t a, float64x2_t b) /// A32: VEOR Qd, Qn, Qm /// A64: EOR Vd.16B, Vn.16B, Vm.16B /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. /// </summary> public static Vector128<double> Xor(Vector128<double> left, Vector128<double> right) => Xor(left, right); /// <summary> /// int16x8_t veorq_s16 (int16x8_t a, int16x8_t b) /// A32: VEOR Qd, Qn, Qm /// A64: EOR Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<short> Xor(Vector128<short> left, Vector128<short> right) => Xor(left, right); /// <summary> /// int32x4_t veorq_s32 (int32x4_t a, int32x4_t b) /// A32: VEOR Qd, Qn, Qm /// A64: EOR Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<int> Xor(Vector128<int> left, Vector128<int> right) => Xor(left, right); /// <summary> /// int64x2_t veorq_s64 (int64x2_t a, int64x2_t b) /// A32: VEOR Qd, Qn, Qm /// A64: EOR Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<long> Xor(Vector128<long> left, Vector128<long> right) => Xor(left, right); /// <summary> /// int8x16_t veorq_s8 (int8x16_t a, int8x16_t b) /// A32: VEOR Qd, Qn, Qm /// A64: EOR Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> Xor(Vector128<sbyte> left, Vector128<sbyte> right) => Xor(left, right); /// <summary> /// float32x4_t veorq_f32 (float32x4_t a, float32x4_t b) /// A32: VEOR Qd, Qn, Qm /// A64: EOR Vd.16B, Vn.16B, Vm.16B /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
/// </summary> public static Vector128<float> Xor(Vector128<float> left, Vector128<float> right) => Xor(left, right); /// <summary> /// uint16x8_t veorq_u16 (uint16x8_t a, uint16x8_t b) /// A32: VEOR Qd, Qn, Qm /// A64: EOR Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<ushort> Xor(Vector128<ushort> left, Vector128<ushort> right) => Xor(left, right); /// <summary> /// uint32x4_t veorq_u32 (uint32x4_t a, uint32x4_t b) /// A32: VEOR Qd, Qn, Qm /// A64: EOR Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<uint> Xor(Vector128<uint> left, Vector128<uint> right) => Xor(left, right); /// <summary> /// uint64x2_t veorq_u64 (uint64x2_t a, uint64x2_t b) /// A32: VEOR Qd, Qn, Qm /// A64: EOR Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<ulong> Xor(Vector128<ulong> left, Vector128<ulong> right) => Xor(left, right); /// <summary> /// uint16x8_t vmovl_u8 (uint8x8_t a) /// A32: VMOVL.U8 Qd, Dm /// A64: UXTL Vd.8H, Vn.8B /// </summary> public static Vector128<ushort> ZeroExtendWideningLower(Vector64<byte> value) => ZeroExtendWideningLower(value); /// <summary> /// uint32x4_t vmovl_u16 (uint16x4_t a) /// A32: VMOVL.U16 Qd, Dm /// A64: UXTL Vd.4S, Vn.4H /// </summary> public static Vector128<int> ZeroExtendWideningLower(Vector64<short> value) => ZeroExtendWideningLower(value); /// <summary> /// uint64x2_t vmovl_u32 (uint32x2_t a) /// A32: VMOVL.U32 Qd, Dm /// A64: UXTL Vd.2D, Vn.2S /// </summary> public static Vector128<long> ZeroExtendWideningLower(Vector64<int> value) => ZeroExtendWideningLower(value); /// <summary> /// uint16x8_t vmovl_u8 (uint8x8_t a) /// A32: VMOVL.U8 Qd, Dm /// A64: UXTL Vd.8H, Vn.8B /// </summary> public static Vector128<short> ZeroExtendWideningLower(Vector64<sbyte> value) => ZeroExtendWideningLower(value); /// <summary> /// uint32x4_t vmovl_u16 (uint16x4_t a) /// A32: VMOVL.U16 Qd, Dm /// A64: UXTL Vd.4S, Vn.4H /// </summary> public static Vector128<uint> ZeroExtendWideningLower(Vector64<ushort> value) => ZeroExtendWideningLower(value); /// <summary> /// uint64x2_t vmovl_u32 (uint32x2_t a) /// A32: VMOVL.U32 Qd, Dm /// A64: UXTL Vd.2D, Vn.2S /// </summary> public static Vector128<ulong> ZeroExtendWideningLower(Vector64<uint> value) => ZeroExtendWideningLower(value); /// <summary> /// uint16x8_t vmovl_high_u8 (uint8x16_t a) /// A32: VMOVL.U8 Qd, Dm+1 /// A64: UXTL2 Vd.8H, Vn.16B /// </summary> public static Vector128<ushort> ZeroExtendWideningUpper(Vector128<byte> value) => ZeroExtendWideningUpper(value); /// <summary> /// uint32x4_t vmovl_high_u16 (uint16x8_t a) /// A32: VMOVL.U16 Qd, Dm+1 /// A64: UXTL2 Vd.4S, Vn.8H /// </summary> public static Vector128<int> ZeroExtendWideningUpper(Vector128<short> value) => ZeroExtendWideningUpper(value); /// <summary> /// uint64x2_t vmovl_high_u32 (uint32x4_t a) /// A32: VMOVL.U32 Qd, Dm+1 /// A64: UXTL2 Vd.2D, Vn.4S /// </summary> public static Vector128<long> ZeroExtendWideningUpper(Vector128<int> value) => ZeroExtendWideningUpper(value); /// <summary> /// uint16x8_t vmovl_high_u8 (uint8x16_t a) /// A32: VMOVL.U8 Qd, Dm+1 /// A64: UXTL2 Vd.8H, Vn.16B /// </summary> public static Vector128<short> ZeroExtendWideningUpper(Vector128<sbyte> value) => ZeroExtendWideningUpper(value); /// <summary> /// uint32x4_t vmovl_high_u16 (uint16x8_t a) /// A32: VMOVL.U16 Qd, Dm+1 /// A64: UXTL2 Vd.4S, Vn.8H /// </summary> public static Vector128<uint> ZeroExtendWideningUpper(Vector128<ushort> value) => ZeroExtendWideningUpper(value); /// <summary> /// uint64x2_t vmovl_high_u32 (uint32x4_t a) /// A32: VMOVL.U32 Qd, 
Dm+1 /// A64: UXTL2 Vd.2D, Vn.4S /// </summary> public static Vector128<ulong> ZeroExtendWideningUpper(Vector128<uint> value) => ZeroExtendWideningUpper(value); } }
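// Illustrative usage (a minimal sketch, not part of the API surface above): the
// intrinsics in this class are typically consumed by gating on IsSupported,
// loading with LoadVector128, combining element-wise (here Xor, i.e. VEOR/EOR),
// and writing back with Store. The helper name XorBuffers and its shape are
// hypothetical; only the AdvSimd calls themselves are real members of this class.
//
// using System.Runtime.Intrinsics;
// using System.Runtime.Intrinsics.Arm;
//
// static unsafe void XorBuffers(byte* destination, byte* source, int length)
// {
//     int i = 0;
//     if (AdvSimd.IsSupported)
//     {
//         // Process 16 bytes per iteration with 128-bit vectors (uint8x16_t).
//         for (; i <= length - 16; i += 16)
//         {
//             Vector128<byte> left = AdvSimd.LoadVector128(destination + i);
//             Vector128<byte> right = AdvSimd.LoadVector128(source + i);
//             AdvSimd.Store(destination + i, AdvSimd.Xor(left, right));
//         }
//     }
//     // Scalar tail for whatever does not fill a full vector.
//     for (; i < length; i++)
//     {
//         destination[i] ^= source[i];
//     }
// }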
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Runtime.CompilerServices; namespace System.Runtime.Intrinsics.Arm { /// <summary> /// This class provides access to the ARM AdvSIMD hardware instructions via intrinsics /// </summary> [Intrinsic] [CLSCompliant(false)] public abstract class AdvSimd : ArmBase { internal AdvSimd() { } public static new bool IsSupported { get => IsSupported; } // [Intrinsic] // public new abstract class Arm32 : ArmBase.Arm32 // { // internal Arm32() { } // // public static new bool IsSupported { get => IsSupported; } // // /// <summary> // /// float32x2_t vmla_f32 (float32x2_t a, float32x2_t b, float32x2_t c) // /// A32: VMLA.F32 Dd, Dn, Dm // /// </summary> // public static Vector64<float> MultiplyAdd(Vector64<float> addend, Vector64<float> left, Vector64<float> right) => MultiplyAdd(addend, left, right); // // /// <summary> // /// float32x4_t vmlaq_f32 (float32x4_t a, float32x4_t b, float32x4_t c) // /// A32: VMLA.F32 Qd, Qn, Qm // /// </summary> // public static Vector128<float> MultiplyAdd(Vector128<float> addend, Vector128<float> left, Vector128<float> right) => MultiplyAdd(addend, left, right); // // /// <summary> // /// float32x2_t vmla_n_f32 (float32x2_t a, float32x2_t b, float32_t c) // /// A32: VMLA.F32 Dd, Dn, Dm[0] // /// </summary> // public static Vector64<float> MultiplyAddByScalar(Vector64<float> addend, Vector64<float> left, Vector64<float> right) => MultiplyAddByScalar(addend, left, right); // // /// <summary> // /// float32x4_t vmlaq_n_f32 (float32x4_t a, float32x4_t b, float32_t c) // /// A32: VMLA.F32 Qd, Qn, Dm[0] // /// </summary> // public static Vector128<float> MultiplyAddByScalar(Vector128<float> addend, Vector128<float> left, Vector64<float> right) => MultiplyAddByScalar(addend, left, right); // // /// <summary> // /// float32x2_t vmla_lane_f32 (float32x2_t a, float32x2_t b, float32x2_t v, const int lane) // /// A32: VMLA.F32 Dd, Dn, Dm[lane] // /// </summary> // public static Vector64<float> MultiplyAddBySelectedScalar(Vector64<float> addend, Vector64<float> left, Vector64<float> right, byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex); // // /// <summary> // /// float32x2_t vmla_laneq_f32 (float32x2_t a, float32x2_t b, float32x4_t v, const int lane) // /// A32: VMLA.F32 Dd, Dn, Dm[lane] // /// </summary> // public static Vector64<float> MultiplyAddBySelectedScalar(Vector64<float> addend, Vector64<float> left, Vector128<float> right, byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex); // // /// <summary> // /// float32x4_t vmlaq_lane_f32 (float32x4_t a, float32x4_t b, float32x2_t v, const int lane) // /// A32: VMLA.F32 Qd, Qn, Dm[lane] // /// </summary> // public static Vector128<float> MultiplyAddBySelectedScalar(Vector128<float> addend, Vector128<float> left, Vector64<float> right, byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex); // // /// <summary> // /// float32x4_t vmlaq_laneq_f32 (float32x4_t a, float32x4_t b, float32x4_t v, const int lane) // /// A32: VMLA.F32 Qd, Qn, Dm[lane] // /// </summary> // public static Vector128<float> MultiplyAddBySelectedScalar(Vector128<float> addend, Vector128<float> left, Vector128<float> right, byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex); // // /// <summary> // /// float64x1_t vmla_f64 (float64x1_t a, float64x1_t b, float64x1_t c) // /// A32: VMLA.F64 Dd, Dn, Dm // 
/// </summary> // public static Vector64<double> MultiplyAddScalar(Vector64<double> addend, Vector64<double> left, Vector64<double> right) => MultiplyAddScalar(addend, left, right); // // /// <summary> // /// float32_t vmlas_f32 (float32_t a, float32_t b, float32_t c) // /// A32: VMLA.F32 Sd, Sn, Sm // /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. // /// </summary> // public static Vector64<float> MultiplyAddScalar(Vector64<float> addend, Vector64<float> left, Vector64<float> right) => MultiplyAddScalar(addend, left, right); // // /// <summary> // /// float32x2_t vmls_f32 (float32x2_t a, float32x2_t b, float32x2_t c) // /// A32: VMLS.F32 Dd, Dn, Dm // /// </summary> // public static Vector64<float> MultiplySubtract(Vector64<float> minuend, Vector64<float> left, Vector64<float> right) => MultiplySubtract(minuend, left, right); // // /// <summary> // /// float32x4_t vmlsq_f32 (float32x4_t a, float32x4_t b, float32x4_t c) // /// A32: VMLS.F32 Qd, Qn, Qm // /// </summary> // public static Vector128<float> MultiplySubtract(Vector128<float> minuend, Vector128<float> left, Vector128<float> right) => MultiplySubtract(minuend, left, right); // // /// <summary> // /// float32x2_t vmls_n_f32 (float32x2_t a, float32x2_t b, float32_t c) // /// A32: VMLS.F32 Dd, Dn, Dm[0] // /// </summary> // public static Vector64<float> MultiplySubtractByScalar(Vector64<float> minuend, Vector64<float> left, Vector64<float> right) => MultiplySubtractByScalar(minuend, left, right); // // /// <summary> // /// float32x4_t vmlsq_n_f32 (float32x4_t a, float32x4_t b, float32_t c) // /// A32: VMLS.F32 Qd, Qn, Dm[0] // /// </summary> // public static Vector128<float> MultiplySubtractByScalar(Vector128<float> minuend, Vector128<float> left, Vector64<float> right) => MultiplySubtractByScalar(minuend, left, right); // // /// <summary> // /// float32x2_t vmls_lane_f32 (float32x2_t a, float32x2_t b, float32x2_t v, const int lane) // /// A32: VMLS.F32 Dd, Dn, Dm[lane] // /// </summary> // public static Vector64<float> MultiplySubtractBySelectedScalar(Vector64<float> minuend, Vector64<float> left, Vector64<float> right, byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); // // /// <summary> // /// float32x2_t vmls_laneq_f32 (float32x2_t a, float32x2_t b, float32x4_t v, const int lane) // /// A32: VMLS.F32 Dd, Dn, Dm[lane] // /// </summary> // public static Vector64<float> MultiplySubtractBySelectedScalar(Vector64<float> minuend, Vector64<float> left, Vector128<float> right, byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); // // /// <summary> // /// float32x4_t vmlsq_lane_f32 (float32x4_t a, float32x4_t b, float32x2_t v, const int lane) // /// A32: VMLS.F32 Qd, Qn, Dm[lane] // /// </summary> // public static Vector128<float> MultiplySubtractBySelectedScalar(Vector128<float> minuend, Vector128<float> left, Vector64<float> right, byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); // // /// <summary> // /// float32x4_t vmlsq_laneq_f32 (float32x4_t a, float32x4_t b, float32x4_t v, const int lane) // /// A32: VMLS.F32 Qd, Qn, Dm[lane] // /// </summary> // public static Vector128<float> MultiplySubtractBySelectedScalar(Vector128<float> minuend, Vector128<float> left, Vector128<float> right, byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); // // /// <summary> // /// float64x1_t vmls_f64 (float64x1_t a, float64x1_t 
b, float64x1_t c) // /// A32: VMLS.F64 Dd, Dn, Dm // /// </summary> // public static Vector64<double> MultiplySubtractScalar(Vector64<double> minuend, Vector64<double> left, Vector64<double> right) => MultiplySubtractScalar(minuend, left, right); // // /// <summary> // /// float32_t vmlss_f32 (float32_t a, float32_t b, float32_t c) // /// A32: VMLS.F32 Sd, Sn, Sm // /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. // /// </summary> // public static Vector64<float> MultiplySubtractScalar(Vector64<float> minuend, Vector64<float> left, Vector64<float> right) => MultiplySubtractScalar(minuend, left, right); // } [Intrinsic] public new abstract class Arm64 : ArmBase.Arm64 { internal Arm64() { } public static new bool IsSupported { get => IsSupported; } /// <summary> /// float64x2_t vabsq_f64 (float64x2_t a) /// A64: FABS Vd.2D, Vn.2D /// </summary> public static Vector128<double> Abs(Vector128<double> value) => Abs(value); /// <summary> /// int64x2_t vabsq_s64 (int64x2_t a) /// A64: ABS Vd.2D, Vn.2D /// </summary> public static Vector128<ulong> Abs(Vector128<long> value) => Abs(value); /// <summary> /// int64x2_t vqabsq_s64 (int64x2_t a) /// A64: SQABS Vd.2D, Vn.2D /// </summary> public static Vector128<long> AbsSaturate(Vector128<long> value) => AbsSaturate(value); /// <summary> /// int16_t vqabsh_s16 (int16_t a) /// A64: SQABS Hd, Hn /// </summary> public static Vector64<short> AbsSaturateScalar(Vector64<short> value) => AbsSaturateScalar(value); /// <summary> /// int32_t vqabss_s32 (int32_t a) /// A64: SQABS Sd, Sn /// </summary> public static Vector64<int> AbsSaturateScalar(Vector64<int> value) => AbsSaturateScalar(value); /// <summary> /// int64_t vqabsd_s64 (int64_t a) /// A64: SQABS Dd, Dn /// </summary> public static Vector64<long> AbsSaturateScalar(Vector64<long> value) => AbsSaturateScalar(value); /// <summary> /// int8_t vqabsb_s8 (int8_t a) /// A64: SQABS Bd, Bn /// </summary> public static Vector64<sbyte> AbsSaturateScalar(Vector64<sbyte> value) => AbsSaturateScalar(value); /// <summary> /// int64x1_t vabs_s64 (int64x1_t a) /// A64: ABS Dd, Dn /// </summary> public static Vector64<ulong> AbsScalar(Vector64<long> value) => AbsScalar(value); /// <summary> /// uint64x2_t vcagtq_f64 (float64x2_t a, float64x2_t b) /// A64: FACGT Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<double> AbsoluteCompareGreaterThan(Vector128<double> left, Vector128<double> right) => AbsoluteCompareGreaterThan(left, right); /// <summary> /// uint64x1_t vcagt_f64 (float64x1_t a, float64x1_t b) /// A64: FACGT Dd, Dn, Dm /// </summary> public static Vector64<double> AbsoluteCompareGreaterThanScalar(Vector64<double> left, Vector64<double> right) => AbsoluteCompareGreaterThanScalar(left, right); /// <summary> /// uint32_t vcagts_f32 (float32_t a, float32_t b) /// A64: FACGT Sd, Sn, Sm /// </summary> public static Vector64<float> AbsoluteCompareGreaterThanScalar(Vector64<float> left, Vector64<float> right) => AbsoluteCompareGreaterThanScalar(left, right); /// <summary> /// uint64x2_t vcageq_f64 (float64x2_t a, float64x2_t b) /// A64: FACGE Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<double> AbsoluteCompareGreaterThanOrEqual(Vector128<double> left, Vector128<double> right) => AbsoluteCompareGreaterThanOrEqual(left, right); /// <summary> /// uint64x1_t vcage_f64 (float64x1_t a, float64x1_t b) /// A64: FACGE Dd, Dn, Dm /// </summary> public static Vector64<double> 
AbsoluteCompareGreaterThanOrEqualScalar(Vector64<double> left, Vector64<double> right) => AbsoluteCompareGreaterThanOrEqualScalar(left, right); /// <summary> /// uint32_t vcages_f32 (float32_t a, float32_t b) /// A64: FACGE Sd, Sn, Sm /// </summary> public static Vector64<float> AbsoluteCompareGreaterThanOrEqualScalar(Vector64<float> left, Vector64<float> right) => AbsoluteCompareGreaterThanOrEqualScalar(left, right); /// <summary> /// uint64x2_t vcaltq_f64 (float64x2_t a, float64x2_t b) /// A64: FACGT Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<double> AbsoluteCompareLessThan(Vector128<double> left, Vector128<double> right) => AbsoluteCompareLessThan(left, right); /// <summary> /// uint64x1_t vcalt_f64 (float64x1_t a, float64x1_t b) /// A64: FACGT Dd, Dn, Dm /// </summary> public static Vector64<double> AbsoluteCompareLessThanScalar(Vector64<double> left, Vector64<double> right) => AbsoluteCompareLessThanScalar(left, right); /// <summary> /// uint32_t vcalts_f32 (float32_t a, float32_t b) /// A64: FACGT Sd, Sn, Sm /// </summary> public static Vector64<float> AbsoluteCompareLessThanScalar(Vector64<float> left, Vector64<float> right) => AbsoluteCompareLessThanScalar(left, right); /// <summary> /// uint64x2_t vcaleq_f64 (float64x2_t a, float64x2_t b) /// A64: FACGE Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<double> AbsoluteCompareLessThanOrEqual(Vector128<double> left, Vector128<double> right) => AbsoluteCompareLessThanOrEqual(left, right); /// <summary> /// uint64x1_t vcale_f64 (float64x1_t a, float64x1_t b) /// A64: FACGE Dd, Dn, Dm /// </summary> public static Vector64<double> AbsoluteCompareLessThanOrEqualScalar(Vector64<double> left, Vector64<double> right) => AbsoluteCompareLessThanOrEqualScalar(left, right); /// <summary> /// uint32_t vcales_f32 (float32_t a, float32_t b) /// A64: FACGE Sd, Sn, Sm /// </summary> public static Vector64<float> AbsoluteCompareLessThanOrEqualScalar(Vector64<float> left, Vector64<float> right) => AbsoluteCompareLessThanOrEqualScalar(left, right); /// <summary> /// float64x2_t vabdq_f64 (float64x2_t a, float64x2_t b) /// A64: FABD Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<double> AbsoluteDifference(Vector128<double> left, Vector128<double> right) => AbsoluteDifference(left, right); /// <summary> /// float64x1_t vabd_f64 (float64x1_t a, float64x1_t b) /// A64: FABD Dd, Dn, Dm /// </summary> public static Vector64<double> AbsoluteDifferenceScalar(Vector64<double> left, Vector64<double> right) => AbsoluteDifferenceScalar(left, right); /// <summary> /// float32_t vabds_f32 (float32_t a, float32_t b) /// A64: FABD Sd, Sn, Sm /// </summary> public static Vector64<float> AbsoluteDifferenceScalar(Vector64<float> left, Vector64<float> right) => AbsoluteDifferenceScalar(left, right); /// <summary> /// float64x2_t vaddq_f64 (float64x2_t a, float64x2_t b) /// A64: FADD Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<double> Add(Vector128<double> left, Vector128<double> right) => Add(left, right); /// <summary> /// uint8_t vaddv_u8 (uint8x8_t a) /// A64: ADDV Bd, Vn.8B /// </summary> public static Vector64<byte> AddAcross(Vector64<byte> value) => AddAcross(value); /// <summary> /// int16_t vaddv_s16 (int16x4_t a) /// A64: ADDV Hd, Vn.4H /// </summary> public static Vector64<short> AddAcross(Vector64<short> value) => AddAcross(value); /// <summary> /// int8_t vaddv_s8 (int8x8_t a) /// A64: ADDV Bd, Vn.8B /// </summary> public static Vector64<sbyte> AddAcross(Vector64<sbyte> value) => AddAcross(value); /// 
<summary> /// uint16_t vaddv_u16 (uint16x4_t a) /// A64: ADDV Hd, Vn.4H /// </summary> public static Vector64<ushort> AddAcross(Vector64<ushort> value) => AddAcross(value); /// <summary> /// uint8_t vaddvq_u8 (uint8x16_t a) /// A64: ADDV Bd, Vn.16B /// </summary> public static Vector64<byte> AddAcross(Vector128<byte> value) => AddAcross(value); /// <summary> /// int16_t vaddvq_s16 (int16x8_t a) /// A64: ADDV Hd, Vn.8H /// </summary> public static Vector64<short> AddAcross(Vector128<short> value) => AddAcross(value); /// <summary> /// int32_t vaddvq_s32 (int32x4_t a) /// A64: ADDV Sd, Vn.4S /// </summary> public static Vector64<int> AddAcross(Vector128<int> value) => AddAcross(value); /// <summary> /// int8_t vaddvq_s8 (int8x16_t a) /// A64: ADDV Bd, Vn.16B /// </summary> public static Vector64<sbyte> AddAcross(Vector128<sbyte> value) => AddAcross(value); /// <summary> /// uint16_t vaddvq_u16 (uint16x8_t a) /// A64: ADDV Hd, Vn.8H /// </summary> public static Vector64<ushort> AddAcross(Vector128<ushort> value) => AddAcross(value); /// <summary> /// uint32_t vaddvq_u32 (uint32x4_t a) /// A64: ADDV Sd, Vn.4S /// </summary> public static Vector64<uint> AddAcross(Vector128<uint> value) => AddAcross(value); /// <summary> /// uint16_t vaddlv_u8 (uint8x8_t a) /// A64: UADDLV Hd, Vn.8B /// </summary> public static Vector64<ushort> AddAcrossWidening(Vector64<byte> value) => AddAcrossWidening(value); /// <summary> /// int32_t vaddlv_s16 (int16x4_t a) /// A64: SADDLV Sd, Vn.4H /// </summary> public static Vector64<int> AddAcrossWidening(Vector64<short> value) => AddAcrossWidening(value); /// <summary> /// int16_t vaddlv_s8 (int8x8_t a) /// A64: SADDLV Hd, Vn.8B /// </summary> public static Vector64<short> AddAcrossWidening(Vector64<sbyte> value) => AddAcrossWidening(value); /// <summary> /// uint32_t vaddlv_u16 (uint16x4_t a) /// A64: UADDLV Sd, Vn.4H /// </summary> public static Vector64<uint> AddAcrossWidening(Vector64<ushort> value) => AddAcrossWidening(value); /// <summary> /// uint16_t vaddlvq_u8 (uint8x16_t a) /// A64: UADDLV Hd, Vn.16B /// </summary> public static Vector64<ushort> AddAcrossWidening(Vector128<byte> value) => AddAcrossWidening(value); /// <summary> /// int32_t vaddlvq_s16 (int16x8_t a) /// A64: SADDLV Sd, Vn.8H /// </summary> public static Vector64<int> AddAcrossWidening(Vector128<short> value) => AddAcrossWidening(value); /// <summary> /// int64_t vaddlvq_s32 (int32x4_t a) /// A64: SADDLV Dd, Vn.4S /// </summary> public static Vector64<long> AddAcrossWidening(Vector128<int> value) => AddAcrossWidening(value); /// <summary> /// int16_t vaddlvq_s8 (int8x16_t a) /// A64: SADDLV Hd, Vn.16B /// </summary> public static Vector64<short> AddAcrossWidening(Vector128<sbyte> value) => AddAcrossWidening(value); /// <summary> /// uint32_t vaddlvq_u16 (uint16x8_t a) /// A64: UADDLV Sd, Vn.8H /// </summary> public static Vector64<uint> AddAcrossWidening(Vector128<ushort> value) => AddAcrossWidening(value); /// <summary> /// uint64_t vaddlvq_u32 (uint32x4_t a) /// A64: UADDLV Dd, Vn.4S /// </summary> public static Vector64<ulong> AddAcrossWidening(Vector128<uint> value) => AddAcrossWidening(value); /// <summary> /// uint8x16_t vpaddq_u8 (uint8x16_t a, uint8x16_t b) /// A64: ADDP Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> AddPairwise(Vector128<byte> left, Vector128<byte> right) => AddPairwise(left, right); /// <summary> /// float64x2_t vpaddq_f64 (float64x2_t a, float64x2_t b) /// A64: FADDP Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<double> 
AddPairwise(Vector128<double> left, Vector128<double> right) => AddPairwise(left, right); /// <summary> /// int16x8_t vpaddq_s16 (int16x8_t a, int16x8_t b) /// A64: ADDP Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<short> AddPairwise(Vector128<short> left, Vector128<short> right) => AddPairwise(left, right); /// <summary> /// int32x4_t vpaddq_s32 (int32x4_t a, int32x4_t b) /// A64: ADDP Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<int> AddPairwise(Vector128<int> left, Vector128<int> right) => AddPairwise(left, right); /// <summary> /// int64x2_t vpaddq_s64 (int64x2_t a, int64x2_t b) /// A64: ADDP Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<long> AddPairwise(Vector128<long> left, Vector128<long> right) => AddPairwise(left, right); /// <summary> /// int8x16_t vpaddq_s8 (int8x16_t a, int8x16_t b) /// A64: ADDP Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> AddPairwise(Vector128<sbyte> left, Vector128<sbyte> right) => AddPairwise(left, right); /// <summary> /// float32x4_t vpaddq_f32 (float32x4_t a, float32x4_t b) /// A64: FADDP Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<float> AddPairwise(Vector128<float> left, Vector128<float> right) => AddPairwise(left, right); /// <summary> /// uint16x8_t vpaddq_u16 (uint16x8_t a, uint16x8_t b) /// A64: ADDP Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<ushort> AddPairwise(Vector128<ushort> left, Vector128<ushort> right) => AddPairwise(left, right); /// <summary> /// uint32x4_t vpaddq_u32 (uint32x4_t a, uint32x4_t b) /// A64: ADDP Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<uint> AddPairwise(Vector128<uint> left, Vector128<uint> right) => AddPairwise(left, right); /// <summary> /// uint64x2_t vpaddq_u64 (uint64x2_t a, uint64x2_t b) /// A64: ADDP Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<ulong> AddPairwise(Vector128<ulong> left, Vector128<ulong> right) => AddPairwise(left, right); /// <summary> /// float32_t vpadds_f32 (float32x2_t a) /// A64: FADDP Sd, Vn.2S /// </summary> public static Vector64<float> AddPairwiseScalar(Vector64<float> value) => AddPairwiseScalar(value); /// <summary> /// float64_t vpaddd_f64 (float64x2_t a) /// A64: FADDP Dd, Vn.2D /// </summary> public static Vector64<double> AddPairwiseScalar(Vector128<double> value) => AddPairwiseScalar(value); /// <summary> /// int64_t vpaddd_s64 (int64x2_t a) /// A64: ADDP Dd, Vn.2D /// </summary> public static Vector64<long> AddPairwiseScalar(Vector128<long> value) => AddPairwiseScalar(value); /// <summary> /// uint64_t vpaddd_u64 (uint64x2_t a) /// A64: ADDP Dd, Vn.2D /// </summary> public static Vector64<ulong> AddPairwiseScalar(Vector128<ulong> value) => AddPairwiseScalar(value); /// <summary> /// uint8x8_t vsqadd_u8 (uint8x8_t a, int8x8_t b) /// A64: USQADD Vd.8B, Vn.8B /// </summary> public static Vector64<byte> AddSaturate(Vector64<byte> left, Vector64<sbyte> right) => AddSaturate(left, right); /// <summary> /// int16x4_t vuqadd_s16 (int16x4_t a, uint16x4_t b) /// A64: SUQADD Vd.4H, Vn.4H /// </summary> public static Vector64<short> AddSaturate(Vector64<short> left, Vector64<ushort> right) => AddSaturate(left, right); /// <summary> /// int32x2_t vuqadd_s32 (int32x2_t a, uint32x2_t b) /// A64: SUQADD Vd.2S, Vn.2S /// </summary> public static Vector64<int> AddSaturate(Vector64<int> left, Vector64<uint> right) => AddSaturate(left, right); /// <summary> /// int8x8_t vuqadd_s8 (int8x8_t a, uint8x8_t b) /// A64: SUQADD Vd.8B, Vn.8B /// </summary> public static Vector64<sbyte> 
/// <summary>
/// uint16x4_t vsqadd_u16 (uint16x4_t a, int16x4_t b)
/// A64: USQADD Vd.4H, Vn.4H
/// </summary>
public static Vector64<ushort> AddSaturate(Vector64<ushort> left, Vector64<short> right) => AddSaturate(left, right);
/// <summary>
/// uint32x2_t vsqadd_u32 (uint32x2_t a, int32x2_t b)
/// A64: USQADD Vd.2S, Vn.2S
/// </summary>
public static Vector64<uint> AddSaturate(Vector64<uint> left, Vector64<int> right) => AddSaturate(left, right);
/// <summary>
/// uint8x16_t vsqaddq_u8 (uint8x16_t a, int8x16_t b)
/// A64: USQADD Vd.16B, Vn.16B
/// </summary>
public static Vector128<byte> AddSaturate(Vector128<byte> left, Vector128<sbyte> right) => AddSaturate(left, right);
/// <summary>
/// int16x8_t vuqaddq_s16 (int16x8_t a, uint16x8_t b)
/// A64: SUQADD Vd.8H, Vn.8H
/// </summary>
public static Vector128<short> AddSaturate(Vector128<short> left, Vector128<ushort> right) => AddSaturate(left, right);
/// <summary>
/// int32x4_t vuqaddq_s32 (int32x4_t a, uint32x4_t b)
/// A64: SUQADD Vd.4S, Vn.4S
/// </summary>
public static Vector128<int> AddSaturate(Vector128<int> left, Vector128<uint> right) => AddSaturate(left, right);
/// <summary>
/// int64x2_t vuqaddq_s64 (int64x2_t a, uint64x2_t b)
/// A64: SUQADD Vd.2D, Vn.2D
/// </summary>
public static Vector128<long> AddSaturate(Vector128<long> left, Vector128<ulong> right) => AddSaturate(left, right);
/// <summary>
/// int8x16_t vuqaddq_s8 (int8x16_t a, uint8x16_t b)
/// A64: SUQADD Vd.16B, Vn.16B
/// </summary>
public static Vector128<sbyte> AddSaturate(Vector128<sbyte> left, Vector128<byte> right) => AddSaturate(left, right);
/// <summary>
/// uint16x8_t vsqaddq_u16 (uint16x8_t a, int16x8_t b)
/// A64: USQADD Vd.8H, Vn.8H
/// </summary>
public static Vector128<ushort> AddSaturate(Vector128<ushort> left, Vector128<short> right) => AddSaturate(left, right);
/// <summary>
/// uint32x4_t vsqaddq_u32 (uint32x4_t a, int32x4_t b)
/// A64: USQADD Vd.4S, Vn.4S
/// </summary>
public static Vector128<uint> AddSaturate(Vector128<uint> left, Vector128<int> right) => AddSaturate(left, right);
/// <summary>
/// uint64x2_t vsqaddq_u64 (uint64x2_t a, int64x2_t b)
/// A64: USQADD Vd.2D, Vn.2D
/// </summary>
public static Vector128<ulong> AddSaturate(Vector128<ulong> left, Vector128<long> right) => AddSaturate(left, right);
/// <summary>
/// uint8_t vqaddb_u8 (uint8_t a, uint8_t b)
/// A64: UQADD Bd, Bn, Bm
/// </summary>
public static Vector64<byte> AddSaturateScalar(Vector64<byte> left, Vector64<byte> right) => AddSaturateScalar(left, right);
/// <summary>
/// uint8_t vsqaddb_u8 (uint8_t a, int8_t b)
/// A64: USQADD Bd, Bn
/// </summary>
public static Vector64<byte> AddSaturateScalar(Vector64<byte> left, Vector64<sbyte> right) => AddSaturateScalar(left, right);
/// <summary>
/// int16_t vqaddh_s16 (int16_t a, int16_t b)
/// A64: SQADD Hd, Hn, Hm
/// </summary>
public static Vector64<short> AddSaturateScalar(Vector64<short> left, Vector64<short> right) => AddSaturateScalar(left, right);
/// <summary>
/// int16_t vuqaddh_s16 (int16_t a, uint16_t b)
/// A64: SUQADD Hd, Hn
/// </summary>
public static Vector64<short> AddSaturateScalar(Vector64<short> left, Vector64<ushort> right) => AddSaturateScalar(left, right);
/// <summary>
/// int32_t vqadds_s32 (int32_t a, int32_t b)
/// A64: SQADD Sd, Sn, Sm
/// </summary>
public static Vector64<int> AddSaturateScalar(Vector64<int> left, Vector64<int> right) => AddSaturateScalar(left, right);
/// <summary>
/// int32_t vuqadds_s32 (int32_t a, uint32_t b)
/// A64: SUQADD Sd, Sn
/// </summary>
public static Vector64<int> AddSaturateScalar(Vector64<int> left, Vector64<uint> right) => AddSaturateScalar(left, right);
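// Illustrative note (not part of the original source): the mixed-signedness
// AddSaturate overloads saturate to the range of the first operand's element type
// (USQADD/SUQADD). For example, for Vector64<byte> elements, 250 + (sbyte)10
// saturates to 255, and 5 + (sbyte)(-10) saturates to 0.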
/// <summary>
/// int64x1_t vuqadd_s64 (int64x1_t a, uint64x1_t b)
/// A64: SUQADD Dd, Dn
/// </summary>
public static Vector64<long> AddSaturateScalar(Vector64<long> left, Vector64<ulong> right) => AddSaturateScalar(left, right);
/// <summary>
/// int8_t vqaddb_s8 (int8_t a, int8_t b)
/// A64: SQADD Bd, Bn, Bm
/// </summary>
public static Vector64<sbyte> AddSaturateScalar(Vector64<sbyte> left, Vector64<sbyte> right) => AddSaturateScalar(left, right);
/// <summary>
/// int8_t vuqaddb_s8 (int8_t a, uint8_t b)
/// A64: SUQADD Bd, Bn
/// </summary>
public static Vector64<sbyte> AddSaturateScalar(Vector64<sbyte> left, Vector64<byte> right) => AddSaturateScalar(left, right);
/// <summary>
/// uint16_t vqaddh_u16 (uint16_t a, uint16_t b)
/// A64: UQADD Hd, Hn, Hm
/// </summary>
public static Vector64<ushort> AddSaturateScalar(Vector64<ushort> left, Vector64<ushort> right) => AddSaturateScalar(left, right);
/// <summary>
/// uint16_t vsqaddh_u16 (uint16_t a, int16_t b)
/// A64: USQADD Hd, Hn
/// </summary>
public static Vector64<ushort> AddSaturateScalar(Vector64<ushort> left, Vector64<short> right) => AddSaturateScalar(left, right);
/// <summary>
/// uint32_t vqadds_u32 (uint32_t a, uint32_t b)
/// A64: UQADD Sd, Sn, Sm
/// </summary>
public static Vector64<uint> AddSaturateScalar(Vector64<uint> left, Vector64<uint> right) => AddSaturateScalar(left, right);
/// <summary>
/// uint32_t vsqadds_u32 (uint32_t a, int32_t b)
/// A64: USQADD Sd, Sn
/// </summary>
public static Vector64<uint> AddSaturateScalar(Vector64<uint> left, Vector64<int> right) => AddSaturateScalar(left, right);
/// <summary>
/// uint64x1_t vsqadd_u64 (uint64x1_t a, int64x1_t b)
/// A64: USQADD Dd, Dn
/// </summary>
public static Vector64<ulong> AddSaturateScalar(Vector64<ulong> left, Vector64<long> right) => AddSaturateScalar(left, right);
/// <summary>
/// float64x2_t vrndpq_f64 (float64x2_t a)
/// A64: FRINTP Vd.2D, Vn.2D
/// </summary>
public static Vector128<double> Ceiling(Vector128<double> value) => Ceiling(value);
/// <summary>
/// uint64x2_t vceqq_f64 (float64x2_t a, float64x2_t b)
/// A64: FCMEQ Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<double> CompareEqual(Vector128<double> left, Vector128<double> right) => CompareEqual(left, right);
/// <summary>
/// uint64x2_t vceqq_s64 (int64x2_t a, int64x2_t b)
/// A64: CMEQ Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<long> CompareEqual(Vector128<long> left, Vector128<long> right) => CompareEqual(left, right);
/// <summary>
/// uint64x2_t vceqq_u64 (uint64x2_t a, uint64x2_t b)
/// A64: CMEQ Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<ulong> CompareEqual(Vector128<ulong> left, Vector128<ulong> right) => CompareEqual(left, right);
/// <summary>
/// uint64x1_t vceq_f64 (float64x1_t a, float64x1_t b)
/// A64: FCMEQ Dd, Dn, Dm
/// </summary>
public static Vector64<double> CompareEqualScalar(Vector64<double> left, Vector64<double> right) => CompareEqualScalar(left, right);
/// <summary>
/// uint64x1_t vceq_s64 (int64x1_t a, int64x1_t b)
/// A64: CMEQ Dd, Dn, Dm
/// </summary>
public static Vector64<long> CompareEqualScalar(Vector64<long> left, Vector64<long> right) => CompareEqualScalar(left, right);
/// <summary>
/// uint32_t vceqs_f32 (float32_t a, float32_t b)
/// A64: FCMEQ Sd, Sn, Sm
/// </summary>
public static Vector64<float> CompareEqualScalar(Vector64<float> left, Vector64<float> right) => CompareEqualScalar(left, right);
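// Illustrative note (not part of the original source): the Compare* methods return
// a per-element mask rather than a bool. Each lane is all-ones where the comparison
// holds and zero otherwise, e.g. CompareEqual on equal long lanes yields
// 0xFFFFFFFFFFFFFFFF in that lane.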
/// <summary>
/// uint64x1_t vceq_u64 (uint64x1_t a, uint64x1_t b)
/// A64: CMEQ Dd, Dn, Dm
/// </summary>
public static Vector64<ulong> CompareEqualScalar(Vector64<ulong> left, Vector64<ulong> right) => CompareEqualScalar(left, right);
/// <summary>
/// uint64x2_t vcgtq_f64 (float64x2_t a, float64x2_t b)
/// A64: FCMGT Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<double> CompareGreaterThan(Vector128<double> left, Vector128<double> right) => CompareGreaterThan(left, right);
/// <summary>
/// uint64x2_t vcgtq_s64 (int64x2_t a, int64x2_t b)
/// A64: CMGT Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<long> CompareGreaterThan(Vector128<long> left, Vector128<long> right) => CompareGreaterThan(left, right);
/// <summary>
/// uint64x2_t vcgtq_u64 (uint64x2_t a, uint64x2_t b)
/// A64: CMHI Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<ulong> CompareGreaterThan(Vector128<ulong> left, Vector128<ulong> right) => CompareGreaterThan(left, right);
/// <summary>
/// uint64x1_t vcgt_f64 (float64x1_t a, float64x1_t b)
/// A64: FCMGT Dd, Dn, Dm
/// </summary>
public static Vector64<double> CompareGreaterThanScalar(Vector64<double> left, Vector64<double> right) => CompareGreaterThanScalar(left, right);
/// <summary>
/// uint64x1_t vcgt_s64 (int64x1_t a, int64x1_t b)
/// A64: CMGT Dd, Dn, Dm
/// </summary>
public static Vector64<long> CompareGreaterThanScalar(Vector64<long> left, Vector64<long> right) => CompareGreaterThanScalar(left, right);
/// <summary>
/// uint32_t vcgts_f32 (float32_t a, float32_t b)
/// A64: FCMGT Sd, Sn, Sm
/// </summary>
public static Vector64<float> CompareGreaterThanScalar(Vector64<float> left, Vector64<float> right) => CompareGreaterThanScalar(left, right);
/// <summary>
/// uint64x1_t vcgt_u64 (uint64x1_t a, uint64x1_t b)
/// A64: CMHI Dd, Dn, Dm
/// </summary>
public static Vector64<ulong> CompareGreaterThanScalar(Vector64<ulong> left, Vector64<ulong> right) => CompareGreaterThanScalar(left, right);
/// <summary>
/// uint64x2_t vcgeq_f64 (float64x2_t a, float64x2_t b)
/// A64: FCMGE Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<double> CompareGreaterThanOrEqual(Vector128<double> left, Vector128<double> right) => CompareGreaterThanOrEqual(left, right);
/// <summary>
/// uint64x2_t vcgeq_s64 (int64x2_t a, int64x2_t b)
/// A64: CMGE Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<long> CompareGreaterThanOrEqual(Vector128<long> left, Vector128<long> right) => CompareGreaterThanOrEqual(left, right);
/// <summary>
/// uint64x2_t vcgeq_u64 (uint64x2_t a, uint64x2_t b)
/// A64: CMHS Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<ulong> CompareGreaterThanOrEqual(Vector128<ulong> left, Vector128<ulong> right) => CompareGreaterThanOrEqual(left, right);
/// <summary>
/// uint64x1_t vcge_f64 (float64x1_t a, float64x1_t b)
/// A64: FCMGE Dd, Dn, Dm
/// </summary>
public static Vector64<double> CompareGreaterThanOrEqualScalar(Vector64<double> left, Vector64<double> right) => CompareGreaterThanOrEqualScalar(left, right);
/// <summary>
/// uint64x1_t vcge_s64 (int64x1_t a, int64x1_t b)
/// A64: CMGE Dd, Dn, Dm
/// </summary>
public static Vector64<long> CompareGreaterThanOrEqualScalar(Vector64<long> left, Vector64<long> right) => CompareGreaterThanOrEqualScalar(left, right);
/// <summary>
/// uint32_t vcges_f32 (float32_t a, float32_t b)
/// A64: FCMGE Sd, Sn, Sm
/// </summary>
public static Vector64<float> CompareGreaterThanOrEqualScalar(Vector64<float> left, Vector64<float> right) => CompareGreaterThanOrEqualScalar(left, right);
/// <summary>
/// uint64x1_t vcge_u64 (uint64x1_t a, uint64x1_t b)
/// A64: CMHS Dd, Dn, Dm
/// </summary>
public static Vector64<ulong> CompareGreaterThanOrEqualScalar(Vector64<ulong> left, Vector64<ulong> right) => CompareGreaterThanOrEqualScalar(left, right);
/// <summary>
/// uint64x2_t vcltq_f64 (float64x2_t a, float64x2_t b)
/// A64: FCMGT Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<double> CompareLessThan(Vector128<double> left, Vector128<double> right) => CompareLessThan(left, right);
/// <summary>
/// uint64x2_t vcltq_s64 (int64x2_t a, int64x2_t b)
/// A64: CMGT Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<long> CompareLessThan(Vector128<long> left, Vector128<long> right) => CompareLessThan(left, right);
/// <summary>
/// uint64x2_t vcltq_u64 (uint64x2_t a, uint64x2_t b)
/// A64: CMHI Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<ulong> CompareLessThan(Vector128<ulong> left, Vector128<ulong> right) => CompareLessThan(left, right);
/// <summary>
/// uint64x1_t vclt_f64 (float64x1_t a, float64x1_t b)
/// A64: FCMGT Dd, Dn, Dm
/// </summary>
public static Vector64<double> CompareLessThanScalar(Vector64<double> left, Vector64<double> right) => CompareLessThanScalar(left, right);
/// <summary>
/// uint64x1_t vclt_s64 (int64x1_t a, int64x1_t b)
/// A64: CMGT Dd, Dn, Dm
/// </summary>
public static Vector64<long> CompareLessThanScalar(Vector64<long> left, Vector64<long> right) => CompareLessThanScalar(left, right);
/// <summary>
/// uint32_t vclts_f32 (float32_t a, float32_t b)
/// A64: FCMGT Sd, Sn, Sm
/// </summary>
public static Vector64<float> CompareLessThanScalar(Vector64<float> left, Vector64<float> right) => CompareLessThanScalar(left, right);
/// <summary>
/// uint64x1_t vclt_u64 (uint64x1_t a, uint64x1_t b)
/// A64: CMHI Dd, Dn, Dm
/// </summary>
public static Vector64<ulong> CompareLessThanScalar(Vector64<ulong> left, Vector64<ulong> right) => CompareLessThanScalar(left, right);
/// <summary>
/// uint64x2_t vcleq_f64 (float64x2_t a, float64x2_t b)
/// A64: FCMGE Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<double> CompareLessThanOrEqual(Vector128<double> left, Vector128<double> right) => CompareLessThanOrEqual(left, right);
/// <summary>
/// uint64x2_t vcleq_s64 (int64x2_t a, int64x2_t b)
/// A64: CMGE Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<long> CompareLessThanOrEqual(Vector128<long> left, Vector128<long> right) => CompareLessThanOrEqual(left, right);
/// <summary>
/// uint64x2_t vcleq_u64 (uint64x2_t a, uint64x2_t b)
/// A64: CMHS Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<ulong> CompareLessThanOrEqual(Vector128<ulong> left, Vector128<ulong> right) => CompareLessThanOrEqual(left, right);
/// <summary>
/// uint64x1_t vcle_f64 (float64x1_t a, float64x1_t b)
/// A64: FCMGE Dd, Dn, Dm
/// </summary>
public static Vector64<double> CompareLessThanOrEqualScalar(Vector64<double> left, Vector64<double> right) => CompareLessThanOrEqualScalar(left, right);
/// <summary>
/// uint64x1_t vcle_s64 (int64x1_t a, int64x1_t b)
/// A64: CMGE Dd, Dn, Dm
/// </summary>
public static Vector64<long> CompareLessThanOrEqualScalar(Vector64<long> left, Vector64<long> right) => CompareLessThanOrEqualScalar(left, right);
/// <summary>
/// uint32_t vcles_f32 (float32_t a, float32_t b)
/// A64: FCMGE Sd, Sn, Sm
/// </summary>
public static Vector64<float> CompareLessThanOrEqualScalar(Vector64<float> left, Vector64<float> right) => CompareLessThanOrEqualScalar(left, right);
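// Illustrative note (not part of the original source): there is no register-register
// "less than" compare instruction on A64, so the CompareLessThan* family above is
// encoded with the greater-than forms (FCMGT/CMGT/CMHI) with the operands swapped;
// the managed semantics are still left < right.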
/// <summary>
/// uint64x1_t vcle_u64 (uint64x1_t a, uint64x1_t b)
/// A64: CMHS Dd, Dn, Dm
/// </summary>
public static Vector64<ulong> CompareLessThanOrEqualScalar(Vector64<ulong> left, Vector64<ulong> right) => CompareLessThanOrEqualScalar(left, right);
/// <summary>
/// uint64x2_t vtstq_f64 (float64x2_t a, float64x2_t b)
/// A64: CMTST Vd.2D, Vn.2D, Vm.2D
/// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
/// </summary>
public static Vector128<double> CompareTest(Vector128<double> left, Vector128<double> right) => CompareTest(left, right);
/// <summary>
/// uint64x2_t vtstq_s64 (int64x2_t a, int64x2_t b)
/// A64: CMTST Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<long> CompareTest(Vector128<long> left, Vector128<long> right) => CompareTest(left, right);
/// <summary>
/// uint64x2_t vtstq_u64 (uint64x2_t a, uint64x2_t b)
/// A64: CMTST Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<ulong> CompareTest(Vector128<ulong> left, Vector128<ulong> right) => CompareTest(left, right);
/// <summary>
/// uint64x1_t vtst_f64 (float64x1_t a, float64x1_t b)
/// A64: CMTST Dd, Dn, Dm
/// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
/// </summary>
public static Vector64<double> CompareTestScalar(Vector64<double> left, Vector64<double> right) => CompareTestScalar(left, right);
/// <summary>
/// uint64x1_t vtst_s64 (int64x1_t a, int64x1_t b)
/// A64: CMTST Dd, Dn, Dm
/// </summary>
public static Vector64<long> CompareTestScalar(Vector64<long> left, Vector64<long> right) => CompareTestScalar(left, right);
/// <summary>
/// uint64x1_t vtst_u64 (uint64x1_t a, uint64x1_t b)
/// A64: CMTST Dd, Dn, Dm
/// </summary>
public static Vector64<ulong> CompareTestScalar(Vector64<ulong> left, Vector64<ulong> right) => CompareTestScalar(left, right);
/// <summary>
/// float64x2_t vcvt_f64_f32 (float32x2_t a)
/// A64: FCVTL Vd.2D, Vn.2S
/// </summary>
public static Vector128<double> ConvertToDouble(Vector64<float> value) => ConvertToDouble(value);
/// <summary>
/// float64x2_t vcvtq_f64_s64 (int64x2_t a)
/// A64: SCVTF Vd.2D, Vn.2D
/// </summary>
public static Vector128<double> ConvertToDouble(Vector128<long> value) => ConvertToDouble(value);
/// <summary>
/// float64x2_t vcvtq_f64_u64 (uint64x2_t a)
/// A64: UCVTF Vd.2D, Vn.2D
/// </summary>
public static Vector128<double> ConvertToDouble(Vector128<ulong> value) => ConvertToDouble(value);
/// <summary>
/// float64x1_t vcvt_f64_s64 (int64x1_t a)
/// A64: SCVTF Dd, Dn
/// </summary>
public static Vector64<double> ConvertToDoubleScalar(Vector64<long> value) => ConvertToDoubleScalar(value);
/// <summary>
/// float64x1_t vcvt_f64_u64 (uint64x1_t a)
/// A64: UCVTF Dd, Dn
/// </summary>
public static Vector64<double> ConvertToDoubleScalar(Vector64<ulong> value) => ConvertToDoubleScalar(value);
/// <summary>
/// float64x2_t vcvt_high_f64_f32 (float32x4_t a)
/// A64: FCVTL2 Vd.2D, Vn.4S
/// </summary>
public static Vector128<double> ConvertToDoubleUpper(Vector128<float> value) => ConvertToDoubleUpper(value);
/// <summary>
/// int64x2_t vcvtaq_s64_f64 (float64x2_t a)
/// A64: FCVTAS Vd.2D, Vn.2D
/// </summary>
public static Vector128<long> ConvertToInt64RoundAwayFromZero(Vector128<double> value) => ConvertToInt64RoundAwayFromZero(value);
/// <summary>
/// int64x1_t vcvta_s64_f64 (float64x1_t a)
/// A64: FCVTAS Dd, Dn
/// </summary>
public static Vector64<long> ConvertToInt64RoundAwayFromZeroScalar(Vector64<double> value) => ConvertToInt64RoundAwayFromZeroScalar(value);
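// Illustrative note (not part of the original source): CompareTest (CMTST) sets a
// lane to all-ones when the bitwise AND of the corresponding lanes is non-zero:
//   result[i] = ((left[i] & right[i]) != 0) ? ~0 : 0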
/// <summary>
/// int64x2_t vcvtnq_s64_f64 (float64x2_t a)
/// A64: FCVTNS Vd.2D, Vn.2D
/// </summary>
public static Vector128<long> ConvertToInt64RoundToEven(Vector128<double> value) => ConvertToInt64RoundToEven(value);
/// <summary>
/// int64x1_t vcvtn_s64_f64 (float64x1_t a)
/// A64: FCVTNS Dd, Dn
/// </summary>
public static Vector64<long> ConvertToInt64RoundToEvenScalar(Vector64<double> value) => ConvertToInt64RoundToEvenScalar(value);
/// <summary>
/// int64x2_t vcvtmq_s64_f64 (float64x2_t a)
/// A64: FCVTMS Vd.2D, Vn.2D
/// </summary>
public static Vector128<long> ConvertToInt64RoundToNegativeInfinity(Vector128<double> value) => ConvertToInt64RoundToNegativeInfinity(value);
/// <summary>
/// int64x1_t vcvtm_s64_f64 (float64x1_t a)
/// A64: FCVTMS Dd, Dn
/// </summary>
public static Vector64<long> ConvertToInt64RoundToNegativeInfinityScalar(Vector64<double> value) => ConvertToInt64RoundToNegativeInfinityScalar(value);
/// <summary>
/// int64x2_t vcvtpq_s64_f64 (float64x2_t a)
/// A64: FCVTPS Vd.2D, Vn.2D
/// </summary>
public static Vector128<long> ConvertToInt64RoundToPositiveInfinity(Vector128<double> value) => ConvertToInt64RoundToPositiveInfinity(value);
/// <summary>
/// int64x1_t vcvtp_s64_f64 (float64x1_t a)
/// A64: FCVTPS Dd, Dn
/// </summary>
public static Vector64<long> ConvertToInt64RoundToPositiveInfinityScalar(Vector64<double> value) => ConvertToInt64RoundToPositiveInfinityScalar(value);
/// <summary>
/// int64x2_t vcvtq_s64_f64 (float64x2_t a)
/// A64: FCVTZS Vd.2D, Vn.2D
/// </summary>
public static Vector128<long> ConvertToInt64RoundToZero(Vector128<double> value) => ConvertToInt64RoundToZero(value);
/// <summary>
/// int64x1_t vcvt_s64_f64 (float64x1_t a)
/// A64: FCVTZS Dd, Dn
/// </summary>
public static Vector64<long> ConvertToInt64RoundToZeroScalar(Vector64<double> value) => ConvertToInt64RoundToZeroScalar(value);
/// <summary>
/// float32x2_t vcvt_f32_f64 (float64x2_t a)
/// A64: FCVTN Vd.2S, Vn.2D
/// </summary>
public static Vector64<float> ConvertToSingleLower(Vector128<double> value) => ConvertToSingleLower(value);
/// <summary>
/// float32x2_t vcvtx_f32_f64 (float64x2_t a)
/// A64: FCVTXN Vd.2S, Vn.2D
/// </summary>
public static Vector64<float> ConvertToSingleRoundToOddLower(Vector128<double> value) => ConvertToSingleRoundToOddLower(value);
/// <summary>
/// float32x4_t vcvtx_high_f32_f64 (float32x2_t r, float64x2_t a)
/// A64: FCVTXN2 Vd.4S, Vn.2D
/// </summary>
public static Vector128<float> ConvertToSingleRoundToOddUpper(Vector64<float> lower, Vector128<double> value) => ConvertToSingleRoundToOddUpper(lower, value);
/// <summary>
/// float32x4_t vcvt_high_f32_f64 (float32x2_t r, float64x2_t a)
/// A64: FCVTN2 Vd.4S, Vn.2D
/// </summary>
public static Vector128<float> ConvertToSingleUpper(Vector64<float> lower, Vector128<double> value) => ConvertToSingleUpper(lower, value);
/// <summary>
/// uint64x2_t vcvtaq_u64_f64 (float64x2_t a)
/// A64: FCVTAU Vd.2D, Vn.2D
/// </summary>
public static Vector128<ulong> ConvertToUInt64RoundAwayFromZero(Vector128<double> value) => ConvertToUInt64RoundAwayFromZero(value);
/// <summary>
/// uint64x1_t vcvta_u64_f64 (float64x1_t a)
/// A64: FCVTAU Dd, Dn
/// </summary>
public static Vector64<ulong> ConvertToUInt64RoundAwayFromZeroScalar(Vector64<double> value) => ConvertToUInt64RoundAwayFromZeroScalar(value);
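// Illustrative note (not part of the original source) on the rounding suffixes,
// using 2.5 as input: RoundToEven (FCVTNS) gives 2, RoundAwayFromZero (FCVTAS)
// gives 3, RoundToPositiveInfinity (FCVTPS) gives 3, and RoundToZero (FCVTZS)
// gives 2; RoundToNegativeInfinity (FCVTMS) gives -3 for -2.5.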
/// <summary>
/// uint64x2_t vcvtnq_u64_f64 (float64x2_t a)
/// A64: FCVTNU Vd.2D, Vn.2D
/// </summary>
public static Vector128<ulong> ConvertToUInt64RoundToEven(Vector128<double> value) => ConvertToUInt64RoundToEven(value);
/// <summary>
/// uint64x1_t vcvtn_u64_f64 (float64x1_t a)
/// A64: FCVTNU Dd, Dn
/// </summary>
public static Vector64<ulong> ConvertToUInt64RoundToEvenScalar(Vector64<double> value) => ConvertToUInt64RoundToEvenScalar(value);
/// <summary>
/// uint64x2_t vcvtmq_u64_f64 (float64x2_t a)
/// A64: FCVTMU Vd.2D, Vn.2D
/// </summary>
public static Vector128<ulong> ConvertToUInt64RoundToNegativeInfinity(Vector128<double> value) => ConvertToUInt64RoundToNegativeInfinity(value);
/// <summary>
/// uint64x1_t vcvtm_u64_f64 (float64x1_t a)
/// A64: FCVTMU Dd, Dn
/// </summary>
public static Vector64<ulong> ConvertToUInt64RoundToNegativeInfinityScalar(Vector64<double> value) => ConvertToUInt64RoundToNegativeInfinityScalar(value);
/// <summary>
/// uint64x2_t vcvtpq_u64_f64 (float64x2_t a)
/// A64: FCVTPU Vd.2D, Vn.2D
/// </summary>
public static Vector128<ulong> ConvertToUInt64RoundToPositiveInfinity(Vector128<double> value) => ConvertToUInt64RoundToPositiveInfinity(value);
/// <summary>
/// uint64x1_t vcvtp_u64_f64 (float64x1_t a)
/// A64: FCVTPU Dd, Dn
/// </summary>
public static Vector64<ulong> ConvertToUInt64RoundToPositiveInfinityScalar(Vector64<double> value) => ConvertToUInt64RoundToPositiveInfinityScalar(value);
/// <summary>
/// uint64x2_t vcvtq_u64_f64 (float64x2_t a)
/// A64: FCVTZU Vd.2D, Vn.2D
/// </summary>
public static Vector128<ulong> ConvertToUInt64RoundToZero(Vector128<double> value) => ConvertToUInt64RoundToZero(value);
/// <summary>
/// uint64x1_t vcvt_u64_f64 (float64x1_t a)
/// A64: FCVTZU Dd, Dn
/// </summary>
public static Vector64<ulong> ConvertToUInt64RoundToZeroScalar(Vector64<double> value) => ConvertToUInt64RoundToZeroScalar(value);
/// <summary>
/// float32x2_t vdiv_f32 (float32x2_t a, float32x2_t b)
/// A64: FDIV Vd.2S, Vn.2S, Vm.2S
/// </summary>
public static Vector64<float> Divide(Vector64<float> left, Vector64<float> right) => Divide(left, right);
/// <summary>
/// float64x2_t vdivq_f64 (float64x2_t a, float64x2_t b)
/// A64: FDIV Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<double> Divide(Vector128<double> left, Vector128<double> right) => Divide(left, right);
/// <summary>
/// float32x4_t vdivq_f32 (float32x4_t a, float32x4_t b)
/// A64: FDIV Vd.4S, Vn.4S, Vm.4S
/// </summary>
public static Vector128<float> Divide(Vector128<float> left, Vector128<float> right) => Divide(left, right);
/// <summary>
/// float64x2_t vdupq_laneq_f64 (float64x2_t vec, const int lane)
/// A64: DUP Vd.2D, Vn.D[index]
/// </summary>
public static Vector128<double> DuplicateSelectedScalarToVector128(Vector128<double> value, byte index) => DuplicateSelectedScalarToVector128(value, index);
/// <summary>
/// int64x2_t vdupq_laneq_s64 (int64x2_t vec, const int lane)
/// A64: DUP Vd.2D, Vn.D[index]
/// </summary>
public static Vector128<long> DuplicateSelectedScalarToVector128(Vector128<long> value, byte index) => DuplicateSelectedScalarToVector128(value, index);
/// <summary>
/// uint64x2_t vdupq_laneq_u64 (uint64x2_t vec, const int lane)
/// A64: DUP Vd.2D, Vn.D[index]
/// </summary>
public static Vector128<ulong> DuplicateSelectedScalarToVector128(Vector128<ulong> value, byte index) => DuplicateSelectedScalarToVector128(value, index);
/// <summary>
/// float64x2_t vdupq_n_f64 (float64_t value)
/// A64: DUP Vd.2D, Vn.D[0]
/// </summary>
public static Vector128<double> DuplicateToVector128(double value) => DuplicateToVector128(value);
/// <summary>
/// int64x2_t vdupq_n_s64 (int64_t value)
/// A64: DUP Vd.2D, Rn
/// </summary>
public static Vector128<long> DuplicateToVector128(long value) => DuplicateToVector128(value);
/// <summary>
/// uint64x2_t vdupq_n_u64 (uint64_t value)
/// A64: DUP Vd.2D, Rn
/// </summary>
public static Vector128<ulong> DuplicateToVector128(ulong value) => DuplicateToVector128(value);
/// <summary>
/// uint8_t vqmovnh_u16 (uint16_t a)
/// A64: UQXTN Bd, Hn
/// </summary>
public static Vector64<byte> ExtractNarrowingSaturateScalar(Vector64<ushort> value) => ExtractNarrowingSaturateScalar(value);
/// <summary>
/// int16_t vqmovns_s32 (int32_t a)
/// A64: SQXTN Hd, Sn
/// </summary>
public static Vector64<short> ExtractNarrowingSaturateScalar(Vector64<int> value) => ExtractNarrowingSaturateScalar(value);
/// <summary>
/// int32_t vqmovnd_s64 (int64_t a)
/// A64: SQXTN Sd, Dn
/// </summary>
public static Vector64<int> ExtractNarrowingSaturateScalar(Vector64<long> value) => ExtractNarrowingSaturateScalar(value);
/// <summary>
/// int8_t vqmovnh_s16 (int16_t a)
/// A64: SQXTN Bd, Hn
/// </summary>
public static Vector64<sbyte> ExtractNarrowingSaturateScalar(Vector64<short> value) => ExtractNarrowingSaturateScalar(value);
/// <summary>
/// uint16_t vqmovns_u32 (uint32_t a)
/// A64: UQXTN Hd, Sn
/// </summary>
public static Vector64<ushort> ExtractNarrowingSaturateScalar(Vector64<uint> value) => ExtractNarrowingSaturateScalar(value);
/// <summary>
/// uint32_t vqmovnd_u64 (uint64_t a)
/// A64: UQXTN Sd, Dn
/// </summary>
public static Vector64<uint> ExtractNarrowingSaturateScalar(Vector64<ulong> value) => ExtractNarrowingSaturateScalar(value);
/// <summary>
/// uint8_t vqmovunh_s16 (int16_t a)
/// A64: SQXTUN Bd, Hn
/// </summary>
public static Vector64<byte> ExtractNarrowingSaturateUnsignedScalar(Vector64<short> value) => ExtractNarrowingSaturateUnsignedScalar(value);
/// <summary>
/// uint16_t vqmovuns_s32 (int32_t a)
/// A64: SQXTUN Hd, Sn
/// </summary>
public static Vector64<ushort> ExtractNarrowingSaturateUnsignedScalar(Vector64<int> value) => ExtractNarrowingSaturateUnsignedScalar(value);
/// <summary>
/// uint32_t vqmovund_s64 (int64_t a)
/// A64: SQXTUN Sd, Dn
/// </summary>
public static Vector64<uint> ExtractNarrowingSaturateUnsignedScalar(Vector64<long> value) => ExtractNarrowingSaturateUnsignedScalar(value);
/// <summary>
/// float64x2_t vrndmq_f64 (float64x2_t a)
/// A64: FRINTM Vd.2D, Vn.2D
/// </summary>
public static Vector128<double> Floor(Vector128<double> value) => Floor(value);
/// <summary>
/// float64x2_t vfmaq_f64 (float64x2_t a, float64x2_t b, float64x2_t c)
/// A64: FMLA Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<double> FusedMultiplyAdd(Vector128<double> addend, Vector128<double> left, Vector128<double> right) => FusedMultiplyAdd(addend, left, right);
/// <summary>
/// float32x2_t vfma_n_f32 (float32x2_t a, float32x2_t b, float32_t n)
/// A64: FMLA Vd.2S, Vn.2S, Vm.S[0]
/// </summary>
public static Vector64<float> FusedMultiplyAddByScalar(Vector64<float> addend, Vector64<float> left, Vector64<float> right) => FusedMultiplyAddByScalar(addend, left, right);
/// <summary>
/// float64x2_t vfmaq_n_f64 (float64x2_t a, float64x2_t b, float64_t n)
/// A64: FMLA Vd.2D, Vn.2D, Vm.D[0]
/// </summary>
public static Vector128<double> FusedMultiplyAddByScalar(Vector128<double> addend, Vector128<double> left, Vector64<double> right) => FusedMultiplyAddByScalar(addend, left, right);
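// Illustrative note (not part of the original source) on the
// ExtractNarrowingSaturate* scalars above: they clamp instead of truncating.
// Narrowing the int32 value 100000 to int16 via SQXTN yields 32767 and -100000
// yields -32768; the unsigned variant (SQXTUN) clamps -1 to 0.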
/// <summary>
/// float32x4_t vfmaq_n_f32 (float32x4_t a, float32x4_t b, float32_t n)
/// A64: FMLA Vd.4S, Vn.4S, Vm.S[0]
/// </summary>
public static Vector128<float> FusedMultiplyAddByScalar(Vector128<float> addend, Vector128<float> left, Vector64<float> right) => FusedMultiplyAddByScalar(addend, left, right);
/// <summary>
/// float32x2_t vfma_lane_f32 (float32x2_t a, float32x2_t b, float32x2_t v, const int lane)
/// A64: FMLA Vd.2S, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector64<float> FusedMultiplyAddBySelectedScalar(Vector64<float> addend, Vector64<float> left, Vector64<float> right, byte rightIndex) => FusedMultiplyAddBySelectedScalar(addend, left, right, rightIndex);
/// <summary>
/// float32x2_t vfma_laneq_f32 (float32x2_t a, float32x2_t b, float32x4_t v, const int lane)
/// A64: FMLA Vd.2S, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector64<float> FusedMultiplyAddBySelectedScalar(Vector64<float> addend, Vector64<float> left, Vector128<float> right, byte rightIndex) => FusedMultiplyAddBySelectedScalar(addend, left, right, rightIndex);
/// <summary>
/// float64x2_t vfmaq_laneq_f64 (float64x2_t a, float64x2_t b, float64x2_t v, const int lane)
/// A64: FMLA Vd.2D, Vn.2D, Vm.D[lane]
/// </summary>
public static Vector128<double> FusedMultiplyAddBySelectedScalar(Vector128<double> addend, Vector128<double> left, Vector128<double> right, byte rightIndex) => FusedMultiplyAddBySelectedScalar(addend, left, right, rightIndex);
/// <summary>
/// float32x4_t vfmaq_lane_f32 (float32x4_t a, float32x4_t b, float32x2_t v, const int lane)
/// A64: FMLA Vd.4S, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<float> FusedMultiplyAddBySelectedScalar(Vector128<float> addend, Vector128<float> left, Vector64<float> right, byte rightIndex) => FusedMultiplyAddBySelectedScalar(addend, left, right, rightIndex);
/// <summary>
/// float32x4_t vfmaq_laneq_f32 (float32x4_t a, float32x4_t b, float32x4_t v, const int lane)
/// A64: FMLA Vd.4S, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<float> FusedMultiplyAddBySelectedScalar(Vector128<float> addend, Vector128<float> left, Vector128<float> right, byte rightIndex) => FusedMultiplyAddBySelectedScalar(addend, left, right, rightIndex);
/// <summary>
/// float64_t vfmad_laneq_f64 (float64_t a, float64_t b, float64x2_t v, const int lane)
/// A64: FMLA Dd, Dn, Vm.D[lane]
/// </summary>
public static Vector64<double> FusedMultiplyAddScalarBySelectedScalar(Vector64<double> addend, Vector64<double> left, Vector128<double> right, byte rightIndex) => FusedMultiplyAddScalarBySelectedScalar(addend, left, right, rightIndex);
/// <summary>
/// float32_t vfmas_lane_f32 (float32_t a, float32_t b, float32x2_t v, const int lane)
/// A64: FMLA Sd, Sn, Vm.S[lane]
/// </summary>
public static Vector64<float> FusedMultiplyAddScalarBySelectedScalar(Vector64<float> addend, Vector64<float> left, Vector64<float> right, byte rightIndex) => FusedMultiplyAddScalarBySelectedScalar(addend, left, right, rightIndex);
/// <summary>
/// float32_t vfmas_laneq_f32 (float32_t a, float32_t b, float32x4_t v, const int lane)
/// A64: FMLA Sd, Sn, Vm.S[lane]
/// </summary>
public static Vector64<float> FusedMultiplyAddScalarBySelectedScalar(Vector64<float> addend, Vector64<float> left, Vector128<float> right, byte rightIndex) => FusedMultiplyAddScalarBySelectedScalar(addend, left, right, rightIndex);
/// <summary>
/// float64x2_t vfmsq_f64 (float64x2_t a, float64x2_t b, float64x2_t c)
/// A64: FMLS Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<double> FusedMultiplySubtract(Vector128<double> minuend, Vector128<double> left, Vector128<double> right) => FusedMultiplySubtract(minuend, left, right);
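// Illustrative note (not part of the original source): FusedMultiplyAddBySelectedScalar
// computes, per lane, addend[i] + left[i] * right[rightIndex] with a single rounding
// step (FMLA by element), which is why it can differ from a separate multiply
// followed by an add.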
/// <summary>
/// float32x2_t vfms_n_f32 (float32x2_t a, float32x2_t b, float32_t n)
/// A64: FMLS Vd.2S, Vn.2S, Vm.S[0]
/// </summary>
public static Vector64<float> FusedMultiplySubtractByScalar(Vector64<float> minuend, Vector64<float> left, Vector64<float> right) => FusedMultiplySubtractByScalar(minuend, left, right);
/// <summary>
/// float64x2_t vfmsq_n_f64 (float64x2_t a, float64x2_t b, float64_t n)
/// A64: FMLS Vd.2D, Vn.2D, Vm.D[0]
/// </summary>
public static Vector128<double> FusedMultiplySubtractByScalar(Vector128<double> minuend, Vector128<double> left, Vector64<double> right) => FusedMultiplySubtractByScalar(minuend, left, right);
/// <summary>
/// float32x4_t vfmsq_n_f32 (float32x4_t a, float32x4_t b, float32_t n)
/// A64: FMLS Vd.4S, Vn.4S, Vm.S[0]
/// </summary>
public static Vector128<float> FusedMultiplySubtractByScalar(Vector128<float> minuend, Vector128<float> left, Vector64<float> right) => FusedMultiplySubtractByScalar(minuend, left, right);
/// <summary>
/// float32x2_t vfms_lane_f32 (float32x2_t a, float32x2_t b, float32x2_t v, const int lane)
/// A64: FMLS Vd.2S, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector64<float> FusedMultiplySubtractBySelectedScalar(Vector64<float> minuend, Vector64<float> left, Vector64<float> right, byte rightIndex) => FusedMultiplySubtractBySelectedScalar(minuend, left, right, rightIndex);
/// <summary>
/// float32x2_t vfms_laneq_f32 (float32x2_t a, float32x2_t b, float32x4_t v, const int lane)
/// A64: FMLS Vd.2S, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector64<float> FusedMultiplySubtractBySelectedScalar(Vector64<float> minuend, Vector64<float> left, Vector128<float> right, byte rightIndex) => FusedMultiplySubtractBySelectedScalar(minuend, left, right, rightIndex);
/// <summary>
/// float64x2_t vfmsq_laneq_f64 (float64x2_t a, float64x2_t b, float64x2_t v, const int lane)
/// A64: FMLS Vd.2D, Vn.2D, Vm.D[lane]
/// </summary>
public static Vector128<double> FusedMultiplySubtractBySelectedScalar(Vector128<double> minuend, Vector128<double> left, Vector128<double> right, byte rightIndex) => FusedMultiplySubtractBySelectedScalar(minuend, left, right, rightIndex);
/// <summary>
/// float32x4_t vfmsq_lane_f32 (float32x4_t a, float32x4_t b, float32x2_t v, const int lane)
/// A64: FMLS Vd.4S, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<float> FusedMultiplySubtractBySelectedScalar(Vector128<float> minuend, Vector128<float> left, Vector64<float> right, byte rightIndex) => FusedMultiplySubtractBySelectedScalar(minuend, left, right, rightIndex);
/// <summary>
/// float32x4_t vfmsq_laneq_f32 (float32x4_t a, float32x4_t b, float32x4_t v, const int lane)
/// A64: FMLS Vd.4S, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<float> FusedMultiplySubtractBySelectedScalar(Vector128<float> minuend, Vector128<float> left, Vector128<float> right, byte rightIndex) => FusedMultiplySubtractBySelectedScalar(minuend, left, right, rightIndex);
/// <summary>
/// float64_t vfmsd_laneq_f64 (float64_t a, float64_t b, float64x2_t v, const int lane)
/// A64: FMLS Dd, Dn, Vm.D[lane]
/// </summary>
public static Vector64<double> FusedMultiplySubtractScalarBySelectedScalar(Vector64<double> minuend, Vector64<double> left, Vector128<double> right, byte rightIndex) => FusedMultiplySubtractScalarBySelectedScalar(minuend, left, right, rightIndex);
/// <summary>
/// float32_t vfmss_lane_f32 (float32_t a, float32_t b, float32x2_t v, const int lane)
/// A64: FMLS Sd, Sn, Vm.S[lane]
/// </summary>
public static Vector64<float> FusedMultiplySubtractScalarBySelectedScalar(Vector64<float> minuend, Vector64<float> left, Vector64<float> right, byte rightIndex) => FusedMultiplySubtractScalarBySelectedScalar(minuend, left, right, rightIndex);
/// <summary>
/// float32_t vfmss_laneq_f32 (float32_t a, float32_t b, float32x4_t v, const int lane)
/// A64: FMLS Sd, Sn, Vm.S[lane]
/// </summary>
public static Vector64<float> FusedMultiplySubtractScalarBySelectedScalar(Vector64<float> minuend, Vector64<float> left, Vector128<float> right, byte rightIndex) => FusedMultiplySubtractScalarBySelectedScalar(minuend, left, right, rightIndex);
/// <summary>
/// uint8x8_t vcopy_lane_u8 (uint8x8_t a, const int lane1, uint8x8_t b, const int lane2)
/// A64: INS Vd.B[lane1], Vn.B[lane2]
/// </summary>
public static Vector64<byte> InsertSelectedScalar(Vector64<byte> result, byte resultIndex, Vector64<byte> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));
/// <summary>
/// uint8x8_t vcopy_laneq_u8 (uint8x8_t a, const int lane1, uint8x16_t b, const int lane2)
/// A64: INS Vd.B[lane1], Vn.B[lane2]
/// </summary>
public static Vector64<byte> InsertSelectedScalar(Vector64<byte> result, byte resultIndex, Vector128<byte> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));
/// <summary>
/// int16x4_t vcopy_lane_s16 (int16x4_t a, const int lane1, int16x4_t b, const int lane2)
/// A64: INS Vd.H[lane1], Vn.H[lane2]
/// </summary>
public static Vector64<short> InsertSelectedScalar(Vector64<short> result, byte resultIndex, Vector64<short> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));
/// <summary>
/// int16x4_t vcopy_laneq_s16 (int16x4_t a, const int lane1, int16x8_t b, const int lane2)
/// A64: INS Vd.H[lane1], Vn.H[lane2]
/// </summary>
public static Vector64<short> InsertSelectedScalar(Vector64<short> result, byte resultIndex, Vector128<short> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));
/// <summary>
/// int32x2_t vcopy_lane_s32 (int32x2_t a, const int lane1, int32x2_t b, const int lane2)
/// A64: INS Vd.S[lane1], Vn.S[lane2]
/// </summary>
public static Vector64<int> InsertSelectedScalar(Vector64<int> result, byte resultIndex, Vector64<int> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));
/// <summary>
/// int32x2_t vcopy_laneq_s32 (int32x2_t a, const int lane1, int32x4_t b, const int lane2)
/// A64: INS Vd.S[lane1], Vn.S[lane2]
/// </summary>
public static Vector64<int> InsertSelectedScalar(Vector64<int> result, byte resultIndex, Vector128<int> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));
/// <summary>
/// int8x8_t vcopy_lane_s8 (int8x8_t a, const int lane1, int8x8_t b, const int lane2)
/// A64: INS Vd.B[lane1], Vn.B[lane2]
/// </summary>
public static Vector64<sbyte> InsertSelectedScalar(Vector64<sbyte> result, byte resultIndex, Vector64<sbyte> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));
/// <summary>
/// int8x8_t vcopy_laneq_s8 (int8x8_t a, const int lane1, int8x16_t b, const int lane2)
/// A64: INS Vd.B[lane1], Vn.B[lane2]
/// </summary>
public static Vector64<sbyte> InsertSelectedScalar(Vector64<sbyte> result, byte resultIndex, Vector128<sbyte> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));
/// <summary>
/// float32x2_t vcopy_lane_f32 (float32x2_t a, const int lane1, float32x2_t b, const int lane2)
/// A64: INS Vd.S[lane1], Vn.S[lane2]
/// </summary>
public static Vector64<float> InsertSelectedScalar(Vector64<float> result, byte resultIndex, Vector64<float> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));
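// Illustrative note (not part of the original source): InsertSelectedScalar copies
// one lane into another, as the expression-bodied form shows. A sketch with
// hypothetical values:
//   var result = Vector64.Create(1, 2);                               // int32x2
//   var value  = Vector128.Create(7, 8, 9, 10);
//   result = AdvSimd.Arm64.InsertSelectedScalar(result, 0, value, 2); // {9, 2}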
/// <summary>
/// float32x2_t vcopy_laneq_f32 (float32x2_t a, const int lane1, float32x4_t b, const int lane2)
/// A64: INS Vd.S[lane1], Vn.S[lane2]
/// </summary>
public static Vector64<float> InsertSelectedScalar(Vector64<float> result, byte resultIndex, Vector128<float> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));
/// <summary>
/// uint16x4_t vcopy_lane_u16 (uint16x4_t a, const int lane1, uint16x4_t b, const int lane2)
/// A64: INS Vd.H[lane1], Vn.H[lane2]
/// </summary>
public static Vector64<ushort> InsertSelectedScalar(Vector64<ushort> result, byte resultIndex, Vector64<ushort> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));
/// <summary>
/// uint16x4_t vcopy_laneq_u16 (uint16x4_t a, const int lane1, uint16x8_t b, const int lane2)
/// A64: INS Vd.H[lane1], Vn.H[lane2]
/// </summary>
public static Vector64<ushort> InsertSelectedScalar(Vector64<ushort> result, byte resultIndex, Vector128<ushort> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));
/// <summary>
/// uint32x2_t vcopy_lane_u32 (uint32x2_t a, const int lane1, uint32x2_t b, const int lane2)
/// A64: INS Vd.S[lane1], Vn.S[lane2]
/// </summary>
public static Vector64<uint> InsertSelectedScalar(Vector64<uint> result, byte resultIndex, Vector64<uint> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));
/// <summary>
/// uint32x2_t vcopy_laneq_u32 (uint32x2_t a, const int lane1, uint32x4_t b, const int lane2)
/// A64: INS Vd.S[lane1], Vn.S[lane2]
/// </summary>
public static Vector64<uint> InsertSelectedScalar(Vector64<uint> result, byte resultIndex, Vector128<uint> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));
/// <summary>
/// uint8x16_t vcopyq_lane_u8 (uint8x16_t a, const int lane1, uint8x8_t b, const int lane2)
/// A64: INS Vd.B[lane1], Vn.B[lane2]
/// </summary>
public static Vector128<byte> InsertSelectedScalar(Vector128<byte> result, byte resultIndex, Vector64<byte> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));
/// <summary>
/// uint8x16_t vcopyq_laneq_u8 (uint8x16_t a, const int lane1, uint8x16_t b, const int lane2)
/// A64: INS Vd.B[lane1], Vn.B[lane2]
/// </summary>
public static Vector128<byte> InsertSelectedScalar(Vector128<byte> result, byte resultIndex, Vector128<byte> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));
/// <summary>
/// float64x2_t vcopyq_laneq_f64 (float64x2_t a, const int lane1, float64x2_t b, const int lane2)
/// A64: INS Vd.D[lane1], Vn.D[lane2]
/// </summary>
public static Vector128<double> InsertSelectedScalar(Vector128<double> result, byte resultIndex, Vector128<double> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));
/// <summary>
/// int16x8_t vcopyq_lane_s16 (int16x8_t a, const int lane1, int16x4_t b, const int lane2)
/// A64: INS Vd.H[lane1], Vn.H[lane2]
/// </summary>
public static Vector128<short> InsertSelectedScalar(Vector128<short> result, byte resultIndex, Vector64<short> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));
/// <summary>
/// int16x8_t vcopyq_laneq_s16 (int16x8_t a, const int lane1, int16x8_t b, const int lane2)
/// A64: INS Vd.H[lane1], Vn.H[lane2]
/// </summary>
public static Vector128<short> InsertSelectedScalar(Vector128<short> result, byte resultIndex, Vector128<short> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));
/// <summary>
/// int32x4_t vcopyq_lane_s32 (int32x4_t a, const int lane1, int32x2_t b, const int lane2)
/// A64: INS Vd.S[lane1], Vn.S[lane2]
/// </summary>
public static Vector128<int> InsertSelectedScalar(Vector128<int> result, byte resultIndex, Vector64<int> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));
/// <summary>
/// int32x4_t vcopyq_laneq_s32 (int32x4_t a, const int lane1, int32x4_t b, const int lane2)
/// A64: INS Vd.S[lane1], Vn.S[lane2]
/// </summary>
public static Vector128<int> InsertSelectedScalar(Vector128<int> result, byte resultIndex, Vector128<int> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));
/// <summary>
/// int64x2_t vcopyq_laneq_s64 (int64x2_t a, const int lane1, int64x2_t b, const int lane2)
/// A64: INS Vd.D[lane1], Vn.D[lane2]
/// </summary>
public static Vector128<long> InsertSelectedScalar(Vector128<long> result, byte resultIndex, Vector128<long> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));
/// <summary>
/// int8x16_t vcopyq_lane_s8 (int8x16_t a, const int lane1, int8x8_t b, const int lane2)
/// A64: INS Vd.B[lane1], Vn.B[lane2]
/// </summary>
public static Vector128<sbyte> InsertSelectedScalar(Vector128<sbyte> result, byte resultIndex, Vector64<sbyte> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));
/// <summary>
/// int8x16_t vcopyq_laneq_s8 (int8x16_t a, const int lane1, int8x16_t b, const int lane2)
/// A64: INS Vd.B[lane1], Vn.B[lane2]
/// </summary>
public static Vector128<sbyte> InsertSelectedScalar(Vector128<sbyte> result, byte resultIndex, Vector128<sbyte> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));
/// <summary>
/// float32x4_t vcopyq_lane_f32 (float32x4_t a, const int lane1, float32x2_t b, const int lane2)
/// A64: INS Vd.S[lane1], Vn.S[lane2]
/// </summary>
public static Vector128<float> InsertSelectedScalar(Vector128<float> result, byte resultIndex, Vector64<float> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));
/// <summary>
/// float32x4_t vcopyq_laneq_f32 (float32x4_t a, const int lane1, float32x4_t b, const int lane2)
/// A64: INS Vd.S[lane1], Vn.S[lane2]
/// </summary>
public static Vector128<float> InsertSelectedScalar(Vector128<float> result, byte resultIndex, Vector128<float> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));
/// <summary>
/// uint16x8_t vcopyq_lane_u16 (uint16x8_t a, const int lane1, uint16x4_t b, const int lane2)
/// A64: INS Vd.H[lane1], Vn.H[lane2]
/// </summary>
public static Vector128<ushort> InsertSelectedScalar(Vector128<ushort> result, byte resultIndex, Vector64<ushort> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));
/// <summary>
/// uint16x8_t vcopyq_laneq_u16 (uint16x8_t a, const int lane1, uint16x8_t b, const int lane2)
/// A64: INS Vd.H[lane1], Vn.H[lane2]
/// </summary>
public static Vector128<ushort> InsertSelectedScalar(Vector128<ushort> result, byte resultIndex, Vector128<ushort> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));
/// <summary>
/// uint32x4_t vcopyq_lane_u32 (uint32x4_t a, const int lane1, uint32x2_t b, const int lane2)
/// A64: INS Vd.S[lane1], Vn.S[lane2]
/// </summary>
public static Vector128<uint> InsertSelectedScalar(Vector128<uint> result, byte resultIndex, Vector64<uint> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));
/// <summary>
/// uint32x4_t vcopyq_laneq_u32 (uint32x4_t a, const int lane1, uint32x4_t b, const int lane2)
/// A64: INS Vd.S[lane1], Vn.S[lane2]
/// </summary>
public static Vector128<uint> InsertSelectedScalar(Vector128<uint> result, byte resultIndex, Vector128<uint> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));
/// <summary>
/// uint64x2_t vcopyq_laneq_u64 (uint64x2_t a, const int lane1, uint64x2_t b, const int lane2)
/// A64: INS Vd.D[lane1], Vn.D[lane2]
/// </summary>
public static Vector128<ulong> InsertSelectedScalar(Vector128<ulong> result, byte resultIndex, Vector128<ulong> value, byte valueIndex) => Insert(result, resultIndex, Extract(value, valueIndex));
/// <summary>
/// float64x2_t vld1q_dup_f64 (float64_t const * ptr)
/// A64: LD1R { Vt.2D }, [Xn]
/// </summary>
public static unsafe Vector128<double> LoadAndReplicateToVector128(double* address) => LoadAndReplicateToVector128(address);
/// <summary>
/// int64x2_t vld1q_dup_s64 (int64_t const * ptr)
/// A64: LD1R { Vt.2D }, [Xn]
/// </summary>
public static unsafe Vector128<long> LoadAndReplicateToVector128(long* address) => LoadAndReplicateToVector128(address);
/// <summary>
/// uint64x2_t vld1q_dup_u64 (uint64_t const * ptr)
/// A64: LD1R { Vt.2D }, [Xn]
/// </summary>
public static unsafe Vector128<ulong> LoadAndReplicateToVector128(ulong* address) => LoadAndReplicateToVector128(address);
/// <summary>
/// A64: LDP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe (Vector64<byte> Value1, Vector64<byte> Value2) LoadPairVector64(byte* address) => LoadPairVector64(address);
/// <summary>
/// A64: LDP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe (Vector64<double> Value1, Vector64<double> Value2) LoadPairVector64(double* address) => LoadPairVector64(address);
/// <summary>
/// A64: LDP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe (Vector64<short> Value1, Vector64<short> Value2) LoadPairVector64(short* address) => LoadPairVector64(address);
/// <summary>
/// A64: LDP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe (Vector64<int> Value1, Vector64<int> Value2) LoadPairVector64(int* address) => LoadPairVector64(address);
/// <summary>
/// A64: LDP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe (Vector64<long> Value1, Vector64<long> Value2) LoadPairVector64(long* address) => LoadPairVector64(address);
/// <summary>
/// A64: LDP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe (Vector64<sbyte> Value1, Vector64<sbyte> Value2) LoadPairVector64(sbyte* address) => LoadPairVector64(address);
/// <summary>
/// A64: LDP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe (Vector64<float> Value1, Vector64<float> Value2) LoadPairVector64(float* address) => LoadPairVector64(address);
/// <summary>
/// A64: LDP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe (Vector64<ushort> Value1, Vector64<ushort> Value2) LoadPairVector64(ushort* address) => LoadPairVector64(address);
/// <summary>
/// A64: LDP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe (Vector64<uint> Value1, Vector64<uint> Value2) LoadPairVector64(uint* address) => LoadPairVector64(address);
/// <summary>
/// A64: LDP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe (Vector64<ulong> Value1, Vector64<ulong> Value2) LoadPairVector64(ulong* address) => LoadPairVector64(address);
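// Illustrative note (not part of the original source): LoadPairVector64 reads two
// consecutive 64-bit vectors from memory in one LDP. A minimal sketch, assuming a
// pinned int buffer 'p':
//   var (lo, hi) = AdvSimd.Arm64.LoadPairVector64(p); // lo = p[0..1], hi = p[2..3]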
/// <summary>
/// A64: LDP St1, St2, [Xn]
/// </summary>
public static unsafe (Vector64<int> Value1, Vector64<int> Value2) LoadPairScalarVector64(int* address) => LoadPairScalarVector64(address);
/// <summary>
/// A64: LDP St1, St2, [Xn]
/// </summary>
public static unsafe (Vector64<float> Value1, Vector64<float> Value2) LoadPairScalarVector64(float* address) => LoadPairScalarVector64(address);
/// <summary>
/// A64: LDP St1, St2, [Xn]
/// </summary>
public static unsafe (Vector64<uint> Value1, Vector64<uint> Value2) LoadPairScalarVector64(uint* address) => LoadPairScalarVector64(address);
/// <summary>
/// A64: LDP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe (Vector128<byte> Value1, Vector128<byte> Value2) LoadPairVector128(byte* address) => LoadPairVector128(address);
/// <summary>
/// A64: LDP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe (Vector128<double> Value1, Vector128<double> Value2) LoadPairVector128(double* address) => LoadPairVector128(address);
/// <summary>
/// A64: LDP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe (Vector128<short> Value1, Vector128<short> Value2) LoadPairVector128(short* address) => LoadPairVector128(address);
/// <summary>
/// A64: LDP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe (Vector128<int> Value1, Vector128<int> Value2) LoadPairVector128(int* address) => LoadPairVector128(address);
/// <summary>
/// A64: LDP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe (Vector128<long> Value1, Vector128<long> Value2) LoadPairVector128(long* address) => LoadPairVector128(address);
/// <summary>
/// A64: LDP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe (Vector128<sbyte> Value1, Vector128<sbyte> Value2) LoadPairVector128(sbyte* address) => LoadPairVector128(address);
/// <summary>
/// A64: LDP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe (Vector128<float> Value1, Vector128<float> Value2) LoadPairVector128(float* address) => LoadPairVector128(address);
/// <summary>
/// A64: LDP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe (Vector128<ushort> Value1, Vector128<ushort> Value2) LoadPairVector128(ushort* address) => LoadPairVector128(address);
/// <summary>
/// A64: LDP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe (Vector128<uint> Value1, Vector128<uint> Value2) LoadPairVector128(uint* address) => LoadPairVector128(address);
/// <summary>
/// A64: LDP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe (Vector128<ulong> Value1, Vector128<ulong> Value2) LoadPairVector128(ulong* address) => LoadPairVector128(address);
/// <summary>
/// A64: LDNP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe (Vector64<byte> Value1, Vector64<byte> Value2) LoadPairVector64NonTemporal(byte* address) => LoadPairVector64NonTemporal(address);
/// <summary>
/// A64: LDNP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe (Vector64<double> Value1, Vector64<double> Value2) LoadPairVector64NonTemporal(double* address) => LoadPairVector64NonTemporal(address);
/// <summary>
/// A64: LDNP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe (Vector64<short> Value1, Vector64<short> Value2) LoadPairVector64NonTemporal(short* address) => LoadPairVector64NonTemporal(address);
/// <summary>
/// A64: LDNP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe (Vector64<int> Value1, Vector64<int> Value2) LoadPairVector64NonTemporal(int* address) => LoadPairVector64NonTemporal(address);
/// <summary>
/// A64: LDNP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe (Vector64<long> Value1, Vector64<long> Value2) LoadPairVector64NonTemporal(long* address) => LoadPairVector64NonTemporal(address);
/// <summary>
/// A64: LDNP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe (Vector64<sbyte> Value1, Vector64<sbyte> Value2) LoadPairVector64NonTemporal(sbyte* address) => LoadPairVector64NonTemporal(address);
/// <summary>
/// A64: LDNP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe (Vector64<float> Value1, Vector64<float> Value2) LoadPairVector64NonTemporal(float* address) => LoadPairVector64NonTemporal(address);
/// <summary>
/// A64: LDNP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe (Vector64<ushort> Value1, Vector64<ushort> Value2) LoadPairVector64NonTemporal(ushort* address) => LoadPairVector64NonTemporal(address);
/// <summary>
/// A64: LDNP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe (Vector64<uint> Value1, Vector64<uint> Value2) LoadPairVector64NonTemporal(uint* address) => LoadPairVector64NonTemporal(address);
/// <summary>
/// A64: LDNP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe (Vector64<ulong> Value1, Vector64<ulong> Value2) LoadPairVector64NonTemporal(ulong* address) => LoadPairVector64NonTemporal(address);
/// <summary>
/// A64: LDNP St1, St2, [Xn]
/// </summary>
public static unsafe (Vector64<int> Value1, Vector64<int> Value2) LoadPairScalarVector64NonTemporal(int* address) => LoadPairScalarVector64NonTemporal(address);
/// <summary>
/// A64: LDNP St1, St2, [Xn]
/// </summary>
public static unsafe (Vector64<float> Value1, Vector64<float> Value2) LoadPairScalarVector64NonTemporal(float* address) => LoadPairScalarVector64NonTemporal(address);
/// <summary>
/// A64: LDNP St1, St2, [Xn]
/// </summary>
public static unsafe (Vector64<uint> Value1, Vector64<uint> Value2) LoadPairScalarVector64NonTemporal(uint* address) => LoadPairScalarVector64NonTemporal(address);
/// <summary>
/// A64: LDNP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe (Vector128<byte> Value1, Vector128<byte> Value2) LoadPairVector128NonTemporal(byte* address) => LoadPairVector128NonTemporal(address);
/// <summary>
/// A64: LDNP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe (Vector128<double> Value1, Vector128<double> Value2) LoadPairVector128NonTemporal(double* address) => LoadPairVector128NonTemporal(address);
/// <summary>
/// A64: LDNP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe (Vector128<short> Value1, Vector128<short> Value2) LoadPairVector128NonTemporal(short* address) => LoadPairVector128NonTemporal(address);
/// <summary>
/// A64: LDNP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe (Vector128<int> Value1, Vector128<int> Value2) LoadPairVector128NonTemporal(int* address) => LoadPairVector128NonTemporal(address);
/// <summary>
/// A64: LDNP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe (Vector128<long> Value1, Vector128<long> Value2) LoadPairVector128NonTemporal(long* address) => LoadPairVector128NonTemporal(address);
/// <summary>
/// A64: LDNP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe (Vector128<sbyte> Value1, Vector128<sbyte> Value2) LoadPairVector128NonTemporal(sbyte* address) => LoadPairVector128NonTemporal(address);
/// <summary>
/// A64: LDNP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe (Vector128<float> Value1, Vector128<float> Value2) LoadPairVector128NonTemporal(float* address) => LoadPairVector128NonTemporal(address);
/// <summary>
/// A64: LDNP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe (Vector128<ushort> Value1, Vector128<ushort> Value2) LoadPairVector128NonTemporal(ushort* address) => LoadPairVector128NonTemporal(address);
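// Illustrative note (not part of the original source): the *NonTemporal variants
// (LDNP) load the same pair of values but carry a hint that the data is unlikely to
// be reused soon, allowing the core to limit cache pollution; the loaded results
// are identical to the temporal forms.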
/// <summary>
/// A64: LDNP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe (Vector128<uint> Value1, Vector128<uint> Value2) LoadPairVector128NonTemporal(uint* address) => LoadPairVector128NonTemporal(address);
/// <summary>
/// A64: LDNP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe (Vector128<ulong> Value1, Vector128<ulong> Value2) LoadPairVector128NonTemporal(ulong* address) => LoadPairVector128NonTemporal(address);
/// <summary>
/// float64x2_t vmaxq_f64 (float64x2_t a, float64x2_t b)
/// A64: FMAX Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<double> Max(Vector128<double> left, Vector128<double> right) => Max(left, right);
/// <summary>
/// uint8_t vmaxv_u8 (uint8x8_t a)
/// A64: UMAXV Bd, Vn.8B
/// </summary>
public static Vector64<byte> MaxAcross(Vector64<byte> value) => MaxAcross(value);
/// <summary>
/// int16_t vmaxv_s16 (int16x4_t a)
/// A64: SMAXV Hd, Vn.4H
/// </summary>
public static Vector64<short> MaxAcross(Vector64<short> value) => MaxAcross(value);
/// <summary>
/// int8_t vmaxv_s8 (int8x8_t a)
/// A64: SMAXV Bd, Vn.8B
/// </summary>
public static Vector64<sbyte> MaxAcross(Vector64<sbyte> value) => MaxAcross(value);
/// <summary>
/// uint16_t vmaxv_u16 (uint16x4_t a)
/// A64: UMAXV Hd, Vn.4H
/// </summary>
public static Vector64<ushort> MaxAcross(Vector64<ushort> value) => MaxAcross(value);
/// <summary>
/// uint8_t vmaxvq_u8 (uint8x16_t a)
/// A64: UMAXV Bd, Vn.16B
/// </summary>
public static Vector64<byte> MaxAcross(Vector128<byte> value) => MaxAcross(value);
/// <summary>
/// int16_t vmaxvq_s16 (int16x8_t a)
/// A64: SMAXV Hd, Vn.8H
/// </summary>
public static Vector64<short> MaxAcross(Vector128<short> value) => MaxAcross(value);
/// <summary>
/// int32_t vmaxvq_s32 (int32x4_t a)
/// A64: SMAXV Sd, Vn.4S
/// </summary>
public static Vector64<int> MaxAcross(Vector128<int> value) => MaxAcross(value);
/// <summary>
/// int8_t vmaxvq_s8 (int8x16_t a)
/// A64: SMAXV Bd, Vn.16B
/// </summary>
public static Vector64<sbyte> MaxAcross(Vector128<sbyte> value) => MaxAcross(value);
/// <summary>
/// float32_t vmaxvq_f32 (float32x4_t a)
/// A64: FMAXV Sd, Vn.4S
/// </summary>
public static Vector64<float> MaxAcross(Vector128<float> value) => MaxAcross(value);
/// <summary>
/// uint16_t vmaxvq_u16 (uint16x8_t a)
/// A64: UMAXV Hd, Vn.8H
/// </summary>
public static Vector64<ushort> MaxAcross(Vector128<ushort> value) => MaxAcross(value);
/// <summary>
/// uint32_t vmaxvq_u32 (uint32x4_t a)
/// A64: UMAXV Sd, Vn.4S
/// </summary>
public static Vector64<uint> MaxAcross(Vector128<uint> value) => MaxAcross(value);
/// <summary>
/// float64x2_t vmaxnmq_f64 (float64x2_t a, float64x2_t b)
/// A64: FMAXNM Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<double> MaxNumber(Vector128<double> left, Vector128<double> right) => MaxNumber(left, right);
/// <summary>
/// float32_t vmaxnmvq_f32 (float32x4_t a)
/// A64: FMAXNMV Sd, Vn.4S
/// </summary>
public static Vector64<float> MaxNumberAcross(Vector128<float> value) => MaxNumberAcross(value);
/// <summary>
/// float32x2_t vpmaxnm_f32 (float32x2_t a, float32x2_t b)
/// A64: FMAXNMP Vd.2S, Vn.2S, Vm.2S
/// </summary>
public static Vector64<float> MaxNumberPairwise(Vector64<float> left, Vector64<float> right) => MaxNumberPairwise(left, right);
/// <summary>
/// float64x2_t vpmaxnmq_f64 (float64x2_t a, float64x2_t b)
/// A64: FMAXNMP Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<double> MaxNumberPairwise(Vector128<double> left, Vector128<double> right) => MaxNumberPairwise(left, right);
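// Illustrative note (not part of the original source): MaxAcross performs a
// horizontal reduction, leaving the maximum lane value in element 0 of the result,
// e.g. MaxAcross applied to the int32x4 values {3, 9, 4, 1} places 9 in element 0.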
/// <summary>
/// float32x4_t vpmaxnmq_f32 (float32x4_t a, float32x4_t b)
/// A64: FMAXNMP Vd.4S, Vn.4S, Vm.4S
/// </summary>
public static Vector128<float> MaxNumberPairwise(Vector128<float> left, Vector128<float> right) => MaxNumberPairwise(left, right);
/// <summary>
/// float32_t vpmaxnms_f32 (float32x2_t a)
/// A64: FMAXNMP Sd, Vn.2S
/// </summary>
public static Vector64<float> MaxNumberPairwiseScalar(Vector64<float> value) => MaxNumberPairwiseScalar(value);
/// <summary>
/// float64_t vpmaxnmqd_f64 (float64x2_t a)
/// A64: FMAXNMP Dd, Vn.2D
/// </summary>
public static Vector64<double> MaxNumberPairwiseScalar(Vector128<double> value) => MaxNumberPairwiseScalar(value);
/// <summary>
/// uint8x16_t vpmaxq_u8 (uint8x16_t a, uint8x16_t b)
/// A64: UMAXP Vd.16B, Vn.16B, Vm.16B
/// </summary>
public static Vector128<byte> MaxPairwise(Vector128<byte> left, Vector128<byte> right) => MaxPairwise(left, right);
/// <summary>
/// float64x2_t vpmaxq_f64 (float64x2_t a, float64x2_t b)
/// A64: FMAXP Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<double> MaxPairwise(Vector128<double> left, Vector128<double> right) => MaxPairwise(left, right);
/// <summary>
/// int16x8_t vpmaxq_s16 (int16x8_t a, int16x8_t b)
/// A64: SMAXP Vd.8H, Vn.8H, Vm.8H
/// </summary>
public static Vector128<short> MaxPairwise(Vector128<short> left, Vector128<short> right) => MaxPairwise(left, right);
/// <summary>
/// int32x4_t vpmaxq_s32 (int32x4_t a, int32x4_t b)
/// A64: SMAXP Vd.4S, Vn.4S, Vm.4S
/// </summary>
public static Vector128<int> MaxPairwise(Vector128<int> left, Vector128<int> right) => MaxPairwise(left, right);
/// <summary>
/// int8x16_t vpmaxq_s8 (int8x16_t a, int8x16_t b)
/// A64: SMAXP Vd.16B, Vn.16B, Vm.16B
/// </summary>
public static Vector128<sbyte> MaxPairwise(Vector128<sbyte> left, Vector128<sbyte> right) => MaxPairwise(left, right);
/// <summary>
/// float32x4_t vpmaxq_f32 (float32x4_t a, float32x4_t b)
/// A64: FMAXP Vd.4S, Vn.4S, Vm.4S
/// </summary>
public static Vector128<float> MaxPairwise(Vector128<float> left, Vector128<float> right) => MaxPairwise(left, right);
/// <summary>
/// uint16x8_t vpmaxq_u16 (uint16x8_t a, uint16x8_t b)
/// A64: UMAXP Vd.8H, Vn.8H, Vm.8H
/// </summary>
public static Vector128<ushort> MaxPairwise(Vector128<ushort> left, Vector128<ushort> right) => MaxPairwise(left, right);
/// <summary>
/// uint32x4_t vpmaxq_u32 (uint32x4_t a, uint32x4_t b)
/// A64: UMAXP Vd.4S, Vn.4S, Vm.4S
/// </summary>
public static Vector128<uint> MaxPairwise(Vector128<uint> left, Vector128<uint> right) => MaxPairwise(left, right);
/// <summary>
/// float32_t vpmaxs_f32 (float32x2_t a)
/// A64: FMAXP Sd, Vn.2S
/// </summary>
public static Vector64<float> MaxPairwiseScalar(Vector64<float> value) => MaxPairwiseScalar(value);
/// <summary>
/// float64_t vpmaxqd_f64 (float64x2_t a)
/// A64: FMAXP Dd, Vn.2D
/// </summary>
public static Vector64<double> MaxPairwiseScalar(Vector128<double> value) => MaxPairwiseScalar(value);
/// <summary>
/// float64x1_t vmax_f64 (float64x1_t a, float64x1_t b)
/// A64: FMAX Dd, Dn, Dm
/// </summary>
public static Vector64<double> MaxScalar(Vector64<double> left, Vector64<double> right) => MaxScalar(left, right);
/// <summary>
/// float32_t vmaxs_f32 (float32_t a, float32_t b)
/// A64: FMAX Sd, Sn, Sm
/// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
/// </summary>
public static Vector64<float> MaxScalar(Vector64<float> left, Vector64<float> right) => MaxScalar(left, right);
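
// Usage sketch (illustrative only): MaxAcross is a horizontal reduction that
// returns the largest lane in lane 0 of the result, while MaxPairwise takes the
// maximum of adjacent lane pairs drawn from both inputs. For example:
//
//     Vector128<int> v = Vector128.Create(3, 42, -7, 19);
//     int max = AdvSimd.Arm64.MaxAcross(v).ToScalar();   // 42 (SMAXV)
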
/// <summary>
/// float64x2_t vminq_f64 (float64x2_t a, float64x2_t b)
/// A64: FMIN Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<double> Min(Vector128<double> left, Vector128<double> right) => Min(left, right);
/// <summary>
/// uint8_t vminv_u8 (uint8x8_t a)
/// A64: UMINV Bd, Vn.8B
/// </summary>
public static Vector64<byte> MinAcross(Vector64<byte> value) => MinAcross(value);
/// <summary>
/// int16_t vminv_s16 (int16x4_t a)
/// A64: SMINV Hd, Vn.4H
/// </summary>
public static Vector64<short> MinAcross(Vector64<short> value) => MinAcross(value);
/// <summary>
/// int8_t vminv_s8 (int8x8_t a)
/// A64: SMINV Bd, Vn.8B
/// </summary>
public static Vector64<sbyte> MinAcross(Vector64<sbyte> value) => MinAcross(value);
/// <summary>
/// uint16_t vminv_u16 (uint16x4_t a)
/// A64: UMINV Hd, Vn.4H
/// </summary>
public static Vector64<ushort> MinAcross(Vector64<ushort> value) => MinAcross(value);
/// <summary>
/// uint8_t vminvq_u8 (uint8x16_t a)
/// A64: UMINV Bd, Vn.16B
/// </summary>
public static Vector64<byte> MinAcross(Vector128<byte> value) => MinAcross(value);
/// <summary>
/// int16_t vminvq_s16 (int16x8_t a)
/// A64: SMINV Hd, Vn.8H
/// </summary>
public static Vector64<short> MinAcross(Vector128<short> value) => MinAcross(value);
/// <summary>
/// int32_t vminvq_s32 (int32x4_t a)
/// A64: SMINV Sd, Vn.4S
/// </summary>
public static Vector64<int> MinAcross(Vector128<int> value) => MinAcross(value);
/// <summary>
/// int8_t vminvq_s8 (int8x16_t a)
/// A64: SMINV Bd, Vn.16B
/// </summary>
public static Vector64<sbyte> MinAcross(Vector128<sbyte> value) => MinAcross(value);
/// <summary>
/// float32_t vminvq_f32 (float32x4_t a)
/// A64: FMINV Sd, Vn.4S
/// </summary>
public static Vector64<float> MinAcross(Vector128<float> value) => MinAcross(value);
/// <summary>
/// uint16_t vminvq_u16 (uint16x8_t a)
/// A64: UMINV Hd, Vn.8H
/// </summary>
public static Vector64<ushort> MinAcross(Vector128<ushort> value) => MinAcross(value);
/// <summary>
/// uint32_t vminvq_u32 (uint32x4_t a)
/// A64: UMINV Sd, Vn.4S
/// </summary>
public static Vector64<uint> MinAcross(Vector128<uint> value) => MinAcross(value);
/// <summary>
/// float64x2_t vminnmq_f64 (float64x2_t a, float64x2_t b)
/// A64: FMINNM Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<double> MinNumber(Vector128<double> left, Vector128<double> right) => MinNumber(left, right);
/// <summary>
/// float32_t vminnmvq_f32 (float32x4_t a)
/// A64: FMINNMV Sd, Vn.4S
/// </summary>
public static Vector64<float> MinNumberAcross(Vector128<float> value) => MinNumberAcross(value);
/// <summary>
/// float32x2_t vpminnm_f32 (float32x2_t a, float32x2_t b)
/// A64: FMINNMP Vd.2S, Vn.2S, Vm.2S
/// </summary>
public static Vector64<float> MinNumberPairwise(Vector64<float> left, Vector64<float> right) => MinNumberPairwise(left, right);
/// <summary>
/// float64x2_t vpminnmq_f64 (float64x2_t a, float64x2_t b)
/// A64: FMINNMP Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<double> MinNumberPairwise(Vector128<double> left, Vector128<double> right) => MinNumberPairwise(left, right);
/// <summary>
/// float32x4_t vpminnmq_f32 (float32x4_t a, float32x4_t b)
/// A64: FMINNMP Vd.4S, Vn.4S, Vm.4S
/// </summary>
public static Vector128<float> MinNumberPairwise(Vector128<float> left, Vector128<float> right) => MinNumberPairwise(left, right);
/// <summary>
/// float32_t vpminnms_f32 (float32x2_t a)
/// A64: FMINNMP Sd, Vn.2S
/// </summary>
public static Vector64<float> MinNumberPairwiseScalar(Vector64<float> value) => MinNumberPairwiseScalar(value);
/// <summary>
/// float64_t vpminnmqd_f64 (float64x2_t a)
/// A64: FMINNMP Dd, Vn.2D
/// </summary>
public static Vector64<double> MinNumberPairwiseScalar(Vector128<double> value) => MinNumberPairwiseScalar(value);
/// <summary>
/// uint8x16_t vpminq_u8 (uint8x16_t a, uint8x16_t b)
/// A64: UMINP Vd.16B, Vn.16B, Vm.16B
/// </summary>
public static Vector128<byte> MinPairwise(Vector128<byte> left, Vector128<byte> right) => MinPairwise(left, right);
/// <summary>
/// float64x2_t vpminq_f64 (float64x2_t a, float64x2_t b)
/// A64: FMINP Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<double> MinPairwise(Vector128<double> left, Vector128<double> right) => MinPairwise(left, right);
/// <summary>
/// int16x8_t vpminq_s16 (int16x8_t a, int16x8_t b)
/// A64: SMINP Vd.8H, Vn.8H, Vm.8H
/// </summary>
public static Vector128<short> MinPairwise(Vector128<short> left, Vector128<short> right) => MinPairwise(left, right);
/// <summary>
/// int32x4_t vpminq_s32 (int32x4_t a, int32x4_t b)
/// A64: SMINP Vd.4S, Vn.4S, Vm.4S
/// </summary>
public static Vector128<int> MinPairwise(Vector128<int> left, Vector128<int> right) => MinPairwise(left, right);
/// <summary>
/// int8x16_t vpminq_s8 (int8x16_t a, int8x16_t b)
/// A64: SMINP Vd.16B, Vn.16B, Vm.16B
/// </summary>
public static Vector128<sbyte> MinPairwise(Vector128<sbyte> left, Vector128<sbyte> right) => MinPairwise(left, right);
/// <summary>
/// float32x4_t vpminq_f32 (float32x4_t a, float32x4_t b)
/// A64: FMINP Vd.4S, Vn.4S, Vm.4S
/// </summary>
public static Vector128<float> MinPairwise(Vector128<float> left, Vector128<float> right) => MinPairwise(left, right);
/// <summary>
/// uint16x8_t vpminq_u16 (uint16x8_t a, uint16x8_t b)
/// A64: UMINP Vd.8H, Vn.8H, Vm.8H
/// </summary>
public static Vector128<ushort> MinPairwise(Vector128<ushort> left, Vector128<ushort> right) => MinPairwise(left, right);
/// <summary>
/// uint32x4_t vpminq_u32 (uint32x4_t a, uint32x4_t b)
/// A64: UMINP Vd.4S, Vn.4S, Vm.4S
/// </summary>
public static Vector128<uint> MinPairwise(Vector128<uint> left, Vector128<uint> right) => MinPairwise(left, right);
/// <summary>
/// float32_t vpmins_f32 (float32x2_t a)
/// A64: FMINP Sd, Vn.2S
/// </summary>
public static Vector64<float> MinPairwiseScalar(Vector64<float> value) => MinPairwiseScalar(value);
/// <summary>
/// float64_t vpminqd_f64 (float64x2_t a)
/// A64: FMINP Dd, Vn.2D
/// </summary>
public static Vector64<double> MinPairwiseScalar(Vector128<double> value) => MinPairwiseScalar(value);
/// <summary>
/// float64x1_t vmin_f64 (float64x1_t a, float64x1_t b)
/// A64: FMIN Dd, Dn, Dm
/// </summary>
public static Vector64<double> MinScalar(Vector64<double> left, Vector64<double> right) => MinScalar(left, right);
/// <summary>
/// float32_t vmins_f32 (float32_t a, float32_t b)
/// A64: FMIN Sd, Sn, Sm
/// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
/// </summary>
public static Vector64<float> MinScalar(Vector64<float> left, Vector64<float> right) => MinScalar(left, right);
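
// Note on Min vs. MinNumber (and the Max counterparts): FMIN/FMAX propagate NaN
// (min(NaN, x) == NaN), whereas FMINNM/FMAXNM follow the IEEE 754 minNum/maxNum
// rules and prefer the numeric operand when exactly one input is NaN.
// Illustrative sketch:
//
//     var a = Vector128.Create(double.NaN, 1.0);
//     var b = Vector128.Create(2.0, 2.0);
//     AdvSimd.Arm64.Min(a, b);        // <NaN, 1.0>
//     AdvSimd.Arm64.MinNumber(a, b);  // <2.0, 1.0>
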
/// <summary>
/// float64x2_t vmulq_f64 (float64x2_t a, float64x2_t b)
/// A64: FMUL Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<double> Multiply(Vector128<double> left, Vector128<double> right) => Multiply(left, right);
/// <summary>
/// float64x2_t vmulq_n_f64 (float64x2_t a, float64_t b)
/// A64: FMUL Vd.2D, Vn.2D, Vm.D[0]
/// </summary>
public static Vector128<double> MultiplyByScalar(Vector128<double> left, Vector64<double> right) => MultiplyByScalar(left, right);
/// <summary>
/// float64x2_t vmulq_laneq_f64 (float64x2_t a, float64x2_t v, const int lane)
/// A64: FMUL Vd.2D, Vn.2D, Vm.D[lane]
/// </summary>
public static Vector128<double> MultiplyBySelectedScalar(Vector128<double> left, Vector128<double> right, byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);
/// <summary>
/// int16_t vqdmulhh_s16 (int16_t a, int16_t b)
/// A64: SQDMULH Hd, Hn, Hm
/// </summary>
public static Vector64<short> MultiplyDoublingSaturateHighScalar(Vector64<short> left, Vector64<short> right) => MultiplyDoublingSaturateHighScalar(left, right);
/// <summary>
/// int32_t vqdmulhs_s32 (int32_t a, int32_t b)
/// A64: SQDMULH Sd, Sn, Sm
/// </summary>
public static Vector64<int> MultiplyDoublingSaturateHighScalar(Vector64<int> left, Vector64<int> right) => MultiplyDoublingSaturateHighScalar(left, right);
/// <summary>
/// int16_t vqdmulhh_lane_s16 (int16_t a, int16x4_t v, const int lane)
/// A64: SQDMULH Hd, Hn, Vm.H[lane]
/// </summary>
public static Vector64<short> MultiplyDoublingScalarBySelectedScalarSaturateHigh(Vector64<short> left, Vector64<short> right, byte rightIndex) => MultiplyDoublingScalarBySelectedScalarSaturateHigh(left, right, rightIndex);
/// <summary>
/// int16_t vqdmulhh_laneq_s16 (int16_t a, int16x8_t v, const int lane)
/// A64: SQDMULH Hd, Hn, Vm.H[lane]
/// </summary>
public static Vector64<short> MultiplyDoublingScalarBySelectedScalarSaturateHigh(Vector64<short> left, Vector128<short> right, byte rightIndex) => MultiplyDoublingScalarBySelectedScalarSaturateHigh(left, right, rightIndex);
/// <summary>
/// int32_t vqdmulhs_lane_s32 (int32_t a, int32x2_t v, const int lane)
/// A64: SQDMULH Sd, Sn, Vm.S[lane]
/// </summary>
public static Vector64<int> MultiplyDoublingScalarBySelectedScalarSaturateHigh(Vector64<int> left, Vector64<int> right, byte rightIndex) => MultiplyDoublingScalarBySelectedScalarSaturateHigh(left, right, rightIndex);
/// <summary>
/// int32_t vqdmulhs_laneq_s32 (int32_t a, int32x4_t v, const int lane)
/// A64: SQDMULH Sd, Sn, Vm.S[lane]
/// </summary>
public static Vector64<int> MultiplyDoublingScalarBySelectedScalarSaturateHigh(Vector64<int> left, Vector128<int> right, byte rightIndex) => MultiplyDoublingScalarBySelectedScalarSaturateHigh(left, right, rightIndex);
/// <summary>
/// int32_t vqdmlalh_s16 (int32_t a, int16_t b, int16_t c)
/// A64: SQDMLAL Sd, Hn, Hm
/// </summary>
public static Vector64<int> MultiplyDoublingWideningAndAddSaturateScalar(Vector64<int> addend, Vector64<short> left, Vector64<short> right) => MultiplyDoublingWideningAndAddSaturateScalar(addend, left, right);
/// <summary>
/// int64_t vqdmlals_s32 (int64_t a, int32_t b, int32_t c)
/// A64: SQDMLAL Dd, Sn, Sm
/// </summary>
public static Vector64<long> MultiplyDoublingWideningAndAddSaturateScalar(Vector64<long> addend, Vector64<int> left, Vector64<int> right) => MultiplyDoublingWideningAndAddSaturateScalar(addend, left, right);
/// <summary>
/// int32_t vqdmlslh_s16 (int32_t a, int16_t b, int16_t c)
/// A64: SQDMLSL Sd, Hn, Hm
/// </summary>
public static Vector64<int> MultiplyDoublingWideningAndSubtractSaturateScalar(Vector64<int> minuend, Vector64<short> left, Vector64<short> right) => MultiplyDoublingWideningAndSubtractSaturateScalar(minuend, left, right);
/// <summary>
/// int64_t vqdmlsls_s32 (int64_t a, int32_t b, int32_t c)
/// A64: SQDMLSL Dd, Sn, Sm
/// </summary>
public static Vector64<long> MultiplyDoublingWideningAndSubtractSaturateScalar(Vector64<long> minuend, Vector64<int> left, Vector64<int> right) => MultiplyDoublingWideningAndSubtractSaturateScalar(minuend, left, right);
/// <summary>
/// int32_t vqdmullh_s16 (int16_t a, int16_t b)
/// A64: SQDMULL Sd, Hn, Hm
/// </summary>
public static Vector64<int> MultiplyDoublingWideningSaturateScalar(Vector64<short> left, Vector64<short> right) => MultiplyDoublingWideningSaturateScalar(left, right);
/// <summary>
/// int64_t vqdmulls_s32 (int32_t a, int32_t b)
/// A64: SQDMULL Dd, Sn, Sm
/// </summary>
public static Vector64<long> MultiplyDoublingWideningSaturateScalar(Vector64<int> left, Vector64<int> right) => MultiplyDoublingWideningSaturateScalar(left, right);
/// <summary>
/// int32_t vqdmullh_lane_s16 (int16_t a, int16x4_t v, const int lane)
/// A64: SQDMULL Sd, Hn, Vm.H[lane]
/// </summary>
public static Vector64<int> MultiplyDoublingWideningSaturateScalarBySelectedScalar(Vector64<short> left, Vector64<short> right, byte rightIndex) => MultiplyDoublingWideningSaturateScalarBySelectedScalar(left, right, rightIndex);
/// <summary>
/// int32_t vqdmullh_laneq_s16 (int16_t a, int16x8_t v, const int lane)
/// A64: SQDMULL Sd, Hn, Vm.H[lane]
/// </summary>
public static Vector64<int> MultiplyDoublingWideningSaturateScalarBySelectedScalar(Vector64<short> left, Vector128<short> right, byte rightIndex) => MultiplyDoublingWideningSaturateScalarBySelectedScalar(left, right, rightIndex);
/// <summary>
/// int64_t vqdmulls_lane_s32 (int32_t a, int32x2_t v, const int lane)
/// A64: SQDMULL Dd, Sn, Vm.S[lane]
/// </summary>
public static Vector64<long> MultiplyDoublingWideningSaturateScalarBySelectedScalar(Vector64<int> left, Vector64<int> right, byte rightIndex) => MultiplyDoublingWideningSaturateScalarBySelectedScalar(left, right, rightIndex);
/// <summary>
/// int64_t vqdmulls_laneq_s32 (int32_t a, int32x4_t v, const int lane)
/// A64: SQDMULL Dd, Sn, Vm.S[lane]
/// </summary>
public static Vector64<long> MultiplyDoublingWideningSaturateScalarBySelectedScalar(Vector64<int> left, Vector128<int> right, byte rightIndex) => MultiplyDoublingWideningSaturateScalarBySelectedScalar(left, right, rightIndex);
/// <summary>
/// int32_t vqdmlalh_lane_s16 (int32_t a, int16_t b, int16x4_t v, const int lane)
/// A64: SQDMLAL Sd, Hn, Vm.H[lane]
/// </summary>
public static Vector64<int> MultiplyDoublingWideningScalarBySelectedScalarAndAddSaturate(Vector64<int> addend, Vector64<short> left, Vector64<short> right, byte rightIndex) => MultiplyDoublingWideningScalarBySelectedScalarAndAddSaturate(addend, left, right, rightIndex);
/// <summary>
/// int32_t vqdmlalh_laneq_s16 (int32_t a, int16_t b, int16x8_t v, const int lane)
/// A64: SQDMLAL Sd, Hn, Vm.H[lane]
/// </summary>
public static Vector64<int> MultiplyDoublingWideningScalarBySelectedScalarAndAddSaturate(Vector64<int> addend, Vector64<short> left, Vector128<short> right, byte rightIndex) => MultiplyDoublingWideningScalarBySelectedScalarAndAddSaturate(addend, left, right, rightIndex);
/// <summary>
/// int64_t vqdmlals_lane_s32 (int64_t a, int32_t b, int32x2_t v, const int lane)
/// A64: SQDMLAL Dd, Sn, Vm.S[lane]
/// </summary>
public static Vector64<long> MultiplyDoublingWideningScalarBySelectedScalarAndAddSaturate(Vector64<long> addend, Vector64<int> left, Vector64<int> right, byte rightIndex) => MultiplyDoublingWideningScalarBySelectedScalarAndAddSaturate(addend, left, right, rightIndex);
/// <summary>
/// int64_t vqdmlals_laneq_s32 (int64_t a, int32_t b, int32x4_t v, const int lane)
/// A64: SQDMLAL Dd, Sn, Vm.S[lane]
/// </summary>
public static Vector64<long> MultiplyDoublingWideningScalarBySelectedScalarAndAddSaturate(Vector64<long> addend, Vector64<int> left, Vector128<int> right, byte rightIndex) => MultiplyDoublingWideningScalarBySelectedScalarAndAddSaturate(addend, left, right, rightIndex);
/// <summary>
/// int32_t vqdmlslh_lane_s16 (int32_t a, int16_t b, int16x4_t v, const int lane)
/// A64: SQDMLSL Sd, Hn, Vm.H[lane]
/// </summary>
public static Vector64<int> MultiplyDoublingWideningScalarBySelectedScalarAndSubtractSaturate(Vector64<int> minuend, Vector64<short> left, Vector64<short> right, byte rightIndex) => MultiplyDoublingWideningScalarBySelectedScalarAndSubtractSaturate(minuend, left, right, rightIndex);
/// <summary>
/// int32_t vqdmlslh_laneq_s16 (int32_t a, int16_t b, int16x8_t v, const int lane)
/// A64: SQDMLSL Sd, Hn, Vm.H[lane]
/// </summary>
public static Vector64<int> MultiplyDoublingWideningScalarBySelectedScalarAndSubtractSaturate(Vector64<int> minuend, Vector64<short> left, Vector128<short> right, byte rightIndex) => MultiplyDoublingWideningScalarBySelectedScalarAndSubtractSaturate(minuend, left, right, rightIndex);
/// <summary>
/// int64_t vqdmlsls_lane_s32 (int64_t a, int32_t b, int32x2_t v, const int lane)
/// A64: SQDMLSL Dd, Sn, Vm.S[lane]
/// </summary>
public static Vector64<long> MultiplyDoublingWideningScalarBySelectedScalarAndSubtractSaturate(Vector64<long> minuend, Vector64<int> left, Vector64<int> right, byte rightIndex) => MultiplyDoublingWideningScalarBySelectedScalarAndSubtractSaturate(minuend, left, right, rightIndex);
/// <summary>
/// int64_t vqdmlsls_laneq_s32 (int64_t a, int32_t b, int32x4_t v, const int lane)
/// A64: SQDMLSL Dd, Sn, Vm.S[lane]
/// </summary>
public static Vector64<long> MultiplyDoublingWideningScalarBySelectedScalarAndSubtractSaturate(Vector64<long> minuend, Vector64<int> left, Vector128<int> right, byte rightIndex) => MultiplyDoublingWideningScalarBySelectedScalarAndSubtractSaturate(minuend, left, right, rightIndex);
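
// Semantics sketch (illustrative only): the "doubling widening saturate" forms
// compute saturate(2 * a * b) at twice the input element width, e.g.:
//
//     var a = Vector64.CreateScalar((short)3);
//     var b = Vector64.CreateScalar((short)4);
//     int r = AdvSimd.Arm64.MultiplyDoublingWideningSaturateScalar(a, b).ToScalar(); // 2 * 3 * 4 = 24
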
/// <summary>
/// float32x2_t vmulx_f32 (float32x2_t a, float32x2_t b)
/// A64: FMULX Vd.2S, Vn.2S, Vm.2S
/// </summary>
public static Vector64<float> MultiplyExtended(Vector64<float> left, Vector64<float> right) => MultiplyExtended(left, right);
/// <summary>
/// float64x2_t vmulxq_f64 (float64x2_t a, float64x2_t b)
/// A64: FMULX Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<double> MultiplyExtended(Vector128<double> left, Vector128<double> right) => MultiplyExtended(left, right);
/// <summary>
/// float32x4_t vmulxq_f32 (float32x4_t a, float32x4_t b)
/// A64: FMULX Vd.4S, Vn.4S, Vm.4S
/// </summary>
public static Vector128<float> MultiplyExtended(Vector128<float> left, Vector128<float> right) => MultiplyExtended(left, right);
/// <summary>
/// float64x2_t vmulxq_lane_f64 (float64x2_t a, float64x1_t v, const int lane)
/// A64: FMULX Vd.2D, Vn.2D, Vm.D[0]
/// </summary>
public static Vector128<double> MultiplyExtendedByScalar(Vector128<double> left, Vector64<double> right) => MultiplyExtendedByScalar(left, right);
/// <summary>
/// float32x2_t vmulx_lane_f32 (float32x2_t a, float32x2_t v, const int lane)
/// A64: FMULX Vd.2S, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector64<float> MultiplyExtendedBySelectedScalar(Vector64<float> left, Vector64<float> right, byte rightIndex) => MultiplyExtendedBySelectedScalar(left, right, rightIndex);
/// <summary>
/// float32x2_t vmulx_laneq_f32 (float32x2_t a, float32x4_t v, const int lane)
/// A64: FMULX Vd.2S, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector64<float> MultiplyExtendedBySelectedScalar(Vector64<float> left, Vector128<float> right, byte rightIndex) => MultiplyExtendedBySelectedScalar(left, right, rightIndex);
/// <summary>
/// float64x2_t vmulxq_laneq_f64 (float64x2_t a, float64x2_t v, const int lane)
/// A64: FMULX Vd.2D, Vn.2D, Vm.D[lane]
/// </summary>
public static Vector128<double> MultiplyExtendedBySelectedScalar(Vector128<double> left, Vector128<double> right, byte rightIndex) => MultiplyExtendedBySelectedScalar(left, right, rightIndex);
/// <summary>
/// float32x4_t vmulxq_lane_f32 (float32x4_t a, float32x2_t v, const int lane)
/// A64: FMULX Vd.4S, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<float> MultiplyExtendedBySelectedScalar(Vector128<float> left, Vector64<float> right, byte rightIndex) => MultiplyExtendedBySelectedScalar(left, right, rightIndex);
/// <summary>
/// float32x4_t vmulxq_laneq_f32 (float32x4_t a, float32x4_t v, const int lane)
/// A64: FMULX Vd.4S, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<float> MultiplyExtendedBySelectedScalar(Vector128<float> left, Vector128<float> right, byte rightIndex) => MultiplyExtendedBySelectedScalar(left, right, rightIndex);
/// <summary>
/// float64x1_t vmulx_f64 (float64x1_t a, float64x1_t b)
/// A64: FMULX Dd, Dn, Dm
/// </summary>
public static Vector64<double> MultiplyExtendedScalar(Vector64<double> left, Vector64<double> right) => MultiplyExtendedScalar(left, right);
/// <summary>
/// float32_t vmulxs_f32 (float32_t a, float32_t b)
/// A64: FMULX Sd, Sn, Sm
/// </summary>
public static Vector64<float> MultiplyExtendedScalar(Vector64<float> left, Vector64<float> right) => MultiplyExtendedScalar(left, right);
/// <summary>
/// float64_t vmulxd_laneq_f64 (float64_t a, float64x2_t v, const int lane)
/// A64: FMULX Dd, Dn, Vm.D[lane]
/// </summary>
public static Vector64<double> MultiplyExtendedScalarBySelectedScalar(Vector64<double> left, Vector128<double> right, byte rightIndex) => MultiplyExtendedScalarBySelectedScalar(left, right, rightIndex);
/// <summary>
/// float32_t vmulxs_lane_f32 (float32_t a, float32x2_t v, const int lane)
/// A64: FMULX Sd, Sn, Vm.S[lane]
/// </summary>
public static Vector64<float> MultiplyExtendedScalarBySelectedScalar(Vector64<float> left, Vector64<float> right, byte rightIndex) => MultiplyExtendedScalarBySelectedScalar(left, right, rightIndex);
/// <summary>
/// float32_t vmulxs_laneq_f32 (float32_t a, float32x4_t v, const int lane)
/// A64: FMULX Sd, Sn, Vm.S[lane]
/// </summary>
public static Vector64<float> MultiplyExtendedScalarBySelectedScalar(Vector64<float> left, Vector128<float> right, byte rightIndex) => MultiplyExtendedScalarBySelectedScalar(left, right, rightIndex);
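
// Note on MultiplyExtended (FMULX): it behaves like an ordinary floating-point
// multiply except that (±0) * (±infinity) yields ±2.0 instead of NaN, a special
// case that reciprocal-step style algorithms rely on. Illustrative sketch:
//
//     var zero = Vector64.CreateScalar(0.0f);
//     var inf  = Vector64.CreateScalar(float.PositiveInfinity);
//     float r = AdvSimd.Arm64.MultiplyExtendedScalar(zero, inf).ToScalar(); // 2.0f, not NaN
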
/// <summary>
/// int16_t vqrdmulhh_s16 (int16_t a, int16_t b)
/// A64: SQRDMULH Hd, Hn, Hm
/// </summary>
public static Vector64<short> MultiplyRoundedDoublingSaturateHighScalar(Vector64<short> left, Vector64<short> right) => MultiplyRoundedDoublingSaturateHighScalar(left, right);
/// <summary>
/// int32_t vqrdmulhs_s32 (int32_t a, int32_t b)
/// A64: SQRDMULH Sd, Sn, Sm
/// </summary>
public static Vector64<int> MultiplyRoundedDoublingSaturateHighScalar(Vector64<int> left, Vector64<int> right) => MultiplyRoundedDoublingSaturateHighScalar(left, right);
/// <summary>
/// int16_t vqrdmulhh_lane_s16 (int16_t a, int16x4_t v, const int lane)
/// A64: SQRDMULH Hd, Hn, Vm.H[lane]
/// </summary>
public static Vector64<short> MultiplyRoundedDoublingScalarBySelectedScalarSaturateHigh(Vector64<short> left, Vector64<short> right, byte rightIndex) => MultiplyRoundedDoublingScalarBySelectedScalarSaturateHigh(left, right, rightIndex);
/// <summary>
/// int16_t vqrdmulhh_laneq_s16 (int16_t a, int16x8_t v, const int lane)
/// A64: SQRDMULH Hd, Hn, Vm.H[lane]
/// </summary>
public static Vector64<short> MultiplyRoundedDoublingScalarBySelectedScalarSaturateHigh(Vector64<short> left, Vector128<short> right, byte rightIndex) => MultiplyRoundedDoublingScalarBySelectedScalarSaturateHigh(left, right, rightIndex);
/// <summary>
/// int32_t vqrdmulhs_lane_s32 (int32_t a, int32x2_t v, const int lane)
/// A64: SQRDMULH Sd, Sn, Vm.S[lane]
/// </summary>
public static Vector64<int> MultiplyRoundedDoublingScalarBySelectedScalarSaturateHigh(Vector64<int> left, Vector64<int> right, byte rightIndex) => MultiplyRoundedDoublingScalarBySelectedScalarSaturateHigh(left, right, rightIndex);
/// <summary>
/// int32_t vqrdmulhs_laneq_s32 (int32_t a, int32x4_t v, const int lane)
/// A64: SQRDMULH Sd, Sn, Vm.S[lane]
/// </summary>
public static Vector64<int> MultiplyRoundedDoublingScalarBySelectedScalarSaturateHigh(Vector64<int> left, Vector128<int> right, byte rightIndex) => MultiplyRoundedDoublingScalarBySelectedScalarSaturateHigh(left, right, rightIndex);
/// <summary>
/// float64_t vmuld_laneq_f64 (float64_t a, float64x2_t v, const int lane)
/// A64: FMUL Dd, Dn, Vm.D[lane]
/// </summary>
public static Vector64<double> MultiplyScalarBySelectedScalar(Vector64<double> left, Vector128<double> right, byte rightIndex) => MultiplyScalarBySelectedScalar(left, right, rightIndex);
/// <summary>
/// float64x2_t vnegq_f64 (float64x2_t a)
/// A64: FNEG Vd.2D, Vn.2D
/// </summary>
public static Vector128<double> Negate(Vector128<double> value) => Negate(value);
/// <summary>
/// int64x2_t vnegq_s64 (int64x2_t a)
/// A64: NEG Vd.2D, Vn.2D
/// </summary>
public static Vector128<long> Negate(Vector128<long> value) => Negate(value);
/// <summary>
/// int64x2_t vqnegq_s64 (int64x2_t a)
/// A64: SQNEG Vd.2D, Vn.2D
/// </summary>
public static Vector128<long> NegateSaturate(Vector128<long> value) => NegateSaturate(value);
/// <summary>
/// int16_t vqnegh_s16 (int16_t a)
/// A64: SQNEG Hd, Hn
/// </summary>
public static Vector64<short> NegateSaturateScalar(Vector64<short> value) => NegateSaturateScalar(value);
/// <summary>
/// int32_t vqnegs_s32 (int32_t a)
/// A64: SQNEG Sd, Sn
/// </summary>
public static Vector64<int> NegateSaturateScalar(Vector64<int> value) => NegateSaturateScalar(value);
/// <summary>
/// int64_t vqnegd_s64 (int64_t a)
/// A64: SQNEG Dd, Dn
/// </summary>
public static Vector64<long> NegateSaturateScalar(Vector64<long> value) => NegateSaturateScalar(value);
/// <summary>
/// int8_t vqnegb_s8 (int8_t a)
/// A64: SQNEG Bd, Bn
/// </summary>
public static Vector64<sbyte> NegateSaturateScalar(Vector64<sbyte> value) => NegateSaturateScalar(value);
/// <summary>
/// int64x1_t vneg_s64 (int64x1_t a)
/// A64: NEG Dd, Dn
/// </summary>
public static Vector64<long> NegateScalar(Vector64<long> value) => NegateScalar(value);
/// <summary>
/// float64x2_t vrecpeq_f64 (float64x2_t a)
/// A64: FRECPE Vd.2D, Vn.2D
/// </summary>
public static Vector128<double> ReciprocalEstimate(Vector128<double> value) => ReciprocalEstimate(value);
/// <summary>
/// float64x1_t vrecpe_f64 (float64x1_t a)
/// A64: FRECPE Dd, Dn
/// </summary>
public static Vector64<double> ReciprocalEstimateScalar(Vector64<double> value) => ReciprocalEstimateScalar(value);
/// <summary>
/// float32_t vrecpes_f32 (float32_t a)
/// A64: FRECPE Sd, Sn
/// </summary>
public static Vector64<float> ReciprocalEstimateScalar(Vector64<float> value) => ReciprocalEstimateScalar(value);
/// <summary>
/// float64_t vrecpxd_f64 (float64_t a)
/// A64: FRECPX Dd, Dn
/// </summary>
public static Vector64<double> ReciprocalExponentScalar(Vector64<double> value) => ReciprocalExponentScalar(value);
/// <summary>
/// float32_t vrecpxs_f32 (float32_t a)
/// A64: FRECPX Sd, Sn
/// </summary>
public static Vector64<float> ReciprocalExponentScalar(Vector64<float> value) => ReciprocalExponentScalar(value);
/// <summary>
/// float64x2_t vrsqrteq_f64 (float64x2_t a)
/// A64: FRSQRTE Vd.2D, Vn.2D
/// </summary>
public static Vector128<double> ReciprocalSquareRootEstimate(Vector128<double> value) => ReciprocalSquareRootEstimate(value);
/// <summary>
/// float64x1_t vrsqrte_f64 (float64x1_t a)
/// A64: FRSQRTE Dd, Dn
/// </summary>
public static Vector64<double> ReciprocalSquareRootEstimateScalar(Vector64<double> value) => ReciprocalSquareRootEstimateScalar(value);
/// <summary>
/// float32_t vrsqrtes_f32 (float32_t a)
/// A64: FRSQRTE Sd, Sn
/// </summary>
public static Vector64<float> ReciprocalSquareRootEstimateScalar(Vector64<float> value) => ReciprocalSquareRootEstimateScalar(value);
/// <summary>
/// float64x2_t vrsqrtsq_f64 (float64x2_t a, float64x2_t b)
/// A64: FRSQRTS Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<double> ReciprocalSquareRootStep(Vector128<double> left, Vector128<double> right) => ReciprocalSquareRootStep(left, right);
/// <summary>
/// float64x1_t vrsqrts_f64 (float64x1_t a, float64x1_t b)
/// A64: FRSQRTS Dd, Dn, Dm
/// </summary>
public static Vector64<double> ReciprocalSquareRootStepScalar(Vector64<double> left, Vector64<double> right) => ReciprocalSquareRootStepScalar(left, right);
/// <summary>
/// float32_t vrsqrtss_f32 (float32_t a, float32_t b)
/// A64: FRSQRTS Sd, Sn, Sm
/// </summary>
public static Vector64<float> ReciprocalSquareRootStepScalar(Vector64<float> left, Vector64<float> right) => ReciprocalSquareRootStepScalar(left, right);
/// <summary>
/// float64x2_t vrecpsq_f64 (float64x2_t a, float64x2_t b)
/// A64: FRECPS Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<double> ReciprocalStep(Vector128<double> left, Vector128<double> right) => ReciprocalStep(left, right);
/// <summary>
/// float64x1_t vrecps_f64 (float64x1_t a, float64x1_t b)
/// A64: FRECPS Dd, Dn, Dm
/// </summary>
public static Vector64<double> ReciprocalStepScalar(Vector64<double> left, Vector64<double> right) => ReciprocalStepScalar(left, right);
/// <summary>
/// float32_t vrecpss_f32 (float32_t a, float32_t b)
/// A64: FRECPS Sd, Sn, Sm
/// </summary>
public static Vector64<float> ReciprocalStepScalar(Vector64<float> left, Vector64<float> right) => ReciprocalStepScalar(left, right);
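
// Usage sketch (illustrative only): FRECPE/FRECPS are designed to be paired in
// a Newton-Raphson refinement of 1/x. ReciprocalStep(r, x) computes (2 - r * x),
// so each multiply-by-step roughly doubles the precision of the estimate:
//
//     Vector128<double> x = Vector128.Create(3.0, 7.0);
//     Vector128<double> r = AdvSimd.Arm64.ReciprocalEstimate(x);           // coarse 1/x
//     r = AdvSimd.Arm64.Multiply(r, AdvSimd.Arm64.ReciprocalStep(r, x));   // refine
//     r = AdvSimd.Arm64.Multiply(r, AdvSimd.Arm64.ReciprocalStep(r, x));   // refine again
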
/// <summary>
/// float64x2_t vrndaq_f64 (float64x2_t a)
/// A64: FRINTA Vd.2D, Vn.2D
/// </summary>
public static Vector128<double> RoundAwayFromZero(Vector128<double> value) => RoundAwayFromZero(value);
/// <summary>
/// float64x2_t vrndnq_f64 (float64x2_t a)
/// A64: FRINTN Vd.2D, Vn.2D
/// </summary>
public static Vector128<double> RoundToNearest(Vector128<double> value) => RoundToNearest(value);
/// <summary>
/// float64x2_t vrndmq_f64 (float64x2_t a)
/// A64: FRINTM Vd.2D, Vn.2D
/// </summary>
public static Vector128<double> RoundToNegativeInfinity(Vector128<double> value) => RoundToNegativeInfinity(value);
/// <summary>
/// float64x2_t vrndpq_f64 (float64x2_t a)
/// A64: FRINTP Vd.2D, Vn.2D
/// </summary>
public static Vector128<double> RoundToPositiveInfinity(Vector128<double> value) => RoundToPositiveInfinity(value);
/// <summary>
/// float64x2_t vrndq_f64 (float64x2_t a)
/// A64: FRINTZ Vd.2D, Vn.2D
/// </summary>
public static Vector128<double> RoundToZero(Vector128<double> value) => RoundToZero(value);
/// <summary>
/// int16_t vqrshlh_s16 (int16_t a, int16_t b)
/// A64: SQRSHL Hd, Hn, Hm
/// </summary>
public static Vector64<short> ShiftArithmeticRoundedSaturateScalar(Vector64<short> value, Vector64<short> count) => ShiftArithmeticRoundedSaturateScalar(value, count);
/// <summary>
/// int32_t vqrshls_s32 (int32_t a, int32_t b)
/// A64: SQRSHL Sd, Sn, Sm
/// </summary>
public static Vector64<int> ShiftArithmeticRoundedSaturateScalar(Vector64<int> value, Vector64<int> count) => ShiftArithmeticRoundedSaturateScalar(value, count);
/// <summary>
/// int8_t vqrshlb_s8 (int8_t a, int8_t b)
/// A64: SQRSHL Bd, Bn, Bm
/// </summary>
public static Vector64<sbyte> ShiftArithmeticRoundedSaturateScalar(Vector64<sbyte> value, Vector64<sbyte> count) => ShiftArithmeticRoundedSaturateScalar(value, count);
/// <summary>
/// int16_t vqshlh_s16 (int16_t a, int16_t b)
/// A64: SQSHL Hd, Hn, Hm
/// </summary>
public static Vector64<short> ShiftArithmeticSaturateScalar(Vector64<short> value, Vector64<short> count) => ShiftArithmeticSaturateScalar(value, count);
/// <summary>
/// int32_t vqshls_s32 (int32_t a, int32_t b)
/// A64: SQSHL Sd, Sn, Sm
/// </summary>
public static Vector64<int> ShiftArithmeticSaturateScalar(Vector64<int> value, Vector64<int> count) => ShiftArithmeticSaturateScalar(value, count);
/// <summary>
/// int8_t vqshlb_s8 (int8_t a, int8_t b)
/// A64: SQSHL Bd, Bn, Bm
/// </summary>
public static Vector64<sbyte> ShiftArithmeticSaturateScalar(Vector64<sbyte> value, Vector64<sbyte> count) => ShiftArithmeticSaturateScalar(value, count);
/// <summary>
/// uint8_t vqshlb_n_u8 (uint8_t a, const int n)
/// A64: UQSHL Bd, Bn, #n
/// </summary>
public static Vector64<byte> ShiftLeftLogicalSaturateScalar(Vector64<byte> value, byte count) => ShiftLeftLogicalSaturateScalar(value, count);
/// <summary>
/// int16_t vqshlh_n_s16 (int16_t a, const int n)
/// A64: SQSHL Hd, Hn, #n
/// </summary>
public static Vector64<short> ShiftLeftLogicalSaturateScalar(Vector64<short> value, byte count) => ShiftLeftLogicalSaturateScalar(value, count);
/// <summary>
/// int32_t vqshls_n_s32 (int32_t a, const int n)
/// A64: SQSHL Sd, Sn, #n
/// </summary>
public static Vector64<int> ShiftLeftLogicalSaturateScalar(Vector64<int> value, byte count) => ShiftLeftLogicalSaturateScalar(value, count);
/// <summary>
/// int8_t vqshlb_n_s8 (int8_t a, const int n)
/// A64: SQSHL Bd, Bn, #n
/// </summary>
public static Vector64<sbyte> ShiftLeftLogicalSaturateScalar(Vector64<sbyte> value, byte count) => ShiftLeftLogicalSaturateScalar(value, count);
/// <summary>
/// uint16_t vqshlh_n_u16 (uint16_t a, const int n)
/// A64: UQSHL Hd, Hn, #n
/// </summary>
public static Vector64<ushort> ShiftLeftLogicalSaturateScalar(Vector64<ushort> value, byte count) => ShiftLeftLogicalSaturateScalar(value, count);
/// <summary>
/// uint32_t vqshls_n_u32 (uint32_t a, const int n)
/// A64: UQSHL Sd, Sn, #n
/// </summary>
public static Vector64<uint> ShiftLeftLogicalSaturateScalar(Vector64<uint> value, byte count) => ShiftLeftLogicalSaturateScalar(value, count);
/// <summary>
/// uint16_t vqshluh_n_s16 (int16_t a, const int n)
/// A64: SQSHLU Hd, Hn, #n
/// </summary>
public static Vector64<ushort> ShiftLeftLogicalSaturateUnsignedScalar(Vector64<short> value, byte count) => ShiftLeftLogicalSaturateUnsignedScalar(value, count);
/// <summary>
/// uint32_t vqshlus_n_s32 (int32_t a, const int n)
/// A64: SQSHLU Sd, Sn, #n
/// </summary>
public static Vector64<uint> ShiftLeftLogicalSaturateUnsignedScalar(Vector64<int> value, byte count) => ShiftLeftLogicalSaturateUnsignedScalar(value, count);
/// <summary>
/// uint8_t vqshlub_n_s8 (int8_t a, const int n)
/// A64: SQSHLU Bd, Bn, #n
/// </summary>
public static Vector64<byte> ShiftLeftLogicalSaturateUnsignedScalar(Vector64<sbyte> value, byte count) => ShiftLeftLogicalSaturateUnsignedScalar(value, count);
/// <summary>
/// uint8_t vqrshlb_u8 (uint8_t a, int8_t b)
/// A64: UQRSHL Bd, Bn, Bm
/// </summary>
public static Vector64<byte> ShiftLogicalRoundedSaturateScalar(Vector64<byte> value, Vector64<sbyte> count) => ShiftLogicalRoundedSaturateScalar(value, count);
/// <summary>
/// uint16_t vqrshlh_u16 (uint16_t a, int16_t b)
/// A64: UQRSHL Hd, Hn, Hm
/// </summary>
public static Vector64<short> ShiftLogicalRoundedSaturateScalar(Vector64<short> value, Vector64<short> count) => ShiftLogicalRoundedSaturateScalar(value, count);
/// <summary>
/// uint32_t vqrshls_u32 (uint32_t a, int32_t b)
/// A64: UQRSHL Sd, Sn, Sm
/// </summary>
public static Vector64<int> ShiftLogicalRoundedSaturateScalar(Vector64<int> value, Vector64<int> count) => ShiftLogicalRoundedSaturateScalar(value, count);
/// <summary>
/// uint8_t vqrshlb_u8 (uint8_t a, int8_t b)
/// A64: UQRSHL Bd, Bn, Bm
/// </summary>
public static Vector64<sbyte> ShiftLogicalRoundedSaturateScalar(Vector64<sbyte> value, Vector64<sbyte> count) => ShiftLogicalRoundedSaturateScalar(value, count);
/// <summary>
/// uint16_t vqrshlh_u16 (uint16_t a, int16_t b)
/// A64: UQRSHL Hd, Hn, Hm
/// </summary>
public static Vector64<ushort> ShiftLogicalRoundedSaturateScalar(Vector64<ushort> value, Vector64<short> count) => ShiftLogicalRoundedSaturateScalar(value, count);
/// <summary>
/// uint32_t vqrshls_u32 (uint32_t a, int32_t b)
/// A64: UQRSHL Sd, Sn, Sm
/// </summary>
public static Vector64<uint> ShiftLogicalRoundedSaturateScalar(Vector64<uint> value, Vector64<int> count) => ShiftLogicalRoundedSaturateScalar(value, count);
/// <summary>
/// uint8_t vqshlb_u8 (uint8_t a, int8_t b)
/// A64: UQSHL Bd, Bn, Bm
/// </summary>
public static Vector64<byte> ShiftLogicalSaturateScalar(Vector64<byte> value, Vector64<sbyte> count) => ShiftLogicalSaturateScalar(value, count);
/// <summary>
/// uint16_t vqshlh_u16 (uint16_t a, int16_t b)
/// A64: UQSHL Hd, Hn, Hm
/// </summary>
public static Vector64<short> ShiftLogicalSaturateScalar(Vector64<short> value, Vector64<short> count) => ShiftLogicalSaturateScalar(value, count);
/// <summary>
/// uint32_t vqshls_u32 (uint32_t a, int32_t b)
/// A64: UQSHL Sd, Sn, Sm
/// </summary>
public static Vector64<int> ShiftLogicalSaturateScalar(Vector64<int> value, Vector64<int> count) => ShiftLogicalSaturateScalar(value, count);
/// <summary>
/// uint8_t vqshlb_u8 (uint8_t a, int8_t b)
/// A64: UQSHL Bd, Bn, Bm
/// </summary>
public static Vector64<sbyte> ShiftLogicalSaturateScalar(Vector64<sbyte> value, Vector64<sbyte> count) => ShiftLogicalSaturateScalar(value, count);
/// <summary>
/// uint16_t vqshlh_u16 (uint16_t a, int16_t b)
/// A64: UQSHL Hd, Hn, Hm
/// </summary>
public static Vector64<ushort> ShiftLogicalSaturateScalar(Vector64<ushort> value, Vector64<short> count) => ShiftLogicalSaturateScalar(value, count);
/// <summary>
/// uint32_t vqshls_u32 (uint32_t a, int32_t b)
/// A64: UQSHL Sd, Sn, Sm
/// </summary>
public static Vector64<uint> ShiftLogicalSaturateScalar(Vector64<uint> value, Vector64<int> count) => ShiftLogicalSaturateScalar(value, count);
/// <summary>
/// int16_t vqshrns_n_s32 (int32_t a, const int n)
/// A64: SQSHRN Hd, Sn, #n
/// </summary>
public static Vector64<short> ShiftRightArithmeticNarrowingSaturateScalar(Vector64<int> value, byte count) => ShiftRightArithmeticNarrowingSaturateScalar(value, count);
/// <summary>
/// int32_t vqshrnd_n_s64 (int64_t a, const int n)
/// A64: SQSHRN Sd, Dn, #n
/// </summary>
public static Vector64<int> ShiftRightArithmeticNarrowingSaturateScalar(Vector64<long> value, byte count) => ShiftRightArithmeticNarrowingSaturateScalar(value, count);
/// <summary>
/// int8_t vqshrnh_n_s16 (int16_t a, const int n)
/// A64: SQSHRN Bd, Hn, #n
/// </summary>
public static Vector64<sbyte> ShiftRightArithmeticNarrowingSaturateScalar(Vector64<short> value, byte count) => ShiftRightArithmeticNarrowingSaturateScalar(value, count);
/// <summary>
/// uint8_t vqshrunh_n_s16 (int16_t a, const int n)
/// A64: SQSHRUN Bd, Hn, #n
/// </summary>
public static Vector64<byte> ShiftRightArithmeticNarrowingSaturateUnsignedScalar(Vector64<short> value, byte count) => ShiftRightArithmeticNarrowingSaturateUnsignedScalar(value, count);
/// <summary>
/// uint16_t vqshruns_n_s32 (int32_t a, const int n)
/// A64: SQSHRUN Hd, Sn, #n
/// </summary>
public static Vector64<ushort> ShiftRightArithmeticNarrowingSaturateUnsignedScalar(Vector64<int> value, byte count) => ShiftRightArithmeticNarrowingSaturateUnsignedScalar(value, count);
/// <summary>
/// uint32_t vqshrund_n_s64 (int64_t a, const int n)
/// A64: SQSHRUN Sd, Dn, #n
/// </summary>
public static Vector64<uint> ShiftRightArithmeticNarrowingSaturateUnsignedScalar(Vector64<long> value, byte count) => ShiftRightArithmeticNarrowingSaturateUnsignedScalar(value, count);
/// <summary>
/// int16_t vqrshrns_n_s32 (int32_t a, const int n)
/// A64: SQRSHRN Hd, Sn, #n
/// </summary>
public static Vector64<short> ShiftRightArithmeticRoundedNarrowingSaturateScalar(Vector64<int> value, byte count) => ShiftRightArithmeticRoundedNarrowingSaturateScalar(value, count);
/// <summary>
/// int32_t vqrshrnd_n_s64 (int64_t a, const int n)
/// A64: SQRSHRN Sd, Dn, #n
/// </summary>
public static Vector64<int> ShiftRightArithmeticRoundedNarrowingSaturateScalar(Vector64<long> value, byte count) => ShiftRightArithmeticRoundedNarrowingSaturateScalar(value, count);
/// <summary>
/// int8_t vqrshrnh_n_s16 (int16_t a, const int n)
/// A64: SQRSHRN Bd, Hn, #n
/// </summary>
public static Vector64<sbyte> ShiftRightArithmeticRoundedNarrowingSaturateScalar(Vector64<short> value, byte count) => ShiftRightArithmeticRoundedNarrowingSaturateScalar(value, count);
/// <summary>
/// uint8_t vqrshrunh_n_s16 (int16_t a, const int n)
/// A64: SQRSHRUN Bd, Hn, #n
/// </summary>
public static Vector64<byte> ShiftRightArithmeticRoundedNarrowingSaturateUnsignedScalar(Vector64<short> value, byte count) => ShiftRightArithmeticRoundedNarrowingSaturateUnsignedScalar(value, count);
/// <summary>
/// uint16_t vqrshruns_n_s32 (int32_t a, const int n)
/// A64: SQRSHRUN Hd, Sn, #n
/// </summary>
public static Vector64<ushort> ShiftRightArithmeticRoundedNarrowingSaturateUnsignedScalar(Vector64<int> value, byte count) => ShiftRightArithmeticRoundedNarrowingSaturateUnsignedScalar(value, count);
/// <summary>
/// uint32_t vqrshrund_n_s64 (int64_t a, const int n)
/// A64: SQRSHRUN Sd, Dn, #n
/// </summary>
public static Vector64<uint> ShiftRightArithmeticRoundedNarrowingSaturateUnsignedScalar(Vector64<long> value, byte count) => ShiftRightArithmeticRoundedNarrowingSaturateUnsignedScalar(value, count);
/// <summary>
/// uint8_t vqshrnh_n_u16 (uint16_t a, const int n)
/// A64: UQSHRN Bd, Hn, #n
/// </summary>
public static Vector64<byte> ShiftRightLogicalNarrowingSaturateScalar(Vector64<ushort> value, byte count) => ShiftRightLogicalNarrowingSaturateScalar(value, count);
/// <summary>
/// uint16_t vqshrns_n_u32 (uint32_t a, const int n)
/// A64: UQSHRN Hd, Sn, #n
/// </summary>
public static Vector64<short> ShiftRightLogicalNarrowingSaturateScalar(Vector64<int> value, byte count) => ShiftRightLogicalNarrowingSaturateScalar(value, count);
/// <summary>
/// uint32_t vqshrnd_n_u64 (uint64_t a, const int n)
/// A64: UQSHRN Sd, Dn, #n
/// </summary>
public static Vector64<int> ShiftRightLogicalNarrowingSaturateScalar(Vector64<long> value, byte count) => ShiftRightLogicalNarrowingSaturateScalar(value, count);
/// <summary>
/// uint8_t vqshrnh_n_u16 (uint16_t a, const int n)
/// A64: UQSHRN Bd, Hn, #n
/// </summary>
public static Vector64<sbyte> ShiftRightLogicalNarrowingSaturateScalar(Vector64<short> value, byte count) => ShiftRightLogicalNarrowingSaturateScalar(value, count);
/// <summary>
/// uint16_t vqshrns_n_u32 (uint32_t a, const int n)
/// A64: UQSHRN Hd, Sn, #n
/// </summary>
public static Vector64<ushort> ShiftRightLogicalNarrowingSaturateScalar(Vector64<uint> value, byte count) => ShiftRightLogicalNarrowingSaturateScalar(value, count);
/// <summary>
/// uint32_t vqshrnd_n_u64 (uint64_t a, const int n)
/// A64: UQSHRN Sd, Dn, #n
/// </summary>
public static Vector64<uint> ShiftRightLogicalNarrowingSaturateScalar(Vector64<ulong> value, byte count) => ShiftRightLogicalNarrowingSaturateScalar(value, count);
/// <summary>
/// uint8_t vqrshrnh_n_u16 (uint16_t a, const int n)
/// A64: UQRSHRN Bd, Hn, #n
/// </summary>
public static Vector64<byte> ShiftRightLogicalRoundedNarrowingSaturateScalar(Vector64<ushort> value, byte count) => ShiftRightLogicalRoundedNarrowingSaturateScalar(value, count);
/// <summary>
/// uint16_t vqrshrns_n_u32 (uint32_t a, const int n)
/// A64: UQRSHRN Hd, Sn, #n
/// </summary>
public static Vector64<short> ShiftRightLogicalRoundedNarrowingSaturateScalar(Vector64<int> value, byte count) => ShiftRightLogicalRoundedNarrowingSaturateScalar(value, count);
/// <summary>
/// uint32_t vqrshrnd_n_u64 (uint64_t a, const int n)
/// A64: UQRSHRN Sd, Dn, #n
/// </summary>
public static Vector64<int> ShiftRightLogicalRoundedNarrowingSaturateScalar(Vector64<long> value, byte count) => ShiftRightLogicalRoundedNarrowingSaturateScalar(value, count);
/// <summary>
/// uint8_t vqrshrnh_n_u16 (uint16_t a, const int n)
/// A64: UQRSHRN Bd, Hn, #n
/// </summary>
public static Vector64<sbyte> ShiftRightLogicalRoundedNarrowingSaturateScalar(Vector64<short> value, byte count) => ShiftRightLogicalRoundedNarrowingSaturateScalar(value, count);
/// <summary>
/// uint16_t vqrshrns_n_u32 (uint32_t a, const int n)
/// A64: UQRSHRN Hd, Sn, #n
/// </summary>
public static Vector64<ushort> ShiftRightLogicalRoundedNarrowingSaturateScalar(Vector64<uint> value, byte count) => ShiftRightLogicalRoundedNarrowingSaturateScalar(value, count);
/// <summary>
/// uint32_t vqrshrnd_n_u64 (uint64_t a, const int n)
/// A64: UQRSHRN Sd, Dn, #n
/// </summary>
public static Vector64<uint> ShiftRightLogicalRoundedNarrowingSaturateScalar(Vector64<ulong> value, byte count) => ShiftRightLogicalRoundedNarrowingSaturateScalar(value, count);
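
// Semantics sketch (illustrative only): the right-shift "narrowing saturate"
// forms shift first, then clamp the result into the narrower element type.
// For example, UQSHRN narrowing a ulong lane to a uint lane:
//
//     var v = Vector64.CreateScalar(0x1_2345_6789_ABCDUL);
//     uint r = AdvSimd.Arm64.ShiftRightLogicalNarrowingSaturateScalar(v, 8).ToScalar();
//     // 0x1_2345_6789_ABCD >> 8 = 0x123_4567_89AB does not fit in 32 bits,
//     // so r saturates to 0xFFFF_FFFF
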
/// <summary>
/// float32x2_t vsqrt_f32 (float32x2_t a)
/// A64: FSQRT Vd.2S, Vn.2S
/// </summary>
public static Vector64<float> Sqrt(Vector64<float> value) => Sqrt(value);
/// <summary>
/// float64x2_t vsqrtq_f64 (float64x2_t a)
/// A64: FSQRT Vd.2D, Vn.2D
/// </summary>
public static Vector128<double> Sqrt(Vector128<double> value) => Sqrt(value);
/// <summary>
/// float32x4_t vsqrtq_f32 (float32x4_t a)
/// A64: FSQRT Vd.4S, Vn.4S
/// </summary>
public static Vector128<float> Sqrt(Vector128<float> value) => Sqrt(value);
/// <summary>
/// A64: STP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe void StorePair(byte* address, Vector64<byte> value1, Vector64<byte> value2) => StorePair(address, value1, value2);
/// <summary>
/// A64: STP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe void StorePair(double* address, Vector64<double> value1, Vector64<double> value2) => StorePair(address, value1, value2);
/// <summary>
/// A64: STP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe void StorePair(short* address, Vector64<short> value1, Vector64<short> value2) => StorePair(address, value1, value2);
/// <summary>
/// A64: STP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe void StorePair(int* address, Vector64<int> value1, Vector64<int> value2) => StorePair(address, value1, value2);
/// <summary>
/// A64: STP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe void StorePair(long* address, Vector64<long> value1, Vector64<long> value2) => StorePair(address, value1, value2);
/// <summary>
/// A64: STP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe void StorePair(sbyte* address, Vector64<sbyte> value1, Vector64<sbyte> value2) => StorePair(address, value1, value2);
/// <summary>
/// A64: STP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe void StorePair(float* address, Vector64<float> value1, Vector64<float> value2) => StorePair(address, value1, value2);
/// <summary>
/// A64: STP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe void StorePair(ushort* address, Vector64<ushort> value1, Vector64<ushort> value2) => StorePair(address, value1, value2);
/// <summary>
/// A64: STP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe void StorePair(uint* address, Vector64<uint> value1, Vector64<uint> value2) => StorePair(address, value1, value2);
/// <summary>
/// A64: STP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe void StorePair(ulong* address, Vector64<ulong> value1, Vector64<ulong> value2) => StorePair(address, value1, value2);
/// <summary>
/// A64: STP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe void StorePair(byte* address, Vector128<byte> value1, Vector128<byte> value2) => StorePair(address, value1, value2);
/// <summary>
/// A64: STP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe void StorePair(double* address, Vector128<double> value1, Vector128<double> value2) => StorePair(address, value1, value2);
/// <summary>
/// A64: STP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe void StorePair(short* address, Vector128<short> value1, Vector128<short> value2) => StorePair(address, value1, value2);
/// <summary>
/// A64: STP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe void StorePair(int* address, Vector128<int> value1, Vector128<int> value2) => StorePair(address, value1, value2);
/// <summary>
/// A64: STP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe void StorePair(long* address, Vector128<long> value1, Vector128<long> value2) => StorePair(address, value1, value2);
/// <summary>
/// A64: STP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe void StorePair(sbyte* address, Vector128<sbyte> value1, Vector128<sbyte> value2) => StorePair(address, value1, value2);
/// <summary>
/// A64: STP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe void StorePair(float* address, Vector128<float> value1, Vector128<float> value2) => StorePair(address, value1, value2);
/// <summary>
/// A64: STP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe void StorePair(ushort* address, Vector128<ushort> value1, Vector128<ushort> value2) => StorePair(address, value1, value2);
/// <summary>
/// A64: STP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe void StorePair(uint* address, Vector128<uint> value1, Vector128<uint> value2) => StorePair(address, value1, value2);
/// <summary>
/// A64: STP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe void StorePair(ulong* address, Vector128<ulong> value1, Vector128<ulong> value2) => StorePair(address, value1, value2);
/// <summary>
/// A64: STNP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe void StorePairNonTemporal(byte* address, Vector64<byte> value1, Vector64<byte> value2) => StorePairNonTemporal(address, value1, value2);
/// <summary>
/// A64: STNP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe void StorePairNonTemporal(double* address, Vector64<double> value1, Vector64<double> value2) => StorePairNonTemporal(address, value1, value2);
/// <summary>
/// A64: STNP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe void StorePairNonTemporal(short* address, Vector64<short> value1, Vector64<short> value2) => StorePairNonTemporal(address, value1, value2);
/// <summary>
/// A64: STNP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe void StorePairNonTemporal(int* address, Vector64<int> value1, Vector64<int> value2) => StorePairNonTemporal(address, value1, value2);
/// <summary>
/// A64: STNP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe void StorePairNonTemporal(long* address, Vector64<long> value1, Vector64<long> value2) => StorePairNonTemporal(address, value1, value2);
/// <summary>
/// A64: STNP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe void StorePairNonTemporal(sbyte* address, Vector64<sbyte> value1, Vector64<sbyte> value2) => StorePairNonTemporal(address, value1, value2);
/// <summary>
/// A64: STNP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe void StorePairNonTemporal(float* address, Vector64<float> value1, Vector64<float> value2) => StorePairNonTemporal(address, value1, value2);
/// <summary>
/// A64: STNP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe void StorePairNonTemporal(ushort* address, Vector64<ushort> value1, Vector64<ushort> value2) => StorePairNonTemporal(address, value1, value2);
/// <summary>
/// A64: STNP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe void StorePairNonTemporal(uint* address, Vector64<uint> value1, Vector64<uint> value2) => StorePairNonTemporal(address, value1, value2);
/// <summary>
/// A64: STNP Dt1, Dt2, [Xn]
/// </summary>
public static unsafe void StorePairNonTemporal(ulong* address, Vector64<ulong> value1, Vector64<ulong> value2) => StorePairNonTemporal(address, value1, value2);
/// <summary>
/// A64: STNP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe void StorePairNonTemporal(byte* address, Vector128<byte> value1, Vector128<byte> value2) => StorePairNonTemporal(address, value1, value2);
/// <summary>
/// A64: STNP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe void StorePairNonTemporal(double* address, Vector128<double> value1, Vector128<double> value2) => StorePairNonTemporal(address, value1, value2);
/// <summary>
/// A64: STNP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe void StorePairNonTemporal(short* address, Vector128<short> value1, Vector128<short> value2) => StorePairNonTemporal(address, value1, value2);
/// <summary>
/// A64: STNP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe void StorePairNonTemporal(int* address, Vector128<int> value1, Vector128<int> value2) => StorePairNonTemporal(address, value1, value2);
/// <summary>
/// A64: STNP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe void StorePairNonTemporal(long* address, Vector128<long> value1, Vector128<long> value2) => StorePairNonTemporal(address, value1, value2);
/// <summary>
/// A64: STNP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe void StorePairNonTemporal(sbyte* address, Vector128<sbyte> value1, Vector128<sbyte> value2) => StorePairNonTemporal(address, value1, value2);
/// <summary>
/// A64: STNP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe void StorePairNonTemporal(float* address, Vector128<float> value1, Vector128<float> value2) => StorePairNonTemporal(address, value1, value2);
/// <summary>
/// A64: STNP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe void StorePairNonTemporal(ushort* address, Vector128<ushort> value1, Vector128<ushort> value2) => StorePairNonTemporal(address, value1, value2);
/// <summary>
/// A64: STNP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe void StorePairNonTemporal(uint* address, Vector128<uint> value1, Vector128<uint> value2) => StorePairNonTemporal(address, value1, value2);
/// <summary>
/// A64: STNP Qt1, Qt2, [Xn]
/// </summary>
public static unsafe void StorePairNonTemporal(ulong* address, Vector128<ulong> value1, Vector128<ulong> value2) => StorePairNonTemporal(address, value1, value2);
/// <summary>
/// A64: STP St1, St2, [Xn]
/// </summary>
public static unsafe void StorePairScalar(int* address, Vector64<int> value1, Vector64<int> value2) => StorePairScalar(address, value1, value2);
/// <summary>
/// A64: STP St1, St2, [Xn]
/// </summary>
public static unsafe void StorePairScalar(float* address, Vector64<float> value1, Vector64<float> value2) => StorePairScalar(address, value1, value2);
/// <summary>
/// A64: STP St1, St2, [Xn]
/// </summary>
public static unsafe void StorePairScalar(uint* address, Vector64<uint> value1, Vector64<uint> value2) => StorePairScalar(address, value1, value2);
/// <summary>
/// A64: STNP St1, St2, [Xn]
/// </summary>
public static unsafe void StorePairScalarNonTemporal(int* address, Vector64<int> value1, Vector64<int> value2) => StorePairScalarNonTemporal(address, value1, value2);
/// <summary>
/// A64: STNP St1, St2, [Xn]
/// </summary>
public static unsafe void StorePairScalarNonTemporal(float* address, Vector64<float> value1, Vector64<float> value2) => StorePairScalarNonTemporal(address, value1, value2);
/// <summary>
/// A64: STNP St1, St2, [Xn]
/// </summary>
public static unsafe void StorePairScalarNonTemporal(uint* address, Vector64<uint> value1, Vector64<uint> value2) => StorePairScalarNonTemporal(address, value1, value2);
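
// Usage sketch (illustrative only): StorePair writes two vectors with a single
// STP, a common way to copy or spill 32 bytes at a time; the NonTemporal
// variants carry the same cache hint as LDNP/STNP. Assuming a pinned byte[]
// named buffer and two Vector128<byte> locals v1 and v2:
//
//     fixed (byte* p = buffer)
//     {
//         AdvSimd.Arm64.StorePair(p, v1, v2);  // v1 at p, v2 at p + 16
//     }
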
/// <summary>
/// float64x2_t vsubq_f64 (float64x2_t a, float64x2_t b)
/// A64: FSUB Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<double> Subtract(Vector128<double> left, Vector128<double> right) => Subtract(left, right);
/// <summary>
/// uint8_t vqsubb_u8 (uint8_t a, uint8_t b)
/// A64: UQSUB Bd, Bn, Bm
/// </summary>
public static Vector64<byte> SubtractSaturateScalar(Vector64<byte> left, Vector64<byte> right) => SubtractSaturateScalar(left, right);
/// <summary>
/// int16_t vqsubh_s16 (int16_t a, int16_t b)
/// A64: SQSUB Hd, Hn, Hm
/// </summary>
public static Vector64<short> SubtractSaturateScalar(Vector64<short> left, Vector64<short> right) => SubtractSaturateScalar(left, right);
/// <summary>
/// int32_t vqsubs_s32 (int32_t a, int32_t b)
/// A64: SQSUB Sd, Sn, Sm
/// </summary>
public static Vector64<int> SubtractSaturateScalar(Vector64<int> left, Vector64<int> right) => SubtractSaturateScalar(left, right);
/// <summary>
/// int8_t vqsubb_s8 (int8_t a, int8_t b)
/// A64: SQSUB Bd, Bn, Bm
/// </summary>
public static Vector64<sbyte> SubtractSaturateScalar(Vector64<sbyte> left, Vector64<sbyte> right) => SubtractSaturateScalar(left, right);
/// <summary>
/// uint16_t vqsubh_u16 (uint16_t a, uint16_t b)
/// A64: UQSUB Hd, Hn, Hm
/// </summary>
public static Vector64<ushort> SubtractSaturateScalar(Vector64<ushort> left, Vector64<ushort> right) => SubtractSaturateScalar(left, right);
/// <summary>
/// uint32_t vqsubs_u32 (uint32_t a, uint32_t b)
/// A64: UQSUB Sd, Sn, Sm
/// </summary>
public static Vector64<uint> SubtractSaturateScalar(Vector64<uint> left, Vector64<uint> right) => SubtractSaturateScalar(left, right);
/// <summary>
/// uint8x8_t vrbit_u8 (uint8x8_t a)
/// A64: RBIT Vd.8B, Vn.8B
/// </summary>
public static Vector64<byte> ReverseElementBits(Vector64<byte> value) => ReverseElementBits(value);
/// <summary>
/// int8x8_t vrbit_s8 (int8x8_t a)
/// A64: RBIT Vd.8B, Vn.8B
/// </summary>
public static Vector64<sbyte> ReverseElementBits(Vector64<sbyte> value) => ReverseElementBits(value);
/// <summary>
/// uint8x16_t vrbitq_u8 (uint8x16_t a)
/// A64: RBIT Vd.16B, Vn.16B
/// </summary>
public static Vector128<byte> ReverseElementBits(Vector128<byte> value) => ReverseElementBits(value);
/// <summary>
/// int8x16_t vrbitq_s8 (int8x16_t a)
/// A64: RBIT Vd.16B, Vn.16B
/// </summary>
public static Vector128<sbyte> ReverseElementBits(Vector128<sbyte> value) => ReverseElementBits(value);
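
// Semantics sketch (illustrative only): ReverseElementBits reverses the bit
// order within each 8-bit element independently (RBIT), e.g.:
//
//     var v = Vector64.CreateScalar((byte)0b0000_0001);
//     byte r = AdvSimd.Arm64.ReverseElementBits(v).ToScalar();  // 0b1000_0000
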
/// </summary> public static Vector64<ushort> TransposeEven(Vector64<ushort> left, Vector64<ushort> right) => TransposeEven(left, right); /// <summary> /// uint32x2_t vtrn1_u32(uint32x2_t a, uint32x2_t b) /// A64: TRN1 Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<uint> TransposeEven(Vector64<uint> left, Vector64<uint> right) => TransposeEven(left, right); /// <summary> /// uint8x16_t vtrn1q_u8(uint8x16_t a, uint8x16_t b) /// A64: TRN1 Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> TransposeEven(Vector128<byte> left, Vector128<byte> right) => TransposeEven(left, right); /// <summary> /// float64x2_t vtrn1q_f64(float64x2_t a, float64x2_t b) /// A64: TRN1 Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<double> TransposeEven(Vector128<double> left, Vector128<double> right) => TransposeEven(left, right); /// <summary> /// int16x8_t vtrn1q_s16(int16x8_t a, int16x8_t b) /// A64: TRN1 Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<short> TransposeEven(Vector128<short> left, Vector128<short> right) => TransposeEven(left, right); /// <summary> /// int32x4_t vtrn1q_s32(int32x4_t a, int32x4_t b) /// A64: TRN1 Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<int> TransposeEven(Vector128<int> left, Vector128<int> right) => TransposeEven(left, right); /// <summary> /// int64x2_t vtrn1q_s64(int64x2_t a, int64x2_t b) /// A64: TRN1 Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<long> TransposeEven(Vector128<long> left, Vector128<long> right) => TransposeEven(left, right); /// <summary> /// int8x16_t vtrn1q_u8(int8x16_t a, int8x16_t b) /// A64: TRN1 Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> TransposeEven(Vector128<sbyte> left, Vector128<sbyte> right) => TransposeEven(left, right); /// <summary> /// float32x4_t vtrn1q_f32(float32x4_t a, float32x4_t b) /// A64: TRN1 Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<float> TransposeEven(Vector128<float> left, Vector128<float> right) => TransposeEven(left, right); /// <summary> /// uint16x8_t vtrn1q_u16(uint16x8_t a, uint16x8_t b) /// A64: TRN1 Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<ushort> TransposeEven(Vector128<ushort> left, Vector128<ushort> right) => TransposeEven(left, right); /// <summary> /// uint32x4_t vtrn1q_u32(uint32x4_t a, uint32x4_t b) /// A64: TRN1 Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<uint> TransposeEven(Vector128<uint> left, Vector128<uint> right) => TransposeEven(left, right); /// <summary> /// uint64x2_t vtrn1q_u64(uint64x2_t a, uint64x2_t b) /// A64: TRN1 Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<ulong> TransposeEven(Vector128<ulong> left, Vector128<ulong> right) => TransposeEven(left, right); /// <summary> /// uint8x8_t vtrn2_u8(uint8x8_t a, uint8x8_t b) /// A64: TRN2 Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> TransposeOdd(Vector64<byte> left, Vector64<byte> right) => TransposeOdd(left, right); /// <summary> /// int16x4_t vtrn2_s16(int16x4_t a, int16x4_t b) /// A64: TRN2 Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<short> TransposeOdd(Vector64<short> left, Vector64<short> right) => TransposeOdd(left, right); /// <summary> /// int32x2_t vtrn2_s32(int32x2_t a, int32x2_t b) /// A64: TRN2 Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<int> TransposeOdd(Vector64<int> left, Vector64<int> right) => TransposeOdd(left, right); /// <summary> /// int8x8_t vtrn2_s8(int8x8_t a, int8x8_t b) /// A64: TRN2 Vd.8B, Vn.8B, Vm.8B /// </summary> 
public static Vector64<sbyte> TransposeOdd(Vector64<sbyte> left, Vector64<sbyte> right) => TransposeOdd(left, right); /// <summary> /// float32x2_t vtrn2_f32(float32x2_t a, float32x2_t b) /// A64: TRN2 Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<float> TransposeOdd(Vector64<float> left, Vector64<float> right) => TransposeOdd(left, right); /// <summary> /// uint16x4_t vtrn2_u16(uint16x4_t a, uint16x4_t b) /// A64: TRN2 Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<ushort> TransposeOdd(Vector64<ushort> left, Vector64<ushort> right) => TransposeOdd(left, right); /// <summary> /// uint32x2_t vtrn2_u32(uint32x2_t a, uint32x2_t b) /// A64: TRN2 Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<uint> TransposeOdd(Vector64<uint> left, Vector64<uint> right) => TransposeOdd(left, right); /// <summary> /// uint8x16_t vtrn2q_u8(uint8x16_t a, uint8x16_t b) /// A64: TRN2 Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> TransposeOdd(Vector128<byte> left, Vector128<byte> right) => TransposeOdd(left, right); /// <summary> /// float64x2_t vtrn2q_f64(float64x2_t a, float64x2_t b) /// A64: TRN2 Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<double> TransposeOdd(Vector128<double> left, Vector128<double> right) => TransposeOdd(left, right); /// <summary> /// int16x8_t vtrn2q_s16(int16x8_t a, int16x8_t b) /// A64: TRN2 Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<short> TransposeOdd(Vector128<short> left, Vector128<short> right) => TransposeOdd(left, right); /// <summary> /// int32x4_t vtrn2q_s32(int32x4_t a, int32x4_t b) /// A64: TRN2 Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<int> TransposeOdd(Vector128<int> left, Vector128<int> right) => TransposeOdd(left, right); /// <summary> /// int64x2_t vtrn2q_s64(int64x2_t a, int64x2_t b) /// A64: TRN2 Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<long> TransposeOdd(Vector128<long> left, Vector128<long> right) => TransposeOdd(left, right); /// <summary> /// int8x16_t vtrn2q_u8(int8x16_t a, int8x16_t b) /// A64: TRN2 Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> TransposeOdd(Vector128<sbyte> left, Vector128<sbyte> right) => TransposeOdd(left, right); /// <summary> /// float32x4_t vtrn2q_f32(float32x4_t a, float32x4_t b) /// A64: TRN2 Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<float> TransposeOdd(Vector128<float> left, Vector128<float> right) => TransposeOdd(left, right); /// <summary> /// uint16x8_t vtrn2q_u16(uint16x8_t a, uint16x8_t b) /// A64: TRN2 Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<ushort> TransposeOdd(Vector128<ushort> left, Vector128<ushort> right) => TransposeOdd(left, right); /// <summary> /// uint32x4_t vtrn1q_u32(uint32x4_t a, uint32x4_t b) /// A64: TRN1 Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<uint> TransposeOdd(Vector128<uint> left, Vector128<uint> right) => TransposeOdd(left, right); /// <summary> /// uint64x2_t vtrn1q_u64(uint64x2_t a, uint64x2_t b) /// A64: TRN1 Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<ulong> TransposeOdd(Vector128<ulong> left, Vector128<ulong> right) => TransposeOdd(left, right); /// <summary> /// uint8x8_t vuzp1_u8(uint8x8_t a, uint8x8_t b) /// A64: UZP1 Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> UnzipEven(Vector64<byte> left, Vector64<byte> right) => UnzipEven(left, right); /// <summary> /// int16x4_t vuzp1_s16(int16x4_t a, int16x4_t b) /// A64: UZP1 Vd.4H, Vn.4H, Vm.4H /// </summary> public static 
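        // Illustrative sketch (not part of the original source): TransposeEven/TransposeOdd
        // implement the AArch64 TRN1/TRN2 lane pairing. For byte vectors
        //   left  = { l0, l1, l2, l3, ... }   right = { r0, r1, r2, r3, ... }
        // the results are
        //   TransposeEven(left, right) == { l0, r0, l2, r2, ... }   // TRN1: even-indexed lanes
        //   TransposeOdd(left, right)  == { l1, r1, l3, r3, ... }   // TRN2: odd-indexed lanes
        // so calling both on the same operands transposes the data as 2x2 blocks.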
        /// <summary>
        /// uint8x8_t vuzp1_u8(uint8x8_t a, uint8x8_t b)
        /// A64: UZP1 Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<byte> UnzipEven(Vector64<byte> left, Vector64<byte> right) => UnzipEven(left, right);
        /// <summary>
        /// int16x4_t vuzp1_s16(int16x4_t a, int16x4_t b)
        /// A64: UZP1 Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<short> UnzipEven(Vector64<short> left, Vector64<short> right) => UnzipEven(left, right);
        /// <summary>
        /// int32x2_t vuzp1_s32(int32x2_t a, int32x2_t b)
        /// A64: UZP1 Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<int> UnzipEven(Vector64<int> left, Vector64<int> right) => UnzipEven(left, right);
        /// <summary>
        /// int8x8_t vuzp1_s8(int8x8_t a, int8x8_t b)
        /// A64: UZP1 Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<sbyte> UnzipEven(Vector64<sbyte> left, Vector64<sbyte> right) => UnzipEven(left, right);
        /// <summary>
        /// float32x2_t vuzp1_f32(float32x2_t a, float32x2_t b)
        /// A64: UZP1 Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<float> UnzipEven(Vector64<float> left, Vector64<float> right) => UnzipEven(left, right);
        /// <summary>
        /// uint16x4_t vuzp1_u16(uint16x4_t a, uint16x4_t b)
        /// A64: UZP1 Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<ushort> UnzipEven(Vector64<ushort> left, Vector64<ushort> right) => UnzipEven(left, right);
        /// <summary>
        /// uint32x2_t vuzp1_u32(uint32x2_t a, uint32x2_t b)
        /// A64: UZP1 Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<uint> UnzipEven(Vector64<uint> left, Vector64<uint> right) => UnzipEven(left, right);
        /// <summary>
        /// uint8x16_t vuzp1q_u8(uint8x16_t a, uint8x16_t b)
        /// A64: UZP1 Vd.16B, Vn.16B, Vm.16B
        /// </summary>
        public static Vector128<byte> UnzipEven(Vector128<byte> left, Vector128<byte> right) => UnzipEven(left, right);
        /// <summary>
        /// float64x2_t vuzp1q_f64(float64x2_t a, float64x2_t b)
        /// A64: UZP1 Vd.2D, Vn.2D, Vm.2D
        /// </summary>
        public static Vector128<double> UnzipEven(Vector128<double> left, Vector128<double> right) => UnzipEven(left, right);
        /// <summary>
        /// int16x8_t vuzp1q_s16(int16x8_t a, int16x8_t b)
        /// A64: UZP1 Vd.8H, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<short> UnzipEven(Vector128<short> left, Vector128<short> right) => UnzipEven(left, right);
        /// <summary>
        /// int32x4_t vuzp1q_s32(int32x4_t a, int32x4_t b)
        /// A64: UZP1 Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<int> UnzipEven(Vector128<int> left, Vector128<int> right) => UnzipEven(left, right);
        /// <summary>
        /// int64x2_t vuzp1q_s64(int64x2_t a, int64x2_t b)
        /// A64: UZP1 Vd.2D, Vn.2D, Vm.2D
        /// </summary>
        public static Vector128<long> UnzipEven(Vector128<long> left, Vector128<long> right) => UnzipEven(left, right);
        /// <summary>
        /// int8x16_t vuzp1q_s8(int8x16_t a, int8x16_t b)
        /// A64: UZP1 Vd.16B, Vn.16B, Vm.16B
        /// </summary>
        public static Vector128<sbyte> UnzipEven(Vector128<sbyte> left, Vector128<sbyte> right) => UnzipEven(left, right);
        /// <summary>
        /// float32x4_t vuzp1q_f32(float32x4_t a, float32x4_t b)
        /// A64: UZP1 Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<float> UnzipEven(Vector128<float> left, Vector128<float> right) => UnzipEven(left, right);
        /// <summary>
        /// uint16x8_t vuzp1q_u16(uint16x8_t a, uint16x8_t b)
        /// A64: UZP1 Vd.8H, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<ushort> UnzipEven(Vector128<ushort> left, Vector128<ushort> right) => UnzipEven(left, right);
        /// <summary>
        /// uint32x4_t vuzp1q_u32(uint32x4_t a, uint32x4_t b)
        /// A64: UZP1 Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<uint> UnzipEven(Vector128<uint> left, Vector128<uint> right) => UnzipEven(left, right);
        /// <summary>
        /// uint64x2_t vuzp1q_u64(uint64x2_t a, uint64x2_t b)
        /// A64: UZP1 Vd.2D, Vn.2D, Vm.2D
        /// </summary>
        public static Vector128<ulong> UnzipEven(Vector128<ulong> left, Vector128<ulong> right) => UnzipEven(left, right);
        /// <summary>
        /// uint8x8_t vuzp2_u8(uint8x8_t a, uint8x8_t b)
        /// A64: UZP2 Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<byte> UnzipOdd(Vector64<byte> left, Vector64<byte> right) => UnzipOdd(left, right);
        /// <summary>
        /// int16x4_t vuzp2_s16(int16x4_t a, int16x4_t b)
        /// A64: UZP2 Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<short> UnzipOdd(Vector64<short> left, Vector64<short> right) => UnzipOdd(left, right);
        /// <summary>
        /// int32x2_t vuzp2_s32(int32x2_t a, int32x2_t b)
        /// A64: UZP2 Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<int> UnzipOdd(Vector64<int> left, Vector64<int> right) => UnzipOdd(left, right);
        /// <summary>
        /// int8x8_t vuzp2_s8(int8x8_t a, int8x8_t b)
        /// A64: UZP2 Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<sbyte> UnzipOdd(Vector64<sbyte> left, Vector64<sbyte> right) => UnzipOdd(left, right);
        /// <summary>
        /// float32x2_t vuzp2_f32(float32x2_t a, float32x2_t b)
        /// A64: UZP2 Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<float> UnzipOdd(Vector64<float> left, Vector64<float> right) => UnzipOdd(left, right);
        /// <summary>
        /// uint16x4_t vuzp2_u16(uint16x4_t a, uint16x4_t b)
        /// A64: UZP2 Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<ushort> UnzipOdd(Vector64<ushort> left, Vector64<ushort> right) => UnzipOdd(left, right);
        /// <summary>
        /// uint32x2_t vuzp2_u32(uint32x2_t a, uint32x2_t b)
        /// A64: UZP2 Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<uint> UnzipOdd(Vector64<uint> left, Vector64<uint> right) => UnzipOdd(left, right);
        /// <summary>
        /// uint8x16_t vuzp2q_u8(uint8x16_t a, uint8x16_t b)
        /// A64: UZP2 Vd.16B, Vn.16B, Vm.16B
        /// </summary>
        public static Vector128<byte> UnzipOdd(Vector128<byte> left, Vector128<byte> right) => UnzipOdd(left, right);
        /// <summary>
        /// float64x2_t vuzp2q_f64(float64x2_t a, float64x2_t b)
        /// A64: UZP2 Vd.2D, Vn.2D, Vm.2D
        /// </summary>
        public static Vector128<double> UnzipOdd(Vector128<double> left, Vector128<double> right) => UnzipOdd(left, right);
        /// <summary>
        /// int16x8_t vuzp2q_s16(int16x8_t a, int16x8_t b)
        /// A64: UZP2 Vd.8H, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<short> UnzipOdd(Vector128<short> left, Vector128<short> right) => UnzipOdd(left, right);
        /// <summary>
        /// int32x4_t vuzp2q_s32(int32x4_t a, int32x4_t b)
        /// A64: UZP2 Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<int> UnzipOdd(Vector128<int> left, Vector128<int> right) => UnzipOdd(left, right);
        /// <summary>
        /// int64x2_t vuzp2q_s64(int64x2_t a, int64x2_t b)
        /// A64: UZP2 Vd.2D, Vn.2D, Vm.2D
        /// </summary>
        public static Vector128<long> UnzipOdd(Vector128<long> left, Vector128<long> right) => UnzipOdd(left, right);
        /// <summary>
        /// int8x16_t vuzp2q_s8(int8x16_t a, int8x16_t b)
        /// A64: UZP2 Vd.16B, Vn.16B, Vm.16B
        /// </summary>
        public static Vector128<sbyte> UnzipOdd(Vector128<sbyte> left, Vector128<sbyte> right) => UnzipOdd(left, right);
        /// <summary>
        /// float32x4_t vuzp2q_f32(float32x4_t a, float32x4_t b)
        /// A64: UZP2 Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<float> UnzipOdd(Vector128<float> left, Vector128<float> right) => UnzipOdd(left, right);
        /// <summary>
        /// uint16x8_t vuzp2q_u16(uint16x8_t a, uint16x8_t b)
        /// A64: UZP2 Vd.8H, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<ushort> UnzipOdd(Vector128<ushort> left, Vector128<ushort> right) => UnzipOdd(left, right);
        /// <summary>
        /// uint32x4_t vuzp2q_u32(uint32x4_t a, uint32x4_t b)
        /// A64: UZP2 Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<uint> UnzipOdd(Vector128<uint> left, Vector128<uint> right) => UnzipOdd(left, right);
        /// <summary>
        /// uint64x2_t vuzp2q_u64(uint64x2_t a, uint64x2_t b)
        /// A64: UZP2 Vd.2D, Vn.2D, Vm.2D
        /// </summary>
        public static Vector128<ulong> UnzipOdd(Vector128<ulong> left, Vector128<ulong> right) => UnzipOdd(left, right);
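        // Illustrative sketch (not part of the original source): UnzipEven/UnzipOdd implement
        // UZP1/UZP2, de-interleaving two vectors. For byte vectors
        //   left  = { l0, l1, l2, l3, ... }   right = { r0, r1, r2, r3, ... }
        //   UnzipEven(left, right) == { l0, l2, ..., r0, r2, ... }   // UZP1: even-indexed lanes
        //   UnzipOdd(left, right)  == { l1, l3, ..., r1, r3, ... }   // UZP2: odd-indexed lanes
        // e.g. splitting interleaved real/imaginary pairs back into two separate vectors.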
        /// <summary>
        /// uint8x16_t vqtbl1q_u8(uint8x16_t t, uint8x16_t idx)
        /// A64: TBL Vd.16B, {Vn.16B}, Vm.16B
        /// </summary>
        public static Vector128<byte> VectorTableLookup(Vector128<byte> table, Vector128<byte> byteIndexes) => VectorTableLookup(table, byteIndexes);
        /// <summary>
        /// int8x16_t vqtbl1q_s8(int8x16_t t, uint8x16_t idx)
        /// A64: TBL Vd.16B, {Vn.16B}, Vm.16B
        /// </summary>
        public static Vector128<sbyte> VectorTableLookup(Vector128<sbyte> table, Vector128<sbyte> byteIndexes) => VectorTableLookup(table, byteIndexes);
        /// <summary>
        /// uint8x16_t vqtbx1q_u8(uint8x16_t r, uint8x16_t t, uint8x16_t idx)
        /// A64: TBX Vd.16B, {Vn.16B}, Vm.16B
        /// </summary>
        public static Vector128<byte> VectorTableLookupExtension(Vector128<byte> defaultValues, Vector128<byte> table, Vector128<byte> byteIndexes) => VectorTableLookupExtension(defaultValues, table, byteIndexes);
        /// <summary>
        /// int8x16_t vqtbx1q_s8(int8x16_t r, int8x16_t t, uint8x16_t idx)
        /// A64: TBX Vd.16B, {Vn.16B}, Vm.16B
        /// </summary>
        public static Vector128<sbyte> VectorTableLookupExtension(Vector128<sbyte> defaultValues, Vector128<sbyte> table, Vector128<sbyte> byteIndexes) => VectorTableLookupExtension(defaultValues, table, byteIndexes);
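        // Illustrative sketch (not part of the original source): VectorTableLookup selects one
        // byte of 'table' for each byte of 'byteIndexes'; an out-of-range index (>= 16) yields 0
        // (TBL), while VectorTableLookupExtension instead keeps the corresponding byte of
        // 'defaultValues' (TBX). A hypothetical byte reversal, assuming a local 'input':
        //   Vector128<byte> indices = Vector128.Create((byte)15, 14, /* ... down to ... */ 0);
        //   Vector128<byte> reversed = AdvSimd.Arm64.VectorTableLookup(input, indices);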
        /// <summary>
        /// uint8x8_t vzip2_u8(uint8x8_t a, uint8x8_t b)
        /// A64: ZIP2 Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<byte> ZipHigh(Vector64<byte> left, Vector64<byte> right) => ZipHigh(left, right);
        /// <summary>
        /// int16x4_t vzip2_s16(int16x4_t a, int16x4_t b)
        /// A64: ZIP2 Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<short> ZipHigh(Vector64<short> left, Vector64<short> right) => ZipHigh(left, right);
        /// <summary>
        /// int32x2_t vzip2_s32(int32x2_t a, int32x2_t b)
        /// A64: ZIP2 Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<int> ZipHigh(Vector64<int> left, Vector64<int> right) => ZipHigh(left, right);
        /// <summary>
        /// int8x8_t vzip2_s8(int8x8_t a, int8x8_t b)
        /// A64: ZIP2 Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<sbyte> ZipHigh(Vector64<sbyte> left, Vector64<sbyte> right) => ZipHigh(left, right);
        /// <summary>
        /// float32x2_t vzip2_f32(float32x2_t a, float32x2_t b)
        /// A64: ZIP2 Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<float> ZipHigh(Vector64<float> left, Vector64<float> right) => ZipHigh(left, right);
        /// <summary>
        /// uint16x4_t vzip2_u16(uint16x4_t a, uint16x4_t b)
        /// A64: ZIP2 Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<ushort> ZipHigh(Vector64<ushort> left, Vector64<ushort> right) => ZipHigh(left, right);
        /// <summary>
        /// uint32x2_t vzip2_u32(uint32x2_t a, uint32x2_t b)
        /// A64: ZIP2 Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<uint> ZipHigh(Vector64<uint> left, Vector64<uint> right) => ZipHigh(left, right);
        /// <summary>
        /// uint8x16_t vzip2q_u8(uint8x16_t a, uint8x16_t b)
        /// A64: ZIP2 Vd.16B, Vn.16B, Vm.16B
        /// </summary>
        public static Vector128<byte> ZipHigh(Vector128<byte> left, Vector128<byte> right) => ZipHigh(left, right);
        /// <summary>
        /// float64x2_t vzip2q_f64(float64x2_t a, float64x2_t b)
        /// A64: ZIP2 Vd.2D, Vn.2D, Vm.2D
        /// </summary>
        public static Vector128<double> ZipHigh(Vector128<double> left, Vector128<double> right) => ZipHigh(left, right);
        /// <summary>
        /// int16x8_t vzip2q_s16(int16x8_t a, int16x8_t b)
        /// A64: ZIP2 Vd.8H, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<short> ZipHigh(Vector128<short> left, Vector128<short> right) => ZipHigh(left, right);
        /// <summary>
        /// int32x4_t vzip2q_s32(int32x4_t a, int32x4_t b)
        /// A64: ZIP2 Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<int> ZipHigh(Vector128<int> left, Vector128<int> right) => ZipHigh(left, right);
        /// <summary>
        /// int64x2_t vzip2q_s64(int64x2_t a, int64x2_t b)
        /// A64: ZIP2 Vd.2D, Vn.2D, Vm.2D
        /// </summary>
        public static Vector128<long> ZipHigh(Vector128<long> left, Vector128<long> right) => ZipHigh(left, right);
        /// <summary>
        /// int8x16_t vzip2q_s8(int8x16_t a, int8x16_t b)
        /// A64: ZIP2 Vd.16B, Vn.16B, Vm.16B
        /// </summary>
        public static Vector128<sbyte> ZipHigh(Vector128<sbyte> left, Vector128<sbyte> right) => ZipHigh(left, right);
        /// <summary>
        /// float32x4_t vzip2q_f32(float32x4_t a, float32x4_t b)
        /// A64: ZIP2 Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<float> ZipHigh(Vector128<float> left, Vector128<float> right) => ZipHigh(left, right);
        /// <summary>
        /// uint16x8_t vzip2q_u16(uint16x8_t a, uint16x8_t b)
        /// A64: ZIP2 Vd.8H, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<ushort> ZipHigh(Vector128<ushort> left, Vector128<ushort> right) => ZipHigh(left, right);
        /// <summary>
        /// uint32x4_t vzip2q_u32(uint32x4_t a, uint32x4_t b)
        /// A64: ZIP2 Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<uint> ZipHigh(Vector128<uint> left, Vector128<uint> right) => ZipHigh(left, right);
        /// <summary>
        /// uint64x2_t vzip2q_u64(uint64x2_t a, uint64x2_t b)
        /// A64: ZIP2 Vd.2D, Vn.2D, Vm.2D
        /// </summary>
        public static Vector128<ulong> ZipHigh(Vector128<ulong> left, Vector128<ulong> right) => ZipHigh(left, right);
        /// <summary>
        /// uint8x8_t vzip1_u8(uint8x8_t a, uint8x8_t b)
        /// A64: ZIP1 Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<byte> ZipLow(Vector64<byte> left, Vector64<byte> right) => ZipLow(left, right);
        /// <summary>
        /// int16x4_t vzip1_s16(int16x4_t a, int16x4_t b)
        /// A64: ZIP1 Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<short> ZipLow(Vector64<short> left, Vector64<short> right) => ZipLow(left, right);
        /// <summary>
        /// int32x2_t vzip1_s32(int32x2_t a, int32x2_t b)
        /// A64: ZIP1 Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<int> ZipLow(Vector64<int> left, Vector64<int> right) => ZipLow(left, right);
        /// <summary>
        /// int8x8_t vzip1_s8(int8x8_t a, int8x8_t b)
        /// A64: ZIP1 Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<sbyte> ZipLow(Vector64<sbyte> left, Vector64<sbyte> right) => ZipLow(left, right);
        /// <summary>
        /// float32x2_t vzip1_f32(float32x2_t a, float32x2_t b)
        /// A64: ZIP1 Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<float> ZipLow(Vector64<float> left, Vector64<float> right) => ZipLow(left, right);
        /// <summary>
        /// uint16x4_t vzip1_u16(uint16x4_t a, uint16x4_t b)
        /// A64: ZIP1 Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<ushort> ZipLow(Vector64<ushort> left, Vector64<ushort> right) => ZipLow(left, right);
        /// <summary>
        /// uint32x2_t vzip1_u32(uint32x2_t a, uint32x2_t b)
        /// A64: ZIP1 Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<uint> ZipLow(Vector64<uint> left, Vector64<uint> right) => ZipLow(left, right);
        /// <summary>
        /// uint8x16_t vzip1q_u8(uint8x16_t a, uint8x16_t b)
        /// A64: ZIP1 Vd.16B, Vn.16B, Vm.16B
        /// </summary>
        public static Vector128<byte> ZipLow(Vector128<byte> left, Vector128<byte> right) => ZipLow(left, right);
        /// <summary>
        /// float64x2_t vzip1q_f64(float64x2_t a, float64x2_t b)
        /// A64: ZIP1 Vd.2D, Vn.2D, Vm.2D
        /// </summary>
        public static Vector128<double> ZipLow(Vector128<double> left, Vector128<double> right) => ZipLow(left, right);
        /// <summary>
        /// int16x8_t vzip1q_s16(int16x8_t a, int16x8_t b)
        /// A64: ZIP1 Vd.8H, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<short> ZipLow(Vector128<short> left, Vector128<short> right) => ZipLow(left, right);
        /// <summary>
        /// int32x4_t vzip1q_s32(int32x4_t a, int32x4_t b)
        /// A64: ZIP1 Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<int> ZipLow(Vector128<int> left, Vector128<int> right) => ZipLow(left, right);
        /// <summary>
        /// int64x2_t vzip1q_s64(int64x2_t a, int64x2_t b)
        /// A64: ZIP1 Vd.2D, Vn.2D, Vm.2D
        /// </summary>
        public static Vector128<long> ZipLow(Vector128<long> left, Vector128<long> right) => ZipLow(left, right);
        /// <summary>
        /// int8x16_t vzip1q_s8(int8x16_t a, int8x16_t b)
        /// A64: ZIP1 Vd.16B, Vn.16B, Vm.16B
        /// </summary>
        public static Vector128<sbyte> ZipLow(Vector128<sbyte> left, Vector128<sbyte> right) => ZipLow(left, right);
        /// <summary>
        /// float32x4_t vzip1q_f32(float32x4_t a, float32x4_t b)
        /// A64: ZIP1 Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<float> ZipLow(Vector128<float> left, Vector128<float> right) => ZipLow(left, right);
        /// <summary>
        /// uint16x8_t vzip1q_u16(uint16x8_t a, uint16x8_t b)
        /// A64: ZIP1 Vd.8H, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<ushort> ZipLow(Vector128<ushort> left, Vector128<ushort> right) => ZipLow(left, right);
        /// <summary>
        /// uint32x4_t vzip1q_u32(uint32x4_t a, uint32x4_t b)
        /// A64: ZIP1 Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<uint> ZipLow(Vector128<uint> left, Vector128<uint> right) => ZipLow(left, right);
        /// <summary>
        /// uint64x2_t vzip1q_u64(uint64x2_t a, uint64x2_t b)
        /// A64: ZIP1 Vd.2D, Vn.2D, Vm.2D
        /// </summary>
        public static Vector128<ulong> ZipLow(Vector128<ulong> left, Vector128<ulong> right) => ZipLow(left, right);
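        // Illustrative sketch (not part of the original source): ZipLow/ZipHigh implement
        // ZIP1/ZIP2, interleaving the low or high halves of two vectors. For byte vectors
        //   ZipLow(left, right)  == { l0, r0, l1, r1, ... }   // ZIP1: lanes from the low halves
        //   ZipHigh(left, right) == { l8, r8, l9, r9, ... }   // ZIP2: lanes from the high halves
        // ZipLow/ZipHigh together undo what UnzipEven/UnzipOdd do on the same data.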
    }

    /// <summary>
    /// int16x4_t vabs_s16 (int16x4_t a)
    /// A32: VABS.S16 Dd, Dm
    /// A64: ABS Vd.4H, Vn.4H
    /// </summary>
    public static Vector64<ushort> Abs(Vector64<short> value) => Abs(value);
    /// <summary>
    /// int32x2_t vabs_s32 (int32x2_t a)
    /// A32: VABS.S32 Dd, Dm
    /// A64: ABS Vd.2S, Vn.2S
    /// </summary>
    public static Vector64<uint> Abs(Vector64<int> value) => Abs(value);
    /// <summary>
    /// int8x8_t vabs_s8 (int8x8_t a)
    /// A32: VABS.S8 Dd, Dm
    /// A64: ABS Vd.8B, Vn.8B
    /// </summary>
    public static Vector64<byte> Abs(Vector64<sbyte> value) => Abs(value);
    /// <summary>
    /// float32x2_t vabs_f32 (float32x2_t a)
    /// A32: VABS.F32 Dd, Dm
    /// A64: FABS Vd.2S, Vn.2S
    /// </summary>
    public static Vector64<float> Abs(Vector64<float> value) => Abs(value);
    /// <summary>
    /// int16x8_t vabsq_s16 (int16x8_t a)
    /// A32: VABS.S16 Qd, Qm
    /// A64: ABS Vd.8H, Vn.8H
    /// </summary>
    public static Vector128<ushort> Abs(Vector128<short> value) => Abs(value);
    /// <summary>
    /// int32x4_t vabsq_s32 (int32x4_t a)
    /// A32: VABS.S32 Qd, Qm
    /// A64: ABS Vd.4S, Vn.4S
    /// </summary>
    public static Vector128<uint> Abs(Vector128<int> value) => Abs(value);
    /// <summary>
    /// int8x16_t vabsq_s8 (int8x16_t a)
    /// A32: VABS.S8 Qd, Qm
    /// A64: ABS Vd.16B, Vn.16B
    /// </summary>
    public static Vector128<byte> Abs(Vector128<sbyte> value) => Abs(value);
    /// <summary>
    /// float32x4_t vabsq_f32 (float32x4_t a)
    /// A32: VABS.F32 Qd, Qm
    /// A64: FABS Vd.4S, Vn.4S
    /// </summary>
    public static Vector128<float> Abs(Vector128<float> value) => Abs(value);
    /// <summary>
    /// int16x4_t vqabs_s16 (int16x4_t a)
    /// A32: VQABS.S16 Dd, Dm
    /// A64: SQABS Vd.4H, Vn.4H
    /// </summary>
    public static Vector64<short> AbsSaturate(Vector64<short> value) => AbsSaturate(value);
    /// <summary>
    /// int32x2_t vqabs_s32 (int32x2_t a)
    /// A32: VQABS.S32 Dd, Dm
    /// A64: SQABS Vd.2S, Vn.2S
    /// </summary>
    public static Vector64<int> AbsSaturate(Vector64<int> value) => AbsSaturate(value);
    /// <summary>
    /// int8x8_t vqabs_s8 (int8x8_t a)
    /// A32: VQABS.S8 Dd, Dm
    /// A64: SQABS Vd.8B, Vn.8B
    /// </summary>
    public static Vector64<sbyte> AbsSaturate(Vector64<sbyte> value) => AbsSaturate(value);
    /// <summary>
    /// int16x8_t vqabsq_s16 (int16x8_t a)
    /// A32: VQABS.S16 Qd, Qm
    /// A64: SQABS Vd.8H, Vn.8H
    /// </summary>
    public static Vector128<short> AbsSaturate(Vector128<short> value) => AbsSaturate(value);
    /// <summary>
    /// int32x4_t vqabsq_s32 (int32x4_t a)
    /// A32: VQABS.S32 Qd, Qm
    /// A64: SQABS Vd.4S, Vn.4S
    /// </summary>
    public static Vector128<int> AbsSaturate(Vector128<int> value) => AbsSaturate(value);
    /// <summary>
    /// int8x16_t vqabsq_s8 (int8x16_t a)
    /// A32: VQABS.S8 Qd, Qm
    /// A64: SQABS Vd.16B, Vn.16B
    /// </summary>
    public static Vector128<sbyte> AbsSaturate(Vector128<sbyte> value) => AbsSaturate(value);
    /// <summary>
    /// float64x1_t vabs_f64 (float64x1_t a)
    /// A32: VABS.F64 Dd, Dm
    /// A64: FABS Dd, Dn
    /// </summary>
    public static Vector64<double> AbsScalar(Vector64<double> value) => AbsScalar(value);
    /// <summary>
    /// float32_t vabss_f32 (float32_t a)
    /// A32: VABS.F32 Sd, Sm
    /// A64: FABS Sd, Sn
    /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
    /// </summary>
    public static Vector64<float> AbsScalar(Vector64<float> value) => AbsScalar(value);
    /// <summary>
    /// uint32x2_t vcagt_f32 (float32x2_t a, float32x2_t b)
    /// A32: VACGT.F32 Dd, Dn, Dm
    /// A64: FACGT Vd.2S, Vn.2S, Vm.2S
    /// </summary>
    public static Vector64<float> AbsoluteCompareGreaterThan(Vector64<float> left, Vector64<float> right) => AbsoluteCompareGreaterThan(left, right);
    /// <summary>
    /// uint32x4_t vcagtq_f32 (float32x4_t a, float32x4_t b)
    /// A32: VACGT.F32 Qd, Qn, Qm
    /// A64: FACGT Vd.4S, Vn.4S, Vm.4S
    /// </summary>
    public static Vector128<float> AbsoluteCompareGreaterThan(Vector128<float> left, Vector128<float> right) => AbsoluteCompareGreaterThan(left, right);
    /// <summary>
    /// uint32x2_t vcage_f32 (float32x2_t a, float32x2_t b)
    /// A32: VACGE.F32 Dd, Dn, Dm
    /// A64: FACGE Vd.2S, Vn.2S, Vm.2S
    /// </summary>
    public static Vector64<float> AbsoluteCompareGreaterThanOrEqual(Vector64<float> left, Vector64<float> right) => AbsoluteCompareGreaterThanOrEqual(left, right);
    /// <summary>
    /// uint32x4_t vcageq_f32 (float32x4_t a, float32x4_t b)
    /// A32: VACGE.F32 Qd, Qn, Qm
    /// A64: FACGE Vd.4S, Vn.4S, Vm.4S
    /// </summary>
    public static Vector128<float> AbsoluteCompareGreaterThanOrEqual(Vector128<float> left, Vector128<float> right) => AbsoluteCompareGreaterThanOrEqual(left, right);
    /// <summary>
    /// uint32x2_t vcalt_f32 (float32x2_t a, float32x2_t b)
    /// A32: VACLT.F32 Dd, Dn, Dm
    /// A64: FACGT Vd.2S, Vn.2S, Vm.2S
    /// </summary>
    public static Vector64<float> AbsoluteCompareLessThan(Vector64<float> left, Vector64<float> right) => AbsoluteCompareLessThan(left, right);
    /// <summary>
    /// uint32x4_t vcaltq_f32 (float32x4_t a, float32x4_t b)
    /// A32: VACLT.F32 Qd, Qn, Qm
    /// A64: FACGT Vd.4S, Vn.4S, Vm.4S
    /// </summary>
    public static Vector128<float> AbsoluteCompareLessThan(Vector128<float> left, Vector128<float> right) => AbsoluteCompareLessThan(left, right);
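    // Illustrative note (not part of the original source): the AbsoluteCompare* family
    // compares magnitudes, i.e. AbsoluteCompareGreaterThan(a, b) tests |a| > |b| per lane
    // and produces an all-ones or all-zeros mask in that lane, e.g.
    //   |-3.0f| > |2.0f|  ->  lane mask 0xFFFFFFFF
    // The LessThan/LessThanOrEqual forms map onto A64 FACGT/FACGE with the operands
    // swapped, which is why their A64 lines above name the "greater" instructions.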
    /// <summary>
    /// uint32x2_t vcale_f32 (float32x2_t a, float32x2_t b)
    /// A32: VACLE.F32 Dd, Dn, Dm
    /// A64: FACGE Vd.2S, Vn.2S, Vm.2S
    /// </summary>
    public static Vector64<float> AbsoluteCompareLessThanOrEqual(Vector64<float> left, Vector64<float> right) => AbsoluteCompareLessThanOrEqual(left, right);
    /// <summary>
    /// uint32x4_t vcaleq_f32 (float32x4_t a, float32x4_t b)
    /// A32: VACLE.F32 Qd, Qn, Qm
    /// A64: FACGE Vd.4S, Vn.4S, Vm.4S
    /// </summary>
    public static Vector128<float> AbsoluteCompareLessThanOrEqual(Vector128<float> left, Vector128<float> right) => AbsoluteCompareLessThanOrEqual(left, right);
    /// <summary>
    /// uint8x8_t vabd_u8 (uint8x8_t a, uint8x8_t b)
    /// A32: VABD.U8 Dd, Dn, Dm
    /// A64: UABD Vd.8B, Vn.8B, Vm.8B
    /// </summary>
    public static Vector64<byte> AbsoluteDifference(Vector64<byte> left, Vector64<byte> right) => AbsoluteDifference(left, right);
    /// <summary>
    /// int16x4_t vabd_s16 (int16x4_t a, int16x4_t b)
    /// A32: VABD.S16 Dd, Dn, Dm
    /// A64: SABD Vd.4H, Vn.4H, Vm.4H
    /// </summary>
    public static Vector64<ushort> AbsoluteDifference(Vector64<short> left, Vector64<short> right) => AbsoluteDifference(left, right);
    /// <summary>
    /// int32x2_t vabd_s32 (int32x2_t a, int32x2_t b)
    /// A32: VABD.S32 Dd, Dn, Dm
    /// A64: SABD Vd.2S, Vn.2S, Vm.2S
    /// </summary>
    public static Vector64<uint> AbsoluteDifference(Vector64<int> left, Vector64<int> right) => AbsoluteDifference(left, right);
    /// <summary>
    /// int8x8_t vabd_s8 (int8x8_t a, int8x8_t b)
    /// A32: VABD.S8 Dd, Dn, Dm
    /// A64: SABD Vd.8B, Vn.8B, Vm.8B
    /// </summary>
    public static Vector64<byte> AbsoluteDifference(Vector64<sbyte> left, Vector64<sbyte> right) => AbsoluteDifference(left, right);
    /// <summary>
    /// float32x2_t vabd_f32 (float32x2_t a, float32x2_t b)
    /// A32: VABD.F32 Dd, Dn, Dm
    /// A64: FABD Vd.2S, Vn.2S, Vm.2S
    /// </summary>
    public static Vector64<float> AbsoluteDifference(Vector64<float> left, Vector64<float> right) => AbsoluteDifference(left, right);
    /// <summary>
    /// uint16x4_t vabd_u16 (uint16x4_t a, uint16x4_t b)
    /// A32: VABD.U16 Dd, Dn, Dm
    /// A64: UABD Vd.4H, Vn.4H, Vm.4H
    /// </summary>
    public static Vector64<ushort> AbsoluteDifference(Vector64<ushort> left, Vector64<ushort> right) => AbsoluteDifference(left, right);
    /// <summary>
    /// uint32x2_t vabd_u32 (uint32x2_t a, uint32x2_t b)
    /// A32: VABD.U32 Dd, Dn, Dm
    /// A64: UABD Vd.2S, Vn.2S, Vm.2S
    /// </summary>
    public static Vector64<uint> AbsoluteDifference(Vector64<uint> left, Vector64<uint> right) => AbsoluteDifference(left, right);
    /// <summary>
    /// uint8x16_t vabdq_u8 (uint8x16_t a, uint8x16_t b)
    /// A32: VABD.U8 Qd, Qn, Qm
    /// A64: UABD Vd.16B, Vn.16B, Vm.16B
    /// </summary>
    public static Vector128<byte> AbsoluteDifference(Vector128<byte> left, Vector128<byte> right) => AbsoluteDifference(left, right);
    /// <summary>
    /// int16x8_t vabdq_s16 (int16x8_t a, int16x8_t b)
    /// A32: VABD.S16 Qd, Qn, Qm
    /// A64: SABD Vd.8H, Vn.8H, Vm.8H
    /// </summary>
    public static Vector128<ushort> AbsoluteDifference(Vector128<short> left, Vector128<short> right) => AbsoluteDifference(left, right);
    /// <summary>
    /// int32x4_t vabdq_s32 (int32x4_t a, int32x4_t b)
    /// A32: VABD.S32 Qd, Qn, Qm
    /// A64: SABD Vd.4S, Vn.4S, Vm.4S
    /// </summary>
    public static Vector128<uint> AbsoluteDifference(Vector128<int> left, Vector128<int> right) => AbsoluteDifference(left, right);
    /// <summary>
    /// int8x16_t vabdq_s8 (int8x16_t a, int8x16_t b)
    /// A32: VABD.S8 Qd, Qn, Qm
    /// A64: SABD Vd.16B, Vn.16B, Vm.16B
    /// </summary>
    public static Vector128<byte> AbsoluteDifference(Vector128<sbyte> left, Vector128<sbyte> right) => AbsoluteDifference(left, right);
    /// <summary>
    /// float32x4_t vabdq_f32 (float32x4_t a, float32x4_t b)
    /// A32: VABD.F32 Qd, Qn, Qm
    /// A64: FABD Vd.4S, Vn.4S, Vm.4S
    /// </summary>
    public static Vector128<float> AbsoluteDifference(Vector128<float> left, Vector128<float> right) => AbsoluteDifference(left, right);
    /// <summary>
    /// uint16x8_t vabdq_u16 (uint16x8_t a, uint16x8_t b)
    /// A32: VABD.U16 Qd, Qn, Qm
    /// A64: UABD Vd.8H, Vn.8H, Vm.8H
    /// </summary>
    public static Vector128<ushort> AbsoluteDifference(Vector128<ushort> left, Vector128<ushort> right) => AbsoluteDifference(left, right);
    /// <summary>
    /// uint32x4_t vabdq_u32 (uint32x4_t a, uint32x4_t b)
    /// A32: VABD.U32 Qd, Qn, Qm
    /// A64: UABD Vd.4S, Vn.4S, Vm.4S
    /// </summary>
    public static Vector128<uint> AbsoluteDifference(Vector128<uint> left, Vector128<uint> right) => AbsoluteDifference(left, right);
    /// <summary>
    /// uint8x8_t vaba_u8 (uint8x8_t a, uint8x8_t b, uint8x8_t c)
    /// A32: VABA.U8 Dd, Dn, Dm
    /// A64: UABA Vd.8B, Vn.8B, Vm.8B
    /// </summary>
    public static Vector64<byte> AbsoluteDifferenceAdd(Vector64<byte> addend, Vector64<byte> left, Vector64<byte> right) => AbsoluteDifferenceAdd(addend, left, right);
    /// <summary>
    /// int16x4_t vaba_s16 (int16x4_t a, int16x4_t b, int16x4_t c)
    /// A32: VABA.S16 Dd, Dn, Dm
    /// A64: SABA Vd.4H, Vn.4H, Vm.4H
    /// </summary>
    public static Vector64<short> AbsoluteDifferenceAdd(Vector64<short> addend, Vector64<short> left, Vector64<short> right) => AbsoluteDifferenceAdd(addend, left, right);
    /// <summary>
    /// int32x2_t vaba_s32 (int32x2_t a, int32x2_t b, int32x2_t c)
    /// A32: VABA.S32 Dd, Dn, Dm
    /// A64: SABA Vd.2S, Vn.2S, Vm.2S
    /// </summary>
    public static Vector64<int> AbsoluteDifferenceAdd(Vector64<int> addend, Vector64<int> left, Vector64<int> right) => AbsoluteDifferenceAdd(addend, left, right);
    /// <summary>
    /// int8x8_t vaba_s8 (int8x8_t a, int8x8_t b, int8x8_t c)
    /// A32: VABA.S8 Dd, Dn, Dm
    /// A64: SABA Vd.8B, Vn.8B, Vm.8B
    /// </summary>
    public static Vector64<sbyte> AbsoluteDifferenceAdd(Vector64<sbyte> addend, Vector64<sbyte> left, Vector64<sbyte> right) => AbsoluteDifferenceAdd(addend, left, right);
    /// <summary>
    /// uint16x4_t vaba_u16 (uint16x4_t a, uint16x4_t b, uint16x4_t c)
    /// A32: VABA.U16 Dd, Dn, Dm
    /// A64: UABA Vd.4H, Vn.4H, Vm.4H
    /// </summary>
    public static Vector64<ushort> AbsoluteDifferenceAdd(Vector64<ushort> addend, Vector64<ushort> left, Vector64<ushort> right) => AbsoluteDifferenceAdd(addend, left, right);
    /// <summary>
    /// uint32x2_t vaba_u32 (uint32x2_t a, uint32x2_t b, uint32x2_t c)
    /// A32: VABA.U32 Dd, Dn, Dm
    /// A64: UABA Vd.2S, Vn.2S, Vm.2S
    /// </summary>
    public static Vector64<uint> AbsoluteDifferenceAdd(Vector64<uint> addend, Vector64<uint> left, Vector64<uint> right) => AbsoluteDifferenceAdd(addend, left, right);
    /// <summary>
    /// uint8x16_t vabaq_u8 (uint8x16_t a, uint8x16_t b, uint8x16_t c)
    /// A32: VABA.U8 Qd, Qn, Qm
    /// A64: UABA Vd.16B, Vn.16B, Vm.16B
    /// </summary>
    public static Vector128<byte> AbsoluteDifferenceAdd(Vector128<byte> addend, Vector128<byte> left, Vector128<byte> right) => AbsoluteDifferenceAdd(addend, left, right);
    /// <summary>
    /// int16x8_t vabaq_s16 (int16x8_t a, int16x8_t b, int16x8_t c)
    /// A32: VABA.S16 Qd, Qn, Qm
    /// A64: SABA Vd.8H, Vn.8H, Vm.8H
    /// </summary>
    public static Vector128<short> AbsoluteDifferenceAdd(Vector128<short> addend, Vector128<short> left, Vector128<short> right) => AbsoluteDifferenceAdd(addend, left, right);
    /// <summary>
    /// int32x4_t vabaq_s32 (int32x4_t a, int32x4_t b, int32x4_t c)
    /// A32: VABA.S32 Qd, Qn, Qm
    /// A64: SABA Vd.4S, Vn.4S, Vm.4S
    /// </summary>
    public static Vector128<int> AbsoluteDifferenceAdd(Vector128<int> addend, Vector128<int> left, Vector128<int> right) => AbsoluteDifferenceAdd(addend, left, right);
    /// <summary>
    /// int8x16_t vabaq_s8 (int8x16_t a, int8x16_t b, int8x16_t c)
    /// A32: VABA.S8 Qd, Qn, Qm
    /// A64: SABA Vd.16B, Vn.16B, Vm.16B
    /// </summary>
    public static Vector128<sbyte> AbsoluteDifferenceAdd(Vector128<sbyte> addend, Vector128<sbyte> left, Vector128<sbyte> right) => AbsoluteDifferenceAdd(addend, left, right);
    /// <summary>
    /// uint16x8_t vabaq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t c)
    /// A32: VABA.U16 Qd, Qn, Qm
    /// A64: UABA Vd.8H, Vn.8H, Vm.8H
    /// </summary>
    public static Vector128<ushort> AbsoluteDifferenceAdd(Vector128<ushort> addend, Vector128<ushort> left, Vector128<ushort> right) => AbsoluteDifferenceAdd(addend, left, right);
    /// <summary>
    /// uint32x4_t vabaq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t c)
    /// A32: VABA.U32 Qd, Qn, Qm
    /// A64: UABA Vd.4S, Vn.4S, Vm.4S
    /// </summary>
    public static Vector128<uint> AbsoluteDifferenceAdd(Vector128<uint> addend, Vector128<uint> left, Vector128<uint> right) => AbsoluteDifferenceAdd(addend, left, right);
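    // Illustrative sketch (not part of the original source): AbsoluteDifferenceAdd computes
    // addend + |left - right| per lane, the building block of a sum-of-absolute-differences
    // (SAD) kernel. A hypothetical accumulation step, assuming locals acc, blockA, blockB:
    //   acc = AdvSimd.AbsoluteDifferenceAdd(acc, blockA, blockB);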
    /// <summary>
    /// uint16x8_t vabdl_u8 (uint8x8_t a, uint8x8_t b)
    /// A32: VABDL.U8 Qd, Dn, Dm
    /// A64: UABDL Vd.8H, Vn.8B, Vm.8B
    /// </summary>
    public static Vector128<ushort> AbsoluteDifferenceWideningLower(Vector64<byte> left, Vector64<byte> right) => AbsoluteDifferenceWideningLower(left, right);
    /// <summary>
    /// int32x4_t vabdl_s16 (int16x4_t a, int16x4_t b)
    /// A32: VABDL.S16 Qd, Dn, Dm
    /// A64: SABDL Vd.4S, Vn.4H, Vm.4H
    /// </summary>
    public static Vector128<uint> AbsoluteDifferenceWideningLower(Vector64<short> left, Vector64<short> right) => AbsoluteDifferenceWideningLower(left, right);
    /// <summary>
    /// int64x2_t vabdl_s32 (int32x2_t a, int32x2_t b)
    /// A32: VABDL.S32 Qd, Dn, Dm
    /// A64: SABDL Vd.2D, Vn.2S, Vm.2S
    /// </summary>
    public static Vector128<ulong> AbsoluteDifferenceWideningLower(Vector64<int> left, Vector64<int> right) => AbsoluteDifferenceWideningLower(left, right);
    /// <summary>
    /// int16x8_t vabdl_s8 (int8x8_t a, int8x8_t b)
    /// A32: VABDL.S8 Qd, Dn, Dm
    /// A64: SABDL Vd.8H, Vn.8B, Vm.8B
    /// </summary>
    public static Vector128<ushort> AbsoluteDifferenceWideningLower(Vector64<sbyte> left, Vector64<sbyte> right) => AbsoluteDifferenceWideningLower(left, right);
    /// <summary>
    /// uint32x4_t vabdl_u16 (uint16x4_t a, uint16x4_t b)
    /// A32: VABDL.U16 Qd, Dn, Dm
    /// A64: UABDL Vd.4S, Vn.4H, Vm.4H
    /// </summary>
    public static Vector128<uint> AbsoluteDifferenceWideningLower(Vector64<ushort> left, Vector64<ushort> right) => AbsoluteDifferenceWideningLower(left, right);
    /// <summary>
    /// uint64x2_t vabdl_u32 (uint32x2_t a, uint32x2_t b)
    /// A32: VABDL.U32 Qd, Dn, Dm
    /// A64: UABDL Vd.2D, Vn.2S, Vm.2S
    /// </summary>
    public static Vector128<ulong> AbsoluteDifferenceWideningLower(Vector64<uint> left, Vector64<uint> right) => AbsoluteDifferenceWideningLower(left, right);
    /// <summary>
    /// uint16x8_t vabal_u8 (uint16x8_t a, uint8x8_t b, uint8x8_t c)
    /// A32: VABAL.U8 Qd, Dn, Dm
    /// A64: UABAL Vd.8H, Vn.8B, Vm.8B
    /// </summary>
    public static Vector128<ushort> AbsoluteDifferenceWideningLowerAndAdd(Vector128<ushort> addend, Vector64<byte> left, Vector64<byte> right) => AbsoluteDifferenceWideningLowerAndAdd(addend, left, right);
    /// <summary>
    /// int32x4_t vabal_s16 (int32x4_t a, int16x4_t b, int16x4_t c)
    /// A32: VABAL.S16 Qd, Dn, Dm
    /// A64: SABAL Vd.4S, Vn.4H, Vm.4H
    /// </summary>
    public static Vector128<int> AbsoluteDifferenceWideningLowerAndAdd(Vector128<int> addend, Vector64<short> left, Vector64<short> right) => AbsoluteDifferenceWideningLowerAndAdd(addend, left, right);
    /// <summary>
    /// int64x2_t vabal_s32 (int64x2_t a, int32x2_t b, int32x2_t c)
    /// A32: VABAL.S32 Qd, Dn, Dm
    /// A64: SABAL Vd.2D, Vn.2S, Vm.2S
    /// </summary>
    public static Vector128<long> AbsoluteDifferenceWideningLowerAndAdd(Vector128<long> addend, Vector64<int> left, Vector64<int> right) => AbsoluteDifferenceWideningLowerAndAdd(addend, left, right);
    /// <summary>
    /// int16x8_t vabal_s8 (int16x8_t a, int8x8_t b, int8x8_t c)
    /// A32: VABAL.S8 Qd, Dn, Dm
    /// A64: SABAL Vd.8H, Vn.8B, Vm.8B
    /// </summary>
    public static Vector128<short> AbsoluteDifferenceWideningLowerAndAdd(Vector128<short> addend, Vector64<sbyte> left, Vector64<sbyte> right) => AbsoluteDifferenceWideningLowerAndAdd(addend, left, right);
    /// <summary>
    /// uint32x4_t vabal_u16 (uint32x4_t a, uint16x4_t b, uint16x4_t c)
    /// A32: VABAL.U16 Qd, Dn, Dm
    /// A64: UABAL Vd.4S, Vn.4H, Vm.4H
    /// </summary>
    public static Vector128<uint> AbsoluteDifferenceWideningLowerAndAdd(Vector128<uint> addend, Vector64<ushort> left, Vector64<ushort> right) => AbsoluteDifferenceWideningLowerAndAdd(addend, left, right);
    /// <summary>
    /// uint64x2_t vabal_u32 (uint64x2_t a, uint32x2_t b, uint32x2_t c)
    /// A32: VABAL.U32 Qd, Dn, Dm
    /// A64: UABAL Vd.2D, Vn.2S, Vm.2S
    /// </summary>
    public static Vector128<ulong> AbsoluteDifferenceWideningLowerAndAdd(Vector128<ulong> addend, Vector64<uint> left, Vector64<uint> right) => AbsoluteDifferenceWideningLowerAndAdd(addend, left, right);
    /// <summary>
    /// uint16x8_t vabdl_high_u8 (uint8x16_t a, uint8x16_t b)
    /// A32: VABDL.U8 Qd, Dn+1, Dm+1
    /// A64: UABDL2 Vd.8H, Vn.16B, Vm.16B
    /// </summary>
    public static Vector128<ushort> AbsoluteDifferenceWideningUpper(Vector128<byte> left, Vector128<byte> right) => AbsoluteDifferenceWideningUpper(left, right);
    /// <summary>
    /// int32x4_t vabdl_high_s16 (int16x8_t a, int16x8_t b)
    /// A32: VABDL.S16 Qd, Dn+1, Dm+1
    /// A64: SABDL2 Vd.4S, Vn.8H, Vm.8H
    /// </summary>
    public static Vector128<uint> AbsoluteDifferenceWideningUpper(Vector128<short> left, Vector128<short> right) => AbsoluteDifferenceWideningUpper(left, right);
    /// <summary>
    /// int64x2_t vabdl_high_s32 (int32x4_t a, int32x4_t b)
    /// A32: VABDL.S32 Qd, Dn+1, Dm+1
    /// A64: SABDL2 Vd.2D, Vn.4S, Vm.4S
    /// </summary>
    public static Vector128<ulong> AbsoluteDifferenceWideningUpper(Vector128<int> left, Vector128<int> right) => AbsoluteDifferenceWideningUpper(left, right);
    /// <summary>
    /// int16x8_t vabdl_high_s8 (int8x16_t a, int8x16_t b)
    /// A32: VABDL.S8 Qd, Dn+1, Dm+1
    /// A64: SABDL2 Vd.8H, Vn.16B, Vm.16B
    /// </summary>
    public static Vector128<ushort> AbsoluteDifferenceWideningUpper(Vector128<sbyte> left, Vector128<sbyte> right) => AbsoluteDifferenceWideningUpper(left, right);
    /// <summary>
    /// uint32x4_t vabdl_high_u16 (uint16x8_t a, uint16x8_t b)
    /// A32: VABDL.U16 Qd, Dn+1, Dm+1
    /// A64: UABDL2 Vd.4S, Vn.8H, Vm.8H
    /// </summary>
    public static Vector128<uint> AbsoluteDifferenceWideningUpper(Vector128<ushort> left, Vector128<ushort> right) => AbsoluteDifferenceWideningUpper(left, right);
    /// <summary>
    /// uint64x2_t vabdl_high_u32 (uint32x4_t a, uint32x4_t b)
    /// A32: VABDL.U32 Qd, Dn+1, Dm+1
    /// A64: UABDL2 Vd.2D, Vn.4S, Vm.4S
    /// </summary>
    public static Vector128<ulong> AbsoluteDifferenceWideningUpper(Vector128<uint> left, Vector128<uint> right) => AbsoluteDifferenceWideningUpper(left, right);
    /// <summary>
    /// uint16x8_t vabal_high_u8 (uint16x8_t a, uint8x16_t b, uint8x16_t c)
    /// A32: VABAL.U8 Qd, Dn+1, Dm+1
    /// A64: UABAL2 Vd.8H, Vn.16B, Vm.16B
    /// </summary>
    public static Vector128<ushort> AbsoluteDifferenceWideningUpperAndAdd(Vector128<ushort> addend, Vector128<byte> left, Vector128<byte> right) => AbsoluteDifferenceWideningUpperAndAdd(addend, left, right);
    /// <summary>
    /// int32x4_t vabal_high_s16 (int32x4_t a, int16x8_t b, int16x8_t c)
    /// A32: VABAL.S16 Qd, Dn+1, Dm+1
    /// A64: SABAL2 Vd.4S, Vn.8H, Vm.8H
    /// </summary>
    public static Vector128<int> AbsoluteDifferenceWideningUpperAndAdd(Vector128<int> addend, Vector128<short> left, Vector128<short> right) => AbsoluteDifferenceWideningUpperAndAdd(addend, left, right);
    /// <summary>
    /// int64x2_t vabal_high_s32 (int64x2_t a, int32x4_t b, int32x4_t c)
    /// A32: VABAL.S32 Qd, Dn+1, Dm+1
    /// A64: SABAL2 Vd.2D, Vn.4S, Vm.4S
    /// </summary>
    public static Vector128<long> AbsoluteDifferenceWideningUpperAndAdd(Vector128<long> addend, Vector128<int> left, Vector128<int> right) => AbsoluteDifferenceWideningUpperAndAdd(addend, left, right);
    /// <summary>
    /// int16x8_t vabal_high_s8 (int16x8_t a, int8x16_t b, int8x16_t c)
    /// A32: VABAL.S8 Qd, Dn+1, Dm+1
    /// A64: SABAL2 Vd.8H, Vn.16B, Vm.16B
    /// </summary>
    public static Vector128<short> AbsoluteDifferenceWideningUpperAndAdd(Vector128<short> addend, Vector128<sbyte> left, Vector128<sbyte> right) => AbsoluteDifferenceWideningUpperAndAdd(addend, left, right);
    /// <summary>
    /// uint32x4_t vabal_high_u16 (uint32x4_t a, uint16x8_t b, uint16x8_t c)
    /// A32: VABAL.U16 Qd, Dn+1, Dm+1
    /// A64: UABAL2 Vd.4S, Vn.8H, Vm.8H
    /// </summary>
    public static Vector128<uint> AbsoluteDifferenceWideningUpperAndAdd(Vector128<uint> addend, Vector128<ushort> left, Vector128<ushort> right) => AbsoluteDifferenceWideningUpperAndAdd(addend, left, right);
    /// <summary>
    /// uint64x2_t vabal_high_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t c)
    /// A32: VABAL.U32 Qd, Dn+1, Dm+1
    /// A64: UABAL2 Vd.2D, Vn.4S, Vm.4S
    /// </summary>
    public static Vector128<ulong> AbsoluteDifferenceWideningUpperAndAdd(Vector128<ulong> addend, Vector128<uint> left, Vector128<uint> right) => AbsoluteDifferenceWideningUpperAndAdd(addend, left, right);
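    // Illustrative note (not part of the original source): in the widening families, "Lower"
    // consumes 64-bit source vectors and "Upper" consumes the upper halves of 128-bit sources
    // (the A64 *2 instruction forms), so a full Vector128<byte> pair can be processed as one
    // WideningLower call on the GetLower() halves plus one WideningUpper call on the full vectors.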
    /// <summary>
    /// uint8x8_t vadd_u8 (uint8x8_t a, uint8x8_t b)
    /// A32: VADD.I8 Dd, Dn, Dm
    /// A64: ADD Vd.8B, Vn.8B, Vm.8B
    /// </summary>
    public static Vector64<byte> Add(Vector64<byte> left, Vector64<byte> right) => Add(left, right);
    /// <summary>
    /// int16x4_t vadd_s16 (int16x4_t a, int16x4_t b)
    /// A32: VADD.I16 Dd, Dn, Dm
    /// A64: ADD Vd.4H, Vn.4H, Vm.4H
    /// </summary>
    public static Vector64<short> Add(Vector64<short> left, Vector64<short> right) => Add(left, right);
    /// <summary>
    /// int32x2_t vadd_s32 (int32x2_t a, int32x2_t b)
    /// A32: VADD.I32 Dd, Dn, Dm
    /// A64: ADD Vd.2S, Vn.2S, Vm.2S
    /// </summary>
    public static Vector64<int> Add(Vector64<int> left, Vector64<int> right) => Add(left, right);
    /// <summary>
    /// int8x8_t vadd_s8 (int8x8_t a, int8x8_t b)
    /// A32: VADD.I8 Dd, Dn, Dm
    /// A64: ADD Vd.8B, Vn.8B, Vm.8B
    /// </summary>
    public static Vector64<sbyte> Add(Vector64<sbyte> left, Vector64<sbyte> right) => Add(left, right);
    /// <summary>
    /// float32x2_t vadd_f32 (float32x2_t a, float32x2_t b)
    /// A32: VADD.F32 Dd, Dn, Dm
    /// A64: FADD Vd.2S, Vn.2S, Vm.2S
    /// </summary>
    public static Vector64<float> Add(Vector64<float> left, Vector64<float> right) => Add(left, right);
    /// <summary>
    /// uint16x4_t vadd_u16 (uint16x4_t a, uint16x4_t b)
    /// A32: VADD.I16 Dd, Dn, Dm
    /// A64: ADD Vd.4H, Vn.4H, Vm.4H
    /// </summary>
    public static Vector64<ushort> Add(Vector64<ushort> left, Vector64<ushort> right) => Add(left, right);
    /// <summary>
    /// uint32x2_t vadd_u32 (uint32x2_t a, uint32x2_t b)
    /// A32: VADD.I32 Dd, Dn, Dm
    /// A64: ADD Vd.2S, Vn.2S, Vm.2S
    /// </summary>
    public static Vector64<uint> Add(Vector64<uint> left, Vector64<uint> right) => Add(left, right);
    /// <summary>
    /// uint8x16_t vaddq_u8 (uint8x16_t a, uint8x16_t b)
    /// A32: VADD.I8 Qd, Qn, Qm
    /// A64: ADD Vd.16B, Vn.16B, Vm.16B
    /// </summary>
    public static Vector128<byte> Add(Vector128<byte> left, Vector128<byte> right) => Add(left, right);
    /// <summary>
    /// int16x8_t vaddq_s16 (int16x8_t a, int16x8_t b)
    /// A32: VADD.I16 Qd, Qn, Qm
    /// A64: ADD Vd.8H, Vn.8H, Vm.8H
    /// </summary>
    public static Vector128<short> Add(Vector128<short> left, Vector128<short> right) => Add(left, right);
    /// <summary>
    /// int32x4_t vaddq_s32 (int32x4_t a, int32x4_t b)
    /// A32: VADD.I32 Qd, Qn, Qm
    /// A64: ADD Vd.4S, Vn.4S, Vm.4S
    /// </summary>
    public static Vector128<int> Add(Vector128<int> left, Vector128<int> right) => Add(left, right);
    /// <summary>
    /// int64x2_t vaddq_s64 (int64x2_t a, int64x2_t b)
    /// A32: VADD.I64 Qd, Qn, Qm
    /// A64: ADD Vd.2D, Vn.2D, Vm.2D
    /// </summary>
    public static Vector128<long> Add(Vector128<long> left, Vector128<long> right) => Add(left, right);
    /// <summary>
    /// int8x16_t vaddq_s8 (int8x16_t a, int8x16_t b)
    /// A32: VADD.I8 Qd, Qn, Qm
    /// A64: ADD Vd.16B, Vn.16B, Vm.16B
    /// </summary>
    public static Vector128<sbyte> Add(Vector128<sbyte> left, Vector128<sbyte> right) => Add(left, right);
    /// <summary>
    /// float32x4_t vaddq_f32 (float32x4_t a, float32x4_t b)
    /// A32: VADD.F32 Qd, Qn, Qm
    /// A64: FADD Vd.4S, Vn.4S, Vm.4S
    /// </summary>
    public static Vector128<float> Add(Vector128<float> left, Vector128<float> right) => Add(left, right);
    /// <summary>
    /// uint16x8_t vaddq_u16 (uint16x8_t a, uint16x8_t b)
    /// A32: VADD.I16 Qd, Qn, Qm
    /// A64: ADD Vd.8H, Vn.8H, Vm.8H
    /// </summary>
    public static Vector128<ushort> Add(Vector128<ushort> left, Vector128<ushort> right) => Add(left, right);
    /// <summary>
    /// uint32x4_t vaddq_u32 (uint32x4_t a, uint32x4_t b)
    /// A32: VADD.I32 Qd, Qn, Qm
    /// A64: ADD Vd.4S, Vn.4S, Vm.4S
    /// </summary>
    public static Vector128<uint> Add(Vector128<uint> left, Vector128<uint> right) => Add(left, right);
    /// <summary>
    /// uint64x2_t vaddq_u64 (uint64x2_t a, uint64x2_t b)
    /// A32: VADD.I64 Qd, Qn, Qm
    /// A64: ADD Vd.2D, Vn.2D, Vm.2D
    /// </summary>
    public static Vector128<ulong> Add(Vector128<ulong> left, Vector128<ulong> right) => Add(left, right);
    /// <summary>
    /// uint8x8_t vaddhn_u16 (uint16x8_t a, uint16x8_t b)
    /// A32: VADDHN.I16 Dd, Qn, Qm
    /// A64: ADDHN Vd.8B, Vn.8H, Vm.8H
    /// </summary>
    public static Vector64<byte> AddHighNarrowingLower(Vector128<ushort> left, Vector128<ushort> right) => AddHighNarrowingLower(left, right);
    /// <summary>
    /// int16x4_t vaddhn_s32 (int32x4_t a, int32x4_t b)
    /// A32: VADDHN.I32 Dd, Qn, Qm
    /// A64: ADDHN Vd.4H, Vn.4S, Vm.4S
    /// </summary>
    public static Vector64<short> AddHighNarrowingLower(Vector128<int> left, Vector128<int> right) => AddHighNarrowingLower(left, right);
    /// <summary>
    /// int32x2_t vaddhn_s64 (int64x2_t a, int64x2_t b)
    /// A32: VADDHN.I64 Dd, Qn, Qm
    /// A64: ADDHN Vd.2S, Vn.2D, Vm.2D
    /// </summary>
    public static Vector64<int> AddHighNarrowingLower(Vector128<long> left, Vector128<long> right) => AddHighNarrowingLower(left, right);
    /// <summary>
    /// int8x8_t vaddhn_s16 (int16x8_t a, int16x8_t b)
    /// A32: VADDHN.I16 Dd, Qn, Qm
    /// A64: ADDHN Vd.8B, Vn.8H, Vm.8H
    /// </summary>
    public static Vector64<sbyte> AddHighNarrowingLower(Vector128<short> left, Vector128<short> right) => AddHighNarrowingLower(left, right);
    /// <summary>
    /// uint16x4_t vaddhn_u32 (uint32x4_t a, uint32x4_t b)
    /// A32: VADDHN.I32 Dd, Qn, Qm
    /// A64: ADDHN Vd.4H, Vn.4S, Vm.4S
    /// </summary>
    public static Vector64<ushort> AddHighNarrowingLower(Vector128<uint> left, Vector128<uint> right) => AddHighNarrowingLower(left, right);
    /// <summary>
    /// uint32x2_t vaddhn_u64 (uint64x2_t a, uint64x2_t b)
    /// A32: VADDHN.I64 Dd, Qn, Qm
    /// A64: ADDHN Vd.2S, Vn.2D, Vm.2D
    /// </summary>
    public static Vector64<uint> AddHighNarrowingLower(Vector128<ulong> left, Vector128<ulong> right) => AddHighNarrowingLower(left, right);
    /// <summary>
    /// uint8x16_t vaddhn_high_u16 (uint8x8_t r, uint16x8_t a, uint16x8_t b)
    /// A32: VADDHN.I16 Dd+1, Qn, Qm
    /// A64: ADDHN2 Vd.16B, Vn.8H, Vm.8H
    /// </summary>
    public static Vector128<byte> AddHighNarrowingUpper(Vector64<byte> lower, Vector128<ushort> left, Vector128<ushort> right) => AddHighNarrowingUpper(lower, left, right);
    /// <summary>
    /// int16x8_t vaddhn_high_s32 (int16x4_t r, int32x4_t a, int32x4_t b)
    /// A32: VADDHN.I32 Dd+1, Qn, Qm
    /// A64: ADDHN2 Vd.8H, Vn.4S, Vm.4S
    /// </summary>
    public static Vector128<short> AddHighNarrowingUpper(Vector64<short> lower, Vector128<int> left, Vector128<int> right) => AddHighNarrowingUpper(lower, left, right);
    /// <summary>
    /// int32x4_t vaddhn_high_s64 (int32x2_t r, int64x2_t a, int64x2_t b)
    /// A32: VADDHN.I64 Dd+1, Qn, Qm
    /// A64: ADDHN2 Vd.4S, Vn.2D, Vm.2D
    /// </summary>
    public static Vector128<int> AddHighNarrowingUpper(Vector64<int> lower, Vector128<long> left, Vector128<long> right) => AddHighNarrowingUpper(lower, left, right);
    /// <summary>
    /// int8x16_t vaddhn_high_s16 (int8x8_t r, int16x8_t a, int16x8_t b)
    /// A32: VADDHN.I16 Dd+1, Qn, Qm
    /// A64: ADDHN2 Vd.16B, Vn.8H, Vm.8H
    /// </summary>
    public static Vector128<sbyte> AddHighNarrowingUpper(Vector64<sbyte> lower, Vector128<short> left, Vector128<short> right) => AddHighNarrowingUpper(lower, left, right);
    /// <summary>
    /// uint16x8_t vaddhn_high_u32 (uint16x4_t r, uint32x4_t a, uint32x4_t b)
    /// A32: VADDHN.I32 Dd+1, Qn, Qm
    /// A64: ADDHN2 Vd.8H, Vn.4S, Vm.4S
    /// </summary>
    public static Vector128<ushort> AddHighNarrowingUpper(Vector64<ushort> lower, Vector128<uint> left, Vector128<uint> right) => AddHighNarrowingUpper(lower, left, right);
    /// <summary>
    /// uint32x4_t vaddhn_high_u64 (uint32x2_t r, uint64x2_t a, uint64x2_t b)
    /// A32: VADDHN.I64 Dd+1, Qn, Qm
    /// A64: ADDHN2 Vd.4S, Vn.2D, Vm.2D
    /// </summary>
    public static Vector128<uint> AddHighNarrowingUpper(Vector64<uint> lower, Vector128<ulong> left, Vector128<ulong> right) => AddHighNarrowingUpper(lower, left, right);
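    // Illustrative note (not part of the original source): AddHighNarrowing keeps only the
    // upper half of each lane's sum, e.g. for ushort lanes the byte result is (a + b) >> 8:
    //   AddHighNarrowingLower({ 0x1234, ... }, { 0x0101, ... }) == { 0x13, ... }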
    /// <summary>
    /// uint8x8_t vpadd_u8 (uint8x8_t a, uint8x8_t b)
    /// A32: VPADD.I8 Dd, Dn, Dm
    /// A64: ADDP Vd.8B, Vn.8B, Vm.8B
    /// </summary>
    public static Vector64<byte> AddPairwise(Vector64<byte> left, Vector64<byte> right) => AddPairwise(left, right);
    /// <summary>
    /// int16x4_t vpadd_s16 (int16x4_t a, int16x4_t b)
    /// A32: VPADD.I16 Dd, Dn, Dm
    /// A64: ADDP Vd.4H, Vn.4H, Vm.4H
    /// </summary>
    public static Vector64<short> AddPairwise(Vector64<short> left, Vector64<short> right) => AddPairwise(left, right);
    /// <summary>
    /// int32x2_t vpadd_s32 (int32x2_t a, int32x2_t b)
    /// A32: VPADD.I32 Dd, Dn, Dm
    /// A64: ADDP Vd.2S, Vn.2S, Vm.2S
    /// </summary>
    public static Vector64<int> AddPairwise(Vector64<int> left, Vector64<int> right) => AddPairwise(left, right);
    /// <summary>
    /// int8x8_t vpadd_s8 (int8x8_t a, int8x8_t b)
    /// A32: VPADD.I8 Dd, Dn, Dm
    /// A64: ADDP Vd.8B, Vn.8B, Vm.8B
    /// </summary>
    public static Vector64<sbyte> AddPairwise(Vector64<sbyte> left, Vector64<sbyte> right) => AddPairwise(left, right);
    /// <summary>
    /// float32x2_t vpadd_f32 (float32x2_t a, float32x2_t b)
    /// A32: VPADD.F32 Dd, Dn, Dm
    /// A64: FADDP Vd.2S, Vn.2S, Vm.2S
    /// </summary>
    public static Vector64<float> AddPairwise(Vector64<float> left, Vector64<float> right) => AddPairwise(left, right);
    /// <summary>
    /// uint16x4_t vpadd_u16 (uint16x4_t a, uint16x4_t b)
    /// A32: VPADD.I16 Dd, Dn, Dm
    /// A64: ADDP Vd.4H, Vn.4H, Vm.4H
    /// </summary>
    public static Vector64<ushort> AddPairwise(Vector64<ushort> left, Vector64<ushort> right) => AddPairwise(left, right);
    /// <summary>
    /// uint32x2_t vpadd_u32 (uint32x2_t a, uint32x2_t b)
    /// A32: VPADD.I32 Dd, Dn, Dm
    /// A64: ADDP Vd.2S, Vn.2S, Vm.2S
    /// </summary>
    public static Vector64<uint> AddPairwise(Vector64<uint> left, Vector64<uint> right) => AddPairwise(left, right);
    /// <summary>
    /// uint16x4_t vpaddl_u8 (uint8x8_t a)
    /// A32: VPADDL.U8 Dd, Dm
    /// A64: UADDLP Vd.4H, Vn.8B
    /// </summary>
    public static Vector64<ushort> AddPairwiseWidening(Vector64<byte> value) => AddPairwiseWidening(value);
    /// <summary>
    /// int32x2_t vpaddl_s16 (int16x4_t a)
    /// A32: VPADDL.S16 Dd, Dm
    /// A64: SADDLP Vd.2S, Vn.4H
    /// </summary>
    public static Vector64<int> AddPairwiseWidening(Vector64<short> value) => AddPairwiseWidening(value);
    /// <summary>
    /// int16x4_t vpaddl_s8 (int8x8_t a)
    /// A32: VPADDL.S8 Dd, Dm
    /// A64: SADDLP Vd.4H, Vn.8B
    /// </summary>
    public static Vector64<short> AddPairwiseWidening(Vector64<sbyte> value) => AddPairwiseWidening(value);
    /// <summary>
    /// uint32x2_t vpaddl_u16 (uint16x4_t a)
    /// A32: VPADDL.U16 Dd, Dm
    /// A64: UADDLP Vd.2S, Vn.4H
    /// </summary>
    public static Vector64<uint> AddPairwiseWidening(Vector64<ushort> value) => AddPairwiseWidening(value);
    /// <summary>
    /// uint16x8_t vpaddlq_u8 (uint8x16_t a)
    /// A32: VPADDL.U8 Qd, Qm
    /// A64: UADDLP Vd.8H, Vn.16B
    /// </summary>
    public static Vector128<ushort> AddPairwiseWidening(Vector128<byte> value) => AddPairwiseWidening(value);
    /// <summary>
    /// int32x4_t vpaddlq_s16 (int16x8_t a)
    /// A32: VPADDL.S16 Qd, Qm
    /// A64: SADDLP Vd.4S, Vn.8H
    /// </summary>
    public static Vector128<int> AddPairwiseWidening(Vector128<short> value) => AddPairwiseWidening(value);
    /// <summary>
    /// int64x2_t vpaddlq_s32 (int32x4_t a)
    /// A32: VPADDL.S32 Qd, Qm
    /// A64: SADDLP Vd.2D, Vn.4S
    /// </summary>
    public static Vector128<long> AddPairwiseWidening(Vector128<int> value) => AddPairwiseWidening(value);
    /// <summary>
    /// int16x8_t vpaddlq_s8 (int8x16_t a)
    /// A32: VPADDL.S8 Qd, Qm
    /// A64: SADDLP Vd.8H, Vn.16B
    /// </summary>
    public static Vector128<short> AddPairwiseWidening(Vector128<sbyte> value) => AddPairwiseWidening(value);
    /// <summary>
    /// uint32x4_t vpaddlq_u16 (uint16x8_t a)
    /// A32: VPADDL.U16 Qd, Qm
    /// A64: UADDLP Vd.4S, Vn.8H
    /// </summary>
    public static Vector128<uint> AddPairwiseWidening(Vector128<ushort> value) => AddPairwiseWidening(value);
    /// <summary>
    /// uint64x2_t vpaddlq_u32 (uint32x4_t a)
    /// A32: VPADDL.U32 Qd, Qm
    /// A64: UADDLP Vd.2D, Vn.4S
    /// </summary>
    public static Vector128<ulong> AddPairwiseWidening(Vector128<uint> value) => AddPairwiseWidening(value);
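    // Illustrative sketch (not part of the original source): AddPairwise sums adjacent lanes,
    // with the results from 'left' followed by those from 'right':
    //   AddPairwise({ 1, 2, 3, 4 }, { 5, 6, 7, 8 }) == { 3, 7, 11, 15 }
    // AddPairwiseWidening does the same on a single vector but widens each sum, so byte
    // lanes pairwise-add into ushort lanes with no risk of overflow.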
AddPairwiseWideningAndAdd(addend, value); /// <summary> /// int32x2_t vpadal_s16 (int32x2_t a, int16x4_t b) /// A32: VPADAL.S16 Dd, Dm /// A64: SADALP Vd.2S, Vn.4H /// </summary> public static Vector64<int> AddPairwiseWideningAndAdd(Vector64<int> addend, Vector64<short> value) => AddPairwiseWideningAndAdd(addend, value); /// <summary> /// int16x4_t vpadal_s8 (int16x4_t a, int8x8_t b) /// A32: VPADAL.S8 Dd, Dm /// A64: SADALP Vd.4H, Vn.8B /// </summary> public static Vector64<short> AddPairwiseWideningAndAdd(Vector64<short> addend, Vector64<sbyte> value) => AddPairwiseWideningAndAdd(addend, value); /// <summary> /// uint32x2_t vpadal_u16 (uint32x2_t a, uint16x4_t b) /// A32: VPADAL.U16 Dd, Dm /// A64: UADALP Vd.2S, Vn.4H /// </summary> public static Vector64<uint> AddPairwiseWideningAndAdd(Vector64<uint> addend, Vector64<ushort> value) => AddPairwiseWideningAndAdd(addend, value); /// <summary> /// uint16x8_t vpadalq_u8 (uint16x8_t a, uint8x16_t b) /// A32: VPADAL.U8 Qd, Qm /// A64: UADALP Vd.8H, Vn.16B /// </summary> public static Vector128<ushort> AddPairwiseWideningAndAdd(Vector128<ushort> addend, Vector128<byte> value) => AddPairwiseWideningAndAdd(addend, value); /// <summary> /// int32x4_t vpadalq_s16 (int32x4_t a, int16x8_t b) /// A32: VPADAL.S16 Qd, Qm /// A64: SADALP Vd.4S, Vn.8H /// </summary> public static Vector128<int> AddPairwiseWideningAndAdd(Vector128<int> addend, Vector128<short> value) => AddPairwiseWideningAndAdd(addend, value); /// <summary> /// int64x2_t vpadalq_s32 (int64x2_t a, int32x4_t b) /// A32: VPADAL.S32 Qd, Qm /// A64: SADALP Vd.2D, Vn.4S /// </summary> public static Vector128<long> AddPairwiseWideningAndAdd(Vector128<long> addend, Vector128<int> value) => AddPairwiseWideningAndAdd(addend, value); /// <summary> /// int16x8_t vpadalq_s8 (int16x8_t a, int8x16_t b) /// A32: VPADAL.S8 Qd, Qm /// A64: SADALP Vd.8H, Vn.16B /// </summary> public static Vector128<short> AddPairwiseWideningAndAdd(Vector128<short> addend, Vector128<sbyte> value) => AddPairwiseWideningAndAdd(addend, value); /// <summary> /// uint32x4_t vpadalq_u16 (uint32x4_t a, uint16x8_t b) /// A32: VPADAL.U16 Qd, Qm /// A64: UADALP Vd.4S, Vn.8H /// </summary> public static Vector128<uint> AddPairwiseWideningAndAdd(Vector128<uint> addend, Vector128<ushort> value) => AddPairwiseWideningAndAdd(addend, value); /// <summary> /// uint64x2_t vpadalq_u32 (uint64x2_t a, uint32x4_t b) /// A32: VPADAL.U32 Qd, Qm /// A64: UADALP Vd.2D, Vn.4S /// </summary> public static Vector128<ulong> AddPairwiseWideningAndAdd(Vector128<ulong> addend, Vector128<uint> value) => AddPairwiseWideningAndAdd(addend, value); /// <summary> /// int64x1_t vpadal_s32 (int64x1_t a, int32x2_t b) /// A32: VPADAL.S32 Dd, Dm /// A64: SADALP Vd.1D, Vn.2S /// </summary> public static Vector64<long> AddPairwiseWideningAndAddScalar(Vector64<long> addend, Vector64<int> value) => AddPairwiseWideningAndAddScalar(addend, value); /// <summary> /// uint64x1_t vpadal_u32 (uint64x1_t a, uint32x2_t b) /// A32: VPADAL.U32 Dd, Dm /// A64: UADALP Vd.1D, Vn.2S /// </summary> public static Vector64<ulong> AddPairwiseWideningAndAddScalar(Vector64<ulong> addend, Vector64<uint> value) => AddPairwiseWideningAndAddScalar(addend, value); /// <summary> /// int64x1_t vpaddl_s32 (int32x2_t a) /// A32: VPADDL.S32 Dd, Dm /// A64: SADDLP Dd, Vn.2S /// </summary> public static Vector64<long> AddPairwiseWideningScalar(Vector64<int> value) => AddPairwiseWideningScalar(value); /// <summary> /// uint64x1_t vpaddl_u32 (uint32x2_t a) /// A32: VPADDL.U32 Dd, Dm /// A64: UADDLP Dd, 
Vn.2S /// </summary> public static Vector64<ulong> AddPairwiseWideningScalar(Vector64<uint> value) => AddPairwiseWideningScalar(value); /// <summary> /// uint8x8_t vraddhn_u16 (uint16x8_t a, uint16x8_t b) /// A32: VRADDHN.I16 Dd, Qn, Qm /// A64: RADDHN Vd.8B, Vn.8H, Vm.8H /// </summary> public static Vector64<byte> AddRoundedHighNarrowingLower(Vector128<ushort> left, Vector128<ushort> right) => AddRoundedHighNarrowingLower(left, right); /// <summary> /// int16x4_t vraddhn_s32 (int32x4_t a, int32x4_t b) /// A32: VRADDHN.I32 Dd, Qn, Qm /// A64: RADDHN Vd.4H, Vn.4S, Vm.4S /// </summary> public static Vector64<short> AddRoundedHighNarrowingLower(Vector128<int> left, Vector128<int> right) => AddRoundedHighNarrowingLower(left, right); /// <summary> /// int32x2_t vraddhn_s64 (int64x2_t a, int64x2_t b) /// A32: VRADDHN.I64 Dd, Qn, Qm /// A64: RADDHN Vd.2S, Vn.2D, Vm.2D /// </summary> public static Vector64<int> AddRoundedHighNarrowingLower(Vector128<long> left, Vector128<long> right) => AddRoundedHighNarrowingLower(left, right); /// <summary> /// int8x8_t vraddhn_s16 (int16x8_t a, int16x8_t b) /// A32: VRADDHN.I16 Dd, Qn, Qm /// A64: RADDHN Vd.8B, Vn.8H, Vm.8H /// </summary> public static Vector64<sbyte> AddRoundedHighNarrowingLower(Vector128<short> left, Vector128<short> right) => AddRoundedHighNarrowingLower(left, right); /// <summary> /// uint16x4_t vraddhn_u32 (uint32x4_t a, uint32x4_t b) /// A32: VRADDHN.I32 Dd, Qn, Qm /// A64: RADDHN Vd.4H, Vn.4S, Vm.4S /// </summary> public static Vector64<ushort> AddRoundedHighNarrowingLower(Vector128<uint> left, Vector128<uint> right) => AddRoundedHighNarrowingLower(left, right); /// <summary> /// uint32x2_t vraddhn_u64 (uint64x2_t a, uint64x2_t b) /// A32: VRADDHN.I64 Dd, Qn, Qm /// A64: RADDHN Vd.2S, Vn.2D, Vm.2D /// </summary> public static Vector64<uint> AddRoundedHighNarrowingLower(Vector128<ulong> left, Vector128<ulong> right) => AddRoundedHighNarrowingLower(left, right); /// <summary> /// uint8x16_t vraddhn_high_u16 (uint8x8_t r, uint16x8_t a, uint16x8_t b) /// A32: VRADDHN.I16 Dd+1, Qn, Qm /// A64: RADDHN2 Vd.16B, Vn.8H, Vm.8H /// </summary> public static Vector128<byte> AddRoundedHighNarrowingUpper(Vector64<byte> lower, Vector128<ushort> left, Vector128<ushort> right) => AddRoundedHighNarrowingUpper(lower, left, right); /// <summary> /// int16x8_t vraddhn_high_s32 (int16x4_t r, int32x4_t a, int32x4_t b) /// A32: VRADDHN.I32 Dd+1, Qn, Qm /// A64: RADDHN2 Vd.8H, Vn.4S, Vm.4S /// </summary> public static Vector128<short> AddRoundedHighNarrowingUpper(Vector64<short> lower, Vector128<int> left, Vector128<int> right) => AddRoundedHighNarrowingUpper(lower, left, right); /// <summary> /// int32x4_t vraddhn_high_s64 (int32x2_t r, int64x2_t a, int64x2_t b) /// A32: VRADDHN.I64 Dd+1, Qn, Qm /// A64: RADDHN2 Vd.4S, Vn.2D, Vm.2D /// </summary> public static Vector128<int> AddRoundedHighNarrowingUpper(Vector64<int> lower, Vector128<long> left, Vector128<long> right) => AddRoundedHighNarrowingUpper(lower, left, right); /// <summary> /// int8x16_t vraddhn_high_s16 (int8x8_t r, int16x8_t a, int16x8_t b) /// A32: VRADDHN.I16 Dd+1, Qn, Qm /// A64: RADDHN2 Vd.16B, Vn.8H, Vm.8H /// </summary> public static Vector128<sbyte> AddRoundedHighNarrowingUpper(Vector64<sbyte> lower, Vector128<short> left, Vector128<short> right) => AddRoundedHighNarrowingUpper(lower, left, right); /// <summary> /// uint16x8_t vraddhn_high_u32 (uint16x4_t r, uint32x4_t a, uint32x4_t b) /// A32: VRADDHN.I32 Dd+1, Qn, Qm /// A64: RADDHN2 Vd.8H, Vn.4S, Vm.4S /// </summary> public static 
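// The HighNarrowing adds keep only the upper half of each double-width sum, and
// the Rounded forms add half an LSB first: for ushort lanes each result byte is,
// conceptually, (a + b + 0x80) >> 8, evaluated without intermediate overflow.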
Vector128<ushort> AddRoundedHighNarrowingUpper(Vector64<ushort> lower, Vector128<uint> left, Vector128<uint> right) => AddRoundedHighNarrowingUpper(lower, left, right); /// <summary> /// uint32x4_t vraddhn_high_u64 (uint32x2_t r, uint64x2_t a, uint64x2_t b) /// A32: VRADDHN.I64 Dd+1, Qn, Qm /// A64: RADDHN2 Vd.4S, Vn.2D, Vm.2D /// </summary> public static Vector128<uint> AddRoundedHighNarrowingUpper(Vector64<uint> lower, Vector128<ulong> left, Vector128<ulong> right) => AddRoundedHighNarrowingUpper(lower, left, right); /// <summary> /// uint8x8_t vqadd_u8 (uint8x8_t a, uint8x8_t b) /// A32: VQADD.U8 Dd, Dn, Dm /// A64: UQADD Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> AddSaturate(Vector64<byte> left, Vector64<byte> right) => AddSaturate(left, right); /// <summary> /// int16x4_t vqadd_s16 (int16x4_t a, int16x4_t b) /// A32: VQADD.S16 Dd, Dn, Dm /// A64: SQADD Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<short> AddSaturate(Vector64<short> left, Vector64<short> right) => AddSaturate(left, right); /// <summary> /// int32x2_t vqadd_s32 (int32x2_t a, int32x2_t b) /// A32: VQADD.S32 Dd, Dn, Dm /// A64: SQADD Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<int> AddSaturate(Vector64<int> left, Vector64<int> right) => AddSaturate(left, right); /// <summary> /// int8x8_t vqadd_s8 (int8x8_t a, int8x8_t b) /// A32: VQADD.S8 Dd, Dn, Dm /// A64: SQADD Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> AddSaturate(Vector64<sbyte> left, Vector64<sbyte> right) => AddSaturate(left, right); /// <summary> /// uint16x4_t vqadd_u16 (uint16x4_t a, uint16x4_t b) /// A32: VQADD.U16 Dd, Dn, Dm /// A64: UQADD Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<ushort> AddSaturate(Vector64<ushort> left, Vector64<ushort> right) => AddSaturate(left, right); /// <summary> /// uint32x2_t vqadd_u32 (uint32x2_t a, uint32x2_t b) /// A32: VQADD.U32 Dd, Dn, Dm /// A64: UQADD Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<uint> AddSaturate(Vector64<uint> left, Vector64<uint> right) => AddSaturate(left, right); /// <summary> /// uint8x16_t vqaddq_u8 (uint8x16_t a, uint8x16_t b) /// A32: VQADD.U8 Qd, Qn, Qm /// A64: UQADD Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> AddSaturate(Vector128<byte> left, Vector128<byte> right) => AddSaturate(left, right); /// <summary> /// int16x8_t vqaddq_s16 (int16x8_t a, int16x8_t b) /// A32: VQADD.S16 Qd, Qn, Qm /// A64: SQADD Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<short> AddSaturate(Vector128<short> left, Vector128<short> right) => AddSaturate(left, right); /// <summary> /// int32x4_t vqaddq_s32 (int32x4_t a, int32x4_t b) /// A32: VQADD.S32 Qd, Qn, Qm /// A64: SQADD Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<int> AddSaturate(Vector128<int> left, Vector128<int> right) => AddSaturate(left, right); /// <summary> /// int64x2_t vqaddq_s64 (int64x2_t a, int64x2_t b) /// A32: VQADD.S64 Qd, Qn, Qm /// A64: SQADD Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<long> AddSaturate(Vector128<long> left, Vector128<long> right) => AddSaturate(left, right); /// <summary> /// int8x16_t vqaddq_s8 (int8x16_t a, int8x16_t b) /// A32: VQADD.S8 Qd, Qn, Qm /// A64: SQADD Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> AddSaturate(Vector128<sbyte> left, Vector128<sbyte> right) => AddSaturate(left, right); /// <summary> /// uint16x8_t vqaddq_u16 (uint16x8_t a, uint16x8_t b) /// A32: VQADD.U16 Qd, Qn, Qm /// A64: UQADD Vd.8H, Vn.8H, Vm.8H /// </summary> public 
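// Saturating adds clamp to the element type's range instead of wrapping: for byte
// lanes, 250 + 10 saturates to 255, and for sbyte lanes, 100 + 100 saturates to 127.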
static Vector128<ushort> AddSaturate(Vector128<ushort> left, Vector128<ushort> right) => AddSaturate(left, right); /// <summary> /// uint32x4_t vqaddq_u32 (uint32x4_t a, uint32x4_t b) /// A32: VQADD.U32 Qd, Qn, Qm /// A64: UQADD Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<uint> AddSaturate(Vector128<uint> left, Vector128<uint> right) => AddSaturate(left, right); /// <summary> /// uint64x2_t vqaddq_u64 (uint64x2_t a, uint64x2_t b) /// A32: VQADD.U64 Qd, Qn, Qm /// A64: UQADD Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<ulong> AddSaturate(Vector128<ulong> left, Vector128<ulong> right) => AddSaturate(left, right); /// <summary> /// int64x1_t vqadd_s64 (int64x1_t a, int64x1_t b) /// A32: VQADD.S64 Dd, Dn, Dm /// A64: SQADD Dd, Dn, Dm /// </summary> public static Vector64<long> AddSaturateScalar(Vector64<long> left, Vector64<long> right) => AddSaturateScalar(left, right); /// <summary> /// uint64x1_t vqadd_u64 (uint64x1_t a, uint64x1_t b) /// A32: VQADD.U64 Dd, Dn, Dm /// A64: UQADD Dd, Dn, Dm /// </summary> public static Vector64<ulong> AddSaturateScalar(Vector64<ulong> left, Vector64<ulong> right) => AddSaturateScalar(left, right); /// <summary> /// float64x1_t vadd_f64 (float64x1_t a, float64x1_t b) /// A32: VADD.F64 Dd, Dn, Dm /// A64: FADD Dd, Dn, Dm /// </summary> public static Vector64<double> AddScalar(Vector64<double> left, Vector64<double> right) => AddScalar(left, right); /// <summary> /// int64x1_t vadd_s64 (int64x1_t a, int64x1_t b) /// A32: VADD.I64 Dd, Dn, Dm /// A64: ADD Dd, Dn, Dm /// </summary> public static Vector64<long> AddScalar(Vector64<long> left, Vector64<long> right) => AddScalar(left, right); /// <summary> /// float32_t vadds_f32 (float32_t a, float32_t b) /// A32: VADD.F32 Sd, Sn, Sm /// A64: FADD Sd, Sn, Sm /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
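/// Only element 0 participates; for example (hypothetical values), element 0 of
/// AddScalar(Vector64.CreateScalar(1.5f), Vector64.CreateScalar(2.5f)) is 4.0f.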
/// </summary> public static Vector64<float> AddScalar(Vector64<float> left, Vector64<float> right) => AddScalar(left, right); /// <summary> /// uint64x1_t vadd_u64 (uint64x1_t a, uint64x1_t b) /// A32: VADD.I64 Dd, Dn, Dm /// A64: ADD Dd, Dn, Dm /// </summary> public static Vector64<ulong> AddScalar(Vector64<ulong> left, Vector64<ulong> right) => AddScalar(left, right); /// <summary> /// uint16x8_t vaddl_u8 (uint8x8_t a, uint8x8_t b) /// A32: VADDL.U8 Qd, Dn, Dm /// A64: UADDL Vd.8H, Vn.8B, Vm.8B /// </summary> public static Vector128<ushort> AddWideningLower(Vector64<byte> left, Vector64<byte> right) => AddWideningLower(left, right); /// <summary> /// int32x4_t vaddl_s16 (int16x4_t a, int16x4_t b) /// A32: VADDL.S16 Qd, Dn, Dm /// A64: SADDL Vd.4S, Vn.4H, Vm.4H /// </summary> public static Vector128<int> AddWideningLower(Vector64<short> left, Vector64<short> right) => AddWideningLower(left, right); /// <summary> /// int64x2_t vaddl_s32 (int32x2_t a, int32x2_t b) /// A32: VADDL.S32 Qd, Dn, Dm /// A64: SADDL Vd.2D, Vn.2S, Vm.2S /// </summary> public static Vector128<long> AddWideningLower(Vector64<int> left, Vector64<int> right) => AddWideningLower(left, right); /// <summary> /// int16x8_t vaddl_s8 (int8x8_t a, int8x8_t b) /// A32: VADDL.S8 Qd, Dn, Dm /// A64: SADDL Vd.8H, Vn.8B, Vm.8B /// </summary> public static Vector128<short> AddWideningLower(Vector64<sbyte> left, Vector64<sbyte> right) => AddWideningLower(left, right); /// <summary> /// uint32x4_t vaddl_u16 (uint16x4_t a, uint16x4_t b) /// A32: VADDL.U16 Qd, Dn, Dm /// A64: UADDL Vd.4S, Vn.4H, Vm.4H /// </summary> public static Vector128<uint> AddWideningLower(Vector64<ushort> left, Vector64<ushort> right) => AddWideningLower(left, right); /// <summary> /// uint64x2_t vaddl_u32 (uint32x2_t a, uint32x2_t b) /// A32: VADDL.U32 Qd, Dn, Dm /// A64: UADDL Vd.2D, Vn.2S, Vm.2S /// </summary> public static Vector128<ulong> AddWideningLower(Vector64<uint> left, Vector64<uint> right) => AddWideningLower(left, right); /// <summary> /// int16x8_t vaddw_s8 (int16x8_t a, int8x8_t b) /// A32: VADDW.S8 Qd, Qn, Dm /// A64: SADDW Vd.8H, Vn.8H, Vm.8B /// </summary> public static Vector128<short> AddWideningLower(Vector128<short> left, Vector64<sbyte> right) => AddWideningLower(left, right); /// <summary> /// int32x4_t vaddw_s16 (int32x4_t a, int16x4_t b) /// A32: VADDW.S16 Qd, Qn, Dm /// A64: SADDW Vd.4S, Vn.4S, Vm.4H /// </summary> public static Vector128<int> AddWideningLower(Vector128<int> left, Vector64<short> right) => AddWideningLower(left, right); /// <summary> /// int64x2_t vaddw_s32 (int64x2_t a, int32x2_t b) /// A32: VADDW.S32 Qd, Qn, Dm /// A64: SADDW Vd.2D, Vn.2D, Vm.2S /// </summary> public static Vector128<long> AddWideningLower(Vector128<long> left, Vector64<int> right) => AddWideningLower(left, right); /// <summary> /// uint16x8_t vaddw_u8 (uint16x8_t a, uint8x8_t b) /// A32: VADDW.U8 Qd, Qn, Dm /// A64: UADDW Vd.8H, Vn.8H, Vm.8B /// </summary> public static Vector128<ushort> AddWideningLower(Vector128<ushort> left, Vector64<byte> right) => AddWideningLower(left, right); /// <summary> /// uint32x4_t vaddw_u16 (uint32x4_t a, uint16x4_t b) /// A32: VADDW.U16 Qd, Qn, Dm /// A64: UADDW Vd.4S, Vn.4S, Vm.4H /// </summary> public static Vector128<uint> AddWideningLower(Vector128<uint> left, Vector64<ushort> right) => AddWideningLower(left, right); /// <summary> /// uint64x2_t vaddw_u32 (uint64x2_t a, uint32x2_t b) /// A32: VADDW.U32 Qd, Qn, Dm /// A64: UADDW Vd.2D, Vn.2D, Vm.2S /// </summary> public static Vector128<ulong> 
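// The Widening adds produce double-width elements: the Lower variants consume
// Vector64 operands (or a Vector64 narrow operand alongside a Vector128
// accumulator), while the Upper variants read the upper half of their 128-bit
// narrow operand.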
AddWideningLower(Vector128<ulong> left, Vector64<uint> right) => AddWideningLower(left, right); /// <summary> /// uint16x8_t vaddl_high_u8 (uint8x16_t a, uint8x16_t b) /// A32: VADDL.U8 Qd, Dn+1, Dm+1 /// A64: UADDL2 Vd.8H, Vn.16B, Vm.16B /// </summary> public static Vector128<ushort> AddWideningUpper(Vector128<byte> left, Vector128<byte> right) => AddWideningUpper(left, right); /// <summary> /// int32x4_t vaddl_high_s16 (int16x8_t a, int16x8_t b) /// A32: VADDL.S16 Qd, Dn+1, Dm+1 /// A64: SADDL2 Vd.4S, Vn.8H, Vm.8H /// </summary> public static Vector128<int> AddWideningUpper(Vector128<short> left, Vector128<short> right) => AddWideningUpper(left, right); /// <summary> /// int16x8_t vaddw_high_s8 (int16x8_t a, int8x16_t b) /// A32: VADDW.S8 Qd, Qn, Dm+1 /// A64: SADDW2 Vd.8H, Vn.8H, Vm.16B /// </summary> public static Vector128<short> AddWideningUpper(Vector128<short> left, Vector128<sbyte> right) => AddWideningUpper(left, right); /// <summary> /// int32x4_t vaddw_high_s16 (int32x4_t a, int16x8_t b) /// A32: VADDW.S16 Qd, Qn, Dm+1 /// A64: SADDW2 Vd.4S, Vn.4S, Vm.8H /// </summary> public static Vector128<int> AddWideningUpper(Vector128<int> left, Vector128<short> right) => AddWideningUpper(left, right); /// <summary> /// int64x2_t vaddl_high_s32 (int32x4_t a, int32x4_t b) /// A32: VADDL.S32 Qd, Dn+1, Dm+1 /// A64: SADDL2 Vd.2D, Vn.4S, Vm.4S /// </summary> public static Vector128<long> AddWideningUpper(Vector128<int> left, Vector128<int> right) => AddWideningUpper(left, right); /// <summary> /// int64x2_t vaddw_high_s32 (int64x2_t a, int32x4_t b) /// A32: VADDW.S32 Qd, Qn, Dm+1 /// A64: SADDW2 Vd.2D, Vn.2D, Vm.4S /// </summary> public static Vector128<long> AddWideningUpper(Vector128<long> left, Vector128<int> right) => AddWideningUpper(left, right); /// <summary> /// int16x8_t vaddl_high_s8 (int8x16_t a, int8x16_t b) /// A32: VADDL.S8 Qd, Dn+1, Dm+1 /// A64: SADDL2 Vd.8H, Vn.16B, Vm.16B /// </summary> public static Vector128<short> AddWideningUpper(Vector128<sbyte> left, Vector128<sbyte> right) => AddWideningUpper(left, right); /// <summary> /// uint16x8_t vaddw_high_u8 (uint16x8_t a, uint8x16_t b) /// A32: VADDW.U8 Qd, Qn, Dm+1 /// A64: UADDW2 Vd.8H, Vn.8H, Vm.16B /// </summary> public static Vector128<ushort> AddWideningUpper(Vector128<ushort> left, Vector128<byte> right) => AddWideningUpper(left, right); /// <summary> /// uint32x4_t vaddl_high_u16 (uint16x8_t a, uint16x8_t b) /// A32: VADDL.U16 Qd, Dn+1, Dm+1 /// A64: UADDL2 Vd.4S, Vn.8H, Vm.8H /// </summary> public static Vector128<uint> AddWideningUpper(Vector128<ushort> left, Vector128<ushort> right) => AddWideningUpper(left, right); /// <summary> /// uint32x4_t vaddw_high_u16 (uint32x4_t a, uint16x8_t b) /// A32: VADDW.U16 Qd, Qn, Dm+1 /// A64: UADDW2 Vd.4S, Vn.4S, Vm.8H /// </summary> public static Vector128<uint> AddWideningUpper(Vector128<uint> left, Vector128<ushort> right) => AddWideningUpper(left, right); /// <summary> /// uint64x2_t vaddl_high_u32 (uint32x4_t a, uint32x4_t b) /// A32: VADDL.U32 Qd, Dn+1, Dm+1 /// A64: UADDL2 Vd.2D, Vn.4S, Vm.4S /// </summary> public static Vector128<ulong> AddWideningUpper(Vector128<uint> left, Vector128<uint> right) => AddWideningUpper(left, right); /// <summary> /// uint64x2_t vaddw_high_u32 (uint64x2_t a, uint32x4_t b) /// A32: VADDW.U32 Qd, Qn, Dm+1 /// A64: UADDW2 Vd.2D, Vn.2D, Vm.4S /// </summary> public static Vector128<ulong> AddWideningUpper(Vector128<ulong> left, Vector128<uint> right) => AddWideningUpper(left, right); /// <summary> /// uint8x8_t vand_u8 (uint8x8_t a, uint8x8_t b) 
/// A32: VAND Dd, Dn, Dm /// A64: AND Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> And(Vector64<byte> left, Vector64<byte> right) => And(left, right); /// <summary> /// float64x1_t vand_f64 (float64x1_t a, float64x1_t b) /// A32: VAND Dd, Dn, Dm /// A64: AND Vd.8B, Vn.8B, Vm.8B /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. /// </summary> public static Vector64<double> And(Vector64<double> left, Vector64<double> right) => And(left, right); /// <summary> /// int16x4_t vand_s16 (int16x4_t a, int16x4_t b) /// A32: VAND Dd, Dn, Dm /// A64: AND Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<short> And(Vector64<short> left, Vector64<short> right) => And(left, right); /// <summary> /// int32x2_t vand_s32 (int32x2_t a, int32x2_t b) /// A32: VAND Dd, Dn, Dm /// A64: AND Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<int> And(Vector64<int> left, Vector64<int> right) => And(left, right); /// <summary> /// int64x1_t vand_s64 (int64x1_t a, int64x1_t b) /// A32: VAND Dd, Dn, Dm /// A64: AND Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<long> And(Vector64<long> left, Vector64<long> right) => And(left, right); /// <summary> /// int8x8_t vand_s8 (int8x8_t a, int8x8_t b) /// A32: VAND Dd, Dn, Dm /// A64: AND Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> And(Vector64<sbyte> left, Vector64<sbyte> right) => And(left, right); /// <summary> /// float32x2_t vand_f32 (float32x2_t a, float32x2_t b) /// A32: VAND Dd, Dn, Dm /// A64: AND Vd.8B, Vn.8B, Vm.8B /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. /// </summary> public static Vector64<float> And(Vector64<float> left, Vector64<float> right) => And(left, right); /// <summary> /// uint16x4_t vand_u16 (uint16x4_t a, uint16x4_t b) /// A32: VAND Dd, Dn, Dm /// A64: AND Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<ushort> And(Vector64<ushort> left, Vector64<ushort> right) => And(left, right); /// <summary> /// uint32x2_t vand_u32 (uint32x2_t a, uint32x2_t b) /// A32: VAND Dd, Dn, Dm /// A64: AND Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<uint> And(Vector64<uint> left, Vector64<uint> right) => And(left, right); /// <summary> /// uint64x1_t vand_u64 (uint64x1_t a, uint64x1_t b) /// A32: VAND Dd, Dn, Dm /// A64: AND Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<ulong> And(Vector64<ulong> left, Vector64<ulong> right) => And(left, right); /// <summary> /// uint8x16_t vandq_u8 (uint8x16_t a, uint8x16_t b) /// A32: VAND Qd, Qn, Qm /// A64: AND Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> And(Vector128<byte> left, Vector128<byte> right) => And(left, right); /// <summary> /// float64x2_t vandq_f64 (float64x2_t a, float64x2_t b) /// A32: VAND Qd, Qn, Qm /// A64: AND Vd.16B, Vn.16B, Vm.16B /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
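/// Bitwise AND on floating-point lanes is chiefly useful for mask manipulation;
/// for example (a common idiom), AND-ing each double lane with
/// 0x7FFF_FFFF_FFFF_FFFF clears the sign bit, yielding a vectorized absolute value.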
/// </summary> public static Vector128<double> And(Vector128<double> left, Vector128<double> right) => And(left, right); /// <summary> /// int16x8_t vandq_s16 (int16x8_t a, int16x8_t b) /// A32: VAND Qd, Qn, Qm /// A64: AND Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<short> And(Vector128<short> left, Vector128<short> right) => And(left, right); /// <summary> /// int32x4_t vandq_s32 (int32x4_t a, int32x4_t b) /// A32: VAND Qd, Qn, Qm /// A64: AND Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<int> And(Vector128<int> left, Vector128<int> right) => And(left, right); /// <summary> /// int64x2_t vandq_s64 (int64x2_t a, int64x2_t b) /// A32: VAND Qd, Qn, Qm /// A64: AND Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<long> And(Vector128<long> left, Vector128<long> right) => And(left, right); /// <summary> /// int8x16_t vandq_s8 (int8x16_t a, int8x16_t b) /// A32: VAND Qd, Qn, Qm /// A64: AND Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> And(Vector128<sbyte> left, Vector128<sbyte> right) => And(left, right); /// <summary> /// float32x4_t vandq_f32 (float32x4_t a, float32x4_t b) /// A32: VAND Qd, Qn, Qm /// A64: AND Vd.16B, Vn.16B, Vm.16B /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. /// </summary> public static Vector128<float> And(Vector128<float> left, Vector128<float> right) => And(left, right); /// <summary> /// uint16x8_t vandq_u16 (uint16x8_t a, uint16x8_t b) /// A32: VAND Qd, Qn, Qm /// A64: AND Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<ushort> And(Vector128<ushort> left, Vector128<ushort> right) => And(left, right); /// <summary> /// uint32x4_t vandq_u32 (uint32x4_t a, uint32x4_t b) /// A32: VAND Qd, Qn, Qm /// A64: AND Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<uint> And(Vector128<uint> left, Vector128<uint> right) => And(left, right); /// <summary> /// uint64x2_t vandq_u64 (uint64x2_t a, uint64x2_t b) /// A32: VAND Qd, Qn, Qm /// A64: AND Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<ulong> And(Vector128<ulong> left, Vector128<ulong> right) => And(left, right); /// <summary> /// uint8x8_t vbic_u8 (uint8x8_t a, uint8x8_t b) /// A32: VBIC Dd, Dn, Dm /// A64: BIC Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> BitwiseClear(Vector64<byte> value, Vector64<byte> mask) => BitwiseClear(value, mask); /// <summary> /// float64x1_t vbic_f64 (float64x1_t a, float64x1_t b) /// A32: VBIC Dd, Dn, Dm /// A64: BIC Vd.8B, Vn.8B, Vm.8B /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
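/// BitwiseClear computes (value AND NOT mask): every bit that is set in the mask
/// is cleared at the corresponding position of the value.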
/// </summary> public static Vector64<double> BitwiseClear(Vector64<double> value, Vector64<double> mask) => BitwiseClear(value, mask); /// <summary> /// int16x4_t vbic_s16 (int16x4_t a, int16x4_t b) /// A32: VBIC Dd, Dn, Dm /// A64: BIC Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<short> BitwiseClear(Vector64<short> value, Vector64<short> mask) => BitwiseClear(value, mask); /// <summary> /// int32x2_t vbic_s32 (int32x2_t a, int32x2_t b) /// A32: VBIC Dd, Dn, Dm /// A64: BIC Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<int> BitwiseClear(Vector64<int> value, Vector64<int> mask) => BitwiseClear(value, mask); /// <summary> /// int64x1_t vbic_s64 (int64x1_t a, int64x1_t b) /// A32: VBIC Dd, Dn, Dm /// A64: BIC Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<long> BitwiseClear(Vector64<long> value, Vector64<long> mask) => BitwiseClear(value, mask); /// <summary> /// int8x8_t vbic_s8 (int8x8_t a, int8x8_t b) /// A32: VBIC Dd, Dn, Dm /// A64: BIC Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> BitwiseClear(Vector64<sbyte> value, Vector64<sbyte> mask) => BitwiseClear(value, mask); /// <summary> /// float32x2_t vbic_f32 (float32x2_t a, float32x2_t b) /// A32: VBIC Dd, Dn, Dm /// A64: BIC Vd.8B, Vn.8B, Vm.8B /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. /// </summary> public static Vector64<float> BitwiseClear(Vector64<float> value, Vector64<float> mask) => BitwiseClear(value, mask); /// <summary> /// uint16x4_t vbic_u16 (uint16x4_t a, uint16x4_t b) /// A32: VBIC Dd, Dn, Dm /// A64: BIC Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<ushort> BitwiseClear(Vector64<ushort> value, Vector64<ushort> mask) => BitwiseClear(value, mask); /// <summary> /// uint32x2_t vbic_u32 (uint32x2_t a, uint32x2_t b) /// A32: VBIC Dd, Dn, Dm /// A64: BIC Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<uint> BitwiseClear(Vector64<uint> value, Vector64<uint> mask) => BitwiseClear(value, mask); /// <summary> /// uint64x1_t vbic_u64 (uint64x1_t a, uint64x1_t b) /// A32: VBIC Dd, Dn, Dm /// A64: BIC Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<ulong> BitwiseClear(Vector64<ulong> value, Vector64<ulong> mask) => BitwiseClear(value, mask); /// <summary> /// uint8x16_t vbicq_u8 (uint8x16_t a, uint8x16_t b) /// A32: VBIC Qd, Qn, Qm /// A64: BIC Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> BitwiseClear(Vector128<byte> value, Vector128<byte> mask) => BitwiseClear(value, mask); /// <summary> /// float64x2_t vbicq_f64 (float64x2_t a, float64x2_t b) /// A32: VBIC Qd, Qn, Qm /// A64: BIC Vd.16B, Vn.16B, Vm.16B /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
/// </summary> public static Vector128<double> BitwiseClear(Vector128<double> value, Vector128<double> mask) => BitwiseClear(value, mask); /// <summary> /// int16x8_t vbicq_s16 (int16x8_t a, int16x8_t b) /// A32: VBIC Qd, Qn, Qm /// A64: BIC Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<short> BitwiseClear(Vector128<short> value, Vector128<short> mask) => BitwiseClear(value, mask); /// <summary> /// int32x4_t vbicq_s32 (int32x4_t a, int32x4_t b) /// A32: VBIC Qd, Qn, Qm /// A64: BIC Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<int> BitwiseClear(Vector128<int> value, Vector128<int> mask) => BitwiseClear(value, mask); /// <summary> /// int64x2_t vbicq_s64 (int64x2_t a, int64x2_t b) /// A32: VBIC Qd, Qn, Qm /// A64: BIC Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<long> BitwiseClear(Vector128<long> value, Vector128<long> mask) => BitwiseClear(value, mask); /// <summary> /// int8x16_t vbicq_s8 (int8x16_t a, int8x16_t b) /// A32: VBIC Qd, Qn, Qm /// A64: BIC Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> BitwiseClear(Vector128<sbyte> value, Vector128<sbyte> mask) => BitwiseClear(value, mask); /// <summary> /// float32x4_t vbicq_f32 (float32x4_t a, float32x4_t b) /// A32: VBIC Qd, Qn, Qm /// A64: BIC Vd.16B, Vn.16B, Vm.16B /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. /// </summary> public static Vector128<float> BitwiseClear(Vector128<float> value, Vector128<float> mask) => BitwiseClear(value, mask); /// <summary> /// uint16x8_t vbicq_u16 (uint16x8_t a, uint16x8_t b) /// A32: VBIC Qd, Qn, Qm /// A64: BIC Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<ushort> BitwiseClear(Vector128<ushort> value, Vector128<ushort> mask) => BitwiseClear(value, mask); /// <summary> /// uint32x4_t vbicq_u32 (uint32x4_t a, uint32x4_t b) /// A32: VBIC Qd, Qn, Qm /// A64: BIC Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<uint> BitwiseClear(Vector128<uint> value, Vector128<uint> mask) => BitwiseClear(value, mask); /// <summary> /// uint64x2_t vbicq_u64 (uint64x2_t a, uint64x2_t b) /// A32: VBIC Qd, Qn, Qm /// A64: BIC Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<ulong> BitwiseClear(Vector128<ulong> value, Vector128<ulong> mask) => BitwiseClear(value, mask); /// <summary> /// uint8x8_t vbsl_u8 (uint8x8_t a, uint8x8_t b, uint8x8_t c) /// A32: VBSL Dd, Dn, Dm /// A64: BSL Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> BitwiseSelect(Vector64<byte> select, Vector64<byte> left, Vector64<byte> right) => BitwiseSelect(select, left, right); /// <summary> /// float64x1_t vbsl_f64 (uint64x1_t a, float64x1_t b, float64x1_t c) /// A32: VBSL Dd, Dn, Dm /// A64: BSL Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<double> BitwiseSelect(Vector64<double> select, Vector64<double> left, Vector64<double> right) => BitwiseSelect(select, left, right); /// <summary> /// int16x4_t vbsl_s16 (uint16x4_t a, int16x4_t b, int16x4_t c) /// A32: VBSL Dd, Dn, Dm /// A64: BSL Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<short> BitwiseSelect(Vector64<short> select, Vector64<short> left, Vector64<short> right) => BitwiseSelect(select, left, right); /// <summary> /// int32x2_t vbsl_s32 (uint32x2_t a, int32x2_t b, int32x2_t c) /// A32: VBSL Dd, Dn, Dm /// A64: BSL Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<int> BitwiseSelect(Vector64<int> select, Vector64<int> left, Vector64<int> right) => 
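// BitwiseSelect is a per-bit multiplexer: where a bit of 'select' is 1 the result
// takes that bit from 'left', otherwise from 'right' - conceptually
// (select & left) | (~select & right). The all-ones/all-zeros masks produced by
// the Compare* methods below are the usual source of 'select'.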
BitwiseSelect(select, left, right); /// <summary> /// int64x1_t vbsl_s64 (uint64x1_t a, int64x1_t b, int64x1_t c) /// A32: VBSL Dd, Dn, Dm /// A64: BSL Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<long> BitwiseSelect(Vector64<long> select, Vector64<long> left, Vector64<long> right) => BitwiseSelect(select, left, right); /// <summary> /// int8x8_t vbsl_s8 (uint8x8_t a, int8x8_t b, int8x8_t c) /// A32: VBSL Dd, Dn, Dm /// A64: BSL Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> BitwiseSelect(Vector64<sbyte> select, Vector64<sbyte> left, Vector64<sbyte> right) => BitwiseSelect(select, left, right); /// <summary> /// float32x2_t vbsl_f32 (uint32x2_t a, float32x2_t b, float32x2_t c) /// A32: VBSL Dd, Dn, Dm /// A64: BSL Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<float> BitwiseSelect(Vector64<float> select, Vector64<float> left, Vector64<float> right) => BitwiseSelect(select, left, right); /// <summary> /// uint16x4_t vbsl_u16 (uint16x4_t a, uint16x4_t b, uint16x4_t c) /// A32: VBSL Dd, Dn, Dm /// A64: BSL Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<ushort> BitwiseSelect(Vector64<ushort> select, Vector64<ushort> left, Vector64<ushort> right) => BitwiseSelect(select, left, right); /// <summary> /// uint32x2_t vbsl_u32 (uint32x2_t a, uint32x2_t b, uint32x2_t c) /// A32: VBSL Dd, Dn, Dm /// A64: BSL Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<uint> BitwiseSelect(Vector64<uint> select, Vector64<uint> left, Vector64<uint> right) => BitwiseSelect(select, left, right); /// <summary> /// uint64x1_t vbsl_u64 (uint64x1_t a, uint64x1_t b, uint64x1_t c) /// A32: VBSL Dd, Dn, Dm /// A64: BSL Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<ulong> BitwiseSelect(Vector64<ulong> select, Vector64<ulong> left, Vector64<ulong> right) => BitwiseSelect(select, left, right); /// <summary> /// uint8x16_t vbslq_u8 (uint8x16_t a, uint8x16_t b, uint8x16_t c) /// A32: VBSL Qd, Qn, Qm /// A64: BSL Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> BitwiseSelect(Vector128<byte> select, Vector128<byte> left, Vector128<byte> right) => BitwiseSelect(select, left, right); /// <summary> /// float64x2_t vbslq_f64 (uint64x2_t a, float64x2_t b, float64x2_t c) /// A32: VBSL Qd, Qn, Qm /// A64: BSL Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<double> BitwiseSelect(Vector128<double> select, Vector128<double> left, Vector128<double> right) => BitwiseSelect(select, left, right); /// <summary> /// int16x8_t vbslq_s16 (uint16x8_t a, int16x8_t b, int16x8_t c) /// A32: VBSL Qd, Qn, Qm /// A64: BSL Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<short> BitwiseSelect(Vector128<short> select, Vector128<short> left, Vector128<short> right) => BitwiseSelect(select, left, right); /// <summary> /// int32x4_t vbslq_s32 (uint32x4_t a, int32x4_t b, int32x4_t c) /// A32: VBSL Qd, Qn, Qm /// A64: BSL Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<int> BitwiseSelect(Vector128<int> select, Vector128<int> left, Vector128<int> right) => BitwiseSelect(select, left, right); /// <summary> /// int64x2_t vbslq_s64 (uint64x2_t a, int64x2_t b, int64x2_t c) /// A32: VBSL Qd, Qn, Qm /// A64: BSL Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<long> BitwiseSelect(Vector128<long> select, Vector128<long> left, Vector128<long> right) => BitwiseSelect(select, left, right); /// <summary> /// int8x16_t vbslq_s8 (uint8x16_t a, int8x16_t b, int8x16_t c) /// A32: VBSL Qd, Qn, Qm /// A64: BSL Vd.16B, Vn.16B, Vm.16B /// 
</summary> public static Vector128<sbyte> BitwiseSelect(Vector128<sbyte> select, Vector128<sbyte> left, Vector128<sbyte> right) => BitwiseSelect(select, left, right); /// <summary> /// float32x4_t vbslq_f32 (uint32x4_t a, float32x4_t b, float32x4_t c) /// A32: VBSL Qd, Qn, Qm /// A64: BSL Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<float> BitwiseSelect(Vector128<float> select, Vector128<float> left, Vector128<float> right) => BitwiseSelect(select, left, right); /// <summary> /// uint16x8_t vbslq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t c) /// A32: VBSL Qd, Qn, Qm /// A64: BSL Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<ushort> BitwiseSelect(Vector128<ushort> select, Vector128<ushort> left, Vector128<ushort> right) => BitwiseSelect(select, left, right); /// <summary> /// uint32x4_t vbslq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t c) /// A32: VBSL Qd, Qn, Qm /// A64: BSL Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<uint> BitwiseSelect(Vector128<uint> select, Vector128<uint> left, Vector128<uint> right) => BitwiseSelect(select, left, right); /// <summary> /// uint64x2_t vbslq_u64 (uint64x2_t a, uint64x2_t b, uint64x2_t c) /// A32: VBSL Qd, Qn, Qm /// A64: BSL Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<ulong> BitwiseSelect(Vector128<ulong> select, Vector128<ulong> left, Vector128<ulong> right) => BitwiseSelect(select, left, right); /// <summary> /// float32x2_t vrndp_f32 (float32x2_t a) /// A32: VRINTP.F32 Dd, Dm /// A64: FRINTP Vd.2S, Vn.2S /// </summary> public static Vector64<float> Ceiling(Vector64<float> value) => Ceiling(value); /// <summary> /// float32x4_t vrndpq_f32 (float32x4_t a) /// A32: VRINTP.F32 Qd, Qm /// A64: FRINTP Vd.4S, Vn.4S /// </summary> public static Vector128<float> Ceiling(Vector128<float> value) => Ceiling(value); /// <summary> /// float64x1_t vrndp_f64 (float64x1_t a) /// A32: VRINTP.F64 Dd, Dm /// A64: FRINTP Dd, Dn /// </summary> public static Vector64<double> CeilingScalar(Vector64<double> value) => CeilingScalar(value); /// <summary> /// float32_t vrndps_f32 (float32_t a) /// A32: VRINTP.F32 Sd, Sm /// A64: FRINTP Sd, Sn /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
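/// Ceiling rounds toward positive infinity; for example, 1.1f becomes 2.0f and
/// -1.1f becomes -1.0f.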
/// </summary> public static Vector64<float> CeilingScalar(Vector64<float> value) => CeilingScalar(value); /// <summary> /// uint8x8_t vceq_u8 (uint8x8_t a, uint8x8_t b) /// A32: VCEQ.I8 Dd, Dn, Dm /// A64: CMEQ Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> CompareEqual(Vector64<byte> left, Vector64<byte> right) => CompareEqual(left, right); /// <summary> /// uint16x4_t vceq_s16 (int16x4_t a, int16x4_t b) /// A32: VCEQ.I16 Dd, Dn, Dm /// A64: CMEQ Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<short> CompareEqual(Vector64<short> left, Vector64<short> right) => CompareEqual(left, right); /// <summary> /// uint32x2_t vceq_s32 (int32x2_t a, int32x2_t b) /// A32: VCEQ.I32 Dd, Dn, Dm /// A64: CMEQ Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<int> CompareEqual(Vector64<int> left, Vector64<int> right) => CompareEqual(left, right); /// <summary> /// uint8x8_t vceq_s8 (int8x8_t a, int8x8_t b) /// A32: VCEQ.I8 Dd, Dn, Dm /// A64: CMEQ Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> CompareEqual(Vector64<sbyte> left, Vector64<sbyte> right) => CompareEqual(left, right); /// <summary> /// uint32x2_t vceq_f32 (float32x2_t a, float32x2_t b) /// A32: VCEQ.F32 Dd, Dn, Dm /// A64: FCMEQ Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<float> CompareEqual(Vector64<float> left, Vector64<float> right) => CompareEqual(left, right); /// <summary> /// uint16x4_t vceq_u16 (uint16x4_t a, uint16x4_t b) /// A32: VCEQ.I16 Dd, Dn, Dm /// A64: CMEQ Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<ushort> CompareEqual(Vector64<ushort> left, Vector64<ushort> right) => CompareEqual(left, right); /// <summary> /// uint32x2_t vceq_u32 (uint32x2_t a, uint32x2_t b) /// A32: VCEQ.I32 Dd, Dn, Dm /// A64: CMEQ Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<uint> CompareEqual(Vector64<uint> left, Vector64<uint> right) => CompareEqual(left, right); /// <summary> /// uint8x16_t vceqq_u8 (uint8x16_t a, uint8x16_t b) /// A32: VCEQ.I8 Qd, Qn, Qm /// A64: CMEQ Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> CompareEqual(Vector128<byte> left, Vector128<byte> right) => CompareEqual(left, right); /// <summary> /// uint16x8_t vceqq_s16 (int16x8_t a, int16x8_t b) /// A32: VCEQ.I16 Qd, Qn, Qm /// A64: CMEQ Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<short> CompareEqual(Vector128<short> left, Vector128<short> right) => CompareEqual(left, right); /// <summary> /// uint32x4_t vceqq_s32 (int32x4_t a, int32x4_t b) /// A32: VCEQ.I32 Qd, Qn, Qm /// A64: CMEQ Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<int> CompareEqual(Vector128<int> left, Vector128<int> right) => CompareEqual(left, right); /// <summary> /// uint8x16_t vceqq_s8 (int8x16_t a, int8x16_t b) /// A32: VCEQ.I8 Qd, Qn, Qm /// A64: CMEQ Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> CompareEqual(Vector128<sbyte> left, Vector128<sbyte> right) => CompareEqual(left, right); /// <summary> /// uint32x4_t vceqq_f32 (float32x4_t a, float32x4_t b) /// A32: VCEQ.F32 Qd, Qn, Qm /// A64: FCMEQ Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<float> CompareEqual(Vector128<float> left, Vector128<float> right) => CompareEqual(left, right); /// <summary> /// uint16x8_t vceqq_u16 (uint16x8_t a, uint16x8_t b) /// A32: VCEQ.I16 Qd, Qn, Qm /// A64: CMEQ Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<ushort> CompareEqual(Vector128<ushort> left, Vector128<ushort> right) => CompareEqual(left, right); /// <summary> /// uint32x4_t 
vceqq_u32 (uint32x4_t a, uint32x4_t b) /// A32: VCEQ.I32 Qd, Qn, Qm /// A64: CMEQ Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<uint> CompareEqual(Vector128<uint> left, Vector128<uint> right) => CompareEqual(left, right); /// <summary> /// uint8x8_t vcgt_u8 (uint8x8_t a, uint8x8_t b) /// A32: VCGT.U8 Dd, Dn, Dm /// A64: CMHI Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> CompareGreaterThan(Vector64<byte> left, Vector64<byte> right) => CompareGreaterThan(left, right); /// <summary> /// uint16x4_t vcgt_s16 (int16x4_t a, int16x4_t b) /// A32: VCGT.S16 Dd, Dn, Dm /// A64: CMGT Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<short> CompareGreaterThan(Vector64<short> left, Vector64<short> right) => CompareGreaterThan(left, right); /// <summary> /// uint32x2_t vcgt_s32 (int32x2_t a, int32x2_t b) /// A32: VCGT.S32 Dd, Dn, Dm /// A64: CMGT Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<int> CompareGreaterThan(Vector64<int> left, Vector64<int> right) => CompareGreaterThan(left, right); /// <summary> /// uint8x8_t vcgt_s8 (int8x8_t a, int8x8_t b) /// A32: VCGT.S8 Dd, Dn, Dm /// A64: CMGT Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> CompareGreaterThan(Vector64<sbyte> left, Vector64<sbyte> right) => CompareGreaterThan(left, right); /// <summary> /// uint32x2_t vcgt_f32 (float32x2_t a, float32x2_t b) /// A32: VCGT.F32 Dd, Dn, Dm /// A64: FCMGT Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<float> CompareGreaterThan(Vector64<float> left, Vector64<float> right) => CompareGreaterThan(left, right); /// <summary> /// uint16x4_t vcgt_u16 (uint16x4_t a, uint16x4_t b) /// A32: VCGT.U16 Dd, Dn, Dm /// A64: CMHI Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<ushort> CompareGreaterThan(Vector64<ushort> left, Vector64<ushort> right) => CompareGreaterThan(left, right); /// <summary> /// uint32x2_t vcgt_u32 (uint32x2_t a, uint32x2_t b) /// A32: VCGT.U32 Dd, Dn, Dm /// A64: CMHI Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<uint> CompareGreaterThan(Vector64<uint> left, Vector64<uint> right) => CompareGreaterThan(left, right); /// <summary> /// uint8x16_t vcgtq_u8 (uint8x16_t a, uint8x16_t b) /// A32: VCGT.U8 Qd, Qn, Qm /// A64: CMHI Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> CompareGreaterThan(Vector128<byte> left, Vector128<byte> right) => CompareGreaterThan(left, right); /// <summary> /// uint16x8_t vcgtq_s16 (int16x8_t a, int16x8_t b) /// A32: VCGT.S16 Qd, Qn, Qm /// A64: CMGT Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<short> CompareGreaterThan(Vector128<short> left, Vector128<short> right) => CompareGreaterThan(left, right); /// <summary> /// uint32x4_t vcgtq_s32 (int32x4_t a, int32x4_t b) /// A32: VCGT.S32 Qd, Qn, Qm /// A64: CMGT Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<int> CompareGreaterThan(Vector128<int> left, Vector128<int> right) => CompareGreaterThan(left, right); /// <summary> /// uint8x16_t vcgtq_s8 (int8x16_t a, int8x16_t b) /// A32: VCGT.S8 Qd, Qn, Qm /// A64: CMGT Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> CompareGreaterThan(Vector128<sbyte> left, Vector128<sbyte> right) => CompareGreaterThan(left, right); /// <summary> /// uint32x4_t vcgtq_f32 (float32x4_t a, float32x4_t b) /// A32: VCGT.F32 Qd, Qn, Qm /// A64: FCMGT Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<float> CompareGreaterThan(Vector128<float> left, Vector128<float> right) => CompareGreaterThan(left, right); /// <summary> /// uint16x8_t 
vcgtq_u16 (uint16x8_t a, uint16x8_t b) /// A32: VCGT.U16 Qd, Qn, Qm /// A64: CMHI Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<ushort> CompareGreaterThan(Vector128<ushort> left, Vector128<ushort> right) => CompareGreaterThan(left, right); /// <summary> /// uint32x4_t vcgtq_u32 (uint32x4_t a, uint32x4_t b) /// A32: VCGT.U32 Qd, Qn, Qm /// A64: CMHI Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<uint> CompareGreaterThan(Vector128<uint> left, Vector128<uint> right) => CompareGreaterThan(left, right); /// <summary> /// uint8x8_t vcge_u8 (uint8x8_t a, uint8x8_t b) /// A32: VCGE.U8 Dd, Dn, Dm /// A64: CMHS Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> CompareGreaterThanOrEqual(Vector64<byte> left, Vector64<byte> right) => CompareGreaterThanOrEqual(left, right); /// <summary> /// uint16x4_t vcge_s16 (int16x4_t a, int16x4_t b) /// A32: VCGE.S16 Dd, Dn, Dm /// A64: CMGE Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<short> CompareGreaterThanOrEqual(Vector64<short> left, Vector64<short> right) => CompareGreaterThanOrEqual(left, right); /// <summary> /// uint32x2_t vcge_s32 (int32x2_t a, int32x2_t b) /// A32: VCGE.S32 Dd, Dn, Dm /// A64: CMGE Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<int> CompareGreaterThanOrEqual(Vector64<int> left, Vector64<int> right) => CompareGreaterThanOrEqual(left, right); /// <summary> /// uint8x8_t vcge_s8 (int8x8_t a, int8x8_t b) /// A32: VCGE.S8 Dd, Dn, Dm /// A64: CMGE Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> CompareGreaterThanOrEqual(Vector64<sbyte> left, Vector64<sbyte> right) => CompareGreaterThanOrEqual(left, right); /// <summary> /// uint32x2_t vcge_f32 (float32x2_t a, float32x2_t b) /// A32: VCGE.F32 Dd, Dn, Dm /// A64: FCMGE Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<float> CompareGreaterThanOrEqual(Vector64<float> left, Vector64<float> right) => CompareGreaterThanOrEqual(left, right); /// <summary> /// uint16x4_t vcge_u16 (uint16x4_t a, uint16x4_t b) /// A32: VCGE.U16 Dd, Dn, Dm /// A64: CMHS Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<ushort> CompareGreaterThanOrEqual(Vector64<ushort> left, Vector64<ushort> right) => CompareGreaterThanOrEqual(left, right); /// <summary> /// uint32x2_t vcge_u32 (uint32x2_t a, uint32x2_t b) /// A32: VCGE.U32 Dd, Dn, Dm /// A64: CMHS Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<uint> CompareGreaterThanOrEqual(Vector64<uint> left, Vector64<uint> right) => CompareGreaterThanOrEqual(left, right); /// <summary> /// uint8x16_t vcgeq_u8 (uint8x16_t a, uint8x16_t b) /// A32: VCGE.U8 Qd, Qn, Qm /// A64: CMHS Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> CompareGreaterThanOrEqual(Vector128<byte> left, Vector128<byte> right) => CompareGreaterThanOrEqual(left, right); /// <summary> /// uint16x8_t vcgeq_s16 (int16x8_t a, int16x8_t b) /// A32: VCGE.S16 Qd, Qn, Qm /// A64: CMGE Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<short> CompareGreaterThanOrEqual(Vector128<short> left, Vector128<short> right) => CompareGreaterThanOrEqual(left, right); /// <summary> /// uint32x4_t vcgeq_s32 (int32x4_t a, int32x4_t b) /// A32: VCGE.S32 Qd, Qn, Qm /// A64: CMGE Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<int> CompareGreaterThanOrEqual(Vector128<int> left, Vector128<int> right) => CompareGreaterThanOrEqual(left, right); /// <summary> /// uint8x16_t vcgeq_s8 (int8x16_t a, int8x16_t b) /// A32: VCGE.S8 Qd, Qn, Qm /// A64: CMGE Vd.16B, Vn.16B, Vm.16B /// </summary> public 
static Vector128<sbyte> CompareGreaterThanOrEqual(Vector128<sbyte> left, Vector128<sbyte> right) => CompareGreaterThanOrEqual(left, right); /// <summary> /// uint32x4_t vcgeq_f32 (float32x4_t a, float32x4_t b) /// A32: VCGE.F32 Qd, Qn, Qm /// A64: FCMGE Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<float> CompareGreaterThanOrEqual(Vector128<float> left, Vector128<float> right) => CompareGreaterThanOrEqual(left, right); /// <summary> /// uint16x8_t vcgeq_u16 (uint16x8_t a, uint16x8_t b) /// A32: VCGE.U16 Qd, Qn, Qm /// A64: CMHS Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<ushort> CompareGreaterThanOrEqual(Vector128<ushort> left, Vector128<ushort> right) => CompareGreaterThanOrEqual(left, right); /// <summary> /// uint32x4_t vcgeq_u32 (uint32x4_t a, uint32x4_t b) /// A32: VCGE.U32 Qd, Qn, Qm /// A64: CMHS Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<uint> CompareGreaterThanOrEqual(Vector128<uint> left, Vector128<uint> right) => CompareGreaterThanOrEqual(left, right); /// <summary> /// uint8x8_t vclt_u8 (uint8x8_t a, uint8x8_t b) /// A32: VCLT.U8 Dd, Dn, Dm /// A64: CMHI Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> CompareLessThan(Vector64<byte> left, Vector64<byte> right) => CompareLessThan(left, right); /// <summary> /// uint16x4_t vclt_s16 (int16x4_t a, int16x4_t b) /// A32: VCLT.S16 Dd, Dn, Dm /// A64: CMGT Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<short> CompareLessThan(Vector64<short> left, Vector64<short> right) => CompareLessThan(left, right); /// <summary> /// uint32x2_t vclt_s32 (int32x2_t a, int32x2_t b) /// A32: VCLT.S32 Dd, Dn, Dm /// A64: CMGT Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<int> CompareLessThan(Vector64<int> left, Vector64<int> right) => CompareLessThan(left, right); /// <summary> /// uint8x8_t vclt_s8 (int8x8_t a, int8x8_t b) /// A32: VCLT.S8 Dd, Dn, Dm /// A64: CMGT Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> CompareLessThan(Vector64<sbyte> left, Vector64<sbyte> right) => CompareLessThan(left, right); /// <summary> /// uint32x2_t vclt_f32 (float32x2_t a, float32x2_t b) /// A32: VCLT.F32 Dd, Dn, Dm /// A64: FCMGT Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<float> CompareLessThan(Vector64<float> left, Vector64<float> right) => CompareLessThan(left, right); /// <summary> /// uint16x4_t vclt_u16 (uint16x4_t a, uint16x4_t b) /// A32: VCLT.U16 Dd, Dn, Dm /// A64: CMHI Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<ushort> CompareLessThan(Vector64<ushort> left, Vector64<ushort> right) => CompareLessThan(left, right); /// <summary> /// uint32x2_t vclt_u32 (uint32x2_t a, uint32x2_t b) /// A32: VCLT.U32 Dd, Dn, Dm /// A64: CMHI Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<uint> CompareLessThan(Vector64<uint> left, Vector64<uint> right) => CompareLessThan(left, right); /// <summary> /// uint8x16_t vcltq_u8 (uint8x16_t a, uint8x16_t b) /// A32: VCLT.U8 Qd, Qn, Qm /// A64: CMHI Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> CompareLessThan(Vector128<byte> left, Vector128<byte> right) => CompareLessThan(left, right); /// <summary> /// uint16x8_t vcltq_s16 (int16x8_t a, int16x8_t b) /// A32: VCLT.S16 Qd, Qn, Qm /// A64: CMGT Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<short> CompareLessThan(Vector128<short> left, Vector128<short> right) => CompareLessThan(left, right); /// <summary> /// uint32x4_t vcltq_s32 (int32x4_t a, int32x4_t b) /// A32: VCLT.S32 Qd, Qn, Qm /// A64: CMGT Vd.4S, Vn.4S, Vm.4S 
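/// As with the other element-wise comparisons, each lane of the result is
/// all-ones where the condition holds and all-zeros otherwise, ready for use
/// with BitwiseSelect.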
/// </summary> public static Vector128<int> CompareLessThan(Vector128<int> left, Vector128<int> right) => CompareLessThan(left, right); /// <summary> /// uint8x16_t vcltq_s8 (int8x16_t a, int8x16_t b) /// A32: VCLT.S8 Qd, Qn, Qm /// A64: CMGT Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> CompareLessThan(Vector128<sbyte> left, Vector128<sbyte> right) => CompareLessThan(left, right); /// <summary> /// uint32x4_t vcltq_f32 (float32x4_t a, float32x4_t b) /// A32: VCLT.F32 Qd, Qn, Qm /// A64: FCMGT Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<float> CompareLessThan(Vector128<float> left, Vector128<float> right) => CompareLessThan(left, right); /// <summary> /// uint16x8_t vcltq_u16 (uint16x8_t a, uint16x8_t b) /// A32: VCLT.U16 Qd, Qn, Qm /// A64: CMHI Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<ushort> CompareLessThan(Vector128<ushort> left, Vector128<ushort> right) => CompareLessThan(left, right); /// <summary> /// uint32x4_t vcltq_u32 (uint32x4_t a, uint32x4_t b) /// A32: VCLT.U32 Qd, Qn, Qm /// A64: CMHI Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<uint> CompareLessThan(Vector128<uint> left, Vector128<uint> right) => CompareLessThan(left, right); /// <summary> /// uint8x8_t vcle_u8 (uint8x8_t a, uint8x8_t b) /// A32: VCLE.U8 Dd, Dn, Dm /// A64: CMHS Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> CompareLessThanOrEqual(Vector64<byte> left, Vector64<byte> right) => CompareLessThanOrEqual(left, right); /// <summary> /// uint16x4_t vcle_s16 (int16x4_t a, int16x4_t b) /// A32: VCLE.S16 Dd, Dn, Dm /// A64: CMGE Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<short> CompareLessThanOrEqual(Vector64<short> left, Vector64<short> right) => CompareLessThanOrEqual(left, right); /// <summary> /// uint32x2_t vcle_s32 (int32x2_t a, int32x2_t b) /// A32: VCLE.S32 Dd, Dn, Dm /// A64: CMGE Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<int> CompareLessThanOrEqual(Vector64<int> left, Vector64<int> right) => CompareLessThanOrEqual(left, right); /// <summary> /// uint8x8_t vcle_s8 (int8x8_t a, int8x8_t b) /// A32: VCLE.S8 Dd, Dn, Dm /// A64: CMGE Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> CompareLessThanOrEqual(Vector64<sbyte> left, Vector64<sbyte> right) => CompareLessThanOrEqual(left, right); /// <summary> /// uint32x2_t vcle_f32 (float32x2_t a, float32x2_t b) /// A32: VCLE.F32 Dd, Dn, Dm /// A64: FCMGE Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<float> CompareLessThanOrEqual(Vector64<float> left, Vector64<float> right) => CompareLessThanOrEqual(left, right); /// <summary> /// uint16x4_t vcle_u16 (uint16x4_t a, uint16x4_t b) /// A32: VCLE.U16 Dd, Dn, Dm /// A64: CMHS Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<ushort> CompareLessThanOrEqual(Vector64<ushort> left, Vector64<ushort> right) => CompareLessThanOrEqual(left, right); /// <summary> /// uint32x2_t vcle_u32 (uint32x2_t a, uint32x2_t b) /// A32: VCLE.U32 Dd, Dn, Dm /// A64: CMHS Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<uint> CompareLessThanOrEqual(Vector64<uint> left, Vector64<uint> right) => CompareLessThanOrEqual(left, right); /// <summary> /// uint8x16_t vcleq_u8 (uint8x16_t a, uint8x16_t b) /// A32: VCLE.U8 Qd, Qn, Qm /// A64: CMHS Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> CompareLessThanOrEqual(Vector128<byte> left, Vector128<byte> right) => CompareLessThanOrEqual(left, right); /// <summary> /// uint16x8_t vcleq_s16 (int16x8_t a, int16x8_t b) /// A32: 
VCLE.S16 Qd, Qn, Qm /// A64: CMGE Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<short> CompareLessThanOrEqual(Vector128<short> left, Vector128<short> right) => CompareLessThanOrEqual(left, right); /// <summary> /// uint32x4_t vcleq_s32 (int32x4_t a, int32x4_t b) /// A32: VCLE.S32 Qd, Qn, Qm /// A64: CMGE Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<int> CompareLessThanOrEqual(Vector128<int> left, Vector128<int> right) => CompareLessThanOrEqual(left, right); /// <summary> /// uint8x16_t vcleq_s8 (int8x16_t a, int8x16_t b) /// A32: VCLE.S8 Qd, Qn, Qm /// A64: CMGE Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> CompareLessThanOrEqual(Vector128<sbyte> left, Vector128<sbyte> right) => CompareLessThanOrEqual(left, right); /// <summary> /// uint32x4_t vcleq_f32 (float32x4_t a, float32x4_t b) /// A32: VCLE.F32 Qd, Qn, Qm /// A64: FCMGE Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<float> CompareLessThanOrEqual(Vector128<float> left, Vector128<float> right) => CompareLessThanOrEqual(left, right); /// <summary> /// uint16x8_t vcleq_u16 (uint16x8_t a, uint16x8_t b) /// A32: VCLE.U16 Qd, Qn, Qm /// A64: CMHS Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<ushort> CompareLessThanOrEqual(Vector128<ushort> left, Vector128<ushort> right) => CompareLessThanOrEqual(left, right); /// <summary> /// uint32x4_t vcleq_u32 (uint32x4_t a, uint32x4_t b) /// A32: VCLE.U32 Qd, Qn, Qm /// A64: CMHS Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<uint> CompareLessThanOrEqual(Vector128<uint> left, Vector128<uint> right) => CompareLessThanOrEqual(left, right); /// <summary> /// uint8x8_t vtst_u8 (uint8x8_t a, uint8x8_t b) /// A32: VTST.8 Dd, Dn, Dm /// A64: CMTST Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> CompareTest(Vector64<byte> left, Vector64<byte> right) => CompareTest(left, right); /// <summary> /// uint16x4_t vtst_s16 (int16x4_t a, int16x4_t b) /// A32: VTST.16 Dd, Dn, Dm /// A64: CMTST Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<short> CompareTest(Vector64<short> left, Vector64<short> right) => CompareTest(left, right); /// <summary> /// uint32x2_t vtst_s32 (int32x2_t a, int32x2_t b) /// A32: VTST.32 Dd, Dn, Dm /// A64: CMTST Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<int> CompareTest(Vector64<int> left, Vector64<int> right) => CompareTest(left, right); /// <summary> /// uint8x8_t vtst_s8 (int8x8_t a, int8x8_t b) /// A32: VTST.8 Dd, Dn, Dm /// A64: CMTST Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> CompareTest(Vector64<sbyte> left, Vector64<sbyte> right) => CompareTest(left, right); /// <summary> /// uint32x2_t vtst_f32 (float32x2_t a, float32x2_t b) /// A32: VTST.32 Dd, Dn, Dm /// A64: CMTST Vd.2S, Vn.2S, Vm.2S /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
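/// CompareTest sets a lane to all-ones when the bitwise AND of the two operands
/// is non-zero in that lane; it tests for overlapping set bits rather than equality.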
/// </summary> public static Vector64<float> CompareTest(Vector64<float> left, Vector64<float> right) => CompareTest(left, right); /// <summary> /// uint16x4_t vtst_u16 (uint16x4_t a, uint16x4_t b) /// A32: VTST.16 Dd, Dn, Dm /// A64: CMTST Vd.4H, Vn.4H, Vm.4H /// </summary> public static Vector64<ushort> CompareTest(Vector64<ushort> left, Vector64<ushort> right) => CompareTest(left, right); /// <summary> /// uint32x2_t vtst_u32 (uint32x2_t a, uint32x2_t b) /// A32: VTST.32 Dd, Dn, Dm /// A64: CMTST Vd.2S, Vn.2S, Vm.2S /// </summary> public static Vector64<uint> CompareTest(Vector64<uint> left, Vector64<uint> right) => CompareTest(left, right); /// <summary> /// uint8x16_t vtstq_u8 (uint8x16_t a, uint8x16_t b) /// A32: VTST.8 Qd, Qn, Qm /// A64: CMTST Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> CompareTest(Vector128<byte> left, Vector128<byte> right) => CompareTest(left, right); /// <summary> /// uint16x8_t vtstq_s16 (int16x8_t a, int16x8_t b) /// A32: VTST.16 Qd, Qn, Qm /// A64: CMTST Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<short> CompareTest(Vector128<short> left, Vector128<short> right) => CompareTest(left, right); /// <summary> /// uint32x4_t vtstq_s32 (int32x4_t a, int32x4_t b) /// A32: VTST.32 Qd, Qn, Qm /// A64: CMTST Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<int> CompareTest(Vector128<int> left, Vector128<int> right) => CompareTest(left, right); /// <summary> /// uint8x16_t vtstq_s8 (int8x16_t a, int8x16_t b) /// A32: VTST.8 Qd, Qn, Qm /// A64: CMTST Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> CompareTest(Vector128<sbyte> left, Vector128<sbyte> right) => CompareTest(left, right); /// <summary> /// uint32x4_t vtstq_f32 (float32x4_t a, float32x4_t b) /// A32: VTST.32 Qd, Qn, Qm /// A64: CMTST Vd.4S, Vn.4S, Vm.4S /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
/// </summary> public static Vector128<float> CompareTest(Vector128<float> left, Vector128<float> right) => CompareTest(left, right); /// <summary> /// uint16x8_t vtstq_u16 (uint16x8_t a, uint16x8_t b) /// A32: VTST.16 Qd, Qn, Qm /// A64: CMTST Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<ushort> CompareTest(Vector128<ushort> left, Vector128<ushort> right) => CompareTest(left, right); /// <summary> /// uint32x4_t vtstq_u32 (uint32x4_t a, uint32x4_t b) /// A32: VTST.32 Qd, Qn, Qm /// A64: CMTST Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<uint> CompareTest(Vector128<uint> left, Vector128<uint> right) => CompareTest(left, right); /// <summary> /// int32x2_t vcvta_s32_f32 (float32x2_t a) /// A32: VCVTA.S32.F32 Dd, Dm /// A64: FCVTAS Vd.2S, Vn.2S /// </summary> public static Vector64<int> ConvertToInt32RoundAwayFromZero(Vector64<float> value) => ConvertToInt32RoundAwayFromZero(value); /// <summary> /// int32x4_t vcvtaq_s32_f32 (float32x4_t a) /// A32: VCVTA.S32.F32 Qd, Qm /// A64: FCVTAS Vd.4S, Vn.4S /// </summary> public static Vector128<int> ConvertToInt32RoundAwayFromZero(Vector128<float> value) => ConvertToInt32RoundAwayFromZero(value); /// <summary> /// int32_t vcvtas_s32_f32 (float32_t a) /// A32: VCVTA.S32.F32 Sd, Sm /// A64: FCVTAS Sd, Sn /// </summary> public static Vector64<int> ConvertToInt32RoundAwayFromZeroScalar(Vector64<float> value) => ConvertToInt32RoundAwayFromZeroScalar(value); /// <summary> /// int32x2_t vcvtn_s32_f32 (float32x2_t a) /// A32: VCVTN.S32.F32 Dd, Dm /// A64: FCVTNS Vd.2S, Vn.2S /// </summary> public static Vector64<int> ConvertToInt32RoundToEven(Vector64<float> value) => ConvertToInt32RoundToEven(value); /// <summary> /// int32x4_t vcvtnq_s32_f32 (float32x4_t a) /// A32: VCVTN.S32.F32 Qd, Qm /// A64: FCVTNS Vd.4S, Vn.4S /// </summary> public static Vector128<int> ConvertToInt32RoundToEven(Vector128<float> value) => ConvertToInt32RoundToEven(value); /// <summary> /// int32_t vcvtns_s32_f32 (float32_t a) /// A32: VCVTN.S32.F32 Sd, Sm /// A64: FCVTNS Sd, Sn /// </summary> public static Vector64<int> ConvertToInt32RoundToEvenScalar(Vector64<float> value) => ConvertToInt32RoundToEvenScalar(value); /// <summary> /// int32x2_t vcvtm_s32_f32 (float32x2_t a) /// A32: VCVTM.S32.F32 Dd, Dm /// A64: FCVTMS Vd.2S, Vn.2S /// </summary> public static Vector64<int> ConvertToInt32RoundToNegativeInfinity(Vector64<float> value) => ConvertToInt32RoundToNegativeInfinity(value); /// <summary> /// int32x4_t vcvtmq_s32_f32 (float32x4_t a) /// A32: VCVTM.S32.F32 Qd, Qm /// A64: FCVTMS Vd.4S, Vn.4S /// </summary> public static Vector128<int> ConvertToInt32RoundToNegativeInfinity(Vector128<float> value) => ConvertToInt32RoundToNegativeInfinity(value); /// <summary> /// int32_t vcvtms_s32_f32 (float32_t a) /// A32: VCVTM.S32.F32 Sd, Sm /// A64: FCVTMS Sd, Sn /// </summary> public static Vector64<int> ConvertToInt32RoundToNegativeInfinityScalar(Vector64<float> value) => ConvertToInt32RoundToNegativeInfinityScalar(value); /// <summary> /// int32x2_t vcvtp_s32_f32 (float32x2_t a) /// A32: VCVTP.S32.F32 Dd, Dm /// A64: FCVTPS Vd.2S, Vn.2S /// </summary> public static Vector64<int> ConvertToInt32RoundToPositiveInfinity(Vector64<float> value) => ConvertToInt32RoundToPositiveInfinity(value); /// <summary> /// int32x4_t vcvtpq_s32_f32 (float32x4_t a) /// A32: VCVTP.S32.F32 Qd, Qm /// A64: FCVTPS Vd.4S, Vn.4S /// </summary> public static Vector128<int> ConvertToInt32RoundToPositiveInfinity(Vector128<float> value) => ConvertToInt32RoundToPositiveInfinity(value); /// 

        /// <summary>
        /// int32_t vcvtps_s32_f32 (float32_t a)
        /// A32: VCVTP.S32.F32 Sd, Sm
        /// A64: FCVTPS Sd, Sn
        /// </summary>
        public static Vector64<int> ConvertToInt32RoundToPositiveInfinityScalar(Vector64<float> value) => ConvertToInt32RoundToPositiveInfinityScalar(value);

        /// <summary>
        /// int32x2_t vcvt_s32_f32 (float32x2_t a)
        /// A32: VCVT.S32.F32 Dd, Dm
        /// A64: FCVTZS Vd.2S, Vn.2S
        /// </summary>
        public static Vector64<int> ConvertToInt32RoundToZero(Vector64<float> value) => ConvertToInt32RoundToZero(value);

        /// <summary>
        /// int32x4_t vcvtq_s32_f32 (float32x4_t a)
        /// A32: VCVT.S32.F32 Qd, Qm
        /// A64: FCVTZS Vd.4S, Vn.4S
        /// </summary>
        public static Vector128<int> ConvertToInt32RoundToZero(Vector128<float> value) => ConvertToInt32RoundToZero(value);

        /// <summary>
        /// int32_t vcvts_s32_f32 (float32_t a)
        /// A32: VCVT.S32.F32 Sd, Sm
        /// A64: FCVTZS Sd, Sn
        /// </summary>
        public static Vector64<int> ConvertToInt32RoundToZeroScalar(Vector64<float> value) => ConvertToInt32RoundToZeroScalar(value);

        /// <summary>
        /// float32x2_t vcvt_f32_s32 (int32x2_t a)
        /// A32: VCVT.F32.S32 Dd, Dm
        /// A64: SCVTF Vd.2S, Vn.2S
        /// </summary>
        public static Vector64<float> ConvertToSingle(Vector64<int> value) => ConvertToSingle(value);

        /// <summary>
        /// float32x2_t vcvt_f32_u32 (uint32x2_t a)
        /// A32: VCVT.F32.U32 Dd, Dm
        /// A64: UCVTF Vd.2S, Vn.2S
        /// </summary>
        public static Vector64<float> ConvertToSingle(Vector64<uint> value) => ConvertToSingle(value);

        /// <summary>
        /// float32x4_t vcvtq_f32_s32 (int32x4_t a)
        /// A32: VCVT.F32.S32 Qd, Qm
        /// A64: SCVTF Vd.4S, Vn.4S
        /// </summary>
        public static Vector128<float> ConvertToSingle(Vector128<int> value) => ConvertToSingle(value);

        /// <summary>
        /// float32x4_t vcvtq_f32_u32 (uint32x4_t a)
        /// A32: VCVT.F32.U32 Qd, Qm
        /// A64: UCVTF Vd.4S, Vn.4S
        /// </summary>
        public static Vector128<float> ConvertToSingle(Vector128<uint> value) => ConvertToSingle(value);

        /// <summary>
        /// float32_t vcvts_f32_s32 (int32_t a)
        /// A32: VCVT.F32.S32 Sd, Sm
        /// A64: SCVTF Sd, Sn
        /// </summary>
        public static Vector64<float> ConvertToSingleScalar(Vector64<int> value) => ConvertToSingleScalar(value);

        /// <summary>
        /// float32_t vcvts_f32_u32 (uint32_t a)
        /// A32: VCVT.F32.U32 Sd, Sm
        /// A64: UCVTF Sd, Sn
        /// </summary>
        public static Vector64<float> ConvertToSingleScalar(Vector64<uint> value) => ConvertToSingleScalar(value);

        /// <summary>
        /// uint32x2_t vcvta_u32_f32 (float32x2_t a)
        /// A32: VCVTA.U32.F32 Dd, Dm
        /// A64: FCVTAU Vd.2S, Vn.2S
        /// </summary>
        public static Vector64<uint> ConvertToUInt32RoundAwayFromZero(Vector64<float> value) => ConvertToUInt32RoundAwayFromZero(value);

        /// <summary>
        /// uint32x4_t vcvtaq_u32_f32 (float32x4_t a)
        /// A32: VCVTA.U32.F32 Qd, Qm
        /// A64: FCVTAU Vd.4S, Vn.4S
        /// </summary>
        public static Vector128<uint> ConvertToUInt32RoundAwayFromZero(Vector128<float> value) => ConvertToUInt32RoundAwayFromZero(value);

        /// <summary>
        /// uint32_t vcvtas_u32_f32 (float32_t a)
        /// A32: VCVTA.U32.F32 Sd, Sm
        /// A64: FCVTAU Sd, Sn
        /// </summary>
        public static Vector64<uint> ConvertToUInt32RoundAwayFromZeroScalar(Vector64<float> value) => ConvertToUInt32RoundAwayFromZeroScalar(value);

        /// <summary>
        /// uint32x2_t vcvtn_u32_f32 (float32x2_t a)
        /// A32: VCVTN.U32.F32 Dd, Dm
        /// A64: FCVTNU Vd.2S, Vn.2S
        /// </summary>
        public static Vector64<uint> ConvertToUInt32RoundToEven(Vector64<float> value) => ConvertToUInt32RoundToEven(value);

        /// <summary>
        /// uint32x4_t vcvtnq_u32_f32 (float32x4_t a)
        /// A32: VCVTN.U32.F32 Qd, Qm
        /// A64: FCVTNU Vd.4S, Vn.4S
        /// </summary>
        public static Vector128<uint> ConvertToUInt32RoundToEven(Vector128<float> value) => ConvertToUInt32RoundToEven(value);

        /// <summary>
        /// uint32_t vcvtns_u32_f32 (float32_t a)
        /// A32: VCVTN.U32.F32 Sd, Sm
        /// A64: FCVTNU Sd, Sn
        /// </summary>
        public static Vector64<uint> ConvertToUInt32RoundToEvenScalar(Vector64<float> value) => ConvertToUInt32RoundToEvenScalar(value);

        /// <summary>
        /// uint32x2_t vcvtm_u32_f32 (float32x2_t a)
        /// A32: VCVTM.U32.F32 Dd, Dm
        /// A64: FCVTMU Vd.2S, Vn.2S
        /// </summary>
        public static Vector64<uint> ConvertToUInt32RoundToNegativeInfinity(Vector64<float> value) => ConvertToUInt32RoundToNegativeInfinity(value);

        /// <summary>
        /// uint32x4_t vcvtmq_u32_f32 (float32x4_t a)
        /// A32: VCVTM.U32.F32 Qd, Qm
        /// A64: FCVTMU Vd.4S, Vn.4S
        /// </summary>
        public static Vector128<uint> ConvertToUInt32RoundToNegativeInfinity(Vector128<float> value) => ConvertToUInt32RoundToNegativeInfinity(value);

        /// <summary>
        /// uint32_t vcvtms_u32_f32 (float32_t a)
        /// A32: VCVTM.U32.F32 Sd, Sm
        /// A64: FCVTMU Sd, Sn
        /// </summary>
        public static Vector64<uint> ConvertToUInt32RoundToNegativeInfinityScalar(Vector64<float> value) => ConvertToUInt32RoundToNegativeInfinityScalar(value);

        /// <summary>
        /// uint32x2_t vcvtp_u32_f32 (float32x2_t a)
        /// A32: VCVTP.U32.F32 Dd, Dm
        /// A64: FCVTPU Vd.2S, Vn.2S
        /// </summary>
        public static Vector64<uint> ConvertToUInt32RoundToPositiveInfinity(Vector64<float> value) => ConvertToUInt32RoundToPositiveInfinity(value);

        /// <summary>
        /// uint32x4_t vcvtpq_u32_f32 (float32x4_t a)
        /// A32: VCVTP.U32.F32 Qd, Qm
        /// A64: FCVTPU Vd.4S, Vn.4S
        /// </summary>
        public static Vector128<uint> ConvertToUInt32RoundToPositiveInfinity(Vector128<float> value) => ConvertToUInt32RoundToPositiveInfinity(value);

        /// <summary>
        /// uint32_t vcvtps_u32_f32 (float32_t a)
        /// A32: VCVTP.U32.F32 Sd, Sm
        /// A64: FCVTPU Sd, Sn
        /// </summary>
        public static Vector64<uint> ConvertToUInt32RoundToPositiveInfinityScalar(Vector64<float> value) => ConvertToUInt32RoundToPositiveInfinityScalar(value);

        /// <summary>
        /// uint32x2_t vcvt_u32_f32 (float32x2_t a)
        /// A32: VCVT.U32.F32 Dd, Dm
        /// A64: FCVTZU Vd.2S, Vn.2S
        /// </summary>
        public static Vector64<uint> ConvertToUInt32RoundToZero(Vector64<float> value) => ConvertToUInt32RoundToZero(value);

        /// <summary>
        /// uint32x4_t vcvtq_u32_f32 (float32x4_t a)
        /// A32: VCVT.U32.F32 Qd, Qm
        /// A64: FCVTZU Vd.4S, Vn.4S
        /// </summary>
        public static Vector128<uint> ConvertToUInt32RoundToZero(Vector128<float> value) => ConvertToUInt32RoundToZero(value);

        /// <summary>
        /// uint32_t vcvts_u32_f32 (float32_t a)
        /// A32: VCVT.U32.F32 Sd, Sm
        /// A64: FCVTZU Sd, Sn
        /// </summary>
        public static Vector64<uint> ConvertToUInt32RoundToZeroScalar(Vector64<float> value) => ConvertToUInt32RoundToZeroScalar(value);

        /// <summary>
        /// float64x1_t vdiv_f64 (float64x1_t a, float64x1_t b)
        /// A32: VDIV.F64 Dd, Dn, Dm
        /// A64: FDIV Dd, Dn, Dm
        /// </summary>
        public static Vector64<double> DivideScalar(Vector64<double> left, Vector64<double> right) => DivideScalar(left, right);
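
        // Illustrative sketch (assumes AdvSimd.IsSupported): the *Scalar variants operate on
        // element 0 only, matching the scalar-register instruction forms documented above.
        //
        //     Vector64<double> q = AdvSimd.DivideScalar(Vector64.Create(10.0), Vector64.Create(4.0));
        //     // q.ToScalar() == 2.5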

        /// <summary>
        /// float32_t vdivs_f32 (float32_t a, float32_t b)
        /// A32: VDIV.F32 Sd, Sn, Sm
        /// A64: FDIV Sd, Sn, Sm
        /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
        /// </summary>
        public static Vector64<float> DivideScalar(Vector64<float> left, Vector64<float> right) => DivideScalar(left, right);

        /// <summary>
        /// uint8x8_t vdup_lane_u8 (uint8x8_t vec, const int lane)
        /// A32: VDUP.8 Dd, Dm[index]
        /// A64: DUP Vd.8B, Vn.B[index]
        /// </summary>
        public static Vector64<byte> DuplicateSelectedScalarToVector64(Vector64<byte> value, byte index) => DuplicateSelectedScalarToVector64(value, index);

        /// <summary>
        /// int16x4_t vdup_lane_s16 (int16x4_t vec, const int lane)
        /// A32: VDUP.16 Dd, Dm[index]
        /// A64: DUP Vd.4H, Vn.H[index]
        /// </summary>
        public static Vector64<short> DuplicateSelectedScalarToVector64(Vector64<short> value, byte index) => DuplicateSelectedScalarToVector64(value, index);

        /// <summary>
        /// int32x2_t vdup_lane_s32 (int32x2_t vec, const int lane)
        /// A32: VDUP.32 Dd, Dm[index]
        /// A64: DUP Vd.2S, Vn.S[index]
        /// </summary>
        public static Vector64<int> DuplicateSelectedScalarToVector64(Vector64<int> value, byte index) => DuplicateSelectedScalarToVector64(value, index);

        /// <summary>
        /// float32x2_t vdup_lane_f32 (float32x2_t vec, const int lane)
        /// A32: VDUP.32 Dd, Dm[index]
        /// A64: DUP Vd.2S, Vn.S[index]
        /// </summary>
        public static Vector64<float> DuplicateSelectedScalarToVector64(Vector64<float> value, byte index) => DuplicateSelectedScalarToVector64(value, index);

        /// <summary>
        /// int8x8_t vdup_lane_s8 (int8x8_t vec, const int lane)
        /// A32: VDUP.8 Dd, Dm[index]
        /// A64: DUP Vd.8B, Vn.B[index]
        /// </summary>
        public static Vector64<sbyte> DuplicateSelectedScalarToVector64(Vector64<sbyte> value, byte index) => DuplicateSelectedScalarToVector64(value, index);

        /// <summary>
        /// uint16x4_t vdup_lane_u16 (uint16x4_t vec, const int lane)
        /// A32: VDUP.16 Dd, Dm[index]
        /// A64: DUP Vd.4H, Vn.H[index]
        /// </summary>
        public static Vector64<ushort> DuplicateSelectedScalarToVector64(Vector64<ushort> value, byte index) => DuplicateSelectedScalarToVector64(value, index);

        /// <summary>
        /// uint32x2_t vdup_lane_u32 (uint32x2_t vec, const int lane)
        /// A32: VDUP.32 Dd, Dm[index]
        /// A64: DUP Vd.2S, Vn.S[index]
        /// </summary>
        public static Vector64<uint> DuplicateSelectedScalarToVector64(Vector64<uint> value, byte index) => DuplicateSelectedScalarToVector64(value, index);

        /// <summary>
        /// uint8x8_t vdup_laneq_u8 (uint8x16_t vec, const int lane)
        /// A32: VDUP.8 Dd, Dm[index]
        /// A64: DUP Vd.8B, Vn.B[index]
        /// </summary>
        public static Vector64<byte> DuplicateSelectedScalarToVector64(Vector128<byte> value, byte index) => DuplicateSelectedScalarToVector64(value, index);

        /// <summary>
        /// int16x4_t vdup_laneq_s16 (int16x8_t vec, const int lane)
        /// A32: VDUP.16 Dd, Dm[index]
        /// A64: DUP Vd.4H, Vn.H[index]
        /// </summary>
        public static Vector64<short> DuplicateSelectedScalarToVector64(Vector128<short> value, byte index) => DuplicateSelectedScalarToVector64(value, index);

        /// <summary>
        /// int32x2_t vdup_laneq_s32 (int32x4_t vec, const int lane)
        /// A32: VDUP.32 Dd, Dm[index]
        /// A64: DUP Vd.2S, Vn.S[index]
        /// </summary>
        public static Vector64<int> DuplicateSelectedScalarToVector64(Vector128<int> value, byte index) => DuplicateSelectedScalarToVector64(value, index);

        /// <summary>
        /// float32x2_t vdup_laneq_f32 (float32x4_t vec, const int lane)
        /// A32: VDUP.32 Dd, Dm[index]
        /// A64: DUP Vd.2S, Vn.S[index]
        /// </summary>
        public static Vector64<float> DuplicateSelectedScalarToVector64(Vector128<float> value, byte index) => DuplicateSelectedScalarToVector64(value, index);
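
        // Illustrative sketch (assumes AdvSimd.IsSupported): DuplicateSelectedScalarToVector64
        // broadcasts the lane selected by `index` across every lane of the result.
        //
        //     Vector64<short> src = Vector64.Create((short)10, 20, 30, 40);
        //     Vector64<short> b = AdvSimd.DuplicateSelectedScalarToVector64(src, 2); // { 30, 30, 30, 30 }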

        /// <summary>
        /// int8x8_t vdup_laneq_s8 (int8x16_t vec, const int lane)
        /// A32: VDUP.8 Dd, Dm[index]
        /// A64: DUP Vd.8B, Vn.B[index]
        /// </summary>
        public static Vector64<sbyte> DuplicateSelectedScalarToVector64(Vector128<sbyte> value, byte index) => DuplicateSelectedScalarToVector64(value, index);

        /// <summary>
        /// uint16x4_t vdup_laneq_u16 (uint16x8_t vec, const int lane)
        /// A32: VDUP.16 Dd, Dm[index]
        /// A64: DUP Vd.4H, Vn.H[index]
        /// </summary>
        public static Vector64<ushort> DuplicateSelectedScalarToVector64(Vector128<ushort> value, byte index) => DuplicateSelectedScalarToVector64(value, index);

        /// <summary>
        /// uint32x2_t vdup_laneq_u32 (uint32x4_t vec, const int lane)
        /// A32: VDUP.32 Dd, Dm[index]
        /// A64: DUP Vd.2S, Vn.S[index]
        /// </summary>
        public static Vector64<uint> DuplicateSelectedScalarToVector64(Vector128<uint> value, byte index) => DuplicateSelectedScalarToVector64(value, index);

        /// <summary>
        /// uint8x16_t vdupq_lane_u8 (uint8x8_t vec, const int lane)
        /// A32: VDUP.8 Qd, Dm[index]
        /// A64: DUP Vd.16B, Vn.B[index]
        /// </summary>
        public static Vector128<byte> DuplicateSelectedScalarToVector128(Vector64<byte> value, byte index) => DuplicateSelectedScalarToVector128(value, index);

        /// <summary>
        /// int16x8_t vdupq_lane_s16 (int16x4_t vec, const int lane)
        /// A32: VDUP.16 Qd, Dm[index]
        /// A64: DUP Vd.8H, Vn.H[index]
        /// </summary>
        public static Vector128<short> DuplicateSelectedScalarToVector128(Vector64<short> value, byte index) => DuplicateSelectedScalarToVector128(value, index);

        /// <summary>
        /// int32x4_t vdupq_lane_s32 (int32x2_t vec, const int lane)
        /// A32: VDUP.32 Qd, Dm[index]
        /// A64: DUP Vd.4S, Vn.S[index]
        /// </summary>
        public static Vector128<int> DuplicateSelectedScalarToVector128(Vector64<int> value, byte index) => DuplicateSelectedScalarToVector128(value, index);

        /// <summary>
        /// float32x4_t vdupq_lane_f32 (float32x2_t vec, const int lane)
        /// A32: VDUP.32 Qd, Dm[index]
        /// A64: DUP Vd.4S, Vn.S[index]
        /// </summary>
        public static Vector128<float> DuplicateSelectedScalarToVector128(Vector64<float> value, byte index) => DuplicateSelectedScalarToVector128(value, index);

        /// <summary>
        /// int8x16_t vdupq_lane_s8 (int8x8_t vec, const int lane)
        /// A32: VDUP.8 Qd, Dm[index]
        /// A64: DUP Vd.16B, Vn.B[index]
        /// </summary>
        public static Vector128<sbyte> DuplicateSelectedScalarToVector128(Vector64<sbyte> value, byte index) => DuplicateSelectedScalarToVector128(value, index);

        /// <summary>
        /// uint16x8_t vdupq_lane_u16 (uint16x4_t vec, const int lane)
        /// A32: VDUP.16 Qd, Dm[index]
        /// A64: DUP Vd.8H, Vn.H[index]
        /// </summary>
        public static Vector128<ushort> DuplicateSelectedScalarToVector128(Vector64<ushort> value, byte index) => DuplicateSelectedScalarToVector128(value, index);

        /// <summary>
        /// uint32x4_t vdupq_lane_u32 (uint32x2_t vec, const int lane)
        /// A32: VDUP.32 Qd, Dm[index]
        /// A64: DUP Vd.4S, Vn.S[index]
        /// </summary>
        public static Vector128<uint> DuplicateSelectedScalarToVector128(Vector64<uint> value, byte index) => DuplicateSelectedScalarToVector128(value, index);

        /// <summary>
        /// uint8x16_t vdupq_laneq_u8 (uint8x16_t vec, const int lane)
        /// A32: VDUP.8 Qd, Dm[index]
        /// A64: DUP Vd.16B, Vn.B[index]
        /// </summary>
        public static Vector128<byte> DuplicateSelectedScalarToVector128(Vector128<byte> value, byte index) => DuplicateSelectedScalarToVector128(value, index);

        /// <summary>
        /// int16x8_t vdupq_laneq_s16 (int16x8_t vec, const int lane)
        /// A32: VDUP.16 Qd, Dm[index]
        /// A64: DUP Vd.8H, Vn.H[index]
        /// </summary>
        public static Vector128<short> DuplicateSelectedScalarToVector128(Vector128<short> value, byte index) => DuplicateSelectedScalarToVector128(value, index);

        /// <summary>
        /// int32x4_t vdupq_laneq_s32 (int32x4_t vec, const int lane)
        /// A32: VDUP.32 Qd, Dm[index]
        /// A64: DUP Vd.4S, Vn.S[index]
        /// </summary>
        public static Vector128<int> DuplicateSelectedScalarToVector128(Vector128<int> value, byte index) => DuplicateSelectedScalarToVector128(value, index);

        /// <summary>
        /// float32x4_t vdupq_laneq_f32 (float32x4_t vec, const int lane)
        /// A32: VDUP.32 Qd, Dm[index]
        /// A64: DUP Vd.4S, Vn.S[index]
        /// </summary>
        public static Vector128<float> DuplicateSelectedScalarToVector128(Vector128<float> value, byte index) => DuplicateSelectedScalarToVector128(value, index);

        /// <summary>
        /// int8x16_t vdupq_laneq_s8 (int8x16_t vec, const int lane)
        /// A32: VDUP.8 Qd, Dm[index]
        /// A64: DUP Vd.16B, Vn.B[index]
        /// </summary>
        public static Vector128<sbyte> DuplicateSelectedScalarToVector128(Vector128<sbyte> value, byte index) => DuplicateSelectedScalarToVector128(value, index);

        /// <summary>
        /// uint16x8_t vdupq_laneq_u16 (uint16x8_t vec, const int lane)
        /// A32: VDUP.16 Qd, Dm[index]
        /// A64: DUP Vd.8H, Vn.H[index]
        /// </summary>
        public static Vector128<ushort> DuplicateSelectedScalarToVector128(Vector128<ushort> value, byte index) => DuplicateSelectedScalarToVector128(value, index);

        /// <summary>
        /// uint32x4_t vdupq_laneq_u32 (uint32x4_t vec, const int lane)
        /// A32: VDUP.32 Qd, Dm[index]
        /// A64: DUP Vd.4S, Vn.S[index]
        /// </summary>
        public static Vector128<uint> DuplicateSelectedScalarToVector128(Vector128<uint> value, byte index) => DuplicateSelectedScalarToVector128(value, index);

        /// <summary>
        /// uint8x8_t vdup_n_u8 (uint8_t value)
        /// A32: VDUP.8 Dd, Rt
        /// A64: DUP Vd.8B, Rn
        /// </summary>
        public static Vector64<byte> DuplicateToVector64(byte value) => DuplicateToVector64(value);

        /// <summary>
        /// int16x4_t vdup_n_s16 (int16_t value)
        /// A32: VDUP.16 Dd, Rt
        /// A64: DUP Vd.4H, Rn
        /// </summary>
        public static Vector64<short> DuplicateToVector64(short value) => DuplicateToVector64(value);

        /// <summary>
        /// int32x2_t vdup_n_s32 (int32_t value)
        /// A32: VDUP.32 Dd, Rt
        /// A64: DUP Vd.2S, Rn
        /// </summary>
        public static Vector64<int> DuplicateToVector64(int value) => DuplicateToVector64(value);

        /// <summary>
        /// int8x8_t vdup_n_s8 (int8_t value)
        /// A32: VDUP.8 Dd, Rt
        /// A64: DUP Vd.8B, Rn
        /// </summary>
        public static Vector64<sbyte> DuplicateToVector64(sbyte value) => DuplicateToVector64(value);

        /// <summary>
        /// float32x2_t vdup_n_f32 (float32_t value)
        /// A32: VDUP Dd, Dm[0]
        /// A64: DUP Vd.2S, Vn.S[0]
        /// </summary>
        public static Vector64<float> DuplicateToVector64(float value) => DuplicateToVector64(value);

        /// <summary>
        /// uint16x4_t vdup_n_u16 (uint16_t value)
        /// A32: VDUP.16 Dd, Rt
        /// A64: DUP Vd.4H, Rn
        /// </summary>
        public static Vector64<ushort> DuplicateToVector64(ushort value) => DuplicateToVector64(value);

        /// <summary>
        /// uint32x2_t vdup_n_u32 (uint32_t value)
        /// A32: VDUP.32 Dd, Rt
        /// A64: DUP Vd.2S, Rn
        /// </summary>
        public static Vector64<uint> DuplicateToVector64(uint value) => DuplicateToVector64(value);

        /// <summary>
        /// uint8x16_t vdupq_n_u8 (uint8_t value)
        /// A32: VDUP.8 Qd, Rt
        /// A64: DUP Vd.16B, Rn
        /// </summary>
        public static Vector128<byte> DuplicateToVector128(byte value) => DuplicateToVector128(value);

        /// <summary>
        /// int16x8_t vdupq_n_s16 (int16_t value)
        /// A32: VDUP.16 Qd, Rt
        /// A64: DUP Vd.8H, Rn
        /// </summary>
        public static Vector128<short> DuplicateToVector128(short value) => DuplicateToVector128(value);
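
        // Illustrative sketch (assumes AdvSimd.IsSupported): DuplicateToVector64/128 broadcast a
        // single scalar into every lane.
        //
        //     Vector128<int> all42 = AdvSimd.DuplicateToVector128(42); // every lane == 42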

        /// <summary>
        /// int32x4_t vdupq_n_s32 (int32_t value)
        /// A32: VDUP.32 Qd, Rt
        /// A64: DUP Vd.4S, Rn
        /// </summary>
        public static Vector128<int> DuplicateToVector128(int value) => DuplicateToVector128(value);

        /// <summary>
        /// int8x16_t vdupq_n_s8 (int8_t value)
        /// A32: VDUP.8 Qd, Rt
        /// A64: DUP Vd.16B, Rn
        /// </summary>
        public static Vector128<sbyte> DuplicateToVector128(sbyte value) => DuplicateToVector128(value);

        /// <summary>
        /// float32x4_t vdupq_n_f32 (float32_t value)
        /// A32: VDUP Qd, Dm[0]
        /// A64: DUP Vd.4S, Vn.S[0]
        /// </summary>
        public static Vector128<float> DuplicateToVector128(float value) => DuplicateToVector128(value);

        /// <summary>
        /// uint16x8_t vdupq_n_u16 (uint16_t value)
        /// A32: VDUP.16 Qd, Rt
        /// A64: DUP Vd.8H, Rn
        /// </summary>
        public static Vector128<ushort> DuplicateToVector128(ushort value) => DuplicateToVector128(value);

        /// <summary>
        /// uint32x4_t vdupq_n_u32 (uint32_t value)
        /// A32: VDUP.32 Qd, Rt
        /// A64: DUP Vd.4S, Rn
        /// </summary>
        public static Vector128<uint> DuplicateToVector128(uint value) => DuplicateToVector128(value);

        /// <summary>
        /// uint8_t vget_lane_u8 (uint8x8_t v, const int lane)
        /// A32: VMOV.U8 Rt, Dn[lane]
        /// A64: UMOV Wd, Vn.B[lane]
        /// </summary>
        public static byte Extract(Vector64<byte> vector, byte index) => Extract(vector, index);

        /// <summary>
        /// int16_t vget_lane_s16 (int16x4_t v, const int lane)
        /// A32: VMOV.S16 Rt, Dn[lane]
        /// A64: SMOV Wd, Vn.H[lane]
        /// </summary>
        public static short Extract(Vector64<short> vector, byte index) => Extract(vector, index);

        /// <summary>
        /// int32_t vget_lane_s32 (int32x2_t v, const int lane)
        /// A32: VMOV.32 Rt, Dn[lane]
        /// A64: SMOV Wd, Vn.S[lane]
        /// </summary>
        public static int Extract(Vector64<int> vector, byte index) => Extract(vector, index);

        /// <summary>
        /// int8_t vget_lane_s8 (int8x8_t v, const int lane)
        /// A32: VMOV.S8 Rt, Dn[lane]
        /// A64: SMOV Wd, Vn.B[lane]
        /// </summary>
        public static sbyte Extract(Vector64<sbyte> vector, byte index) => Extract(vector, index);

        /// <summary>
        /// float32_t vget_lane_f32 (float32x2_t v, const int lane)
        /// A32: VMOV.F32 Sd, Sm
        /// A64: DUP Sd, Vn.S[lane]
        /// </summary>
        public static float Extract(Vector64<float> vector, byte index) => Extract(vector, index);

        /// <summary>
        /// uint16_t vget_lane_u16 (uint16x4_t v, const int lane)
        /// A32: VMOV.U16 Rt, Dn[lane]
        /// A64: UMOV Wd, Vn.H[lane]
        /// </summary>
        public static ushort Extract(Vector64<ushort> vector, byte index) => Extract(vector, index);

        /// <summary>
        /// uint32_t vget_lane_u32 (uint32x2_t v, const int lane)
        /// A32: VMOV.32 Rt, Dn[lane]
        /// A64: UMOV Wd, Vn.S[lane]
        /// </summary>
        public static uint Extract(Vector64<uint> vector, byte index) => Extract(vector, index);

        /// <summary>
        /// uint8_t vgetq_lane_u8 (uint8x16_t v, const int lane)
        /// A32: VMOV.U8 Rt, Dn[lane]
        /// A64: UMOV Wd, Vn.B[lane]
        /// </summary>
        public static byte Extract(Vector128<byte> vector, byte index) => Extract(vector, index);

        /// <summary>
        /// float64_t vgetq_lane_f64 (float64x2_t v, const int lane)
        /// A32: VMOV.F64 Dd, Dm
        /// A64: DUP Dd, Vn.D[lane]
        /// </summary>
        public static double Extract(Vector128<double> vector, byte index) => Extract(vector, index);

        /// <summary>
        /// int16_t vgetq_lane_s16 (int16x8_t v, const int lane)
        /// A32: VMOV.S16 Rt, Dn[lane]
        /// A64: SMOV Wd, Vn.H[lane]
        /// </summary>
        public static short Extract(Vector128<short> vector, byte index) => Extract(vector, index);
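
        // Illustrative sketch (assumes AdvSimd.IsSupported): Extract reads one lane out into a
        // scalar; an out-of-range index throws ArgumentOutOfRangeException.
        //
        //     Vector128<int> v = Vector128.Create(1, 2, 3, 4);
        //     int x = AdvSimd.Extract(v, 2); // 3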

        /// <summary>
        /// int32_t vgetq_lane_s32 (int32x4_t v, const int lane)
        /// A32: VMOV.32 Rt, Dn[lane]
        /// A64: SMOV Wd, Vn.S[lane]
        /// </summary>
        public static int Extract(Vector128<int> vector, byte index) => Extract(vector, index);

        /// <summary>
        /// int64_t vgetq_lane_s64 (int64x2_t v, const int lane)
        /// A32: VMOV Rt, Rt2, Dm
        /// A64: UMOV Xd, Vn.D[lane]
        /// </summary>
        public static long Extract(Vector128<long> vector, byte index) => Extract(vector, index);

        /// <summary>
        /// int8_t vgetq_lane_s8 (int8x16_t v, const int lane)
        /// A32: VMOV.S8 Rt, Dn[lane]
        /// A64: SMOV Wd, Vn.B[lane]
        /// </summary>
        public static sbyte Extract(Vector128<sbyte> vector, byte index) => Extract(vector, index);

        /// <summary>
        /// float32_t vgetq_lane_f32 (float32x4_t v, const int lane)
        /// A32: VMOV.F32 Sd, Sm
        /// A64: DUP Sd, Vn.S[lane]
        /// </summary>
        public static float Extract(Vector128<float> vector, byte index) => Extract(vector, index);

        /// <summary>
        /// uint16_t vgetq_lane_u16 (uint16x8_t v, const int lane)
        /// A32: VMOV.U16 Rt, Dn[lane]
        /// A64: UMOV Wd, Vn.H[lane]
        /// </summary>
        public static ushort Extract(Vector128<ushort> vector, byte index) => Extract(vector, index);

        /// <summary>
        /// uint32_t vgetq_lane_u32 (uint32x4_t v, const int lane)
        /// A32: VMOV.32 Rt, Dn[lane]
        /// A64: UMOV Wd, Vn.S[lane]
        /// </summary>
        public static uint Extract(Vector128<uint> vector, byte index) => Extract(vector, index);

        /// <summary>
        /// uint64_t vgetq_lane_u64 (uint64x2_t v, const int lane)
        /// A32: VMOV Rt, Rt2, Dm
        /// A64: UMOV Xd, Vn.D[lane]
        /// </summary>
        public static ulong Extract(Vector128<ulong> vector, byte index) => Extract(vector, index);

        /// <summary>
        /// uint8x8_t vmovn_u16 (uint16x8_t a)
        /// A32: VMOVN.I16 Dd, Qm
        /// A64: XTN Vd.8B, Vn.8H
        /// </summary>
        public static Vector64<byte> ExtractNarrowingLower(Vector128<ushort> value) => ExtractNarrowingLower(value);

        /// <summary>
        /// int16x4_t vmovn_s32 (int32x4_t a)
        /// A32: VMOVN.I32 Dd, Qm
        /// A64: XTN Vd.4H, Vn.4S
        /// </summary>
        public static Vector64<short> ExtractNarrowingLower(Vector128<int> value) => ExtractNarrowingLower(value);

        /// <summary>
        /// int32x2_t vmovn_s64 (int64x2_t a)
        /// A32: VMOVN.I64 Dd, Qm
        /// A64: XTN Vd.2S, Vn.2D
        /// </summary>
        public static Vector64<int> ExtractNarrowingLower(Vector128<long> value) => ExtractNarrowingLower(value);

        /// <summary>
        /// int8x8_t vmovn_s16 (int16x8_t a)
        /// A32: VMOVN.I16 Dd, Qm
        /// A64: XTN Vd.8B, Vn.8H
        /// </summary>
        public static Vector64<sbyte> ExtractNarrowingLower(Vector128<short> value) => ExtractNarrowingLower(value);

        /// <summary>
        /// uint16x4_t vmovn_u32 (uint32x4_t a)
        /// A32: VMOVN.I32 Dd, Qm
        /// A64: XTN Vd.4H, Vn.4S
        /// </summary>
        public static Vector64<ushort> ExtractNarrowingLower(Vector128<uint> value) => ExtractNarrowingLower(value);

        /// <summary>
        /// uint32x2_t vmovn_u64 (uint64x2_t a)
        /// A32: VMOVN.I64 Dd, Qm
        /// A64: XTN Vd.2S, Vn.2D
        /// </summary>
        public static Vector64<uint> ExtractNarrowingLower(Vector128<ulong> value) => ExtractNarrowingLower(value);

        /// <summary>
        /// uint8x8_t vqmovn_u16 (uint16x8_t a)
        /// A32: VQMOVN.U16 Dd, Qm
        /// A64: UQXTN Vd.8B, Vn.8H
        /// </summary>
        public static Vector64<byte> ExtractNarrowingSaturateLower(Vector128<ushort> value) => ExtractNarrowingSaturateLower(value);

        /// <summary>
        /// int16x4_t vqmovn_s32 (int32x4_t a)
        /// A32: VQMOVN.S32 Dd, Qm
        /// A64: SQXTN Vd.4H, Vn.4S
        /// </summary>
        public static Vector64<short> ExtractNarrowingSaturateLower(Vector128<int> value) => ExtractNarrowingSaturateLower(value);

        /// <summary>
        /// int32x2_t vqmovn_s64 (int64x2_t a)
        /// A32: VQMOVN.S64 Dd, Qm
        /// A64: SQXTN Vd.2S, Vn.2D
        /// </summary>
        public static Vector64<int> ExtractNarrowingSaturateLower(Vector128<long> value) => ExtractNarrowingSaturateLower(value);
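
        // Illustrative sketch (assumes AdvSimd.IsSupported): ExtractNarrowingLower (XTN) halves
        // each lane width by plain truncation, keeping the low bits.
        //
        //     Vector64<sbyte> lo = AdvSimd.ExtractNarrowingLower(Vector128.Create((short)300));
        //     // each lane == unchecked((sbyte)300) == 44 (no saturation)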

        /// <summary>
        /// int8x8_t vqmovn_s16 (int16x8_t a)
        /// A32: VQMOVN.S16 Dd, Qm
        /// A64: SQXTN Vd.8B, Vn.8H
        /// </summary>
        public static Vector64<sbyte> ExtractNarrowingSaturateLower(Vector128<short> value) => ExtractNarrowingSaturateLower(value);

        /// <summary>
        /// uint16x4_t vqmovn_u32 (uint32x4_t a)
        /// A32: VQMOVN.U32 Dd, Qm
        /// A64: UQXTN Vd.4H, Vn.4S
        /// </summary>
        public static Vector64<ushort> ExtractNarrowingSaturateLower(Vector128<uint> value) => ExtractNarrowingSaturateLower(value);

        /// <summary>
        /// uint32x2_t vqmovn_u64 (uint64x2_t a)
        /// A32: VQMOVN.U64 Dd, Qm
        /// A64: UQXTN Vd.2S, Vn.2D
        /// </summary>
        public static Vector64<uint> ExtractNarrowingSaturateLower(Vector128<ulong> value) => ExtractNarrowingSaturateLower(value);

        /// <summary>
        /// uint8x8_t vqmovun_s16 (int16x8_t a)
        /// A32: VQMOVUN.S16 Dd, Qm
        /// A64: SQXTUN Vd.8B, Vn.8H
        /// </summary>
        public static Vector64<byte> ExtractNarrowingSaturateUnsignedLower(Vector128<short> value) => ExtractNarrowingSaturateUnsignedLower(value);

        /// <summary>
        /// uint16x4_t vqmovun_s32 (int32x4_t a)
        /// A32: VQMOVUN.S32 Dd, Qm
        /// A64: SQXTUN Vd.4H, Vn.4S
        /// </summary>
        public static Vector64<ushort> ExtractNarrowingSaturateUnsignedLower(Vector128<int> value) => ExtractNarrowingSaturateUnsignedLower(value);

        /// <summary>
        /// uint32x2_t vqmovun_s64 (int64x2_t a)
        /// A32: VQMOVUN.S64 Dd, Qm
        /// A64: SQXTUN Vd.2S, Vn.2D
        /// </summary>
        public static Vector64<uint> ExtractNarrowingSaturateUnsignedLower(Vector128<long> value) => ExtractNarrowingSaturateUnsignedLower(value);

        /// <summary>
        /// uint8x16_t vqmovun_high_s16 (uint8x8_t r, int16x8_t a)
        /// A32: VQMOVUN.S16 Dd+1, Qm
        /// A64: SQXTUN2 Vd.16B, Vn.8H
        /// </summary>
        public static Vector128<byte> ExtractNarrowingSaturateUnsignedUpper(Vector64<byte> lower, Vector128<short> value) => ExtractNarrowingSaturateUnsignedUpper(lower, value);

        /// <summary>
        /// uint16x8_t vqmovun_high_s32 (uint16x4_t r, int32x4_t a)
        /// A32: VQMOVUN.S32 Dd+1, Qm
        /// A64: SQXTUN2 Vd.8H, Vn.4S
        /// </summary>
        public static Vector128<ushort> ExtractNarrowingSaturateUnsignedUpper(Vector64<ushort> lower, Vector128<int> value) => ExtractNarrowingSaturateUnsignedUpper(lower, value);

        /// <summary>
        /// uint32x4_t vqmovun_high_s64 (uint32x2_t r, int64x2_t a)
        /// A32: VQMOVUN.S64 Dd+1, Qm
        /// A64: SQXTUN2 Vd.4S, Vn.2D
        /// </summary>
        public static Vector128<uint> ExtractNarrowingSaturateUnsignedUpper(Vector64<uint> lower, Vector128<long> value) => ExtractNarrowingSaturateUnsignedUpper(lower, value);

        /// <summary>
        /// uint8x16_t vqmovn_high_u16 (uint8x8_t r, uint16x8_t a)
        /// A32: VQMOVN.U16 Dd+1, Qm
        /// A64: UQXTN2 Vd.16B, Vn.8H
        /// </summary>
        public static Vector128<byte> ExtractNarrowingSaturateUpper(Vector64<byte> lower, Vector128<ushort> value) => ExtractNarrowingSaturateUpper(lower, value);

        /// <summary>
        /// int16x8_t vqmovn_high_s32 (int16x4_t r, int32x4_t a)
        /// A32: VQMOVN.S32 Dd+1, Qm
        /// A64: SQXTN2 Vd.8H, Vn.4S
        /// </summary>
        public static Vector128<short> ExtractNarrowingSaturateUpper(Vector64<short> lower, Vector128<int> value) => ExtractNarrowingSaturateUpper(lower, value);

        /// <summary>
        /// int32x4_t vqmovn_high_s64 (int32x2_t r, int64x2_t a)
        /// A32: VQMOVN.S64 Dd+1, Qm
        /// A64: SQXTN2 Vd.4S, Vn.2D
        /// </summary>
        public static Vector128<int> ExtractNarrowingSaturateUpper(Vector64<int> lower, Vector128<long> value) => ExtractNarrowingSaturateUpper(lower, value);
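
        // Illustrative sketch (assumes AdvSimd.IsSupported): the Saturate variants clamp instead
        // of truncating; the Upper variants write the narrowed lanes into the high half, above
        // the previously narrowed `lower` half.
        //
        //     AdvSimd.ExtractNarrowingSaturateLower(Vector128.Create((short)300));         // sbyte lanes clamp to 127
        //     AdvSimd.ExtractNarrowingSaturateUnsignedLower(Vector128.Create((short)-5));  // byte lanes clamp to 0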

        /// <summary>
        /// int8x16_t vqmovn_high_s16 (int8x8_t r, int16x8_t a)
        /// A32: VQMOVN.S16 Dd+1, Qm
        /// A64: SQXTN2 Vd.16B, Vn.8H
        /// </summary>
        public static Vector128<sbyte> ExtractNarrowingSaturateUpper(Vector64<sbyte> lower, Vector128<short> value) => ExtractNarrowingSaturateUpper(lower, value);

        /// <summary>
        /// uint16x8_t vqmovn_high_u32 (uint16x4_t r, uint32x4_t a)
        /// A32: VQMOVN.U32 Dd+1, Qm
        /// A64: UQXTN2 Vd.8H, Vn.4S
        /// </summary>
        public static Vector128<ushort> ExtractNarrowingSaturateUpper(Vector64<ushort> lower, Vector128<uint> value) => ExtractNarrowingSaturateUpper(lower, value);

        /// <summary>
        /// uint32x4_t vqmovn_high_u64 (uint32x2_t r, uint64x2_t a)
        /// A32: VQMOVN.U64 Dd+1, Qm
        /// A64: UQXTN2 Vd.4S, Vn.2D
        /// </summary>
        public static Vector128<uint> ExtractNarrowingSaturateUpper(Vector64<uint> lower, Vector128<ulong> value) => ExtractNarrowingSaturateUpper(lower, value);

        /// <summary>
        /// uint8x16_t vmovn_high_u16 (uint8x8_t r, uint16x8_t a)
        /// A32: VMOVN.I16 Dd+1, Qm
        /// A64: XTN2 Vd.16B, Vn.8H
        /// </summary>
        public static Vector128<byte> ExtractNarrowingUpper(Vector64<byte> lower, Vector128<ushort> value) => ExtractNarrowingUpper(lower, value);

        /// <summary>
        /// int16x8_t vmovn_high_s32 (int16x4_t r, int32x4_t a)
        /// A32: VMOVN.I32 Dd+1, Qm
        /// A64: XTN2 Vd.8H, Vn.4S
        /// </summary>
        public static Vector128<short> ExtractNarrowingUpper(Vector64<short> lower, Vector128<int> value) => ExtractNarrowingUpper(lower, value);

        /// <summary>
        /// int32x4_t vmovn_high_s64 (int32x2_t r, int64x2_t a)
        /// A32: VMOVN.I64 Dd+1, Qm
        /// A64: XTN2 Vd.4S, Vn.2D
        /// </summary>
        public static Vector128<int> ExtractNarrowingUpper(Vector64<int> lower, Vector128<long> value) => ExtractNarrowingUpper(lower, value);

        /// <summary>
        /// int8x16_t vmovn_high_s16 (int8x8_t r, int16x8_t a)
        /// A32: VMOVN.I16 Dd+1, Qm
        /// A64: XTN2 Vd.16B, Vn.8H
        /// </summary>
        public static Vector128<sbyte> ExtractNarrowingUpper(Vector64<sbyte> lower, Vector128<short> value) => ExtractNarrowingUpper(lower, value);

        /// <summary>
        /// uint16x8_t vmovn_high_u32 (uint16x4_t r, uint32x4_t a)
        /// A32: VMOVN.I32 Dd+1, Qm
        /// A64: XTN2 Vd.8H, Vn.4S
        /// </summary>
        public static Vector128<ushort> ExtractNarrowingUpper(Vector64<ushort> lower, Vector128<uint> value) => ExtractNarrowingUpper(lower, value);

        /// <summary>
        /// uint32x4_t vmovn_high_u64 (uint32x2_t r, uint64x2_t a)
        /// A32: VMOVN.I64 Dd+1, Qm
        /// A64: XTN2 Vd.4S, Vn.2D
        /// </summary>
        public static Vector128<uint> ExtractNarrowingUpper(Vector64<uint> lower, Vector128<ulong> value) => ExtractNarrowingUpper(lower, value);

        /// <summary>
        /// uint8x8_t vext_u8 (uint8x8_t a, uint8x8_t b, const int n)
        /// A32: VEXT.8 Dd, Dn, Dm, #n
        /// A64: EXT Vd.8B, Vn.8B, Vm.8B, #n
        /// </summary>
        public static Vector64<byte> ExtractVector64(Vector64<byte> upper, Vector64<byte> lower, byte index) => ExtractVector64(upper, lower, index);

        /// <summary>
        /// int16x4_t vext_s16 (int16x4_t a, int16x4_t b, const int n)
        /// A32: VEXT.8 Dd, Dn, Dm, #(n*2)
        /// A64: EXT Vd.8B, Vn.8B, Vm.8B, #(n*2)
        /// </summary>
        public static Vector64<short> ExtractVector64(Vector64<short> upper, Vector64<short> lower, byte index) => ExtractVector64(upper, lower, index);

        /// <summary>
        /// int32x2_t vext_s32 (int32x2_t a, int32x2_t b, const int n)
        /// A32: VEXT.8 Dd, Dn, Dm, #(n*4)
        /// A64: EXT Vd.8B, Vn.8B, Vm.8B, #(n*4)
        /// </summary>
        public static Vector64<int> ExtractVector64(Vector64<int> upper, Vector64<int> lower, byte index) => ExtractVector64(upper, lower, index);

        /// <summary>
        /// int8x8_t vext_s8 (int8x8_t a, int8x8_t b, const int n)
        /// A32: VEXT.8 Dd, Dn, Dm, #n
        /// A64: EXT Vd.8B, Vn.8B, Vm.8B, #n
        /// </summary>
        public static Vector64<sbyte> ExtractVector64(Vector64<sbyte> upper, Vector64<sbyte> lower, byte index) => ExtractVector64(upper, lower, index);

        /// <summary>
        /// float32x2_t vext_f32 (float32x2_t a, float32x2_t b, const int n)
        /// A32: VEXT.8 Dd, Dn, Dm, #(n*4)
        /// A64: EXT Vd.8B, Vn.8B, Vm.8B, #(n*4)
        /// </summary>
        public static Vector64<float> ExtractVector64(Vector64<float> upper, Vector64<float> lower, byte index) => ExtractVector64(upper, lower, index);

        /// <summary>
        /// uint16x4_t vext_u16 (uint16x4_t a, uint16x4_t b, const int n)
        /// A32: VEXT.8 Dd, Dn, Dm, #(n*2)
        /// A64: EXT Vd.8B, Vn.8B, Vm.8B, #(n*2)
        /// </summary>
        public static Vector64<ushort> ExtractVector64(Vector64<ushort> upper, Vector64<ushort> lower, byte index) => ExtractVector64(upper, lower, index);

        /// <summary>
        /// uint32x2_t vext_u32 (uint32x2_t a, uint32x2_t b, const int n)
        /// A32: VEXT.8 Dd, Dn, Dm, #(n*4)
        /// A64: EXT Vd.8B, Vn.8B, Vm.8B, #(n*4)
        /// </summary>
        public static Vector64<uint> ExtractVector64(Vector64<uint> upper, Vector64<uint> lower, byte index) => ExtractVector64(upper, lower, index);

        /// <summary>
        /// uint8x16_t vextq_u8 (uint8x16_t a, uint8x16_t b, const int n)
        /// A32: VEXT.8 Qd, Qn, Qm, #n
        /// A64: EXT Vd.16B, Vn.16B, Vm.16B, #n
        /// </summary>
        public static Vector128<byte> ExtractVector128(Vector128<byte> upper, Vector128<byte> lower, byte index) => ExtractVector128(upper, lower, index);

        /// <summary>
        /// float64x2_t vextq_f64 (float64x2_t a, float64x2_t b, const int n)
        /// A32: VEXT.8 Qd, Qn, Qm, #(n*8)
        /// A64: EXT Vd.16B, Vn.16B, Vm.16B, #(n*8)
        /// </summary>
        public static Vector128<double> ExtractVector128(Vector128<double> upper, Vector128<double> lower, byte index) => ExtractVector128(upper, lower, index);

        /// <summary>
        /// int16x8_t vextq_s16 (int16x8_t a, int16x8_t b, const int n)
        /// A32: VEXT.8 Qd, Qn, Qm, #(n*2)
        /// A64: EXT Vd.16B, Vn.16B, Vm.16B, #(n*2)
        /// </summary>
        public static Vector128<short> ExtractVector128(Vector128<short> upper, Vector128<short> lower, byte index) => ExtractVector128(upper, lower, index);

        /// <summary>
        /// int32x4_t vextq_s32 (int32x4_t a, int32x4_t b, const int n)
        /// A32: VEXT.8 Qd, Qn, Qm, #(n*4)
        /// A64: EXT Vd.16B, Vn.16B, Vm.16B, #(n*4)
        /// </summary>
        public static Vector128<int> ExtractVector128(Vector128<int> upper, Vector128<int> lower, byte index) => ExtractVector128(upper, lower, index);

        /// <summary>
        /// int64x2_t vextq_s64 (int64x2_t a, int64x2_t b, const int n)
        /// A32: VEXT.8 Qd, Qn, Qm, #(n*8)
        /// A64: EXT Vd.16B, Vn.16B, Vm.16B, #(n*8)
        /// </summary>
        public static Vector128<long> ExtractVector128(Vector128<long> upper, Vector128<long> lower, byte index) => ExtractVector128(upper, lower, index);

        /// <summary>
        /// int8x16_t vextq_s8 (int8x16_t a, int8x16_t b, const int n)
        /// A32: VEXT.8 Qd, Qn, Qm, #n
        /// A64: EXT Vd.16B, Vn.16B, Vm.16B, #n
        /// </summary>
        public static Vector128<sbyte> ExtractVector128(Vector128<sbyte> upper, Vector128<sbyte> lower, byte index) => ExtractVector128(upper, lower, index);

        /// <summary>
        /// float32x4_t vextq_f32 (float32x4_t a, float32x4_t b, const int n)
        /// A32: VEXT.8 Qd, Qn, Qm, #(n*4)
        /// A64: EXT Vd.16B, Vn.16B, Vm.16B, #(n*4)
        /// </summary>
        public static Vector128<float> ExtractVector128(Vector128<float> upper, Vector128<float> lower, byte index) => ExtractVector128(upper, lower, index);
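
        // Illustrative sketch (assumes AdvSimd.IsSupported; `upper` and `lower` are hypothetical
        // locals): ExtractVector128 maps to EXT, which takes a full-width window from the
        // concatenation of the two inputs, starting at element `index` of `lower` and continuing
        // into `upper`.
        //
        //     Vector128<byte> win = AdvSimd.ExtractVector128(upper, lower, 3);
        //     // win == { lower[3..15], upper[0..2] }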

        /// <summary>
        /// uint16x8_t vextq_u16 (uint16x8_t a, uint16x8_t b, const int n)
        /// A32: VEXT.8 Qd, Qn, Qm, #(n*2)
        /// A64: EXT Vd.16B, Vn.16B, Vm.16B, #(n*2)
        /// </summary>
        public static Vector128<ushort> ExtractVector128(Vector128<ushort> upper, Vector128<ushort> lower, byte index) => ExtractVector128(upper, lower, index);

        /// <summary>
        /// uint32x4_t vextq_u32 (uint32x4_t a, uint32x4_t b, const int n)
        /// A32: VEXT.8 Qd, Qn, Qm, #(n*4)
        /// A64: EXT Vd.16B, Vn.16B, Vm.16B, #(n*4)
        /// </summary>
        public static Vector128<uint> ExtractVector128(Vector128<uint> upper, Vector128<uint> lower, byte index) => ExtractVector128(upper, lower, index);

        /// <summary>
        /// uint64x2_t vextq_u64 (uint64x2_t a, uint64x2_t b, const int n)
        /// A32: VEXT.8 Qd, Qn, Qm, #(n*8)
        /// A64: EXT Vd.16B, Vn.16B, Vm.16B, #(n*8)
        /// </summary>
        public static Vector128<ulong> ExtractVector128(Vector128<ulong> upper, Vector128<ulong> lower, byte index) => ExtractVector128(upper, lower, index);

        /// <summary>
        /// float32x2_t vrndm_f32 (float32x2_t a)
        /// A32: VRINTM.F32 Dd, Dm
        /// A64: FRINTM Vd.2S, Vn.2S
        /// </summary>
        public static Vector64<float> Floor(Vector64<float> value) => Floor(value);

        /// <summary>
        /// float32x4_t vrndmq_f32 (float32x4_t a)
        /// A32: VRINTM.F32 Qd, Qm
        /// A64: FRINTM Vd.4S, Vn.4S
        /// </summary>
        public static Vector128<float> Floor(Vector128<float> value) => Floor(value);

        /// <summary>
        /// float64x1_t vrndm_f64 (float64x1_t a)
        /// A32: VRINTM.F64 Dd, Dm
        /// A64: FRINTM Dd, Dn
        /// </summary>
        public static Vector64<double> FloorScalar(Vector64<double> value) => FloorScalar(value);

        /// <summary>
        /// float32_t vrndms_f32 (float32_t a)
        /// A32: VRINTM.F32 Sd, Sm
        /// A64: FRINTM Sd, Sn
        /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
        /// </summary>
        public static Vector64<float> FloorScalar(Vector64<float> value) => FloorScalar(value);

        /// <summary>
        /// uint8x8_t vhadd_u8 (uint8x8_t a, uint8x8_t b)
        /// A32: VHADD.U8 Dd, Dn, Dm
        /// A64: UHADD Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<byte> FusedAddHalving(Vector64<byte> left, Vector64<byte> right) => FusedAddHalving(left, right);

        /// <summary>
        /// int16x4_t vhadd_s16 (int16x4_t a, int16x4_t b)
        /// A32: VHADD.S16 Dd, Dn, Dm
        /// A64: SHADD Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<short> FusedAddHalving(Vector64<short> left, Vector64<short> right) => FusedAddHalving(left, right);

        /// <summary>
        /// int32x2_t vhadd_s32 (int32x2_t a, int32x2_t b)
        /// A32: VHADD.S32 Dd, Dn, Dm
        /// A64: SHADD Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<int> FusedAddHalving(Vector64<int> left, Vector64<int> right) => FusedAddHalving(left, right);

        /// <summary>
        /// int8x8_t vhadd_s8 (int8x8_t a, int8x8_t b)
        /// A32: VHADD.S8 Dd, Dn, Dm
        /// A64: SHADD Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<sbyte> FusedAddHalving(Vector64<sbyte> left, Vector64<sbyte> right) => FusedAddHalving(left, right);

        /// <summary>
        /// uint16x4_t vhadd_u16 (uint16x4_t a, uint16x4_t b)
        /// A32: VHADD.U16 Dd, Dn, Dm
        /// A64: UHADD Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<ushort> FusedAddHalving(Vector64<ushort> left, Vector64<ushort> right) => FusedAddHalving(left, right);

        /// <summary>
        /// uint32x2_t vhadd_u32 (uint32x2_t a, uint32x2_t b)
        /// A32: VHADD.U32 Dd, Dn, Dm
        /// A64: UHADD Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<uint> FusedAddHalving(Vector64<uint> left, Vector64<uint> right) => FusedAddHalving(left, right);

        /// <summary>
        /// uint8x16_t vhaddq_u8 (uint8x16_t a, uint8x16_t b)
        /// A32: VHADD.U8 Qd, Qn, Qm
        /// A64: UHADD Vd.16B, Vn.16B, Vm.16B
        /// </summary>
        public static Vector128<byte> FusedAddHalving(Vector128<byte> left, Vector128<byte> right) => FusedAddHalving(left, right);
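
        // Illustrative sketch (assumes AdvSimd.IsSupported): FusedAddHalving computes
        // (left + right) >> 1 per lane, with the intermediate sum held at wider precision so it
        // cannot overflow.
        //
        //     Vector64<byte> h = AdvSimd.FusedAddHalving(Vector64.Create((byte)200), Vector64.Create((byte)100));
        //     // each lane == (200 + 100) >> 1 == 150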

        /// <summary>
        /// int16x8_t vhaddq_s16 (int16x8_t a, int16x8_t b)
        /// A32: VHADD.S16 Qd, Qn, Qm
        /// A64: SHADD Vd.8H, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<short> FusedAddHalving(Vector128<short> left, Vector128<short> right) => FusedAddHalving(left, right);

        /// <summary>
        /// int32x4_t vhaddq_s32 (int32x4_t a, int32x4_t b)
        /// A32: VHADD.S32 Qd, Qn, Qm
        /// A64: SHADD Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<int> FusedAddHalving(Vector128<int> left, Vector128<int> right) => FusedAddHalving(left, right);

        /// <summary>
        /// int8x16_t vhaddq_s8 (int8x16_t a, int8x16_t b)
        /// A32: VHADD.S8 Qd, Qn, Qm
        /// A64: SHADD Vd.16B, Vn.16B, Vm.16B
        /// </summary>
        public static Vector128<sbyte> FusedAddHalving(Vector128<sbyte> left, Vector128<sbyte> right) => FusedAddHalving(left, right);

        /// <summary>
        /// uint16x8_t vhaddq_u16 (uint16x8_t a, uint16x8_t b)
        /// A32: VHADD.U16 Qd, Qn, Qm
        /// A64: UHADD Vd.8H, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<ushort> FusedAddHalving(Vector128<ushort> left, Vector128<ushort> right) => FusedAddHalving(left, right);

        /// <summary>
        /// uint32x4_t vhaddq_u32 (uint32x4_t a, uint32x4_t b)
        /// A32: VHADD.U32 Qd, Qn, Qm
        /// A64: UHADD Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<uint> FusedAddHalving(Vector128<uint> left, Vector128<uint> right) => FusedAddHalving(left, right);

        /// <summary>
        /// uint8x8_t vrhadd_u8 (uint8x8_t a, uint8x8_t b)
        /// A32: VRHADD.U8 Dd, Dn, Dm
        /// A64: URHADD Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<byte> FusedAddRoundedHalving(Vector64<byte> left, Vector64<byte> right) => FusedAddRoundedHalving(left, right);

        /// <summary>
        /// int16x4_t vrhadd_s16 (int16x4_t a, int16x4_t b)
        /// A32: VRHADD.S16 Dd, Dn, Dm
        /// A64: SRHADD Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<short> FusedAddRoundedHalving(Vector64<short> left, Vector64<short> right) => FusedAddRoundedHalving(left, right);

        /// <summary>
        /// int32x2_t vrhadd_s32 (int32x2_t a, int32x2_t b)
        /// A32: VRHADD.S32 Dd, Dn, Dm
        /// A64: SRHADD Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<int> FusedAddRoundedHalving(Vector64<int> left, Vector64<int> right) => FusedAddRoundedHalving(left, right);

        /// <summary>
        /// int8x8_t vrhadd_s8 (int8x8_t a, int8x8_t b)
        /// A32: VRHADD.S8 Dd, Dn, Dm
        /// A64: SRHADD Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<sbyte> FusedAddRoundedHalving(Vector64<sbyte> left, Vector64<sbyte> right) => FusedAddRoundedHalving(left, right);

        /// <summary>
        /// uint16x4_t vrhadd_u16 (uint16x4_t a, uint16x4_t b)
        /// A32: VRHADD.U16 Dd, Dn, Dm
        /// A64: URHADD Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<ushort> FusedAddRoundedHalving(Vector64<ushort> left, Vector64<ushort> right) => FusedAddRoundedHalving(left, right);

        /// <summary>
        /// uint32x2_t vrhadd_u32 (uint32x2_t a, uint32x2_t b)
        /// A32: VRHADD.U32 Dd, Dn, Dm
        /// A64: URHADD Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<uint> FusedAddRoundedHalving(Vector64<uint> left, Vector64<uint> right) => FusedAddRoundedHalving(left, right);

        /// <summary>
        /// uint8x16_t vrhaddq_u8 (uint8x16_t a, uint8x16_t b)
        /// A32: VRHADD.U8 Qd, Qn, Qm
        /// A64: URHADD Vd.16B, Vn.16B, Vm.16B
        /// </summary>
        public static Vector128<byte> FusedAddRoundedHalving(Vector128<byte> left, Vector128<byte> right) => FusedAddRoundedHalving(left, right);

        /// <summary>
        /// int16x8_t vrhaddq_s16 (int16x8_t a, int16x8_t b)
        /// A32: VRHADD.S16 Qd, Qn, Qm
        /// A64: SRHADD Vd.8H, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<short> FusedAddRoundedHalving(Vector128<short> left, Vector128<short> right) => FusedAddRoundedHalving(left, right);

        /// <summary>
        /// int32x4_t vrhaddq_s32 (int32x4_t a, int32x4_t b)
        /// A32: VRHADD.S32 Qd, Qn, Qm
        /// A64: SRHADD Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<int> FusedAddRoundedHalving(Vector128<int> left, Vector128<int> right) => FusedAddRoundedHalving(left, right);

        /// <summary>
        /// int8x16_t vrhaddq_s8 (int8x16_t a, int8x16_t b)
        /// A32: VRHADD.S8 Qd, Qn, Qm
        /// A64: SRHADD Vd.16B, Vn.16B, Vm.16B
        /// </summary>
        public static Vector128<sbyte> FusedAddRoundedHalving(Vector128<sbyte> left, Vector128<sbyte> right) => FusedAddRoundedHalving(left, right);

        /// <summary>
        /// uint16x8_t vrhaddq_u16 (uint16x8_t a, uint16x8_t b)
        /// A32: VRHADD.U16 Qd, Qn, Qm
        /// A64: URHADD Vd.8H, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<ushort> FusedAddRoundedHalving(Vector128<ushort> left, Vector128<ushort> right) => FusedAddRoundedHalving(left, right);

        /// <summary>
        /// uint32x4_t vrhaddq_u32 (uint32x4_t a, uint32x4_t b)
        /// A32: VRHADD.U32 Qd, Qn, Qm
        /// A64: URHADD Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<uint> FusedAddRoundedHalving(Vector128<uint> left, Vector128<uint> right) => FusedAddRoundedHalving(left, right);

        /// <summary>
        /// float32x2_t vfma_f32 (float32x2_t a, float32x2_t b, float32x2_t c)
        /// A32: VFMA.F32 Dd, Dn, Dm
        /// A64: FMLA Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<float> FusedMultiplyAdd(Vector64<float> addend, Vector64<float> left, Vector64<float> right) => FusedMultiplyAdd(addend, left, right);

        /// <summary>
        /// float32x4_t vfmaq_f32 (float32x4_t a, float32x4_t b, float32x4_t c)
        /// A32: VFMA.F32 Qd, Qn, Qm
        /// A64: FMLA Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<float> FusedMultiplyAdd(Vector128<float> addend, Vector128<float> left, Vector128<float> right) => FusedMultiplyAdd(addend, left, right);

        /// <summary>
        /// float64x1_t vfnma_f64 (float64x1_t a, float64x1_t b, float64x1_t c)
        /// A32: VFNMA.F64 Dd, Dn, Dm
        /// A64: FNMADD Dd, Dn, Dm, Da
        /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
        /// </summary>
        public static Vector64<double> FusedMultiplyAddNegatedScalar(Vector64<double> addend, Vector64<double> left, Vector64<double> right) => FusedMultiplyAddNegatedScalar(addend, left, right);

        /// <summary>
        /// float32_t vfnmas_f32 (float32_t a, float32_t b, float32_t c)
        /// A32: VFNMA.F32 Sd, Sn, Sm
        /// A64: FNMADD Sd, Sn, Sm, Sa
        /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
        /// </summary>
        public static Vector64<float> FusedMultiplyAddNegatedScalar(Vector64<float> addend, Vector64<float> left, Vector64<float> right) => FusedMultiplyAddNegatedScalar(addend, left, right);

        /// <summary>
        /// float64x1_t vfma_f64 (float64x1_t a, float64x1_t b, float64x1_t c)
        /// A32: VFMA.F64 Dd, Dn, Dm
        /// A64: FMADD Dd, Dn, Dm, Da
        /// </summary>
        public static Vector64<double> FusedMultiplyAddScalar(Vector64<double> addend, Vector64<double> left, Vector64<double> right) => FusedMultiplyAddScalar(addend, left, right);
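
        // Illustrative sketch (assumes AdvSimd.IsSupported; `acc`, `a`, `b` are hypothetical
        // locals): FusedMultiplyAdd computes addend + (left * right) per lane with a single
        // rounding step, i.e. the product is not rounded before the add.
        //
        //     Vector128<float> r = AdvSimd.FusedMultiplyAdd(acc, a, b);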

        /// <summary>
        /// float32_t vfmas_f32 (float32_t a, float32_t b, float32_t c)
        /// A32: VFMA.F32 Sd, Sn, Sm
        /// A64: FMADD Sd, Sn, Sm, Sa
        /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
        /// </summary>
        public static Vector64<float> FusedMultiplyAddScalar(Vector64<float> addend, Vector64<float> left, Vector64<float> right) => FusedMultiplyAddScalar(addend, left, right);

        /// <summary>
        /// float32x2_t vfms_f32 (float32x2_t a, float32x2_t b, float32x2_t c)
        /// A32: VFMS.F32 Dd, Dn, Dm
        /// A64: FMLS Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<float> FusedMultiplySubtract(Vector64<float> minuend, Vector64<float> left, Vector64<float> right) => FusedMultiplySubtract(minuend, left, right);

        /// <summary>
        /// float32x4_t vfmsq_f32 (float32x4_t a, float32x4_t b, float32x4_t c)
        /// A32: VFMS.F32 Qd, Qn, Qm
        /// A64: FMLS Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<float> FusedMultiplySubtract(Vector128<float> minuend, Vector128<float> left, Vector128<float> right) => FusedMultiplySubtract(minuend, left, right);

        /// <summary>
        /// float64x1_t vfnms_f64 (float64x1_t a, float64x1_t b, float64x1_t c)
        /// A32: VFNMS.F64 Dd, Dn, Dm
        /// A64: FNMSUB Dd, Dn, Dm, Da
        /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
        /// </summary>
        public static Vector64<double> FusedMultiplySubtractNegatedScalar(Vector64<double> minuend, Vector64<double> left, Vector64<double> right) => FusedMultiplySubtractNegatedScalar(minuend, left, right);

        /// <summary>
        /// float32_t vfnmss_f32 (float32_t a, float32_t b, float32_t c)
        /// A32: VFNMS.F32 Sd, Sn, Sm
        /// A64: FNMSUB Sd, Sn, Sm, Sa
        /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
        /// </summary>
        public static Vector64<float> FusedMultiplySubtractNegatedScalar(Vector64<float> minuend, Vector64<float> left, Vector64<float> right) => FusedMultiplySubtractNegatedScalar(minuend, left, right);

        /// <summary>
        /// float64x1_t vfms_f64 (float64x1_t a, float64x1_t b, float64x1_t c)
        /// A32: VFMS.F64 Dd, Dn, Dm
        /// A64: FMSUB Dd, Dn, Dm, Da
        /// </summary>
        public static Vector64<double> FusedMultiplySubtractScalar(Vector64<double> minuend, Vector64<double> left, Vector64<double> right) => FusedMultiplySubtractScalar(minuend, left, right);

        /// <summary>
        /// float32_t vfmss_f32 (float32_t a, float32_t b, float32_t c)
        /// A32: VFMS.F32 Sd, Sn, Sm
        /// A64: FMSUB Sd, Sn, Sm, Sa
        /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
        /// </summary>
        public static Vector64<float> FusedMultiplySubtractScalar(Vector64<float> minuend, Vector64<float> left, Vector64<float> right) => FusedMultiplySubtractScalar(minuend, left, right);

        /// <summary>
        /// uint8x8_t vhsub_u8 (uint8x8_t a, uint8x8_t b)
        /// A32: VHSUB.U8 Dd, Dn, Dm
        /// A64: UHSUB Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<byte> FusedSubtractHalving(Vector64<byte> left, Vector64<byte> right) => FusedSubtractHalving(left, right);

        /// <summary>
        /// int16x4_t vhsub_s16 (int16x4_t a, int16x4_t b)
        /// A32: VHSUB.S16 Dd, Dn, Dm
        /// A64: SHSUB Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<short> FusedSubtractHalving(Vector64<short> left, Vector64<short> right) => FusedSubtractHalving(left, right);

        /// <summary>
        /// int32x2_t vhsub_s32 (int32x2_t a, int32x2_t b)
        /// A32: VHSUB.S32 Dd, Dn, Dm
        /// A64: SHSUB Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<int> FusedSubtractHalving(Vector64<int> left, Vector64<int> right) => FusedSubtractHalving(left, right);

        /// <summary>
        /// int8x8_t vhsub_s8 (int8x8_t a, int8x8_t b)
        /// A32: VHSUB.S8 Dd, Dn, Dm
        /// A64: SHSUB Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<sbyte> FusedSubtractHalving(Vector64<sbyte> left, Vector64<sbyte> right) => FusedSubtractHalving(left, right);

        /// <summary>
        /// uint16x4_t vhsub_u16 (uint16x4_t a, uint16x4_t b)
        /// A32: VHSUB.U16 Dd, Dn, Dm
        /// A64: UHSUB Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<ushort> FusedSubtractHalving(Vector64<ushort> left, Vector64<ushort> right) => FusedSubtractHalving(left, right);

        /// <summary>
        /// uint32x2_t vhsub_u32 (uint32x2_t a, uint32x2_t b)
        /// A32: VHSUB.U32 Dd, Dn, Dm
        /// A64: UHSUB Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<uint> FusedSubtractHalving(Vector64<uint> left, Vector64<uint> right) => FusedSubtractHalving(left, right);

        /// <summary>
        /// uint8x16_t vhsubq_u8 (uint8x16_t a, uint8x16_t b)
        /// A32: VHSUB.U8 Qd, Qn, Qm
        /// A64: UHSUB Vd.16B, Vn.16B, Vm.16B
        /// </summary>
        public static Vector128<byte> FusedSubtractHalving(Vector128<byte> left, Vector128<byte> right) => FusedSubtractHalving(left, right);

        /// <summary>
        /// int16x8_t vhsubq_s16 (int16x8_t a, int16x8_t b)
        /// A32: VHSUB.S16 Qd, Qn, Qm
        /// A64: SHSUB Vd.8H, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<short> FusedSubtractHalving(Vector128<short> left, Vector128<short> right) => FusedSubtractHalving(left, right);

        /// <summary>
        /// int32x4_t vhsubq_s32 (int32x4_t a, int32x4_t b)
        /// A32: VHSUB.S32 Qd, Qn, Qm
        /// A64: SHSUB Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<int> FusedSubtractHalving(Vector128<int> left, Vector128<int> right) => FusedSubtractHalving(left, right);

        /// <summary>
        /// int8x16_t vhsubq_s8 (int8x16_t a, int8x16_t b)
        /// A32: VHSUB.S8 Qd, Qn, Qm
        /// A64: SHSUB Vd.16B, Vn.16B, Vm.16B
        /// </summary>
        public static Vector128<sbyte> FusedSubtractHalving(Vector128<sbyte> left, Vector128<sbyte> right) => FusedSubtractHalving(left, right);

        /// <summary>
        /// uint16x8_t vhsubq_u16 (uint16x8_t a, uint16x8_t b)
        /// A32: VHSUB.U16 Qd, Qn, Qm
        /// A64: UHSUB Vd.8H, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<ushort> FusedSubtractHalving(Vector128<ushort> left, Vector128<ushort> right) => FusedSubtractHalving(left, right);

        /// <summary>
        /// uint32x4_t vhsubq_u32 (uint32x4_t a, uint32x4_t b)
        /// A32: VHSUB.U32 Qd, Qn, Qm
        /// A64: UHSUB Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<uint> FusedSubtractHalving(Vector128<uint> left, Vector128<uint> right) => FusedSubtractHalving(left, right);
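
        // Illustrative sketch (assumes AdvSimd.IsSupported): FusedSubtractHalving computes
        // (left - right) >> 1 per lane (an arithmetic shift for the signed overloads).
        //
        //     Vector64<int> d = AdvSimd.FusedSubtractHalving(Vector64.Create(9, 1), Vector64.Create(3, 7));
        //     // { (9 - 3) >> 1, (1 - 7) >> 1 } == { 3, -3 }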

        /// <summary>
        /// uint8x8_t vset_lane_u8 (uint8_t a, uint8x8_t v, const int lane)
        /// A32: VMOV.8 Dd[lane], Rt
        /// A64: INS Vd.B[lane], Wn
        /// </summary>
        public static Vector64<byte> Insert(Vector64<byte> vector, byte index, byte data) => Insert(vector, index, data);

        /// <summary>
        /// int16x4_t vset_lane_s16 (int16_t a, int16x4_t v, const int lane)
        /// A32: VMOV.16 Dd[lane], Rt
        /// A64: INS Vd.H[lane], Wn
        /// </summary>
        public static Vector64<short> Insert(Vector64<short> vector, byte index, short data) => Insert(vector, index, data);

        /// <summary>
        /// int32x2_t vset_lane_s32 (int32_t a, int32x2_t v, const int lane)
        /// A32: VMOV.32 Dd[lane], Rt
        /// A64: INS Vd.S[lane], Wn
        /// </summary>
        public static Vector64<int> Insert(Vector64<int> vector, byte index, int data) => Insert(vector, index, data);

        /// <summary>
        /// int8x8_t vset_lane_s8 (int8_t a, int8x8_t v, const int lane)
        /// A32: VMOV.8 Dd[lane], Rt
        /// A64: INS Vd.B[lane], Wn
        /// </summary>
        public static Vector64<sbyte> Insert(Vector64<sbyte> vector, byte index, sbyte data) => Insert(vector, index, data);

        /// <summary>
        /// float32x2_t vset_lane_f32 (float32_t a, float32x2_t v, const int lane)
        /// A32: VMOV.F32 Sd, Sm
        /// A64: INS Vd.S[lane], Vn.S[0]
        /// </summary>
        public static Vector64<float> Insert(Vector64<float> vector, byte index, float data) => Insert(vector, index, data);

        /// <summary>
        /// uint16x4_t vset_lane_u16 (uint16_t a, uint16x4_t v, const int lane)
        /// A32: VMOV.16 Dd[lane], Rt
        /// A64: INS Vd.H[lane], Wn
        /// </summary>
        public static Vector64<ushort> Insert(Vector64<ushort> vector, byte index, ushort data) => Insert(vector, index, data);

        /// <summary>
        /// uint32x2_t vset_lane_u32 (uint32_t a, uint32x2_t v, const int lane)
        /// A32: VMOV.32 Dd[lane], Rt
        /// A64: INS Vd.S[lane], Wn
        /// </summary>
        public static Vector64<uint> Insert(Vector64<uint> vector, byte index, uint data) => Insert(vector, index, data);

        /// <summary>
        /// uint8x16_t vsetq_lane_u8 (uint8_t a, uint8x16_t v, const int lane)
        /// A32: VMOV.8 Dd[lane], Rt
        /// A64: INS Vd.B[lane], Wn
        /// </summary>
        public static Vector128<byte> Insert(Vector128<byte> vector, byte index, byte data) => Insert(vector, index, data);

        /// <summary>
        /// float64x2_t vsetq_lane_f64 (float64_t a, float64x2_t v, const int lane)
        /// A32: VMOV.F64 Dd, Dm
        /// A64: INS Vd.D[lane], Vn.D[0]
        /// </summary>
        public static Vector128<double> Insert(Vector128<double> vector, byte index, double data) => Insert(vector, index, data);

        /// <summary>
        /// int16x8_t vsetq_lane_s16 (int16_t a, int16x8_t v, const int lane)
        /// A32: VMOV.16 Dd[lane], Rt
        /// A64: INS Vd.H[lane], Wn
        /// </summary>
        public static Vector128<short> Insert(Vector128<short> vector, byte index, short data) => Insert(vector, index, data);

        /// <summary>
        /// int32x4_t vsetq_lane_s32 (int32_t a, int32x4_t v, const int lane)
        /// A32: VMOV.32 Dd[lane], Rt
        /// A64: INS Vd.S[lane], Wn
        /// </summary>
        public static Vector128<int> Insert(Vector128<int> vector, byte index, int data) => Insert(vector, index, data);

        /// <summary>
        /// int64x2_t vsetq_lane_s64 (int64_t a, int64x2_t v, const int lane)
        /// A32: VMOV.64 Dd, Rt, Rt2
        /// A64: INS Vd.D[lane], Xn
        /// </summary>
        public static Vector128<long> Insert(Vector128<long> vector, byte index, long data) => Insert(vector, index, data);

        /// <summary>
        /// int8x16_t vsetq_lane_s8 (int8_t a, int8x16_t v, const int lane)
        /// A32: VMOV.8 Dd[lane], Rt
        /// A64: INS Vd.B[lane], Wn
        /// </summary>
        public static Vector128<sbyte> Insert(Vector128<sbyte> vector, byte index, sbyte data) => Insert(vector, index, data);
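
        // Illustrative sketch (assumes AdvSimd.IsSupported): Insert returns a copy of the input
        // with one lane replaced; the input vector itself is unchanged.
        //
        //     Vector128<int> v = Vector128.Create(1, 2, 3, 4);
        //     Vector128<int> w = AdvSimd.Insert(v, 3, 99); // { 1, 2, 3, 99 }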

        /// <summary>
        /// float32x4_t vsetq_lane_f32 (float32_t a, float32x4_t v, const int lane)
        /// A32: VMOV.F32 Sd, Sm
        /// A64: INS Vd.S[lane], Vn.S[0]
        /// </summary>
        public static Vector128<float> Insert(Vector128<float> vector, byte index, float data) => Insert(vector, index, data);

        /// <summary>
        /// uint16x8_t vsetq_lane_u16 (uint16_t a, uint16x8_t v, const int lane)
        /// A32: VMOV.16 Dd[lane], Rt
        /// A64: INS Vd.H[lane], Wn
        /// </summary>
        public static Vector128<ushort> Insert(Vector128<ushort> vector, byte index, ushort data) => Insert(vector, index, data);

        /// <summary>
        /// uint32x4_t vsetq_lane_u32 (uint32_t a, uint32x4_t v, const int lane)
        /// A32: VMOV.32 Dd[lane], Rt
        /// A64: INS Vd.S[lane], Wn
        /// </summary>
        public static Vector128<uint> Insert(Vector128<uint> vector, byte index, uint data) => Insert(vector, index, data);

        /// <summary>
        /// uint64x2_t vsetq_lane_u64 (uint64_t a, uint64x2_t v, const int lane)
        /// A32: VMOV.64 Dd, Rt, Rt2
        /// A64: INS Vd.D[lane], Xn
        /// </summary>
        public static Vector128<ulong> Insert(Vector128<ulong> vector, byte index, ulong data) => Insert(vector, index, data);

        /// <summary>
        /// float64x2_t vcopyq_lane_f64 (float64x2_t a, const int lane1, float64x1_t b, const int lane2)
        /// A32: VMOV.F64 Dd, Dm
        /// A64: INS Vd.D[lane1], Vn.D[0]
        /// </summary>
        public static Vector128<double> InsertScalar(Vector128<double> result, byte resultIndex, Vector64<double> value) => InsertScalar(result, resultIndex, value);

        /// <summary>
        /// int64x2_t vcopyq_lane_s64 (int64x2_t a, const int lane1, int64x1_t b, const int lane2)
        /// A32: VMOV Dd, Dm
        /// A64: INS Vd.D[lane1], Vn.D[0]
        /// </summary>
        public static Vector128<long> InsertScalar(Vector128<long> result, byte resultIndex, Vector64<long> value) => InsertScalar(result, resultIndex, value);

        /// <summary>
        /// uint64x2_t vcopyq_lane_u64 (uint64x2_t a, const int lane1, uint64x1_t b, const int lane2)
        /// A32: VMOV Dd, Dm
        /// A64: INS Vd.D[lane1], Vn.D[0]
        /// </summary>
        public static Vector128<ulong> InsertScalar(Vector128<ulong> result, byte resultIndex, Vector64<ulong> value) => InsertScalar(result, resultIndex, value);

        /// <summary>
        /// int16x4_t vcls_s16 (int16x4_t a)
        /// A32: VCLS.S16 Dd, Dm
        /// A64: CLS Vd.4H, Vn.4H
        /// </summary>
        public static Vector64<short> LeadingSignCount(Vector64<short> value) => LeadingSignCount(value);

        /// <summary>
        /// int32x2_t vcls_s32 (int32x2_t a)
        /// A32: VCLS.S32 Dd, Dm
        /// A64: CLS Vd.2S, Vn.2S
        /// </summary>
        public static Vector64<int> LeadingSignCount(Vector64<int> value) => LeadingSignCount(value);

        /// <summary>
        /// int8x8_t vcls_s8 (int8x8_t a)
        /// A32: VCLS.S8 Dd, Dm
        /// A64: CLS Vd.8B, Vn.8B
        /// </summary>
        public static Vector64<sbyte> LeadingSignCount(Vector64<sbyte> value) => LeadingSignCount(value);

        /// <summary>
        /// int16x8_t vclsq_s16 (int16x8_t a)
        /// A32: VCLS.S16 Qd, Qm
        /// A64: CLS Vd.8H, Vn.8H
        /// </summary>
        public static Vector128<short> LeadingSignCount(Vector128<short> value) => LeadingSignCount(value);

        /// <summary>
        /// int32x4_t vclsq_s32 (int32x4_t a)
        /// A32: VCLS.S32 Qd, Qm
        /// A64: CLS Vd.4S, Vn.4S
        /// </summary>
        public static Vector128<int> LeadingSignCount(Vector128<int> value) => LeadingSignCount(value);

        /// <summary>
        /// int8x16_t vclsq_s8 (int8x16_t a)
        /// A32: VCLS.S8 Qd, Qm
        /// A64: CLS Vd.16B, Vn.16B
        /// </summary>
        public static Vector128<sbyte> LeadingSignCount(Vector128<sbyte> value) => LeadingSignCount(value);
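
        // Illustrative sketch (assumes AdvSimd.IsSupported): LeadingSignCount (CLS) counts, per
        // lane, how many consecutive bits below the sign bit equal the sign bit.
        //
        //     AdvSimd.LeadingSignCount(Vector64.Create((short)1));
        //     // each lane == 14: 0x0001 has a 0 sign bit followed by fourteen more 0 bits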
        /// <summary>
        /// int16x4_t vcls_s16 (int16x4_t a)
        ///   A32: VCLS.S16 Dd, Dm
        ///   A64: CLS Vd.4H, Vn.4H
        /// </summary>
        public static Vector64<short> LeadingSignCount(Vector64<short> value) => LeadingSignCount(value);

        /// <summary>
        /// int32x2_t vcls_s32 (int32x2_t a)
        ///   A32: VCLS.S32 Dd, Dm
        ///   A64: CLS Vd.2S, Vn.2S
        /// </summary>
        public static Vector64<int> LeadingSignCount(Vector64<int> value) => LeadingSignCount(value);

        /// <summary>
        /// int8x8_t vcls_s8 (int8x8_t a)
        ///   A32: VCLS.S8 Dd, Dm
        ///   A64: CLS Vd.8B, Vn.8B
        /// </summary>
        public static Vector64<sbyte> LeadingSignCount(Vector64<sbyte> value) => LeadingSignCount(value);

        /// <summary>
        /// int16x8_t vclsq_s16 (int16x8_t a)
        ///   A32: VCLS.S16 Qd, Qm
        ///   A64: CLS Vd.8H, Vn.8H
        /// </summary>
        public static Vector128<short> LeadingSignCount(Vector128<short> value) => LeadingSignCount(value);

        /// <summary>
        /// int32x4_t vclsq_s32 (int32x4_t a)
        ///   A32: VCLS.S32 Qd, Qm
        ///   A64: CLS Vd.4S, Vn.4S
        /// </summary>
        public static Vector128<int> LeadingSignCount(Vector128<int> value) => LeadingSignCount(value);

        /// <summary>
        /// int8x16_t vclsq_s8 (int8x16_t a)
        ///   A32: VCLS.S8 Qd, Qm
        ///   A64: CLS Vd.16B, Vn.16B
        /// </summary>
        public static Vector128<sbyte> LeadingSignCount(Vector128<sbyte> value) => LeadingSignCount(value);

        /// <summary>
        /// uint8x8_t vclz_u8 (uint8x8_t a)
        ///   A32: VCLZ.I8 Dd, Dm
        ///   A64: CLZ Vd.8B, Vn.8B
        /// </summary>
        public static Vector64<byte> LeadingZeroCount(Vector64<byte> value) => LeadingZeroCount(value);

        /// <summary>
        /// int16x4_t vclz_s16 (int16x4_t a)
        ///   A32: VCLZ.I16 Dd, Dm
        ///   A64: CLZ Vd.4H, Vn.4H
        /// </summary>
        public static Vector64<short> LeadingZeroCount(Vector64<short> value) => LeadingZeroCount(value);

        /// <summary>
        /// int32x2_t vclz_s32 (int32x2_t a)
        ///   A32: VCLZ.I32 Dd, Dm
        ///   A64: CLZ Vd.2S, Vn.2S
        /// </summary>
        public static Vector64<int> LeadingZeroCount(Vector64<int> value) => LeadingZeroCount(value);

        /// <summary>
        /// int8x8_t vclz_s8 (int8x8_t a)
        ///   A32: VCLZ.I8 Dd, Dm
        ///   A64: CLZ Vd.8B, Vn.8B
        /// </summary>
        public static Vector64<sbyte> LeadingZeroCount(Vector64<sbyte> value) => LeadingZeroCount(value);

        /// <summary>
        /// uint16x4_t vclz_u16 (uint16x4_t a)
        ///   A32: VCLZ.I16 Dd, Dm
        ///   A64: CLZ Vd.4H, Vn.4H
        /// </summary>
        public static Vector64<ushort> LeadingZeroCount(Vector64<ushort> value) => LeadingZeroCount(value);

        /// <summary>
        /// uint32x2_t vclz_u32 (uint32x2_t a)
        ///   A32: VCLZ.I32 Dd, Dm
        ///   A64: CLZ Vd.2S, Vn.2S
        /// </summary>
        public static Vector64<uint> LeadingZeroCount(Vector64<uint> value) => LeadingZeroCount(value);

        /// <summary>
        /// uint8x16_t vclzq_u8 (uint8x16_t a)
        ///   A32: VCLZ.I8 Qd, Qm
        ///   A64: CLZ Vd.16B, Vn.16B
        /// </summary>
        public static Vector128<byte> LeadingZeroCount(Vector128<byte> value) => LeadingZeroCount(value);

        /// <summary>
        /// int16x8_t vclzq_s16 (int16x8_t a)
        ///   A32: VCLZ.I16 Qd, Qm
        ///   A64: CLZ Vd.8H, Vn.8H
        /// </summary>
        public static Vector128<short> LeadingZeroCount(Vector128<short> value) => LeadingZeroCount(value);

        /// <summary>
        /// int32x4_t vclzq_s32 (int32x4_t a)
        ///   A32: VCLZ.I32 Qd, Qm
        ///   A64: CLZ Vd.4S, Vn.4S
        /// </summary>
        public static Vector128<int> LeadingZeroCount(Vector128<int> value) => LeadingZeroCount(value);

        /// <summary>
        /// int8x16_t vclzq_s8 (int8x16_t a)
        ///   A32: VCLZ.I8 Qd, Qm
        ///   A64: CLZ Vd.16B, Vn.16B
        /// </summary>
        public static Vector128<sbyte> LeadingZeroCount(Vector128<sbyte> value) => LeadingZeroCount(value);

        /// <summary>
        /// uint16x8_t vclzq_u16 (uint16x8_t a)
        ///   A32: VCLZ.I16 Qd, Qm
        ///   A64: CLZ Vd.8H, Vn.8H
        /// </summary>
        public static Vector128<ushort> LeadingZeroCount(Vector128<ushort> value) => LeadingZeroCount(value);

        /// <summary>
        /// uint32x4_t vclzq_u32 (uint32x4_t a)
        ///   A32: VCLZ.I32 Qd, Qm
        ///   A64: CLZ Vd.4S, Vn.4S
        /// </summary>
        public static Vector128<uint> LeadingZeroCount(Vector128<uint> value) => LeadingZeroCount(value);
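        // Illustrative sketch (hypothetical values, not part of the source file): both counts
        // operate per lane. LeadingZeroCount counts zero bits from the top; LeadingSignCount
        // counts redundant copies of the sign bit below the sign bit itself.
        //
        //     Vector64<int> v = Vector64.Create(1, -1);
        //     AdvSimd.LeadingZeroCount(v);  // <31, 0> : 0x00000001 has 31 leading zeros
        //     AdvSimd.LeadingSignCount(v);  // <30, 31>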
        /// <summary>
        /// uint8x8_t vld1_lane_u8 (uint8_t const * ptr, uint8x8_t src, const int lane)
        ///   A32: VLD1.8 { Dd[index] }, [Rn]
        ///   A64: LD1 { Vt.B }[index], [Xn]
        /// </summary>
        public static unsafe Vector64<byte> LoadAndInsertScalar(Vector64<byte> value, byte index, byte* address) => LoadAndInsertScalar(value, index, address);

        /// <summary>
        /// int16x4_t vld1_lane_s16 (int16_t const * ptr, int16x4_t src, const int lane)
        ///   A32: VLD1.16 { Dd[index] }, [Rn]
        ///   A64: LD1 { Vt.H }[index], [Xn]
        /// </summary>
        public static unsafe Vector64<short> LoadAndInsertScalar(Vector64<short> value, byte index, short* address) => LoadAndInsertScalar(value, index, address);

        /// <summary>
        /// int32x2_t vld1_lane_s32 (int32_t const * ptr, int32x2_t src, const int lane)
        ///   A32: VLD1.32 { Dd[index] }, [Rn]
        ///   A64: LD1 { Vt.S }[index], [Xn]
        /// </summary>
        public static unsafe Vector64<int> LoadAndInsertScalar(Vector64<int> value, byte index, int* address) => LoadAndInsertScalar(value, index, address);

        /// <summary>
        /// int8x8_t vld1_lane_s8 (int8_t const * ptr, int8x8_t src, const int lane)
        ///   A32: VLD1.8 { Dd[index] }, [Rn]
        ///   A64: LD1 { Vt.B }[index], [Xn]
        /// </summary>
        public static unsafe Vector64<sbyte> LoadAndInsertScalar(Vector64<sbyte> value, byte index, sbyte* address) => LoadAndInsertScalar(value, index, address);

        /// <summary>
        /// float32x2_t vld1_lane_f32 (float32_t const * ptr, float32x2_t src, const int lane)
        ///   A32: VLD1.32 { Dd[index] }, [Rn]
        ///   A64: LD1 { Vt.S }[index], [Xn]
        /// </summary>
        public static unsafe Vector64<float> LoadAndInsertScalar(Vector64<float> value, byte index, float* address) => LoadAndInsertScalar(value, index, address);

        /// <summary>
        /// uint16x4_t vld1_lane_u16 (uint16_t const * ptr, uint16x4_t src, const int lane)
        ///   A32: VLD1.16 { Dd[index] }, [Rn]
        ///   A64: LD1 { Vt.H }[index], [Xn]
        /// </summary>
        public static unsafe Vector64<ushort> LoadAndInsertScalar(Vector64<ushort> value, byte index, ushort* address) => LoadAndInsertScalar(value, index, address);

        /// <summary>
        /// uint32x2_t vld1_lane_u32 (uint32_t const * ptr, uint32x2_t src, const int lane)
        ///   A32: VLD1.32 { Dd[index] }, [Rn]
        ///   A64: LD1 { Vt.S }[index], [Xn]
        /// </summary>
        public static unsafe Vector64<uint> LoadAndInsertScalar(Vector64<uint> value, byte index, uint* address) => LoadAndInsertScalar(value, index, address);

        /// <summary>
        /// uint8x16_t vld1q_lane_u8 (uint8_t const * ptr, uint8x16_t src, const int lane)
        ///   A32: VLD1.8 { Dd[index] }, [Rn]
        ///   A64: LD1 { Vt.B }[index], [Xn]
        /// </summary>
        public static unsafe Vector128<byte> LoadAndInsertScalar(Vector128<byte> value, byte index, byte* address) => LoadAndInsertScalar(value, index, address);

        /// <summary>
        /// float64x2_t vld1q_lane_f64 (float64_t const * ptr, float64x2_t src, const int lane)
        ///   A32: VLDR.64 Dd, [Rn]
        ///   A64: LD1 { Vt.D }[index], [Xn]
        /// </summary>
        public static unsafe Vector128<double> LoadAndInsertScalar(Vector128<double> value, byte index, double* address) => LoadAndInsertScalar(value, index, address);

        /// <summary>
        /// int16x8_t vld1q_lane_s16 (int16_t const * ptr, int16x8_t src, const int lane)
        ///   A32: VLD1.16 { Dd[index] }, [Rn]
        ///   A64: LD1 { Vt.H }[index], [Xn]
        /// </summary>
        public static unsafe Vector128<short> LoadAndInsertScalar(Vector128<short> value, byte index, short* address) => LoadAndInsertScalar(value, index, address);

        /// <summary>
        /// int32x4_t vld1q_lane_s32 (int32_t const * ptr, int32x4_t src, const int lane)
        ///   A32: VLD1.32 { Dd[index] }, [Rn]
        ///   A64: LD1 { Vt.S }[index], [Xn]
        /// </summary>
        public static unsafe Vector128<int> LoadAndInsertScalar(Vector128<int> value, byte index, int* address) => LoadAndInsertScalar(value, index, address);

        /// <summary>
        /// int64x2_t vld1q_lane_s64 (int64_t const * ptr, int64x2_t src, const int lane)
        ///   A32: VLDR.64 Dd, [Rn]
        ///   A64: LD1 { Vt.D }[index], [Xn]
        /// </summary>
        public static unsafe Vector128<long> LoadAndInsertScalar(Vector128<long> value, byte index, long* address) => LoadAndInsertScalar(value, index, address);

        /// <summary>
        /// int8x16_t vld1q_lane_s8 (int8_t const * ptr, int8x16_t src, const int lane)
        ///   A32: VLD1.8 { Dd[index] }, [Rn]
        ///   A64: LD1 { Vt.B }[index], [Xn]
        /// </summary>
        public static unsafe Vector128<sbyte> LoadAndInsertScalar(Vector128<sbyte> value, byte index, sbyte* address) => LoadAndInsertScalar(value, index, address);

        /// <summary>
        /// float32x4_t vld1q_lane_f32 (float32_t const * ptr, float32x4_t src, const int lane)
        ///   A32: VLD1.32 { Dd[index] }, [Rn]
        ///   A64: LD1 { Vt.S }[index], [Xn]
        /// </summary>
        public static unsafe Vector128<float> LoadAndInsertScalar(Vector128<float> value, byte index, float* address) => LoadAndInsertScalar(value, index, address);

        /// <summary>
        /// uint16x8_t vld1q_lane_u16 (uint16_t const * ptr, uint16x8_t src, const int lane)
        ///   A32: VLD1.16 { Dd[index] }, [Rn]
        ///   A64: LD1 { Vt.H }[index], [Xn]
        /// </summary>
        public static unsafe Vector128<ushort> LoadAndInsertScalar(Vector128<ushort> value, byte index, ushort* address) => LoadAndInsertScalar(value, index, address);

        /// <summary>
        /// uint32x4_t vld1q_lane_u32 (uint32_t const * ptr, uint32x4_t src, const int lane)
        ///   A32: VLD1.32 { Dd[index] }, [Rn]
        ///   A64: LD1 { Vt.S }[index], [Xn]
        /// </summary>
        public static unsafe Vector128<uint> LoadAndInsertScalar(Vector128<uint> value, byte index, uint* address) => LoadAndInsertScalar(value, index, address);

        /// <summary>
        /// uint64x2_t vld1q_lane_u64 (uint64_t const * ptr, uint64x2_t src, const int lane)
        ///   A32: VLDR.64 Dd, [Rn]
        ///   A64: LD1 { Vt.D }[index], [Xn]
        /// </summary>
        public static unsafe Vector128<ulong> LoadAndInsertScalar(Vector128<ulong> value, byte index, ulong* address) => LoadAndInsertScalar(value, index, address);
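        // Illustrative sketch (hypothetical values, not part of the source file):
        // LoadAndInsertScalar reads one element from memory into the chosen lane of an existing
        // vector; an unsafe context is required because the element is passed by pointer.
        //
        //     Vector128<int> v = Vector128<int>.Zero;
        //     int x = 7;
        //     v = AdvSimd.LoadAndInsertScalar(v, 0, &x);   // lane 0 = 7, other lanes unchanged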
        /// <summary>
        /// uint8x8_t vld1_dup_u8 (uint8_t const * ptr)
        ///   A32: VLD1.8 { Dd[] }, [Rn]
        ///   A64: LD1R { Vt.8B }, [Xn]
        /// </summary>
        public static unsafe Vector64<byte> LoadAndReplicateToVector64(byte* address) => LoadAndReplicateToVector64(address);

        /// <summary>
        /// int16x4_t vld1_dup_s16 (int16_t const * ptr)
        ///   A32: VLD1.16 { Dd[] }, [Rn]
        ///   A64: LD1R { Vt.4H }, [Xn]
        /// </summary>
        public static unsafe Vector64<short> LoadAndReplicateToVector64(short* address) => LoadAndReplicateToVector64(address);

        /// <summary>
        /// int32x2_t vld1_dup_s32 (int32_t const * ptr)
        ///   A32: VLD1.32 { Dd[] }, [Rn]
        ///   A64: LD1R { Vt.2S }, [Xn]
        /// </summary>
        public static unsafe Vector64<int> LoadAndReplicateToVector64(int* address) => LoadAndReplicateToVector64(address);

        /// <summary>
        /// int8x8_t vld1_dup_s8 (int8_t const * ptr)
        ///   A32: VLD1.8 { Dd[] }, [Rn]
        ///   A64: LD1R { Vt.8B }, [Xn]
        /// </summary>
        public static unsafe Vector64<sbyte> LoadAndReplicateToVector64(sbyte* address) => LoadAndReplicateToVector64(address);

        /// <summary>
        /// float32x2_t vld1_dup_f32 (float32_t const * ptr)
        ///   A32: VLD1.32 { Dd[] }, [Rn]
        ///   A64: LD1R { Vt.2S }, [Xn]
        /// </summary>
        public static unsafe Vector64<float> LoadAndReplicateToVector64(float* address) => LoadAndReplicateToVector64(address);

        /// <summary>
        /// uint16x4_t vld1_dup_u16 (uint16_t const * ptr)
        ///   A32: VLD1.16 { Dd[] }, [Rn]
        ///   A64: LD1R { Vt.4H }, [Xn]
        /// </summary>
        public static unsafe Vector64<ushort> LoadAndReplicateToVector64(ushort* address) => LoadAndReplicateToVector64(address);

        /// <summary>
        /// uint32x2_t vld1_dup_u32 (uint32_t const * ptr)
        ///   A32: VLD1.32 { Dd[] }, [Rn]
        ///   A64: LD1R { Vt.2S }, [Xn]
        /// </summary>
        public static unsafe Vector64<uint> LoadAndReplicateToVector64(uint* address) => LoadAndReplicateToVector64(address);

        /// <summary>
        /// uint8x16_t vld1q_dup_u8 (uint8_t const * ptr)
        ///   A32: VLD1.8 { Dd[], Dd+1[] }, [Rn]
        ///   A64: LD1R { Vt.16B }, [Xn]
        /// </summary>
        public static unsafe Vector128<byte> LoadAndReplicateToVector128(byte* address) => LoadAndReplicateToVector128(address);

        /// <summary>
        /// int16x8_t vld1q_dup_s16 (int16_t const * ptr)
        ///   A32: VLD1.16 { Dd[], Dd+1[] }, [Rn]
        ///   A64: LD1R { Vt.8H }, [Xn]
        /// </summary>
        public static unsafe Vector128<short> LoadAndReplicateToVector128(short* address) => LoadAndReplicateToVector128(address);

        /// <summary>
        /// int32x4_t vld1q_dup_s32 (int32_t const * ptr)
        ///   A32: VLD1.32 { Dd[], Dd+1[] }, [Rn]
        ///   A64: LD1R { Vt.4S }, [Xn]
        /// </summary>
        public static unsafe Vector128<int> LoadAndReplicateToVector128(int* address) => LoadAndReplicateToVector128(address);

        /// <summary>
        /// int8x16_t vld1q_dup_s8 (int8_t const * ptr)
        ///   A32: VLD1.8 { Dd[], Dd+1[] }, [Rn]
        ///   A64: LD1R { Vt.16B }, [Xn]
        /// </summary>
        public static unsafe Vector128<sbyte> LoadAndReplicateToVector128(sbyte* address) => LoadAndReplicateToVector128(address);

        /// <summary>
        /// float32x4_t vld1q_dup_f32 (float32_t const * ptr)
        ///   A32: VLD1.32 { Dd[], Dd+1[] }, [Rn]
        ///   A64: LD1R { Vt.4S }, [Xn]
        /// </summary>
        public static unsafe Vector128<float> LoadAndReplicateToVector128(float* address) => LoadAndReplicateToVector128(address);

        /// <summary>
        /// uint16x8_t vld1q_dup_u16 (uint16_t const * ptr)
        ///   A32: VLD1.16 { Dd[], Dd+1[] }, [Rn]
        ///   A64: LD1R { Vt.8H }, [Xn]
        /// </summary>
        public static unsafe Vector128<ushort> LoadAndReplicateToVector128(ushort* address) => LoadAndReplicateToVector128(address);

        /// <summary>
        /// uint32x4_t vld1q_dup_u32 (uint32_t const * ptr)
        ///   A32: VLD1.32 { Dd[], Dd+1[] }, [Rn]
        ///   A64: LD1R { Vt.4S }, [Xn]
        /// </summary>
        public static unsafe Vector128<uint> LoadAndReplicateToVector128(uint* address) => LoadAndReplicateToVector128(address);
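        // Illustrative sketch (hypothetical value): LoadAndReplicateToVector128 broadcasts a
        // single element from memory into every lane, which is a common way to splat a scalar:
        //
        //     uint s = 0xDEADBEEF;
        //     Vector128<uint> v = AdvSimd.LoadAndReplicateToVector128(&s);   // all four lanes = s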
        /// <summary>
        /// uint8x8_t vld1_u8 (uint8_t const * ptr)
        ///   A32: VLD1.8 Dd, [Rn]
        ///   A64: LD1 Vt.8B, [Xn]
        /// </summary>
        public static unsafe Vector64<byte> LoadVector64(byte* address) => LoadVector64(address);

        /// <summary>
        /// float64x1_t vld1_f64 (float64_t const * ptr)
        ///   A32: VLD1.64 Dd, [Rn]
        ///   A64: LD1 Vt.1D, [Xn]
        /// </summary>
        public static unsafe Vector64<double> LoadVector64(double* address) => LoadVector64(address);

        /// <summary>
        /// int16x4_t vld1_s16 (int16_t const * ptr)
        ///   A32: VLD1.16 Dd, [Rn]
        ///   A64: LD1 Vt.4H, [Xn]
        /// </summary>
        public static unsafe Vector64<short> LoadVector64(short* address) => LoadVector64(address);

        /// <summary>
        /// int32x2_t vld1_s32 (int32_t const * ptr)
        ///   A32: VLD1.32 Dd, [Rn]
        ///   A64: LD1 Vt.2S, [Xn]
        /// </summary>
        public static unsafe Vector64<int> LoadVector64(int* address) => LoadVector64(address);

        /// <summary>
        /// int64x1_t vld1_s64 (int64_t const * ptr)
        ///   A32: VLD1.64 Dd, [Rn]
        ///   A64: LD1 Vt.1D, [Xn]
        /// </summary>
        public static unsafe Vector64<long> LoadVector64(long* address) => LoadVector64(address);

        /// <summary>
        /// int8x8_t vld1_s8 (int8_t const * ptr)
        ///   A32: VLD1.8 Dd, [Rn]
        ///   A64: LD1 Vt.8B, [Xn]
        /// </summary>
        public static unsafe Vector64<sbyte> LoadVector64(sbyte* address) => LoadVector64(address);

        /// <summary>
        /// float32x2_t vld1_f32 (float32_t const * ptr)
        ///   A32: VLD1.32 Dd, [Rn]
        ///   A64: LD1 Vt.2S, [Xn]
        /// </summary>
        public static unsafe Vector64<float> LoadVector64(float* address) => LoadVector64(address);

        /// <summary>
        /// uint16x4_t vld1_u16 (uint16_t const * ptr)
        ///   A32: VLD1.16 Dd, [Rn]
        ///   A64: LD1 Vt.4H, [Xn]
        /// </summary>
        public static unsafe Vector64<ushort> LoadVector64(ushort* address) => LoadVector64(address);

        /// <summary>
        /// uint32x2_t vld1_u32 (uint32_t const * ptr)
        ///   A32: VLD1.32 Dd, [Rn]
        ///   A64: LD1 Vt.2S, [Xn]
        /// </summary>
        public static unsafe Vector64<uint> LoadVector64(uint* address) => LoadVector64(address);

        /// <summary>
        /// uint64x1_t vld1_u64 (uint64_t const * ptr)
        ///   A32: VLD1.64 Dd, [Rn]
        ///   A64: LD1 Vt.1D, [Xn]
        /// </summary>
        public static unsafe Vector64<ulong> LoadVector64(ulong* address) => LoadVector64(address);

        /// <summary>
        /// uint8x16_t vld1q_u8 (uint8_t const * ptr)
        ///   A32: VLD1.8 Dd, Dd+1, [Rn]
        ///   A64: LD1 Vt.16B, [Xn]
        /// </summary>
        public static unsafe Vector128<byte> LoadVector128(byte* address) => LoadVector128(address);

        /// <summary>
        /// float64x2_t vld1q_f64 (float64_t const * ptr)
        ///   A32: VLD1.64 Dd, Dd+1, [Rn]
        ///   A64: LD1 Vt.2D, [Xn]
        /// </summary>
        public static unsafe Vector128<double> LoadVector128(double* address) => LoadVector128(address);

        /// <summary>
        /// int16x8_t vld1q_s16 (int16_t const * ptr)
        ///   A32: VLD1.16 Dd, Dd+1, [Rn]
        ///   A64: LD1 Vt.8H, [Xn]
        /// </summary>
        public static unsafe Vector128<short> LoadVector128(short* address) => LoadVector128(address);

        /// <summary>
        /// int32x4_t vld1q_s32 (int32_t const * ptr)
        ///   A32: VLD1.32 Dd, Dd+1, [Rn]
        ///   A64: LD1 Vt.4S, [Xn]
        /// </summary>
        public static unsafe Vector128<int> LoadVector128(int* address) => LoadVector128(address);

        /// <summary>
        /// int64x2_t vld1q_s64 (int64_t const * ptr)
        ///   A32: VLD1.64 Dd, Dd+1, [Rn]
        ///   A64: LD1 Vt.2D, [Xn]
        /// </summary>
        public static unsafe Vector128<long> LoadVector128(long* address) => LoadVector128(address);

        /// <summary>
        /// int8x16_t vld1q_s8 (int8_t const * ptr)
        ///   A32: VLD1.8 Dd, Dd+1, [Rn]
        ///   A64: LD1 Vt.16B, [Xn]
        /// </summary>
        public static unsafe Vector128<sbyte> LoadVector128(sbyte* address) => LoadVector128(address);

        /// <summary>
        /// float32x4_t vld1q_f32 (float32_t const * ptr)
        ///   A32: VLD1.32 Dd, Dd+1, [Rn]
        ///   A64: LD1 Vt.4S, [Xn]
        /// </summary>
        public static unsafe Vector128<float> LoadVector128(float* address) => LoadVector128(address);

        /// <summary>
        /// uint16x8_t vld1q_u16 (uint16_t const * ptr)
        ///   A32: VLD1.16 Dd, Dd+1, [Rn]
        ///   A64: LD1 Vt.8H, [Xn]
        /// </summary>
        public static unsafe Vector128<ushort> LoadVector128(ushort* address) => LoadVector128(address);

        /// <summary>
        /// uint32x4_t vld1q_u32 (uint32_t const * ptr)
        ///   A32: VLD1.32 Dd, Dd+1, [Rn]
        ///   A64: LD1 Vt.4S, [Xn]
        /// </summary>
        public static unsafe Vector128<uint> LoadVector128(uint* address) => LoadVector128(address);

        /// <summary>
        /// uint64x2_t vld1q_u64 (uint64_t const * ptr)
        ///   A32: VLD1.64 Dd, Dd+1, [Rn]
        ///   A64: LD1 Vt.2D, [Xn]
        /// </summary>
        public static unsafe Vector128<ulong> LoadVector128(ulong* address) => LoadVector128(address);
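        // Illustrative sketch (hypothetical buffer): LoadVector128 performs a full 16-byte load.
        // VLD1/LD1 do not require an aligned address, though aligned accesses are generally faster.
        //
        //     int[] data = { 1, 2, 3, 4, 5 };
        //     fixed (int* p = data)
        //     {
        //         Vector128<int> v = AdvSimd.LoadVector128(p);       // <1, 2, 3, 4>
        //         Vector64<int>  w = AdvSimd.LoadVector64(p + 3);    // <4, 5>
        //     }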
        /// <summary>
        /// uint8x8_t vmax_u8 (uint8x8_t a, uint8x8_t b)
        ///   A32: VMAX.U8 Dd, Dn, Dm
        ///   A64: UMAX Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<byte> Max(Vector64<byte> left, Vector64<byte> right) => Max(left, right);

        /// <summary>
        /// int16x4_t vmax_s16 (int16x4_t a, int16x4_t b)
        ///   A32: VMAX.S16 Dd, Dn, Dm
        ///   A64: SMAX Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<short> Max(Vector64<short> left, Vector64<short> right) => Max(left, right);

        /// <summary>
        /// int32x2_t vmax_s32 (int32x2_t a, int32x2_t b)
        ///   A32: VMAX.S32 Dd, Dn, Dm
        ///   A64: SMAX Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<int> Max(Vector64<int> left, Vector64<int> right) => Max(left, right);

        /// <summary>
        /// int8x8_t vmax_s8 (int8x8_t a, int8x8_t b)
        ///   A32: VMAX.S8 Dd, Dn, Dm
        ///   A64: SMAX Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<sbyte> Max(Vector64<sbyte> left, Vector64<sbyte> right) => Max(left, right);

        /// <summary>
        /// float32x2_t vmax_f32 (float32x2_t a, float32x2_t b)
        ///   A32: VMAX.F32 Dd, Dn, Dm
        ///   A64: FMAX Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<float> Max(Vector64<float> left, Vector64<float> right) => Max(left, right);

        /// <summary>
        /// uint16x4_t vmax_u16 (uint16x4_t a, uint16x4_t b)
        ///   A32: VMAX.U16 Dd, Dn, Dm
        ///   A64: UMAX Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<ushort> Max(Vector64<ushort> left, Vector64<ushort> right) => Max(left, right);

        /// <summary>
        /// uint32x2_t vmax_u32 (uint32x2_t a, uint32x2_t b)
        ///   A32: VMAX.U32 Dd, Dn, Dm
        ///   A64: UMAX Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<uint> Max(Vector64<uint> left, Vector64<uint> right) => Max(left, right);

        /// <summary>
        /// uint8x16_t vmaxq_u8 (uint8x16_t a, uint8x16_t b)
        ///   A32: VMAX.U8 Qd, Qn, Qm
        ///   A64: UMAX Vd.16B, Vn.16B, Vm.16B
        /// </summary>
        public static Vector128<byte> Max(Vector128<byte> left, Vector128<byte> right) => Max(left, right);

        /// <summary>
        /// int16x8_t vmaxq_s16 (int16x8_t a, int16x8_t b)
        ///   A32: VMAX.S16 Qd, Qn, Qm
        ///   A64: SMAX Vd.8H, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<short> Max(Vector128<short> left, Vector128<short> right) => Max(left, right);

        /// <summary>
        /// int32x4_t vmaxq_s32 (int32x4_t a, int32x4_t b)
        ///   A32: VMAX.S32 Qd, Qn, Qm
        ///   A64: SMAX Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<int> Max(Vector128<int> left, Vector128<int> right) => Max(left, right);

        /// <summary>
        /// int8x16_t vmaxq_s8 (int8x16_t a, int8x16_t b)
        ///   A32: VMAX.S8 Qd, Qn, Qm
        ///   A64: SMAX Vd.16B, Vn.16B, Vm.16B
        /// </summary>
        public static Vector128<sbyte> Max(Vector128<sbyte> left, Vector128<sbyte> right) => Max(left, right);

        /// <summary>
        /// float32x4_t vmaxq_f32 (float32x4_t a, float32x4_t b)
        ///   A32: VMAX.F32 Qd, Qn, Qm
        ///   A64: FMAX Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<float> Max(Vector128<float> left, Vector128<float> right) => Max(left, right);

        /// <summary>
        /// uint16x8_t vmaxq_u16 (uint16x8_t a, uint16x8_t b)
        ///   A32: VMAX.U16 Qd, Qn, Qm
        ///   A64: UMAX Vd.8H, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<ushort> Max(Vector128<ushort> left, Vector128<ushort> right) => Max(left, right);

        /// <summary>
        /// uint32x4_t vmaxq_u32 (uint32x4_t a, uint32x4_t b)
        ///   A32: VMAX.U32 Qd, Qn, Qm
        ///   A64: UMAX Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<uint> Max(Vector128<uint> left, Vector128<uint> right) => Max(left, right);

        /// <summary>
        /// float32x2_t vmaxnm_f32 (float32x2_t a, float32x2_t b)
        ///   A32: VMAXNM.F32 Dd, Dn, Dm
        ///   A64: FMAXNM Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<float> MaxNumber(Vector64<float> left, Vector64<float> right) => MaxNumber(left, right);

        /// <summary>
        /// float32x4_t vmaxnmq_f32 (float32x4_t a, float32x4_t b)
        ///   A32: VMAXNM.F32 Qd, Qn, Qm
        ///   A64: FMAXNM Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<float> MaxNumber(Vector128<float> left, Vector128<float> right) => MaxNumber(left, right);

        /// <summary>
        /// float64x1_t vmaxnm_f64 (float64x1_t a, float64x1_t b)
        ///   A32: VMAXNM.F64 Dd, Dn, Dm
        ///   A64: FMAXNM Dd, Dn, Dm
        /// </summary>
        public static Vector64<double> MaxNumberScalar(Vector64<double> left, Vector64<double> right) => MaxNumberScalar(left, right);

        /// <summary>
        /// float32_t vmaxnms_f32 (float32_t a, float32_t b)
        ///   A32: VMAXNM.F32 Sd, Sn, Sm
        ///   A64: FMAXNM Sd, Sn, Sm
        /// </summary>
        public static Vector64<float> MaxNumberScalar(Vector64<float> left, Vector64<float> right) => MaxNumberScalar(left, right);

        /// <summary>
        /// uint8x8_t vpmax_u8 (uint8x8_t a, uint8x8_t b)
        ///   A32: VPMAX.U8 Dd, Dn, Dm
        ///   A64: UMAXP Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<byte> MaxPairwise(Vector64<byte> left, Vector64<byte> right) => MaxPairwise(left, right);

        /// <summary>
        /// int16x4_t vpmax_s16 (int16x4_t a, int16x4_t b)
        ///   A32: VPMAX.S16 Dd, Dn, Dm
        ///   A64: SMAXP Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<short> MaxPairwise(Vector64<short> left, Vector64<short> right) => MaxPairwise(left, right);

        /// <summary>
        /// int32x2_t vpmax_s32 (int32x2_t a, int32x2_t b)
        ///   A32: VPMAX.S32 Dd, Dn, Dm
        ///   A64: SMAXP Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<int> MaxPairwise(Vector64<int> left, Vector64<int> right) => MaxPairwise(left, right);

        /// <summary>
        /// int8x8_t vpmax_s8 (int8x8_t a, int8x8_t b)
        ///   A32: VPMAX.S8 Dd, Dn, Dm
        ///   A64: SMAXP Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<sbyte> MaxPairwise(Vector64<sbyte> left, Vector64<sbyte> right) => MaxPairwise(left, right);

        /// <summary>
        /// float32x2_t vpmax_f32 (float32x2_t a, float32x2_t b)
        ///   A32: VPMAX.F32 Dd, Dn, Dm
        ///   A64: FMAXP Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<float> MaxPairwise(Vector64<float> left, Vector64<float> right) => MaxPairwise(left, right);

        /// <summary>
        /// uint16x4_t vpmax_u16 (uint16x4_t a, uint16x4_t b)
        ///   A32: VPMAX.U16 Dd, Dn, Dm
        ///   A64: UMAXP Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<ushort> MaxPairwise(Vector64<ushort> left, Vector64<ushort> right) => MaxPairwise(left, right);

        /// <summary>
        /// uint32x2_t vpmax_u32 (uint32x2_t a, uint32x2_t b)
        ///   A32: VPMAX.U32 Dd, Dn, Dm
        ///   A64: UMAXP Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<uint> MaxPairwise(Vector64<uint> left, Vector64<uint> right) => MaxPairwise(left, right);
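        // Illustrative sketch (hypothetical values): MaxPairwise reduces adjacent pairs, taking
        // the pairs of 'left' first and then the pairs of 'right':
        //
        //     Vector64<int> a = Vector64.Create(1, 9);
        //     Vector64<int> b = Vector64.Create(5, 3);
        //     AdvSimd.MaxPairwise(a, b);   // <max(1,9), max(5,3)> = <9, 5>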
        /// <summary>
        /// uint8x8_t vmin_u8 (uint8x8_t a, uint8x8_t b)
        ///   A32: VMIN.U8 Dd, Dn, Dm
        ///   A64: UMIN Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<byte> Min(Vector64<byte> left, Vector64<byte> right) => Min(left, right);

        /// <summary>
        /// int16x4_t vmin_s16 (int16x4_t a, int16x4_t b)
        ///   A32: VMIN.S16 Dd, Dn, Dm
        ///   A64: SMIN Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<short> Min(Vector64<short> left, Vector64<short> right) => Min(left, right);

        /// <summary>
        /// int32x2_t vmin_s32 (int32x2_t a, int32x2_t b)
        ///   A32: VMIN.S32 Dd, Dn, Dm
        ///   A64: SMIN Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<int> Min(Vector64<int> left, Vector64<int> right) => Min(left, right);

        /// <summary>
        /// int8x8_t vmin_s8 (int8x8_t a, int8x8_t b)
        ///   A32: VMIN.S8 Dd, Dn, Dm
        ///   A64: SMIN Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<sbyte> Min(Vector64<sbyte> left, Vector64<sbyte> right) => Min(left, right);

        /// <summary>
        /// float32x2_t vmin_f32 (float32x2_t a, float32x2_t b)
        ///   A32: VMIN.F32 Dd, Dn, Dm
        ///   A64: FMIN Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<float> Min(Vector64<float> left, Vector64<float> right) => Min(left, right);

        /// <summary>
        /// uint16x4_t vmin_u16 (uint16x4_t a, uint16x4_t b)
        ///   A32: VMIN.U16 Dd, Dn, Dm
        ///   A64: UMIN Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<ushort> Min(Vector64<ushort> left, Vector64<ushort> right) => Min(left, right);

        /// <summary>
        /// uint32x2_t vmin_u32 (uint32x2_t a, uint32x2_t b)
        ///   A32: VMIN.U32 Dd, Dn, Dm
        ///   A64: UMIN Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<uint> Min(Vector64<uint> left, Vector64<uint> right) => Min(left, right);

        /// <summary>
        /// uint8x16_t vminq_u8 (uint8x16_t a, uint8x16_t b)
        ///   A32: VMIN.U8 Qd, Qn, Qm
        ///   A64: UMIN Vd.16B, Vn.16B, Vm.16B
        /// </summary>
        public static Vector128<byte> Min(Vector128<byte> left, Vector128<byte> right) => Min(left, right);

        /// <summary>
        /// int16x8_t vminq_s16 (int16x8_t a, int16x8_t b)
        ///   A32: VMIN.S16 Qd, Qn, Qm
        ///   A64: SMIN Vd.8H, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<short> Min(Vector128<short> left, Vector128<short> right) => Min(left, right);

        /// <summary>
        /// int32x4_t vminq_s32 (int32x4_t a, int32x4_t b)
        ///   A32: VMIN.S32 Qd, Qn, Qm
        ///   A64: SMIN Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<int> Min(Vector128<int> left, Vector128<int> right) => Min(left, right);

        /// <summary>
        /// int8x16_t vminq_s8 (int8x16_t a, int8x16_t b)
        ///   A32: VMIN.S8 Qd, Qn, Qm
        ///   A64: SMIN Vd.16B, Vn.16B, Vm.16B
        /// </summary>
        public static Vector128<sbyte> Min(Vector128<sbyte> left, Vector128<sbyte> right) => Min(left, right);

        /// <summary>
        /// float32x4_t vminq_f32 (float32x4_t a, float32x4_t b)
        ///   A32: VMIN.F32 Qd, Qn, Qm
        ///   A64: FMIN Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<float> Min(Vector128<float> left, Vector128<float> right) => Min(left, right);

        /// <summary>
        /// uint16x8_t vminq_u16 (uint16x8_t a, uint16x8_t b)
        ///   A32: VMIN.U16 Qd, Qn, Qm
        ///   A64: UMIN Vd.8H, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<ushort> Min(Vector128<ushort> left, Vector128<ushort> right) => Min(left, right);

        /// <summary>
        /// uint32x4_t vminq_u32 (uint32x4_t a, uint32x4_t b)
        ///   A32: VMIN.U32 Qd, Qn, Qm
        ///   A64: UMIN Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<uint> Min(Vector128<uint> left, Vector128<uint> right) => Min(left, right);

        /// <summary>
        /// float32x2_t vminnm_f32 (float32x2_t a, float32x2_t b)
        ///   A32: VMINNM.F32 Dd, Dn, Dm
        ///   A64: FMINNM Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<float> MinNumber(Vector64<float> left, Vector64<float> right) => MinNumber(left, right);

        /// <summary>
        /// float32x4_t vminnmq_f32 (float32x4_t a, float32x4_t b)
        ///   A32: VMINNM.F32 Qd, Qn, Qm
        ///   A64: FMINNM Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<float> MinNumber(Vector128<float> left, Vector128<float> right) => MinNumber(left, right);

        /// <summary>
        /// float64x1_t vminnm_f64 (float64x1_t a, float64x1_t b)
        ///   A32: VMINNM.F64 Dd, Dn, Dm
        ///   A64: FMINNM Dd, Dn, Dm
        /// </summary>
        public static Vector64<double> MinNumberScalar(Vector64<double> left, Vector64<double> right) => MinNumberScalar(left, right);

        /// <summary>
        /// float32_t vminnms_f32 (float32_t a, float32_t b)
        ///   A32: VMINNM.F32 Sd, Sn, Sm
        ///   A64: FMINNM Sd, Sn, Sm
        /// </summary>
        public static Vector64<float> MinNumberScalar(Vector64<float> left, Vector64<float> right) => MinNumberScalar(left, right);

        /// <summary>
        /// uint8x8_t vpmin_u8 (uint8x8_t a, uint8x8_t b)
        ///   A32: VPMIN.U8 Dd, Dn, Dm
        ///   A64: UMINP Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<byte> MinPairwise(Vector64<byte> left, Vector64<byte> right) => MinPairwise(left, right);

        /// <summary>
        /// int16x4_t vpmin_s16 (int16x4_t a, int16x4_t b)
        ///   A32: VPMIN.S16 Dd, Dn, Dm
        ///   A64: SMINP Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<short> MinPairwise(Vector64<short> left, Vector64<short> right) => MinPairwise(left, right);

        /// <summary>
        /// int32x2_t vpmin_s32 (int32x2_t a, int32x2_t b)
        ///   A32: VPMIN.S32 Dd, Dn, Dm
        ///   A64: SMINP Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<int> MinPairwise(Vector64<int> left, Vector64<int> right) => MinPairwise(left, right);

        /// <summary>
        /// int8x8_t vpmin_s8 (int8x8_t a, int8x8_t b)
        ///   A32: VPMIN.S8 Dd, Dn, Dm
        ///   A64: SMINP Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<sbyte> MinPairwise(Vector64<sbyte> left, Vector64<sbyte> right) => MinPairwise(left, right);

        /// <summary>
        /// float32x2_t vpmin_f32 (float32x2_t a, float32x2_t b)
        ///   A32: VPMIN.F32 Dd, Dn, Dm
        ///   A64: FMINP Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<float> MinPairwise(Vector64<float> left, Vector64<float> right) => MinPairwise(left, right);

        /// <summary>
        /// uint16x4_t vpmin_u16 (uint16x4_t a, uint16x4_t b)
        ///   A32: VPMIN.U16 Dd, Dn, Dm
        ///   A64: UMINP Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<ushort> MinPairwise(Vector64<ushort> left, Vector64<ushort> right) => MinPairwise(left, right);

        /// <summary>
        /// uint32x2_t vpmin_u32 (uint32x2_t a, uint32x2_t b)
        ///   A32: VPMIN.U32 Dd, Dn, Dm
        ///   A64: UMINP Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<uint> MinPairwise(Vector64<uint> left, Vector64<uint> right) => MinPairwise(left, right);
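        // Illustrative sketch (hypothetical values): Min follows the usual NEON FMIN rule
        // (a NaN input propagates), while MinNumber implements IEEE 754 minNum and prefers
        // the numeric operand over a quiet NaN:
        //
        //     Vector64<float> x = Vector64.Create(float.NaN, 2f);
        //     Vector64<float> y = Vector64.Create(1f, 5f);
        //     AdvSimd.Min(x, y);         // <NaN, 2>
        //     AdvSimd.MinNumber(x, y);   // <1,   2>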
        /// <summary>
        /// uint8x8_t vmul_u8 (uint8x8_t a, uint8x8_t b)
        ///   A32: VMUL.I8 Dd, Dn, Dm
        ///   A64: MUL Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<byte> Multiply(Vector64<byte> left, Vector64<byte> right) => Multiply(left, right);

        /// <summary>
        /// int16x4_t vmul_s16 (int16x4_t a, int16x4_t b)
        ///   A32: VMUL.I16 Dd, Dn, Dm
        ///   A64: MUL Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<short> Multiply(Vector64<short> left, Vector64<short> right) => Multiply(left, right);

        /// <summary>
        /// int32x2_t vmul_s32 (int32x2_t a, int32x2_t b)
        ///   A32: VMUL.I32 Dd, Dn, Dm
        ///   A64: MUL Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<int> Multiply(Vector64<int> left, Vector64<int> right) => Multiply(left, right);

        /// <summary>
        /// int8x8_t vmul_s8 (int8x8_t a, int8x8_t b)
        ///   A32: VMUL.I8 Dd, Dn, Dm
        ///   A64: MUL Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<sbyte> Multiply(Vector64<sbyte> left, Vector64<sbyte> right) => Multiply(left, right);

        /// <summary>
        /// float32x2_t vmul_f32 (float32x2_t a, float32x2_t b)
        ///   A32: VMUL.F32 Dd, Dn, Dm
        ///   A64: FMUL Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<float> Multiply(Vector64<float> left, Vector64<float> right) => Multiply(left, right);

        /// <summary>
        /// uint16x4_t vmul_u16 (uint16x4_t a, uint16x4_t b)
        ///   A32: VMUL.I16 Dd, Dn, Dm
        ///   A64: MUL Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<ushort> Multiply(Vector64<ushort> left, Vector64<ushort> right) => Multiply(left, right);

        /// <summary>
        /// uint32x2_t vmul_u32 (uint32x2_t a, uint32x2_t b)
        ///   A32: VMUL.I32 Dd, Dn, Dm
        ///   A64: MUL Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<uint> Multiply(Vector64<uint> left, Vector64<uint> right) => Multiply(left, right);

        /// <summary>
        /// uint8x16_t vmulq_u8 (uint8x16_t a, uint8x16_t b)
        ///   A32: VMUL.I8 Qd, Qn, Qm
        ///   A64: MUL Vd.16B, Vn.16B, Vm.16B
        /// </summary>
        public static Vector128<byte> Multiply(Vector128<byte> left, Vector128<byte> right) => Multiply(left, right);

        /// <summary>
        /// int16x8_t vmulq_s16 (int16x8_t a, int16x8_t b)
        ///   A32: VMUL.I16 Qd, Qn, Qm
        ///   A64: MUL Vd.8H, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<short> Multiply(Vector128<short> left, Vector128<short> right) => Multiply(left, right);

        /// <summary>
        /// int32x4_t vmulq_s32 (int32x4_t a, int32x4_t b)
        ///   A32: VMUL.I32 Qd, Qn, Qm
        ///   A64: MUL Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<int> Multiply(Vector128<int> left, Vector128<int> right) => Multiply(left, right);

        /// <summary>
        /// int8x16_t vmulq_s8 (int8x16_t a, int8x16_t b)
        ///   A32: VMUL.I8 Qd, Qn, Qm
        ///   A64: MUL Vd.16B, Vn.16B, Vm.16B
        /// </summary>
        public static Vector128<sbyte> Multiply(Vector128<sbyte> left, Vector128<sbyte> right) => Multiply(left, right);

        /// <summary>
        /// float32x4_t vmulq_f32 (float32x4_t a, float32x4_t b)
        ///   A32: VMUL.F32 Qd, Qn, Qm
        ///   A64: FMUL Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<float> Multiply(Vector128<float> left, Vector128<float> right) => Multiply(left, right);

        /// <summary>
        /// uint16x8_t vmulq_u16 (uint16x8_t a, uint16x8_t b)
        ///   A32: VMUL.I16 Qd, Qn, Qm
        ///   A64: MUL Vd.8H, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<ushort> Multiply(Vector128<ushort> left, Vector128<ushort> right) => Multiply(left, right);

        /// <summary>
        /// uint32x4_t vmulq_u32 (uint32x4_t a, uint32x4_t b)
        ///   A32: VMUL.I32 Qd, Qn, Qm
        ///   A64: MUL Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<uint> Multiply(Vector128<uint> left, Vector128<uint> right) => Multiply(left, right);

        /// <summary>
        /// uint8x8_t vmla_u8 (uint8x8_t a, uint8x8_t b, uint8x8_t c)
        ///   A32: VMLA.I8 Dd, Dn, Dm
        ///   A64: MLA Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<byte> MultiplyAdd(Vector64<byte> addend, Vector64<byte> left, Vector64<byte> right) => MultiplyAdd(addend, left, right);

        /// <summary>
        /// int16x4_t vmla_s16 (int16x4_t a, int16x4_t b, int16x4_t c)
        ///   A32: VMLA.I16 Dd, Dn, Dm
        ///   A64: MLA Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<short> MultiplyAdd(Vector64<short> addend, Vector64<short> left, Vector64<short> right) => MultiplyAdd(addend, left, right);

        /// <summary>
        /// int32x2_t vmla_s32 (int32x2_t a, int32x2_t b, int32x2_t c)
        ///   A32: VMLA.I32 Dd, Dn, Dm
        ///   A64: MLA Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<int> MultiplyAdd(Vector64<int> addend, Vector64<int> left, Vector64<int> right) => MultiplyAdd(addend, left, right);

        /// <summary>
        /// int8x8_t vmla_s8 (int8x8_t a, int8x8_t b, int8x8_t c)
        ///   A32: VMLA.I8 Dd, Dn, Dm
        ///   A64: MLA Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<sbyte> MultiplyAdd(Vector64<sbyte> addend, Vector64<sbyte> left, Vector64<sbyte> right) => MultiplyAdd(addend, left, right);

        /// <summary>
        /// uint16x4_t vmla_u16 (uint16x4_t a, uint16x4_t b, uint16x4_t c)
        ///   A32: VMLA.I16 Dd, Dn, Dm
        ///   A64: MLA Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<ushort> MultiplyAdd(Vector64<ushort> addend, Vector64<ushort> left, Vector64<ushort> right) => MultiplyAdd(addend, left, right);

        /// <summary>
        /// uint32x2_t vmla_u32 (uint32x2_t a, uint32x2_t b, uint32x2_t c)
        ///   A32: VMLA.I32 Dd, Dn, Dm
        ///   A64: MLA Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<uint> MultiplyAdd(Vector64<uint> addend, Vector64<uint> left, Vector64<uint> right) => MultiplyAdd(addend, left, right);

        /// <summary>
        /// uint8x16_t vmlaq_u8 (uint8x16_t a, uint8x16_t b, uint8x16_t c)
        ///   A32: VMLA.I8 Qd, Qn, Qm
        ///   A64: MLA Vd.16B, Vn.16B, Vm.16B
        /// </summary>
        public static Vector128<byte> MultiplyAdd(Vector128<byte> addend, Vector128<byte> left, Vector128<byte> right) => MultiplyAdd(addend, left, right);

        /// <summary>
        /// int16x8_t vmlaq_s16 (int16x8_t a, int16x8_t b, int16x8_t c)
        ///   A32: VMLA.I16 Qd, Qn, Qm
        ///   A64: MLA Vd.8H, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<short> MultiplyAdd(Vector128<short> addend, Vector128<short> left, Vector128<short> right) => MultiplyAdd(addend, left, right);

        /// <summary>
        /// int32x4_t vmlaq_s32 (int32x4_t a, int32x4_t b, int32x4_t c)
        ///   A32: VMLA.I32 Qd, Qn, Qm
        ///   A64: MLA Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<int> MultiplyAdd(Vector128<int> addend, Vector128<int> left, Vector128<int> right) => MultiplyAdd(addend, left, right);

        /// <summary>
        /// int8x16_t vmlaq_s8 (int8x16_t a, int8x16_t b, int8x16_t c)
        ///   A32: VMLA.I8 Qd, Qn, Qm
        ///   A64: MLA Vd.16B, Vn.16B, Vm.16B
        /// </summary>
        public static Vector128<sbyte> MultiplyAdd(Vector128<sbyte> addend, Vector128<sbyte> left, Vector128<sbyte> right) => MultiplyAdd(addend, left, right);

        /// <summary>
        /// uint16x8_t vmlaq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t c)
        ///   A32: VMLA.I16 Qd, Qn, Qm
        ///   A64: MLA Vd.8H, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<ushort> MultiplyAdd(Vector128<ushort> addend, Vector128<ushort> left, Vector128<ushort> right) => MultiplyAdd(addend, left, right);

        /// <summary>
        /// uint32x4_t vmlaq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t c)
        ///   A32: VMLA.I32 Qd, Qn, Qm
        ///   A64: MLA Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<uint> MultiplyAdd(Vector128<uint> addend, Vector128<uint> left, Vector128<uint> right) => MultiplyAdd(addend, left, right);
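        // Illustrative sketch (hypothetical values): MultiplyAdd computes addend + (left * right)
        // per lane in a single MLA instruction; for integer lanes this is exact (modulo wrapping):
        //
        //     Vector64<int> acc = Vector64.Create(10, 20);
        //     Vector64<int> l   = Vector64.Create(2, 3);
        //     Vector64<int> r   = Vector64.Create(4, 5);
        //     AdvSimd.MultiplyAdd(acc, l, r);   // <10 + 2*4, 20 + 3*5> = <18, 35>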
        /// <summary>
        /// int16x4_t vmla_n_s16 (int16x4_t a, int16x4_t b, int16_t c)
        ///   A32: VMLA.I16 Dd, Dn, Dm[0]
        ///   A64: MLA Vd.4H, Vn.4H, Vm.H[0]
        /// </summary>
        public static Vector64<short> MultiplyAddByScalar(Vector64<short> addend, Vector64<short> left, Vector64<short> right) => MultiplyAddByScalar(addend, left, right);

        /// <summary>
        /// int32x2_t vmla_n_s32 (int32x2_t a, int32x2_t b, int32_t c)
        ///   A32: VMLA.I32 Dd, Dn, Dm[0]
        ///   A64: MLA Vd.2S, Vn.2S, Vm.S[0]
        /// </summary>
        public static Vector64<int> MultiplyAddByScalar(Vector64<int> addend, Vector64<int> left, Vector64<int> right) => MultiplyAddByScalar(addend, left, right);

        /// <summary>
        /// uint16x4_t vmla_n_u16 (uint16x4_t a, uint16x4_t b, uint16_t c)
        ///   A32: VMLA.I16 Dd, Dn, Dm[0]
        ///   A64: MLA Vd.4H, Vn.4H, Vm.H[0]
        /// </summary>
        public static Vector64<ushort> MultiplyAddByScalar(Vector64<ushort> addend, Vector64<ushort> left, Vector64<ushort> right) => MultiplyAddByScalar(addend, left, right);

        /// <summary>
        /// uint32x2_t vmla_n_u32 (uint32x2_t a, uint32x2_t b, uint32_t c)
        ///   A32: VMLA.I32 Dd, Dn, Dm[0]
        ///   A64: MLA Vd.2S, Vn.2S, Vm.S[0]
        /// </summary>
        public static Vector64<uint> MultiplyAddByScalar(Vector64<uint> addend, Vector64<uint> left, Vector64<uint> right) => MultiplyAddByScalar(addend, left, right);

        /// <summary>
        /// int16x8_t vmlaq_n_s16 (int16x8_t a, int16x8_t b, int16_t c)
        ///   A32: VMLA.I16 Qd, Qn, Dm[0]
        ///   A64: MLA Vd.8H, Vn.8H, Vm.H[0]
        /// </summary>
        public static Vector128<short> MultiplyAddByScalar(Vector128<short> addend, Vector128<short> left, Vector64<short> right) => MultiplyAddByScalar(addend, left, right);

        /// <summary>
        /// int32x4_t vmlaq_n_s32 (int32x4_t a, int32x4_t b, int32_t c)
        ///   A32: VMLA.I32 Qd, Qn, Dm[0]
        ///   A64: MLA Vd.4S, Vn.4S, Vm.S[0]
        /// </summary>
        public static Vector128<int> MultiplyAddByScalar(Vector128<int> addend, Vector128<int> left, Vector64<int> right) => MultiplyAddByScalar(addend, left, right);

        /// <summary>
        /// uint16x8_t vmlaq_n_u16 (uint16x8_t a, uint16x8_t b, uint16_t c)
        ///   A32: VMLA.I16 Qd, Qn, Dm[0]
        ///   A64: MLA Vd.8H, Vn.8H, Vm.H[0]
        /// </summary>
        public static Vector128<ushort> MultiplyAddByScalar(Vector128<ushort> addend, Vector128<ushort> left, Vector64<ushort> right) => MultiplyAddByScalar(addend, left, right);

        /// <summary>
        /// uint32x4_t vmlaq_n_u32 (uint32x4_t a, uint32x4_t b, uint32_t c)
        ///   A32: VMLA.I32 Qd, Qn, Dm[0]
        ///   A64: MLA Vd.4S, Vn.4S, Vm.S[0]
        /// </summary>
        public static Vector128<uint> MultiplyAddByScalar(Vector128<uint> addend, Vector128<uint> left, Vector64<uint> right) => MultiplyAddByScalar(addend, left, right);

        /// <summary>
        /// int16x4_t vmla_lane_s16 (int16x4_t a, int16x4_t b, int16x4_t v, const int lane)
        ///   A32: VMLA.I16 Dd, Dn, Dm[lane]
        ///   A64: MLA Vd.4H, Vn.4H, Vm.H[lane]
        /// </summary>
        public static Vector64<short> MultiplyAddBySelectedScalar(Vector64<short> addend, Vector64<short> left, Vector64<short> right, byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex);

        /// <summary>
        /// int16x4_t vmla_laneq_s16 (int16x4_t a, int16x4_t b, int16x8_t v, const int lane)
        ///   A32: VMLA.I16 Dd, Dn, Dm[lane]
        ///   A64: MLA Vd.4H, Vn.4H, Vm.H[lane]
        /// </summary>
        public static Vector64<short> MultiplyAddBySelectedScalar(Vector64<short> addend, Vector64<short> left, Vector128<short> right, byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex);

        /// <summary>
        /// int32x2_t vmla_lane_s32 (int32x2_t a, int32x2_t b, int32x2_t v, const int lane)
        ///   A32: VMLA.I32 Dd, Dn, Dm[lane]
        ///   A64: MLA Vd.2S, Vn.2S, Vm.S[lane]
        /// </summary>
        public static Vector64<int> MultiplyAddBySelectedScalar(Vector64<int> addend, Vector64<int> left, Vector64<int> right, byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex);

        /// <summary>
        /// int32x2_t vmla_laneq_s32 (int32x2_t a, int32x2_t b, int32x4_t v, const int lane)
        ///   A32: VMLA.I32 Dd, Dn, Dm[lane]
        ///   A64: MLA Vd.2S, Vn.2S, Vm.S[lane]
        /// </summary>
        public static Vector64<int> MultiplyAddBySelectedScalar(Vector64<int> addend, Vector64<int> left, Vector128<int> right, byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex);

        /// <summary>
        /// uint16x4_t vmla_lane_u16 (uint16x4_t a, uint16x4_t b, uint16x4_t v, const int lane)
        ///   A32: VMLA.I16 Dd, Dn, Dm[lane]
        ///   A64: MLA Vd.4H, Vn.4H, Vm.H[lane]
        /// </summary>
        public static Vector64<ushort> MultiplyAddBySelectedScalar(Vector64<ushort> addend, Vector64<ushort> left, Vector64<ushort> right, byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex);

        /// <summary>
        /// uint16x4_t vmla_laneq_u16 (uint16x4_t a, uint16x4_t b, uint16x8_t v, const int lane)
        ///   A32: VMLA.I16 Dd, Dn, Dm[lane]
        ///   A64: MLA Vd.4H, Vn.4H, Vm.H[lane]
        /// </summary>
        public static Vector64<ushort> MultiplyAddBySelectedScalar(Vector64<ushort> addend, Vector64<ushort> left, Vector128<ushort> right, byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex);

        /// <summary>
        /// uint32x2_t vmla_lane_u32 (uint32x2_t a, uint32x2_t b, uint32x2_t v, const int lane)
        ///   A32: VMLA.I32 Dd, Dn, Dm[lane]
        ///   A64: MLA Vd.2S, Vn.2S, Vm.S[lane]
        /// </summary>
        public static Vector64<uint> MultiplyAddBySelectedScalar(Vector64<uint> addend, Vector64<uint> left, Vector64<uint> right, byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex);

        /// <summary>
        /// uint32x2_t vmla_laneq_u32 (uint32x2_t a, uint32x2_t b, uint32x4_t v, const int lane)
        ///   A32: VMLA.I32 Dd, Dn, Dm[lane]
        ///   A64: MLA Vd.2S, Vn.2S, Vm.S[lane]
        /// </summary>
        public static Vector64<uint> MultiplyAddBySelectedScalar(Vector64<uint> addend, Vector64<uint> left, Vector128<uint> right, byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex);

        /// <summary>
        /// int16x8_t vmlaq_lane_s16 (int16x8_t a, int16x8_t b, int16x4_t v, const int lane)
        ///   A32: VMLA.I16 Qd, Qn, Dm[lane]
        ///   A64: MLA Vd.8H, Vn.8H, Vm.H[lane]
        /// </summary>
        public static Vector128<short> MultiplyAddBySelectedScalar(Vector128<short> addend, Vector128<short> left, Vector64<short> right, byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex);

        /// <summary>
        /// int16x8_t vmlaq_laneq_s16 (int16x8_t a, int16x8_t b, int16x8_t v, const int lane)
        ///   A32: VMLA.I16 Qd, Qn, Dm[lane]
        ///   A64: MLA Vd.8H, Vn.8H, Vm.H[lane]
        /// </summary>
        public static Vector128<short> MultiplyAddBySelectedScalar(Vector128<short> addend, Vector128<short> left, Vector128<short> right, byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex);

        /// <summary>
        /// int32x4_t vmlaq_lane_s32 (int32x4_t a, int32x4_t b, int32x2_t v, const int lane)
        ///   A32: VMLA.I32 Qd, Qn, Dm[lane]
        ///   A64: MLA Vd.4S, Vn.4S, Vm.S[lane]
        /// </summary>
        public static Vector128<int> MultiplyAddBySelectedScalar(Vector128<int> addend, Vector128<int> left, Vector64<int> right, byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex);

        /// <summary>
        /// int32x4_t vmlaq_laneq_s32 (int32x4_t a, int32x4_t b, int32x4_t v, const int lane)
        ///   A32: VMLA.I32 Qd, Qn, Dm[lane]
        ///   A64: MLA Vd.4S, Vn.4S, Vm.S[lane]
        /// </summary>
        public static Vector128<int> MultiplyAddBySelectedScalar(Vector128<int> addend, Vector128<int> left, Vector128<int> right, byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex);

        /// <summary>
        /// uint16x8_t vmlaq_lane_u16 (uint16x8_t a, uint16x8_t b, uint16x4_t v, const int lane)
        ///   A32: VMLA.I16 Qd, Qn, Dm[lane]
        ///   A64: MLA Vd.8H, Vn.8H, Vm.H[lane]
        /// </summary>
        public static Vector128<ushort> MultiplyAddBySelectedScalar(Vector128<ushort> addend, Vector128<ushort> left, Vector64<ushort> right, byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex);

        /// <summary>
        /// uint16x8_t vmlaq_laneq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t v, const int lane)
        ///   A32: VMLA.I16 Qd, Qn, Dm[lane]
        ///   A64: MLA Vd.8H, Vn.8H, Vm.H[lane]
        /// </summary>
        public static Vector128<ushort> MultiplyAddBySelectedScalar(Vector128<ushort> addend, Vector128<ushort> left, Vector128<ushort> right, byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex);

        /// <summary>
        /// uint32x4_t vmlaq_lane_u32 (uint32x4_t a, uint32x4_t b, uint32x2_t v, const int lane)
        ///   A32: VMLA.I32 Qd, Qn, Dm[lane]
        ///   A64: MLA Vd.4S, Vn.4S, Vm.S[lane]
        /// </summary>
        public static Vector128<uint> MultiplyAddBySelectedScalar(Vector128<uint> addend, Vector128<uint> left, Vector64<uint> right, byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex);

        /// <summary>
        /// uint32x4_t vmlaq_laneq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t v, const int lane)
        ///   A32: VMLA.I32 Qd, Qn, Dm[lane]
        ///   A64: MLA Vd.4S, Vn.4S, Vm.S[lane]
        /// </summary>
        public static Vector128<uint> MultiplyAddBySelectedScalar(Vector128<uint> addend, Vector128<uint> left, Vector128<uint> right, byte rightIndex) => MultiplyAddBySelectedScalar(addend, left, right, rightIndex);
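        // Illustrative sketch (hypothetical values): the *BySelectedScalar overloads broadcast one
        // lane of 'right' before the multiply-accumulate, mirroring the vmla_lane_* intrinsics:
        //
        //     Vector64<short> acc = Vector64.Create((short)0, 0, 0, 0);
        //     Vector64<short> l   = Vector64.Create((short)1, 2, 3, 4);
        //     Vector64<short> r   = Vector64.Create((short)10, 20, 30, 40);
        //     AdvSimd.MultiplyAddBySelectedScalar(acc, l, r, 1);   // lane 1 of r is 20: <20, 40, 60, 80>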
        /// <summary>
        /// int16x4_t vmul_n_s16 (int16x4_t a, int16_t b)
        ///   A32: VMUL.I16 Dd, Dn, Dm[0]
        ///   A64: MUL Vd.4H, Vn.4H, Vm.H[0]
        /// </summary>
        public static Vector64<short> MultiplyByScalar(Vector64<short> left, Vector64<short> right) => MultiplyByScalar(left, right);

        /// <summary>
        /// int32x2_t vmul_n_s32 (int32x2_t a, int32_t b)
        ///   A32: VMUL.I32 Dd, Dn, Dm[0]
        ///   A64: MUL Vd.2S, Vn.2S, Vm.S[0]
        /// </summary>
        public static Vector64<int> MultiplyByScalar(Vector64<int> left, Vector64<int> right) => MultiplyByScalar(left, right);

        /// <summary>
        /// float32x2_t vmul_n_f32 (float32x2_t a, float32_t b)
        ///   A32: VMUL.F32 Dd, Dn, Dm[0]
        ///   A64: FMUL Vd.2S, Vn.2S, Vm.S[0]
        /// </summary>
        public static Vector64<float> MultiplyByScalar(Vector64<float> left, Vector64<float> right) => MultiplyByScalar(left, right);

        /// <summary>
        /// uint16x4_t vmul_n_u16 (uint16x4_t a, uint16_t b)
        ///   A32: VMUL.I16 Dd, Dn, Dm[0]
        ///   A64: MUL Vd.4H, Vn.4H, Vm.H[0]
        /// </summary>
        public static Vector64<ushort> MultiplyByScalar(Vector64<ushort> left, Vector64<ushort> right) => MultiplyByScalar(left, right);

        /// <summary>
        /// uint32x2_t vmul_n_u32 (uint32x2_t a, uint32_t b)
        ///   A32: VMUL.I32 Dd, Dn, Dm[0]
        ///   A64: MUL Vd.2S, Vn.2S, Vm.S[0]
        /// </summary>
        public static Vector64<uint> MultiplyByScalar(Vector64<uint> left, Vector64<uint> right) => MultiplyByScalar(left, right);

        /// <summary>
        /// int16x8_t vmulq_n_s16 (int16x8_t a, int16_t b)
        ///   A32: VMUL.I16 Qd, Qn, Dm[0]
        ///   A64: MUL Vd.8H, Vn.8H, Vm.H[0]
        /// </summary>
        public static Vector128<short> MultiplyByScalar(Vector128<short> left, Vector64<short> right) => MultiplyByScalar(left, right);

        /// <summary>
        /// int32x4_t vmulq_n_s32 (int32x4_t a, int32_t b)
        ///   A32: VMUL.I32 Qd, Qn, Dm[0]
        ///   A64: MUL Vd.4S, Vn.4S, Vm.S[0]
        /// </summary>
        public static Vector128<int> MultiplyByScalar(Vector128<int> left, Vector64<int> right) => MultiplyByScalar(left, right);

        /// <summary>
        /// float32x4_t vmulq_n_f32 (float32x4_t a, float32_t b)
        ///   A32: VMUL.F32 Qd, Qn, Dm[0]
        ///   A64: FMUL Vd.4S, Vn.4S, Vm.S[0]
        /// </summary>
        public static Vector128<float> MultiplyByScalar(Vector128<float> left, Vector64<float> right) => MultiplyByScalar(left, right);

        /// <summary>
        /// uint16x8_t vmulq_n_u16 (uint16x8_t a, uint16_t b)
        ///   A32: VMUL.I16 Qd, Qn, Dm[0]
        ///   A64: MUL Vd.8H, Vn.8H, Vm.H[0]
        /// </summary>
        public static Vector128<ushort> MultiplyByScalar(Vector128<ushort> left, Vector64<ushort> right) => MultiplyByScalar(left, right);

        /// <summary>
        /// uint32x4_t vmulq_n_u32 (uint32x4_t a, uint32_t b)
        ///   A32: VMUL.I32 Qd, Qn, Dm[0]
        ///   A64: MUL Vd.4S, Vn.4S, Vm.S[0]
        /// </summary>
        public static Vector128<uint> MultiplyByScalar(Vector128<uint> left, Vector64<uint> right) => MultiplyByScalar(left, right);

        /// <summary>
        /// int16x4_t vmul_lane_s16 (int16x4_t a, int16x4_t v, const int lane)
        ///   A32: VMUL.I16 Dd, Dn, Dm[lane]
        ///   A64: MUL Vd.4H, Vn.4H, Vm.H[lane]
        /// </summary>
        public static Vector64<short> MultiplyBySelectedScalar(Vector64<short> left, Vector64<short> right, byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);

        /// <summary>
        /// int16x4_t vmul_laneq_s16 (int16x4_t a, int16x8_t v, const int lane)
        ///   A32: VMUL.I16 Dd, Dn, Dm[lane]
        ///   A64: MUL Vd.4H, Vn.4H, Vm.H[lane]
        /// </summary>
        public static Vector64<short> MultiplyBySelectedScalar(Vector64<short> left, Vector128<short> right, byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);

        /// <summary>
        /// int32x2_t vmul_lane_s32 (int32x2_t a, int32x2_t v, const int lane)
        ///   A32: VMUL.I32 Dd, Dn, Dm[lane]
        ///   A64: MUL Vd.2S, Vn.2S, Vm.S[lane]
        /// </summary>
        public static Vector64<int> MultiplyBySelectedScalar(Vector64<int> left, Vector64<int> right, byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);

        /// <summary>
        /// int32x2_t vmul_laneq_s32 (int32x2_t a, int32x4_t v, const int lane)
        ///   A32: VMUL.I32 Dd, Dn, Dm[lane]
        ///   A64: MUL Vd.2S, Vn.2S, Vm.S[lane]
        /// </summary>
        public static Vector64<int> MultiplyBySelectedScalar(Vector64<int> left, Vector128<int> right, byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);

        /// <summary>
        /// float32x2_t vmul_lane_f32 (float32x2_t a, float32x2_t v, const int lane)
        ///   A32: VMUL.F32 Dd, Dn, Dm[lane]
        ///   A64: FMUL Vd.2S, Vn.2S, Vm.S[lane]
        /// </summary>
        public static Vector64<float> MultiplyBySelectedScalar(Vector64<float> left, Vector64<float> right, byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);

        /// <summary>
        /// float32x2_t vmul_laneq_f32 (float32x2_t a, float32x4_t v, const int lane)
        ///   A32: VMUL.F32 Dd, Dn, Dm[lane]
        ///   A64: FMUL Vd.2S, Vn.2S, Vm.S[lane]
        /// </summary>
        public static Vector64<float> MultiplyBySelectedScalar(Vector64<float> left, Vector128<float> right, byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);

        /// <summary>
        /// uint16x4_t vmul_lane_u16 (uint16x4_t a, uint16x4_t v, const int lane)
        ///   A32: VMUL.I16 Dd, Dn, Dm[lane]
        ///   A64: MUL Vd.4H, Vn.4H, Vm.H[lane]
        /// </summary>
        public static Vector64<ushort> MultiplyBySelectedScalar(Vector64<ushort> left, Vector64<ushort> right, byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);

        /// <summary>
        /// uint16x4_t vmul_laneq_u16 (uint16x4_t a, uint16x8_t v, const int lane)
        ///   A32: VMUL.I16 Dd, Dn, Dm[lane]
        ///   A64: MUL Vd.4H, Vn.4H, Vm.H[lane]
        /// </summary>
        public static Vector64<ushort> MultiplyBySelectedScalar(Vector64<ushort> left, Vector128<ushort> right, byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);

        /// <summary>
        /// uint32x2_t vmul_lane_u32 (uint32x2_t a, uint32x2_t v, const int lane)
        ///   A32: VMUL.I32 Dd, Dn, Dm[lane]
        ///   A64: MUL Vd.2S, Vn.2S, Vm.S[lane]
        /// </summary>
        public static Vector64<uint> MultiplyBySelectedScalar(Vector64<uint> left, Vector64<uint> right, byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);

        /// <summary>
        /// uint32x2_t vmul_laneq_u32 (uint32x2_t a, uint32x4_t v, const int lane)
        ///   A32: VMUL.I32 Dd, Dn, Dm[lane]
        ///   A64: MUL Vd.2S, Vn.2S, Vm.S[lane]
        /// </summary>
        public static Vector64<uint> MultiplyBySelectedScalar(Vector64<uint> left, Vector128<uint> right, byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);

        /// <summary>
        /// int16x8_t vmulq_lane_s16 (int16x8_t a, int16x4_t v, const int lane)
        ///   A32: VMUL.I16 Qd, Qn, Dm[lane]
        ///   A64: MUL Vd.8H, Vn.8H, Vm.H[lane]
        /// </summary>
        public static Vector128<short> MultiplyBySelectedScalar(Vector128<short> left, Vector64<short> right, byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);

        /// <summary>
        /// int16x8_t vmulq_laneq_s16 (int16x8_t a, int16x8_t v, const int lane)
        ///   A32: VMUL.I16 Qd, Qn, Dm[lane]
        ///   A64: MUL Vd.8H, Vn.8H, Vm.H[lane]
        /// </summary>
        public static Vector128<short> MultiplyBySelectedScalar(Vector128<short> left, Vector128<short> right, byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);

        /// <summary>
        /// int32x4_t vmulq_lane_s32 (int32x4_t a, int32x2_t v, const int lane)
        ///   A32: VMUL.I32 Qd, Qn, Dm[lane]
        ///   A64: MUL Vd.4S, Vn.4S, Vm.S[lane]
        /// </summary>
        public static Vector128<int> MultiplyBySelectedScalar(Vector128<int> left, Vector64<int> right, byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);

        /// <summary>
        /// int32x4_t vmulq_laneq_s32 (int32x4_t a, int32x4_t v, const int lane)
        ///   A32: VMUL.I32 Qd, Qn, Dm[lane]
        ///   A64: MUL Vd.4S, Vn.4S, Vm.S[lane]
        /// </summary>
        public static Vector128<int> MultiplyBySelectedScalar(Vector128<int> left, Vector128<int> right, byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);

        /// <summary>
        /// float32x4_t vmulq_lane_f32 (float32x4_t a, float32x2_t v, const int lane)
        ///   A32: VMUL.F32 Qd, Qn, Dm[lane]
        ///   A64: FMUL Vd.4S, Vn.4S, Vm.S[lane]
        /// </summary>
        public static Vector128<float> MultiplyBySelectedScalar(Vector128<float> left, Vector64<float> right, byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);

        /// <summary>
        /// float32x4_t vmulq_laneq_f32 (float32x4_t a, float32x4_t v, const int lane)
        ///   A32: VMUL.F32 Qd, Qn, Dm[lane]
        ///   A64: FMUL Vd.4S, Vn.4S, Vm.S[lane]
        /// </summary>
        public static Vector128<float> MultiplyBySelectedScalar(Vector128<float> left, Vector128<float> right, byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);

        /// <summary>
        /// uint16x8_t vmulq_lane_u16 (uint16x8_t a, uint16x4_t v, const int lane)
        ///   A32: VMUL.I16 Qd, Qn, Dm[lane]
        ///   A64: MUL Vd.8H, Vn.8H, Vm.H[lane]
        /// </summary>
        public static Vector128<ushort> MultiplyBySelectedScalar(Vector128<ushort> left, Vector64<ushort> right, byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);

        /// <summary>
        /// uint16x8_t vmulq_laneq_u16 (uint16x8_t a, uint16x8_t v, const int lane)
        ///   A32: VMUL.I16 Qd, Qn, Dm[lane]
        ///   A64: MUL Vd.8H, Vn.8H, Vm.H[lane]
        /// </summary>
        public static Vector128<ushort> MultiplyBySelectedScalar(Vector128<ushort> left, Vector128<ushort> right, byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);

        /// <summary>
        /// uint32x4_t vmulq_lane_u32 (uint32x4_t a, uint32x2_t v, const int lane)
        ///   A32: VMUL.I32 Qd, Qn, Dm[lane]
        ///   A64: MUL Vd.4S, Vn.4S, Vm.S[lane]
        /// </summary>
        public static Vector128<uint> MultiplyBySelectedScalar(Vector128<uint> left, Vector64<uint> right, byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);

        /// <summary>
        /// uint32x4_t vmulq_laneq_u32 (uint32x4_t a, uint32x4_t v, const int lane)
        ///   A32: VMUL.I32 Qd, Qn, Dm[lane]
        ///   A64: MUL Vd.4S, Vn.4S, Vm.S[lane]
        /// </summary>
        public static Vector128<uint> MultiplyBySelectedScalar(Vector128<uint> left, Vector128<uint> right, byte rightIndex) => MultiplyBySelectedScalar(left, right, rightIndex);
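        // Illustrative sketch (hypothetical values): MultiplyByScalar always uses lane 0 of
        // 'right' as the scalar, whereas MultiplyBySelectedScalar lets the caller pick the lane:
        //
        //     Vector64<int> l = Vector64.Create(3, 4);
        //     Vector64<int> r = Vector64.Create(10, 100);
        //     AdvSimd.MultiplyByScalar(l, r);            // <30, 40>   (lane 0 of r)
        //     AdvSimd.MultiplyBySelectedScalar(l, r, 1); // <300, 400> (lane 1 of r)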
        /// <summary>
        /// int32x4_t vmull_lane_s16 (int16x4_t a, int16x4_t v, const int lane)
        ///   A32: VMULL.S16 Qd, Dn, Dm[lane]
        ///   A64: SMULL Vd.4S, Vn.4H, Vm.H[lane]
        /// </summary>
        public static Vector128<int> MultiplyBySelectedScalarWideningLower(Vector64<short> left, Vector64<short> right, byte rightIndex) => MultiplyBySelectedScalarWideningLower(left, right, rightIndex);

        /// <summary>
        /// int32x4_t vmull_laneq_s16 (int16x4_t a, int16x8_t v, const int lane)
        ///   A32: VMULL.S16 Qd, Dn, Dm[lane]
        ///   A64: SMULL Vd.4S, Vn.4H, Vm.H[lane]
        /// </summary>
        public static Vector128<int> MultiplyBySelectedScalarWideningLower(Vector64<short> left, Vector128<short> right, byte rightIndex) => MultiplyBySelectedScalarWideningLower(left, right, rightIndex);

        /// <summary>
        /// int64x2_t vmull_lane_s32 (int32x2_t a, int32x2_t v, const int lane)
        ///   A32: VMULL.S32 Qd, Dn, Dm[lane]
        ///   A64: SMULL Vd.2D, Vn.2S, Vm.S[lane]
        /// </summary>
        public static Vector128<long> MultiplyBySelectedScalarWideningLower(Vector64<int> left, Vector64<int> right, byte rightIndex) => MultiplyBySelectedScalarWideningLower(left, right, rightIndex);

        /// <summary>
        /// int64x2_t vmull_laneq_s32 (int32x2_t a, int32x4_t v, const int lane)
        ///   A32: VMULL.S32 Qd, Dn, Dm[lane]
        ///   A64: SMULL Vd.2D, Vn.2S, Vm.S[lane]
        /// </summary>
        public static Vector128<long> MultiplyBySelectedScalarWideningLower(Vector64<int> left, Vector128<int> right, byte rightIndex) => MultiplyBySelectedScalarWideningLower(left, right, rightIndex);

        /// <summary>
        /// uint32x4_t vmull_lane_u16 (uint16x4_t a, uint16x4_t v, const int lane)
        ///   A32: VMULL.U16 Qd, Dn, Dm[lane]
        ///   A64: UMULL Vd.4S, Vn.4H, Vm.H[lane]
        /// </summary>
        public static Vector128<uint> MultiplyBySelectedScalarWideningLower(Vector64<ushort> left, Vector64<ushort> right, byte rightIndex) => MultiplyBySelectedScalarWideningLower(left, right, rightIndex);

        /// <summary>
        /// uint32x4_t vmull_laneq_u16 (uint16x4_t a, uint16x8_t v, const int lane)
        ///   A32: VMULL.U16 Qd, Dn, Dm[lane]
        ///   A64: UMULL Vd.4S, Vn.4H, Vm.H[lane]
        /// </summary>
        public static Vector128<uint> MultiplyBySelectedScalarWideningLower(Vector64<ushort> left, Vector128<ushort> right, byte rightIndex) => MultiplyBySelectedScalarWideningLower(left, right, rightIndex);

        /// <summary>
        /// uint64x2_t vmull_lane_u32 (uint32x2_t a, uint32x2_t v, const int lane)
        ///   A32: VMULL.U32 Qd, Dn, Dm[lane]
        ///   A64: UMULL Vd.2D, Vn.2S, Vm.S[lane]
        /// </summary>
        public static Vector128<ulong> MultiplyBySelectedScalarWideningLower(Vector64<uint> left, Vector64<uint> right, byte rightIndex) => MultiplyBySelectedScalarWideningLower(left, right, rightIndex);

        /// <summary>
        /// uint64x2_t vmull_laneq_u32 (uint32x2_t a, uint32x4_t v, const int lane)
        ///   A32: VMULL.U32 Qd, Dn, Dm[lane]
        ///   A64: UMULL Vd.2D, Vn.2S, Vm.S[lane]
        /// </summary>
        public static Vector128<ulong> MultiplyBySelectedScalarWideningLower(Vector64<uint> left, Vector128<uint> right, byte rightIndex) => MultiplyBySelectedScalarWideningLower(left, right, rightIndex);

        /// <summary>
        /// int32x4_t vmlal_lane_s16 (int32x4_t a, int16x4_t b, int16x4_t v, const int lane)
        ///   A32: VMLAL.S16 Qd, Dn, Dm[lane]
        ///   A64: SMLAL Vd.4S, Vn.4H, Vm.H[lane]
        /// </summary>
        public static Vector128<int> MultiplyBySelectedScalarWideningLowerAndAdd(Vector128<int> addend, Vector64<short> left, Vector64<short> right, byte rightIndex) => MultiplyBySelectedScalarWideningLowerAndAdd(addend, left, right, rightIndex);

        /// <summary>
        /// int32x4_t vmlal_laneq_s16 (int32x4_t a, int16x4_t b, int16x8_t v, const int lane)
        ///   A32: VMLAL.S16 Qd, Dn, Dm[lane]
        ///   A64: SMLAL Vd.4S, Vn.4H, Vm.H[lane]
        /// </summary>
        public static Vector128<int> MultiplyBySelectedScalarWideningLowerAndAdd(Vector128<int> addend, Vector64<short> left, Vector128<short> right, byte rightIndex) => MultiplyBySelectedScalarWideningLowerAndAdd(addend, left, right, rightIndex);

        /// <summary>
        /// int64x2_t vmlal_lane_s32 (int64x2_t a, int32x2_t b, int32x2_t v, const int lane)
        ///   A32: VMLAL.S32 Qd, Dn, Dm[lane]
        ///   A64: SMLAL Vd.2D, Vn.2S, Vm.S[lane]
        /// </summary>
        public static Vector128<long> MultiplyBySelectedScalarWideningLowerAndAdd(Vector128<long> addend, Vector64<int> left, Vector64<int> right, byte rightIndex) => MultiplyBySelectedScalarWideningLowerAndAdd(addend, left, right, rightIndex);

        /// <summary>
        /// int64x2_t vmlal_laneq_s32 (int64x2_t a, int32x2_t b, int32x4_t v, const int lane)
        ///   A32: VMLAL.S32 Qd, Dn, Dm[lane]
        ///   A64: SMLAL Vd.2D, Vn.2S, Vm.S[lane]
        /// </summary>
        public static Vector128<long> MultiplyBySelectedScalarWideningLowerAndAdd(Vector128<long> addend, Vector64<int> left, Vector128<int> right, byte rightIndex) => MultiplyBySelectedScalarWideningLowerAndAdd(addend, left, right, rightIndex);

        /// <summary>
        /// uint32x4_t vmlal_lane_u16 (uint32x4_t a, uint16x4_t b, uint16x4_t v, const int lane)
        ///   A32: VMLAL.U16 Qd, Dn, Dm[lane]
        ///   A64: UMLAL Vd.4S, Vn.4H, Vm.H[lane]
        /// </summary>
        public static Vector128<uint> MultiplyBySelectedScalarWideningLowerAndAdd(Vector128<uint> addend, Vector64<ushort> left, Vector64<ushort> right, byte rightIndex) => MultiplyBySelectedScalarWideningLowerAndAdd(addend, left, right, rightIndex);

        /// <summary>
        /// uint32x4_t vmlal_laneq_u16 (uint32x4_t a, uint16x4_t b, uint16x8_t v, const int lane)
        ///   A32: VMLAL.U16 Qd, Dn, Dm[lane]
        ///   A64: UMLAL Vd.4S, Vn.4H, Vm.H[lane]
        /// </summary>
        public static Vector128<uint> MultiplyBySelectedScalarWideningLowerAndAdd(Vector128<uint> addend, Vector64<ushort> left, Vector128<ushort> right, byte rightIndex) => MultiplyBySelectedScalarWideningLowerAndAdd(addend, left, right, rightIndex);

        /// <summary>
        /// uint64x2_t vmlal_lane_u32 (uint64x2_t a, uint32x2_t b, uint32x2_t v, const int lane)
        ///   A32: VMLAL.U32 Qd, Dn, Dm[lane]
        ///   A64: UMLAL Vd.2D, Vn.2S, Vm.S[lane]
        /// </summary>
        public static Vector128<ulong> MultiplyBySelectedScalarWideningLowerAndAdd(Vector128<ulong> addend, Vector64<uint> left, Vector64<uint> right, byte rightIndex) => MultiplyBySelectedScalarWideningLowerAndAdd(addend, left, right, rightIndex);

        /// <summary>
        /// uint64x2_t vmlal_laneq_u32 (uint64x2_t a, uint32x2_t b, uint32x4_t v, const int lane)
        ///   A32: VMLAL.U32 Qd, Dn, Dm[lane]
        ///   A64: UMLAL Vd.2D, Vn.2S, Vm.S[lane]
        /// </summary>
        public static Vector128<ulong> MultiplyBySelectedScalarWideningLowerAndAdd(Vector128<ulong> addend, Vector64<uint> left, Vector128<uint> right, byte rightIndex) => MultiplyBySelectedScalarWideningLowerAndAdd(addend, left, right, rightIndex);
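        // Illustrative sketch (hypothetical values): the widening forms multiply narrow lanes into
        // double-width result lanes, so a 16-bit product that overflows 16 bits stays exact:
        //
        //     Vector64<short> l = Vector64.Create((short)1000, 2000, 3000, 4000);
        //     Vector64<short> r = Vector64.Create((short)100, 0, 0, 0);
        //     AdvSimd.MultiplyBySelectedScalarWideningLower(l, r, 0);
        //     // Vector128<int>: <100000, 200000, 300000, 400000>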
/// <summary>
/// uint64x2_t vmlal_lane_u32 (uint64x2_t a, uint32x2_t b, uint32x2_t v, const int lane)
/// A32: VMLAL.U32 Qd, Dn, Dm[lane]
/// A64: UMLAL Vd.2D, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector128<ulong> MultiplyBySelectedScalarWideningLowerAndAdd(Vector128<ulong> addend, Vector64<uint> left, Vector64<uint> right, byte rightIndex) => MultiplyBySelectedScalarWideningLowerAndAdd(addend, left, right, rightIndex);
/// <summary>
/// uint64x2_t vmlal_laneq_u32 (uint64x2_t a, uint32x2_t b, uint32x4_t v, const int lane)
/// A32: VMLAL.U32 Qd, Dn, Dm[lane]
/// A64: UMLAL Vd.2D, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector128<ulong> MultiplyBySelectedScalarWideningLowerAndAdd(Vector128<ulong> addend, Vector64<uint> left, Vector128<uint> right, byte rightIndex) => MultiplyBySelectedScalarWideningLowerAndAdd(addend, left, right, rightIndex);
/// <summary>
/// int32x4_t vmlsl_lane_s16 (int32x4_t a, int16x4_t b, int16x4_t v, const int lane)
/// A32: VMLSL.S16 Qd, Dn, Dm[lane]
/// A64: SMLSL Vd.4S, Vn.4H, Vm.H[lane]
/// </summary>
public static Vector128<int> MultiplyBySelectedScalarWideningLowerAndSubtract(Vector128<int> minuend, Vector64<short> left, Vector64<short> right, byte rightIndex) => MultiplyBySelectedScalarWideningLowerAndSubtract(minuend, left, right, rightIndex);
/// <summary>
/// int32x4_t vmlsl_laneq_s16 (int32x4_t a, int16x4_t b, int16x8_t v, const int lane)
/// A32: VMLSL.S16 Qd, Dn, Dm[lane]
/// A64: SMLSL Vd.4S, Vn.4H, Vm.H[lane]
/// </summary>
public static Vector128<int> MultiplyBySelectedScalarWideningLowerAndSubtract(Vector128<int> minuend, Vector64<short> left, Vector128<short> right, byte rightIndex) => MultiplyBySelectedScalarWideningLowerAndSubtract(minuend, left, right, rightIndex);
/// <summary>
/// int64x2_t vmlsl_lane_s32 (int64x2_t a, int32x2_t b, int32x2_t v, const int lane)
/// A32: VMLSL.S32 Qd, Dn, Dm[lane]
/// A64: SMLSL Vd.2D, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector128<long> MultiplyBySelectedScalarWideningLowerAndSubtract(Vector128<long> minuend, Vector64<int> left, Vector64<int> right, byte rightIndex) => MultiplyBySelectedScalarWideningLowerAndSubtract(minuend, left, right, rightIndex);
/// <summary>
/// int64x2_t vmlsl_laneq_s32 (int64x2_t a, int32x2_t b, int32x4_t v, const int lane)
/// A32: VMLSL.S32 Qd, Dn, Dm[lane]
/// A64: SMLSL Vd.2D, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector128<long> MultiplyBySelectedScalarWideningLowerAndSubtract(Vector128<long> minuend, Vector64<int> left, Vector128<int> right, byte rightIndex) => MultiplyBySelectedScalarWideningLowerAndSubtract(minuend, left, right, rightIndex);
/// <summary>
/// uint32x4_t vmlsl_lane_u16 (uint32x4_t a, uint16x4_t b, uint16x4_t v, const int lane)
/// A32: VMLSL.U16 Qd, Dn, Dm[lane]
/// A64: UMLSL Vd.4S, Vn.4H, Vm.H[lane]
/// </summary>
public static Vector128<uint> MultiplyBySelectedScalarWideningLowerAndSubtract(Vector128<uint> minuend, Vector64<ushort> left, Vector64<ushort> right, byte rightIndex) => MultiplyBySelectedScalarWideningLowerAndSubtract(minuend, left, right, rightIndex);
/// <summary>
/// uint32x4_t vmlsl_laneq_u16 (uint32x4_t a, uint16x4_t b, uint16x8_t v, const int lane)
/// A32: VMLSL.U16 Qd, Dn, Dm[lane]
/// A64: UMLSL Vd.4S, Vn.4H, Vm.H[lane]
/// </summary>
public static Vector128<uint> MultiplyBySelectedScalarWideningLowerAndSubtract(Vector128<uint> minuend, Vector64<ushort> left, Vector128<ushort> right,
byte rightIndex) => MultiplyBySelectedScalarWideningLowerAndSubtract(minuend, left, right, rightIndex); /// <summary> /// uint64x2_t vmlsl_lane_u32 (uint64x2_t a, uint32x2_t b, uint32x2_t v, const int lane) /// A32: VMLSL.U32 Qd, Dn, Dm[lane] /// A64: UMLSL Vd.2D, Vn.2S, Vm.S[lane] /// </summary> public static Vector128<ulong> MultiplyBySelectedScalarWideningLowerAndSubtract(Vector128<ulong> minuend, Vector64<uint> left, Vector64<uint> right, byte rightIndex) => MultiplyBySelectedScalarWideningLowerAndSubtract(minuend, left, right, rightIndex); /// <summary> /// uint64x2_t vmlsl_laneq_u32 (uint64x2_t a, uint32x2_t b, uint32x4_t v, const int lane) /// A32: VMLSL.U32 Qd, Dn, Dm[lane] /// A64: UMLSL Vd.2D, Vn.2S, Vm.S[lane] /// </summary> public static Vector128<ulong> MultiplyBySelectedScalarWideningLowerAndSubtract(Vector128<ulong> minuend, Vector64<uint> left, Vector128<uint> right, byte rightIndex) => MultiplyBySelectedScalarWideningLowerAndSubtract(minuend, left, right, rightIndex); /// <summary> /// int32x4_t vmull_high_lane_s16 (int16x8_t a, int16x4_t v, const int lane) /// A32: VMULL.S16 Qd, Dn+1, Dm[lane] /// A64: SMULL2 Vd.4S, Vn.8H, Vm.H[lane] /// </summary> public static Vector128<int> MultiplyBySelectedScalarWideningUpper(Vector128<short> left, Vector64<short> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpper(left, right, rightIndex); /// <summary> /// int32x4_t vmull_high_laneq_s16 (int16x8_t a, int16x8_t v, const int lane) /// A32: VMULL.S16 Qd, Dn+1, Dm[lane] /// A64: SMULL2 Vd.4S, Vn.8H, Vm.H[lane] /// </summary> public static Vector128<int> MultiplyBySelectedScalarWideningUpper(Vector128<short> left, Vector128<short> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpper(left, right, rightIndex); /// <summary> /// int64x2_t vmull_high_lane_s32 (int32x4_t a, int32x2_t v, const int lane) /// A32: VMULL.S32 Qd, Dn+1, Dm[lane] /// A64: SMULL2 Vd.2D, Vn.4S, Vm.S[lane] /// </summary> public static Vector128<long> MultiplyBySelectedScalarWideningUpper(Vector128<int> left, Vector64<int> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpper(left, right, rightIndex); /// <summary> /// int64x2_t vmull_high_laneq_s32 (int32x4_t a, int32x4_t v, const int lane) /// A32: VMULL.S32 Qd, Dn+1, Dm[lane] /// A64: SMULL2 Vd.2D, Vn.4S, Vm.S[lane] /// </summary> public static Vector128<long> MultiplyBySelectedScalarWideningUpper(Vector128<int> left, Vector128<int> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpper(left, right, rightIndex); /// <summary> /// uint32x4_t vmull_high_lane_u16 (uint16x8_t a, uint16x4_t v, const int lane) /// A32: VMULL.U16 Qd, Dn+1, Dm[lane] /// A64: UMULL2 Vd.4S, Vn.8H, Vm.H[lane] /// </summary> public static Vector128<uint> MultiplyBySelectedScalarWideningUpper(Vector128<ushort> left, Vector64<ushort> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpper(left, right, rightIndex); /// <summary> /// uint32x4_t vmull_high_laneq_u16 (uint16x8_t a, uint16x8_t v, const int lane) /// A32: VMULL.U16 Qd, Dn+1, Dm[lane] /// A64: UMULL2 Vd.4S, Vn.8H, Vm.H[lane] /// </summary> public static Vector128<uint> MultiplyBySelectedScalarWideningUpper(Vector128<ushort> left, Vector128<ushort> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpper(left, right, rightIndex); /// <summary> /// uint64x2_t vmull_high_lane_u32 (uint32x4_t a, uint32x2_t v, const int lane) /// A32: VMULL.U32 Qd, Dn+1, Dm[lane] /// A64: UMULL2 Vd.2D, Vn.4S, Vm.S[lane] /// </summary> public static Vector128<ulong> 
MultiplyBySelectedScalarWideningUpper(Vector128<uint> left, Vector64<uint> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpper(left, right, rightIndex); /// <summary> /// uint64x2_t vmull_high_laneq_u32 (uint32x4_t a, uint32x4_t v, const int lane) /// A32: VMULL.U32 Qd, Dn+1, Dm[lane] /// A64: UMULL2 Vd.2D, Vn.4S, Vm.S[lane] /// </summary> public static Vector128<ulong> MultiplyBySelectedScalarWideningUpper(Vector128<uint> left, Vector128<uint> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpper(left, right, rightIndex); /// <summary> /// int32x4_t vmlal_high_lane_s16 (int32x4_t a, int16x8_t b, int16x4_t v, const int lane) /// A32: VMLAL.S16 Qd, Dn+1, Dm[lane] /// A64: SMLAL2 Vd.4S, Vn.8H, Vm.H[lane] /// </summary> public static Vector128<int> MultiplyBySelectedScalarWideningUpperAndAdd(Vector128<int> addend, Vector128<short> left, Vector64<short> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpperAndAdd(addend, left, right, rightIndex); /// <summary> /// int32x4_t vmlal_high_laneq_s16 (int32x4_t a, int16x8_t b, int16x8_t v, const int lane) /// A32: VMLAL.S16 Qd, Dn+1, Dm[lane] /// A64: SMLAL2 Vd.4S, Vn.8H, Vm.H[lane] /// </summary> public static Vector128<int> MultiplyBySelectedScalarWideningUpperAndAdd(Vector128<int> addend, Vector128<short> left, Vector128<short> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpperAndAdd(addend, left, right, rightIndex); /// <summary> /// int64x2_t vmlal_high_lane_s32 (int64x2_t a, int32x4_t b, int32x2_t v, const int lane) /// A32: VMLAL.S32 Qd, Dn+1, Dm[lane] /// A64: SMLAL2 Vd.2D, Vn.4S, Vm.S[lane] /// </summary> public static Vector128<long> MultiplyBySelectedScalarWideningUpperAndAdd(Vector128<long> addend, Vector128<int> left, Vector64<int> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpperAndAdd(addend, left, right, rightIndex); /// <summary> /// int64x2_t vmlal_high_laneq_s32 (int64x2_t a, int32x4_t b, int32x4_t v, const int lane) /// A32: VMLAL.S32 Qd, Dn+1, Dm[lane] /// A64: SMLAL2 Vd.2D, Vn.4S, Vm.S[lane] /// </summary> public static Vector128<long> MultiplyBySelectedScalarWideningUpperAndAdd(Vector128<long> addend, Vector128<int> left, Vector128<int> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpperAndAdd(addend, left, right, rightIndex); /// <summary> /// uint32x4_t vmlal_high_lane_u16 (uint32x4_t a, uint16x8_t b, uint16x4_t v, const int lane) /// A32: VMLAL.U16 Qd, Dn+1, Dm[lane] /// A64: UMLAL2 Vd.4S, Vn.8H, Vm.H[lane] /// </summary> public static Vector128<uint> MultiplyBySelectedScalarWideningUpperAndAdd(Vector128<uint> addend, Vector128<ushort> left, Vector64<ushort> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpperAndAdd(addend, left, right, rightIndex); /// <summary> /// uint32x4_t vmlal_high_laneq_u16 (uint32x4_t a, uint16x8_t b, uint16x8_t v, const int lane) /// A32: VMLAL.U16 Qd, Dn+1, Dm[lane] /// A64: UMLAL2 Vd.4S, Vn.8H, Vm.H[lane] /// </summary> public static Vector128<uint> MultiplyBySelectedScalarWideningUpperAndAdd(Vector128<uint> addend, Vector128<ushort> left, Vector128<ushort> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpperAndAdd(addend, left, right, rightIndex); /// <summary> /// uint64x2_t vmlal_high_lane_u32 (uint64x2_t a, uint32x4_t b, uint32x2_t v, const int lane) /// A32: VMLAL.U32 Qd, Dn+1, Dm[lane] /// A64: UMLAL2 Vd.2D, Vn.4S, Vm.S[lane] /// </summary> public static Vector128<ulong> MultiplyBySelectedScalarWideningUpperAndAdd(Vector128<ulong> addend, Vector128<uint> left, Vector64<uint> right, byte 
rightIndex) => MultiplyBySelectedScalarWideningUpperAndAdd(addend, left, right, rightIndex); /// <summary> /// uint64x2_t vmlal_high_laneq_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t v, const int lane) /// A32: VMLAL.U32 Qd, Dn+1, Dm[lane] /// A64: UMLAL2 Vd.2D, Vn.4S, Vm.S[lane] /// </summary> public static Vector128<ulong> MultiplyBySelectedScalarWideningUpperAndAdd(Vector128<ulong> addend, Vector128<uint> left, Vector128<uint> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpperAndAdd(addend, left, right, rightIndex); /// <summary> /// int32x4_t vmlsl_high_lane_s16 (int32x4_t a, int16x8_t b, int16x4_t v, const int lane) /// A32: VMLSL.S16 Qd, Dn+1, Dm[lane] /// A64: SMLSL2 Vd.4S, Vn.8H, Vm.H[lane] /// </summary> public static Vector128<int> MultiplyBySelectedScalarWideningUpperAndSubtract(Vector128<int> minuend, Vector128<short> left, Vector64<short> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpperAndSubtract(minuend, left, right, rightIndex); /// <summary> /// int32x4_t vmlsl_high_laneq_s16 (int32x4_t a, int16x8_t b, int16x8_t v, const int lane) /// A32: VMLSL.S16 Qd, Dn+1, Dm[lane] /// A64: SMLSL2 Vd.4S, Vn.8H, Vm.H[lane] /// </summary> public static Vector128<int> MultiplyBySelectedScalarWideningUpperAndSubtract(Vector128<int> minuend, Vector128<short> left, Vector128<short> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpperAndSubtract(minuend, left, right, rightIndex); /// <summary> /// int64x2_t vmlsl_high_lane_s32 (int64x2_t a, int32x4_t b, int32x2_t v, const int lane) /// A32: VMLSL.S32 Qd, Dn+1, Dm[lane] /// A64: SMLSL2 Vd.2D, Vn.4S, Vm.S[lane] /// </summary> public static Vector128<long> MultiplyBySelectedScalarWideningUpperAndSubtract(Vector128<long> minuend, Vector128<int> left, Vector64<int> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpperAndSubtract(minuend, left, right, rightIndex); /// <summary> /// int64x2_t vmlsl_high_laneq_s32 (int64x2_t a, int32x4_t b, int32x4_t v, const int lane) /// A32: VMLSL.S32 Qd, Dn+1, Dm[lane] /// A64: SMLSL2 Vd.2D, Vn.4S, Vm.S[lane] /// </summary> public static Vector128<long> MultiplyBySelectedScalarWideningUpperAndSubtract(Vector128<long> minuend, Vector128<int> left, Vector128<int> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpperAndSubtract(minuend, left, right, rightIndex); /// <summary> /// uint32x4_t vmlsl_high_lane_u16 (uint32x4_t a, uint16x8_t b, uint16x4_t v, const int lane) /// A32: VMLSL.U16 Qd, Dn+1, Dm[lane] /// A64: UMLSL2 Vd.4S, Vn.8H, Vm.H[lane] /// </summary> public static Vector128<uint> MultiplyBySelectedScalarWideningUpperAndSubtract(Vector128<uint> minuend, Vector128<ushort> left, Vector64<ushort> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpperAndSubtract(minuend, left, right, rightIndex); /// <summary> /// uint32x4_t vmlsl_high_laneq_u16 (uint32x4_t a, uint16x8_t b, uint16x8_t v, const int lane) /// A32: VMLSL.U16 Qd, Dn+1, Dm[lane] /// A64: UMLSL2 Vd.4S, Vn.8H, Vm.H[lane] /// </summary> public static Vector128<uint> MultiplyBySelectedScalarWideningUpperAndSubtract(Vector128<uint> minuend, Vector128<ushort> left, Vector128<ushort> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpperAndSubtract(minuend, left, right, rightIndex); /// <summary> /// uint64x2_t vmlsl_high_lane_u32 (uint64x2_t a, uint32x4_t b, uint32x2_t v, const int lane) /// A32: VMLSL.U32 Qd, Dn+1, Dm[lane] /// A64: UMLSL2 Vd.2D, Vn.4S, Vm.S[lane] /// </summary> public static Vector128<ulong> 
MultiplyBySelectedScalarWideningUpperAndSubtract(Vector128<ulong> minuend, Vector128<uint> left, Vector64<uint> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpperAndSubtract(minuend, left, right, rightIndex);
/// <summary>
/// uint64x2_t vmlsl_high_laneq_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t v, const int lane)
/// A32: VMLSL.U32 Qd, Dn+1, Dm[lane]
/// A64: UMLSL2 Vd.2D, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<ulong> MultiplyBySelectedScalarWideningUpperAndSubtract(Vector128<ulong> minuend, Vector128<uint> left, Vector128<uint> right, byte rightIndex) => MultiplyBySelectedScalarWideningUpperAndSubtract(minuend, left, right, rightIndex);
/// <summary>
/// int16x4_t vqdmulh_n_s16 (int16x4_t a, int16_t b)
/// A32: VQDMULH.S16 Dd, Dn, Dm[0]
/// A64: SQDMULH Vd.4H, Vn.4H, Vm.H[0]
/// </summary>
public static Vector64<short> MultiplyDoublingByScalarSaturateHigh(Vector64<short> left, Vector64<short> right) => MultiplyDoublingByScalarSaturateHigh(left, right);
/// <summary>
/// int32x2_t vqdmulh_n_s32 (int32x2_t a, int32_t b)
/// A32: VQDMULH.S32 Dd, Dn, Dm[0]
/// A64: SQDMULH Vd.2S, Vn.2S, Vm.S[0]
/// </summary>
public static Vector64<int> MultiplyDoublingByScalarSaturateHigh(Vector64<int> left, Vector64<int> right) => MultiplyDoublingByScalarSaturateHigh(left, right);
/// <summary>
/// int16x8_t vqdmulhq_n_s16 (int16x8_t a, int16_t b)
/// A32: VQDMULH.S16 Qd, Qn, Dm[0]
/// A64: SQDMULH Vd.8H, Vn.8H, Vm.H[0]
/// </summary>
public static Vector128<short> MultiplyDoublingByScalarSaturateHigh(Vector128<short> left, Vector64<short> right) => MultiplyDoublingByScalarSaturateHigh(left, right);
/// <summary>
/// int32x4_t vqdmulhq_n_s32 (int32x4_t a, int32_t b)
/// A32: VQDMULH.S32 Qd, Qn, Dm[0]
/// A64: SQDMULH Vd.4S, Vn.4S, Vm.S[0]
/// </summary>
public static Vector128<int> MultiplyDoublingByScalarSaturateHigh(Vector128<int> left, Vector64<int> right) => MultiplyDoublingByScalarSaturateHigh(left, right);
/// <summary>
/// int16x4_t vqdmulh_lane_s16 (int16x4_t a, int16x4_t v, const int lane)
/// A32: VQDMULH.S16 Dd, Dn, Dm[lane]
/// A64: SQDMULH Vd.4H, Vn.4H, Vm.H[lane]
/// </summary>
public static Vector64<short> MultiplyDoublingBySelectedScalarSaturateHigh(Vector64<short> left, Vector64<short> right, byte rightIndex) => MultiplyDoublingBySelectedScalarSaturateHigh(left, right, rightIndex);
/// <summary>
/// int16x4_t vqdmulh_laneq_s16 (int16x4_t a, int16x8_t v, const int lane)
/// A32: VQDMULH.S16 Dd, Dn, Dm[lane]
/// A64: SQDMULH Vd.4H, Vn.4H, Vm.H[lane]
/// </summary>
public static Vector64<short> MultiplyDoublingBySelectedScalarSaturateHigh(Vector64<short> left, Vector128<short> right, byte rightIndex) => MultiplyDoublingBySelectedScalarSaturateHigh(left, right, rightIndex);
/// <summary>
/// int32x2_t vqdmulh_lane_s32 (int32x2_t a, int32x2_t v, const int lane)
/// A32: VQDMULH.S32 Dd, Dn, Dm[lane]
/// A64: SQDMULH Vd.2S, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector64<int> MultiplyDoublingBySelectedScalarSaturateHigh(Vector64<int> left, Vector64<int> right, byte rightIndex) => MultiplyDoublingBySelectedScalarSaturateHigh(left, right, rightIndex);
/// <summary>
/// int32x2_t vqdmulh_laneq_s32 (int32x2_t a, int32x4_t v, const int lane)
/// A32: VQDMULH.S32 Dd, Dn, Dm[lane]
/// A64: SQDMULH Vd.2S, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector64<int> MultiplyDoublingBySelectedScalarSaturateHigh(Vector64<int> left, Vector128<int> right, byte rightIndex) => MultiplyDoublingBySelectedScalarSaturateHigh(left, right, rightIndex);
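// Illustrative sketch (hypothetical helper): SQDMULH computes
// saturate((2 * a * b) >> 16) per 16-bit lane, which is exactly a Q15
// fixed-point multiply, so a whole vector can be scaled by a Q15 gain in one
// instruction. Assumes AdvSimd.IsSupported.
private static Vector128<short> ScaleQ15(Vector128<short> samples, Vector64<short> gainQ15) =>
    // Every element of samples is multiplied by gainQ15[0] in Q15 arithmetic,
    // saturating instead of wrapping on overflow (e.g. -1.0 * -1.0).
    MultiplyDoublingByScalarSaturateHigh(samples, gainQ15);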
/// <summary>
/// int16x8_t vqdmulhq_lane_s16 (int16x8_t a, int16x4_t v, const int lane)
/// A32: VQDMULH.S16 Qd, Qn, Dm[lane]
/// A64: SQDMULH Vd.8H, Vn.8H, Vm.H[lane]
/// </summary>
public static Vector128<short> MultiplyDoublingBySelectedScalarSaturateHigh(Vector128<short> left, Vector64<short> right, byte rightIndex) => MultiplyDoublingBySelectedScalarSaturateHigh(left, right, rightIndex);
/// <summary>
/// int16x8_t vqdmulhq_laneq_s16 (int16x8_t a, int16x8_t v, const int lane)
/// A32: VQDMULH.S16 Qd, Qn, Dm[lane]
/// A64: SQDMULH Vd.8H, Vn.8H, Vm.H[lane]
/// </summary>
public static Vector128<short> MultiplyDoublingBySelectedScalarSaturateHigh(Vector128<short> left, Vector128<short> right, byte rightIndex) => MultiplyDoublingBySelectedScalarSaturateHigh(left, right, rightIndex);
/// <summary>
/// int32x4_t vqdmulhq_lane_s32 (int32x4_t a, int32x2_t v, const int lane)
/// A32: VQDMULH.S32 Qd, Qn, Dm[lane]
/// A64: SQDMULH Vd.4S, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<int> MultiplyDoublingBySelectedScalarSaturateHigh(Vector128<int> left, Vector64<int> right, byte rightIndex) => MultiplyDoublingBySelectedScalarSaturateHigh(left, right, rightIndex);
/// <summary>
/// int32x4_t vqdmulhq_laneq_s32 (int32x4_t a, int32x4_t v, const int lane)
/// A32: VQDMULH.S32 Qd, Qn, Dm[lane]
/// A64: SQDMULH Vd.4S, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<int> MultiplyDoublingBySelectedScalarSaturateHigh(Vector128<int> left, Vector128<int> right, byte rightIndex) => MultiplyDoublingBySelectedScalarSaturateHigh(left, right, rightIndex);
/// <summary>
/// int16x4_t vqdmulh_s16 (int16x4_t a, int16x4_t b)
/// A32: VQDMULH.S16 Dd, Dn, Dm
/// A64: SQDMULH Vd.4H, Vn.4H, Vm.4H
/// </summary>
public static Vector64<short> MultiplyDoublingSaturateHigh(Vector64<short> left, Vector64<short> right) => MultiplyDoublingSaturateHigh(left, right);
/// <summary>
/// int32x2_t vqdmulh_s32 (int32x2_t a, int32x2_t b)
/// A32: VQDMULH.S32 Dd, Dn, Dm
/// A64: SQDMULH Vd.2S, Vn.2S, Vm.2S
/// </summary>
public static Vector64<int> MultiplyDoublingSaturateHigh(Vector64<int> left, Vector64<int> right) => MultiplyDoublingSaturateHigh(left, right);
/// <summary>
/// int16x8_t vqdmulhq_s16 (int16x8_t a, int16x8_t b)
/// A32: VQDMULH.S16 Qd, Qn, Qm
/// A64: SQDMULH Vd.8H, Vn.8H, Vm.8H
/// </summary>
public static Vector128<short> MultiplyDoublingSaturateHigh(Vector128<short> left, Vector128<short> right) => MultiplyDoublingSaturateHigh(left, right);
/// <summary>
/// int32x4_t vqdmulhq_s32 (int32x4_t a, int32x4_t b)
/// A32: VQDMULH.S32 Qd, Qn, Qm
/// A64: SQDMULH Vd.4S, Vn.4S, Vm.4S
/// </summary>
public static Vector128<int> MultiplyDoublingSaturateHigh(Vector128<int> left, Vector128<int> right) => MultiplyDoublingSaturateHigh(left, right);
/// <summary>
/// int32x4_t vqdmlal_s16 (int32x4_t a, int16x4_t b, int16x4_t c)
/// A32: VQDMLAL.S16 Qd, Dn, Dm
/// A64: SQDMLAL Vd.4S, Vn.4H, Vm.4H
/// </summary>
public static Vector128<int> MultiplyDoublingWideningLowerAndAddSaturate(Vector128<int> addend, Vector64<short> left, Vector64<short> right) => MultiplyDoublingWideningLowerAndAddSaturate(addend, left, right);
/// <summary>
/// int64x2_t vqdmlal_s32 (int64x2_t a, int32x2_t b, int32x2_t c)
/// A32: VQDMLAL.S32 Qd, Dn, Dm
/// A64: SQDMLAL Vd.2D, Vn.2S, Vm.2S
/// </summary>
public static Vector128<long> MultiplyDoublingWideningLowerAndAddSaturate(Vector128<long> addend, Vector64<int> left, Vector64<int> right) => MultiplyDoublingWideningLowerAndAddSaturate(addend, left, right);
/// <summary>
/// int32x4_t vqdmlsl_s16 (int32x4_t a, int16x4_t b, int16x4_t c)
/// A32: VQDMLSL.S16 Qd, Dn, Dm
/// A64: SQDMLSL Vd.4S, Vn.4H, Vm.4H
/// </summary>
public static Vector128<int> MultiplyDoublingWideningLowerAndSubtractSaturate(Vector128<int> minuend, Vector64<short> left, Vector64<short> right) => MultiplyDoublingWideningLowerAndSubtractSaturate(minuend, left, right);
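// Illustrative sketch (hypothetical helper): unlike the "high half" forms above,
// the vqdmlal family keeps the full doubled product, widening it to 32 bits and
// accumulating with saturation, which is how a Q15 dot product is carried in Q31
// without discarding low-order bits. Assumes AdvSimd.IsSupported.
private static Vector128<int> DotProductQ15Step(Vector128<int> accQ31, Vector64<short> a, Vector64<short> b) =>
    // accQ31[i] = saturate(accQ31[i] + 2 * a[i] * b[i])
    MultiplyDoublingWideningLowerAndAddSaturate(accQ31, a, b);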
/// <summary>
/// int64x2_t vqdmlsl_s32 (int64x2_t a, int32x2_t b, int32x2_t c)
/// A32: VQDMLSL.S32 Qd, Dn, Dm
/// A64: SQDMLSL Vd.2D, Vn.2S, Vm.2S
/// </summary>
public static Vector128<long> MultiplyDoublingWideningLowerAndSubtractSaturate(Vector128<long> minuend, Vector64<int> left, Vector64<int> right) => MultiplyDoublingWideningLowerAndSubtractSaturate(minuend, left, right);
/// <summary>
/// int32x4_t vqdmlal_n_s16 (int32x4_t a, int16x4_t b, int16_t c)
/// A32: VQDMLAL.S16 Qd, Dn, Dm[0]
/// A64: SQDMLAL Vd.4S, Vn.4H, Vm.H[0]
/// </summary>
public static Vector128<int> MultiplyDoublingWideningLowerByScalarAndAddSaturate(Vector128<int> addend, Vector64<short> left, Vector64<short> right) => MultiplyDoublingWideningLowerByScalarAndAddSaturate(addend, left, right);
/// <summary>
/// int64x2_t vqdmlal_n_s32 (int64x2_t a, int32x2_t b, int32_t c)
/// A32: VQDMLAL.S32 Qd, Dn, Dm[0]
/// A64: SQDMLAL Vd.2D, Vn.2S, Vm.S[0]
/// </summary>
public static Vector128<long> MultiplyDoublingWideningLowerByScalarAndAddSaturate(Vector128<long> addend, Vector64<int> left, Vector64<int> right) => MultiplyDoublingWideningLowerByScalarAndAddSaturate(addend, left, right);
/// <summary>
/// int32x4_t vqdmlsl_n_s16 (int32x4_t a, int16x4_t b, int16_t c)
/// A32: VQDMLSL.S16 Qd, Dn, Dm[0]
/// A64: SQDMLSL Vd.4S, Vn.4H, Vm.H[0]
/// </summary>
public static Vector128<int> MultiplyDoublingWideningLowerByScalarAndSubtractSaturate(Vector128<int> minuend, Vector64<short> left, Vector64<short> right) => MultiplyDoublingWideningLowerByScalarAndSubtractSaturate(minuend, left, right);
/// <summary>
/// int64x2_t vqdmlsl_n_s32 (int64x2_t a, int32x2_t b, int32_t c)
/// A32: VQDMLSL.S32 Qd, Dn, Dm[0]
/// A64: SQDMLSL Vd.2D, Vn.2S, Vm.S[0]
/// </summary>
public static Vector128<long> MultiplyDoublingWideningLowerByScalarAndSubtractSaturate(Vector128<long> minuend, Vector64<int> left, Vector64<int> right) => MultiplyDoublingWideningLowerByScalarAndSubtractSaturate(minuend, left, right);
/// <summary>
/// int32x4_t vqdmlal_lane_s16 (int32x4_t a, int16x4_t b, int16x4_t v, const int lane)
/// A32: VQDMLAL.S16 Qd, Dn, Dm[lane]
/// A64: SQDMLAL Vd.4S, Vn.4H, Vm.H[lane]
/// </summary>
public static Vector128<int> MultiplyDoublingWideningLowerBySelectedScalarAndAddSaturate(Vector128<int> addend, Vector64<short> left, Vector64<short> right, byte rightIndex) => MultiplyDoublingWideningLowerBySelectedScalarAndAddSaturate(addend, left, right, rightIndex);
/// <summary>
/// int32x4_t vqdmlal_laneq_s16 (int32x4_t a, int16x4_t b, int16x8_t v, const int lane)
/// A32: VQDMLAL.S16 Qd, Dn, Dm[lane]
/// A64: SQDMLAL Vd.4S, Vn.4H, Vm.H[lane]
/// </summary>
public static Vector128<int> MultiplyDoublingWideningLowerBySelectedScalarAndAddSaturate(Vector128<int> addend, Vector64<short> left, Vector128<short> right, byte rightIndex) => MultiplyDoublingWideningLowerBySelectedScalarAndAddSaturate(addend, left, right, rightIndex);
/// <summary>
/// int64x2_t vqdmlal_lane_s32 (int64x2_t a, int32x2_t b, int32x2_t v, const int lane)
/// A32: VQDMLAL.S32 Qd, Dn, Dm[lane]
/// A64: SQDMLAL Vd.2D, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector128<long>
MultiplyDoublingWideningLowerBySelectedScalarAndAddSaturate(Vector128<long> addend, Vector64<int> left, Vector64<int> right, byte rightIndex) => MultiplyDoublingWideningLowerBySelectedScalarAndAddSaturate(addend, left, right, rightIndex); /// <summary> /// int64x2_t vqdmlal_laneq_s32 (int64x2_t a, int32x2_t b, int32x4_t v, const int lane) /// A32: VQDMLAL.S32 Qd, Dn, Dm[lane] /// A64: SQDMLAL Vd.2D, Vn.2S, Vm.S[lane] /// </summary> public static Vector128<long> MultiplyDoublingWideningLowerBySelectedScalarAndAddSaturate(Vector128<long> addend, Vector64<int> left, Vector128<int> right, byte rightIndex) => MultiplyDoublingWideningLowerBySelectedScalarAndAddSaturate(addend, left, right, rightIndex); /// <summary> /// int32x4_t vqdmlsl_lane_s16 (int32x4_t a, int16x4_t b, int16x4_t v, const int lane) /// A32: VQDMLSL.S16 Qd, Dn, Dm[lane] /// A64: SQDMLSL Vd.4S, Vn.4H, Vm.H[lane] /// </summary> public static Vector128<int> MultiplyDoublingWideningLowerBySelectedScalarAndSubtractSaturate(Vector128<int> minuend, Vector64<short> left, Vector64<short> right, byte rightIndex) => MultiplyDoublingWideningLowerBySelectedScalarAndSubtractSaturate(minuend, left, right, rightIndex); /// <summary> /// int32x4_t vqdmlsl_laneq_s16 (int32x4_t a, int16x4_t b, int16x8_t v, const int lane) /// A32: VQDMLSL.S16 Qd, Dn, Dm[lane] /// A64: SQDMLSL Vd.4S, Vn.4H, Vm.H[lane] /// </summary> public static Vector128<int> MultiplyDoublingWideningLowerBySelectedScalarAndSubtractSaturate(Vector128<int> minuend, Vector64<short> left, Vector128<short> right, byte rightIndex) => MultiplyDoublingWideningLowerBySelectedScalarAndSubtractSaturate(minuend, left, right, rightIndex); /// <summary> /// int64x2_t vqdmlsl_lane_s32 (int64x2_t a, int32x2_t b, int32x2_t v, const int lane) /// A32: VQDMLSL.S32 Qd, Dn, Dm[lane] /// A64: SQDMLSL Vd.2D, Vn.2S, Vm.S[lane] /// </summary> public static Vector128<long> MultiplyDoublingWideningLowerBySelectedScalarAndSubtractSaturate(Vector128<long> minuend, Vector64<int> left, Vector64<int> right, byte rightIndex) => MultiplyDoublingWideningLowerBySelectedScalarAndSubtractSaturate(minuend, left, right, rightIndex); /// <summary> /// int64x2_t vqdmlsl_laneq_s32 (int64x2_t a, int32x2_t b, int32x4_t v, const int lane) /// A32: VQDMLSL.S32 Qd, Dn, Dm[lane] /// A64: SQDMLSL Vd.2D, Vn.2S, Vm.S[lane] /// </summary> public static Vector128<long> MultiplyDoublingWideningLowerBySelectedScalarAndSubtractSaturate(Vector128<long> minuend, Vector64<int> left, Vector128<int> right, byte rightIndex) => MultiplyDoublingWideningLowerBySelectedScalarAndSubtractSaturate(minuend, left, right, rightIndex); /// <summary> /// int32x4_t vqdmull_s16 (int16x4_t a, int16x4_t b) /// A32: VQDMULL.S16 Qd, Dn, Dm /// A64: SQDMULL Vd.4S, Vn.4H, Vm.4H /// </summary> public static Vector128<int> MultiplyDoublingWideningSaturateLower(Vector64<short> left, Vector64<short> right) => MultiplyDoublingWideningSaturateLower(left, right); /// <summary> /// int64x2_t vqdmull_s32 (int32x2_t a, int32x2_t b) /// A32: VQDMULL.S32 Qd, Dn, Dm /// A64: SQDMULL Vd.2D, Vn.2S, Vm.2S /// </summary> public static Vector128<long> MultiplyDoublingWideningSaturateLower(Vector64<int> left, Vector64<int> right) => MultiplyDoublingWideningSaturateLower(left, right); /// <summary> /// int32x4_t vqdmull_n_s16 (int16x4_t a, int16_t b) /// A32: VQDMULL.S16 Qd, Dn, Dm[0] /// A64: SQDMULL Vd.4S, Vn.4H, Vm.H[0] /// </summary> public static Vector128<int> MultiplyDoublingWideningSaturateLowerByScalar(Vector64<short> left, Vector64<short> right) => 
MultiplyDoublingWideningSaturateLowerByScalar(left, right); /// <summary> /// int64x2_t vqdmull_n_s32 (int32x2_t a, int32_t b) /// A32: VQDMULL.S32 Qd, Dn, Dm[0] /// A64: SQDMULL Vd.2D, Vn.2S, Vm.S[0] /// </summary> public static Vector128<long> MultiplyDoublingWideningSaturateLowerByScalar(Vector64<int> left, Vector64<int> right) => MultiplyDoublingWideningSaturateLowerByScalar(left, right); /// <summary> /// int32x4_t vqdmull_lane_s16 (int16x4_t a, int16x4_t v, const int lane) /// A32: VQDMULL.S16 Qd, Dn, Dm[lane] /// A64: SQDMULL Vd.4S, Vn.4H, Vm.H[lane] /// </summary> public static Vector128<int> MultiplyDoublingWideningSaturateLowerBySelectedScalar(Vector64<short> left, Vector64<short> right, byte rightIndex) => MultiplyDoublingWideningSaturateLowerBySelectedScalar(left, right, rightIndex); /// <summary> /// int32x4_t vqdmull_laneq_s16 (int16x4_t a, int16x8_t v, const int lane) /// A32: VQDMULL.S16 Qd, Dn, Dm[lane] /// A64: SQDMULL Vd.4S, Vn.4H, Vm.H[lane] /// </summary> public static Vector128<int> MultiplyDoublingWideningSaturateLowerBySelectedScalar(Vector64<short> left, Vector128<short> right, byte rightIndex) => MultiplyDoublingWideningSaturateLowerBySelectedScalar(left, right, rightIndex); /// <summary> /// int64x2_t vqdmull_lane_s32 (int32x2_t a, int32x2_t v, const int lane) /// A32: VQDMULL.S32 Qd, Dn, Dm[lane] /// A64: SQDMULL Vd.2D, Vn.2S, Vm.S[lane] /// </summary> public static Vector128<long> MultiplyDoublingWideningSaturateLowerBySelectedScalar(Vector64<int> left, Vector64<int> right, byte rightIndex) => MultiplyDoublingWideningSaturateLowerBySelectedScalar(left, right, rightIndex); /// <summary> /// int64x2_t vqdmull_laneq_s32 (int32x2_t a, int32x4_t v, const int lane) /// A32: VQDMULL.S32 Qd, Dn, Dm[lane] /// A64: SQDMULL Vd.2D, Vn.2S, Vm.S[lane] /// </summary> public static Vector128<long> MultiplyDoublingWideningSaturateLowerBySelectedScalar(Vector64<int> left, Vector128<int> right, byte rightIndex) => MultiplyDoublingWideningSaturateLowerBySelectedScalar(left, right, rightIndex); /// <summary> /// int32x4_t vqdmull_high_s16 (int16x8_t a, int16x8_t b) /// A32: VQDMULL.S16 Qd, Dn+1, Dm+1 /// A64: SQDMULL2 Vd.4S, Vn.8H, Vm.8H /// </summary> public static Vector128<int> MultiplyDoublingWideningSaturateUpper(Vector128<short> left, Vector128<short> right) => MultiplyDoublingWideningSaturateUpper(left, right); /// <summary> /// int64x2_t vqdmull_high_s32 (int32x4_t a, int32x4_t b) /// A32: VQDMULL.S32 Qd, Dn+1, Dm+1 /// A64: SQDMULL2 Vd.2D, Vn.4S, Vm.4S /// </summary> public static Vector128<long> MultiplyDoublingWideningSaturateUpper(Vector128<int> left, Vector128<int> right) => MultiplyDoublingWideningSaturateUpper(left, right); /// <summary> /// int32x4_t vqdmull_high_n_s16 (int16x8_t a, int16_t b) /// A32: VQDMULL.S16 Qd, Dn+1, Dm[0] /// A64: SQDMULL2 Vd.4S, Vn.8H, Vm.H[0] /// </summary> public static Vector128<int> MultiplyDoublingWideningSaturateUpperByScalar(Vector128<short> left, Vector64<short> right) => MultiplyDoublingWideningSaturateUpperByScalar(left, right); /// <summary> /// int64x2_t vqdmull_high_n_s32 (int32x4_t a, int32_t b) /// A32: VQDMULL.S32 Qd, Dn+1, Dm[0] /// A64: SQDMULL2 Vd.2D, Vn.4S, Vm.S[0] /// </summary> public static Vector128<long> MultiplyDoublingWideningSaturateUpperByScalar(Vector128<int> left, Vector64<int> right) => MultiplyDoublingWideningSaturateUpperByScalar(left, right); /// <summary> /// int32x4_t vqdmull_high_lane_s16 (int16x8_t a, int16x4_t v, const int lane) /// A32: VQDMULL.S16 Qd, Dn+1, Dm[lane] /// A64: SQDMULL2 Vd.4S, Vn.8H, 
Vm.H[lane] /// </summary> public static Vector128<int> MultiplyDoublingWideningSaturateUpperBySelectedScalar(Vector128<short> left, Vector64<short> right, byte rightIndex) => MultiplyDoublingWideningSaturateUpperBySelectedScalar(left, right, rightIndex); /// <summary> /// int32x4_t vqdmull_high_laneq_s16 (int16x8_t a, int16x8_t v, const int lane) /// A32: VQDMULL.S16 Qd, Dn+1, Dm[lane] /// A64: SQDMULL2 Vd.4S, Vn.8H, Vm.H[lane] /// </summary> public static Vector128<int> MultiplyDoublingWideningSaturateUpperBySelectedScalar(Vector128<short> left, Vector128<short> right, byte rightIndex) => MultiplyDoublingWideningSaturateUpperBySelectedScalar(left, right, rightIndex); /// <summary> /// int64x2_t vqdmull_high_lane_s32 (int32x4_t a, int32x2_t v, const int lane) /// A32: VQDMULL.S32 Qd, Dn+1, Dm[lane] /// A64: SQDMULL2 Vd.2D, Vn.4S, Vm.S[lane] /// </summary> public static Vector128<long> MultiplyDoublingWideningSaturateUpperBySelectedScalar(Vector128<int> left, Vector64<int> right, byte rightIndex) => MultiplyDoublingWideningSaturateUpperBySelectedScalar(left, right, rightIndex); /// <summary> /// int64x2_t vqdmull_high_laneq_s32 (int32x4_t a, int32x4_t v, const int lane) /// A32: VQDMULL.S32 Qd, Dn+1, Dm[lane] /// A64: SQDMULL2 Vd.2D, Vn.4S, Vm.S[lane] /// </summary> public static Vector128<long> MultiplyDoublingWideningSaturateUpperBySelectedScalar(Vector128<int> left, Vector128<int> right, byte rightIndex) => MultiplyDoublingWideningSaturateUpperBySelectedScalar(left, right, rightIndex); /// <summary> /// int32x4_t vqdmlal_high_s16 (int32x4_t a, int16x8_t b, int16x8_t c) /// A32: VQDMLAL.S16 Qd, Dn+1, Dm+1 /// A64: SQDMLAL2 Vd.4S, Vn.8H, Vm.8H /// </summary> public static Vector128<int> MultiplyDoublingWideningUpperAndAddSaturate(Vector128<int> addend, Vector128<short> left, Vector128<short> right) => MultiplyDoublingWideningUpperAndAddSaturate(addend, left, right); /// <summary> /// int64x2_t vqdmlal_high_s32 (int64x2_t a, int32x4_t b, int32x4_t c) /// A32: VQDMLAL.S32 Qd, Dn+1, Dm+1 /// A64: SQDMLAL2 Vd.2D, Vn.4S, Vm.4S /// </summary> public static Vector128<long> MultiplyDoublingWideningUpperAndAddSaturate(Vector128<long> addend, Vector128<int> left, Vector128<int> right) => MultiplyDoublingWideningUpperAndAddSaturate(addend, left, right); /// <summary> /// int32x4_t vqdmlsl_high_s16 (int32x4_t a, int16x8_t b, int16x8_t c) /// A32: VQDMLSL.S16 Qd, Dn+1, Dm+1 /// A64: SQDMLSL2 Vd.4S, Vn.8H, Vm.8H /// </summary> public static Vector128<int> MultiplyDoublingWideningUpperAndSubtractSaturate(Vector128<int> minuend, Vector128<short> left, Vector128<short> right) => MultiplyDoublingWideningUpperAndSubtractSaturate(minuend, left, right); /// <summary> /// int64x2_t vqdmlsl_high_s32 (int64x2_t a, int32x4_t b, int32x4_t c) /// A32: VQDMLSL.S32 Qd, Dn+1, Dm+1 /// A64: SQDMLSL2 Vd.2D, Vn.4S, Vm.4S /// </summary> public static Vector128<long> MultiplyDoublingWideningUpperAndSubtractSaturate(Vector128<long> minuend, Vector128<int> left, Vector128<int> right) => MultiplyDoublingWideningUpperAndSubtractSaturate(minuend, left, right); /// <summary> /// int32x4_t vqdmlal_high_n_s16 (int32x4_t a, int16x8_t b, int16_t c) /// A32: VQDMLAL.S16 Qd, Dn+1, Dm[0] /// A64: SQDMLAL2 Vd.4S, Vn.8H, Vm.H[0] /// </summary> public static Vector128<int> MultiplyDoublingWideningUpperByScalarAndAddSaturate(Vector128<int> addend, Vector128<short> left, Vector64<short> right) => MultiplyDoublingWideningUpperByScalarAndAddSaturate(addend, left, right); /// <summary> /// int64x2_t vqdmlal_high_n_s32 (int64x2_t a, int32x4_t b, 
int32_t c) /// A32: VQDMLAL.S32 Qd, Dn+1, Dm[0] /// A64: SQDMLAL2 Vd.2D, Vn.4S, Vm.S[0] /// </summary> public static Vector128<long> MultiplyDoublingWideningUpperByScalarAndAddSaturate(Vector128<long> addend, Vector128<int> left, Vector64<int> right) => MultiplyDoublingWideningUpperByScalarAndAddSaturate(addend, left, right); /// <summary> /// int32x4_t vqdmlsl_high_n_s16 (int32x4_t a, int16x8_t b, int16_t c) /// A32: VQDMLSL.S16 Qd, Dn+1, Dm[0] /// A64: SQDMLSL2 Vd.4S, Vn.8H, Vm.H[0] /// </summary> public static Vector128<int> MultiplyDoublingWideningUpperByScalarAndSubtractSaturate(Vector128<int> minuend, Vector128<short> left, Vector64<short> right) => MultiplyDoublingWideningUpperByScalarAndSubtractSaturate(minuend, left, right); /// <summary> /// int64x2_t vqdmlsl_high_n_s32 (int64x2_t a, int32x4_t b, int32_t c) /// A32: VQDMLSL.S32 Qd, Dn+1, Dm[0] /// A64: SQDMLSL2 Vd.2D, Vn.4S, Vm.S[0] /// </summary> public static Vector128<long> MultiplyDoublingWideningUpperByScalarAndSubtractSaturate(Vector128<long> minuend, Vector128<int> left, Vector64<int> right) => MultiplyDoublingWideningUpperByScalarAndSubtractSaturate(minuend, left, right); /// <summary> /// int32x4_t vqdmlal_high_lane_s16 (int32x4_t a, int16x8_t b, int16x4_t v, const int lane) /// A32: VQDMLAL.S16 Qd, Dn+1, Dm[lane] /// A64: SQDMLAL2 Vd.4S, Vn.8H, Vm.H[lane] /// </summary> public static Vector128<int> MultiplyDoublingWideningUpperBySelectedScalarAndAddSaturate(Vector128<int> addend, Vector128<short> left, Vector64<short> right, byte rightIndex) => MultiplyDoublingWideningUpperBySelectedScalarAndAddSaturate(addend, left, right, rightIndex); /// <summary> /// int32x4_t vqdmlal_high_laneq_s16 (int32x4_t a, int16x8_t b, int16x8_t v, const int lane) /// A32: VQDMLAL.S16 Qd, Dn+1, Dm[lane] /// A64: SQDMLAL2 Vd.4S, Vn.8H, Vm.H[lane] /// </summary> public static Vector128<int> MultiplyDoublingWideningUpperBySelectedScalarAndAddSaturate(Vector128<int> addend, Vector128<short> left, Vector128<short> right, byte rightIndex) => MultiplyDoublingWideningUpperBySelectedScalarAndAddSaturate(addend, left, right, rightIndex); /// <summary> /// int64x2_t vqdmlal_high_lane_s32 (int64x2_t a, int32x4_t b, int32x2_t v, const int lane) /// A32: VQDMLAL.S32 Qd, Dn+1, Dm[lane] /// A64: SQDMLAL2 Vd.2D, Vn.4S, Vm.S[lane] /// </summary> public static Vector128<long> MultiplyDoublingWideningUpperBySelectedScalarAndAddSaturate(Vector128<long> addend, Vector128<int> left, Vector64<int> right, byte rightIndex) => MultiplyDoublingWideningUpperBySelectedScalarAndAddSaturate(addend, left, right, rightIndex); /// <summary> /// int64x2_t vqdmlal_high_laneq_s32 (int64x2_t a, int32x4_t b, int32x4_t v, const int lane) /// A32: VQDMLAL.S32 Qd, Dn+1, Dm[lane] /// A64: SQDMLAL2 Vd.2D, Vn.4S, Vm.S[lane] /// </summary> public static Vector128<long> MultiplyDoublingWideningUpperBySelectedScalarAndAddSaturate(Vector128<long> addend, Vector128<int> left, Vector128<int> right, byte rightIndex) => MultiplyDoublingWideningUpperBySelectedScalarAndAddSaturate(addend, left, right, rightIndex); /// <summary> /// int32x4_t vqdmlsl_high_lane_s16 (int32x4_t a, int16x8_t b, int16x4_t v, const int lane) /// A32: VQDMLSL.S16 Qd, Dn+1, Dm[lane] /// A64: SQDMLSL2 Vd.4S, Vn.8H, Vm.H[lane] /// </summary> public static Vector128<int> MultiplyDoublingWideningUpperBySelectedScalarAndSubtractSaturate(Vector128<int> minuend, Vector128<short> left, Vector64<short> right, byte rightIndex) => MultiplyDoublingWideningUpperBySelectedScalarAndSubtractSaturate(minuend, left, right, rightIndex); /// 
<summary> /// int32x4_t vqdmlsl_high_laneq_s16 (int32x4_t a, int16x8_t b, int16x8_t v, const int lane) /// A32: VQDMLSL.S16 Qd, Dn+1, Dm[lane] /// A64: SQDMLSL2 Vd.4S, Vn.8H, Vm.H[lane] /// </summary> public static Vector128<int> MultiplyDoublingWideningUpperBySelectedScalarAndSubtractSaturate(Vector128<int> minuend, Vector128<short> left, Vector128<short> right, byte rightIndex) => MultiplyDoublingWideningUpperBySelectedScalarAndSubtractSaturate(minuend, left, right, rightIndex); /// <summary> /// int64x2_t vqdmlsl_high_lane_s32 (int64x2_t a, int32x4_t b, int32x2_t v, const int lane) /// A32: VQDMLSL.S32 Qd, Dn+1, Dm[lane] /// A64: SQDMLSL2 Vd.2D, Vn.4S, Vm.S[lane] /// </summary> public static Vector128<long> MultiplyDoublingWideningUpperBySelectedScalarAndSubtractSaturate(Vector128<long> minuend, Vector128<int> left, Vector64<int> right, byte rightIndex) => MultiplyDoublingWideningUpperBySelectedScalarAndSubtractSaturate(minuend, left, right, rightIndex); /// <summary> /// int64x2_t vqdmlsl_high_laneq_s32 (int64x2_t a, int32x4_t b, int32x4_t v, const int lane) /// A32: VQDMLSL.S32 Qd, Dn+1, Dm[lane] /// A64: SQDMLSL2 Vd.2D, Vn.4S, Vm.S[lane] /// </summary> public static Vector128<long> MultiplyDoublingWideningUpperBySelectedScalarAndSubtractSaturate(Vector128<long> minuend, Vector128<int> left, Vector128<int> right, byte rightIndex) => MultiplyDoublingWideningUpperBySelectedScalarAndSubtractSaturate(minuend, left, right, rightIndex); /// <summary> /// int16x4_t vqrdmulh_n_s16 (int16x4_t a, int16_t b) /// A32: VQRDMULH.S16 Dd, Dn, Dm[0] /// A64: SQRDMULH Vd.4H, Vn.4H, Vm.H[0] /// </summary> public static Vector64<short> MultiplyRoundedDoublingByScalarSaturateHigh(Vector64<short> left, Vector64<short> right) => MultiplyRoundedDoublingByScalarSaturateHigh(left, right); /// <summary> /// int32x2_t vqrdmulh_n_s32 (int32x2_t a, int32_t b) /// A32: VQRDMULH.S32 Dd, Dn, Dm[0] /// A64: SQRDMULH Vd.2S, Vn.2S, Vm.S[0] /// </summary> public static Vector64<int> MultiplyRoundedDoublingByScalarSaturateHigh(Vector64<int> left, Vector64<int> right) => MultiplyRoundedDoublingByScalarSaturateHigh(left, right); /// <summary> /// int16x8_t vqrdmulhq_n_s16 (int16x8_t a, int16_t b) /// A32: VQRDMULH.S16 Qd, Qn, Dm[0] /// A64: SQRDMULH Vd.8H, Vn.8H, Vm.H[0] /// </summary> public static Vector128<short> MultiplyRoundedDoublingByScalarSaturateHigh(Vector128<short> left, Vector64<short> right) => MultiplyRoundedDoublingByScalarSaturateHigh(left, right); /// <summary> /// int32x4_t vqrdmulhq_n_s32 (int32x4_t a, int32_t b) /// A32: VQRDMULH.S32 Qd, Qn, Dm[0] /// A64: SQRDMULH Vd.4S, Vn.4S, Vm.S[0] /// </summary> public static Vector128<int> MultiplyRoundedDoublingByScalarSaturateHigh(Vector128<int> left, Vector64<int> right) => MultiplyRoundedDoublingByScalarSaturateHigh(left, right); /// <summary> /// int16x4_t vqrdmulh_lane_s16 (int16x4_t a, int16x4_t v, const int lane) /// A32: VQRDMULH.S16 Dd, Dn, Dm[lane] /// A64: SQRDMULH Vd.4H, Vn.4H, Vm.H[lane] /// </summary> public static Vector64<short> MultiplyRoundedDoublingBySelectedScalarSaturateHigh(Vector64<short> left, Vector64<short> right, byte rightIndex) => MultiplyRoundedDoublingBySelectedScalarSaturateHigh(left, right, rightIndex); /// <summary> /// int16x4_t vqrdmulh_laneq_s16 (int16x4_t a, int16x8_t v, const int lane) /// A32: VQRDMULH.S16 Dd, Dn, Dm[lane] /// A64: SQRDMULH Vd.4H, Vn.4H, Vm.H[lane] /// </summary> public static Vector64<short> MultiplyRoundedDoublingBySelectedScalarSaturateHigh(Vector64<short> left, Vector128<short> right, byte rightIndex) 
=> MultiplyRoundedDoublingBySelectedScalarSaturateHigh(left, right, rightIndex);
/// <summary>
/// int32x2_t vqrdmulh_lane_s32 (int32x2_t a, int32x2_t v, const int lane)
/// A32: VQRDMULH.S32 Dd, Dn, Dm[lane]
/// A64: SQRDMULH Vd.2S, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector64<int> MultiplyRoundedDoublingBySelectedScalarSaturateHigh(Vector64<int> left, Vector64<int> right, byte rightIndex) => MultiplyRoundedDoublingBySelectedScalarSaturateHigh(left, right, rightIndex);
/// <summary>
/// int32x2_t vqrdmulh_laneq_s32 (int32x2_t a, int32x4_t v, const int lane)
/// A32: VQRDMULH.S32 Dd, Dn, Dm[lane]
/// A64: SQRDMULH Vd.2S, Vn.2S, Vm.S[lane]
/// </summary>
public static Vector64<int> MultiplyRoundedDoublingBySelectedScalarSaturateHigh(Vector64<int> left, Vector128<int> right, byte rightIndex) => MultiplyRoundedDoublingBySelectedScalarSaturateHigh(left, right, rightIndex);
/// <summary>
/// int16x8_t vqrdmulhq_lane_s16 (int16x8_t a, int16x4_t v, const int lane)
/// A32: VQRDMULH.S16 Qd, Qn, Dm[lane]
/// A64: SQRDMULH Vd.8H, Vn.8H, Vm.H[lane]
/// </summary>
public static Vector128<short> MultiplyRoundedDoublingBySelectedScalarSaturateHigh(Vector128<short> left, Vector64<short> right, byte rightIndex) => MultiplyRoundedDoublingBySelectedScalarSaturateHigh(left, right, rightIndex);
/// <summary>
/// int16x8_t vqrdmulhq_laneq_s16 (int16x8_t a, int16x8_t v, const int lane)
/// A32: VQRDMULH.S16 Qd, Qn, Dm[lane]
/// A64: SQRDMULH Vd.8H, Vn.8H, Vm.H[lane]
/// </summary>
public static Vector128<short> MultiplyRoundedDoublingBySelectedScalarSaturateHigh(Vector128<short> left, Vector128<short> right, byte rightIndex) => MultiplyRoundedDoublingBySelectedScalarSaturateHigh(left, right, rightIndex);
/// <summary>
/// int32x4_t vqrdmulhq_lane_s32 (int32x4_t a, int32x2_t v, const int lane)
/// A32: VQRDMULH.S32 Qd, Qn, Dm[lane]
/// A64: SQRDMULH Vd.4S, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<int> MultiplyRoundedDoublingBySelectedScalarSaturateHigh(Vector128<int> left, Vector64<int> right, byte rightIndex) => MultiplyRoundedDoublingBySelectedScalarSaturateHigh(left, right, rightIndex);
/// <summary>
/// int32x4_t vqrdmulhq_laneq_s32 (int32x4_t a, int32x4_t v, const int lane)
/// A32: VQRDMULH.S32 Qd, Qn, Dm[lane]
/// A64: SQRDMULH Vd.4S, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<int> MultiplyRoundedDoublingBySelectedScalarSaturateHigh(Vector128<int> left, Vector128<int> right, byte rightIndex) => MultiplyRoundedDoublingBySelectedScalarSaturateHigh(left, right, rightIndex);
/// <summary>
/// int16x4_t vqrdmulh_s16 (int16x4_t a, int16x4_t b)
/// A32: VQRDMULH.S16 Dd, Dn, Dm
/// A64: SQRDMULH Vd.4H, Vn.4H, Vm.4H
/// </summary>
public static Vector64<short> MultiplyRoundedDoublingSaturateHigh(Vector64<short> left, Vector64<short> right) => MultiplyRoundedDoublingSaturateHigh(left, right);
/// <summary>
/// int32x2_t vqrdmulh_s32 (int32x2_t a, int32x2_t b)
/// A32: VQRDMULH.S32 Dd, Dn, Dm
/// A64: SQRDMULH Vd.2S, Vn.2S, Vm.2S
/// </summary>
public static Vector64<int> MultiplyRoundedDoublingSaturateHigh(Vector64<int> left, Vector64<int> right) => MultiplyRoundedDoublingSaturateHigh(left, right);
/// <summary>
/// int16x8_t vqrdmulhq_s16 (int16x8_t a, int16x8_t b)
/// A32: VQRDMULH.S16 Qd, Qn, Qm
/// A64: SQRDMULH Vd.8H, Vn.8H, Vm.8H
/// </summary>
public static Vector128<short> MultiplyRoundedDoublingSaturateHigh(Vector128<short> left, Vector128<short> right) => MultiplyRoundedDoublingSaturateHigh(left, right);
/// <summary>
/// int32x4_t vqrdmulhq_s32 (int32x4_t a, int32x4_t b)
/// A32: VQRDMULH.S32 Qd, Qn, Qm
/// A64: SQRDMULH Vd.4S, Vn.4S, Vm.4S
/// </summary>
public static Vector128<int> MultiplyRoundedDoublingSaturateHigh(Vector128<int> left, Vector128<int> right) => MultiplyRoundedDoublingSaturateHigh(left, right);
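// Illustrative sketch: the rounded forms differ from the SQDMULH family earlier
// in this file only by the rounding constant added before the high half is
// taken, roughly saturate((2 * a * b + 0x8000) >> 16) per 16-bit lane, which
// reduces the downward bias of plain truncation in long Q15 pipelines.
// Hypothetical helper; assumes AdvSimd.IsSupported.
private static Vector128<short> ScaleQ15Rounded(Vector128<short> samples, Vector128<short> gains) =>
    MultiplyRoundedDoublingSaturateHigh(samples, gains);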
/// <summary>
/// float64x1_t vmul_f64 (float64x1_t a, float64x1_t b)
/// A32: VMUL.F64 Dd, Dn, Dm
/// A64: FMUL Dd, Dn, Dm
/// </summary>
public static Vector64<double> MultiplyScalar(Vector64<double> left, Vector64<double> right) => MultiplyScalar(left, right);
/// <summary>
/// float32_t vmuls_f32 (float32_t a, float32_t b)
/// A32: VMUL.F32 Sd, Sn, Sm
/// A64: FMUL Sd, Sn, Sm
/// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
/// </summary>
public static Vector64<float> MultiplyScalar(Vector64<float> left, Vector64<float> right) => MultiplyScalar(left, right);
/// <summary>
/// float32_t vmuls_lane_f32 (float32_t a, float32x2_t v, const int lane)
/// A32: VMUL.F32 Sd, Sn, Dm[lane]
/// A64: FMUL Sd, Sn, Vm.S[lane]
/// </summary>
public static Vector64<float> MultiplyScalarBySelectedScalar(Vector64<float> left, Vector64<float> right, byte rightIndex) => MultiplyScalarBySelectedScalar(left, right, rightIndex);
/// <summary>
/// float32_t vmuls_laneq_f32 (float32_t a, float32x4_t v, const int lane)
/// A32: VMUL.F32 Sd, Sn, Dm[lane]
/// A64: FMUL Sd, Sn, Vm.S[lane]
/// </summary>
public static Vector64<float> MultiplyScalarBySelectedScalar(Vector64<float> left, Vector128<float> right, byte rightIndex) => MultiplyScalarBySelectedScalar(left, right, rightIndex);
/// <summary>
/// uint8x8_t vmls_u8 (uint8x8_t a, uint8x8_t b, uint8x8_t c)
/// A32: VMLS.I8 Dd, Dn, Dm
/// A64: MLS Vd.8B, Vn.8B, Vm.8B
/// </summary>
public static Vector64<byte> MultiplySubtract(Vector64<byte> minuend, Vector64<byte> left, Vector64<byte> right) => MultiplySubtract(minuend, left, right);
/// <summary>
/// int16x4_t vmls_s16 (int16x4_t a, int16x4_t b, int16x4_t c)
/// A32: VMLS.I16 Dd, Dn, Dm
/// A64: MLS Vd.4H, Vn.4H, Vm.4H
/// </summary>
public static Vector64<short> MultiplySubtract(Vector64<short> minuend, Vector64<short> left, Vector64<short> right) => MultiplySubtract(minuend, left, right);
/// <summary>
/// int32x2_t vmls_s32 (int32x2_t a, int32x2_t b, int32x2_t c)
/// A32: VMLS.I32 Dd, Dn, Dm
/// A64: MLS Vd.2S, Vn.2S, Vm.2S
/// </summary>
public static Vector64<int> MultiplySubtract(Vector64<int> minuend, Vector64<int> left, Vector64<int> right) => MultiplySubtract(minuend, left, right);
/// <summary>
/// int8x8_t vmls_s8 (int8x8_t a, int8x8_t b, int8x8_t c)
/// A32: VMLS.I8 Dd, Dn, Dm
/// A64: MLS Vd.8B, Vn.8B, Vm.8B
/// </summary>
public static Vector64<sbyte> MultiplySubtract(Vector64<sbyte> minuend, Vector64<sbyte> left, Vector64<sbyte> right) => MultiplySubtract(minuend, left, right);
/// <summary>
/// uint16x4_t vmls_u16 (uint16x4_t a, uint16x4_t b, uint16x4_t c)
/// A32: VMLS.I16 Dd, Dn, Dm
/// A64: MLS Vd.4H, Vn.4H, Vm.4H
/// </summary>
public static Vector64<ushort> MultiplySubtract(Vector64<ushort> minuend, Vector64<ushort> left, Vector64<ushort> right) => MultiplySubtract(minuend, left, right);
/// <summary>
/// uint32x2_t vmls_u32 (uint32x2_t a, uint32x2_t b, uint32x2_t c)
/// A32: VMLS.I32 Dd, Dn, Dm
/// A64: MLS Vd.2S, Vn.2S, Vm.2S
/// </summary>
public static Vector64<uint> MultiplySubtract(Vector64<uint> minuend, Vector64<uint> left, Vector64<uint> right) => MultiplySubtract(minuend, left, right);
/// <summary>
/// uint8x16_t vmlsq_u8 (uint8x16_t a, uint8x16_t b, uint8x16_t c)
/// A32: VMLS.I8 Qd, Qn, Qm
/// A64: MLS Vd.16B, Vn.16B, Vm.16B
/// </summary>
public static Vector128<byte> MultiplySubtract(Vector128<byte> minuend, Vector128<byte> left, Vector128<byte> right) => MultiplySubtract(minuend, left, right);
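// Illustrative sketch (hypothetical helper): MLS is the subtractive counterpart
// of MLA, computing minuend - left * right element-wise in a single instruction;
// a typical use is peeling one term back off a running sum. Assumes
// AdvSimd.IsSupported.
private static Vector64<int> SubtractProduct(Vector64<int> sum, Vector64<int> values, Vector64<int> weights) =>
    // sum[i] - values[i] * weights[i]
    MultiplySubtract(sum, values, weights);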
/// <summary>
/// int16x8_t vmlsq_s16 (int16x8_t a, int16x8_t b, int16x8_t c)
/// A32: VMLS.I16 Qd, Qn, Qm
/// A64: MLS Vd.8H, Vn.8H, Vm.8H
/// </summary>
public static Vector128<short> MultiplySubtract(Vector128<short> minuend, Vector128<short> left, Vector128<short> right) => MultiplySubtract(minuend, left, right);
/// <summary>
/// int32x4_t vmlsq_s32 (int32x4_t a, int32x4_t b, int32x4_t c)
/// A32: VMLS.I32 Qd, Qn, Qm
/// A64: MLS Vd.4S, Vn.4S, Vm.4S
/// </summary>
public static Vector128<int> MultiplySubtract(Vector128<int> minuend, Vector128<int> left, Vector128<int> right) => MultiplySubtract(minuend, left, right);
/// <summary>
/// int8x16_t vmlsq_s8 (int8x16_t a, int8x16_t b, int8x16_t c)
/// A32: VMLS.I8 Qd, Qn, Qm
/// A64: MLS Vd.16B, Vn.16B, Vm.16B
/// </summary>
public static Vector128<sbyte> MultiplySubtract(Vector128<sbyte> minuend, Vector128<sbyte> left, Vector128<sbyte> right) => MultiplySubtract(minuend, left, right);
/// <summary>
/// uint16x8_t vmlsq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t c)
/// A32: VMLS.I16 Qd, Qn, Qm
/// A64: MLS Vd.8H, Vn.8H, Vm.8H
/// </summary>
public static Vector128<ushort> MultiplySubtract(Vector128<ushort> minuend, Vector128<ushort> left, Vector128<ushort> right) => MultiplySubtract(minuend, left, right);
/// <summary>
/// uint32x4_t vmlsq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t c)
/// A32: VMLS.I32 Qd, Qn, Qm
/// A64: MLS Vd.4S, Vn.4S, Vm.4S
/// </summary>
public static Vector128<uint> MultiplySubtract(Vector128<uint> minuend, Vector128<uint> left, Vector128<uint> right) => MultiplySubtract(minuend, left, right);
/// <summary>
/// int16x4_t vmls_n_s16 (int16x4_t a, int16x4_t b, int16_t c)
/// A32: VMLS.I16 Dd, Dn, Dm[0]
/// A64: MLS Vd.4H, Vn.4H, Vm.H[0]
/// </summary>
public static Vector64<short> MultiplySubtractByScalar(Vector64<short> minuend, Vector64<short> left, Vector64<short> right) => MultiplySubtractByScalar(minuend, left, right);
/// <summary>
/// int32x2_t vmls_n_s32 (int32x2_t a, int32x2_t b, int32_t c)
/// A32: VMLS.I32 Dd, Dn, Dm[0]
/// A64: MLS Vd.2S, Vn.2S, Vm.S[0]
/// </summary>
public static Vector64<int> MultiplySubtractByScalar(Vector64<int> minuend, Vector64<int> left, Vector64<int> right) => MultiplySubtractByScalar(minuend, left, right);
/// <summary>
/// uint16x4_t vmls_n_u16 (uint16x4_t a, uint16x4_t b, uint16_t c)
/// A32: VMLS.I16 Dd, Dn, Dm[0]
/// A64: MLS Vd.4H, Vn.4H, Vm.H[0]
/// </summary>
public static Vector64<ushort> MultiplySubtractByScalar(Vector64<ushort> minuend, Vector64<ushort> left, Vector64<ushort> right) => MultiplySubtractByScalar(minuend, left, right);
/// <summary>
/// uint32x2_t vmls_n_u32 (uint32x2_t a, uint32x2_t b, uint32_t c)
/// A32: VMLS.I32 Dd, Dn, Dm[0]
/// A64: MLS Vd.2S, Vn.2S, Vm.S[0]
/// </summary>
public static Vector64<uint> MultiplySubtractByScalar(Vector64<uint> minuend, Vector64<uint> left, Vector64<uint> right) => MultiplySubtractByScalar(minuend, left, right);
/// <summary>
/// int16x8_t vmlsq_n_s16 (int16x8_t a, int16x8_t b, int16_t c)
/// A32: VMLS.I16 Qd, Qn, Dm[0]
/// A64: MLS Vd.8H, Vn.8H, Vm.H[0]
/// </summary>
public static Vector128<short> MultiplySubtractByScalar(Vector128<short> minuend, Vector128<short> left, Vector64<short> right) => MultiplySubtractByScalar(minuend, left, right);
/// <summary> ///
int32x4_t vmlsq_n_s32 (int32x4_t a, int32x4_t b, int32_t c) /// A32: VMLS.I32 Qd, Qn, Dm[0] /// A64: MLS Vd.4S, Vn.4S, Vm.S[0] /// </summary> public static Vector128<int> MultiplySubtractByScalar(Vector128<int> minuend, Vector128<int> left, Vector64<int> right) => MultiplySubtractByScalar(minuend, left, right); /// <summary> /// uint16x8_t vmlsq_n_u16 (uint16x8_t a, uint16x8_t b, uint16_t c) /// A32: VMLS.I16 Qd, Qn, Dm[0] /// A64: MLS Vd.8H, Vn.8H, Vm.H[0] /// </summary> public static Vector128<ushort> MultiplySubtractByScalar(Vector128<ushort> minuend, Vector128<ushort> left, Vector64<ushort> right) => MultiplySubtractByScalar(minuend, left, right); /// <summary> /// uint32x4_t vmlsq_n_u32 (uint32x4_t a, uint32x4_t b, uint32_t c) /// A32: VMLS.I32 Qd, Qn, Dm[0] /// A64: MLS Vd.4S, Vn.4S, Vm.S[0] /// </summary> public static Vector128<uint> MultiplySubtractByScalar(Vector128<uint> minuend, Vector128<uint> left, Vector64<uint> right) => MultiplySubtractByScalar(minuend, left, right); /// <summary> /// int16x4_t vmls_lane_s16 (int16x4_t a, int16x4_t b, int16x4_t v, const int lane) /// A32: VMLS.I16 Dd, Dn, Dm[lane] /// A64: MLS Vd.4H, Vn.4H, Vm.H[lane] /// </summary> public static Vector64<short> MultiplySubtractBySelectedScalar(Vector64<short> minuend, Vector64<short> left, Vector64<short> right, byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); /// <summary> /// int16x4_t vmls_laneq_s16 (int16x4_t a, int16x4_t b, int16x8_t v, const int lane) /// A32: VMLS.I16 Dd, Dn, Dm[lane] /// A64: MLS Vd.4H, Vn.4H, Vm.H[lane] /// </summary> public static Vector64<short> MultiplySubtractBySelectedScalar(Vector64<short> minuend, Vector64<short> left, Vector128<short> right, byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); /// <summary> /// int32x2_t vmls_lane_s32 (int32x2_t a, int32x2_t b, int32x2_t v, const int lane) /// A32: VMLS.I32 Dd, Dn, Dm[lane] /// A64: MLS Vd.2S, Vn.2S, Vm.S[lane] /// </summary> public static Vector64<int> MultiplySubtractBySelectedScalar(Vector64<int> minuend, Vector64<int> left, Vector64<int> right, byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); /// <summary> /// int32x2_t vmls_laneq_s32 (int32x2_t a, int32x2_t b, int32x4_t v, const int lane) /// A32: VMLS.I32 Dd, Dn, Dm[lane] /// A64: MLS Vd.2S, Vn.2S, Vm.S[lane] /// </summary> public static Vector64<int> MultiplySubtractBySelectedScalar(Vector64<int> minuend, Vector64<int> left, Vector128<int> right, byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); /// <summary> /// uint16x4_t vmls_lane_u16 (uint16x4_t a, uint16x4_t b, uint16x4_t v, const int lane) /// A32: VMLS.I16 Dd, Dn, Dm[lane] /// A64: MLS Vd.4H, Vn.4H, Vm.H[lane] /// </summary> public static Vector64<ushort> MultiplySubtractBySelectedScalar(Vector64<ushort> minuend, Vector64<ushort> left, Vector64<ushort> right, byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); /// <summary> /// uint16x4_t vmls_laneq_u16 (uint16x4_t a, uint16x4_t b, uint16x8_t v, const int lane) /// A32: VMLS.I16 Dd, Dn, Dm[lane] /// A64: MLS Vd.4H, Vn.4H, Vm.H[lane] /// </summary> public static Vector64<ushort> MultiplySubtractBySelectedScalar(Vector64<ushort> minuend, Vector64<ushort> left, Vector128<ushort> right, byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); /// <summary> /// uint32x2_t vmls_lane_u32 (uint32x2_t a, uint32x2_t b, uint32x2_t v, const int lane) /// 
A32: VMLS.I32 Dd, Dn, Dm[lane] /// A64: MLS Vd.2S, Vn.2S, Vm.S[lane] /// </summary> public static Vector64<uint> MultiplySubtractBySelectedScalar(Vector64<uint> minuend, Vector64<uint> left, Vector64<uint> right, byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); /// <summary> /// uint32x2_t vmls_laneq_u32 (uint32x2_t a, uint32x2_t b, uint32x4_t v, const int lane) /// A32: VMLS.I32 Dd, Dn, Dm[lane] /// A64: MLS Vd.2S, Vn.2S, Vm.S[lane] /// </summary> public static Vector64<uint> MultiplySubtractBySelectedScalar(Vector64<uint> minuend, Vector64<uint> left, Vector128<uint> right, byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); /// <summary> /// int16x8_t vmlsq_lane_s16 (int16x8_t a, int16x8_t b, int16x4_t v, const int lane) /// A32: VMLS.I16 Qd, Qn, Dm[lane] /// A64: MLS Vd.8H, Vn.8H, Vm.H[lane] /// </summary> public static Vector128<short> MultiplySubtractBySelectedScalar(Vector128<short> minuend, Vector128<short> left, Vector64<short> right, byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); /// <summary> /// int16x8_t vmlsq_laneq_s16 (int16x8_t a, int16x8_t b, int16x8_t v, const int lane) /// A32: VMLS.I16 Qd, Qn, Dm[lane] /// A64: MLS Vd.8H, Vn.8H, Vm.H[lane] /// </summary> public static Vector128<short> MultiplySubtractBySelectedScalar(Vector128<short> minuend, Vector128<short> left, Vector128<short> right, byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); /// <summary> /// int32x4_t vmlsq_lane_s32 (int32x4_t a, int32x4_t b, int32x2_t v, const int lane) /// A32: VMLS.I32 Qd, Qn, Dm[lane] /// A64: MLS Vd.4S, Vn.4S, Vm.S[lane] /// </summary> public static Vector128<int> MultiplySubtractBySelectedScalar(Vector128<int> minuend, Vector128<int> left, Vector64<int> right, byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); /// <summary> /// int32x4_t vmlsq_laneq_s32 (int32x4_t a, int32x4_t b, int32x4_t v, const int lane) /// A32: VMLS.I32 Qd, Qn, Dm[lane] /// A64: MLS Vd.4S, Vn.4S, Vm.S[lane] /// </summary> public static Vector128<int> MultiplySubtractBySelectedScalar(Vector128<int> minuend, Vector128<int> left, Vector128<int> right, byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); /// <summary> /// uint16x8_t vmlsq_lane_u16 (uint16x8_t a, uint16x8_t b, uint16x4_t v, const int lane) /// A32: VMLS.I16 Qd, Qn, Dm[lane] /// A64: MLS Vd.8H, Vn.8H, Vm.H[lane] /// </summary> public static Vector128<ushort> MultiplySubtractBySelectedScalar(Vector128<ushort> minuend, Vector128<ushort> left, Vector64<ushort> right, byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); /// <summary> /// uint16x8_t vmlsq_laneq_u16 (uint16x8_t a, uint16x8_t b, uint16x8_t v, const int lane) /// A32: VMLS.I16 Qd, Qn, Dm[lane] /// A64: MLS Vd.8H, Vn.8H, Vm.H[lane] /// </summary> public static Vector128<ushort> MultiplySubtractBySelectedScalar(Vector128<ushort> minuend, Vector128<ushort> left, Vector128<ushort> right, byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex); /// <summary> /// uint32x4_t vmlsq_lane_u32 (uint32x4_t a, uint32x4_t b, uint32x2_t v, const int lane) /// A32: VMLS.I32 Qd, Qn, Dm[lane] /// A64: MLS Vd.4S, Vn.4S, Vm.S[lane] /// </summary> public static Vector128<uint> MultiplySubtractBySelectedScalar(Vector128<uint> minuend, Vector128<uint> left, Vector64<uint> right, byte rightIndex) => 
/// <summary>
/// uint32x4_t vmlsq_laneq_u32 (uint32x4_t a, uint32x4_t b, uint32x4_t v, const int lane)
/// A32: VMLS.I32 Qd, Qn, Dm[lane]
/// A64: MLS Vd.4S, Vn.4S, Vm.S[lane]
/// </summary>
public static Vector128<uint> MultiplySubtractBySelectedScalar(Vector128<uint> minuend, Vector128<uint> left, Vector128<uint> right, byte rightIndex) => MultiplySubtractBySelectedScalar(minuend, left, right, rightIndex);
/// <summary>
/// uint16x8_t vmull_u8 (uint8x8_t a, uint8x8_t b)
/// A32: VMULL.U8 Qd, Dn, Dm
/// A64: UMULL Vd.8H, Vn.8B, Vm.8B
/// </summary>
public static Vector128<ushort> MultiplyWideningLower(Vector64<byte> left, Vector64<byte> right) => MultiplyWideningLower(left, right);
/// <summary>
/// int32x4_t vmull_s16 (int16x4_t a, int16x4_t b)
/// A32: VMULL.S16 Qd, Dn, Dm
/// A64: SMULL Vd.4S, Vn.4H, Vm.4H
/// </summary>
public static Vector128<int> MultiplyWideningLower(Vector64<short> left, Vector64<short> right) => MultiplyWideningLower(left, right);
/// <summary>
/// int64x2_t vmull_s32 (int32x2_t a, int32x2_t b)
/// A32: VMULL.S32 Qd, Dn, Dm
/// A64: SMULL Vd.2D, Vn.2S, Vm.2S
/// </summary>
public static Vector128<long> MultiplyWideningLower(Vector64<int> left, Vector64<int> right) => MultiplyWideningLower(left, right);
/// <summary>
/// int16x8_t vmull_s8 (int8x8_t a, int8x8_t b)
/// A32: VMULL.S8 Qd, Dn, Dm
/// A64: SMULL Vd.8H, Vn.8B, Vm.8B
/// </summary>
public static Vector128<short> MultiplyWideningLower(Vector64<sbyte> left, Vector64<sbyte> right) => MultiplyWideningLower(left, right);
/// <summary>
/// uint32x4_t vmull_u16 (uint16x4_t a, uint16x4_t b)
/// A32: VMULL.U16 Qd, Dn, Dm
/// A64: UMULL Vd.4S, Vn.4H, Vm.4H
/// </summary>
public static Vector128<uint> MultiplyWideningLower(Vector64<ushort> left, Vector64<ushort> right) => MultiplyWideningLower(left, right);
/// <summary>
/// uint64x2_t vmull_u32 (uint32x2_t a, uint32x2_t b)
/// A32: VMULL.U32 Qd, Dn, Dm
/// A64: UMULL Vd.2D, Vn.2S, Vm.2S
/// </summary>
public static Vector128<ulong> MultiplyWideningLower(Vector64<uint> left, Vector64<uint> right) => MultiplyWideningLower(left, right);
/// <summary>
/// uint16x8_t vmlal_u8 (uint16x8_t a, uint8x8_t b, uint8x8_t c)
/// A32: VMLAL.U8 Qd, Dn, Dm
/// A64: UMLAL Vd.8H, Vn.8B, Vm.8B
/// </summary>
public static Vector128<ushort> MultiplyWideningLowerAndAdd(Vector128<ushort> addend, Vector64<byte> left, Vector64<byte> right) => MultiplyWideningLowerAndAdd(addend, left, right);
/// <summary>
/// int32x4_t vmlal_s16 (int32x4_t a, int16x4_t b, int16x4_t c)
/// A32: VMLAL.S16 Qd, Dn, Dm
/// A64: SMLAL Vd.4S, Vn.4H, Vm.4H
/// </summary>
public static Vector128<int> MultiplyWideningLowerAndAdd(Vector128<int> addend, Vector64<short> left, Vector64<short> right) => MultiplyWideningLowerAndAdd(addend, left, right);
/// <summary>
/// int64x2_t vmlal_s32 (int64x2_t a, int32x2_t b, int32x2_t c)
/// A32: VMLAL.S32 Qd, Dn, Dm
/// A64: SMLAL Vd.2D, Vn.2S, Vm.2S
/// </summary>
public static Vector128<long> MultiplyWideningLowerAndAdd(Vector128<long> addend, Vector64<int> left, Vector64<int> right) => MultiplyWideningLowerAndAdd(addend, left, right);
/// <summary>
/// int16x8_t vmlal_s8 (int16x8_t a, int8x8_t b, int8x8_t c)
/// A32: VMLAL.S8 Qd, Dn, Dm
/// A64: SMLAL Vd.8H, Vn.8B, Vm.8B
/// </summary>
public static Vector128<short> MultiplyWideningLowerAndAdd(Vector128<short> addend, Vector64<sbyte> left, Vector64<sbyte> right) => MultiplyWideningLowerAndAdd(addend, left, right);
/// <summary>
/// uint32x4_t vmlal_u16 (uint32x4_t a, uint16x4_t b, uint16x4_t c)
/// A32: VMLAL.U16 Qd, Dn, Dm
/// A64: UMLAL Vd.4S, Vn.4H, Vm.4H
/// </summary>
public static Vector128<uint> MultiplyWideningLowerAndAdd(Vector128<uint> addend, Vector64<ushort> left, Vector64<ushort> right) => MultiplyWideningLowerAndAdd(addend, left, right);
/// <summary>
/// uint64x2_t vmlal_u32 (uint64x2_t a, uint32x2_t b, uint32x2_t c)
/// A32: VMLAL.U32 Qd, Dn, Dm
/// A64: UMLAL Vd.2D, Vn.2S, Vm.2S
/// </summary>
public static Vector128<ulong> MultiplyWideningLowerAndAdd(Vector128<ulong> addend, Vector64<uint> left, Vector64<uint> right) => MultiplyWideningLowerAndAdd(addend, left, right);
/// <summary>
/// uint16x8_t vmlsl_u8 (uint16x8_t a, uint8x8_t b, uint8x8_t c)
/// A32: VMLSL.U8 Qd, Dn, Dm
/// A64: UMLSL Vd.8H, Vn.8B, Vm.8B
/// </summary>
public static Vector128<ushort> MultiplyWideningLowerAndSubtract(Vector128<ushort> minuend, Vector64<byte> left, Vector64<byte> right) => MultiplyWideningLowerAndSubtract(minuend, left, right);
/// <summary>
/// int32x4_t vmlsl_s16 (int32x4_t a, int16x4_t b, int16x4_t c)
/// A32: VMLSL.S16 Qd, Dn, Dm
/// A64: SMLSL Vd.4S, Vn.4H, Vm.4H
/// </summary>
public static Vector128<int> MultiplyWideningLowerAndSubtract(Vector128<int> minuend, Vector64<short> left, Vector64<short> right) => MultiplyWideningLowerAndSubtract(minuend, left, right);
/// <summary>
/// int64x2_t vmlsl_s32 (int64x2_t a, int32x2_t b, int32x2_t c)
/// A32: VMLSL.S32 Qd, Dn, Dm
/// A64: SMLSL Vd.2D, Vn.2S, Vm.2S
/// </summary>
public static Vector128<long> MultiplyWideningLowerAndSubtract(Vector128<long> minuend, Vector64<int> left, Vector64<int> right) => MultiplyWideningLowerAndSubtract(minuend, left, right);
/// <summary>
/// int16x8_t vmlsl_s8 (int16x8_t a, int8x8_t b, int8x8_t c)
/// A32: VMLSL.S8 Qd, Dn, Dm
/// A64: SMLSL Vd.8H, Vn.8B, Vm.8B
/// </summary>
public static Vector128<short> MultiplyWideningLowerAndSubtract(Vector128<short> minuend, Vector64<sbyte> left, Vector64<sbyte> right) => MultiplyWideningLowerAndSubtract(minuend, left, right);
/// <summary>
/// uint32x4_t vmlsl_u16 (uint32x4_t a, uint16x4_t b, uint16x4_t c)
/// A32: VMLSL.U16 Qd, Dn, Dm
/// A64: UMLSL Vd.4S, Vn.4H, Vm.4H
/// </summary>
public static Vector128<uint> MultiplyWideningLowerAndSubtract(Vector128<uint> minuend, Vector64<ushort> left, Vector64<ushort> right) => MultiplyWideningLowerAndSubtract(minuend, left, right);
/// <summary>
/// uint64x2_t vmlsl_u32 (uint64x2_t a, uint32x2_t b, uint32x2_t c)
/// A32: VMLSL.U32 Qd, Dn, Dm
/// A64: UMLSL Vd.2D, Vn.2S, Vm.2S
/// </summary>
public static Vector128<ulong> MultiplyWideningLowerAndSubtract(Vector128<ulong> minuend, Vector64<uint> left, Vector64<uint> right) => MultiplyWideningLowerAndSubtract(minuend, left, right);
/// <summary>
/// uint16x8_t vmull_high_u8 (uint8x16_t a, uint8x16_t b)
/// A32: VMULL.U8 Qd, Dn+1, Dm+1
/// A64: UMULL2 Vd.8H, Vn.16B, Vm.16B
/// </summary>
public static Vector128<ushort> MultiplyWideningUpper(Vector128<byte> left, Vector128<byte> right) => MultiplyWideningUpper(left, right);
/// <summary>
/// int32x4_t vmull_high_s16 (int16x8_t a, int16x8_t b)
/// A32: VMULL.S16 Qd, Dn+1, Dm+1
/// A64: SMULL2 Vd.4S, Vn.8H, Vm.8H
/// </summary>
public static Vector128<int> MultiplyWideningUpper(Vector128<short> left, Vector128<short> right) => MultiplyWideningUpper(left, right);
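// Illustrative sketch (not from the original source): a full 128-bit widening multiply takes
// both halves — MultiplyWideningLower consumes the low 64 bits of each operand and
// MultiplyWideningUpper the high 64 bits of the same Vector128 inputs, e.g.
//
//   Vector128<ushort> lo = AdvSimd.MultiplyWideningLower(a.GetLower(), b.GetLower());
//   Vector128<ushort> hi = AdvSimd.MultiplyWideningUpper(a, b); // a, b are Vector128<byte>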
/// <summary>
/// int64x2_t vmull_high_s32 (int32x4_t a, int32x4_t b)
/// A32: VMULL.S32 Qd, Dn+1, Dm+1
/// A64: SMULL2 Vd.2D, Vn.4S, Vm.4S
/// </summary>
public static Vector128<long> MultiplyWideningUpper(Vector128<int> left, Vector128<int> right) => MultiplyWideningUpper(left, right);
/// <summary>
/// int16x8_t vmull_high_s8 (int8x16_t a, int8x16_t b)
/// A32: VMULL.S8 Qd, Dn+1, Dm+1
/// A64: SMULL2 Vd.8H, Vn.16B, Vm.16B
/// </summary>
public static Vector128<short> MultiplyWideningUpper(Vector128<sbyte> left, Vector128<sbyte> right) => MultiplyWideningUpper(left, right);
/// <summary>
/// uint32x4_t vmull_high_u16 (uint16x8_t a, uint16x8_t b)
/// A32: VMULL.U16 Qd, Dn+1, Dm+1
/// A64: UMULL2 Vd.4S, Vn.8H, Vm.8H
/// </summary>
public static Vector128<uint> MultiplyWideningUpper(Vector128<ushort> left, Vector128<ushort> right) => MultiplyWideningUpper(left, right);
/// <summary>
/// uint64x2_t vmull_high_u32 (uint32x4_t a, uint32x4_t b)
/// A32: VMULL.U32 Qd, Dn+1, Dm+1
/// A64: UMULL2 Vd.2D, Vn.4S, Vm.4S
/// </summary>
public static Vector128<ulong> MultiplyWideningUpper(Vector128<uint> left, Vector128<uint> right) => MultiplyWideningUpper(left, right);
/// <summary>
/// uint16x8_t vmlal_high_u8 (uint16x8_t a, uint8x16_t b, uint8x16_t c)
/// A32: VMLAL.U8 Qd, Dn+1, Dm+1
/// A64: UMLAL2 Vd.8H, Vn.16B, Vm.16B
/// </summary>
public static Vector128<ushort> MultiplyWideningUpperAndAdd(Vector128<ushort> addend, Vector128<byte> left, Vector128<byte> right) => MultiplyWideningUpperAndAdd(addend, left, right);
/// <summary>
/// int32x4_t vmlal_high_s16 (int32x4_t a, int16x8_t b, int16x8_t c)
/// A32: VMLAL.S16 Qd, Dn+1, Dm+1
/// A64: SMLAL2 Vd.4S, Vn.8H, Vm.8H
/// </summary>
public static Vector128<int> MultiplyWideningUpperAndAdd(Vector128<int> addend, Vector128<short> left, Vector128<short> right) => MultiplyWideningUpperAndAdd(addend, left, right);
/// <summary>
/// int64x2_t vmlal_high_s32 (int64x2_t a, int32x4_t b, int32x4_t c)
/// A32: VMLAL.S32 Qd, Dn+1, Dm+1
/// A64: SMLAL2 Vd.2D, Vn.4S, Vm.4S
/// </summary>
public static Vector128<long> MultiplyWideningUpperAndAdd(Vector128<long> addend, Vector128<int> left, Vector128<int> right) => MultiplyWideningUpperAndAdd(addend, left, right);
/// <summary>
/// int16x8_t vmlal_high_s8 (int16x8_t a, int8x16_t b, int8x16_t c)
/// A32: VMLAL.S8 Qd, Dn+1, Dm+1
/// A64: SMLAL2 Vd.8H, Vn.16B, Vm.16B
/// </summary>
public static Vector128<short> MultiplyWideningUpperAndAdd(Vector128<short> addend, Vector128<sbyte> left, Vector128<sbyte> right) => MultiplyWideningUpperAndAdd(addend, left, right);
/// <summary>
/// uint32x4_t vmlal_high_u16 (uint32x4_t a, uint16x8_t b, uint16x8_t c)
/// A32: VMLAL.U16 Qd, Dn+1, Dm+1
/// A64: UMLAL2 Vd.4S, Vn.8H, Vm.8H
/// </summary>
public static Vector128<uint> MultiplyWideningUpperAndAdd(Vector128<uint> addend, Vector128<ushort> left, Vector128<ushort> right) => MultiplyWideningUpperAndAdd(addend, left, right);
/// <summary>
/// uint64x2_t vmlal_high_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t c)
/// A32: VMLAL.U32 Qd, Dn+1, Dm+1
/// A64: UMLAL2 Vd.2D, Vn.4S, Vm.4S
/// </summary>
public static Vector128<ulong> MultiplyWideningUpperAndAdd(Vector128<ulong> addend, Vector128<uint> left, Vector128<uint> right) => MultiplyWideningUpperAndAdd(addend, left, right);
/// <summary>
/// uint16x8_t vmlsl_high_u8 (uint16x8_t a, uint8x16_t b, uint8x16_t c)
/// A32: VMLSL.U8 Qd, Dn+1, Dm+1
/// A64: UMLSL2 Vd.8H, Vn.16B, Vm.16B
/// </summary>
public static Vector128<ushort> MultiplyWideningUpperAndSubtract(Vector128<ushort> minuend, Vector128<byte> left, Vector128<byte> right) => MultiplyWideningUpperAndSubtract(minuend, left, right);
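// Illustrative sketch (hypothetical accumulator names; not from the original source): the
// Lower/Upper multiply-accumulate pairs let byte products accumulate into wider lanes
// without overflowing at 8 bits, e.g.
//
//   Vector128<ushort> acc = Vector128<ushort>.Zero;
//   acc = AdvSimd.MultiplyWideningLowerAndAdd(acc, a.GetLower(), b.GetLower());
//   acc = AdvSimd.MultiplyWideningUpperAndAdd(acc, a, b);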
/// <summary>
/// int32x4_t vmlsl_high_s16 (int32x4_t a, int16x8_t b, int16x8_t c)
/// A32: VMLSL.S16 Qd, Dn+1, Dm+1
/// A64: SMLSL2 Vd.4S, Vn.8H, Vm.8H
/// </summary>
public static Vector128<int> MultiplyWideningUpperAndSubtract(Vector128<int> minuend, Vector128<short> left, Vector128<short> right) => MultiplyWideningUpperAndSubtract(minuend, left, right);
/// <summary>
/// int64x2_t vmlsl_high_s32 (int64x2_t a, int32x4_t b, int32x4_t c)
/// A32: VMLSL.S32 Qd, Dn+1, Dm+1
/// A64: SMLSL2 Vd.2D, Vn.4S, Vm.4S
/// </summary>
public static Vector128<long> MultiplyWideningUpperAndSubtract(Vector128<long> minuend, Vector128<int> left, Vector128<int> right) => MultiplyWideningUpperAndSubtract(minuend, left, right);
/// <summary>
/// int16x8_t vmlsl_high_s8 (int16x8_t a, int8x16_t b, int8x16_t c)
/// A32: VMLSL.S8 Qd, Dn+1, Dm+1
/// A64: SMLSL2 Vd.8H, Vn.16B, Vm.16B
/// </summary>
public static Vector128<short> MultiplyWideningUpperAndSubtract(Vector128<short> minuend, Vector128<sbyte> left, Vector128<sbyte> right) => MultiplyWideningUpperAndSubtract(minuend, left, right);
/// <summary>
/// uint32x4_t vmlsl_high_u16 (uint32x4_t a, uint16x8_t b, uint16x8_t c)
/// A32: VMLSL.U16 Qd, Dn+1, Dm+1
/// A64: UMLSL2 Vd.4S, Vn.8H, Vm.8H
/// </summary>
public static Vector128<uint> MultiplyWideningUpperAndSubtract(Vector128<uint> minuend, Vector128<ushort> left, Vector128<ushort> right) => MultiplyWideningUpperAndSubtract(minuend, left, right);
/// <summary>
/// uint64x2_t vmlsl_high_u32 (uint64x2_t a, uint32x4_t b, uint32x4_t c)
/// A32: VMLSL.U32 Qd, Dn+1, Dm+1
/// A64: UMLSL2 Vd.2D, Vn.4S, Vm.4S
/// </summary>
public static Vector128<ulong> MultiplyWideningUpperAndSubtract(Vector128<ulong> minuend, Vector128<uint> left, Vector128<uint> right) => MultiplyWideningUpperAndSubtract(minuend, left, right);
/// <summary>
/// int16x4_t vneg_s16 (int16x4_t a)
/// A32: VNEG.S16 Dd, Dm
/// A64: NEG Vd.4H, Vn.4H
/// </summary>
public static Vector64<short> Negate(Vector64<short> value) => Negate(value);
/// <summary>
/// int32x2_t vneg_s32 (int32x2_t a)
/// A32: VNEG.S32 Dd, Dm
/// A64: NEG Vd.2S, Vn.2S
/// </summary>
public static Vector64<int> Negate(Vector64<int> value) => Negate(value);
/// <summary>
/// int8x8_t vneg_s8 (int8x8_t a)
/// A32: VNEG.S8 Dd, Dm
/// A64: NEG Vd.8B, Vn.8B
/// </summary>
public static Vector64<sbyte> Negate(Vector64<sbyte> value) => Negate(value);
/// <summary>
/// float32x2_t vneg_f32 (float32x2_t a)
/// A32: VNEG.F32 Dd, Dm
/// A64: FNEG Vd.2S, Vn.2S
/// </summary>
public static Vector64<float> Negate(Vector64<float> value) => Negate(value);
/// <summary>
/// int16x8_t vnegq_s16 (int16x8_t a)
/// A32: VNEG.S16 Qd, Qm
/// A64: NEG Vd.8H, Vn.8H
/// </summary>
public static Vector128<short> Negate(Vector128<short> value) => Negate(value);
/// <summary>
/// int32x4_t vnegq_s32 (int32x4_t a)
/// A32: VNEG.S32 Qd, Qm
/// A64: NEG Vd.4S, Vn.4S
/// </summary>
public static Vector128<int> Negate(Vector128<int> value) => Negate(value);
/// <summary>
/// int8x16_t vnegq_s8 (int8x16_t a)
/// A32: VNEG.S8 Qd, Qm
/// A64: NEG Vd.16B, Vn.16B
/// </summary>
public static Vector128<sbyte> Negate(Vector128<sbyte> value) => Negate(value);
/// <summary>
/// float32x4_t vnegq_f32 (float32x4_t a)
/// A32: VNEG.F32 Qd, Qm
/// A64: FNEG Vd.4S, Vn.4S
/// </summary>
public static Vector128<float> Negate(Vector128<float> value) => Negate(value);
/// <summary>
/// int16x4_t vqneg_s16 (int16x4_t a)
/// A32: VQNEG.S16 Dd, Dm
/// A64: SQNEG Vd.4H, Vn.4H
/// </summary>
public static Vector64<short> NegateSaturate(Vector64<short> value) => NegateSaturate(value);
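// Illustrative sketch (not from the original source): NegateSaturate differs from Negate only
// at the most negative value — Negate wraps short.MinValue back to itself, while
// NegateSaturate clamps it to short.MaxValue, e.g.
//
//   Vector64<short> v = Vector64.Create(short.MinValue, (short)1, (short)-2, (short)3);
//   Vector64<short> wrapped = AdvSimd.Negate(v);          // lane 0 stays -32768
//   Vector64<short> clamped = AdvSimd.NegateSaturate(v);  // lane 0 becomes 32767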
/// <summary>
/// int32x2_t vqneg_s32 (int32x2_t a)
/// A32: VQNEG.S32 Dd, Dm
/// A64: SQNEG Vd.2S, Vn.2S
/// </summary>
public static Vector64<int> NegateSaturate(Vector64<int> value) => NegateSaturate(value);
/// <summary>
/// int8x8_t vqneg_s8 (int8x8_t a)
/// A32: VQNEG.S8 Dd, Dm
/// A64: SQNEG Vd.8B, Vn.8B
/// </summary>
public static Vector64<sbyte> NegateSaturate(Vector64<sbyte> value) => NegateSaturate(value);
/// <summary>
/// int16x8_t vqnegq_s16 (int16x8_t a)
/// A32: VQNEG.S16 Qd, Qm
/// A64: SQNEG Vd.8H, Vn.8H
/// </summary>
public static Vector128<short> NegateSaturate(Vector128<short> value) => NegateSaturate(value);
/// <summary>
/// int32x4_t vqnegq_s32 (int32x4_t a)
/// A32: VQNEG.S32 Qd, Qm
/// A64: SQNEG Vd.4S, Vn.4S
/// </summary>
public static Vector128<int> NegateSaturate(Vector128<int> value) => NegateSaturate(value);
/// <summary>
/// int8x16_t vqnegq_s8 (int8x16_t a)
/// A32: VQNEG.S8 Qd, Qm
/// A64: SQNEG Vd.16B, Vn.16B
/// </summary>
public static Vector128<sbyte> NegateSaturate(Vector128<sbyte> value) => NegateSaturate(value);
/// <summary>
/// float64x1_t vneg_f64 (float64x1_t a)
/// A32: VNEG.F64 Dd, Dm
/// A64: FNEG Dd, Dn
/// </summary>
public static Vector64<double> NegateScalar(Vector64<double> value) => NegateScalar(value);
/// <summary>
/// float32_t vnegs_f32 (float32_t a)
/// A32: VNEG.F32 Sd, Sm
/// A64: FNEG Sd, Sn
/// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
/// </summary>
public static Vector64<float> NegateScalar(Vector64<float> value) => NegateScalar(value);
/// <summary>
/// uint8x8_t vmvn_u8 (uint8x8_t a)
/// A32: VMVN Dd, Dm
/// A64: MVN Vd.8B, Vn.8B
/// </summary>
public static Vector64<byte> Not(Vector64<byte> value) => Not(value);
/// <summary>
/// float64x1_t vmvn_f64 (float64x1_t a)
/// A32: VMVN Dd, Dm
/// A64: MVN Vd.8B, Vn.8B
/// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
/// </summary>
public static Vector64<double> Not(Vector64<double> value) => Not(value);
/// <summary>
/// int16x4_t vmvn_s16 (int16x4_t a)
/// A32: VMVN Dd, Dm
/// A64: MVN Vd.8B, Vn.8B
/// </summary>
public static Vector64<short> Not(Vector64<short> value) => Not(value);
/// <summary>
/// int32x2_t vmvn_s32 (int32x2_t a)
/// A32: VMVN Dd, Dm
/// A64: MVN Vd.8B, Vn.8B
/// </summary>
public static Vector64<int> Not(Vector64<int> value) => Not(value);
/// <summary>
/// int64x1_t vmvn_s64 (int64x1_t a)
/// A32: VMVN Dd, Dm
/// A64: MVN Vd.8B, Vn.8B
/// </summary>
public static Vector64<long> Not(Vector64<long> value) => Not(value);
/// <summary>
/// int8x8_t vmvn_s8 (int8x8_t a)
/// A32: VMVN Dd, Dm
/// A64: MVN Vd.8B, Vn.8B
/// </summary>
public static Vector64<sbyte> Not(Vector64<sbyte> value) => Not(value);
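// Illustrative note (not from the original source): Not is a plain bitwise complement, so the
// element type only describes how the 64/128 bits are reinterpreted; the floating-point
// overloads exist purely for API symmetry, e.g.
//
//   Vector64<uint> inverted = AdvSimd.Not(Vector64.Create(0xFFFF0000u, 0u)); // 0x0000FFFF, 0xFFFFFFFF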
/// <summary>
/// float32x2_t vmvn_f32 (float32x2_t a)
/// A32: VMVN Dd, Dm
/// A64: MVN Vd.8B, Vn.8B
/// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
/// </summary>
public static Vector64<float> Not(Vector64<float> value) => Not(value);
/// <summary>
/// uint16x4_t vmvn_u16 (uint16x4_t a)
/// A32: VMVN Dd, Dm
/// A64: MVN Vd.8B, Vn.8B
/// </summary>
public static Vector64<ushort> Not(Vector64<ushort> value) => Not(value);
/// <summary>
/// uint32x2_t vmvn_u32 (uint32x2_t a)
/// A32: VMVN Dd, Dm
/// A64: MVN Vd.8B, Vn.8B
/// </summary>
public static Vector64<uint> Not(Vector64<uint> value) => Not(value);
/// <summary>
/// uint64x1_t vmvn_u64 (uint64x1_t a)
/// A32: VMVN Dd, Dm
/// A64: MVN Vd.8B, Vn.8B
/// </summary>
public static Vector64<ulong> Not(Vector64<ulong> value) => Not(value);
/// <summary>
/// uint8x16_t vmvnq_u8 (uint8x16_t a)
/// A32: VMVN Qd, Qm
/// A64: MVN Vd.16B, Vn.16B
/// </summary>
public static Vector128<byte> Not(Vector128<byte> value) => Not(value);
/// <summary>
/// float64x2_t vmvnq_f64 (float64x2_t a)
/// A32: VMVN Qd, Qm
/// A64: MVN Vd.16B, Vn.16B
/// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
/// </summary>
public static Vector128<double> Not(Vector128<double> value) => Not(value);
/// <summary>
/// int16x8_t vmvnq_s16 (int16x8_t a)
/// A32: VMVN Qd, Qm
/// A64: MVN Vd.16B, Vn.16B
/// </summary>
public static Vector128<short> Not(Vector128<short> value) => Not(value);
/// <summary>
/// int32x4_t vmvnq_s32 (int32x4_t a)
/// A32: VMVN Qd, Qm
/// A64: MVN Vd.16B, Vn.16B
/// </summary>
public static Vector128<int> Not(Vector128<int> value) => Not(value);
/// <summary>
/// int64x2_t vmvnq_s64 (int64x2_t a)
/// A32: VMVN Qd, Qm
/// A64: MVN Vd.16B, Vn.16B
/// </summary>
public static Vector128<long> Not(Vector128<long> value) => Not(value);
/// <summary>
/// int8x16_t vmvnq_s8 (int8x16_t a)
/// A32: VMVN Qd, Qm
/// A64: MVN Vd.16B, Vn.16B
/// </summary>
public static Vector128<sbyte> Not(Vector128<sbyte> value) => Not(value);
/// <summary>
/// float32x4_t vmvnq_f32 (float32x4_t a)
/// A32: VMVN Qd, Qm
/// A64: MVN Vd.16B, Vn.16B
/// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
/// </summary>
public static Vector128<float> Not(Vector128<float> value) => Not(value);
/// <summary>
/// uint16x8_t vmvnq_u16 (uint16x8_t a)
/// A32: VMVN Qd, Qm
/// A64: MVN Vd.16B, Vn.16B
/// </summary>
public static Vector128<ushort> Not(Vector128<ushort> value) => Not(value);
/// <summary>
/// uint32x4_t vmvnq_u32 (uint32x4_t a)
/// A32: VMVN Qd, Qm
/// A64: MVN Vd.16B, Vn.16B
/// </summary>
public static Vector128<uint> Not(Vector128<uint> value) => Not(value);
/// <summary>
/// uint64x2_t vmvnq_u64 (uint64x2_t a)
/// A32: VMVN Qd, Qm
/// A64: MVN Vd.16B, Vn.16B
/// </summary>
public static Vector128<ulong> Not(Vector128<ulong> value) => Not(value);
/// <summary>
/// uint8x8_t vorr_u8 (uint8x8_t a, uint8x8_t b)
/// A32: VORR Dd, Dn, Dm
/// A64: ORR Vd.8B, Vn.8B, Vm.8B
/// </summary>
public static Vector64<byte> Or(Vector64<byte> left, Vector64<byte> right) => Or(left, right);
/// <summary>
/// float64x1_t vorr_f64 (float64x1_t a, float64x1_t b)
/// A32: VORR Dd, Dn, Dm
/// A64: ORR Vd.8B, Vn.8B, Vm.8B
/// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
/// </summary>
public static Vector64<double> Or(Vector64<double> left, Vector64<double> right) => Or(left, right);
/// <summary>
/// int16x4_t vorr_s16 (int16x4_t a, int16x4_t b)
/// A32: VORR Dd, Dn, Dm
/// A64: ORR Vd.8B, Vn.8B, Vm.8B
/// </summary>
public static Vector64<short> Or(Vector64<short> left, Vector64<short> right) => Or(left, right);
/// <summary>
/// int32x2_t vorr_s32 (int32x2_t a, int32x2_t b)
/// A32: VORR Dd, Dn, Dm
/// A64: ORR Vd.8B, Vn.8B, Vm.8B
/// </summary>
public static Vector64<int> Or(Vector64<int> left, Vector64<int> right) => Or(left, right);
/// <summary>
/// int64x1_t vorr_s64 (int64x1_t a, int64x1_t b)
/// A32: VORR Dd, Dn, Dm
/// A64: ORR Vd.8B, Vn.8B, Vm.8B
/// </summary>
public static Vector64<long> Or(Vector64<long> left, Vector64<long> right) => Or(left, right);
/// <summary>
/// int8x8_t vorr_s8 (int8x8_t a, int8x8_t b)
/// A32: VORR Dd, Dn, Dm
/// A64: ORR Vd.8B, Vn.8B, Vm.8B
/// </summary>
public static Vector64<sbyte> Or(Vector64<sbyte> left, Vector64<sbyte> right) => Or(left, right);
/// <summary>
/// float32x2_t vorr_f32 (float32x2_t a, float32x2_t b)
/// A32: VORR Dd, Dn, Dm
/// A64: ORR Vd.8B, Vn.8B, Vm.8B
/// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
/// </summary>
public static Vector64<float> Or(Vector64<float> left, Vector64<float> right) => Or(left, right);
/// <summary>
/// uint16x4_t vorr_u16 (uint16x4_t a, uint16x4_t b)
/// A32: VORR Dd, Dn, Dm
/// A64: ORR Vd.8B, Vn.8B, Vm.8B
/// </summary>
public static Vector64<ushort> Or(Vector64<ushort> left, Vector64<ushort> right) => Or(left, right);
/// <summary>
/// uint32x2_t vorr_u32 (uint32x2_t a, uint32x2_t b)
/// A32: VORR Dd, Dn, Dm
/// A64: ORR Vd.8B, Vn.8B, Vm.8B
/// </summary>
public static Vector64<uint> Or(Vector64<uint> left, Vector64<uint> right) => Or(left, right);
/// <summary>
/// uint64x1_t vorr_u64 (uint64x1_t a, uint64x1_t b)
/// A32: VORR Dd, Dn, Dm
/// A64: ORR Vd.8B, Vn.8B, Vm.8B
/// </summary>
public static Vector64<ulong> Or(Vector64<ulong> left, Vector64<ulong> right) => Or(left, right);
/// <summary>
/// uint8x16_t vorrq_u8 (uint8x16_t a, uint8x16_t b)
/// A32: VORR Qd, Qn, Qm
/// A64: ORR Vd.16B, Vn.16B, Vm.16B
/// </summary>
public static Vector128<byte> Or(Vector128<byte> left, Vector128<byte> right) => Or(left, right);
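// Illustrative sketch (assumed inputs; not from the original source): Or is typically used to
// combine comparison masks, selecting lanes that satisfy either predicate, e.g.
//
//   Vector128<byte> outOfRange = AdvSimd.Or(AdvSimd.CompareLessThan(x, lo),
//                                           AdvSimd.CompareGreaterThan(x, hi));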
/// <summary>
/// float64x2_t vorrq_f64 (float64x2_t a, float64x2_t b)
/// A32: VORR Qd, Qn, Qm
/// A64: ORR Vd.16B, Vn.16B, Vm.16B
/// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
/// </summary>
public static Vector128<double> Or(Vector128<double> left, Vector128<double> right) => Or(left, right);
/// <summary>
/// int16x8_t vorrq_s16 (int16x8_t a, int16x8_t b)
/// A32: VORR Qd, Qn, Qm
/// A64: ORR Vd.16B, Vn.16B, Vm.16B
/// </summary>
public static Vector128<short> Or(Vector128<short> left, Vector128<short> right) => Or(left, right);
/// <summary>
/// int32x4_t vorrq_s32 (int32x4_t a, int32x4_t b)
/// A32: VORR Qd, Qn, Qm
/// A64: ORR Vd.16B, Vn.16B, Vm.16B
/// </summary>
public static Vector128<int> Or(Vector128<int> left, Vector128<int> right) => Or(left, right);
/// <summary>
/// int64x2_t vorrq_s64 (int64x2_t a, int64x2_t b)
/// A32: VORR Qd, Qn, Qm
/// A64: ORR Vd.16B, Vn.16B, Vm.16B
/// </summary>
public static Vector128<long> Or(Vector128<long> left, Vector128<long> right) => Or(left, right);
/// <summary>
/// int8x16_t vorrq_s8 (int8x16_t a, int8x16_t b)
/// A32: VORR Qd, Qn, Qm
/// A64: ORR Vd.16B, Vn.16B, Vm.16B
/// </summary>
public static Vector128<sbyte> Or(Vector128<sbyte> left, Vector128<sbyte> right) => Or(left, right);
/// <summary>
/// float32x4_t vorrq_f32 (float32x4_t a, float32x4_t b)
/// A32: VORR Qd, Qn, Qm
/// A64: ORR Vd.16B, Vn.16B, Vm.16B
/// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
/// </summary>
public static Vector128<float> Or(Vector128<float> left, Vector128<float> right) => Or(left, right);
/// <summary>
/// uint16x8_t vorrq_u16 (uint16x8_t a, uint16x8_t b)
/// A32: VORR Qd, Qn, Qm
/// A64: ORR Vd.16B, Vn.16B, Vm.16B
/// </summary>
public static Vector128<ushort> Or(Vector128<ushort> left, Vector128<ushort> right) => Or(left, right);
/// <summary>
/// uint32x4_t vorrq_u32 (uint32x4_t a, uint32x4_t b)
/// A32: VORR Qd, Qn, Qm
/// A64: ORR Vd.16B, Vn.16B, Vm.16B
/// </summary>
public static Vector128<uint> Or(Vector128<uint> left, Vector128<uint> right) => Or(left, right);
/// <summary>
/// uint64x2_t vorrq_u64 (uint64x2_t a, uint64x2_t b)
/// A32: VORR Qd, Qn, Qm
/// A64: ORR Vd.16B, Vn.16B, Vm.16B
/// </summary>
public static Vector128<ulong> Or(Vector128<ulong> left, Vector128<ulong> right) => Or(left, right);
/// <summary>
/// uint8x8_t vorn_u8 (uint8x8_t a, uint8x8_t b)
/// A32: VORN Dd, Dn, Dm
/// A64: ORN Vd.8B, Vn.8B, Vm.8B
/// </summary>
public static Vector64<byte> OrNot(Vector64<byte> left, Vector64<byte> right) => OrNot(left, right);
/// <summary>
/// float64x1_t vorn_f64 (float64x1_t a, float64x1_t b)
/// A32: VORN Dd, Dn, Dm
/// A64: ORN Vd.8B, Vn.8B, Vm.8B
/// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
/// </summary>
public static Vector64<double> OrNot(Vector64<double> left, Vector64<double> right) => OrNot(left, right);
/// <summary>
/// int16x4_t vorn_s16 (int16x4_t a, int16x4_t b)
/// A32: VORN Dd, Dn, Dm
/// A64: ORN Vd.8B, Vn.8B, Vm.8B
/// </summary>
public static Vector64<short> OrNot(Vector64<short> left, Vector64<short> right) => OrNot(left, right);
/// <summary>
/// int32x2_t vorn_s32 (int32x2_t a, int32x2_t b)
/// A32: VORN Dd, Dn, Dm
/// A64: ORN Vd.8B, Vn.8B, Vm.8B
/// </summary>
public static Vector64<int> OrNot(Vector64<int> left, Vector64<int> right) => OrNot(left, right);
/// <summary>
/// int64x1_t vorn_s64 (int64x1_t a, int64x1_t b)
/// A32: VORN Dd, Dn, Dm
/// A64: ORN Vd.8B, Vn.8B, Vm.8B
/// </summary>
public static Vector64<long> OrNot(Vector64<long> left, Vector64<long> right) => OrNot(left, right);
/// <summary>
/// int8x8_t vorn_s8 (int8x8_t a, int8x8_t b)
/// A32: VORN Dd, Dn, Dm
/// A64: ORN Vd.8B, Vn.8B, Vm.8B
/// </summary>
public static Vector64<sbyte> OrNot(Vector64<sbyte> left, Vector64<sbyte> right) => OrNot(left, right);
/// <summary>
/// float32x2_t vorn_f32 (float32x2_t a, float32x2_t b)
/// A32: VORN Dd, Dn, Dm
/// A64: ORN Vd.8B, Vn.8B, Vm.8B
/// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
/// </summary>
public static Vector64<float> OrNot(Vector64<float> left, Vector64<float> right) => OrNot(left, right);
/// <summary>
/// uint16x4_t vorn_u16 (uint16x4_t a, uint16x4_t b)
/// A32: VORN Dd, Dn, Dm
/// A64: ORN Vd.8B, Vn.8B, Vm.8B
/// </summary>
public static Vector64<ushort> OrNot(Vector64<ushort> left, Vector64<ushort> right) => OrNot(left, right);
/// <summary>
/// uint32x2_t vorn_u32 (uint32x2_t a, uint32x2_t b)
/// A32: VORN Dd, Dn, Dm
/// A64: ORN Vd.8B, Vn.8B, Vm.8B
/// </summary>
public static Vector64<uint> OrNot(Vector64<uint> left, Vector64<uint> right) => OrNot(left, right);
/// <summary>
/// uint64x1_t vorn_u64 (uint64x1_t a, uint64x1_t b)
/// A32: VORN Dd, Dn, Dm
/// A64: ORN Vd.8B, Vn.8B, Vm.8B
/// </summary>
public static Vector64<ulong> OrNot(Vector64<ulong> left, Vector64<ulong> right) => OrNot(left, right);
/// <summary>
/// uint8x16_t vornq_u8 (uint8x16_t a, uint8x16_t b)
/// A32: VORN Qd, Qn, Qm
/// A64: ORN Vd.16B, Vn.16B, Vm.16B
/// </summary>
public static Vector128<byte> OrNot(Vector128<byte> left, Vector128<byte> right) => OrNot(left, right);
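// Illustrative note (not from the original source): OrNot computes left | ~right in a single
// ORN instruction, saving the separate Not that the equivalent two-step form would need, e.g.
//
//   Vector64<uint> combined = AdvSimd.OrNot(flags, disabledMask); // flags | ~disabledMask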
/// <summary>
/// float64x2_t vornq_f64 (float64x2_t a, float64x2_t b)
/// A32: VORN Qd, Qn, Qm
/// A64: ORN Vd.16B, Vn.16B, Vm.16B
/// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
/// </summary>
public static Vector128<double> OrNot(Vector128<double> left, Vector128<double> right) => OrNot(left, right);
/// <summary>
/// int16x8_t vornq_s16 (int16x8_t a, int16x8_t b)
/// A32: VORN Qd, Qn, Qm
/// A64: ORN Vd.16B, Vn.16B, Vm.16B
/// </summary>
public static Vector128<short> OrNot(Vector128<short> left, Vector128<short> right) => OrNot(left, right);
/// <summary>
/// int32x4_t vornq_s32 (int32x4_t a, int32x4_t b)
/// A32: VORN Qd, Qn, Qm
/// A64: ORN Vd.16B, Vn.16B, Vm.16B
/// </summary>
public static Vector128<int> OrNot(Vector128<int> left, Vector128<int> right) => OrNot(left, right);
/// <summary>
/// int64x2_t vornq_s64 (int64x2_t a, int64x2_t b)
/// A32: VORN Qd, Qn, Qm
/// A64: ORN Vd.16B, Vn.16B, Vm.16B
/// </summary>
public static Vector128<long> OrNot(Vector128<long> left, Vector128<long> right) => OrNot(left, right);
/// <summary>
/// int8x16_t vornq_s8 (int8x16_t a, int8x16_t b)
/// A32: VORN Qd, Qn, Qm
/// A64: ORN Vd.16B, Vn.16B, Vm.16B
/// </summary>
public static Vector128<sbyte> OrNot(Vector128<sbyte> left, Vector128<sbyte> right) => OrNot(left, right);
/// <summary>
/// float32x4_t vornq_f32 (float32x4_t a, float32x4_t b)
/// A32: VORN Qd, Qn, Qm
/// A64: ORN Vd.16B, Vn.16B, Vm.16B
/// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
/// </summary>
public static Vector128<float> OrNot(Vector128<float> left, Vector128<float> right) => OrNot(left, right);
/// <summary>
/// uint16x8_t vornq_u16 (uint16x8_t a, uint16x8_t b)
/// A32: VORN Qd, Qn, Qm
/// A64: ORN Vd.16B, Vn.16B, Vm.16B
/// </summary>
public static Vector128<ushort> OrNot(Vector128<ushort> left, Vector128<ushort> right) => OrNot(left, right);
/// <summary>
/// uint32x4_t vornq_u32 (uint32x4_t a, uint32x4_t b)
/// A32: VORN Qd, Qn, Qm
/// A64: ORN Vd.16B, Vn.16B, Vm.16B
/// </summary>
public static Vector128<uint> OrNot(Vector128<uint> left, Vector128<uint> right) => OrNot(left, right);
/// <summary>
/// uint64x2_t vornq_u64 (uint64x2_t a, uint64x2_t b)
/// A32: VORN Qd, Qn, Qm
/// A64: ORN Vd.16B, Vn.16B, Vm.16B
/// </summary>
public static Vector128<ulong> OrNot(Vector128<ulong> left, Vector128<ulong> right) => OrNot(left, right);
/// <summary>
/// poly8x8_t vmul_p8 (poly8x8_t a, poly8x8_t b)
/// A32: VMUL.P8 Dd, Dn, Dm
/// A64: PMUL Vd.8B, Vn.8B, Vm.8B
/// </summary>
public static Vector64<byte> PolynomialMultiply(Vector64<byte> left, Vector64<byte> right) => PolynomialMultiply(left, right);
/// <summary>
/// poly8x8_t vmul_p8 (poly8x8_t a, poly8x8_t b)
/// A32: VMUL.P8 Dd, Dn, Dm
/// A64: PMUL Vd.8B, Vn.8B, Vm.8B
/// </summary>
public static Vector64<sbyte> PolynomialMultiply(Vector64<sbyte> left, Vector64<sbyte> right) => PolynomialMultiply(left, right);
/// <summary>
/// poly8x16_t vmulq_p8 (poly8x16_t a, poly8x16_t b)
/// A32: VMUL.P8 Qd, Qn, Qm
/// A64: PMUL Vd.16B, Vn.16B, Vm.16B
/// </summary>
public static Vector128<byte> PolynomialMultiply(Vector128<byte> left, Vector128<byte> right) => PolynomialMultiply(left, right);
/// <summary>
/// poly8x16_t vmulq_p8 (poly8x16_t a, poly8x16_t b)
/// A32: VMUL.P8 Qd, Qn, Qm
/// A64: PMUL Vd.16B, Vn.16B, Vm.16B
/// </summary>
public static Vector128<sbyte> PolynomialMultiply(Vector128<sbyte> left, Vector128<sbyte> right) => PolynomialMultiply(left, right);
/// <summary>
/// poly16x8_t vmull_p8 (poly8x8_t a, poly8x8_t b)
/// A32: VMULL.P8 Qd, Dn, Dm
/// A64: PMULL Vd.16B, Vn.8B, Vm.8B
/// </summary>
public static Vector128<ushort> PolynomialMultiplyWideningLower(Vector64<byte> left, Vector64<byte> right) => PolynomialMultiplyWideningLower(left, right);
/// <summary>
/// poly16x8_t vmull_p8 (poly8x8_t a, poly8x8_t b)
/// A32: VMULL.P8 Qd, Dn, Dm
/// A64: PMULL Vd.16B, Vn.8B, Vm.8B
/// </summary>
public static Vector128<short> PolynomialMultiplyWideningLower(Vector64<sbyte> left, Vector64<sbyte> right) => PolynomialMultiplyWideningLower(left, right);
/// <summary>
/// poly16x8_t vmull_high_p8 (poly8x16_t a, poly8x16_t b)
/// A32: VMULL.P8 Qd, Dn+1, Dm+1
/// A64: PMULL2 Vd.16B, Vn.16B, Vm.16B
/// </summary>
public static Vector128<ushort> PolynomialMultiplyWideningUpper(Vector128<byte> left, Vector128<byte> right) => PolynomialMultiplyWideningUpper(left, right);
/// <summary>
/// poly16x8_t vmull_high_p8 (poly8x16_t a, poly8x16_t b)
/// A32: VMULL.P8 Qd, Dn+1, Dm+1
/// A64: PMULL2 Vd.16B, Vn.16B, Vm.16B
/// </summary>
public static Vector128<short> PolynomialMultiplyWideningUpper(Vector128<sbyte> left, Vector128<sbyte> right) => PolynomialMultiplyWideningUpper(left, right);
/// <summary>
/// uint8x8_t vcnt_u8 (uint8x8_t a)
/// A32: VCNT.I8 Dd, Dm
/// A64: CNT Vd.8B, Vn.8B
/// </summary>
public static Vector64<byte> PopCount(Vector64<byte> value) => PopCount(value);
/// <summary>
/// int8x8_t vcnt_s8 (int8x8_t a)
/// A32: VCNT.I8 Dd, Dm
/// A64: CNT Vd.8B, Vn.8B
/// </summary>
public static Vector64<sbyte> PopCount(Vector64<sbyte> value) => PopCount(value);
/// <summary>
/// uint8x16_t vcntq_u8 (uint8x16_t a)
/// A32: VCNT.I8 Qd, Qm
/// A64: CNT Vd.16B, Vn.16B
/// </summary>
public static Vector128<byte> PopCount(Vector128<byte> value) => PopCount(value);
/// <summary>
/// int8x16_t vcntq_s8 (int8x16_t a)
/// A32: VCNT.I8 Qd, Qm
/// A64: CNT Vd.16B, Vn.16B
/// </summary>
public static Vector128<sbyte> PopCount(Vector128<sbyte> value) => PopCount(value);
/// <summary>
/// float32x2_t vrecpe_f32 (float32x2_t a)
/// A32: VRECPE.F32 Dd, Dm
/// A64: FRECPE Vd.2S, Vn.2S
/// </summary>
public static Vector64<float> ReciprocalEstimate(Vector64<float> value) => ReciprocalEstimate(value);
/// <summary>
/// uint32x2_t vrecpe_u32 (uint32x2_t a)
/// A32: VRECPE.U32 Dd, Dm
/// A64: URECPE Vd.2S, Vn.2S
/// </summary>
public static Vector64<uint> ReciprocalEstimate(Vector64<uint> value) => ReciprocalEstimate(value);
/// <summary>
/// float32x4_t vrecpeq_f32 (float32x4_t a)
/// A32: VRECPE.F32 Qd, Qm
/// A64: FRECPE Vd.4S, Vn.4S
/// </summary>
public static Vector128<float> ReciprocalEstimate(Vector128<float> value) => ReciprocalEstimate(value);
/// <summary>
/// uint32x4_t vrecpeq_u32 (uint32x4_t a)
/// A32: VRECPE.U32 Qd, Qm
/// A64: URECPE Vd.4S, Vn.4S
/// </summary>
public static Vector128<uint> ReciprocalEstimate(Vector128<uint> value) => ReciprocalEstimate(value);
/// <summary>
/// float32x2_t vrsqrte_f32 (float32x2_t a)
/// A32: VRSQRTE.F32 Dd, Dm
/// A64: FRSQRTE Vd.2S, Vn.2S
/// </summary>
public static Vector64<float> ReciprocalSquareRootEstimate(Vector64<float> value) => ReciprocalSquareRootEstimate(value);
/// <summary>
/// uint32x2_t vrsqrte_u32 (uint32x2_t a)
/// A32: VRSQRTE.U32 Dd, Dm
/// A64: URSQRTE Vd.2S, Vn.2S
/// </summary>
public static Vector64<uint> ReciprocalSquareRootEstimate(Vector64<uint> value) => ReciprocalSquareRootEstimate(value);
/// <summary>
/// float32x4_t vrsqrteq_f32 (float32x4_t a)
/// A32: VRSQRTE.F32 Qd, Qm
/// A64: FRSQRTE Vd.4S, Vn.4S
/// </summary>
public static Vector128<float> ReciprocalSquareRootEstimate(Vector128<float> value) => ReciprocalSquareRootEstimate(value);
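// Illustrative sketch (not from the original source): PopCount works per 8-bit lane, so a
// whole-vector bit count needs a horizontal sum of the lane counts afterwards, e.g.
//
//   Vector128<byte> laneCounts = AdvSimd.PopCount(bits.AsByte()); // one count per byte lane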
/// <summary>
/// uint32x4_t vrsqrteq_u32 (uint32x4_t a)
/// A32: VRSQRTE.U32 Qd, Qm
/// A64: URSQRTE Vd.4S, Vn.4S
/// </summary>
public static Vector128<uint> ReciprocalSquareRootEstimate(Vector128<uint> value) => ReciprocalSquareRootEstimate(value);
/// <summary>
/// float32x2_t vrsqrts_f32 (float32x2_t a, float32x2_t b)
/// A32: VRSQRTS.F32 Dd, Dn, Dm
/// A64: FRSQRTS Vd.2S, Vn.2S, Vm.2S
/// </summary>
public static Vector64<float> ReciprocalSquareRootStep(Vector64<float> left, Vector64<float> right) => ReciprocalSquareRootStep(left, right);
/// <summary>
/// float32x4_t vrsqrtsq_f32 (float32x4_t a, float32x4_t b)
/// A32: VRSQRTS.F32 Qd, Qn, Qm
/// A64: FRSQRTS Vd.4S, Vn.4S, Vm.4S
/// </summary>
public static Vector128<float> ReciprocalSquareRootStep(Vector128<float> left, Vector128<float> right) => ReciprocalSquareRootStep(left, right);
/// <summary>
/// float32x2_t vrecps_f32 (float32x2_t a, float32x2_t b)
/// A32: VRECPS.F32 Dd, Dn, Dm
/// A64: FRECPS Vd.2S, Vn.2S, Vm.2S
/// </summary>
public static Vector64<float> ReciprocalStep(Vector64<float> left, Vector64<float> right) => ReciprocalStep(left, right);
/// <summary>
/// float32x4_t vrecpsq_f32 (float32x4_t a, float32x4_t b)
/// A32: VRECPS.F32 Qd, Qn, Qm
/// A64: FRECPS Vd.4S, Vn.4S, Vm.4S
/// </summary>
public static Vector128<float> ReciprocalStep(Vector128<float> left, Vector128<float> right) => ReciprocalStep(left, right);
/// <summary>
/// int16x4_t vrev32_s16 (int16x4_t vec)
/// A32: VREV32.16 Dd, Dm
/// A64: REV32 Vd.4H, Vn.4H
/// </summary>
public static Vector64<int> ReverseElement16(Vector64<int> value) => ReverseElement16(value);
/// <summary>
/// int16x4_t vrev64_s16 (int16x4_t vec)
/// A32: VREV64.16 Dd, Dm
/// A64: REV64 Vd.4H, Vn.4H
/// </summary>
public static Vector64<long> ReverseElement16(Vector64<long> value) => ReverseElement16(value);
/// <summary>
/// uint16x4_t vrev32_u16 (uint16x4_t vec)
/// A32: VREV32.16 Dd, Dm
/// A64: REV32 Vd.4H, Vn.4H
/// </summary>
public static Vector64<uint> ReverseElement16(Vector64<uint> value) => ReverseElement16(value);
/// <summary>
/// uint16x4_t vrev64_u16 (uint16x4_t vec)
/// A32: VREV64.16 Dd, Dm
/// A64: REV64 Vd.4H, Vn.4H
/// </summary>
public static Vector64<ulong> ReverseElement16(Vector64<ulong> value) => ReverseElement16(value);
/// <summary>
/// int16x8_t vrev32q_s16 (int16x8_t vec)
/// A32: VREV32.16 Qd, Qm
/// A64: REV32 Vd.8H, Vn.8H
/// </summary>
public static Vector128<int> ReverseElement16(Vector128<int> value) => ReverseElement16(value);
/// <summary>
/// int16x8_t vrev64q_s16 (int16x8_t vec)
/// A32: VREV64.16 Qd, Qm
/// A64: REV64 Vd.8H, Vn.8H
/// </summary>
public static Vector128<long> ReverseElement16(Vector128<long> value) => ReverseElement16(value);
/// <summary>
/// uint16x8_t vrev32q_u16 (uint16x8_t vec)
/// A32: VREV32.16 Qd, Qm
/// A64: REV32 Vd.8H, Vn.8H
/// </summary>
public static Vector128<uint> ReverseElement16(Vector128<uint> value) => ReverseElement16(value);
/// <summary>
/// uint16x8_t vrev64q_u16 (uint16x8_t vec)
/// A32: VREV64.16 Qd, Qm
/// A64: REV64 Vd.8H, Vn.8H
/// </summary>
public static Vector128<ulong> ReverseElement16(Vector128<ulong> value) => ReverseElement16(value);
/// <summary>
/// int32x2_t vrev64_s32 (int32x2_t vec)
/// A32: VREV64.32 Dd, Dm
/// A64: REV64 Vd.2S, Vn.2S
/// </summary>
public static Vector64<long> ReverseElement32(Vector64<long> value) => ReverseElement32(value);
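// Illustrative sketch (not from the original source): ReciprocalEstimate gives only a rough
// starting value; each Multiply/ReciprocalStep pair performs one Newton-Raphson refinement
// (FRECPS computes 2 - left * right), roughly doubling the number of accurate bits, e.g.
//
//   Vector128<float> r = AdvSimd.ReciprocalEstimate(v);
//   r = AdvSimd.Multiply(r, AdvSimd.ReciprocalStep(r, v)); // r *= (2 - r * v)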
/// <summary>
/// uint32x2_t vrev64_u32 (uint32x2_t vec)
/// A32: VREV64.32 Dd, Dm
/// A64: REV64 Vd.2S, Vn.2S
/// </summary>
public static Vector64<ulong> ReverseElement32(Vector64<ulong> value) => ReverseElement32(value);
/// <summary>
/// int32x4_t vrev64q_s32 (int32x4_t vec)
/// A32: VREV64.32 Qd, Qm
/// A64: REV64 Vd.4S, Vn.4S
/// </summary>
public static Vector128<long> ReverseElement32(Vector128<long> value) => ReverseElement32(value);
/// <summary>
/// uint32x4_t vrev64q_u32 (uint32x4_t vec)
/// A32: VREV64.32 Qd, Qm
/// A64: REV64 Vd.4S, Vn.4S
/// </summary>
public static Vector128<ulong> ReverseElement32(Vector128<ulong> value) => ReverseElement32(value);
/// <summary>
/// int8x8_t vrev16_s8 (int8x8_t vec)
/// A32: VREV16.8 Dd, Dm
/// A64: REV16 Vd.8B, Vn.8B
/// </summary>
public static Vector64<short> ReverseElement8(Vector64<short> value) => ReverseElement8(value);
/// <summary>
/// int8x8_t vrev32_s8 (int8x8_t vec)
/// A32: VREV32.8 Dd, Dm
/// A64: REV32 Vd.8B, Vn.8B
/// </summary>
public static Vector64<int> ReverseElement8(Vector64<int> value) => ReverseElement8(value);
/// <summary>
/// int8x8_t vrev64_s8 (int8x8_t vec)
/// A32: VREV64.8 Dd, Dm
/// A64: REV64 Vd.8B, Vn.8B
/// </summary>
public static Vector64<long> ReverseElement8(Vector64<long> value) => ReverseElement8(value);
/// <summary>
/// uint8x8_t vrev16_u8 (uint8x8_t vec)
/// A32: VREV16.8 Dd, Dm
/// A64: REV16 Vd.8B, Vn.8B
/// </summary>
public static Vector64<ushort> ReverseElement8(Vector64<ushort> value) => ReverseElement8(value);
/// <summary>
/// uint8x8_t vrev32_u8 (uint8x8_t vec)
/// A32: VREV32.8 Dd, Dm
/// A64: REV32 Vd.8B, Vn.8B
/// </summary>
public static Vector64<uint> ReverseElement8(Vector64<uint> value) => ReverseElement8(value);
/// <summary>
/// uint8x8_t vrev64_u8 (uint8x8_t vec)
/// A32: VREV64.8 Dd, Dm
/// A64: REV64 Vd.8B, Vn.8B
/// </summary>
public static Vector64<ulong> ReverseElement8(Vector64<ulong> value) => ReverseElement8(value);
/// <summary>
/// int8x16_t vrev16q_s8 (int8x16_t vec)
/// A32: VREV16.8 Qd, Qm
/// A64: REV16 Vd.16B, Vn.16B
/// </summary>
public static Vector128<short> ReverseElement8(Vector128<short> value) => ReverseElement8(value);
/// <summary>
/// int8x16_t vrev32q_s8 (int8x16_t vec)
/// A32: VREV32.8 Qd, Qm
/// A64: REV32 Vd.16B, Vn.16B
/// </summary>
public static Vector128<int> ReverseElement8(Vector128<int> value) => ReverseElement8(value);
/// <summary>
/// int8x16_t vrev64q_s8 (int8x16_t vec)
/// A32: VREV64.8 Qd, Qm
/// A64: REV64 Vd.16B, Vn.16B
/// </summary>
public static Vector128<long> ReverseElement8(Vector128<long> value) => ReverseElement8(value);
/// <summary>
/// uint8x16_t vrev16q_u8 (uint8x16_t vec)
/// A32: VREV16.8 Qd, Qm
/// A64: REV16 Vd.16B, Vn.16B
/// </summary>
public static Vector128<ushort> ReverseElement8(Vector128<ushort> value) => ReverseElement8(value);
/// <summary>
/// uint8x16_t vrev32q_u8 (uint8x16_t vec)
/// A32: VREV32.8 Qd, Qm
/// A64: REV32 Vd.16B, Vn.16B
/// </summary>
public static Vector128<uint> ReverseElement8(Vector128<uint> value) => ReverseElement8(value);
/// <summary>
/// uint8x16_t vrev64q_u8 (uint8x16_t vec)
/// A32: VREV64.8 Qd, Qm
/// A64: REV64 Vd.16B, Vn.16B
/// </summary>
public static Vector128<ulong> ReverseElement8(Vector128<ulong> value) => ReverseElement8(value);
/// <summary>
/// float32x2_t vrnda_f32 (float32x2_t a)
/// A32: VRINTA.F32 Dd, Dm
/// A64: FRINTA Vd.2S, Vn.2S
/// </summary>
public static Vector64<float> RoundAwayFromZero(Vector64<float> value) => RoundAwayFromZero(value);
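// Illustrative sketch (not from the original source): the element type selects the reversal
// width, so ReverseElement8 on a vector viewed as int swaps the bytes inside every 32-bit
// lane — a per-lane endianness conversion, e.g.
//
//   Vector64<int> littleEndianWords = AdvSimd.ReverseElement8(bigEndianWords);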
/// <summary>
/// float32x4_t vrndaq_f32 (float32x4_t a)
/// A32: VRINTA.F32 Qd, Qm
/// A64: FRINTA Vd.4S, Vn.4S
/// </summary>
public static Vector128<float> RoundAwayFromZero(Vector128<float> value) => RoundAwayFromZero(value);
/// <summary>
/// float64x1_t vrnda_f64 (float64x1_t a)
/// A32: VRINTA.F64 Dd, Dm
/// A64: FRINTA Dd, Dn
/// </summary>
public static Vector64<double> RoundAwayFromZeroScalar(Vector64<double> value) => RoundAwayFromZeroScalar(value);
/// <summary>
/// float32_t vrndas_f32 (float32_t a)
/// A32: VRINTA.F32 Sd, Sm
/// A64: FRINTA Sd, Sn
/// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
/// </summary>
public static Vector64<float> RoundAwayFromZeroScalar(Vector64<float> value) => RoundAwayFromZeroScalar(value);
/// <summary>
/// float32x2_t vrndn_f32 (float32x2_t a)
/// A32: VRINTN.F32 Dd, Dm
/// A64: FRINTN Vd.2S, Vn.2S
/// </summary>
public static Vector64<float> RoundToNearest(Vector64<float> value) => RoundToNearest(value);
/// <summary>
/// float32x4_t vrndnq_f32 (float32x4_t a)
/// A32: VRINTN.F32 Qd, Qm
/// A64: FRINTN Vd.4S, Vn.4S
/// </summary>
public static Vector128<float> RoundToNearest(Vector128<float> value) => RoundToNearest(value);
/// <summary>
/// float64x1_t vrndn_f64 (float64x1_t a)
/// A32: VRINTN.F64 Dd, Dm
/// A64: FRINTN Dd, Dn
/// </summary>
public static Vector64<double> RoundToNearestScalar(Vector64<double> value) => RoundToNearestScalar(value);
/// <summary>
/// float32_t vrndns_f32 (float32_t a)
/// A32: VRINTN.F32 Sd, Sm
/// A64: FRINTN Sd, Sn
/// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
/// </summary>
public static Vector64<float> RoundToNearestScalar(Vector64<float> value) => RoundToNearestScalar(value);
/// <summary>
/// float32x2_t vrndm_f32 (float32x2_t a)
/// A32: VRINTM.F32 Dd, Dm
/// A64: FRINTM Vd.2S, Vn.2S
/// </summary>
public static Vector64<float> RoundToNegativeInfinity(Vector64<float> value) => RoundToNegativeInfinity(value);
/// <summary>
/// float32x4_t vrndmq_f32 (float32x4_t a)
/// A32: VRINTM.F32 Qd, Qm
/// A64: FRINTM Vd.4S, Vn.4S
/// </summary>
public static Vector128<float> RoundToNegativeInfinity(Vector128<float> value) => RoundToNegativeInfinity(value);
/// <summary>
/// float64x1_t vrndm_f64 (float64x1_t a)
/// A32: VRINTM.F64 Dd, Dm
/// A64: FRINTM Dd, Dn
/// </summary>
public static Vector64<double> RoundToNegativeInfinityScalar(Vector64<double> value) => RoundToNegativeInfinityScalar(value);
/// <summary>
/// float32_t vrndms_f32 (float32_t a)
/// A32: VRINTM.F32 Sd, Sm
/// A64: FRINTM Sd, Sn
/// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
/// </summary>
public static Vector64<float> RoundToNegativeInfinityScalar(Vector64<float> value) => RoundToNegativeInfinityScalar(value);
/// <summary>
/// float32x2_t vrndp_f32 (float32x2_t a)
/// A32: VRINTP.F32 Dd, Dm
/// A64: FRINTP Vd.2S, Vn.2S
/// </summary>
public static Vector64<float> RoundToPositiveInfinity(Vector64<float> value) => RoundToPositiveInfinity(value);
/// <summary>
/// float32x4_t vrndpq_f32 (float32x4_t a)
/// A32: VRINTP.F32 Qd, Qm
/// A64: FRINTP Vd.4S, Vn.4S
/// </summary>
public static Vector128<float> RoundToPositiveInfinity(Vector128<float> value) => RoundToPositiveInfinity(value);
/// <summary>
/// float64x1_t vrndp_f64 (float64x1_t a)
/// A32: VRINTP.F64 Dd, Dm
/// A64: FRINTP Dd, Dn
/// </summary>
public static Vector64<double> RoundToPositiveInfinityScalar(Vector64<double> value) => RoundToPositiveInfinityScalar(value);
/// <summary>
/// float32_t vrndps_f32 (float32_t a)
/// A32: VRINTP.F32 Sd, Sm
/// A64: FRINTP Sd, Sn
/// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
/// </summary>
public static Vector64<float> RoundToPositiveInfinityScalar(Vector64<float> value) => RoundToPositiveInfinityScalar(value);
/// <summary>
/// float32x2_t vrnd_f32 (float32x2_t a)
/// A32: VRINTZ.F32 Dd, Dm
/// A64: FRINTZ Vd.2S, Vn.2S
/// </summary>
public static Vector64<float> RoundToZero(Vector64<float> value) => RoundToZero(value);
/// <summary>
/// float32x4_t vrndq_f32 (float32x4_t a)
/// A32: VRINTZ.F32 Qd, Qm
/// A64: FRINTZ Vd.4S, Vn.4S
/// </summary>
public static Vector128<float> RoundToZero(Vector128<float> value) => RoundToZero(value);
/// <summary>
/// float64x1_t vrnd_f64 (float64x1_t a)
/// A32: VRINTZ.F64 Dd, Dm
/// A64: FRINTZ Dd, Dn
/// </summary>
public static Vector64<double> RoundToZeroScalar(Vector64<double> value) => RoundToZeroScalar(value);
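// Illustrative note (not from the original source): these map onto the IEEE 754 rounding
// directions — RoundToNearest is ties-to-even, RoundAwayFromZero rounds to nearest with
// midpoints away from zero, RoundToNegativeInfinity/RoundToPositiveInfinity are per-lane
// floor/ceiling, and RoundToZero truncates, e.g.
//
//   Vector64<float> floors = AdvSimd.RoundToNegativeInfinity(values); // per-lane MathF.Floor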
/// <summary>
/// float32_t vrnds_f32 (float32_t a)
/// A32: VRINTZ.F32 Sd, Sm
/// A64: FRINTZ Sd, Sn
/// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
/// </summary>
public static Vector64<float> RoundToZeroScalar(Vector64<float> value) => RoundToZeroScalar(value);
/// <summary>
/// int16x4_t vshl_s16 (int16x4_t a, int16x4_t b)
/// A32: VSHL.S16 Dd, Dn, Dm
/// A64: SSHL Vd.4H, Vn.4H, Vm.4H
/// </summary>
public static Vector64<short> ShiftArithmetic(Vector64<short> value, Vector64<short> count) => ShiftArithmetic(value, count);
/// <summary>
/// int32x2_t vshl_s32 (int32x2_t a, int32x2_t b)
/// A32: VSHL.S32 Dd, Dn, Dm
/// A64: SSHL Vd.2S, Vn.2S, Vm.2S
/// </summary>
public static Vector64<int> ShiftArithmetic(Vector64<int> value, Vector64<int> count) => ShiftArithmetic(value, count);
/// <summary>
/// int8x8_t vshl_s8 (int8x8_t a, int8x8_t b)
/// A32: VSHL.S8 Dd, Dn, Dm
/// A64: SSHL Vd.8B, Vn.8B, Vm.8B
/// </summary>
public static Vector64<sbyte> ShiftArithmetic(Vector64<sbyte> value, Vector64<sbyte> count) => ShiftArithmetic(value, count);
/// <summary>
/// int16x8_t vshlq_s16 (int16x8_t a, int16x8_t b)
/// A32: VSHL.S16 Qd, Qn, Qm
/// A64: SSHL Vd.8H, Vn.8H, Vm.8H
/// </summary>
public static Vector128<short> ShiftArithmetic(Vector128<short> value, Vector128<short> count) => ShiftArithmetic(value, count);
/// <summary>
/// int32x4_t vshlq_s32 (int32x4_t a, int32x4_t b)
/// A32: VSHL.S32 Qd, Qn, Qm
/// A64: SSHL Vd.4S, Vn.4S, Vm.4S
/// </summary>
public static Vector128<int> ShiftArithmetic(Vector128<int> value, Vector128<int> count) => ShiftArithmetic(value, count);
/// <summary>
/// int64x2_t vshlq_s64 (int64x2_t a, int64x2_t b)
/// A32: VSHL.S64 Qd, Qn, Qm
/// A64: SSHL Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<long> ShiftArithmetic(Vector128<long> value, Vector128<long> count) => ShiftArithmetic(value, count);
/// <summary>
/// int8x16_t vshlq_s8 (int8x16_t a, int8x16_t b)
/// A32: VSHL.S8 Qd, Qn, Qm
/// A64: SSHL Vd.16B, Vn.16B, Vm.16B
/// </summary>
public static Vector128<sbyte> ShiftArithmetic(Vector128<sbyte> value, Vector128<sbyte> count) => ShiftArithmetic(value, count);
/// <summary>
/// int16x4_t vrshl_s16 (int16x4_t a, int16x4_t b)
/// A32: VRSHL.S16 Dd, Dn, Dm
/// A64: SRSHL Vd.4H, Vn.4H, Vm.4H
/// </summary>
public static Vector64<short> ShiftArithmeticRounded(Vector64<short> value, Vector64<short> count) => ShiftArithmeticRounded(value, count);
/// <summary>
/// int32x2_t vrshl_s32 (int32x2_t a, int32x2_t b)
/// A32: VRSHL.S32 Dd, Dn, Dm
/// A64: SRSHL Vd.2S, Vn.2S, Vm.2S
/// </summary>
public static Vector64<int> ShiftArithmeticRounded(Vector64<int> value, Vector64<int> count) => ShiftArithmeticRounded(value, count);
/// <summary>
/// int8x8_t vrshl_s8 (int8x8_t a, int8x8_t b)
/// A32: VRSHL.S8 Dd, Dn, Dm
/// A64: SRSHL Vd.8B, Vn.8B, Vm.8B
/// </summary>
public static Vector64<sbyte> ShiftArithmeticRounded(Vector64<sbyte> value, Vector64<sbyte> count) => ShiftArithmeticRounded(value, count);
/// <summary>
/// int16x8_t vrshlq_s16 (int16x8_t a, int16x8_t b)
/// A32: VRSHL.S16 Qd, Qn, Qm
/// A64: SRSHL Vd.8H, Vn.8H, Vm.8H
/// </summary>
public static Vector128<short> ShiftArithmeticRounded(Vector128<short> value, Vector128<short> count) => ShiftArithmeticRounded(value, count);
/// <summary>
/// int32x4_t vrshlq_s32 (int32x4_t a, int32x4_t b)
/// A32: VRSHL.S32 Qd, Qn, Qm
/// A64: SRSHL Vd.4S, Vn.4S, Vm.4S
/// </summary>
public static Vector128<int> ShiftArithmeticRounded(Vector128<int> value, Vector128<int> count) => ShiftArithmeticRounded(value, count);
/// <summary>
/// int64x2_t vrshlq_s64 (int64x2_t a, int64x2_t b)
/// A32: VRSHL.S64 Qd, Qn, Qm
/// A64: SRSHL Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<long> ShiftArithmeticRounded(Vector128<long> value, Vector128<long> count) => ShiftArithmeticRounded(value, count);
/// <summary>
/// int8x16_t vrshlq_s8 (int8x16_t a, int8x16_t b)
/// A32: VRSHL.S8 Qd, Qn, Qm
/// A64: SRSHL Vd.16B, Vn.16B, Vm.16B
/// </summary>
public static Vector128<sbyte> ShiftArithmeticRounded(Vector128<sbyte> value, Vector128<sbyte> count) => ShiftArithmeticRounded(value, count);
/// <summary>
/// int16x4_t vqrshl_s16 (int16x4_t a, int16x4_t b)
/// A32: VQRSHL.S16 Dd, Dn, Dm
/// A64: SQRSHL Vd.4H, Vn.4H, Vm.4H
/// </summary>
public static Vector64<short> ShiftArithmeticRoundedSaturate(Vector64<short> value, Vector64<short> count) => ShiftArithmeticRoundedSaturate(value, count);
/// <summary>
/// int32x2_t vqrshl_s32 (int32x2_t a, int32x2_t b)
/// A32: VQRSHL.S32 Dd, Dn, Dm
/// A64: SQRSHL Vd.2S, Vn.2S, Vm.2S
/// </summary>
public static Vector64<int> ShiftArithmeticRoundedSaturate(Vector64<int> value, Vector64<int> count) => ShiftArithmeticRoundedSaturate(value, count);
/// <summary>
/// int8x8_t vqrshl_s8 (int8x8_t a, int8x8_t b)
/// A32: VQRSHL.S8 Dd, Dn, Dm
/// A64: SQRSHL Vd.8B, Vn.8B, Vm.8B
/// </summary>
public static Vector64<sbyte> ShiftArithmeticRoundedSaturate(Vector64<sbyte> value, Vector64<sbyte> count) => ShiftArithmeticRoundedSaturate(value, count);
/// <summary>
/// int16x8_t vqrshlq_s16 (int16x8_t a, int16x8_t b)
/// A32: VQRSHL.S16 Qd, Qn, Qm
/// A64: SQRSHL Vd.8H, Vn.8H, Vm.8H
/// </summary>
public static Vector128<short> ShiftArithmeticRoundedSaturate(Vector128<short> value, Vector128<short> count) => ShiftArithmeticRoundedSaturate(value, count);
/// <summary>
/// int32x4_t vqrshlq_s32 (int32x4_t a, int32x4_t b)
/// A32: VQRSHL.S32 Qd, Qn, Qm
/// A64: SQRSHL Vd.4S, Vn.4S, Vm.4S
/// </summary>
public static Vector128<int> ShiftArithmeticRoundedSaturate(Vector128<int> value, Vector128<int> count) => ShiftArithmeticRoundedSaturate(value, count);
/// <summary>
/// int64x2_t vqrshlq_s64 (int64x2_t a, int64x2_t b)
/// A32: VQRSHL.S64 Qd, Qn, Qm
/// A64: SQRSHL Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<long> ShiftArithmeticRoundedSaturate(Vector128<long> value, Vector128<long> count) => ShiftArithmeticRoundedSaturate(value, count);
/// <summary>
/// int8x16_t vqrshlq_s8 (int8x16_t a, int8x16_t b)
/// A32: VQRSHL.S8 Qd, Qn, Qm
/// A64: SQRSHL Vd.16B, Vn.16B, Vm.16B
/// </summary>
public static Vector128<sbyte> ShiftArithmeticRoundedSaturate(Vector128<sbyte> value, Vector128<sbyte> count) => ShiftArithmeticRoundedSaturate(value, count);
/// <summary>
/// int64x1_t vqrshl_s64 (int64x1_t a, int64x1_t b)
/// A32: VQRSHL.S64 Dd, Dn, Dm
/// A64: SQRSHL Dd, Dn, Dm
/// </summary>
public static Vector64<long> ShiftArithmeticRoundedSaturateScalar(Vector64<long> value, Vector64<long> count) => ShiftArithmeticRoundedSaturateScalar(value, count);
/// <summary>
/// int64x1_t vrshl_s64 (int64x1_t a, int64x1_t b)
/// A32: VRSHL.S64 Dd, Dn, Dm
/// A64: SRSHL Dd, Dn, Dm
/// </summary>
public static Vector64<long> ShiftArithmeticRoundedScalar(Vector64<long> value, Vector64<long> count) => ShiftArithmeticRoundedScalar(value, count);
/// <summary>
/// int16x4_t vqshl_s16 (int16x4_t a, int16x4_t b)
/// A32: VQSHL.S16 Dd, Dn, Dm
/// A64: SQSHL Vd.4H, Vn.4H, Vm.4H
/// </summary>
public static Vector64<short> ShiftArithmeticSaturate(Vector64<short> value, Vector64<short> count) => ShiftArithmeticSaturate(value, count);
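// Illustrative sketch (not from the original source): the per-lane shift count is signed —
// positive counts shift left, negative counts shift right — so one call can mix directions
// across lanes, e.g.
//
//   Vector64<short> counts  = Vector64.Create((short)2, (short)-1, (short)0, (short)3);
//   Vector64<short> shifted = AdvSimd.ShiftArithmetic(values, counts);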
/// <summary>
/// int32x2_t vqshl_s32 (int32x2_t a, int32x2_t b)
/// A32: VQSHL.S32 Dd, Dn, Dm
/// A64: SQSHL Vd.2S, Vn.2S, Vm.2S
/// </summary>
public static Vector64<int> ShiftArithmeticSaturate(Vector64<int> value, Vector64<int> count) => ShiftArithmeticSaturate(value, count);
/// <summary>
/// int8x8_t vqshl_s8 (int8x8_t a, int8x8_t b)
/// A32: VQSHL.S8 Dd, Dn, Dm
/// A64: SQSHL Vd.8B, Vn.8B, Vm.8B
/// </summary>
public static Vector64<sbyte> ShiftArithmeticSaturate(Vector64<sbyte> value, Vector64<sbyte> count) => ShiftArithmeticSaturate(value, count);
/// <summary>
/// int16x8_t vqshlq_s16 (int16x8_t a, int16x8_t b)
/// A32: VQSHL.S16 Qd, Qn, Qm
/// A64: SQSHL Vd.8H, Vn.8H, Vm.8H
/// </summary>
public static Vector128<short> ShiftArithmeticSaturate(Vector128<short> value, Vector128<short> count) => ShiftArithmeticSaturate(value, count);
/// <summary>
/// int32x4_t vqshlq_s32 (int32x4_t a, int32x4_t b)
/// A32: VQSHL.S32 Qd, Qn, Qm
/// A64: SQSHL Vd.4S, Vn.4S, Vm.4S
/// </summary>
public static Vector128<int> ShiftArithmeticSaturate(Vector128<int> value, Vector128<int> count) => ShiftArithmeticSaturate(value, count);
/// <summary>
/// int64x2_t vqshlq_s64 (int64x2_t a, int64x2_t b)
/// A32: VQSHL.S64 Qd, Qn, Qm
/// A64: SQSHL Vd.2D, Vn.2D, Vm.2D
/// </summary>
public static Vector128<long> ShiftArithmeticSaturate(Vector128<long> value, Vector128<long> count) => ShiftArithmeticSaturate(value, count);
/// <summary>
/// int8x16_t vqshlq_s8 (int8x16_t a, int8x16_t b)
/// A32: VQSHL.S8 Qd, Qn, Qm
/// A64: SQSHL Vd.16B, Vn.16B, Vm.16B
/// </summary>
public static Vector128<sbyte> ShiftArithmeticSaturate(Vector128<sbyte> value, Vector128<sbyte> count) => ShiftArithmeticSaturate(value, count);
/// <summary>
/// int64x1_t vqshl_s64 (int64x1_t a, int64x1_t b)
/// A32: VQSHL.S64 Dd, Dn, Dm
/// A64: SQSHL Dd, Dn, Dm
/// </summary>
public static Vector64<long> ShiftArithmeticSaturateScalar(Vector64<long> value, Vector64<long> count) => ShiftArithmeticSaturateScalar(value, count);
/// <summary>
/// int64x1_t vshl_s64 (int64x1_t a, int64x1_t b)
/// A32: VSHL.S64 Dd, Dn, Dm
/// A64: SSHL Dd, Dn, Dm
/// </summary>
public static Vector64<long> ShiftArithmeticScalar(Vector64<long> value, Vector64<long> count) => ShiftArithmeticScalar(value, count);
/// <summary>
/// uint8x8_t vsli_n_u8(uint8x8_t a, uint8x8_t b, __builtin_constant_p(n))
/// A32: VSLI.8 Dd, Dm, #n
/// A64: SLI Vd.8B, Vn.8B, #n
/// </summary>
public static Vector64<byte> ShiftLeftAndInsert(Vector64<byte> left, Vector64<byte> right, byte shift) => ShiftLeftAndInsert(left, right, shift);
/// <summary>
/// int16x4_t vsli_n_s16(int16x4_t a, int16x4_t b, __builtin_constant_p(n))
/// A32: VSLI.16 Dd, Dm, #n
/// A64: SLI Vd.4H, Vn.4H, #n
/// </summary>
public static Vector64<short> ShiftLeftAndInsert(Vector64<short> left, Vector64<short> right, byte shift) => ShiftLeftAndInsert(left, right, shift);
/// <summary>
/// int32x2_t vsli_n_s32(int32x2_t a, int32x2_t b, __builtin_constant_p(n))
/// A32: VSLI.32 Dd, Dm, #n
/// A64: SLI Vd.2S, Vn.2S, #n
/// </summary>
public static Vector64<int> ShiftLeftAndInsert(Vector64<int> left, Vector64<int> right, byte shift) => ShiftLeftAndInsert(left, right, shift);
/// <summary>
/// int8x8_t vsli_n_s8(int8x8_t a, int8x8_t b, __builtin_constant_p(n))
/// A32: VSLI.8 Dd, Dm, #n
/// A64: SLI Vd.8B, Vn.8B, #n
/// </summary>
public static Vector64<sbyte> ShiftLeftAndInsert(Vector64<sbyte> left, Vector64<sbyte> right, byte shift) => ShiftLeftAndInsert(left, right, shift);
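// Illustrative sketch (not from the original source): ShiftLeftAndInsert shifts 'right' left
// by 'shift' and inserts the result into 'left', preserving left's low 'shift' bits — useful
// for packing bitfields, e.g.
//
//   Vector64<byte> packed = AdvSimd.ShiftLeftAndInsert(lowNibbles, highNibbles, 4); // (high << 4) | (low & 0xF)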
        /// <summary>
        /// uint8x8_t vsli_n_u8(uint8x8_t a, uint8x8_t b, __builtin_constant_p(n))
        /// A32: VSLI.8 Dd, Dm, #n
        /// A64: SLI Vd.8B, Vn.8B, #n
        /// </summary>
        public static Vector64<byte> ShiftLeftAndInsert(Vector64<byte> left, Vector64<byte> right, byte shift) => ShiftLeftAndInsert(left, right, shift);
        /// <summary>
        /// int16x4_t vsli_n_s16(int16x4_t a, int16x4_t b, __builtin_constant_p(n))
        /// A32: VSLI.16 Dd, Dm, #n
        /// A64: SLI Vd.4H, Vn.4H, #n
        /// </summary>
        public static Vector64<short> ShiftLeftAndInsert(Vector64<short> left, Vector64<short> right, byte shift) => ShiftLeftAndInsert(left, right, shift);
        /// <summary>
        /// int32x2_t vsli_n_s32(int32x2_t a, int32x2_t b, __builtin_constant_p(n))
        /// A32: VSLI.32 Dd, Dm, #n
        /// A64: SLI Vd.2S, Vn.2S, #n
        /// </summary>
        public static Vector64<int> ShiftLeftAndInsert(Vector64<int> left, Vector64<int> right, byte shift) => ShiftLeftAndInsert(left, right, shift);
        /// <summary>
        /// int8x8_t vsli_n_s8(int8x8_t a, int8x8_t b, __builtin_constant_p(n))
        /// A32: VSLI.8 Dd, Dm, #n
        /// A64: SLI Vd.8B, Vn.8B, #n
        /// </summary>
        public static Vector64<sbyte> ShiftLeftAndInsert(Vector64<sbyte> left, Vector64<sbyte> right, byte shift) => ShiftLeftAndInsert(left, right, shift);
        /// <summary>
        /// uint16x4_t vsli_n_u16(uint16x4_t a, uint16x4_t b, __builtin_constant_p(n))
        /// A32: VSLI.16 Dd, Dm, #n
        /// A64: SLI Vd.4H, Vn.4H, #n
        /// </summary>
        public static Vector64<ushort> ShiftLeftAndInsert(Vector64<ushort> left, Vector64<ushort> right, byte shift) => ShiftLeftAndInsert(left, right, shift);
        /// <summary>
        /// uint32x2_t vsli_n_u32(uint32x2_t a, uint32x2_t b, __builtin_constant_p(n))
        /// A32: VSLI.32 Dd, Dm, #n
        /// A64: SLI Vd.2S, Vn.2S, #n
        /// </summary>
        public static Vector64<uint> ShiftLeftAndInsert(Vector64<uint> left, Vector64<uint> right, byte shift) => ShiftLeftAndInsert(left, right, shift);
        /// <summary>
        /// uint8x16_t vsliq_n_u8(uint8x16_t a, uint8x16_t b, __builtin_constant_p(n))
        /// A32: VSLI.8 Qd, Qm, #n
        /// A64: SLI Vd.16B, Vn.16B, #n
        /// </summary>
        public static Vector128<byte> ShiftLeftAndInsert(Vector128<byte> left, Vector128<byte> right, byte shift) => ShiftLeftAndInsert(left, right, shift);
        /// <summary>
        /// int16x8_t vsliq_n_s16(int16x8_t a, int16x8_t b, __builtin_constant_p(n))
        /// A32: VSLI.16 Qd, Qm, #n
        /// A64: SLI Vd.8H, Vn.8H, #n
        /// </summary>
        public static Vector128<short> ShiftLeftAndInsert(Vector128<short> left, Vector128<short> right, byte shift) => ShiftLeftAndInsert(left, right, shift);
        /// <summary>
        /// int32x4_t vsliq_n_s32(int32x4_t a, int32x4_t b, __builtin_constant_p(n))
        /// A32: VSLI.32 Qd, Qm, #n
        /// A64: SLI Vd.4S, Vn.4S, #n
        /// </summary>
        public static Vector128<int> ShiftLeftAndInsert(Vector128<int> left, Vector128<int> right, byte shift) => ShiftLeftAndInsert(left, right, shift);
        /// <summary>
        /// int64x2_t vsliq_n_s64(int64x2_t a, int64x2_t b, __builtin_constant_p(n))
        /// A32: VSLI.64 Qd, Qm, #n
        /// A64: SLI Vd.2D, Vn.2D, #n
        /// </summary>
        public static Vector128<long> ShiftLeftAndInsert(Vector128<long> left, Vector128<long> right, byte shift) => ShiftLeftAndInsert(left, right, shift);
        /// <summary>
        /// int8x16_t vsliq_n_s8(int8x16_t a, int8x16_t b, __builtin_constant_p(n))
        /// A32: VSLI.8 Qd, Qm, #n
        /// A64: SLI Vd.16B, Vn.16B, #n
        /// </summary>
        public static Vector128<sbyte> ShiftLeftAndInsert(Vector128<sbyte> left, Vector128<sbyte> right, byte shift) => ShiftLeftAndInsert(left, right, shift);
        /// <summary>
        /// uint16x8_t vsliq_n_u16(uint16x8_t a, uint16x8_t b, __builtin_constant_p(n))
        /// A32: VSLI.16 Qd, Qm, #n
        /// A64: SLI Vd.8H, Vn.8H, #n
        /// </summary>
        public static Vector128<ushort> ShiftLeftAndInsert(Vector128<ushort> left, Vector128<ushort> right, byte shift) => ShiftLeftAndInsert(left, right, shift);
        /// <summary>
        /// uint32x4_t vsliq_n_u32(uint32x4_t a, uint32x4_t b, __builtin_constant_p(n))
        /// A32: VSLI.32 Qd, Qm, #n
        /// A64: SLI Vd.4S, Vn.4S, #n
        /// </summary>
        public static Vector128<uint> ShiftLeftAndInsert(Vector128<uint> left, Vector128<uint> right, byte shift) => ShiftLeftAndInsert(left, right, shift);
        /// <summary>
        /// uint64x2_t vsliq_n_u64(uint64x2_t a, uint64x2_t b, __builtin_constant_p(n))
        /// A32: VSLI.64 Qd, Qm, #n
        /// A64: SLI Vd.2D, Vn.2D, #n
        /// </summary>
        public static Vector128<ulong> ShiftLeftAndInsert(Vector128<ulong> left, Vector128<ulong> right, byte shift) => ShiftLeftAndInsert(left, right, shift);
        /// <summary>
        /// int64_t vslid_n_s64(int64_t a, int64_t b, __builtin_constant_p(n))
        /// A32: VSLI.64 Dd, Dm, #n
        /// A64: SLI Dd, Dn, #n
        /// </summary>
        public static Vector64<long> ShiftLeftAndInsertScalar(Vector64<long> left, Vector64<long> right, byte shift) => ShiftLeftAndInsertScalar(left, right, shift);
        /// <summary>
        /// uint64_t vslid_n_u64(uint64_t a, uint64_t b, __builtin_constant_p(n))
        /// A32: VSLI.64 Dd, Dm, #n
        /// A64: SLI Dd, Dn, #n
        /// </summary>
        public static Vector64<ulong> ShiftLeftAndInsertScalar(Vector64<ulong> left, Vector64<ulong> right, byte shift) => ShiftLeftAndInsertScalar(left, right, shift);
        /// <summary>
        /// uint8x8_t vshl_n_u8 (uint8x8_t a, const int n)
        /// A32: VSHL.I8 Dd, Dm, #n
        /// A64: SHL Vd.8B, Vn.8B, #n
        /// </summary>
        public static Vector64<byte> ShiftLeftLogical(Vector64<byte> value, byte count) => ShiftLeftLogical(value, count);
        /// <summary>
        /// int16x4_t vshl_n_s16 (int16x4_t a, const int n)
        /// A32: VSHL.I16 Dd, Dm, #n
        /// A64: SHL Vd.4H, Vn.4H, #n
        /// </summary>
        public static Vector64<short> ShiftLeftLogical(Vector64<short> value, byte count) => ShiftLeftLogical(value, count);
        /// <summary>
        /// int32x2_t vshl_n_s32 (int32x2_t a, const int n)
        /// A32: VSHL.I32 Dd, Dm, #n
        /// A64: SHL Vd.2S, Vn.2S, #n
        /// </summary>
        public static Vector64<int> ShiftLeftLogical(Vector64<int> value, byte count) => ShiftLeftLogical(value, count);
        /// <summary>
        /// int8x8_t vshl_n_s8 (int8x8_t a, const int n)
        /// A32: VSHL.I8 Dd, Dm, #n
        /// A64: SHL Vd.8B, Vn.8B, #n
        /// </summary>
        public static Vector64<sbyte> ShiftLeftLogical(Vector64<sbyte> value, byte count) => ShiftLeftLogical(value, count);
        /// <summary>
        /// uint16x4_t vshl_n_u16 (uint16x4_t a, const int n)
        /// A32: VSHL.I16 Dd, Dm, #n
        /// A64: SHL Vd.4H, Vn.4H, #n
        /// </summary>
        public static Vector64<ushort> ShiftLeftLogical(Vector64<ushort> value, byte count) => ShiftLeftLogical(value, count);
        /// <summary>
        /// uint32x2_t vshl_n_u32 (uint32x2_t a, const int n)
        /// A32: VSHL.I32 Dd, Dm, #n
        /// A64: SHL Vd.2S, Vn.2S, #n
        /// </summary>
        public static Vector64<uint> ShiftLeftLogical(Vector64<uint> value, byte count) => ShiftLeftLogical(value, count);
        /// <summary>
        /// uint8x16_t vshlq_n_u8 (uint8x16_t a, const int n)
        /// A32: VSHL.I8 Qd, Qm, #n
        /// A64: SHL Vd.16B, Vn.16B, #n
        /// </summary>
        public static Vector128<byte> ShiftLeftLogical(Vector128<byte> value, byte count) => ShiftLeftLogical(value, count);
        /// <summary>
        /// int16x8_t vshlq_n_s16 (int16x8_t a, const int n)
        /// A32: VSHL.I16 Qd, Qm, #n
        /// A64: SHL Vd.8H, Vn.8H, #n
        /// </summary>
        public static Vector128<short> ShiftLeftLogical(Vector128<short> value, byte count) => ShiftLeftLogical(value, count);
        /// <summary>
        /// int64x2_t vshlq_n_s64 (int64x2_t a, const int n)
        /// A32: VSHL.I64 Qd, Qm, #n
        /// A64: SHL Vd.2D, Vn.2D, #n
        /// </summary>
        public static Vector128<long> ShiftLeftLogical(Vector128<long> value, byte count) => ShiftLeftLogical(value, count);
        /// <summary>
        /// int8x16_t vshlq_n_s8 (int8x16_t a, const int n)
        /// A32: VSHL.I8 Qd, Qm, #n
        /// A64: SHL Vd.16B, Vn.16B, #n
        /// </summary>
        public static Vector128<sbyte> ShiftLeftLogical(Vector128<sbyte> value, byte count) => ShiftLeftLogical(value, count);
        /// <summary>
        /// uint16x8_t vshlq_n_u16 (uint16x8_t a, const int n)
        /// A32: VSHL.I16 Qd, Qm, #n
        /// A64: SHL Vd.8H, Vn.8H, #n
        /// </summary>
        public static Vector128<ushort> ShiftLeftLogical(Vector128<ushort> value, byte count) => ShiftLeftLogical(value, count);
        /// <summary>
        /// uint32x4_t vshlq_n_u32 (uint32x4_t a, const int n)
        /// A32: VSHL.I32 Qd, Qm, #n
        /// A64: SHL Vd.4S, Vn.4S, #n
        /// </summary>
        public static Vector128<uint> ShiftLeftLogical(Vector128<uint> value, byte count) => ShiftLeftLogical(value, count);
        /// <summary>
        /// uint64x2_t vshlq_n_u64 (uint64x2_t a, const int n)
        /// A32: VSHL.I64 Qd, Qm, #n
        /// A64: SHL Vd.2D, Vn.2D, #n
        /// </summary>
        public static Vector128<ulong> ShiftLeftLogical(Vector128<ulong> value, byte count) => ShiftLeftLogical(value, count);
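        // Illustrative note (editorial sketch, not normative): the Saturate variants below map to
        // SQSHL/UQSHL with an immediate count; they clamp to the lane's numeric range instead of
        // discarding overflow bits. For one unsigned byte lane shifted left by 1:
        //   plain SHL: 0xF0 << 1 == 0xE0 (high bit lost);  UQSHL: saturates to 0xFF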
        /// <summary>
        /// uint8x8_t vqshl_n_u8 (uint8x8_t a, const int n)
        /// A32: VQSHL.U8 Dd, Dm, #n
        /// A64: UQSHL Vd.8B, Vn.8B, #n
        /// </summary>
        public static Vector64<byte> ShiftLeftLogicalSaturate(Vector64<byte> value, byte count) => ShiftLeftLogicalSaturate(value, count);
        /// <summary>
        /// int16x4_t vqshl_n_s16 (int16x4_t a, const int n)
        /// A32: VQSHL.S16 Dd, Dm, #n
        /// A64: SQSHL Vd.4H, Vn.4H, #n
        /// </summary>
        public static Vector64<short> ShiftLeftLogicalSaturate(Vector64<short> value, byte count) => ShiftLeftLogicalSaturate(value, count);
        /// <summary>
        /// int32x2_t vqshl_n_s32 (int32x2_t a, const int n)
        /// A32: VQSHL.S32 Dd, Dm, #n
        /// A64: SQSHL Vd.2S, Vn.2S, #n
        /// </summary>
        public static Vector64<int> ShiftLeftLogicalSaturate(Vector64<int> value, byte count) => ShiftLeftLogicalSaturate(value, count);
        /// <summary>
        /// int8x8_t vqshl_n_s8 (int8x8_t a, const int n)
        /// A32: VQSHL.S8 Dd, Dm, #n
        /// A64: SQSHL Vd.8B, Vn.8B, #n
        /// </summary>
        public static Vector64<sbyte> ShiftLeftLogicalSaturate(Vector64<sbyte> value, byte count) => ShiftLeftLogicalSaturate(value, count);
        /// <summary>
        /// uint16x4_t vqshl_n_u16 (uint16x4_t a, const int n)
        /// A32: VQSHL.U16 Dd, Dm, #n
        /// A64: UQSHL Vd.4H, Vn.4H, #n
        /// </summary>
        public static Vector64<ushort> ShiftLeftLogicalSaturate(Vector64<ushort> value, byte count) => ShiftLeftLogicalSaturate(value, count);
        /// <summary>
        /// uint32x2_t vqshl_n_u32 (uint32x2_t a, const int n)
        /// A32: VQSHL.U32 Dd, Dm, #n
        /// A64: UQSHL Vd.2S, Vn.2S, #n
        /// </summary>
        public static Vector64<uint> ShiftLeftLogicalSaturate(Vector64<uint> value, byte count) => ShiftLeftLogicalSaturate(value, count);
        /// <summary>
        /// uint8x16_t vqshlq_n_u8 (uint8x16_t a, const int n)
        /// A32: VQSHL.U8 Qd, Qm, #n
        /// A64: UQSHL Vd.16B, Vn.16B, #n
        /// </summary>
        public static Vector128<byte> ShiftLeftLogicalSaturate(Vector128<byte> value, byte count) => ShiftLeftLogicalSaturate(value, count);
        /// <summary>
        /// int16x8_t vqshlq_n_s16 (int16x8_t a, const int n)
        /// A32: VQSHL.S16 Qd, Qm, #n
        /// A64: SQSHL Vd.8H, Vn.8H, #n
        /// </summary>
        public static Vector128<short> ShiftLeftLogicalSaturate(Vector128<short> value, byte count) => ShiftLeftLogicalSaturate(value, count);
        /// <summary>
        /// int32x4_t vqshlq_n_s32 (int32x4_t a, const int n)
        /// A32: VQSHL.S32 Qd, Qm, #n
        /// A64: SQSHL Vd.4S, Vn.4S, #n
        /// </summary>
        public static Vector128<int> ShiftLeftLogicalSaturate(Vector128<int> value, byte count) => ShiftLeftLogicalSaturate(value, count);
        /// <summary>
        /// int64x2_t vqshlq_n_s64 (int64x2_t a, const int n)
        /// A32: VQSHL.S64 Qd, Qm, #n
        /// A64: SQSHL Vd.2D, Vn.2D, #n
        /// </summary>
        public static Vector128<long> ShiftLeftLogicalSaturate(Vector128<long> value, byte count) => ShiftLeftLogicalSaturate(value, count);
        /// <summary>
        /// int8x16_t vqshlq_n_s8 (int8x16_t a, const int n)
        /// A32: VQSHL.S8 Qd, Qm, #n
        /// A64: SQSHL Vd.16B, Vn.16B, #n
        /// </summary>
        public static Vector128<sbyte> ShiftLeftLogicalSaturate(Vector128<sbyte> value, byte count) => ShiftLeftLogicalSaturate(value, count);
        /// <summary>
        /// uint16x8_t vqshlq_n_u16 (uint16x8_t a, const int n)
        /// A32: VQSHL.U16 Qd, Qm, #n
        /// A64: UQSHL Vd.8H, Vn.8H, #n
        /// </summary>
        public static Vector128<ushort> ShiftLeftLogicalSaturate(Vector128<ushort> value, byte count) => ShiftLeftLogicalSaturate(value, count);
        /// <summary>
        /// uint32x4_t vqshlq_n_u32 (uint32x4_t a, const int n)
        /// A32: VQSHL.U32 Qd, Qm, #n
        /// A64: UQSHL Vd.4S, Vn.4S, #n
        /// </summary>
        public static Vector128<uint> ShiftLeftLogicalSaturate(Vector128<uint> value, byte count) => ShiftLeftLogicalSaturate(value, count);
        /// <summary>
        /// uint64x2_t vqshlq_n_u64 (uint64x2_t a, const int n)
        /// A32: VQSHL.U64 Qd, Qm, #n
        /// A64: UQSHL Vd.2D, Vn.2D, #n
        /// </summary>
        public static Vector128<ulong> ShiftLeftLogicalSaturate(Vector128<ulong> value, byte count) => ShiftLeftLogicalSaturate(value, count);
        /// <summary>
        /// int64x1_t vqshl_n_s64 (int64x1_t a, const int n)
        /// A32: VQSHL.S64 Dd, Dm, #n
        /// A64: SQSHL Dd, Dn, #n
        /// </summary>
        public static Vector64<long> ShiftLeftLogicalSaturateScalar(Vector64<long> value, byte count) => ShiftLeftLogicalSaturateScalar(value, count);
        /// <summary>
        /// uint64x1_t vqshl_n_u64 (uint64x1_t a, const int n)
        /// A32: VQSHL.U64 Dd, Dm, #n
        /// A64: UQSHL Dd, Dn, #n
        /// </summary>
        public static Vector64<ulong> ShiftLeftLogicalSaturateScalar(Vector64<ulong> value, byte count) => ShiftLeftLogicalSaturateScalar(value, count);
        /// <summary>
        /// uint16x4_t vqshlu_n_s16 (int16x4_t a, const int n)
        /// A32: VQSHLU.S16 Dd, Dm, #n
        /// A64: SQSHLU Vd.4H, Vn.4H, #n
        /// </summary>
        public static Vector64<ushort> ShiftLeftLogicalSaturateUnsigned(Vector64<short> value, byte count) => ShiftLeftLogicalSaturateUnsigned(value, count);
        /// <summary>
        /// uint32x2_t vqshlu_n_s32 (int32x2_t a, const int n)
        /// A32: VQSHLU.S32 Dd, Dm, #n
        /// A64: SQSHLU Vd.2S, Vn.2S, #n
        /// </summary>
        public static Vector64<uint> ShiftLeftLogicalSaturateUnsigned(Vector64<int> value, byte count) => ShiftLeftLogicalSaturateUnsigned(value, count);
        /// <summary>
        /// uint8x8_t vqshlu_n_s8 (int8x8_t a, const int n)
        /// A32: VQSHLU.S8 Dd, Dm, #n
        /// A64: SQSHLU Vd.8B, Vn.8B, #n
        /// </summary>
        public static Vector64<byte> ShiftLeftLogicalSaturateUnsigned(Vector64<sbyte> value, byte count) => ShiftLeftLogicalSaturateUnsigned(value, count);
        /// <summary>
        /// uint16x8_t vqshluq_n_s16 (int16x8_t a, const int n)
        /// A32: VQSHLU.S16 Qd, Qm, #n
        /// A64: SQSHLU Vd.8H, Vn.8H, #n
        /// </summary>
        public static Vector128<ushort> ShiftLeftLogicalSaturateUnsigned(Vector128<short> value, byte count) => ShiftLeftLogicalSaturateUnsigned(value, count);
        /// <summary>
        /// uint32x4_t vqshluq_n_s32 (int32x4_t a, const int n)
        /// A32: VQSHLU.S32 Qd, Qm, #n
        /// A64: SQSHLU Vd.4S, Vn.4S, #n
        /// </summary>
        public static Vector128<uint> ShiftLeftLogicalSaturateUnsigned(Vector128<int> value, byte count) => ShiftLeftLogicalSaturateUnsigned(value, count);
        /// <summary>
        /// uint64x2_t vqshluq_n_s64 (int64x2_t a, const int n)
        /// A32: VQSHLU.S64 Qd, Qm, #n
        /// A64: SQSHLU Vd.2D, Vn.2D, #n
        /// </summary>
        public static Vector128<ulong> ShiftLeftLogicalSaturateUnsigned(Vector128<long> value, byte count) => ShiftLeftLogicalSaturateUnsigned(value, count);
        /// <summary>
        /// uint8x16_t vqshluq_n_s8 (int8x16_t a, const int n)
        /// A32: VQSHLU.S8 Qd, Qm, #n
        /// A64: SQSHLU Vd.16B, Vn.16B, #n
        /// </summary>
        public static Vector128<byte> ShiftLeftLogicalSaturateUnsigned(Vector128<sbyte> value, byte count) => ShiftLeftLogicalSaturateUnsigned(value, count);
        /// <summary>
        /// uint64x1_t vqshlu_n_s64 (int64x1_t a, const int n)
        /// A32: VQSHLU.S64 Dd, Dm, #n
        /// A64: SQSHLU Dd, Dn, #n
        /// </summary>
        public static Vector64<ulong> ShiftLeftLogicalSaturateUnsignedScalar(Vector64<long> value, byte count) => ShiftLeftLogicalSaturateUnsignedScalar(value, count);
        /// <summary>
        /// int64x1_t vshl_n_s64 (int64x1_t a, const int n)
        /// A32: VSHL.I64 Dd, Dm, #n
        /// A64: SHL Dd, Dn, #n
        /// </summary>
        public static Vector64<long> ShiftLeftLogicalScalar(Vector64<long> value, byte count) => ShiftLeftLogicalScalar(value, count);
        /// <summary>
        /// uint64x1_t vshl_n_u64 (uint64x1_t a, const int n)
        /// A32: VSHL.I64 Dd, Dm, #n
        /// A64: SHL Dd, Dn, #n
        /// </summary>
        public static Vector64<ulong> ShiftLeftLogicalScalar(Vector64<ulong> value, byte count) => ShiftLeftLogicalScalar(value, count);
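        // Illustrative note (editorial sketch, not normative): the Widening variants below map to
        // SSHLL/USHLL, which sign- or zero-extend each lane to twice its width before shifting,
        // so no bits are lost. For one byte lane: 0x80 widens to ushort 0x0080, then << 1 == 0x0100.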
        /// <summary>
        /// uint16x8_t vshll_n_u8 (uint8x8_t a, const int n)
        /// A32: VSHLL.U8 Qd, Dm, #n
        /// A64: USHLL Vd.8H, Vn.8B, #n
        /// </summary>
        public static Vector128<ushort> ShiftLeftLogicalWideningLower(Vector64<byte> value, byte count) => ShiftLeftLogicalWideningLower(value, count);
        /// <summary>
        /// int32x4_t vshll_n_s16 (int16x4_t a, const int n)
        /// A32: VSHLL.S16 Qd, Dm, #n
        /// A64: SSHLL Vd.4S, Vn.4H, #n
        /// </summary>
        public static Vector128<int> ShiftLeftLogicalWideningLower(Vector64<short> value, byte count) => ShiftLeftLogicalWideningLower(value, count);
        /// <summary>
        /// int64x2_t vshll_n_s32 (int32x2_t a, const int n)
        /// A32: VSHLL.S32 Qd, Dm, #n
        /// A64: SSHLL Vd.2D, Vn.2S, #n
        /// </summary>
        public static Vector128<long> ShiftLeftLogicalWideningLower(Vector64<int> value, byte count) => ShiftLeftLogicalWideningLower(value, count);
        /// <summary>
        /// int16x8_t vshll_n_s8 (int8x8_t a, const int n)
        /// A32: VSHLL.S8 Qd, Dm, #n
        /// A64: SSHLL Vd.8H, Vn.8B, #n
        /// </summary>
        public static Vector128<short> ShiftLeftLogicalWideningLower(Vector64<sbyte> value, byte count) => ShiftLeftLogicalWideningLower(value, count);
        /// <summary>
        /// uint32x4_t vshll_n_u16 (uint16x4_t a, const int n)
        /// A32: VSHLL.U16 Qd, Dm, #n
        /// A64: USHLL Vd.4S, Vn.4H, #n
        /// </summary>
        public static Vector128<uint> ShiftLeftLogicalWideningLower(Vector64<ushort> value, byte count) => ShiftLeftLogicalWideningLower(value, count);
        /// <summary>
        /// uint64x2_t vshll_n_u32 (uint32x2_t a, const int n)
        /// A32: VSHLL.U32 Qd, Dm, #n
        /// A64: USHLL Vd.2D, Vn.2S, #n
        /// </summary>
        public static Vector128<ulong> ShiftLeftLogicalWideningLower(Vector64<uint> value, byte count) => ShiftLeftLogicalWideningLower(value, count);
        /// <summary>
        /// uint16x8_t vshll_high_n_u8 (uint8x16_t a, const int n)
        /// A32: VSHLL.U8 Qd, Dm+1, #n
        /// A64: USHLL2 Vd.8H, Vn.16B, #n
        /// </summary>
        public static Vector128<ushort> ShiftLeftLogicalWideningUpper(Vector128<byte> value, byte count) => ShiftLeftLogicalWideningUpper(value, count);
        /// <summary>
        /// int32x4_t vshll_high_n_s16 (int16x8_t a, const int n)
        /// A32: VSHLL.S16 Qd, Dm+1, #n
        /// A64: SSHLL2 Vd.4S, Vn.8H, #n
        /// </summary>
        public static Vector128<int> ShiftLeftLogicalWideningUpper(Vector128<short> value, byte count) => ShiftLeftLogicalWideningUpper(value, count);
        /// <summary>
        /// int64x2_t vshll_high_n_s32 (int32x4_t a, const int n)
        /// A32: VSHLL.S32 Qd, Dm+1, #n
        /// A64: SSHLL2 Vd.2D, Vn.4S, #n
        /// </summary>
        public static Vector128<long> ShiftLeftLogicalWideningUpper(Vector128<int> value, byte count) => ShiftLeftLogicalWideningUpper(value, count);
        /// <summary>
        /// int16x8_t vshll_high_n_s8 (int8x16_t a, const int n)
        /// A32: VSHLL.S8 Qd, Dm+1, #n
        /// A64: SSHLL2 Vd.8H, Vn.16B, #n
        /// </summary>
        public static Vector128<short> ShiftLeftLogicalWideningUpper(Vector128<sbyte> value, byte count) => ShiftLeftLogicalWideningUpper(value, count);
        /// <summary>
        /// uint32x4_t vshll_high_n_u16 (uint16x8_t a, const int n)
        /// A32: VSHLL.U16 Qd, Dm+1, #n
        /// A64: USHLL2 Vd.4S, Vn.8H, #n
        /// </summary>
        public static Vector128<uint> ShiftLeftLogicalWideningUpper(Vector128<ushort> value, byte count) => ShiftLeftLogicalWideningUpper(value, count);
        /// <summary>
        /// uint64x2_t vshll_high_n_u32 (uint32x4_t a, const int n)
        /// A32: VSHLL.U32 Qd, Dm+1, #n
        /// A64: USHLL2 Vd.2D, Vn.4S, #n
        /// </summary>
        public static Vector128<ulong> ShiftLeftLogicalWideningUpper(Vector128<uint> value, byte count) => ShiftLeftLogicalWideningUpper(value, count);
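        // Illustrative note (editorial sketch, not normative): unlike the immediate forms above,
        // ShiftLogical takes a per-lane signed count (USHL); a negative count shifts that lane
        // right instead. For one byte lane: value = 0x10 with count = +2 yields 0x40, while
        // count = -2 yields 0x04.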
        /// <summary>
        /// uint8x8_t vshl_u8 (uint8x8_t a, int8x8_t b)
        /// A32: VSHL.U8 Dd, Dn, Dm
        /// A64: USHL Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<byte> ShiftLogical(Vector64<byte> value, Vector64<sbyte> count) => ShiftLogical(value, count);
        /// <summary>
        /// uint16x4_t vshl_u16 (uint16x4_t a, int16x4_t b)
        /// A32: VSHL.U16 Dd, Dn, Dm
        /// A64: USHL Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<short> ShiftLogical(Vector64<short> value, Vector64<short> count) => ShiftLogical(value, count);
        /// <summary>
        /// uint32x2_t vshl_u32 (uint32x2_t a, int32x2_t b)
        /// A32: VSHL.U32 Dd, Dn, Dm
        /// A64: USHL Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<int> ShiftLogical(Vector64<int> value, Vector64<int> count) => ShiftLogical(value, count);
        /// <summary>
        /// uint8x8_t vshl_u8 (uint8x8_t a, int8x8_t b)
        /// A32: VSHL.U8 Dd, Dn, Dm
        /// A64: USHL Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<sbyte> ShiftLogical(Vector64<sbyte> value, Vector64<sbyte> count) => ShiftLogical(value, count);
        /// <summary>
        /// uint16x4_t vshl_u16 (uint16x4_t a, int16x4_t b)
        /// A32: VSHL.U16 Dd, Dn, Dm
        /// A64: USHL Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<ushort> ShiftLogical(Vector64<ushort> value, Vector64<short> count) => ShiftLogical(value, count);
        /// <summary>
        /// uint32x2_t vshl_u32 (uint32x2_t a, int32x2_t b)
        /// A32: VSHL.U32 Dd, Dn, Dm
        /// A64: USHL Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<uint> ShiftLogical(Vector64<uint> value, Vector64<int> count) => ShiftLogical(value, count);
        /// <summary>
        /// uint8x16_t vshlq_u8 (uint8x16_t a, int8x16_t b)
        /// A32: VSHL.U8 Qd, Qn, Qm
        /// A64: USHL Vd.16B, Vn.16B, Vm.16B
        /// </summary>
        public static Vector128<byte> ShiftLogical(Vector128<byte> value, Vector128<sbyte> count) => ShiftLogical(value, count);
        /// <summary>
        /// uint16x8_t vshlq_u16 (uint16x8_t a, int16x8_t b)
        /// A32: VSHL.U16 Qd, Qn, Qm
        /// A64: USHL Vd.8H, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<short> ShiftLogical(Vector128<short> value, Vector128<short> count) => ShiftLogical(value, count);
        /// <summary>
        /// uint32x4_t vshlq_u32 (uint32x4_t a, int32x4_t b)
        /// A32: VSHL.U32 Qd, Qn, Qm
        /// A64: USHL Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<int> ShiftLogical(Vector128<int> value, Vector128<int> count) => ShiftLogical(value, count);
        /// <summary>
        /// uint64x2_t vshlq_u64 (uint64x2_t a, int64x2_t b)
        /// A32: VSHL.U64 Qd, Qn, Qm
        /// A64: USHL Vd.2D, Vn.2D, Vm.2D
        /// </summary>
        public static Vector128<long> ShiftLogical(Vector128<long> value, Vector128<long> count) => ShiftLogical(value, count);
        /// <summary>
        /// uint8x16_t vshlq_u8 (uint8x16_t a, int8x16_t b)
        /// A32: VSHL.U8 Qd, Qn, Qm
        /// A64: USHL Vd.16B, Vn.16B, Vm.16B
        /// </summary>
        public static Vector128<sbyte> ShiftLogical(Vector128<sbyte> value, Vector128<sbyte> count) => ShiftLogical(value, count);
        /// <summary>
        /// uint16x8_t vshlq_u16 (uint16x8_t a, int16x8_t b)
        /// A32: VSHL.U16 Qd, Qn, Qm
        /// A64: USHL Vd.8H, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<ushort> ShiftLogical(Vector128<ushort> value, Vector128<short> count) => ShiftLogical(value, count);
        /// <summary>
        /// uint32x4_t vshlq_u32 (uint32x4_t a, int32x4_t b)
        /// A32: VSHL.U32 Qd, Qn, Qm
        /// A64: USHL Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<uint> ShiftLogical(Vector128<uint> value, Vector128<int> count) => ShiftLogical(value, count);
        /// <summary>
        /// uint64x2_t vshlq_u64 (uint64x2_t a, int64x2_t b)
        /// A32: VSHL.U64 Qd, Qn, Qm
        /// A64: USHL Vd.2D, Vn.2D, Vm.2D
        /// </summary>
        public static Vector128<ulong> ShiftLogical(Vector128<ulong> value, Vector128<long> count) => ShiftLogical(value, count);
        /// <summary>
        /// uint8x8_t vrshl_u8 (uint8x8_t a, int8x8_t b)
        /// A32: VRSHL.U8 Dd, Dn, Dm
        /// A64: URSHL Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<byte> ShiftLogicalRounded(Vector64<byte> value, Vector64<sbyte> count) => ShiftLogicalRounded(value, count);
        /// <summary>
        /// uint16x4_t vrshl_u16 (uint16x4_t a, int16x4_t b)
        /// A32: VRSHL.U16 Dd, Dn, Dm
        /// A64: URSHL Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<short> ShiftLogicalRounded(Vector64<short> value, Vector64<short> count) => ShiftLogicalRounded(value, count);
        /// <summary>
        /// uint32x2_t vrshl_u32 (uint32x2_t a, int32x2_t b)
        /// A32: VRSHL.U32 Dd, Dn, Dm
        /// A64: URSHL Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<int> ShiftLogicalRounded(Vector64<int> value, Vector64<int> count) => ShiftLogicalRounded(value, count);
        /// <summary>
        /// uint8x8_t vrshl_u8 (uint8x8_t a, int8x8_t b)
        /// A32: VRSHL.U8 Dd, Dn, Dm
        /// A64: URSHL Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<sbyte> ShiftLogicalRounded(Vector64<sbyte> value, Vector64<sbyte> count) => ShiftLogicalRounded(value, count);
        /// <summary>
        /// uint16x4_t vrshl_u16 (uint16x4_t a, int16x4_t b)
        /// A32: VRSHL.U16 Dd, Dn, Dm
        /// A64: URSHL Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<ushort> ShiftLogicalRounded(Vector64<ushort> value, Vector64<short> count) => ShiftLogicalRounded(value, count);
        /// <summary>
        /// uint32x2_t vrshl_u32 (uint32x2_t a, int32x2_t b)
        /// A32: VRSHL.U32 Dd, Dn, Dm
        /// A64: URSHL Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<uint> ShiftLogicalRounded(Vector64<uint> value, Vector64<int> count) => ShiftLogicalRounded(value, count);
        /// <summary>
        /// uint8x16_t vrshlq_u8 (uint8x16_t a, int8x16_t b)
        /// A32: VRSHL.U8 Qd, Qn, Qm
        /// A64: URSHL Vd.16B, Vn.16B, Vm.16B
        /// </summary>
        public static Vector128<byte> ShiftLogicalRounded(Vector128<byte> value, Vector128<sbyte> count) => ShiftLogicalRounded(value, count);
        /// <summary>
        /// uint16x8_t vrshlq_u16 (uint16x8_t a, int16x8_t b)
        /// A32: VRSHL.U16 Qd, Qn, Qm
        /// A64: URSHL Vd.8H, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<short> ShiftLogicalRounded(Vector128<short> value, Vector128<short> count) => ShiftLogicalRounded(value, count);
        /// <summary>
        /// uint32x4_t vrshlq_u32 (uint32x4_t a, int32x4_t b)
        /// A32: VRSHL.U32 Qd, Qn, Qm
        /// A64: URSHL Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<int> ShiftLogicalRounded(Vector128<int> value, Vector128<int> count) => ShiftLogicalRounded(value, count);
        /// <summary>
        /// uint64x2_t vrshlq_u64 (uint64x2_t a, int64x2_t b)
        /// A32: VRSHL.U64 Qd, Qn, Qm
        /// A64: URSHL Vd.2D, Vn.2D, Vm.2D
        /// </summary>
        public static Vector128<long> ShiftLogicalRounded(Vector128<long> value, Vector128<long> count) => ShiftLogicalRounded(value, count);
        /// <summary>
        /// uint8x16_t vrshlq_u8 (uint8x16_t a, int8x16_t b)
        /// A32: VRSHL.U8 Qd, Qn, Qm
        /// A64: URSHL Vd.16B, Vn.16B, Vm.16B
        /// </summary>
        public static Vector128<sbyte> ShiftLogicalRounded(Vector128<sbyte> value, Vector128<sbyte> count) => ShiftLogicalRounded(value, count);
        /// <summary>
        /// uint16x8_t vrshlq_u16 (uint16x8_t a, int16x8_t b)
        /// A32: VRSHL.U16 Qd, Qn, Qm
        /// A64: URSHL Vd.8H, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<ushort> ShiftLogicalRounded(Vector128<ushort> value, Vector128<short> count) => ShiftLogicalRounded(value, count);
        /// <summary>
        /// uint32x4_t vrshlq_u32 (uint32x4_t a, int32x4_t b)
        /// A32: VRSHL.U32 Qd, Qn, Qm
        /// A64: URSHL Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<uint> ShiftLogicalRounded(Vector128<uint> value, Vector128<int> count) => ShiftLogicalRounded(value, count);
        /// <summary>
        /// uint64x2_t vrshlq_u64 (uint64x2_t a, int64x2_t b)
        /// A32: VRSHL.U64 Qd, Qn, Qm
        /// A64: URSHL Vd.2D, Vn.2D, Vm.2D
        /// </summary>
        public static Vector128<ulong> ShiftLogicalRounded(Vector128<ulong> value, Vector128<long> count) => ShiftLogicalRounded(value, count);
        /// <summary>
        /// uint8x8_t vqrshl_u8 (uint8x8_t a, int8x8_t b)
        /// A32: VQRSHL.U8 Dd, Dn, Dm
        /// A64: UQRSHL Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<byte> ShiftLogicalRoundedSaturate(Vector64<byte> value, Vector64<sbyte> count) => ShiftLogicalRoundedSaturate(value, count);
        /// <summary>
        /// uint16x4_t vqrshl_u16 (uint16x4_t a, int16x4_t b)
        /// A32: VQRSHL.U16 Dd, Dn, Dm
        /// A64: UQRSHL Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<short> ShiftLogicalRoundedSaturate(Vector64<short> value, Vector64<short> count) => ShiftLogicalRoundedSaturate(value, count);
        /// <summary>
        /// uint32x2_t vqrshl_u32 (uint32x2_t a, int32x2_t b)
        /// A32: VQRSHL.U32 Dd, Dn, Dm
        /// A64: UQRSHL Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<int> ShiftLogicalRoundedSaturate(Vector64<int> value, Vector64<int> count) => ShiftLogicalRoundedSaturate(value, count);
        /// <summary>
        /// uint8x8_t vqrshl_u8 (uint8x8_t a, int8x8_t b)
        /// A32: VQRSHL.U8 Dd, Dn, Dm
        /// A64: UQRSHL Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<sbyte> ShiftLogicalRoundedSaturate(Vector64<sbyte> value, Vector64<sbyte> count) => ShiftLogicalRoundedSaturate(value, count);
        /// <summary>
        /// uint16x4_t vqrshl_u16 (uint16x4_t a, int16x4_t b)
        /// A32: VQRSHL.U16 Dd, Dn, Dm
        /// A64: UQRSHL Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<ushort> ShiftLogicalRoundedSaturate(Vector64<ushort> value, Vector64<short> count) => ShiftLogicalRoundedSaturate(value, count);
        /// <summary>
        /// uint32x2_t vqrshl_u32 (uint32x2_t a, int32x2_t b)
        /// A32: VQRSHL.U32 Dd, Dn, Dm
        /// A64: UQRSHL Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<uint> ShiftLogicalRoundedSaturate(Vector64<uint> value, Vector64<int> count) => ShiftLogicalRoundedSaturate(value, count);
        /// <summary>
        /// uint8x16_t vqrshlq_u8 (uint8x16_t a, int8x16_t b)
        /// A32: VQRSHL.U8 Qd, Qn, Qm
        /// A64: UQRSHL Vd.16B, Vn.16B, Vm.16B
        /// </summary>
        public static Vector128<byte> ShiftLogicalRoundedSaturate(Vector128<byte> value, Vector128<sbyte> count) => ShiftLogicalRoundedSaturate(value, count);
        /// <summary>
        /// uint16x8_t vqrshlq_u16 (uint16x8_t a, int16x8_t b)
        /// A32: VQRSHL.U16 Qd, Qn, Qm
        /// A64: UQRSHL Vd.8H, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<short> ShiftLogicalRoundedSaturate(Vector128<short> value, Vector128<short> count) => ShiftLogicalRoundedSaturate(value, count);
        /// <summary>
        /// uint32x4_t vqrshlq_u32 (uint32x4_t a, int32x4_t b)
        /// A32: VQRSHL.U32 Qd, Qn, Qm
        /// A64: UQRSHL Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<int> ShiftLogicalRoundedSaturate(Vector128<int> value, Vector128<int> count) => ShiftLogicalRoundedSaturate(value, count);
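        // Illustrative note (editorial sketch, not normative): the Rounded families in this region
        // (URSHL/UQRSHL) differ from the plain forms only for negative, i.e. rightward, counts:
        // they round to nearest by adding 2^(n-1) before shifting right by n. For one byte lane
        // shifted by -1: 0x05 truncates to 0x02 but rounds to 0x03.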
        /// <summary>
        /// uint64x2_t vqrshlq_u64 (uint64x2_t a, int64x2_t b)
        /// A32: VQRSHL.U64 Qd, Qn, Qm
        /// A64: UQRSHL Vd.2D, Vn.2D, Vm.2D
        /// </summary>
        public static Vector128<long> ShiftLogicalRoundedSaturate(Vector128<long> value, Vector128<long> count) => ShiftLogicalRoundedSaturate(value, count);
        /// <summary>
        /// uint8x16_t vqrshlq_u8 (uint8x16_t a, int8x16_t b)
        /// A32: VQRSHL.U8 Qd, Qn, Qm
        /// A64: UQRSHL Vd.16B, Vn.16B, Vm.16B
        /// </summary>
        public static Vector128<sbyte> ShiftLogicalRoundedSaturate(Vector128<sbyte> value, Vector128<sbyte> count) => ShiftLogicalRoundedSaturate(value, count);
        /// <summary>
        /// uint16x8_t vqrshlq_u16 (uint16x8_t a, int16x8_t b)
        /// A32: VQRSHL.U16 Qd, Qn, Qm
        /// A64: UQRSHL Vd.8H, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<ushort> ShiftLogicalRoundedSaturate(Vector128<ushort> value, Vector128<short> count) => ShiftLogicalRoundedSaturate(value, count);
        /// <summary>
        /// uint32x4_t vqrshlq_u32 (uint32x4_t a, int32x4_t b)
        /// A32: VQRSHL.U32 Qd, Qn, Qm
        /// A64: UQRSHL Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<uint> ShiftLogicalRoundedSaturate(Vector128<uint> value, Vector128<int> count) => ShiftLogicalRoundedSaturate(value, count);
        /// <summary>
        /// uint64x2_t vqrshlq_u64 (uint64x2_t a, int64x2_t b)
        /// A32: VQRSHL.U64 Qd, Qn, Qm
        /// A64: UQRSHL Vd.2D, Vn.2D, Vm.2D
        /// </summary>
        public static Vector128<ulong> ShiftLogicalRoundedSaturate(Vector128<ulong> value, Vector128<long> count) => ShiftLogicalRoundedSaturate(value, count);
        /// <summary>
        /// uint64x1_t vqrshl_u64 (uint64x1_t a, int64x1_t b)
        /// A32: VQRSHL.U64 Dd, Dn, Dm
        /// A64: UQRSHL Dd, Dn, Dm
        /// </summary>
        public static Vector64<long> ShiftLogicalRoundedSaturateScalar(Vector64<long> value, Vector64<long> count) => ShiftLogicalRoundedSaturateScalar(value, count);
        /// <summary>
        /// uint64x1_t vqrshl_u64 (uint64x1_t a, int64x1_t b)
        /// A32: VQRSHL.U64 Dd, Dn, Dm
        /// A64: UQRSHL Dd, Dn, Dm
        /// </summary>
        public static Vector64<ulong> ShiftLogicalRoundedSaturateScalar(Vector64<ulong> value, Vector64<long> count) => ShiftLogicalRoundedSaturateScalar(value, count);
        /// <summary>
        /// uint64x1_t vrshl_u64 (uint64x1_t a, int64x1_t b)
        /// A32: VRSHL.U64 Dd, Dn, Dm
        /// A64: URSHL Dd, Dn, Dm
        /// </summary>
        public static Vector64<long> ShiftLogicalRoundedScalar(Vector64<long> value, Vector64<long> count) => ShiftLogicalRoundedScalar(value, count);
        /// <summary>
        /// uint64x1_t vrshl_u64 (uint64x1_t a, int64x1_t b)
        /// A32: VRSHL.U64 Dd, Dn, Dm
        /// A64: URSHL Dd, Dn, Dm
        /// </summary>
        public static Vector64<ulong> ShiftLogicalRoundedScalar(Vector64<ulong> value, Vector64<long> count) => ShiftLogicalRoundedScalar(value, count);
        /// <summary>
        /// uint8x8_t vqshl_u8 (uint8x8_t a, int8x8_t b)
        /// A32: VQSHL.U8 Dd, Dn, Dm
        /// A64: UQSHL Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<byte> ShiftLogicalSaturate(Vector64<byte> value, Vector64<sbyte> count) => ShiftLogicalSaturate(value, count);
        /// <summary>
        /// uint16x4_t vqshl_u16 (uint16x4_t a, int16x4_t b)
        /// A32: VQSHL.U16 Dd, Dn, Dm
        /// A64: UQSHL Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<short> ShiftLogicalSaturate(Vector64<short> value, Vector64<short> count) => ShiftLogicalSaturate(value, count);
        /// <summary>
        /// uint32x2_t vqshl_u32 (uint32x2_t a, int32x2_t b)
        /// A32: VQSHL.U32 Dd, Dn, Dm
        /// A64: UQSHL Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<int> ShiftLogicalSaturate(Vector64<int> value, Vector64<int> count) => ShiftLogicalSaturate(value, count);
        /// <summary>
        /// uint8x8_t vqshl_u8 (uint8x8_t a, int8x8_t b)
        /// A32: VQSHL.U8 Dd, Dn, Dm
        /// A64: UQSHL Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<sbyte> ShiftLogicalSaturate(Vector64<sbyte> value, Vector64<sbyte> count) => ShiftLogicalSaturate(value, count);
        /// <summary>
        /// uint16x4_t vqshl_u16 (uint16x4_t a, int16x4_t b)
        /// A32: VQSHL.U16 Dd, Dn, Dm
        /// A64: UQSHL Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<ushort> ShiftLogicalSaturate(Vector64<ushort> value, Vector64<short> count) => ShiftLogicalSaturate(value, count);
        /// <summary>
        /// uint32x2_t vqshl_u32 (uint32x2_t a, int32x2_t b)
        /// A32: VQSHL.U32 Dd, Dn, Dm
        /// A64: UQSHL Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<uint> ShiftLogicalSaturate(Vector64<uint> value, Vector64<int> count) => ShiftLogicalSaturate(value, count);
        /// <summary>
        /// uint8x16_t vqshlq_u8 (uint8x16_t a, int8x16_t b)
        /// A32: VQSHL.U8 Qd, Qn, Qm
        /// A64: UQSHL Vd.16B, Vn.16B, Vm.16B
        /// </summary>
        public static Vector128<byte> ShiftLogicalSaturate(Vector128<byte> value, Vector128<sbyte> count) => ShiftLogicalSaturate(value, count);
        /// <summary>
        /// uint16x8_t vqshlq_u16 (uint16x8_t a, int16x8_t b)
        /// A32: VQSHL.U16 Qd, Qn, Qm
        /// A64: UQSHL Vd.8H, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<short> ShiftLogicalSaturate(Vector128<short> value, Vector128<short> count) => ShiftLogicalSaturate(value, count);
        /// <summary>
        /// uint32x4_t vqshlq_u32 (uint32x4_t a, int32x4_t b)
        /// A32: VQSHL.U32 Qd, Qn, Qm
        /// A64: UQSHL Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<int> ShiftLogicalSaturate(Vector128<int> value, Vector128<int> count) => ShiftLogicalSaturate(value, count);
        /// <summary>
        /// uint64x2_t vqshlq_u64 (uint64x2_t a, int64x2_t b)
        /// A32: VQSHL.U64 Qd, Qn, Qm
        /// A64: UQSHL Vd.2D, Vn.2D, Vm.2D
        /// </summary>
        public static Vector128<long> ShiftLogicalSaturate(Vector128<long> value, Vector128<long> count) => ShiftLogicalSaturate(value, count);
        /// <summary>
        /// uint8x16_t vqshlq_u8 (uint8x16_t a, int8x16_t b)
        /// A32: VQSHL.U8 Qd, Qn, Qm
        /// A64: UQSHL Vd.16B, Vn.16B, Vm.16B
        /// </summary>
        public static Vector128<sbyte> ShiftLogicalSaturate(Vector128<sbyte> value, Vector128<sbyte> count) => ShiftLogicalSaturate(value, count);
        /// <summary>
        /// uint16x8_t vqshlq_u16 (uint16x8_t a, int16x8_t b)
        /// A32: VQSHL.U16 Qd, Qn, Qm
        /// A64: UQSHL Vd.8H, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<ushort> ShiftLogicalSaturate(Vector128<ushort> value, Vector128<short> count) => ShiftLogicalSaturate(value, count);
        /// <summary>
        /// uint32x4_t vqshlq_u32 (uint32x4_t a, int32x4_t b)
        /// A32: VQSHL.U32 Qd, Qn, Qm
        /// A64: UQSHL Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<uint> ShiftLogicalSaturate(Vector128<uint> value, Vector128<int> count) => ShiftLogicalSaturate(value, count);
        /// <summary>
        /// uint64x2_t vqshlq_u64 (uint64x2_t a, int64x2_t b)
        /// A32: VQSHL.U64 Qd, Qn, Qm
        /// A64: UQSHL Vd.2D, Vn.2D, Vm.2D
        /// </summary>
        public static Vector128<ulong> ShiftLogicalSaturate(Vector128<ulong> value, Vector128<long> count) => ShiftLogicalSaturate(value, count);
        /// <summary>
        /// uint64x1_t vqshl_u64 (uint64x1_t a, int64x1_t b)
        /// A32: VQSHL.U64 Dd, Dn, Dm
        /// A64: UQSHL Dd, Dn, Dm
        /// </summary>
        public static Vector64<long> ShiftLogicalSaturateScalar(Vector64<long> value, Vector64<long> count) => ShiftLogicalSaturateScalar(value, count);
        /// <summary>
        /// uint64x1_t vqshl_u64 (uint64x1_t a, int64x1_t b)
        /// A32: VQSHL.U64 Dd, Dn, Dm
        /// A64: UQSHL Dd, Dn, Dm
        /// </summary>
        public static Vector64<ulong> ShiftLogicalSaturateScalar(Vector64<ulong> value, Vector64<long> count) => ShiftLogicalSaturateScalar(value, count);
        /// <summary>
        /// uint64x1_t vshl_u64 (uint64x1_t a, int64x1_t b)
        /// A32: VSHL.U64 Dd, Dn, Dm
        /// A64: USHL Dd, Dn, Dm
        /// </summary>
        public static Vector64<long> ShiftLogicalScalar(Vector64<long> value, Vector64<long> count) => ShiftLogicalScalar(value, count);
        /// <summary>
        /// uint64x1_t vshl_u64 (uint64x1_t a, int64x1_t b)
        /// A32: VSHL.U64 Dd, Dn, Dm
        /// A64: USHL Dd, Dn, Dm
        /// </summary>
        public static Vector64<ulong> ShiftLogicalScalar(Vector64<ulong> value, Vector64<long> count) => ShiftLogicalScalar(value, count);
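        // Illustrative note (editorial sketch, not normative): ShiftRightAndInsert below maps to
        // SRI, the mirror image of SLI: each lane of 'right' is shifted right by 'shift' and
        // inserted into 'left', preserving the high 'shift' bits of 'left'. For one byte lane:
        //   left = 0x12, right = 0xF0, shift = 4  =>  (0xF0 >> 4) | (0x12 & 0xF0) == 0x1F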
        /// <summary>
        /// uint8x8_t vsri_n_u8(uint8x8_t a, uint8x8_t b, __builtin_constant_p(n))
        /// A32: VSRI.8 Dd, Dm, #n
        /// A64: SRI Vd.8B, Vn.8B, #n
        /// </summary>
        public static Vector64<byte> ShiftRightAndInsert(Vector64<byte> left, Vector64<byte> right, byte shift) => ShiftRightAndInsert(left, right, shift);
        /// <summary>
        /// int16x4_t vsri_n_s16(int16x4_t a, int16x4_t b, __builtin_constant_p(n))
        /// A32: VSRI.16 Dd, Dm, #n
        /// A64: SRI Vd.4H, Vn.4H, #n
        /// </summary>
        public static Vector64<short> ShiftRightAndInsert(Vector64<short> left, Vector64<short> right, byte shift) => ShiftRightAndInsert(left, right, shift);
        /// <summary>
        /// int32x2_t vsri_n_s32(int32x2_t a, int32x2_t b, __builtin_constant_p(n))
        /// A32: VSRI.32 Dd, Dm, #n
        /// A64: SRI Vd.2S, Vn.2S, #n
        /// </summary>
        public static Vector64<int> ShiftRightAndInsert(Vector64<int> left, Vector64<int> right, byte shift) => ShiftRightAndInsert(left, right, shift);
        /// <summary>
        /// int8x8_t vsri_n_s8(int8x8_t a, int8x8_t b, __builtin_constant_p(n))
        /// A32: VSRI.8 Dd, Dm, #n
        /// A64: SRI Vd.8B, Vn.8B, #n
        /// </summary>
        public static Vector64<sbyte> ShiftRightAndInsert(Vector64<sbyte> left, Vector64<sbyte> right, byte shift) => ShiftRightAndInsert(left, right, shift);
        /// <summary>
        /// uint16x4_t vsri_n_u16(uint16x4_t a, uint16x4_t b, __builtin_constant_p(n))
        /// A32: VSRI.16 Dd, Dm, #n
        /// A64: SRI Vd.4H, Vn.4H, #n
        /// </summary>
        public static Vector64<ushort> ShiftRightAndInsert(Vector64<ushort> left, Vector64<ushort> right, byte shift) => ShiftRightAndInsert(left, right, shift);
        /// <summary>
        /// uint32x2_t vsri_n_u32(uint32x2_t a, uint32x2_t b, __builtin_constant_p(n))
        /// A32: VSRI.32 Dd, Dm, #n
        /// A64: SRI Vd.2S, Vn.2S, #n
        /// </summary>
        public static Vector64<uint> ShiftRightAndInsert(Vector64<uint> left, Vector64<uint> right, byte shift) => ShiftRightAndInsert(left, right, shift);
        /// <summary>
        /// uint8x16_t vsriq_n_u8(uint8x16_t a, uint8x16_t b, __builtin_constant_p(n))
        /// A32: VSRI.8 Qd, Qm, #n
        /// A64: SRI Vd.16B, Vn.16B, #n
        /// </summary>
        public static Vector128<byte> ShiftRightAndInsert(Vector128<byte> left, Vector128<byte> right, byte shift) => ShiftRightAndInsert(left, right, shift);
        /// <summary>
        /// int16x8_t vsriq_n_s16(int16x8_t a, int16x8_t b, __builtin_constant_p(n))
        /// A32: VSRI.16 Qd, Qm, #n
        /// A64: SRI Vd.8H, Vn.8H, #n
        /// </summary>
        public static Vector128<short> ShiftRightAndInsert(Vector128<short> left, Vector128<short> right, byte shift) => ShiftRightAndInsert(left, right, shift);
        /// <summary>
        /// int32x4_t vsriq_n_s32(int32x4_t a, int32x4_t b, __builtin_constant_p(n))
        /// A32: VSRI.32 Qd, Qm, #n
        /// A64: SRI Vd.4S, Vn.4S, #n
        /// </summary>
        public static Vector128<int> ShiftRightAndInsert(Vector128<int> left, Vector128<int> right, byte shift) => ShiftRightAndInsert(left, right, shift);
        /// <summary>
        /// int64x2_t vsriq_n_s64(int64x2_t a, int64x2_t b, __builtin_constant_p(n))
        /// A32: VSRI.64 Qd, Qm, #n
        /// A64: SRI Vd.2D, Vn.2D, #n
        /// </summary>
        public static Vector128<long> ShiftRightAndInsert(Vector128<long> left, Vector128<long> right, byte shift) => ShiftRightAndInsert(left, right, shift);
        /// <summary>
        /// int8x16_t vsriq_n_s8(int8x16_t a, int8x16_t b, __builtin_constant_p(n))
        /// A32: VSRI.8 Qd, Qm, #n
        /// A64: SRI Vd.16B, Vn.16B, #n
        /// </summary>
        public static Vector128<sbyte> ShiftRightAndInsert(Vector128<sbyte> left, Vector128<sbyte> right, byte shift) => ShiftRightAndInsert(left, right, shift);
        /// <summary>
        /// uint16x8_t vsriq_n_u16(uint16x8_t a, uint16x8_t b, __builtin_constant_p(n))
        /// A32: VSRI.16 Qd, Qm, #n
        /// A64: SRI Vd.8H, Vn.8H, #n
        /// </summary>
        public static Vector128<ushort> ShiftRightAndInsert(Vector128<ushort> left, Vector128<ushort> right, byte shift) => ShiftRightAndInsert(left, right, shift);
        /// <summary>
        /// uint32x4_t vsriq_n_u32(uint32x4_t a, uint32x4_t b, __builtin_constant_p(n))
        /// A32: VSRI.32 Qd, Qm, #n
        /// A64: SRI Vd.4S, Vn.4S, #n
        /// </summary>
        public static Vector128<uint> ShiftRightAndInsert(Vector128<uint> left, Vector128<uint> right, byte shift) => ShiftRightAndInsert(left, right, shift);
        /// <summary>
        /// uint64x2_t vsriq_n_u64(uint64x2_t a, uint64x2_t b, __builtin_constant_p(n))
        /// A32: VSRI.64 Qd, Qm, #n
        /// A64: SRI Vd.2D, Vn.2D, #n
        /// </summary>
        public static Vector128<ulong> ShiftRightAndInsert(Vector128<ulong> left, Vector128<ulong> right, byte shift) => ShiftRightAndInsert(left, right, shift);
        /// <summary>
        /// int64_t vsrid_n_s64(int64_t a, int64_t b, __builtin_constant_p(n))
        /// A32: VSRI.64 Dd, Dm, #n
        /// A64: SRI Dd, Dn, #n
        /// </summary>
        public static Vector64<long> ShiftRightAndInsertScalar(Vector64<long> left, Vector64<long> right, byte shift) => ShiftRightAndInsertScalar(left, right, shift);
        /// <summary>
        /// uint64_t vsrid_n_u64(uint64_t a, uint64_t b, __builtin_constant_p(n))
        /// A32: VSRI.64 Dd, Dm, #n
        /// A64: SRI Dd, Dn, #n
        /// </summary>
        public static Vector64<ulong> ShiftRightAndInsertScalar(Vector64<ulong> left, Vector64<ulong> right, byte shift) => ShiftRightAndInsertScalar(left, right, shift);
        /// <summary>
        /// int16x4_t vshr_n_s16 (int16x4_t a, const int n)
        /// A32: VSHR.S16 Dd, Dm, #n
        /// A64: SSHR Vd.4H, Vn.4H, #n
        /// </summary>
        public static Vector64<short> ShiftRightArithmetic(Vector64<short> value, byte count) => ShiftRightArithmetic(value, count);
        /// <summary>
        /// int32x2_t vshr_n_s32 (int32x2_t a, const int n)
        /// A32: VSHR.S32 Dd, Dm, #n
        /// A64: SSHR Vd.2S, Vn.2S, #n
        /// </summary>
        public static Vector64<int> ShiftRightArithmetic(Vector64<int> value, byte count) => ShiftRightArithmetic(value, count);
        /// <summary>
        /// int8x8_t vshr_n_s8 (int8x8_t a, const int n)
        /// A32: VSHR.S8 Dd, Dm, #n
        /// A64: SSHR Vd.8B, Vn.8B, #n
        /// </summary>
        public static Vector64<sbyte> ShiftRightArithmetic(Vector64<sbyte> value, byte count) => ShiftRightArithmetic(value, count);
        /// <summary>
        /// int16x8_t vshrq_n_s16 (int16x8_t a, const int n)
        /// A32: VSHR.S16 Qd, Qm, #n
        /// A64: SSHR Vd.8H, Vn.8H, #n
        /// </summary>
        public static Vector128<short> ShiftRightArithmetic(Vector128<short> value, byte count) => ShiftRightArithmetic(value, count);
        /// <summary>
        /// int32x4_t vshrq_n_s32 (int32x4_t a, const int n)
        /// A32: VSHR.S32 Qd, Qm, #n
        /// A64: SSHR Vd.4S, Vn.4S, #n
        /// </summary>
        public static Vector128<int> ShiftRightArithmetic(Vector128<int> value, byte count) => ShiftRightArithmetic(value, count);
        /// <summary>
        /// int64x2_t vshrq_n_s64 (int64x2_t a, const int n)
        /// A32: VSHR.S64 Qd, Qm, #n
        /// A64: SSHR Vd.2D, Vn.2D, #n
        /// </summary>
        public static Vector128<long> ShiftRightArithmetic(Vector128<long> value, byte count) => ShiftRightArithmetic(value, count);
        /// <summary>
        /// int8x16_t vshrq_n_s8 (int8x16_t a, const int n)
        /// A32: VSHR.S8 Qd, Qm, #n
        /// A64: SSHR Vd.16B, Vn.16B, #n
        /// </summary>
        public static Vector128<sbyte> ShiftRightArithmetic(Vector128<sbyte> value, byte count) => ShiftRightArithmetic(value, count);
        /// <summary>
        /// int16x4_t vsra_n_s16 (int16x4_t a, int16x4_t b, const int n)
        /// A32: VSRA.S16 Dd, Dm, #n
        /// A64: SSRA Vd.4H, Vn.4H, #n
        /// </summary>
        public static Vector64<short> ShiftRightArithmeticAdd(Vector64<short> addend, Vector64<short> value, byte count) => ShiftRightArithmeticAdd(addend, value, count);
        /// <summary>
        /// int32x2_t vsra_n_s32 (int32x2_t a, int32x2_t b, const int n)
        /// A32: VSRA.S32 Dd, Dm, #n
        /// A64: SSRA Vd.2S, Vn.2S, #n
        /// </summary>
        public static Vector64<int> ShiftRightArithmeticAdd(Vector64<int> addend, Vector64<int> value, byte count) => ShiftRightArithmeticAdd(addend, value, count);
        /// <summary>
        /// int8x8_t vsra_n_s8 (int8x8_t a, int8x8_t b, const int n)
        /// A32: VSRA.S8 Dd, Dm, #n
        /// A64: SSRA Vd.8B, Vn.8B, #n
        /// </summary>
        public static Vector64<sbyte> ShiftRightArithmeticAdd(Vector64<sbyte> addend, Vector64<sbyte> value, byte count) => ShiftRightArithmeticAdd(addend, value, count);
        /// <summary>
        /// int16x8_t vsraq_n_s16 (int16x8_t a, int16x8_t b, const int n)
        /// A32: VSRA.S16 Qd, Qm, #n
        /// A64: SSRA Vd.8H, Vn.8H, #n
        /// </summary>
        public static Vector128<short> ShiftRightArithmeticAdd(Vector128<short> addend, Vector128<short> value, byte count) => ShiftRightArithmeticAdd(addend, value, count);
        /// <summary>
        /// int32x4_t vsraq_n_s32 (int32x4_t a, int32x4_t b, const int n)
        /// A32: VSRA.S32 Qd, Qm, #n
        /// A64: SSRA Vd.4S, Vn.4S, #n
        /// </summary>
        public static Vector128<int> ShiftRightArithmeticAdd(Vector128<int> addend, Vector128<int> value, byte count) => ShiftRightArithmeticAdd(addend, value, count);
        /// <summary>
        /// int64x2_t vsraq_n_s64 (int64x2_t a, int64x2_t b, const int n)
        /// A32: VSRA.S64 Qd, Qm, #n
        /// A64: SSRA Vd.2D, Vn.2D, #n
        /// </summary>
        public static Vector128<long> ShiftRightArithmeticAdd(Vector128<long> addend, Vector128<long> value, byte count) => ShiftRightArithmeticAdd(addend, value, count);
        /// <summary>
        /// int8x16_t vsraq_n_s8 (int8x16_t a, int8x16_t b, const int n)
        /// A32: VSRA.S8 Qd, Qm, #n
        /// A64: SSRA Vd.16B, Vn.16B, #n
        /// </summary>
        public static Vector128<sbyte> ShiftRightArithmeticAdd(Vector128<sbyte> addend, Vector128<sbyte> value, byte count) => ShiftRightArithmeticAdd(addend, value, count);
        /// <summary>
        /// int64x1_t vsra_n_s64 (int64x1_t a, int64x1_t b, const int n)
        /// A32: VSRA.S64 Dd, Dm, #n
        /// A64: SSRA Dd, Dn, #n
        /// </summary>
        public static Vector64<long> ShiftRightArithmeticAddScalar(Vector64<long> addend, Vector64<long> value, byte count) => ShiftRightArithmeticAddScalar(addend, value, count);
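        // Illustrative note (editorial sketch, not normative): the NarrowingSaturate family below
        // (SQSHRN and friends) shifts right and then narrows each lane to half its width, clamping
        // values that do not fit. For one int lane narrowed to short: 0x0100_0000 >> 8 == 65536,
        // which saturates to short.MaxValue (0x7FFF); the Unsigned forms clamp negatives to zero.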
        /// <summary>
        /// int16x4_t vqshrn_n_s32 (int32x4_t a, const int n)
        /// A32: VQSHRN.S32 Dd, Qm, #n
        /// A64: SQSHRN Vd.4H, Vn.4S, #n
        /// </summary>
        public static Vector64<short> ShiftRightArithmeticNarrowingSaturateLower(Vector128<int> value, byte count) => ShiftRightArithmeticNarrowingSaturateLower(value, count);
        /// <summary>
        /// int32x2_t vqshrn_n_s64 (int64x2_t a, const int n)
        /// A32: VQSHRN.S64 Dd, Qm, #n
        /// A64: SQSHRN Vd.2S, Vn.2D, #n
        /// </summary>
        public static Vector64<int> ShiftRightArithmeticNarrowingSaturateLower(Vector128<long> value, byte count) => ShiftRightArithmeticNarrowingSaturateLower(value, count);
        /// <summary>
        /// int8x8_t vqshrn_n_s16 (int16x8_t a, const int n)
        /// A32: VQSHRN.S16 Dd, Qm, #n
        /// A64: SQSHRN Vd.8B, Vn.8H, #n
        /// </summary>
        public static Vector64<sbyte> ShiftRightArithmeticNarrowingSaturateLower(Vector128<short> value, byte count) => ShiftRightArithmeticNarrowingSaturateLower(value, count);
        /// <summary>
        /// uint8x8_t vqshrun_n_s16 (int16x8_t a, const int n)
        /// A32: VQSHRUN.S16 Dd, Qm, #n
        /// A64: SQSHRUN Vd.8B, Vn.8H, #n
        /// </summary>
        public static Vector64<byte> ShiftRightArithmeticNarrowingSaturateUnsignedLower(Vector128<short> value, byte count) => ShiftRightArithmeticNarrowingSaturateUnsignedLower(value, count);
        /// <summary>
        /// uint16x4_t vqshrun_n_s32 (int32x4_t a, const int n)
        /// A32: VQSHRUN.S32 Dd, Qm, #n
        /// A64: SQSHRUN Vd.4H, Vn.4S, #n
        /// </summary>
        public static Vector64<ushort> ShiftRightArithmeticNarrowingSaturateUnsignedLower(Vector128<int> value, byte count) => ShiftRightArithmeticNarrowingSaturateUnsignedLower(value, count);
        /// <summary>
        /// uint32x2_t vqshrun_n_s64 (int64x2_t a, const int n)
        /// A32: VQSHRUN.S64 Dd, Qm, #n
        /// A64: SQSHRUN Vd.2S, Vn.2D, #n
        /// </summary>
        public static Vector64<uint> ShiftRightArithmeticNarrowingSaturateUnsignedLower(Vector128<long> value, byte count) => ShiftRightArithmeticNarrowingSaturateUnsignedLower(value, count);
        /// <summary>
        /// uint8x16_t vqshrun_high_n_s16 (uint8x8_t r, int16x8_t a, const int n)
        /// A32: VQSHRUN.S16 Dd+1, Dn, #n
        /// A64: SQSHRUN2 Vd.16B, Vn.8H, #n
        /// </summary>
        public static Vector128<byte> ShiftRightArithmeticNarrowingSaturateUnsignedUpper(Vector64<byte> lower, Vector128<short> value, byte count) => ShiftRightArithmeticNarrowingSaturateUnsignedUpper(lower, value, count);
        /// <summary>
        /// uint16x8_t vqshrun_high_n_s32 (uint16x4_t r, int32x4_t a, const int n)
        /// A32: VQSHRUN.S32 Dd+1, Dn, #n
        /// A64: SQSHRUN2 Vd.8H, Vn.4S, #n
        /// </summary>
        public static Vector128<ushort> ShiftRightArithmeticNarrowingSaturateUnsignedUpper(Vector64<ushort> lower, Vector128<int> value, byte count) => ShiftRightArithmeticNarrowingSaturateUnsignedUpper(lower, value, count);
        /// <summary>
        /// uint32x4_t vqshrun_high_n_s64 (uint32x2_t r, int64x2_t a, const int n)
        /// A32: VQSHRUN.S64 Dd+1, Dn, #n
        /// A64: SQSHRUN2 Vd.4S, Vn.2D, #n
        /// </summary>
        public static Vector128<uint> ShiftRightArithmeticNarrowingSaturateUnsignedUpper(Vector64<uint> lower, Vector128<long> value, byte count) => ShiftRightArithmeticNarrowingSaturateUnsignedUpper(lower, value, count);
        /// <summary>
        /// int16x8_t vqshrn_high_n_s32 (int16x4_t r, int32x4_t a, const int n)
        /// A32: VQSHRN.S32 Dd+1, Qm, #n
        /// A64: SQSHRN2 Vd.8H, Vn.4S, #n
        /// </summary>
        public static Vector128<short> ShiftRightArithmeticNarrowingSaturateUpper(Vector64<short> lower, Vector128<int> value, byte count) => ShiftRightArithmeticNarrowingSaturateUpper(lower, value, count);
        /// <summary>
        /// int32x4_t vqshrn_high_n_s64 (int32x2_t r, int64x2_t a, const int n)
        /// A32: VQSHRN.S64 Dd+1, Qm, #n
        /// A64: SQSHRN2 Vd.4S, Vn.2D, #n
        /// </summary>
        public static Vector128<int> ShiftRightArithmeticNarrowingSaturateUpper(Vector64<int> lower, Vector128<long> value, byte count) => ShiftRightArithmeticNarrowingSaturateUpper(lower, value, count);
        /// <summary>
        /// int8x16_t vqshrn_high_n_s16 (int8x8_t r, int16x8_t a, const int n)
        /// A32: VQSHRN.S16 Dd+1, Qm, #n
        /// A64: SQSHRN2 Vd.16B, Vn.8H, #n
        /// </summary>
        public static Vector128<sbyte> ShiftRightArithmeticNarrowingSaturateUpper(Vector64<sbyte> lower, Vector128<short> value, byte count) => ShiftRightArithmeticNarrowingSaturateUpper(lower, value, count);
        /// <summary>
        /// int16x4_t vrshr_n_s16 (int16x4_t a, const int n)
        /// A32: VRSHR.S16 Dd, Dm, #n
        /// A64: SRSHR Vd.4H, Vn.4H, #n
        /// </summary>
        public static Vector64<short> ShiftRightArithmeticRounded(Vector64<short> value, byte count) => ShiftRightArithmeticRounded(value, count);
        /// <summary>
        /// int32x2_t vrshr_n_s32 (int32x2_t a, const int n)
        /// A32: VRSHR.S32 Dd, Dm, #n
        /// A64: SRSHR Vd.2S, Vn.2S, #n
        /// </summary>
        public static Vector64<int> ShiftRightArithmeticRounded(Vector64<int> value, byte count) => ShiftRightArithmeticRounded(value, count);
        /// <summary>
        /// int8x8_t vrshr_n_s8 (int8x8_t a, const int n)
        /// A32: VRSHR.S8 Dd, Dm, #n
        /// A64: SRSHR Vd.8B, Vn.8B, #n
        /// </summary>
        public static Vector64<sbyte> ShiftRightArithmeticRounded(Vector64<sbyte> value, byte count) => ShiftRightArithmeticRounded(value, count);
        /// <summary>
        /// int16x8_t vrshrq_n_s16 (int16x8_t a, const int n)
        /// A32: VRSHR.S16 Qd, Qm, #n
        /// A64: SRSHR Vd.8H, Vn.8H, #n
        /// </summary>
        public static Vector128<short> ShiftRightArithmeticRounded(Vector128<short> value, byte count) => ShiftRightArithmeticRounded(value, count);
        /// <summary>
        /// int32x4_t vrshrq_n_s32 (int32x4_t a, const int n)
        /// A32: VRSHR.S32 Qd, Qm, #n
        /// A64: SRSHR Vd.4S, Vn.4S, #n
        /// </summary>
        public static Vector128<int> ShiftRightArithmeticRounded(Vector128<int> value, byte count) => ShiftRightArithmeticRounded(value, count);
        /// <summary>
        /// int64x2_t vrshrq_n_s64 (int64x2_t a, const int n)
        /// A32: VRSHR.S64 Qd, Qm, #n
        /// A64: SRSHR Vd.2D, Vn.2D, #n
        /// </summary>
        public static Vector128<long> ShiftRightArithmeticRounded(Vector128<long> value, byte count) => ShiftRightArithmeticRounded(value, count);
        /// <summary>
        /// int8x16_t vrshrq_n_s8 (int8x16_t a, const int n)
        /// A32: VRSHR.S8 Qd, Qm, #n
        /// A64: SRSHR Vd.16B, Vn.16B, #n
        /// </summary>
        public static Vector128<sbyte> ShiftRightArithmeticRounded(Vector128<sbyte> value, byte count) => ShiftRightArithmeticRounded(value, count);
        /// <summary>
        /// int16x4_t vrsra_n_s16 (int16x4_t a, int16x4_t b, const int n)
        /// A32: VRSRA.S16 Dd, Dm, #n
        /// A64: SRSRA Vd.4H, Vn.4H, #n
        /// </summary>
        public static Vector64<short> ShiftRightArithmeticRoundedAdd(Vector64<short> addend, Vector64<short> value, byte count) => ShiftRightArithmeticRoundedAdd(addend, value, count);
        /// <summary>
        /// int32x2_t vrsra_n_s32 (int32x2_t a, int32x2_t b, const int n)
        /// A32: VRSRA.S32 Dd, Dm, #n
        /// A64: SRSRA Vd.2S, Vn.2S, #n
        /// </summary>
        public static Vector64<int> ShiftRightArithmeticRoundedAdd(Vector64<int> addend, Vector64<int> value, byte count) => ShiftRightArithmeticRoundedAdd(addend, value, count);
        /// <summary>
        /// int8x8_t vrsra_n_s8 (int8x8_t a, int8x8_t b, const int n)
        /// A32: VRSRA.S8 Dd, Dm, #n
        /// A64: SRSRA Vd.8B, Vn.8B, #n
        /// </summary>
        public static Vector64<sbyte> ShiftRightArithmeticRoundedAdd(Vector64<sbyte> addend, Vector64<sbyte> value, byte count) => ShiftRightArithmeticRoundedAdd(addend, value, count);
        /// <summary>
        /// int16x8_t vrsraq_n_s16 (int16x8_t a, int16x8_t b, const int n)
        /// A32: VRSRA.S16 Qd, Qm, #n
        /// A64: SRSRA Vd.8H, Vn.8H, #n
        /// </summary>
        public static Vector128<short> ShiftRightArithmeticRoundedAdd(Vector128<short> addend, Vector128<short> value, byte count) => ShiftRightArithmeticRoundedAdd(addend, value, count);
        /// <summary>
        /// int32x4_t vrsraq_n_s32 (int32x4_t a, int32x4_t b, const int n)
        /// A32: VRSRA.S32 Qd, Qm, #n
        /// A64: SRSRA Vd.4S, Vn.4S, #n
        /// </summary>
        public static Vector128<int> ShiftRightArithmeticRoundedAdd(Vector128<int> addend, Vector128<int> value, byte count) => ShiftRightArithmeticRoundedAdd(addend, value, count);
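        // Illustrative note (editorial sketch, not normative): the RoundedAdd variants here map to
        // SRSRA, which behaves like ShiftRightArithmeticAdd except that the shifted term is rounded
        // to nearest (2^(count-1) is added to each lane before the shift). For one lane: 5 >> 1 is
        // 2 truncated but 3 rounded, and the rounded value is what gets accumulated into 'addend'.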
        /// <summary>
        /// int64x2_t vrsraq_n_s64 (int64x2_t a, int64x2_t b, const int n)
        /// A32: VRSRA.S64 Qd, Qm, #n
        /// A64: SRSRA Vd.2D, Vn.2D, #n
        /// </summary>
        public static Vector128<long> ShiftRightArithmeticRoundedAdd(Vector128<long> addend, Vector128<long> value, byte count) => ShiftRightArithmeticRoundedAdd(addend, value, count);
        /// <summary>
        /// int8x16_t vrsraq_n_s8 (int8x16_t a, int8x16_t b, const int n)
        /// A32: VRSRA.S8 Qd, Qm, #n
        /// A64: SRSRA Vd.16B, Vn.16B, #n
        /// </summary>
        public static Vector128<sbyte> ShiftRightArithmeticRoundedAdd(Vector128<sbyte> addend, Vector128<sbyte> value, byte count) => ShiftRightArithmeticRoundedAdd(addend, value, count);
        /// <summary>
        /// int64x1_t vrsra_n_s64 (int64x1_t a, int64x1_t b, const int n)
        /// A32: VRSRA.S64 Dd, Dm, #n
        /// A64: SRSRA Dd, Dn, #n
        /// </summary>
        public static Vector64<long> ShiftRightArithmeticRoundedAddScalar(Vector64<long> addend, Vector64<long> value, byte count) => ShiftRightArithmeticRoundedAddScalar(addend, value, count);
        /// <summary>
        /// int16x4_t vqrshrn_n_s32 (int32x4_t a, const int n)
        /// A32: VQRSHRN.S32 Dd, Qm, #n
        /// A64: SQRSHRN Vd.4H, Vn.4S, #n
        /// </summary>
        public static Vector64<short> ShiftRightArithmeticRoundedNarrowingSaturateLower(Vector128<int> value, byte count) => ShiftRightArithmeticRoundedNarrowingSaturateLower(value, count);
        /// <summary>
        /// int32x2_t vqrshrn_n_s64 (int64x2_t a, const int n)
        /// A32: VQRSHRN.S64 Dd, Qm, #n
        /// A64: SQRSHRN Vd.2S, Vn.2D, #n
        /// </summary>
        public static Vector64<int> ShiftRightArithmeticRoundedNarrowingSaturateLower(Vector128<long> value, byte count) => ShiftRightArithmeticRoundedNarrowingSaturateLower(value, count);
        /// <summary>
        /// int8x8_t vqrshrn_n_s16 (int16x8_t a, const int n)
        /// A32: VQRSHRN.S16 Dd, Qm, #n
        /// A64: SQRSHRN Vd.8B, Vn.8H, #n
        /// </summary>
        public static Vector64<sbyte> ShiftRightArithmeticRoundedNarrowingSaturateLower(Vector128<short> value, byte count) => ShiftRightArithmeticRoundedNarrowingSaturateLower(value, count);
        /// <summary>
        /// uint8x8_t vqrshrun_n_s16 (int16x8_t a, const int n)
        /// A32: VQRSHRUN.S16 Dd, Qm, #n
        /// A64: SQRSHRUN Vd.8B, Vn.8H, #n
        /// </summary>
        public static Vector64<byte> ShiftRightArithmeticRoundedNarrowingSaturateUnsignedLower(Vector128<short> value, byte count) => ShiftRightArithmeticRoundedNarrowingSaturateUnsignedLower(value, count);
        /// <summary>
        /// uint16x4_t vqrshrun_n_s32 (int32x4_t a, const int n)
        /// A32: VQRSHRUN.S32 Dd, Qm, #n
        /// A64: SQRSHRUN Vd.4H, Vn.4S, #n
        /// </summary>
        public static Vector64<ushort> ShiftRightArithmeticRoundedNarrowingSaturateUnsignedLower(Vector128<int> value, byte count) => ShiftRightArithmeticRoundedNarrowingSaturateUnsignedLower(value, count);
        /// <summary>
        /// uint32x2_t vqrshrun_n_s64 (int64x2_t a, const int n)
        /// A32: VQRSHRUN.S64 Dd, Qm, #n
        /// A64: SQRSHRUN Vd.2S, Vn.2D, #n
        /// </summary>
        public static Vector64<uint> ShiftRightArithmeticRoundedNarrowingSaturateUnsignedLower(Vector128<long> value, byte count) => ShiftRightArithmeticRoundedNarrowingSaturateUnsignedLower(value, count);
        /// <summary>
        /// uint8x16_t vqrshrun_high_n_s16 (uint8x8_t r, int16x8_t a, const int n)
        /// A32: VQRSHRUN.S16 Dd+1, Dn, #n
        /// A64: SQRSHRUN2 Vd.16B, Vn.8H, #n
        /// </summary>
        public static Vector128<byte> ShiftRightArithmeticRoundedNarrowingSaturateUnsignedUpper(Vector64<byte> lower, Vector128<short> value, byte count) => ShiftRightArithmeticRoundedNarrowingSaturateUnsignedUpper(lower, value, count);
        /// <summary>
        /// uint16x8_t vqrshrun_high_n_s32 (uint16x4_t r, int32x4_t a, const int n)
        /// A32: VQRSHRUN.S32 Dd+1, Dn, #n
        /// A64: SQRSHRUN2 Vd.8H, Vn.4S, #n
        /// </summary>
        public static Vector128<ushort> ShiftRightArithmeticRoundedNarrowingSaturateUnsignedUpper(Vector64<ushort> lower, Vector128<int> value, byte count) => ShiftRightArithmeticRoundedNarrowingSaturateUnsignedUpper(lower, value, count);
        /// <summary>
        /// uint32x4_t vqrshrun_high_n_s64 (uint32x2_t r, int64x2_t a, const int n)
        /// A32: VQRSHRUN.S64 Dd+1, Dn, #n
        /// A64: SQRSHRUN2 Vd.4S, Vn.2D, #n
        /// </summary>
        public static Vector128<uint> ShiftRightArithmeticRoundedNarrowingSaturateUnsignedUpper(Vector64<uint> lower, Vector128<long> value, byte count) => ShiftRightArithmeticRoundedNarrowingSaturateUnsignedUpper(lower, value, count);
        /// <summary>
        /// int16x8_t vqrshrn_high_n_s32 (int16x4_t r, int32x4_t a, const int n)
        /// A32: VQRSHRN.S32 Dd+1, Dn, #n
        /// A64: SQRSHRN2 Vd.8H, Vn.4S, #n
        /// </summary>
        public static Vector128<short> ShiftRightArithmeticRoundedNarrowingSaturateUpper(Vector64<short> lower, Vector128<int> value, byte count) => ShiftRightArithmeticRoundedNarrowingSaturateUpper(lower, value, count);
        /// <summary>
        /// int32x4_t vqrshrn_high_n_s64 (int32x2_t r, int64x2_t a, const int n)
        /// A32: VQRSHRN.S64 Dd+1, Dn, #n
        /// A64: SQRSHRN2 Vd.4S, Vn.2D, #n
        /// </summary>
        public static Vector128<int> ShiftRightArithmeticRoundedNarrowingSaturateUpper(Vector64<int> lower, Vector128<long> value, byte count) => ShiftRightArithmeticRoundedNarrowingSaturateUpper(lower, value, count);
        /// <summary>
        /// int8x16_t vqrshrn_high_n_s16 (int8x8_t r, int16x8_t a, const int n)
        /// A32: VQRSHRN.S16 Dd+1, Dn, #n
        /// A64: SQRSHRN2 Vd.16B, Vn.8H, #n
        /// </summary>
        public static Vector128<sbyte> ShiftRightArithmeticRoundedNarrowingSaturateUpper(Vector64<sbyte> lower, Vector128<short> value, byte count) => ShiftRightArithmeticRoundedNarrowingSaturateUpper(lower, value, count);
        /// <summary>
        /// int64x1_t vrshr_n_s64 (int64x1_t a, const int n)
        /// A32: VRSHR.S64 Dd, Dm, #n
        /// A64: SRSHR Dd, Dn, #n
        /// </summary>
        public static Vector64<long> ShiftRightArithmeticRoundedScalar(Vector64<long> value, byte count) => ShiftRightArithmeticRoundedScalar(value, count);
        /// <summary>
        /// int64x1_t vshr_n_s64 (int64x1_t a, const int n)
        /// A32: VSHR.S64 Dd, Dm, #n
        /// A64: SSHR Dd, Dn, #n
        /// </summary>
        public static Vector64<long> ShiftRightArithmeticScalar(Vector64<long> value, byte count) => ShiftRightArithmeticScalar(value, count);
        /// <summary>
        /// uint8x8_t vshr_n_u8 (uint8x8_t a, const int n)
        /// A32: VSHR.U8 Dd, Dm, #n
        /// A64: USHR Vd.8B, Vn.8B, #n
        /// </summary>
        public static Vector64<byte> ShiftRightLogical(Vector64<byte> value, byte count) => ShiftRightLogical(value, count);
        /// <summary>
        /// uint16x4_t vshr_n_u16 (uint16x4_t a, const int n)
        /// A32: VSHR.U16 Dd, Dm, #n
        /// A64: USHR Vd.4H, Vn.4H, #n
        /// </summary>
        public static Vector64<short> ShiftRightLogical(Vector64<short> value, byte count) => ShiftRightLogical(value, count);
        /// <summary>
        /// uint32x2_t vshr_n_u32 (uint32x2_t a, const int n)
        /// A32: VSHR.U32 Dd, Dm, #n
        /// A64: USHR Vd.2S, Vn.2S, #n
        /// </summary>
        public static Vector64<int> ShiftRightLogical(Vector64<int> value, byte count) => ShiftRightLogical(value, count);
        /// <summary>
        /// uint8x8_t vshr_n_u8 (uint8x8_t a, const int n)
        /// A32: VSHR.U8 Dd, Dm, #n
        /// A64: USHR Vd.8B, Vn.8B, #n
        /// </summary>
        public static Vector64<sbyte> ShiftRightLogical(Vector64<sbyte> value, byte count) => ShiftRightLogical(value, count);
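        // Illustrative note (editorial sketch, not normative): ShiftRightLogical is also defined
        // for signed lane types; it always shifts in zero bits (USHR), whereas ShiftRightArithmetic
        // replicates the sign bit. For one short lane holding unchecked((short)0x8000):
        //   logical >> 1 == 0x4000, arithmetic >> 1 == 0xC000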

        /// <summary>
        /// uint8x8_t vshr_n_u8 (uint8x8_t a, const int n)
        /// A32: VSHR.U8 Dd, Dm, #n
        /// A64: USHR Vd.8B, Vn.8B, #n
        /// </summary>
        public static Vector64<byte> ShiftRightLogical(Vector64<byte> value, byte count) => ShiftRightLogical(value, count);

        /// <summary>
        /// uint16x4_t vshr_n_u16 (uint16x4_t a, const int n)
        /// A32: VSHR.U16 Dd, Dm, #n
        /// A64: USHR Vd.4H, Vn.4H, #n
        /// </summary>
        public static Vector64<short> ShiftRightLogical(Vector64<short> value, byte count) => ShiftRightLogical(value, count);

        /// <summary>
        /// uint32x2_t vshr_n_u32 (uint32x2_t a, const int n)
        /// A32: VSHR.U32 Dd, Dm, #n
        /// A64: USHR Vd.2S, Vn.2S, #n
        /// </summary>
        public static Vector64<int> ShiftRightLogical(Vector64<int> value, byte count) => ShiftRightLogical(value, count);

        /// <summary>
        /// uint8x8_t vshr_n_u8 (uint8x8_t a, const int n)
        /// A32: VSHR.U8 Dd, Dm, #n
        /// A64: USHR Vd.8B, Vn.8B, #n
        /// </summary>
        public static Vector64<sbyte> ShiftRightLogical(Vector64<sbyte> value, byte count) => ShiftRightLogical(value, count);

        /// <summary>
        /// uint16x4_t vshr_n_u16 (uint16x4_t a, const int n)
        /// A32: VSHR.U16 Dd, Dm, #n
        /// A64: USHR Vd.4H, Vn.4H, #n
        /// </summary>
        public static Vector64<ushort> ShiftRightLogical(Vector64<ushort> value, byte count) => ShiftRightLogical(value, count);

        /// <summary>
        /// uint32x2_t vshr_n_u32 (uint32x2_t a, const int n)
        /// A32: VSHR.U32 Dd, Dm, #n
        /// A64: USHR Vd.2S, Vn.2S, #n
        /// </summary>
        public static Vector64<uint> ShiftRightLogical(Vector64<uint> value, byte count) => ShiftRightLogical(value, count);

        /// <summary>
        /// uint8x16_t vshrq_n_u8 (uint8x16_t a, const int n)
        /// A32: VSHR.U8 Qd, Qm, #n
        /// A64: USHR Vd.16B, Vn.16B, #n
        /// </summary>
        public static Vector128<byte> ShiftRightLogical(Vector128<byte> value, byte count) => ShiftRightLogical(value, count);

        /// <summary>
        /// uint16x8_t vshrq_n_u16 (uint16x8_t a, const int n)
        /// A32: VSHR.U16 Qd, Qm, #n
        /// A64: USHR Vd.8H, Vn.8H, #n
        /// </summary>
        public static Vector128<short> ShiftRightLogical(Vector128<short> value, byte count) => ShiftRightLogical(value, count);

        /// <summary>
        /// uint32x4_t vshrq_n_u32 (uint32x4_t a, const int n)
        /// A32: VSHR.U32 Qd, Qm, #n
        /// A64: USHR Vd.4S, Vn.4S, #n
        /// </summary>
        public static Vector128<int> ShiftRightLogical(Vector128<int> value, byte count) => ShiftRightLogical(value, count);

        /// <summary>
        /// uint64x2_t vshrq_n_u64 (uint64x2_t a, const int n)
        /// A32: VSHR.U64 Qd, Qm, #n
        /// A64: USHR Vd.2D, Vn.2D, #n
        /// </summary>
        public static Vector128<long> ShiftRightLogical(Vector128<long> value, byte count) => ShiftRightLogical(value, count);

        /// <summary>
        /// uint8x16_t vshrq_n_u8 (uint8x16_t a, const int n)
        /// A32: VSHR.U8 Qd, Qm, #n
        /// A64: USHR Vd.16B, Vn.16B, #n
        /// </summary>
        public static Vector128<sbyte> ShiftRightLogical(Vector128<sbyte> value, byte count) => ShiftRightLogical(value, count);

        /// <summary>
        /// uint16x8_t vshrq_n_u16 (uint16x8_t a, const int n)
        /// A32: VSHR.U16 Qd, Qm, #n
        /// A64: USHR Vd.8H, Vn.8H, #n
        /// </summary>
        public static Vector128<ushort> ShiftRightLogical(Vector128<ushort> value, byte count) => ShiftRightLogical(value, count);

        /// <summary>
        /// uint32x4_t vshrq_n_u32 (uint32x4_t a, const int n)
        /// A32: VSHR.U32 Qd, Qm, #n
        /// A64: USHR Vd.4S, Vn.4S, #n
        /// </summary>
        public static Vector128<uint> ShiftRightLogical(Vector128<uint> value, byte count) => ShiftRightLogical(value, count);

        /// <summary>
        /// uint64x2_t vshrq_n_u64 (uint64x2_t a, const int n)
        /// A32: VSHR.U64 Qd, Qm, #n
        /// A64: USHR Vd.2D, Vn.2D, #n
        /// </summary>
        public static Vector128<ulong> ShiftRightLogical(Vector128<ulong> value, byte count) => ShiftRightLogical(value, count);

        /// <summary>
        /// uint8x8_t vsra_n_u8 (uint8x8_t a, uint8x8_t b, const int n)
        /// A32: VSRA.U8 Dd, Dm, #n
        /// A64: USRA Vd.8B, Vn.8B, #n
        /// </summary>
        public static Vector64<byte> ShiftRightLogicalAdd(Vector64<byte> addend, Vector64<byte> value, byte count) => ShiftRightLogicalAdd(addend, value, count);

        /// <summary>
        /// uint16x4_t vsra_n_u16 (uint16x4_t a, uint16x4_t b, const int n)
        /// A32: VSRA.U16 Dd, Dm, #n
        /// A64: USRA Vd.4H, Vn.4H, #n
        /// </summary>
        public static Vector64<short> ShiftRightLogicalAdd(Vector64<short> addend, Vector64<short> value, byte count) => ShiftRightLogicalAdd(addend, value, count);

        /// <summary>
        /// uint32x2_t vsra_n_u32 (uint32x2_t a, uint32x2_t b, const int n)
        /// A32: VSRA.U32 Dd, Dm, #n
        /// A64: USRA Vd.2S, Vn.2S, #n
        /// </summary>
        public static Vector64<int> ShiftRightLogicalAdd(Vector64<int> addend, Vector64<int> value, byte count) => ShiftRightLogicalAdd(addend, value, count);

        /// <summary>
        /// uint8x8_t vsra_n_u8 (uint8x8_t a, uint8x8_t b, const int n)
        /// A32: VSRA.U8 Dd, Dm, #n
        /// A64: USRA Vd.8B, Vn.8B, #n
        /// </summary>
        public static Vector64<sbyte> ShiftRightLogicalAdd(Vector64<sbyte> addend, Vector64<sbyte> value, byte count) => ShiftRightLogicalAdd(addend, value, count);

        /// <summary>
        /// uint16x4_t vsra_n_u16 (uint16x4_t a, uint16x4_t b, const int n)
        /// A32: VSRA.U16 Dd, Dm, #n
        /// A64: USRA Vd.4H, Vn.4H, #n
        /// </summary>
        public static Vector64<ushort> ShiftRightLogicalAdd(Vector64<ushort> addend, Vector64<ushort> value, byte count) => ShiftRightLogicalAdd(addend, value, count);

        /// <summary>
        /// uint32x2_t vsra_n_u32 (uint32x2_t a, uint32x2_t b, const int n)
        /// A32: VSRA.U32 Dd, Dm, #n
        /// A64: USRA Vd.2S, Vn.2S, #n
        /// </summary>
        public static Vector64<uint> ShiftRightLogicalAdd(Vector64<uint> addend, Vector64<uint> value, byte count) => ShiftRightLogicalAdd(addend, value, count);

        /// <summary>
        /// uint8x16_t vsraq_n_u8 (uint8x16_t a, uint8x16_t b, const int n)
        /// A32: VSRA.U8 Qd, Qm, #n
        /// A64: USRA Vd.16B, Vn.16B, #n
        /// </summary>
        public static Vector128<byte> ShiftRightLogicalAdd(Vector128<byte> addend, Vector128<byte> value, byte count) => ShiftRightLogicalAdd(addend, value, count);

        /// <summary>
        /// uint16x8_t vsraq_n_u16 (uint16x8_t a, uint16x8_t b, const int n)
        /// A32: VSRA.U16 Qd, Qm, #n
        /// A64: USRA Vd.8H, Vn.8H, #n
        /// </summary>
        public static Vector128<short> ShiftRightLogicalAdd(Vector128<short> addend, Vector128<short> value, byte count) => ShiftRightLogicalAdd(addend, value, count);

        /// <summary>
        /// uint32x4_t vsraq_n_u32 (uint32x4_t a, uint32x4_t b, const int n)
        /// A32: VSRA.U32 Qd, Qm, #n
        /// A64: USRA Vd.4S, Vn.4S, #n
        /// </summary>
        public static Vector128<int> ShiftRightLogicalAdd(Vector128<int> addend, Vector128<int> value, byte count) => ShiftRightLogicalAdd(addend, value, count);

        /// <summary>
        /// uint64x2_t vsraq_n_u64 (uint64x2_t a, uint64x2_t b, const int n)
        /// A32: VSRA.U64 Qd, Qm, #n
        /// A64: USRA Vd.2D, Vn.2D, #n
        /// </summary>
        public static Vector128<long> ShiftRightLogicalAdd(Vector128<long> addend, Vector128<long> value, byte count) => ShiftRightLogicalAdd(addend, value, count);

        /// <summary>
        /// uint8x16_t vsraq_n_u8 (uint8x16_t a, uint8x16_t b, const int n)
        /// A32: VSRA.U8 Qd, Qm, #n
        /// A64: USRA Vd.16B, Vn.16B, #n
        /// </summary>
        public static Vector128<sbyte> ShiftRightLogicalAdd(Vector128<sbyte> addend, Vector128<sbyte> value, byte count) => ShiftRightLogicalAdd(addend, value, count);

        /// <summary>
        /// uint16x8_t vsraq_n_u16 (uint16x8_t a, uint16x8_t b, const int n)
        /// A32: VSRA.U16 Qd, Qm, #n
        /// A64: USRA Vd.8H, Vn.8H, #n
        /// </summary>
        public static Vector128<ushort> ShiftRightLogicalAdd(Vector128<ushort> addend, Vector128<ushort> value, byte count) => ShiftRightLogicalAdd(addend, value, count);

        /// <summary>
        /// uint32x4_t vsraq_n_u32 (uint32x4_t a, uint32x4_t b, const int n)
        /// A32: VSRA.U32 Qd, Qm, #n
        /// A64: USRA Vd.4S, Vn.4S, #n
        /// </summary>
        public static Vector128<uint> ShiftRightLogicalAdd(Vector128<uint> addend, Vector128<uint> value, byte count) => ShiftRightLogicalAdd(addend, value, count);

        /// <summary>
        /// uint64x2_t vsraq_n_u64 (uint64x2_t a, uint64x2_t b, const int n)
        /// A32: VSRA.U64 Qd, Qm, #n
        /// A64: USRA Vd.2D, Vn.2D, #n
        /// </summary>
        public static Vector128<ulong> ShiftRightLogicalAdd(Vector128<ulong> addend, Vector128<ulong> value, byte count) => ShiftRightLogicalAdd(addend, value, count);
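
        // Editorial sketch (hypothetical helper): ShiftRightLogicalAdd (USRA)
        // fuses a logical right shift with an accumulate, so "acc += v >> 4"
        // across all byte lanes is a single instruction instead of a shift
        // followed by an add.
        private static Vector128<byte> AccumulateHighNibblesExampleSketch(Vector128<byte> acc, Vector128<byte> v) =>
            ShiftRightLogicalAdd(acc, v, 4);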

        /// <summary>
        /// uint64x1_t vsra_n_u64 (uint64x1_t a, uint64x1_t b, const int n)
        /// A32: VSRA.U64 Dd, Dm, #n
        /// A64: USRA Dd, Dn, #n
        /// </summary>
        public static Vector64<long> ShiftRightLogicalAddScalar(Vector64<long> addend, Vector64<long> value, byte count) => ShiftRightLogicalAddScalar(addend, value, count);

        /// <summary>
        /// uint64x1_t vsra_n_u64 (uint64x1_t a, uint64x1_t b, const int n)
        /// A32: VSRA.U64 Dd, Dm, #n
        /// A64: USRA Dd, Dn, #n
        /// </summary>
        public static Vector64<ulong> ShiftRightLogicalAddScalar(Vector64<ulong> addend, Vector64<ulong> value, byte count) => ShiftRightLogicalAddScalar(addend, value, count);

        /// <summary>
        /// uint8x8_t vshrn_n_u16 (uint16x8_t a, const int n)
        /// A32: VSHRN.I16 Dd, Qm, #n
        /// A64: SHRN Vd.8B, Vn.8H, #n
        /// </summary>
        public static Vector64<byte> ShiftRightLogicalNarrowingLower(Vector128<ushort> value, byte count) => ShiftRightLogicalNarrowingLower(value, count);

        /// <summary>
        /// int16x4_t vshrn_n_s32 (int32x4_t a, const int n)
        /// A32: VSHRN.I32 Dd, Qm, #n
        /// A64: SHRN Vd.4H, Vn.4S, #n
        /// </summary>
        public static Vector64<short> ShiftRightLogicalNarrowingLower(Vector128<int> value, byte count) => ShiftRightLogicalNarrowingLower(value, count);

        /// <summary>
        /// int32x2_t vshrn_n_s64 (int64x2_t a, const int n)
        /// A32: VSHRN.I64 Dd, Qm, #n
        /// A64: SHRN Vd.2S, Vn.2D, #n
        /// </summary>
        public static Vector64<int> ShiftRightLogicalNarrowingLower(Vector128<long> value, byte count) => ShiftRightLogicalNarrowingLower(value, count);

        /// <summary>
        /// int8x8_t vshrn_n_s16 (int16x8_t a, const int n)
        /// A32: VSHRN.I16 Dd, Qm, #n
        /// A64: SHRN Vd.8B, Vn.8H, #n
        /// </summary>
        public static Vector64<sbyte> ShiftRightLogicalNarrowingLower(Vector128<short> value, byte count) => ShiftRightLogicalNarrowingLower(value, count);

        /// <summary>
        /// uint16x4_t vshrn_n_u32 (uint32x4_t a, const int n)
        /// A32: VSHRN.I32 Dd, Qm, #n
        /// A64: SHRN Vd.4H, Vn.4S, #n
        /// </summary>
        public static Vector64<ushort> ShiftRightLogicalNarrowingLower(Vector128<uint> value, byte count) => ShiftRightLogicalNarrowingLower(value, count);

        /// <summary>
        /// uint32x2_t vshrn_n_u64 (uint64x2_t a, const int n)
        /// A32: VSHRN.I64 Dd, Qm, #n
        /// A64: SHRN Vd.2S, Vn.2D, #n
        /// </summary>
        public static Vector64<uint> ShiftRightLogicalNarrowingLower(Vector128<ulong> value, byte count) => ShiftRightLogicalNarrowingLower(value, count);

        /// <summary>
        /// uint8x8_t vqshrn_n_u16 (uint16x8_t a, const int n)
        /// A32: VQSHRN.U16 Dd, Qm, #n
        /// A64: UQSHRN Vd.8B, Vn.8H, #n
        /// </summary>
        public static Vector64<byte> ShiftRightLogicalNarrowingSaturateLower(Vector128<ushort> value, byte count) => ShiftRightLogicalNarrowingSaturateLower(value, count);

        /// <summary>
        /// uint16x4_t vqshrn_n_u32 (uint32x4_t a, const int n)
        /// A32: VQSHRN.U32 Dd, Qm, #n
        /// A64: UQSHRN Vd.4H, Vn.4S, #n
        /// </summary>
        public static Vector64<short> ShiftRightLogicalNarrowingSaturateLower(Vector128<int> value, byte count) => ShiftRightLogicalNarrowingSaturateLower(value, count);

        /// <summary>
        /// uint32x2_t vqshrn_n_u64 (uint64x2_t a, const int n)
        /// A32: VQSHRN.U64 Dd, Qm, #n
        /// A64: UQSHRN Vd.2S, Vn.2D, #n
        /// </summary>
        public static Vector64<int> ShiftRightLogicalNarrowingSaturateLower(Vector128<long> value, byte count) => ShiftRightLogicalNarrowingSaturateLower(value, count);

        /// <summary>
        /// uint8x8_t vqshrn_n_u16 (uint16x8_t a, const int n)
        /// A32: VQSHRN.U16 Dd, Qm, #n
        /// A64: UQSHRN Vd.8B, Vn.8H, #n
        /// </summary>
        public static Vector64<sbyte> ShiftRightLogicalNarrowingSaturateLower(Vector128<short> value, byte count) => ShiftRightLogicalNarrowingSaturateLower(value, count);

        /// <summary>
        /// uint16x4_t vqshrn_n_u32 (uint32x4_t a, const int n)
        /// A32: VQSHRN.U32 Dd, Qm, #n
        /// A64: UQSHRN Vd.4H, Vn.4S, #n
        /// </summary>
        public static Vector64<ushort> ShiftRightLogicalNarrowingSaturateLower(Vector128<uint> value, byte count) => ShiftRightLogicalNarrowingSaturateLower(value, count);

        /// <summary>
        /// uint32x2_t vqshrn_n_u64 (uint64x2_t a, const int n)
        /// A32: VQSHRN.U64 Dd, Qm, #n
        /// A64: UQSHRN Vd.2S, Vn.2D, #n
        /// </summary>
        public static Vector64<uint> ShiftRightLogicalNarrowingSaturateLower(Vector128<ulong> value, byte count) => ShiftRightLogicalNarrowingSaturateLower(value, count);

        /// <summary>
        /// uint8x16_t vqshrn_high_n_u16 (uint8x8_t r, uint16x8_t a, const int n)
        /// A32: VQSHRN.U16 Dd+1, Qm, #n
        /// A64: UQSHRN2 Vd.16B, Vn.8H, #n
        /// </summary>
        public static Vector128<byte> ShiftRightLogicalNarrowingSaturateUpper(Vector64<byte> lower, Vector128<ushort> value, byte count) => ShiftRightLogicalNarrowingSaturateUpper(lower, value, count);

        /// <summary>
        /// uint16x8_t vqshrn_high_n_u32 (uint16x4_t r, uint32x4_t a, const int n)
        /// A32: VQSHRN.U32 Dd+1, Qm, #n
        /// A64: UQSHRN2 Vd.8H, Vn.4S, #n
        /// </summary>
        public static Vector128<short> ShiftRightLogicalNarrowingSaturateUpper(Vector64<short> lower, Vector128<int> value, byte count) => ShiftRightLogicalNarrowingSaturateUpper(lower, value, count);

        /// <summary>
        /// uint32x4_t vqshrn_high_n_u64 (uint32x2_t r, uint64x2_t a, const int n)
        /// A32: VQSHRN.U64 Dd+1, Qm, #n
        /// A64: UQSHRN2 Vd.4S, Vn.2D, #n
        /// </summary>
        public static Vector128<int> ShiftRightLogicalNarrowingSaturateUpper(Vector64<int> lower, Vector128<long> value, byte count) => ShiftRightLogicalNarrowingSaturateUpper(lower, value, count);

        /// <summary>
        /// uint8x16_t vqshrn_high_n_u16 (uint8x8_t r, uint16x8_t a, const int n)
        /// A32: VQSHRN.U16 Dd+1, Qm, #n
        /// A64: UQSHRN2 Vd.16B, Vn.8H, #n
        /// </summary>
        public static Vector128<sbyte> ShiftRightLogicalNarrowingSaturateUpper(Vector64<sbyte> lower, Vector128<short> value, byte count) => ShiftRightLogicalNarrowingSaturateUpper(lower, value, count);

        /// <summary>
        /// uint16x8_t vqshrn_high_n_u32 (uint16x4_t r, uint32x4_t a, const int n)
        /// A32: VQSHRN.U32 Dd+1, Qm, #n
        /// A64: UQSHRN2 Vd.8H, Vn.4S, #n
        /// </summary>
        public static Vector128<ushort> ShiftRightLogicalNarrowingSaturateUpper(Vector64<ushort> lower, Vector128<uint> value, byte count) => ShiftRightLogicalNarrowingSaturateUpper(lower, value, count);

        /// <summary>
        /// uint32x4_t vqshrn_high_n_u64 (uint32x2_t r, uint64x2_t a, const int n)
        /// A32: VQSHRN.U64 Dd+1, Qm, #n
        /// A64: UQSHRN2 Vd.4S, Vn.2D, #n
        /// </summary>
        public static Vector128<uint> ShiftRightLogicalNarrowingSaturateUpper(Vector64<uint> lower, Vector128<ulong> value, byte count) => ShiftRightLogicalNarrowingSaturateUpper(lower, value, count);
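
        // Editorial sketch (hypothetical helper): the Lower/Upper narrowing
        // saturate pair turns two Vector128<ushort> inputs into one
        // Vector128<byte>. A count of 8 keeps the high byte of every 16-bit
        // lane (which always fits); smaller counts rely on the saturation to
        // clamp results at 255.
        private static Vector128<byte> NarrowHighBytesExampleSketch(Vector128<ushort> low, Vector128<ushort> high) =>
            ShiftRightLogicalNarrowingSaturateUpper(ShiftRightLogicalNarrowingSaturateLower(low, 8), high, 8);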

        /// <summary>
        /// uint8x16_t vshrn_high_n_u16 (uint8x8_t r, uint16x8_t a, const int n)
        /// A32: VSHRN.I16 Dd+1, Qm, #n
        /// A64: SHRN2 Vd.16B, Vn.8H, #n
        /// </summary>
        public static Vector128<byte> ShiftRightLogicalNarrowingUpper(Vector64<byte> lower, Vector128<ushort> value, byte count) => ShiftRightLogicalNarrowingUpper(lower, value, count);

        /// <summary>
        /// int16x8_t vshrn_high_n_s32 (int16x4_t r, int32x4_t a, const int n)
        /// A32: VSHRN.I32 Dd+1, Qm, #n
        /// A64: SHRN2 Vd.8H, Vn.4S, #n
        /// </summary>
        public static Vector128<short> ShiftRightLogicalNarrowingUpper(Vector64<short> lower, Vector128<int> value, byte count) => ShiftRightLogicalNarrowingUpper(lower, value, count);

        /// <summary>
        /// int32x4_t vshrn_high_n_s64 (int32x2_t r, int64x2_t a, const int n)
        /// A32: VSHRN.I64 Dd+1, Qm, #n
        /// A64: SHRN2 Vd.4S, Vn.2D, #n
        /// </summary>
        public static Vector128<int> ShiftRightLogicalNarrowingUpper(Vector64<int> lower, Vector128<long> value, byte count) => ShiftRightLogicalNarrowingUpper(lower, value, count);

        /// <summary>
        /// int8x16_t vshrn_high_n_s16 (int8x8_t r, int16x8_t a, const int n)
        /// A32: VSHRN.I16 Dd+1, Qm, #n
        /// A64: SHRN2 Vd.16B, Vn.8H, #n
        /// </summary>
        public static Vector128<sbyte> ShiftRightLogicalNarrowingUpper(Vector64<sbyte> lower, Vector128<short> value, byte count) => ShiftRightLogicalNarrowingUpper(lower, value, count);

        /// <summary>
        /// uint16x8_t vshrn_high_n_u32 (uint16x4_t r, uint32x4_t a, const int n)
        /// A32: VSHRN.I32 Dd+1, Qm, #n
        /// A64: SHRN2 Vd.8H, Vn.4S, #n
        /// </summary>
        public static Vector128<ushort> ShiftRightLogicalNarrowingUpper(Vector64<ushort> lower, Vector128<uint> value, byte count) => ShiftRightLogicalNarrowingUpper(lower, value, count);

        /// <summary>
        /// uint32x4_t vshrn_high_n_u64 (uint32x2_t r, uint64x2_t a, const int n)
        /// A32: VSHRN.I64 Dd+1, Qm, #n
        /// A64: SHRN2 Vd.4S, Vn.2D, #n
        /// </summary>
        public static Vector128<uint> ShiftRightLogicalNarrowingUpper(Vector64<uint> lower, Vector128<ulong> value, byte count) => ShiftRightLogicalNarrowingUpper(lower, value, count);

        /// <summary>
        /// uint8x8_t vrshr_n_u8 (uint8x8_t a, const int n)
        /// A32: VRSHR.U8 Dd, Dm, #n
        /// A64: URSHR Vd.8B, Vn.8B, #n
        /// </summary>
        public static Vector64<byte> ShiftRightLogicalRounded(Vector64<byte> value, byte count) => ShiftRightLogicalRounded(value, count);

        /// <summary>
        /// uint16x4_t vrshr_n_u16 (uint16x4_t a, const int n)
        /// A32: VRSHR.U16 Dd, Dm, #n
        /// A64: URSHR Vd.4H, Vn.4H, #n
        /// </summary>
        public static Vector64<short> ShiftRightLogicalRounded(Vector64<short> value, byte count) => ShiftRightLogicalRounded(value, count);

        /// <summary>
        /// uint32x2_t vrshr_n_u32 (uint32x2_t a, const int n)
        /// A32: VRSHR.U32 Dd, Dm, #n
        /// A64: URSHR Vd.2S, Vn.2S, #n
        /// </summary>
        public static Vector64<int> ShiftRightLogicalRounded(Vector64<int> value, byte count) => ShiftRightLogicalRounded(value, count);

        /// <summary>
        /// uint8x8_t vrshr_n_u8 (uint8x8_t a, const int n)
        /// A32: VRSHR.U8 Dd, Dm, #n
        /// A64: URSHR Vd.8B, Vn.8B, #n
        /// </summary>
        public static Vector64<sbyte> ShiftRightLogicalRounded(Vector64<sbyte> value, byte count) => ShiftRightLogicalRounded(value, count);

        /// <summary>
        /// uint16x4_t vrshr_n_u16 (uint16x4_t a, const int n)
        /// A32: VRSHR.U16 Dd, Dm, #n
        /// A64: URSHR Vd.4H, Vn.4H, #n
        /// </summary>
        public static Vector64<ushort> ShiftRightLogicalRounded(Vector64<ushort> value, byte count) => ShiftRightLogicalRounded(value, count);

        /// <summary>
        /// uint32x2_t vrshr_n_u32 (uint32x2_t a, const int n)
        /// A32: VRSHR.U32 Dd, Dm, #n
        /// A64: URSHR Vd.2S, Vn.2S, #n
        /// </summary>
        public static Vector64<uint> ShiftRightLogicalRounded(Vector64<uint> value, byte count) => ShiftRightLogicalRounded(value, count);

        /// <summary>
        /// uint8x16_t vrshrq_n_u8 (uint8x16_t a, const int n)
        /// A32: VRSHR.U8 Qd, Qm, #n
        /// A64: URSHR Vd.16B, Vn.16B, #n
        /// </summary>
        public static Vector128<byte> ShiftRightLogicalRounded(Vector128<byte> value, byte count) => ShiftRightLogicalRounded(value, count);

        /// <summary>
        /// uint16x8_t vrshrq_n_u16 (uint16x8_t a, const int n)
        /// A32: VRSHR.U16 Qd, Qm, #n
        /// A64: URSHR Vd.8H, Vn.8H, #n
        /// </summary>
        public static Vector128<short> ShiftRightLogicalRounded(Vector128<short> value, byte count) => ShiftRightLogicalRounded(value, count);

        /// <summary>
        /// uint32x4_t vrshrq_n_u32 (uint32x4_t a, const int n)
        /// A32: VRSHR.U32 Qd, Qm, #n
        /// A64: URSHR Vd.4S, Vn.4S, #n
        /// </summary>
        public static Vector128<int> ShiftRightLogicalRounded(Vector128<int> value, byte count) => ShiftRightLogicalRounded(value, count);

        /// <summary>
        /// uint64x2_t vrshrq_n_u64 (uint64x2_t a, const int n)
        /// A32: VRSHR.U64 Qd, Qm, #n
        /// A64: URSHR Vd.2D, Vn.2D, #n
        /// </summary>
        public static Vector128<long> ShiftRightLogicalRounded(Vector128<long> value, byte count) => ShiftRightLogicalRounded(value, count);

        /// <summary>
        /// uint8x16_t vrshrq_n_u8 (uint8x16_t a, const int n)
        /// A32: VRSHR.U8 Qd, Qm, #n
        /// A64: URSHR Vd.16B, Vn.16B, #n
        /// </summary>
        public static Vector128<sbyte> ShiftRightLogicalRounded(Vector128<sbyte> value, byte count) => ShiftRightLogicalRounded(value, count);

        /// <summary>
        /// uint16x8_t vrshrq_n_u16 (uint16x8_t a, const int n)
        /// A32: VRSHR.U16 Qd, Qm, #n
        /// A64: URSHR Vd.8H, Vn.8H, #n
        /// </summary>
        public static Vector128<ushort> ShiftRightLogicalRounded(Vector128<ushort> value, byte count) => ShiftRightLogicalRounded(value, count);

        /// <summary>
        /// uint32x4_t vrshrq_n_u32 (uint32x4_t a, const int n)
        /// A32: VRSHR.U32 Qd, Qm, #n
        /// A64: URSHR Vd.4S, Vn.4S, #n
        /// </summary>
        public static Vector128<uint> ShiftRightLogicalRounded(Vector128<uint> value, byte count) => ShiftRightLogicalRounded(value, count);

        /// <summary>
        /// uint64x2_t vrshrq_n_u64 (uint64x2_t a, const int n)
        /// A32: VRSHR.U64 Qd, Qm, #n
        /// A64: URSHR Vd.2D, Vn.2D, #n
        /// </summary>
        public static Vector128<ulong> ShiftRightLogicalRounded(Vector128<ulong> value, byte count) => ShiftRightLogicalRounded(value, count);

        /// <summary>
        /// uint8x8_t vrsra_n_u8 (uint8x8_t a, uint8x8_t b, const int n)
        /// A32: VRSRA.U8 Dd, Dm, #n
        /// A64: URSRA Vd.8B, Vn.8B, #n
        /// </summary>
        public static Vector64<byte> ShiftRightLogicalRoundedAdd(Vector64<byte> addend, Vector64<byte> value, byte count) => ShiftRightLogicalRoundedAdd(addend, value, count);

        /// <summary>
        /// uint16x4_t vrsra_n_u16 (uint16x4_t a, uint16x4_t b, const int n)
        /// A32: VRSRA.U16 Dd, Dm, #n
        /// A64: URSRA Vd.4H, Vn.4H, #n
        /// </summary>
        public static Vector64<short> ShiftRightLogicalRoundedAdd(Vector64<short> addend, Vector64<short> value, byte count) => ShiftRightLogicalRoundedAdd(addend, value, count);

        /// <summary>
        /// uint32x2_t vrsra_n_u32 (uint32x2_t a, uint32x2_t b, const int n)
        /// A32: VRSRA.U32 Dd, Dm, #n
        /// A64: URSRA Vd.2S, Vn.2S, #n
        /// </summary>
        public static Vector64<int> ShiftRightLogicalRoundedAdd(Vector64<int> addend, Vector64<int> value, byte count) => ShiftRightLogicalRoundedAdd(addend, value, count);

        /// <summary>
        /// uint8x8_t vrsra_n_u8 (uint8x8_t a, uint8x8_t b, const int n)
        /// A32: VRSRA.U8 Dd, Dm, #n
        /// A64: URSRA Vd.8B, Vn.8B, #n
        /// </summary>
        public static Vector64<sbyte> ShiftRightLogicalRoundedAdd(Vector64<sbyte> addend, Vector64<sbyte> value, byte count) => ShiftRightLogicalRoundedAdd(addend, value, count);

        /// <summary>
        /// uint16x4_t vrsra_n_u16 (uint16x4_t a, uint16x4_t b, const int n)
        /// A32: VRSRA.U16 Dd, Dm, #n
        /// A64: URSRA Vd.4H, Vn.4H, #n
        /// </summary>
        public static Vector64<ushort> ShiftRightLogicalRoundedAdd(Vector64<ushort> addend, Vector64<ushort> value, byte count) => ShiftRightLogicalRoundedAdd(addend, value, count);

        /// <summary>
        /// uint32x2_t vrsra_n_u32 (uint32x2_t a, uint32x2_t b, const int n)
        /// A32: VRSRA.U32 Dd, Dm, #n
        /// A64: URSRA Vd.2S, Vn.2S, #n
        /// </summary>
        public static Vector64<uint> ShiftRightLogicalRoundedAdd(Vector64<uint> addend, Vector64<uint> value, byte count) => ShiftRightLogicalRoundedAdd(addend, value, count);
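
        // Editorial sketch (hypothetical helper): the Rounded variants add
        // 2^(count - 1) before shifting, i.e. they divide by a power of two
        // with rounding instead of truncation. Here every uint lane is divided
        // by 16 with rounding.
        private static Vector128<uint> DivideBy16RoundedExampleSketch(Vector128<uint> values) =>
            ShiftRightLogicalRounded(values, 4);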

        /// <summary>
        /// uint8x16_t vrsraq_n_u8 (uint8x16_t a, uint8x16_t b, const int n)
        /// A32: VRSRA.U8 Qd, Qm, #n
        /// A64: URSRA Vd.16B, Vn.16B, #n
        /// </summary>
        public static Vector128<byte> ShiftRightLogicalRoundedAdd(Vector128<byte> addend, Vector128<byte> value, byte count) => ShiftRightLogicalRoundedAdd(addend, value, count);

        /// <summary>
        /// uint16x8_t vrsraq_n_u16 (uint16x8_t a, uint16x8_t b, const int n)
        /// A32: VRSRA.U16 Qd, Qm, #n
        /// A64: URSRA Vd.8H, Vn.8H, #n
        /// </summary>
        public static Vector128<short> ShiftRightLogicalRoundedAdd(Vector128<short> addend, Vector128<short> value, byte count) => ShiftRightLogicalRoundedAdd(addend, value, count);

        /// <summary>
        /// uint32x4_t vrsraq_n_u32 (uint32x4_t a, uint32x4_t b, const int n)
        /// A32: VRSRA.U32 Qd, Qm, #n
        /// A64: URSRA Vd.4S, Vn.4S, #n
        /// </summary>
        public static Vector128<int> ShiftRightLogicalRoundedAdd(Vector128<int> addend, Vector128<int> value, byte count) => ShiftRightLogicalRoundedAdd(addend, value, count);

        /// <summary>
        /// uint64x2_t vrsraq_n_u64 (uint64x2_t a, uint64x2_t b, const int n)
        /// A32: VRSRA.U64 Qd, Qm, #n
        /// A64: URSRA Vd.2D, Vn.2D, #n
        /// </summary>
        public static Vector128<long> ShiftRightLogicalRoundedAdd(Vector128<long> addend, Vector128<long> value, byte count) => ShiftRightLogicalRoundedAdd(addend, value, count);

        /// <summary>
        /// uint8x16_t vrsraq_n_u8 (uint8x16_t a, uint8x16_t b, const int n)
        /// A32: VRSRA.U8 Qd, Qm, #n
        /// A64: URSRA Vd.16B, Vn.16B, #n
        /// </summary>
        public static Vector128<sbyte> ShiftRightLogicalRoundedAdd(Vector128<sbyte> addend, Vector128<sbyte> value, byte count) => ShiftRightLogicalRoundedAdd(addend, value, count);

        /// <summary>
        /// uint16x8_t vrsraq_n_u16 (uint16x8_t a, uint16x8_t b, const int n)
        /// A32: VRSRA.U16 Qd, Qm, #n
        /// A64: URSRA Vd.8H, Vn.8H, #n
        /// </summary>
        public static Vector128<ushort> ShiftRightLogicalRoundedAdd(Vector128<ushort> addend, Vector128<ushort> value, byte count) => ShiftRightLogicalRoundedAdd(addend, value, count);

        /// <summary>
        /// uint32x4_t vrsraq_n_u32 (uint32x4_t a, uint32x4_t b, const int n)
        /// A32: VRSRA.U32 Qd, Qm, #n
        /// A64: URSRA Vd.4S, Vn.4S, #n
        /// </summary>
        public static Vector128<uint> ShiftRightLogicalRoundedAdd(Vector128<uint> addend, Vector128<uint> value, byte count) => ShiftRightLogicalRoundedAdd(addend, value, count);

        /// <summary>
        /// uint64x2_t vrsraq_n_u64 (uint64x2_t a, uint64x2_t b, const int n)
        /// A32: VRSRA.U64 Qd, Qm, #n
        /// A64: URSRA Vd.2D, Vn.2D, #n
        /// </summary>
        public static Vector128<ulong> ShiftRightLogicalRoundedAdd(Vector128<ulong> addend, Vector128<ulong> value, byte count) => ShiftRightLogicalRoundedAdd(addend, value, count);

        /// <summary>
        /// uint64x1_t vrsra_n_u64 (uint64x1_t a, uint64x1_t b, const int n)
        /// A32: VRSRA.U64 Dd, Dm, #n
        /// A64: URSRA Dd, Dn, #n
        /// </summary>
        public static Vector64<long> ShiftRightLogicalRoundedAddScalar(Vector64<long> addend, Vector64<long> value, byte count) => ShiftRightLogicalRoundedAddScalar(addend, value, count);

        /// <summary>
        /// uint64x1_t vrsra_n_u64 (uint64x1_t a, uint64x1_t b, const int n)
        /// A32: VRSRA.U64 Dd, Dm, #n
        /// A64: URSRA Dd, Dn, #n
        /// </summary>
        public static Vector64<ulong> ShiftRightLogicalRoundedAddScalar(Vector64<ulong> addend, Vector64<ulong> value, byte count) => ShiftRightLogicalRoundedAddScalar(addend, value, count);

        /// <summary>
        /// uint8x8_t vrshrn_n_u16 (uint16x8_t a, const int n)
        /// A32: VRSHRN.I16 Dd, Qm, #n
        /// A64: RSHRN Vd.8B, Vn.8H, #n
        /// </summary>
        public static Vector64<byte> ShiftRightLogicalRoundedNarrowingLower(Vector128<ushort> value, byte count) => ShiftRightLogicalRoundedNarrowingLower(value, count);

        /// <summary>
        /// int16x4_t vrshrn_n_s32 (int32x4_t a, const int n)
        /// A32: VRSHRN.I32 Dd, Qm, #n
        /// A64: RSHRN Vd.4H, Vn.4S, #n
        /// </summary>
        public static Vector64<short> ShiftRightLogicalRoundedNarrowingLower(Vector128<int> value, byte count) => ShiftRightLogicalRoundedNarrowingLower(value, count);

        /// <summary>
        /// int32x2_t vrshrn_n_s64 (int64x2_t a, const int n)
        /// A32: VRSHRN.I64 Dd, Qm, #n
        /// A64: RSHRN Vd.2S, Vn.2D, #n
        /// </summary>
        public static Vector64<int> ShiftRightLogicalRoundedNarrowingLower(Vector128<long> value, byte count) => ShiftRightLogicalRoundedNarrowingLower(value, count);

        /// <summary>
        /// int8x8_t vrshrn_n_s16 (int16x8_t a, const int n)
        /// A32: VRSHRN.I16 Dd, Qm, #n
        /// A64: RSHRN Vd.8B, Vn.8H, #n
        /// </summary>
        public static Vector64<sbyte> ShiftRightLogicalRoundedNarrowingLower(Vector128<short> value, byte count) => ShiftRightLogicalRoundedNarrowingLower(value, count);

        /// <summary>
        /// uint16x4_t vrshrn_n_u32 (uint32x4_t a, const int n)
        /// A32: VRSHRN.I32 Dd, Qm, #n
        /// A64: RSHRN Vd.4H, Vn.4S, #n
        /// </summary>
        public static Vector64<ushort> ShiftRightLogicalRoundedNarrowingLower(Vector128<uint> value, byte count) => ShiftRightLogicalRoundedNarrowingLower(value, count);

        /// <summary>
        /// uint32x2_t vrshrn_n_u64 (uint64x2_t a, const int n)
        /// A32: VRSHRN.I64 Dd, Qm, #n
        /// A64: RSHRN Vd.2S, Vn.2D, #n
        /// </summary>
        public static Vector64<uint> ShiftRightLogicalRoundedNarrowingLower(Vector128<ulong> value, byte count) => ShiftRightLogicalRoundedNarrowingLower(value, count);

        /// <summary>
        /// uint8x8_t vqrshrn_n_u16 (uint16x8_t a, const int n)
        /// A32: VQRSHRN.U16 Dd, Qm, #n
        /// A64: UQRSHRN Vd.8B, Vn.8H, #n
        /// </summary>
        public static Vector64<byte> ShiftRightLogicalRoundedNarrowingSaturateLower(Vector128<ushort> value, byte count) => ShiftRightLogicalRoundedNarrowingSaturateLower(value, count);

        /// <summary>
        /// uint16x4_t vqrshrn_n_u32 (uint32x4_t a, const int n)
        /// A32: VQRSHRN.U32 Dd, Qm, #n
        /// A64: UQRSHRN Vd.4H, Vn.4S, #n
        /// </summary>
        public static Vector64<short> ShiftRightLogicalRoundedNarrowingSaturateLower(Vector128<int> value, byte count) => ShiftRightLogicalRoundedNarrowingSaturateLower(value, count);

        /// <summary>
        /// uint32x2_t vqrshrn_n_u64 (uint64x2_t a, const int n)
        /// A32: VQRSHRN.U64 Dd, Qm, #n
        /// A64: UQRSHRN Vd.2S, Vn.2D, #n
        /// </summary>
        public static Vector64<int> ShiftRightLogicalRoundedNarrowingSaturateLower(Vector128<long> value, byte count) => ShiftRightLogicalRoundedNarrowingSaturateLower(value, count);

        /// <summary>
        /// uint8x8_t vqrshrn_n_u16 (uint16x8_t a, const int n)
        /// A32: VQRSHRN.U16 Dd, Qm, #n
        /// A64: UQRSHRN Vd.8B, Vn.8H, #n
        /// </summary>
        public static Vector64<sbyte> ShiftRightLogicalRoundedNarrowingSaturateLower(Vector128<short> value, byte count) => ShiftRightLogicalRoundedNarrowingSaturateLower(value, count);

        /// <summary>
        /// uint16x4_t vqrshrn_n_u32 (uint32x4_t a, const int n)
        /// A32: VQRSHRN.U32 Dd, Qm, #n
        /// A64: UQRSHRN Vd.4H, Vn.4S, #n
        /// </summary>
        public static Vector64<ushort> ShiftRightLogicalRoundedNarrowingSaturateLower(Vector128<uint> value, byte count) => ShiftRightLogicalRoundedNarrowingSaturateLower(value, count);

        /// <summary>
        /// uint32x2_t vqrshrn_n_u64 (uint64x2_t a, const int n)
        /// A32: VQRSHRN.U64 Dd, Qm, #n
        /// A64: UQRSHRN Vd.2S, Vn.2D, #n
        /// </summary>
        public static Vector64<uint> ShiftRightLogicalRoundedNarrowingSaturateLower(Vector128<ulong> value, byte count) => ShiftRightLogicalRoundedNarrowingSaturateLower(value, count);
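
        // Editorial sketch (hypothetical helper): after widening arithmetic it
        // is common to come back down with rounding plus saturation, e.g.
        // rescaling 32-bit per-lane accumulators to ushort by a rounded shift.
        private static Vector64<ushort> RescaleAccumulatorsExampleSketch(Vector128<uint> accumulators) =>
            ShiftRightLogicalRoundedNarrowingSaturateLower(accumulators, 8);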

        /// <summary>
        /// uint8x16_t vqrshrn_high_n_u16 (uint8x8_t r, uint16x8_t a, const int n)
        /// A32: VQRSHRN.U16 Dd+1, Qm, #n
        /// A64: UQRSHRN2 Vd.16B, Vn.8H, #n
        /// </summary>
        public static Vector128<byte> ShiftRightLogicalRoundedNarrowingSaturateUpper(Vector64<byte> lower, Vector128<ushort> value, byte count) => ShiftRightLogicalRoundedNarrowingSaturateUpper(lower, value, count);

        /// <summary>
        /// uint16x8_t vqrshrn_high_n_u32 (uint16x4_t r, uint32x4_t a, const int n)
        /// A32: VQRSHRN.U32 Dd+1, Qm, #n
        /// A64: UQRSHRN2 Vd.8H, Vn.4S, #n
        /// </summary>
        public static Vector128<short> ShiftRightLogicalRoundedNarrowingSaturateUpper(Vector64<short> lower, Vector128<int> value, byte count) => ShiftRightLogicalRoundedNarrowingSaturateUpper(lower, value, count);

        /// <summary>
        /// uint32x4_t vqrshrn_high_n_u64 (uint32x2_t r, uint64x2_t a, const int n)
        /// A32: VQRSHRN.U64 Dd+1, Qm, #n
        /// A64: UQRSHRN2 Vd.4S, Vn.2D, #n
        /// </summary>
        public static Vector128<int> ShiftRightLogicalRoundedNarrowingSaturateUpper(Vector64<int> lower, Vector128<long> value, byte count) => ShiftRightLogicalRoundedNarrowingSaturateUpper(lower, value, count);

        /// <summary>
        /// uint8x16_t vqrshrn_high_n_u16 (uint8x8_t r, uint16x8_t a, const int n)
        /// A32: VQRSHRN.U16 Dd+1, Qm, #n
        /// A64: UQRSHRN2 Vd.16B, Vn.8H, #n
        /// </summary>
        public static Vector128<sbyte> ShiftRightLogicalRoundedNarrowingSaturateUpper(Vector64<sbyte> lower, Vector128<short> value, byte count) => ShiftRightLogicalRoundedNarrowingSaturateUpper(lower, value, count);

        /// <summary>
        /// uint16x8_t vqrshrn_high_n_u32 (uint16x4_t r, uint32x4_t a, const int n)
        /// A32: VQRSHRN.U32 Dd+1, Qm, #n
        /// A64: UQRSHRN2 Vd.8H, Vn.4S, #n
        /// </summary>
        public static Vector128<ushort> ShiftRightLogicalRoundedNarrowingSaturateUpper(Vector64<ushort> lower, Vector128<uint> value, byte count) => ShiftRightLogicalRoundedNarrowingSaturateUpper(lower, value, count);

        /// <summary>
        /// uint32x4_t vqrshrn_high_n_u64 (uint32x2_t r, uint64x2_t a, const int n)
        /// A32: VQRSHRN.U64 Dd+1, Qm, #n
        /// A64: UQRSHRN2 Vd.4S, Vn.2D, #n
        /// </summary>
        public static Vector128<uint> ShiftRightLogicalRoundedNarrowingSaturateUpper(Vector64<uint> lower, Vector128<ulong> value, byte count) => ShiftRightLogicalRoundedNarrowingSaturateUpper(lower, value, count);

        /// <summary>
        /// uint8x16_t vrshrn_high_n_u16 (uint8x8_t r, uint16x8_t a, const int n)
        /// A32: VRSHRN.I16 Dd+1, Qm, #n
        /// A64: RSHRN2 Vd.16B, Vn.8H, #n
        /// </summary>
        public static Vector128<byte> ShiftRightLogicalRoundedNarrowingUpper(Vector64<byte> lower, Vector128<ushort> value, byte count) => ShiftRightLogicalRoundedNarrowingUpper(lower, value, count);

        /// <summary>
        /// int16x8_t vrshrn_high_n_s32 (int16x4_t r, int32x4_t a, const int n)
        /// A32: VRSHRN.I32 Dd+1, Qm, #n
        /// A64: RSHRN2 Vd.8H, Vn.4S, #n
        /// </summary>
        public static Vector128<short> ShiftRightLogicalRoundedNarrowingUpper(Vector64<short> lower, Vector128<int> value, byte count) => ShiftRightLogicalRoundedNarrowingUpper(lower, value, count);

        /// <summary>
        /// int32x4_t vrshrn_high_n_s64 (int32x2_t r, int64x2_t a, const int n)
        /// A32: VRSHRN.I64 Dd+1, Qm, #n
        /// A64: RSHRN2 Vd.4S, Vn.2D, #n
        /// </summary>
        public static Vector128<int> ShiftRightLogicalRoundedNarrowingUpper(Vector64<int> lower, Vector128<long> value, byte count) => ShiftRightLogicalRoundedNarrowingUpper(lower, value, count);

        /// <summary>
        /// int8x16_t vrshrn_high_n_s16 (int8x8_t r, int16x8_t a, const int n)
        /// A32: VRSHRN.I16 Dd+1, Qm, #n
        /// A64: RSHRN2 Vd.16B, Vn.8H, #n
        /// </summary>
        public static Vector128<sbyte> ShiftRightLogicalRoundedNarrowingUpper(Vector64<sbyte> lower, Vector128<short> value, byte count) => ShiftRightLogicalRoundedNarrowingUpper(lower, value, count);

        /// <summary>
        /// uint16x8_t vrshrn_high_n_u32 (uint16x4_t r, uint32x4_t a, const int n)
        /// A32: VRSHRN.I32 Dd+1, Qm, #n
        /// A64: RSHRN2 Vd.8H, Vn.4S, #n
        /// </summary>
        public static Vector128<ushort> ShiftRightLogicalRoundedNarrowingUpper(Vector64<ushort> lower, Vector128<uint> value, byte count) => ShiftRightLogicalRoundedNarrowingUpper(lower, value, count);

        /// <summary>
        /// uint32x4_t vrshrn_high_n_u64 (uint32x2_t r, uint64x2_t a, const int n)
        /// A32: VRSHRN.I64 Dd+1, Qm, #n
        /// A64: RSHRN2 Vd.4S, Vn.2D, #n
        /// </summary>
        public static Vector128<uint> ShiftRightLogicalRoundedNarrowingUpper(Vector64<uint> lower, Vector128<ulong> value, byte count) => ShiftRightLogicalRoundedNarrowingUpper(lower, value, count);

        /// <summary>
        /// uint64x1_t vrshr_n_u64 (uint64x1_t a, const int n)
        /// A32: VRSHR.U64 Dd, Dm, #n
        /// A64: URSHR Dd, Dn, #n
        /// </summary>
        public static Vector64<long> ShiftRightLogicalRoundedScalar(Vector64<long> value, byte count) => ShiftRightLogicalRoundedScalar(value, count);

        /// <summary>
        /// uint64x1_t vrshr_n_u64 (uint64x1_t a, const int n)
        /// A32: VRSHR.U64 Dd, Dm, #n
        /// A64: URSHR Dd, Dn, #n
        /// </summary>
        public static Vector64<ulong> ShiftRightLogicalRoundedScalar(Vector64<ulong> value, byte count) => ShiftRightLogicalRoundedScalar(value, count);

        /// <summary>
        /// uint64x1_t vshr_n_u64 (uint64x1_t a, const int n)
        /// A32: VSHR.U64 Dd, Dm, #n
        /// A64: USHR Dd, Dn, #n
        /// </summary>
        public static Vector64<long> ShiftRightLogicalScalar(Vector64<long> value, byte count) => ShiftRightLogicalScalar(value, count);

        /// <summary>
        /// uint64x1_t vshr_n_u64 (uint64x1_t a, const int n)
        /// A32: VSHR.U64 Dd, Dm, #n
        /// A64: USHR Dd, Dn, #n
        /// </summary>
        public static Vector64<ulong> ShiftRightLogicalScalar(Vector64<ulong> value, byte count) => ShiftRightLogicalScalar(value, count);

        /// <summary>
        /// int32x4_t vmovl_s16 (int16x4_t a)
        /// A32: VMOVL.S16 Qd, Dm
        /// A64: SXTL Vd.4S, Vn.4H
        /// </summary>
        public static Vector128<int> SignExtendWideningLower(Vector64<short> value) => SignExtendWideningLower(value);

        /// <summary>
        /// int64x2_t vmovl_s32 (int32x2_t a)
        /// A32: VMOVL.S32 Qd, Dm
        /// A64: SXTL Vd.2D, Vn.2S
        /// </summary>
        public static Vector128<long> SignExtendWideningLower(Vector64<int> value) => SignExtendWideningLower(value);

        /// <summary>
        /// int16x8_t vmovl_s8 (int8x8_t a)
        /// A32: VMOVL.S8 Qd, Dm
        /// A64: SXTL Vd.8H, Vn.8B
        /// </summary>
        public static Vector128<short> SignExtendWideningLower(Vector64<sbyte> value) => SignExtendWideningLower(value);

        /// <summary>
        /// int32x4_t vmovl_high_s16 (int16x8_t a)
        /// A32: VMOVL.S16 Qd, Dm+1
        /// A64: SXTL2 Vd.4S, Vn.8H
        /// </summary>
        public static Vector128<int> SignExtendWideningUpper(Vector128<short> value) => SignExtendWideningUpper(value);

        /// <summary>
        /// int64x2_t vmovl_high_s32 (int32x4_t a)
        /// A32: VMOVL.S32 Qd, Dm+1
        /// A64: SXTL2 Vd.2D, Vn.4S
        /// </summary>
        public static Vector128<long> SignExtendWideningUpper(Vector128<int> value) => SignExtendWideningUpper(value);

        /// <summary>
        /// int16x8_t vmovl_high_s8 (int8x16_t a)
        /// A32: VMOVL.S8 Qd, Dm+1
        /// A64: SXTL2 Vd.8H, Vn.16B
        /// </summary>
        public static Vector128<short> SignExtendWideningUpper(Vector128<sbyte> value) => SignExtendWideningUpper(value);
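
        // Editorial sketch (hypothetical helper): SignExtendWideningLower/Upper
        // split a Vector128<sbyte> into two Vector128<short> halves so later
        // arithmetic cannot wrap; Lower widens the low eight lanes (via
        // GetLower), Upper the high eight.
        private static (Vector128<short> Low, Vector128<short> High) SignExtendExampleSketch(Vector128<sbyte> value) =>
            (SignExtendWideningLower(value.GetLower()), SignExtendWideningUpper(value));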

        /// <summary>
        /// float64x1_t vsqrt_f64 (float64x1_t a)
        /// A32: VSQRT.F64 Dd, Dm
        /// A64: FSQRT Dd, Dn
        /// </summary>
        public static Vector64<double> SqrtScalar(Vector64<double> value) => SqrtScalar(value);

        /// <summary>
        /// float32_t vsqrts_f32 (float32_t a)
        /// A32: VSQRT.F32 Sd, Sm
        /// A64: FSQRT Sd, Sn
        /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs.
        /// </summary>
        public static Vector64<float> SqrtScalar(Vector64<float> value) => SqrtScalar(value);

        /// <summary>
        /// void vst1_u8 (uint8_t * ptr, uint8x8_t val)
        /// A32: VST1.8 { Dd }, [Rn]
        /// A64: ST1 { Vt.8B }, [Xn]
        /// </summary>
        public static unsafe void Store(byte* address, Vector64<byte> source) => Store(address, source);

        /// <summary>
        /// void vst1_f64 (float64_t * ptr, float64x1_t val)
        /// A32: VST1.64 { Dd }, [Rn]
        /// A64: ST1 { Vt.1D }, [Xn]
        /// </summary>
        public static unsafe void Store(double* address, Vector64<double> source) => Store(address, source);

        /// <summary>
        /// void vst1_s16 (int16_t * ptr, int16x4_t val)
        /// A32: VST1.16 { Dd }, [Rn]
        /// A64: ST1 { Vt.4H }, [Xn]
        /// </summary>
        public static unsafe void Store(short* address, Vector64<short> source) => Store(address, source);

        /// <summary>
        /// void vst1_s32 (int32_t * ptr, int32x2_t val)
        /// A32: VST1.32 { Dd }, [Rn]
        /// A64: ST1 { Vt.2S }, [Xn]
        /// </summary>
        public static unsafe void Store(int* address, Vector64<int> source) => Store(address, source);

        /// <summary>
        /// void vst1_s64 (int64_t * ptr, int64x1_t val)
        /// A32: VST1.64 { Dd }, [Rn]
        /// A64: ST1 { Vt.1D }, [Xn]
        /// </summary>
        public static unsafe void Store(long* address, Vector64<long> source) => Store(address, source);

        /// <summary>
        /// void vst1_s8 (int8_t * ptr, int8x8_t val)
        /// A32: VST1.8 { Dd }, [Rn]
        /// A64: ST1 { Vt.8B }, [Xn]
        /// </summary>
        public static unsafe void Store(sbyte* address, Vector64<sbyte> source) => Store(address, source);

        /// <summary>
        /// void vst1_f32 (float32_t * ptr, float32x2_t val)
        /// A32: VST1.32 { Dd }, [Rn]
        /// A64: ST1 { Vt.2S }, [Xn]
        /// </summary>
        public static unsafe void Store(float* address, Vector64<float> source) => Store(address, source);

        /// <summary>
        /// void vst1_u16 (uint16_t * ptr, uint16x4_t val)
        /// A32: VST1.16 { Dd }, [Rn]
        /// A64: ST1 { Vt.4H }, [Xn]
        /// </summary>
        public static unsafe void Store(ushort* address, Vector64<ushort> source) => Store(address, source);

        /// <summary>
        /// void vst1_u32 (uint32_t * ptr, uint32x2_t val)
        /// A32: VST1.32 { Dd }, [Rn]
        /// A64: ST1 { Vt.2S }, [Xn]
        /// </summary>
        public static unsafe void Store(uint* address, Vector64<uint> source) => Store(address, source);

        /// <summary>
        /// void vst1_u64 (uint64_t * ptr, uint64x1_t val)
        /// A32: VST1.64 { Dd }, [Rn]
        /// A64: ST1 { Vt.1D }, [Xn]
        /// </summary>
        public static unsafe void Store(ulong* address, Vector64<ulong> source) => Store(address, source);

        /// <summary>
        /// void vst1q_u8 (uint8_t * ptr, uint8x16_t val)
        /// A32: VST1.8 { Dd, Dd+1 }, [Rn]
        /// A64: ST1 { Vt.16B }, [Xn]
        /// </summary>
        public static unsafe void Store(byte* address, Vector128<byte> source) => Store(address, source);

        /// <summary>
        /// void vst1q_f64 (float64_t * ptr, float64x2_t val)
        /// A32: VST1.64 { Dd, Dd+1 }, [Rn]
        /// A64: ST1 { Vt.2D }, [Xn]
        /// </summary>
        public static unsafe void Store(double* address, Vector128<double> source) => Store(address, source);

        /// <summary>
        /// void vst1q_s16 (int16_t * ptr, int16x8_t val)
        /// A32: VST1.16 { Dd, Dd+1 }, [Rn]
        /// A64: ST1 { Vt.8H }, [Xn]
        /// </summary>
        public static unsafe void Store(short* address, Vector128<short> source) => Store(address, source);

        /// <summary>
        /// void vst1q_s32 (int32_t * ptr, int32x4_t val)
        /// A32: VST1.32 { Dd, Dd+1 }, [Rn]
        /// A64: ST1 { Vt.4S }, [Xn]
        /// </summary>
        public static unsafe void Store(int* address, Vector128<int> source) => Store(address, source);

        /// <summary>
        /// void vst1q_s64 (int64_t * ptr, int64x2_t val)
        /// A32: VST1.64 { Dd, Dd+1 }, [Rn]
        /// A64: ST1 { Vt.2D }, [Xn]
        /// </summary>
        public static unsafe void Store(long* address, Vector128<long> source) => Store(address, source);

        /// <summary>
        /// void vst1q_s8 (int8_t * ptr, int8x16_t val)
        /// A32: VST1.8 { Dd, Dd+1 }, [Rn]
        /// A64: ST1 { Vt.16B }, [Xn]
        /// </summary>
        public static unsafe void Store(sbyte* address, Vector128<sbyte> source) => Store(address, source);

        /// <summary>
        /// void vst1q_f32 (float32_t * ptr, float32x4_t val)
        /// A32: VST1.32 { Dd, Dd+1 }, [Rn]
        /// A64: ST1 { Vt.4S }, [Xn]
        /// </summary>
        public static unsafe void Store(float* address, Vector128<float> source) => Store(address, source);

        /// <summary>
        /// void vst1q_u16 (uint16_t * ptr, uint16x8_t val)
        /// A32: VST1.16 { Dd, Dd+1 }, [Rn]
        /// A64: ST1 { Vt.8H }, [Xn]
        /// </summary>
        public static unsafe void Store(ushort* address, Vector128<ushort> source) => Store(address, source);

        /// <summary>
        /// void vst1q_u32 (uint32_t * ptr, uint32x4_t val)
        /// A32: VST1.32 { Dd, Dd+1 }, [Rn]
        /// A64: ST1 { Vt.4S }, [Xn]
        /// </summary>
        public static unsafe void Store(uint* address, Vector128<uint> source) => Store(address, source);

        /// <summary>
        /// void vst1q_u64 (uint64_t * ptr, uint64x2_t val)
        /// A32: VST1.64 { Dd, Dd+1 }, [Rn]
        /// A64: ST1 { Vt.2D }, [Xn]
        /// </summary>
        public static unsafe void Store(ulong* address, Vector128<ulong> source) => Store(address, source);
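
        // Editorial sketch (hypothetical helper, assumes a `using System;`
        // directive at the top of the file): Store writes every lane to memory
        // with no alignment requirement; managed memory must be pinned (here
        // with `fixed`) before its address is taken.
        private static unsafe void StoreExampleSketch(Span<byte> destination, Vector128<byte> value)
        {
            if (destination.Length < 16)
                throw new ArgumentOutOfRangeException(nameof(destination));

            fixed (byte* p = destination)
            {
                Store(p, value);
            }
        }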

        /// <summary>
        /// void vst1_lane_u8 (uint8_t * ptr, uint8x8_t val, const int lane)
        /// A32: VST1.8 { Dd[index] }, [Rn]
        /// A64: ST1 { Vt.B }[index], [Xn]
        /// </summary>
        public static unsafe void StoreSelectedScalar(byte* address, Vector64<byte> value, byte index) => StoreSelectedScalar(address, value, index);

        /// <summary>
        /// void vst1_lane_s16 (int16_t * ptr, int16x4_t val, const int lane)
        /// A32: VST1.16 { Dd[index] }, [Rn]
        /// A64: ST1 { Vt.H }[index], [Xn]
        /// </summary>
        public static unsafe void StoreSelectedScalar(short* address, Vector64<short> value, byte index) => StoreSelectedScalar(address, value, index);

        /// <summary>
        /// void vst1_lane_s32 (int32_t * ptr, int32x2_t val, const int lane)
        /// A32: VST1.32 { Dd[index] }, [Rn]
        /// A64: ST1 { Vt.S }[index], [Xn]
        /// </summary>
        public static unsafe void StoreSelectedScalar(int* address, Vector64<int> value, byte index) => StoreSelectedScalar(address, value, index);

        /// <summary>
        /// void vst1_lane_s8 (int8_t * ptr, int8x8_t val, const int lane)
        /// A32: VST1.8 { Dd[index] }, [Rn]
        /// A64: ST1 { Vt.B }[index], [Xn]
        /// </summary>
        public static unsafe void StoreSelectedScalar(sbyte* address, Vector64<sbyte> value, byte index) => StoreSelectedScalar(address, value, index);

        /// <summary>
        /// void vst1_lane_f32 (float32_t * ptr, float32x2_t val, const int lane)
        /// A32: VST1.32 { Dd[index] }, [Rn]
        /// A64: ST1 { Vt.S }[index], [Xn]
        /// </summary>
        public static unsafe void StoreSelectedScalar(float* address, Vector64<float> value, byte index) => StoreSelectedScalar(address, value, index);

        /// <summary>
        /// void vst1_lane_u16 (uint16_t * ptr, uint16x4_t val, const int lane)
        /// A32: VST1.16 { Dd[index] }, [Rn]
        /// A64: ST1 { Vt.H }[index], [Xn]
        /// </summary>
        public static unsafe void StoreSelectedScalar(ushort* address, Vector64<ushort> value, byte index) => StoreSelectedScalar(address, value, index);

        /// <summary>
        /// void vst1_lane_u32 (uint32_t * ptr, uint32x2_t val, const int lane)
        /// A32: VST1.32 { Dd[index] }, [Rn]
        /// A64: ST1 { Vt.S }[index], [Xn]
        /// </summary>
        public static unsafe void StoreSelectedScalar(uint* address, Vector64<uint> value, byte index) => StoreSelectedScalar(address, value, index);

        /// <summary>
        /// void vst1q_lane_u8 (uint8_t * ptr, uint8x16_t val, const int lane)
        /// A32: VST1.8 { Dd[index] }, [Rn]
        /// A64: ST1 { Vt.B }[index], [Xn]
        /// </summary>
        public static unsafe void StoreSelectedScalar(byte* address, Vector128<byte> value, byte index) => StoreSelectedScalar(address, value, index);

        /// <summary>
        /// void vst1q_lane_f64 (float64_t * ptr, float64x2_t val, const int lane)
        /// A32: VSTR.64 Dd, [Rn]
        /// A64: ST1 { Vt.D }[index], [Xn]
        /// </summary>
        public static unsafe void StoreSelectedScalar(double* address, Vector128<double> value, byte index) => StoreSelectedScalar(address, value, index);

        /// <summary>
        /// void vst1q_lane_s16 (int16_t * ptr, int16x8_t val, const int lane)
        /// A32: VST1.16 { Dd[index] }, [Rn]
        /// A64: ST1 { Vt.H }[index], [Xn]
        /// </summary>
        public static unsafe void StoreSelectedScalar(short* address, Vector128<short> value, byte index) => StoreSelectedScalar(address, value, index);

        /// <summary>
        /// void vst1q_lane_s32 (int32_t * ptr, int32x4_t val, const int lane)
        /// A32: VST1.32 { Dd[index] }, [Rn]
        /// A64: ST1 { Vt.S }[index], [Xn]
        /// </summary>
        public static unsafe void StoreSelectedScalar(int* address, Vector128<int> value, byte index) => StoreSelectedScalar(address, value, index);

        /// <summary>
        /// void vst1q_lane_s64 (int64_t * ptr, int64x2_t val, const int lane)
        /// A32: VSTR.64 Dd, [Rn]
        /// A64: ST1 { Vt.D }[index], [Xn]
        /// </summary>
        public static unsafe void StoreSelectedScalar(long* address, Vector128<long> value, byte index) => StoreSelectedScalar(address, value, index);

        /// <summary>
        /// void vst1q_lane_s8 (int8_t * ptr, int8x16_t val, const int lane)
        /// A32: VST1.8 { Dd[index] }, [Rn]
        /// A64: ST1 { Vt.B }[index], [Xn]
        /// </summary>
        public static unsafe void StoreSelectedScalar(sbyte* address, Vector128<sbyte> value, byte index) => StoreSelectedScalar(address, value, index);

        /// <summary>
        /// void vst1q_lane_f32 (float32_t * ptr, float32x4_t val, const int lane)
        /// A32: VST1.32 { Dd[index] }, [Rn]
        /// A64: ST1 { Vt.S }[index], [Xn]
        /// </summary>
        public static unsafe void StoreSelectedScalar(float* address, Vector128<float> value, byte index) => StoreSelectedScalar(address, value, index);

        /// <summary>
        /// void vst1q_lane_u16 (uint16_t * ptr, uint16x8_t val, const int lane)
        /// A32: VST1.16 { Dd[index] }, [Rn]
        /// A64: ST1 { Vt.H }[index], [Xn]
        /// </summary>
        public static unsafe void StoreSelectedScalar(ushort* address, Vector128<ushort> value, byte index) => StoreSelectedScalar(address, value, index);

        /// <summary>
        /// void vst1q_lane_u32 (uint32_t * ptr, uint32x4_t val, const int lane)
        /// A32: VST1.32 { Dd[index] }, [Rn]
        /// A64: ST1 { Vt.S }[index], [Xn]
        /// </summary>
        public static unsafe void StoreSelectedScalar(uint* address, Vector128<uint> value, byte index) => StoreSelectedScalar(address, value, index);

        /// <summary>
        /// void vst1q_lane_u64 (uint64_t * ptr, uint64x2_t val, const int lane)
        /// A32: VSTR.64 Dd, [Rn]
        /// A64: ST1 { Vt.D }[index], [Xn]
        /// </summary>
        public static unsafe void StoreSelectedScalar(ulong* address, Vector128<ulong> value, byte index) => StoreSelectedScalar(address, value, index);
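
        // Editorial sketch (hypothetical helper): StoreSelectedScalar writes a
        // single lane. Like the shift counts above, the index is expected to be
        // a constant in range (here lane 2 of four), and only that element's
        // four bytes are written.
        private static unsafe void StoreThirdLaneExampleSketch(uint* destination, Vector128<uint> value) =>
            StoreSelectedScalar(destination, value, 2);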

        /// <summary>
        /// uint8x8_t vsub_u8 (uint8x8_t a, uint8x8_t b)
        /// A32: VSUB.I8 Dd, Dn, Dm
        /// A64: SUB Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<byte> Subtract(Vector64<byte> left, Vector64<byte> right) => Subtract(left, right);

        /// <summary>
        /// int16x4_t vsub_s16 (int16x4_t a, int16x4_t b)
        /// A32: VSUB.I16 Dd, Dn, Dm
        /// A64: SUB Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<short> Subtract(Vector64<short> left, Vector64<short> right) => Subtract(left, right);

        /// <summary>
        /// int32x2_t vsub_s32 (int32x2_t a, int32x2_t b)
        /// A32: VSUB.I32 Dd, Dn, Dm
        /// A64: SUB Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<int> Subtract(Vector64<int> left, Vector64<int> right) => Subtract(left, right);

        /// <summary>
        /// int8x8_t vsub_s8 (int8x8_t a, int8x8_t b)
        /// A32: VSUB.I8 Dd, Dn, Dm
        /// A64: SUB Vd.8B, Vn.8B, Vm.8B
        /// </summary>
        public static Vector64<sbyte> Subtract(Vector64<sbyte> left, Vector64<sbyte> right) => Subtract(left, right);

        /// <summary>
        /// float32x2_t vsub_f32 (float32x2_t a, float32x2_t b)
        /// A32: VSUB.F32 Dd, Dn, Dm
        /// A64: FSUB Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<float> Subtract(Vector64<float> left, Vector64<float> right) => Subtract(left, right);

        /// <summary>
        /// uint16x4_t vsub_u16 (uint16x4_t a, uint16x4_t b)
        /// A32: VSUB.I16 Dd, Dn, Dm
        /// A64: SUB Vd.4H, Vn.4H, Vm.4H
        /// </summary>
        public static Vector64<ushort> Subtract(Vector64<ushort> left, Vector64<ushort> right) => Subtract(left, right);

        /// <summary>
        /// uint32x2_t vsub_u32 (uint32x2_t a, uint32x2_t b)
        /// A32: VSUB.I32 Dd, Dn, Dm
        /// A64: SUB Vd.2S, Vn.2S, Vm.2S
        /// </summary>
        public static Vector64<uint> Subtract(Vector64<uint> left, Vector64<uint> right) => Subtract(left, right);

        /// <summary>
        /// uint8x16_t vsubq_u8 (uint8x16_t a, uint8x16_t b)
        /// A32: VSUB.I8 Qd, Qn, Qm
        /// A64: SUB Vd.16B, Vn.16B, Vm.16B
        /// </summary>
        public static Vector128<byte> Subtract(Vector128<byte> left, Vector128<byte> right) => Subtract(left, right);

        /// <summary>
        /// int16x8_t vsubq_s16 (int16x8_t a, int16x8_t b)
        /// A32: VSUB.I16 Qd, Qn, Qm
        /// A64: SUB Vd.8H, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<short> Subtract(Vector128<short> left, Vector128<short> right) => Subtract(left, right);

        /// <summary>
        /// int32x4_t vsubq_s32 (int32x4_t a, int32x4_t b)
        /// A32: VSUB.I32 Qd, Qn, Qm
        /// A64: SUB Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<int> Subtract(Vector128<int> left, Vector128<int> right) => Subtract(left, right);

        /// <summary>
        /// int64x2_t vsubq_s64 (int64x2_t a, int64x2_t b)
        /// A32: VSUB.I64 Qd, Qn, Qm
        /// A64: SUB Vd.2D, Vn.2D, Vm.2D
        /// </summary>
        public static Vector128<long> Subtract(Vector128<long> left, Vector128<long> right) => Subtract(left, right);

        /// <summary>
        /// int8x16_t vsubq_s8 (int8x16_t a, int8x16_t b)
        /// A32: VSUB.I8 Qd, Qn, Qm
        /// A64: SUB Vd.16B, Vn.16B, Vm.16B
        /// </summary>
        public static Vector128<sbyte> Subtract(Vector128<sbyte> left, Vector128<sbyte> right) => Subtract(left, right);

        /// <summary>
        /// float32x4_t vsubq_f32 (float32x4_t a, float32x4_t b)
        /// A32: VSUB.F32 Qd, Qn, Qm
        /// A64: FSUB Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<float> Subtract(Vector128<float> left, Vector128<float> right) => Subtract(left, right);

        /// <summary>
        /// uint16x8_t vsubq_u16 (uint16x8_t a, uint16x8_t b)
        /// A32: VSUB.I16 Qd, Qn, Qm
        /// A64: SUB Vd.8H, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<ushort> Subtract(Vector128<ushort> left, Vector128<ushort> right) => Subtract(left, right);

        /// <summary>
        /// uint32x4_t vsubq_u32 (uint32x4_t a, uint32x4_t b)
        /// A32: VSUB.I32 Qd, Qn, Qm
        /// A64: SUB Vd.4S, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<uint> Subtract(Vector128<uint> left, Vector128<uint> right) => Subtract(left, right);

        /// <summary>
        /// uint64x2_t vsubq_u64 (uint64x2_t a, uint64x2_t b)
        /// A32: VSUB.I64 Qd, Qn, Qm
        /// A64: SUB Vd.2D, Vn.2D, Vm.2D
        /// </summary>
        public static Vector128<ulong> Subtract(Vector128<ulong> left, Vector128<ulong> right) => Subtract(left, right);
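
        // Editorial sketch (hypothetical helper): plain Subtract wraps on
        // overflow (0 - 1 becomes 255 in a byte lane); compare the
        // SubtractSaturate overloads further down, which clamp at the bounds of
        // the element type instead.
        private static Vector128<byte> WrappingDifferenceExampleSketch(Vector128<byte> left, Vector128<byte> right) =>
            Subtract(left, right);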

        /// <summary>
        /// uint8x8_t vsubhn_u16 (uint16x8_t a, uint16x8_t b)
        /// A32: VSUBHN.I16 Dd, Qn, Qm
        /// A64: SUBHN Vd.8B, Vn.8H, Vm.8H
        /// </summary>
        public static Vector64<byte> SubtractHighNarrowingLower(Vector128<ushort> left, Vector128<ushort> right) => SubtractHighNarrowingLower(left, right);

        /// <summary>
        /// int16x4_t vsubhn_s32 (int32x4_t a, int32x4_t b)
        /// A32: VSUBHN.I32 Dd, Qn, Qm
        /// A64: SUBHN Vd.4H, Vn.4S, Vm.4S
        /// </summary>
        public static Vector64<short> SubtractHighNarrowingLower(Vector128<int> left, Vector128<int> right) => SubtractHighNarrowingLower(left, right);

        /// <summary>
        /// int32x2_t vsubhn_s64 (int64x2_t a, int64x2_t b)
        /// A32: VSUBHN.I64 Dd, Qn, Qm
        /// A64: SUBHN Vd.2S, Vn.2D, Vm.2D
        /// </summary>
        public static Vector64<int> SubtractHighNarrowingLower(Vector128<long> left, Vector128<long> right) => SubtractHighNarrowingLower(left, right);

        /// <summary>
        /// int8x8_t vsubhn_s16 (int16x8_t a, int16x8_t b)
        /// A32: VSUBHN.I16 Dd, Qn, Qm
        /// A64: SUBHN Vd.8B, Vn.8H, Vm.8H
        /// </summary>
        public static Vector64<sbyte> SubtractHighNarrowingLower(Vector128<short> left, Vector128<short> right) => SubtractHighNarrowingLower(left, right);

        /// <summary>
        /// uint16x4_t vsubhn_u32 (uint32x4_t a, uint32x4_t b)
        /// A32: VSUBHN.I32 Dd, Qn, Qm
        /// A64: SUBHN Vd.4H, Vn.4S, Vm.4S
        /// </summary>
        public static Vector64<ushort> SubtractHighNarrowingLower(Vector128<uint> left, Vector128<uint> right) => SubtractHighNarrowingLower(left, right);

        /// <summary>
        /// uint32x2_t vsubhn_u64 (uint64x2_t a, uint64x2_t b)
        /// A32: VSUBHN.I64 Dd, Qn, Qm
        /// A64: SUBHN Vd.2S, Vn.2D, Vm.2D
        /// </summary>
        public static Vector64<uint> SubtractHighNarrowingLower(Vector128<ulong> left, Vector128<ulong> right) => SubtractHighNarrowingLower(left, right);

        /// <summary>
        /// uint8x16_t vsubhn_high_u16 (uint8x8_t r, uint16x8_t a, uint16x8_t b)
        /// A32: VSUBHN.I16 Dd+1, Qn, Qm
        /// A64: SUBHN2 Vd.16B, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<byte> SubtractHighNarrowingUpper(Vector64<byte> lower, Vector128<ushort> left, Vector128<ushort> right) => SubtractHighNarrowingUpper(lower, left, right);

        /// <summary>
        /// int16x8_t vsubhn_high_s32 (int16x4_t r, int32x4_t a, int32x4_t b)
        /// A32: VSUBHN.I32 Dd+1, Qn, Qm
        /// A64: SUBHN2 Vd.8H, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<short> SubtractHighNarrowingUpper(Vector64<short> lower, Vector128<int> left, Vector128<int> right) => SubtractHighNarrowingUpper(lower, left, right);

        /// <summary>
        /// int32x4_t vsubhn_high_s64 (int32x2_t r, int64x2_t a, int64x2_t b)
        /// A32: VSUBHN.I64 Dd+1, Qn, Qm
        /// A64: SUBHN2 Vd.4S, Vn.2D, Vm.2D
        /// </summary>
        public static Vector128<int> SubtractHighNarrowingUpper(Vector64<int> lower, Vector128<long> left, Vector128<long> right) => SubtractHighNarrowingUpper(lower, left, right);

        /// <summary>
        /// int8x16_t vsubhn_high_s16 (int8x8_t r, int16x8_t a, int16x8_t b)
        /// A32: VSUBHN.I16 Dd+1, Qn, Qm
        /// A64: SUBHN2 Vd.16B, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<sbyte> SubtractHighNarrowingUpper(Vector64<sbyte> lower, Vector128<short> left, Vector128<short> right) => SubtractHighNarrowingUpper(lower, left, right);

        /// <summary>
        /// uint16x8_t vsubhn_high_u32 (uint16x4_t r, uint32x4_t a, uint32x4_t b)
        /// A32: VSUBHN.I32 Dd+1, Qn, Qm
        /// A64: SUBHN2 Vd.8H, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<ushort> SubtractHighNarrowingUpper(Vector64<ushort> lower, Vector128<uint> left, Vector128<uint> right) => SubtractHighNarrowingUpper(lower, left, right);

        /// <summary>
        /// uint32x4_t vsubhn_high_u64 (uint32x2_t r, uint64x2_t a, uint64x2_t b)
        /// A32: VSUBHN.I64 Dd+1, Qn, Qm
        /// A64: SUBHN2 Vd.4S, Vn.2D, Vm.2D
        /// </summary>
        public static Vector128<uint> SubtractHighNarrowingUpper(Vector64<uint> lower, Vector128<ulong> left, Vector128<ulong> right) => SubtractHighNarrowingUpper(lower, left, right);

        /// <summary>
        /// uint8x8_t vrsubhn_u16 (uint16x8_t a, uint16x8_t b)
        /// A32: VRSUBHN.I16 Dd, Qn, Qm
        /// A64: RSUBHN Vd.8B, Vn.8H, Vm.8H
        /// </summary>
        public static Vector64<byte> SubtractRoundedHighNarrowingLower(Vector128<ushort> left, Vector128<ushort> right) => SubtractRoundedHighNarrowingLower(left, right);

        /// <summary>
        /// int16x4_t vrsubhn_s32 (int32x4_t a, int32x4_t b)
        /// A32: VRSUBHN.I32 Dd, Qn, Qm
        /// A64: RSUBHN Vd.4H, Vn.4S, Vm.4S
        /// </summary>
        public static Vector64<short> SubtractRoundedHighNarrowingLower(Vector128<int> left, Vector128<int> right) => SubtractRoundedHighNarrowingLower(left, right);

        /// <summary>
        /// int32x2_t vrsubhn_s64 (int64x2_t a, int64x2_t b)
        /// A32: VRSUBHN.I64 Dd, Qn, Qm
        /// A64: RSUBHN Vd.2S, Vn.2D, Vm.2D
        /// </summary>
        public static Vector64<int> SubtractRoundedHighNarrowingLower(Vector128<long> left, Vector128<long> right) => SubtractRoundedHighNarrowingLower(left, right);

        /// <summary>
        /// int8x8_t vrsubhn_s16 (int16x8_t a, int16x8_t b)
        /// A32: VRSUBHN.I16 Dd, Qn, Qm
        /// A64: RSUBHN Vd.8B, Vn.8H, Vm.8H
        /// </summary>
        public static Vector64<sbyte> SubtractRoundedHighNarrowingLower(Vector128<short> left, Vector128<short> right) => SubtractRoundedHighNarrowingLower(left, right);

        /// <summary>
        /// uint16x4_t vrsubhn_u32 (uint32x4_t a, uint32x4_t b)
        /// A32: VRSUBHN.I32 Dd, Qn, Qm
        /// A64: RSUBHN Vd.4H, Vn.4S, Vm.4S
        /// </summary>
        public static Vector64<ushort> SubtractRoundedHighNarrowingLower(Vector128<uint> left, Vector128<uint> right) => SubtractRoundedHighNarrowingLower(left, right);

        /// <summary>
        /// uint32x2_t vrsubhn_u64 (uint64x2_t a, uint64x2_t b)
        /// A32: VRSUBHN.I64 Dd, Qn, Qm
        /// A64: RSUBHN Vd.2S, Vn.2D, Vm.2D
        /// </summary>
        public static Vector64<uint> SubtractRoundedHighNarrowingLower(Vector128<ulong> left, Vector128<ulong> right) => SubtractRoundedHighNarrowingLower(left, right);

        /// <summary>
        /// uint8x16_t vrsubhn_high_u16 (uint8x8_t r, uint16x8_t a, uint16x8_t b)
        /// A32: VRSUBHN.I16 Dd+1, Qn, Qm
        /// A64: RSUBHN2 Vd.16B, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<byte> SubtractRoundedHighNarrowingUpper(Vector64<byte> lower, Vector128<ushort> left, Vector128<ushort> right) => SubtractRoundedHighNarrowingUpper(lower, left, right);

        /// <summary>
        /// int16x8_t vrsubhn_high_s32 (int16x4_t r, int32x4_t a, int32x4_t b)
        /// A32: VRSUBHN.I32 Dd+1, Qn, Qm
        /// A64: RSUBHN2 Vd.8H, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<short> SubtractRoundedHighNarrowingUpper(Vector64<short> lower, Vector128<int> left, Vector128<int> right) => SubtractRoundedHighNarrowingUpper(lower, left, right);

        /// <summary>
        /// int32x4_t vrsubhn_high_s64 (int32x2_t r, int64x2_t a, int64x2_t b)
        /// A32: VRSUBHN.I64 Dd+1, Qn, Qm
        /// A64: RSUBHN2 Vd.4S, Vn.2D, Vm.2D
        /// </summary>
        public static Vector128<int> SubtractRoundedHighNarrowingUpper(Vector64<int> lower, Vector128<long> left, Vector128<long> right) => SubtractRoundedHighNarrowingUpper(lower, left, right);

        /// <summary>
        /// int8x16_t vrsubhn_high_s16 (int8x8_t r, int16x8_t a, int16x8_t b)
        /// A32: VRSUBHN.I16 Dd+1, Qn, Qm
        /// A64: RSUBHN2 Vd.16B, Vn.8H, Vm.8H
        /// </summary>
        public static Vector128<sbyte> SubtractRoundedHighNarrowingUpper(Vector64<sbyte> lower, Vector128<short> left, Vector128<short> right) => SubtractRoundedHighNarrowingUpper(lower, left, right);

        /// <summary>
        /// uint16x8_t vrsubhn_high_u32 (uint16x4_t r, uint32x4_t a, uint32x4_t b)
        /// A32: VRSUBHN.I32 Dd+1, Qn, Qm
        /// A64: RSUBHN2 Vd.8H, Vn.4S, Vm.4S
        /// </summary>
        public static Vector128<ushort> SubtractRoundedHighNarrowingUpper(Vector64<ushort> lower, Vector128<uint> left, Vector128<uint> right) => SubtractRoundedHighNarrowingUpper(lower, left, right);

        /// <summary>
        /// uint32x4_t vrsubhn_high_u64 (uint32x2_t r, uint64x2_t a, uint64x2_t b)
        /// A32: VRSUBHN.I64 Dd+1, Qn, Qm
        /// A64: RSUBHN2 Vd.4S, Vn.2D, Vm.2D
        /// </summary>
        public static Vector128<uint> SubtractRoundedHighNarrowingUpper(Vector64<uint> lower, Vector128<ulong> left, Vector128<ulong> right) => SubtractRoundedHighNarrowingUpper(lower, left, right);
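
        // Editorial sketch (hypothetical helper): the high-narrowing subtract
        // keeps only the top half of each wrapped 16-bit difference, i.e.
        // (byte)((ushort)(a - b) >> 8) per lane, in a single instruction.
        private static Vector64<byte> DifferenceHighBytesExampleSketch(Vector128<ushort> left, Vector128<ushort> right) =>
            SubtractHighNarrowingLower(left, right);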
int32x4_t vqsubq_s32 (int32x4_t a, int32x4_t b) /// A32: VQSUB.S32 Qd, Qn, Qm /// A64: SQSUB Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<int> SubtractSaturate(Vector128<int> left, Vector128<int> right) => SubtractSaturate(left, right); /// <summary> /// int64x2_t vqsubq_s64 (int64x2_t a, int64x2_t b) /// A32: VQSUB.S64 Qd, Qn, Qm /// A64: SQSUB Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<long> SubtractSaturate(Vector128<long> left, Vector128<long> right) => SubtractSaturate(left, right); /// <summary> /// int8x16_t vqsubq_s8 (int8x16_t a, int8x16_t b) /// A32: VQSUB.S8 Qd, Qn, Qm /// A64: SQSUB Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> SubtractSaturate(Vector128<sbyte> left, Vector128<sbyte> right) => SubtractSaturate(left, right); /// <summary> /// uint16x8_t vqsubq_u16 (uint16x8_t a, uint16x8_t b) /// A32: VQSUB.U16 Qd, Qn, Qm /// A64: UQSUB Vd.8H, Vn.8H, Vm.8H /// </summary> public static Vector128<ushort> SubtractSaturate(Vector128<ushort> left, Vector128<ushort> right) => SubtractSaturate(left, right); /// <summary> /// uint32x4_t vqsubq_u32 (uint32x4_t a, uint32x4_t b) /// A32: VQSUB.U32 Qd, Qn, Qm /// A64: UQSUB Vd.4S, Vn.4S, Vm.4S /// </summary> public static Vector128<uint> SubtractSaturate(Vector128<uint> left, Vector128<uint> right) => SubtractSaturate(left, right); /// <summary> /// uint64x2_t vqsubq_u64 (uint64x2_t a, uint64x2_t b) /// A32: VQSUB.U64 Qd, Qn, Qm /// A64: UQSUB Vd.2D, Vn.2D, Vm.2D /// </summary> public static Vector128<ulong> SubtractSaturate(Vector128<ulong> left, Vector128<ulong> right) => SubtractSaturate(left, right); /// <summary> /// int64x1_t vqsub_s64 (int64x1_t a, int64x1_t b) /// A32: VQSUB.S64 Dd, Dn, Dm /// A64: SQSUB Dd, Dn, Dm /// </summary> public static Vector64<long> SubtractSaturateScalar(Vector64<long> left, Vector64<long> right) => SubtractSaturateScalar(left, right); /// <summary> /// uint64x1_t vqsub_u64 (uint64x1_t a, uint64x1_t b) /// A32: VQSUB.U64 Dd, Dn, Dm /// A64: UQSUB Dd, Dn, Dm /// </summary> public static Vector64<ulong> SubtractSaturateScalar(Vector64<ulong> left, Vector64<ulong> right) => SubtractSaturateScalar(left, right); /// <summary> /// float64x1_t vsub_f64 (float64x1_t a, float64x1_t b) /// A32: VSUB.F64 Dd, Dn, Dm /// A64: FSUB Dd, Dn, Dm /// </summary> public static Vector64<double> SubtractScalar(Vector64<double> left, Vector64<double> right) => SubtractScalar(left, right); /// <summary> /// int64x1_t vsub_s64 (int64x1_t a, int64x1_t b) /// A32: VSUB.I64 Dd, Dn, Dm /// A64: SUB Dd, Dn, Dm /// </summary> public static Vector64<long> SubtractScalar(Vector64<long> left, Vector64<long> right) => SubtractScalar(left, right); /// <summary> /// float32_t vsubs_f32 (float32_t a, float32_t b) /// A32: VSUB.F32 Sd, Sn, Sm /// A64: FSUB Sd, Sn, Sm /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
/// </summary> public static Vector64<float> SubtractScalar(Vector64<float> left, Vector64<float> right) => SubtractScalar(left, right); /// <summary> /// uint64x1_t vsub_u64 (uint64x1_t a, uint64x1_t b) /// A32: VSUB.I64 Dd, Dn, Dm /// A64: SUB Dd, Dn, Dm /// </summary> public static Vector64<ulong> SubtractScalar(Vector64<ulong> left, Vector64<ulong> right) => SubtractScalar(left, right); /// <summary> /// uint16x8_t vsubl_u8 (uint8x8_t a, uint8x8_t b) /// A32: VSUBL.U8 Qd, Dn, Dm /// A64: USUBL Vd.8H, Vn.8B, Vm.8B /// </summary> public static Vector128<ushort> SubtractWideningLower(Vector64<byte> left, Vector64<byte> right) => SubtractWideningLower(left, right); /// <summary> /// int32x4_t vsubl_s16 (int16x4_t a, int16x4_t b) /// A32: VSUBL.S16 Qd, Dn, Dm /// A64: SSUBL Vd.4S, Vn.4H, Vm.4H /// </summary> public static Vector128<int> SubtractWideningLower(Vector64<short> left, Vector64<short> right) => SubtractWideningLower(left, right); /// <summary> /// int64x2_t vsubl_s32 (int32x2_t a, int32x2_t b) /// A32: VSUBL.S32 Qd, Dn, Dm /// A64: SSUBL Vd.2D, Vn.2S, Vm.2S /// </summary> public static Vector128<long> SubtractWideningLower(Vector64<int> left, Vector64<int> right) => SubtractWideningLower(left, right); /// <summary> /// int16x8_t vsubl_s8 (int8x8_t a, int8x8_t b) /// A32: VSUBL.S8 Qd, Dn, Dm /// A64: SSUBL Vd.8H, Vn.8B, Vm.8B /// </summary> public static Vector128<short> SubtractWideningLower(Vector64<sbyte> left, Vector64<sbyte> right) => SubtractWideningLower(left, right); /// <summary> /// uint32x4_t vsubl_u16 (uint16x4_t a, uint16x4_t b) /// A32: VSUBL.U16 Qd, Dn, Dm /// A64: USUBL Vd.4S, Vn.4H, Vm.4H /// </summary> public static Vector128<uint> SubtractWideningLower(Vector64<ushort> left, Vector64<ushort> right) => SubtractWideningLower(left, right); /// <summary> /// uint64x2_t vsubl_u32 (uint32x2_t a, uint32x2_t b) /// A32: VSUBL.U32 Qd, Dn, Dm /// A64: USUBL Vd.2D, Vn.2S, Vm.2S /// </summary> public static Vector128<ulong> SubtractWideningLower(Vector64<uint> left, Vector64<uint> right) => SubtractWideningLower(left, right); /// <summary> /// int16x8_t vsubw_s8 (int16x8_t a, int8x8_t b) /// A32: VSUBW.S8 Qd, Qn, Dm /// A64: SSUBW Vd.8H, Vn.8H, Vm.8B /// </summary> public static Vector128<short> SubtractWideningLower(Vector128<short> left, Vector64<sbyte> right) => SubtractWideningLower(left, right); /// <summary> /// int32x4_t vsubw_s16 (int32x4_t a, int16x4_t b) /// A32: VSUBW.S16 Qd, Qn, Dm /// A64: SSUBW Vd.4S, Vn.4S, Vm.4H /// </summary> public static Vector128<int> SubtractWideningLower(Vector128<int> left, Vector64<short> right) => SubtractWideningLower(left, right); /// <summary> /// int64x2_t vsubw_s32 (int64x2_t a, int32x2_t b) /// A32: VSUBW.S32 Qd, Qn, Dm /// A64: SSUBW Vd.2D, Vn.2D, Vm.2S /// </summary> public static Vector128<long> SubtractWideningLower(Vector128<long> left, Vector64<int> right) => SubtractWideningLower(left, right); /// <summary> /// uint16x8_t vsubw_u8 (uint16x8_t a, uint8x8_t b) /// A32: VSUBW.U8 Qd, Qn, Dm /// A64: USUBW Vd.8H, Vn.8H, Vm.8B /// </summary> public static Vector128<ushort> SubtractWideningLower(Vector128<ushort> left, Vector64<byte> right) => SubtractWideningLower(left, right); /// <summary> /// uint32x4_t vsubw_u16 (uint32x4_t a, uint16x4_t b) /// A32: VSUBW.U16 Qd, Qn, Dm /// A64: USUBW Vd.4S, Vn.4S, Vm.4H /// </summary> public static Vector128<uint> SubtractWideningLower(Vector128<uint> left, Vector64<ushort> right) => SubtractWideningLower(left, right); /// <summary> /// uint64x2_t vsubw_u32 (uint64x2_t a, uint32x2_t b) 
/// A32: VSUBW.U32 Qd, Qn, Dm /// A64: USUBW Vd.2D, Vn.2D, Vm.2S /// </summary> public static Vector128<ulong> SubtractWideningLower(Vector128<ulong> left, Vector64<uint> right) => SubtractWideningLower(left, right); /// <summary> /// uint16x8_t vsubl_high_u8 (uint8x16_t a, uint8x16_t b) /// A32: VSUBL.U8 Qd, Dn+1, Dm+1 /// A64: USUBL2 Vd.8H, Vn.16B, Vm.16B /// </summary> public static Vector128<ushort> SubtractWideningUpper(Vector128<byte> left, Vector128<byte> right) => SubtractWideningUpper(left, right); /// <summary> /// int32x4_t vsubl_high_s16 (int16x8_t a, int16x8_t b) /// A32: VSUBL.S16 Qd, Dn+1, Dm+1 /// A64: SSUBL2 Vd.4S, Vn.8H, Vm.8H /// </summary> public static Vector128<int> SubtractWideningUpper(Vector128<short> left, Vector128<short> right) => SubtractWideningUpper(left, right); /// <summary> /// int16x8_t vsubw_high_s8 (int16x8_t a, int8x16_t b) /// A32: VSUBW.S8 Qd, Qn, Dm+1 /// A64: SSUBW2 Vd.8H, Vn.8H, Vm.16B /// </summary> public static Vector128<short> SubtractWideningUpper(Vector128<short> left, Vector128<sbyte> right) => SubtractWideningUpper(left, right); /// <summary> /// int32x4_t vsubw_high_s16 (int32x4_t a, int16x8_t b) /// A32: VSUBW.S16 Qd, Qn, Dm+1 /// A64: SSUBW2 Vd.4S, Vn.4S, Vm.8H /// </summary> public static Vector128<int> SubtractWideningUpper(Vector128<int> left, Vector128<short> right) => SubtractWideningUpper(left, right); /// <summary> /// int64x2_t vsubl_high_s32 (int32x4_t a, int32x4_t b) /// A32: VSUBL.S32 Qd, Dn+1, Dm+1 /// A64: SSUBL2 Vd.2D, Vn.4S, Vm.4S /// </summary> public static Vector128<long> SubtractWideningUpper(Vector128<int> left, Vector128<int> right) => SubtractWideningUpper(left, right); /// <summary> /// int64x2_t vsubw_high_s32 (int64x2_t a, int32x4_t b) /// A32: VSUBW.S32 Qd, Qn, Dm+1 /// A64: SSUBW2 Vd.2D, Vn.2D, Vm.4S /// </summary> public static Vector128<long> SubtractWideningUpper(Vector128<long> left, Vector128<int> right) => SubtractWideningUpper(left, right); /// <summary> /// int16x8_t vsubl_high_s8 (int8x16_t a, int8x16_t b) /// A32: VSUBL.S8 Qd, Dn+1, Dm+1 /// A64: SSUBL2 Vd.8H, Vn.16B, Vm.16B /// </summary> public static Vector128<short> SubtractWideningUpper(Vector128<sbyte> left, Vector128<sbyte> right) => SubtractWideningUpper(left, right); /// <summary> /// uint16x8_t vsubw_high_u8 (uint16x8_t a, uint8x16_t b) /// A32: VSUBW.U8 Qd, Qn, Dm+1 /// A64: USUBW2 Vd.8H, Vn.8H, Vm.16B /// </summary> public static Vector128<ushort> SubtractWideningUpper(Vector128<ushort> left, Vector128<byte> right) => SubtractWideningUpper(left, right); /// <summary> /// uint32x4_t vsubl_high_u16 (uint16x8_t a, uint16x8_t b) /// A32: VSUBL.U16 Qd, Dn+1, Dm+1 /// A64: USUBL2 Vd.4S, Vn.8H, Vm.8H /// </summary> public static Vector128<uint> SubtractWideningUpper(Vector128<ushort> left, Vector128<ushort> right) => SubtractWideningUpper(left, right); /// <summary> /// uint32x4_t vsubw_high_u16 (uint32x4_t a, uint16x8_t b) /// A32: VSUBW.U16 Qd, Qn, Dm+1 /// A64: USUBW2 Vd.4S, Vn.4S, Vm.8H /// </summary> public static Vector128<uint> SubtractWideningUpper(Vector128<uint> left, Vector128<ushort> right) => SubtractWideningUpper(left, right); /// <summary> /// uint64x2_t vsubl_high_u32 (uint32x4_t a, uint32x4_t b) /// A32: VSUBL.U32 Qd, Dn+1, Dm+1 /// A64: USUBL2 Vd.2D, Vn.4S, Vm.4S /// </summary> public static Vector128<ulong> SubtractWideningUpper(Vector128<uint> left, Vector128<uint> right) => SubtractWideningUpper(left, right); /// <summary> /// uint64x2_t vsubw_high_u32 (uint64x2_t a, uint32x4_t b) /// A32: VSUBW.U32 Qd, Qn, Dm+1 /// A64: 
USUBW2 Vd.2D, Vn.2D, Vm.4S /// </summary> public static Vector128<ulong> SubtractWideningUpper(Vector128<ulong> left, Vector128<uint> right) => SubtractWideningUpper(left, right); /// <summary> /// uint8x8_t vqvtbl1_u8(uint8x16_t t, uint8x8_t idx) /// A32: VTBL Dd, {Dn, Dn+1}, Dm /// A64: TBL Vd.8B, {Vn.16B}, Vm.8B /// </summary> public static Vector64<byte> VectorTableLookup(Vector128<byte> table, Vector64<byte> byteIndexes) => VectorTableLookup(table, byteIndexes); /// <summary> /// int8x8_t vqvtbl1_s8(int8x16_t t, uint8x8_t idx) /// A32: VTBL Dd, {Dn, Dn+1}, Dm /// A64: TBL Vd.8B, {Vn.16B}, Vm.8B /// </summary> public static Vector64<sbyte> VectorTableLookup(Vector128<sbyte> table, Vector64<sbyte> byteIndexes) => VectorTableLookup(table, byteIndexes); /// <summary> /// uint8x8_t vqvtbx1_u8(uint8x8_t r, uint8x16_t t, uint8x8_t idx) /// A32: VTBX Dd, {Dn, Dn+1}, Dm /// A64: TBX Vd.8B, {Vn.16B}, Vm.8B /// </summary> public static Vector64<byte> VectorTableLookupExtension(Vector64<byte> defaultValues, Vector128<byte> table, Vector64<byte> byteIndexes) => VectorTableLookupExtension(defaultValues, table, byteIndexes); /// <summary> /// int8x8_t vqvtbx1_s8(int8x8_t r, int8x16_t t, uint8x8_t idx) /// A32: VTBX Dd, {Dn, Dn+1}, Dm /// A64: TBX Vd.8B, {Vn.16B}, Vm.8B /// </summary> public static Vector64<sbyte> VectorTableLookupExtension(Vector64<sbyte> defaultValues, Vector128<sbyte> table, Vector64<sbyte> byteIndexes) => VectorTableLookupExtension(defaultValues, table, byteIndexes); /// <summary> /// uint8x8_t veor_u8 (uint8x8_t a, uint8x8_t b) /// A32: VEOR Dd, Dn, Dm /// A64: EOR Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<byte> Xor(Vector64<byte> left, Vector64<byte> right) => Xor(left, right); /// <summary> /// float64x1_t veor_f64 (float64x1_t a, float64x1_t b) /// A32: VEOR Dd, Dn, Dm /// A64: EOR Vd.8B, Vn.8B, Vm.8B /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. /// </summary> public static Vector64<double> Xor(Vector64<double> left, Vector64<double> right) => Xor(left, right); /// <summary> /// int16x4_t veor_s16 (int16x4_t a, int16x4_t b) /// A32: VEOR Dd, Dn, Dm /// A64: EOR Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<short> Xor(Vector64<short> left, Vector64<short> right) => Xor(left, right); /// <summary> /// int32x2_t veor_s32 (int32x2_t a, int32x2_t b) /// A32: VEOR Dd, Dn, Dm /// A64: EOR Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<int> Xor(Vector64<int> left, Vector64<int> right) => Xor(left, right); /// <summary> /// int64x1_t veor_s64 (int64x1_t a, int64x1_t b) /// A32: VEOR Dd, Dn, Dm /// A64: EOR Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<long> Xor(Vector64<long> left, Vector64<long> right) => Xor(left, right); /// <summary> /// int8x8_t veor_s8 (int8x8_t a, int8x8_t b) /// A32: VEOR Dd, Dn, Dm /// A64: EOR Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<sbyte> Xor(Vector64<sbyte> left, Vector64<sbyte> right) => Xor(left, right); /// <summary> /// float32x2_t veor_f32 (float32x2_t a, float32x2_t b) /// A32: VEOR Dd, Dn, Dm /// A64: EOR Vd.8B, Vn.8B, Vm.8B /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
/// </summary> public static Vector64<float> Xor(Vector64<float> left, Vector64<float> right) => Xor(left, right); /// <summary> /// uint16x4_t veor_u16 (uint16x4_t a, uint16x4_t b) /// A32: VEOR Dd, Dn, Dm /// A64: EOR Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<ushort> Xor(Vector64<ushort> left, Vector64<ushort> right) => Xor(left, right); /// <summary> /// uint32x2_t veor_u32 (uint32x2_t a, uint32x2_t b) /// A32: VEOR Dd, Dn, Dm /// A64: EOR Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<uint> Xor(Vector64<uint> left, Vector64<uint> right) => Xor(left, right); /// <summary> /// uint64x1_t veor_u64 (uint64x1_t a, uint64x1_t b) /// A32: VEOR Dd, Dn, Dm /// A64: EOR Vd.8B, Vn.8B, Vm.8B /// </summary> public static Vector64<ulong> Xor(Vector64<ulong> left, Vector64<ulong> right) => Xor(left, right); /// <summary> /// uint8x16_t veorq_u8 (uint8x16_t a, uint8x16_t b) /// A32: VEOR Qd, Qn, Qm /// A64: EOR Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<byte> Xor(Vector128<byte> left, Vector128<byte> right) => Xor(left, right); /// <summary> /// float64x2_t veorq_f64 (float64x2_t a, float64x2_t b) /// A32: VEOR Qd, Qn, Qm /// A64: EOR Vd.16B, Vn.16B, Vm.16B /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. /// </summary> public static Vector128<double> Xor(Vector128<double> left, Vector128<double> right) => Xor(left, right); /// <summary> /// int16x8_t veorq_s16 (int16x8_t a, int16x8_t b) /// A32: VEOR Qd, Qn, Qm /// A64: EOR Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<short> Xor(Vector128<short> left, Vector128<short> right) => Xor(left, right); /// <summary> /// int32x4_t veorq_s32 (int32x4_t a, int32x4_t b) /// A32: VEOR Qd, Qn, Qm /// A64: EOR Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<int> Xor(Vector128<int> left, Vector128<int> right) => Xor(left, right); /// <summary> /// int64x2_t veorq_s64 (int64x2_t a, int64x2_t b) /// A32: VEOR Qd, Qn, Qm /// A64: EOR Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<long> Xor(Vector128<long> left, Vector128<long> right) => Xor(left, right); /// <summary> /// int8x16_t veorq_s8 (int8x16_t a, int8x16_t b) /// A32: VEOR Qd, Qn, Qm /// A64: EOR Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<sbyte> Xor(Vector128<sbyte> left, Vector128<sbyte> right) => Xor(left, right); /// <summary> /// float32x4_t veorq_f32 (float32x4_t a, float32x4_t b) /// A32: VEOR Qd, Qn, Qm /// A64: EOR Vd.16B, Vn.16B, Vm.16B /// The above native signature does not exist. We provide this additional overload for consistency with the other scalar APIs. 
/// </summary> public static Vector128<float> Xor(Vector128<float> left, Vector128<float> right) => Xor(left, right); /// <summary> /// uint16x8_t veorq_u16 (uint16x8_t a, uint16x8_t b) /// A32: VEOR Qd, Qn, Qm /// A64: EOR Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<ushort> Xor(Vector128<ushort> left, Vector128<ushort> right) => Xor(left, right); /// <summary> /// uint32x4_t veorq_u32 (uint32x4_t a, uint32x4_t b) /// A32: VEOR Qd, Qn, Qm /// A64: EOR Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<uint> Xor(Vector128<uint> left, Vector128<uint> right) => Xor(left, right); /// <summary> /// uint64x2_t veorq_u64 (uint64x2_t a, uint64x2_t b) /// A32: VEOR Qd, Qn, Qm /// A64: EOR Vd.16B, Vn.16B, Vm.16B /// </summary> public static Vector128<ulong> Xor(Vector128<ulong> left, Vector128<ulong> right) => Xor(left, right); /// <summary> /// uint16x8_t vmovl_u8 (uint8x8_t a) /// A32: VMOVL.U8 Qd, Dm /// A64: UXTL Vd.8H, Vn.8B /// </summary> public static Vector128<ushort> ZeroExtendWideningLower(Vector64<byte> value) => ZeroExtendWideningLower(value); /// <summary> /// uint32x4_t vmovl_u16 (uint16x4_t a) /// A32: VMOVL.U16 Qd, Dm /// A64: UXTL Vd.4S, Vn.4H /// </summary> public static Vector128<int> ZeroExtendWideningLower(Vector64<short> value) => ZeroExtendWideningLower(value); /// <summary> /// uint64x2_t vmovl_u32 (uint32x2_t a) /// A32: VMOVL.U32 Qd, Dm /// A64: UXTL Vd.2D, Vn.2S /// </summary> public static Vector128<long> ZeroExtendWideningLower(Vector64<int> value) => ZeroExtendWideningLower(value); /// <summary> /// uint16x8_t vmovl_u8 (uint8x8_t a) /// A32: VMOVL.U8 Qd, Dm /// A64: UXTL Vd.8H, Vn.8B /// </summary> public static Vector128<short> ZeroExtendWideningLower(Vector64<sbyte> value) => ZeroExtendWideningLower(value); /// <summary> /// uint32x4_t vmovl_u16 (uint16x4_t a) /// A32: VMOVL.U16 Qd, Dm /// A64: UXTL Vd.4S, Vn.4H /// </summary> public static Vector128<uint> ZeroExtendWideningLower(Vector64<ushort> value) => ZeroExtendWideningLower(value); /// <summary> /// uint64x2_t vmovl_u32 (uint32x2_t a) /// A32: VMOVL.U32 Qd, Dm /// A64: UXTL Vd.2D, Vn.2S /// </summary> public static Vector128<ulong> ZeroExtendWideningLower(Vector64<uint> value) => ZeroExtendWideningLower(value); /// <summary> /// uint16x8_t vmovl_high_u8 (uint8x16_t a) /// A32: VMOVL.U8 Qd, Dm+1 /// A64: UXTL2 Vd.8H, Vn.16B /// </summary> public static Vector128<ushort> ZeroExtendWideningUpper(Vector128<byte> value) => ZeroExtendWideningUpper(value); /// <summary> /// uint32x4_t vmovl_high_u16 (uint16x8_t a) /// A32: VMOVL.U16 Qd, Dm+1 /// A64: UXTL2 Vd.4S, Vn.8H /// </summary> public static Vector128<int> ZeroExtendWideningUpper(Vector128<short> value) => ZeroExtendWideningUpper(value); /// <summary> /// uint64x2_t vmovl_high_u32 (uint32x4_t a) /// A32: VMOVL.U32 Qd, Dm+1 /// A64: UXTL2 Vd.2D, Vn.4S /// </summary> public static Vector128<long> ZeroExtendWideningUpper(Vector128<int> value) => ZeroExtendWideningUpper(value); /// <summary> /// uint16x8_t vmovl_high_u8 (uint8x16_t a) /// A32: VMOVL.U8 Qd, Dm+1 /// A64: UXTL2 Vd.8H, Vn.16B /// </summary> public static Vector128<short> ZeroExtendWideningUpper(Vector128<sbyte> value) => ZeroExtendWideningUpper(value); /// <summary> /// uint32x4_t vmovl_high_u16 (uint16x8_t a) /// A32: VMOVL.U16 Qd, Dm+1 /// A64: UXTL2 Vd.4S, Vn.8H /// </summary> public static Vector128<uint> ZeroExtendWideningUpper(Vector128<ushort> value) => ZeroExtendWideningUpper(value); /// <summary> /// uint64x2_t vmovl_high_u32 (uint32x4_t a) /// A32: VMOVL.U32 Qd, 
Dm+1 /// A64: UXTL2 Vd.2D, Vn.4S /// </summary> public static Vector128<ulong> ZeroExtendWideningUpper(Vector128<uint> value) => ZeroExtendWideningUpper(value); } }
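The wrappers above are thin one-to-one projections of NEON instructions; a minimal usage sketch combining two of the families documented in this file (illustrative values, and a real caller must check `AdvSimd.IsSupported` first):

```csharp
using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.Arm;

class AdvSimdSketch
{
    // Saturating byte subtraction (UQSUB) followed by zero-extension of the
    // eight byte lanes into ushort lanes (UXTL), matching the signatures
    // documented above.
    static Vector128<ushort> SaturatedDiffWidened(Vector64<byte> a, Vector64<byte> b)
    {
        if (!AdvSimd.IsSupported)
            throw new PlatformNotSupportedException();

        Vector64<byte> diff = AdvSimd.SubtractSaturate(a, b); // lanes clamp at 0 instead of wrapping
        return AdvSimd.ZeroExtendWideningLower(diff);         // 8 x byte -> 8 x ushort
    }
}
```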
-1
dotnet/runtime
65,899
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync
I was unable to repro #65835 locally for a few hours, but I believe that it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`, and my recent changes from #64997 have just exposed the problem by adding the finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync`, but not calling `GC.SuppressFinalize(this)`. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I guess (I was not able to repro it) that it's caused by a flush implemented for the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flushing should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I've tried really hard to write a failing unit test, but I've failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base impl and is bug-free.
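A minimal sketch of the pattern the title describes (illustrative type, not the actual runtime diff): a finalizable stream whose `DisposeAsync` bypasses `Dispose()`/`Close()` must call `GC.SuppressFinalize(this)` itself.

```csharp
using System;
using System.IO;
using System.Threading.Tasks;

// Illustrative stand-in for FileStream's situation: it has a finalizer for a
// last-chance flush, and its DisposeAsync does not route through Dispose(),
// which is where Stream normally suppresses finalization.
public sealed class BufferedStreamWithFinalizer : Stream
{
    private readonly MemoryStream _inner = new();

    ~BufferedStreamWithFinalizer() => _inner.Flush(); // last-chance flush

    public override ValueTask DisposeAsync()
    {
        _inner.Dispose();
        GC.SuppressFinalize(this); // the missing call: without it the finalizer
                                   // can still run after async disposal
        return ValueTask.CompletedTask;
    }

    // Minimal Stream plumbing so the sketch compiles.
    public override bool CanRead => _inner.CanRead;
    public override bool CanSeek => _inner.CanSeek;
    public override bool CanWrite => _inner.CanWrite;
    public override long Length => _inner.Length;
    public override long Position { get => _inner.Position; set => _inner.Position = value; }
    public override void Flush() => _inner.Flush();
    public override int Read(byte[] buffer, int offset, int count) => _inner.Read(buffer, offset, count);
    public override long Seek(long offset, SeekOrigin origin) => _inner.Seek(offset, origin);
    public override void SetLength(long value) => _inner.SetLength(value);
    public override void Write(byte[] buffer, int offset, int count) => _inner.Write(buffer, offset, count);
}
```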
adamsitnik
"2022-02-25T17:17:25Z"
"2022-03-01T13:15:47Z"
097d9ea3c1584eb8745bd0a72ebf9cd3a31f1618
3cbed4b71800709a8121e50e964ae5b02bd80b94
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync. I was unable to repro #65835 locally for a few hours, but I believe that it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`, and my recent changes from #64997 have just exposed the problem by adding the finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync`, but not calling `GC.SuppressFinalize(this)`. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I guess (I was not able to repro it) that it's caused by a flush implemented for the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flushing should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I've tried really hard to write a failing unit test, but I've failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base impl and is bug-free.
./src/libraries/System.Text.Encodings.Web/tests/UnicodeRangesTests.generated.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // This file was generated by a tool. // See src/System.Text.Encodings.Web/tools/GenUnicodeRanges using System.Collections.Generic; namespace System.Text.Unicode.Tests { public static partial class UnicodeRangesTests { public static IEnumerable<object[]> UnicodeRanges_GeneratedData => new[] { new object[] { '\u0000', '\u007F', nameof(UnicodeRanges.BasicLatin) }, new object[] { '\u0080', '\u00FF', nameof(UnicodeRanges.Latin1Supplement) }, new object[] { '\u0100', '\u017F', nameof(UnicodeRanges.LatinExtendedA) }, new object[] { '\u0180', '\u024F', nameof(UnicodeRanges.LatinExtendedB) }, new object[] { '\u0250', '\u02AF', nameof(UnicodeRanges.IpaExtensions) }, new object[] { '\u02B0', '\u02FF', nameof(UnicodeRanges.SpacingModifierLetters) }, new object[] { '\u0300', '\u036F', nameof(UnicodeRanges.CombiningDiacriticalMarks) }, new object[] { '\u0370', '\u03FF', nameof(UnicodeRanges.GreekandCoptic) }, new object[] { '\u0400', '\u04FF', nameof(UnicodeRanges.Cyrillic) }, new object[] { '\u0500', '\u052F', nameof(UnicodeRanges.CyrillicSupplement) }, new object[] { '\u0530', '\u058F', nameof(UnicodeRanges.Armenian) }, new object[] { '\u0590', '\u05FF', nameof(UnicodeRanges.Hebrew) }, new object[] { '\u0600', '\u06FF', nameof(UnicodeRanges.Arabic) }, new object[] { '\u0700', '\u074F', nameof(UnicodeRanges.Syriac) }, new object[] { '\u0750', '\u077F', nameof(UnicodeRanges.ArabicSupplement) }, new object[] { '\u0780', '\u07BF', nameof(UnicodeRanges.Thaana) }, new object[] { '\u07C0', '\u07FF', nameof(UnicodeRanges.NKo) }, new object[] { '\u0800', '\u083F', nameof(UnicodeRanges.Samaritan) }, new object[] { '\u0840', '\u085F', nameof(UnicodeRanges.Mandaic) }, new object[] { '\u0860', '\u086F', nameof(UnicodeRanges.SyriacSupplement) }, new object[] { '\u0870', '\u089F', nameof(UnicodeRanges.ArabicExtendedB) }, new object[] { '\u08A0', '\u08FF', nameof(UnicodeRanges.ArabicExtendedA) }, new object[] { '\u0900', '\u097F', nameof(UnicodeRanges.Devanagari) }, new object[] { '\u0980', '\u09FF', nameof(UnicodeRanges.Bengali) }, new object[] { '\u0A00', '\u0A7F', nameof(UnicodeRanges.Gurmukhi) }, new object[] { '\u0A80', '\u0AFF', nameof(UnicodeRanges.Gujarati) }, new object[] { '\u0B00', '\u0B7F', nameof(UnicodeRanges.Oriya) }, new object[] { '\u0B80', '\u0BFF', nameof(UnicodeRanges.Tamil) }, new object[] { '\u0C00', '\u0C7F', nameof(UnicodeRanges.Telugu) }, new object[] { '\u0C80', '\u0CFF', nameof(UnicodeRanges.Kannada) }, new object[] { '\u0D00', '\u0D7F', nameof(UnicodeRanges.Malayalam) }, new object[] { '\u0D80', '\u0DFF', nameof(UnicodeRanges.Sinhala) }, new object[] { '\u0E00', '\u0E7F', nameof(UnicodeRanges.Thai) }, new object[] { '\u0E80', '\u0EFF', nameof(UnicodeRanges.Lao) }, new object[] { '\u0F00', '\u0FFF', nameof(UnicodeRanges.Tibetan) }, new object[] { '\u1000', '\u109F', nameof(UnicodeRanges.Myanmar) }, new object[] { '\u10A0', '\u10FF', nameof(UnicodeRanges.Georgian) }, new object[] { '\u1100', '\u11FF', nameof(UnicodeRanges.HangulJamo) }, new object[] { '\u1200', '\u137F', nameof(UnicodeRanges.Ethiopic) }, new object[] { '\u1380', '\u139F', nameof(UnicodeRanges.EthiopicSupplement) }, new object[] { '\u13A0', '\u13FF', nameof(UnicodeRanges.Cherokee) }, new object[] { '\u1400', '\u167F', nameof(UnicodeRanges.UnifiedCanadianAboriginalSyllabics) }, new object[] { '\u1680', '\u169F', nameof(UnicodeRanges.Ogham) }, new object[] { '\u16A0', '\u16FF', 
nameof(UnicodeRanges.Runic) }, new object[] { '\u1700', '\u171F', nameof(UnicodeRanges.Tagalog) }, new object[] { '\u1720', '\u173F', nameof(UnicodeRanges.Hanunoo) }, new object[] { '\u1740', '\u175F', nameof(UnicodeRanges.Buhid) }, new object[] { '\u1760', '\u177F', nameof(UnicodeRanges.Tagbanwa) }, new object[] { '\u1780', '\u17FF', nameof(UnicodeRanges.Khmer) }, new object[] { '\u1800', '\u18AF', nameof(UnicodeRanges.Mongolian) }, new object[] { '\u18B0', '\u18FF', nameof(UnicodeRanges.UnifiedCanadianAboriginalSyllabicsExtended) }, new object[] { '\u1900', '\u194F', nameof(UnicodeRanges.Limbu) }, new object[] { '\u1950', '\u197F', nameof(UnicodeRanges.TaiLe) }, new object[] { '\u1980', '\u19DF', nameof(UnicodeRanges.NewTaiLue) }, new object[] { '\u19E0', '\u19FF', nameof(UnicodeRanges.KhmerSymbols) }, new object[] { '\u1A00', '\u1A1F', nameof(UnicodeRanges.Buginese) }, new object[] { '\u1A20', '\u1AAF', nameof(UnicodeRanges.TaiTham) }, new object[] { '\u1AB0', '\u1AFF', nameof(UnicodeRanges.CombiningDiacriticalMarksExtended) }, new object[] { '\u1B00', '\u1B7F', nameof(UnicodeRanges.Balinese) }, new object[] { '\u1B80', '\u1BBF', nameof(UnicodeRanges.Sundanese) }, new object[] { '\u1BC0', '\u1BFF', nameof(UnicodeRanges.Batak) }, new object[] { '\u1C00', '\u1C4F', nameof(UnicodeRanges.Lepcha) }, new object[] { '\u1C50', '\u1C7F', nameof(UnicodeRanges.OlChiki) }, new object[] { '\u1C80', '\u1C8F', nameof(UnicodeRanges.CyrillicExtendedC) }, new object[] { '\u1C90', '\u1CBF', nameof(UnicodeRanges.GeorgianExtended) }, new object[] { '\u1CC0', '\u1CCF', nameof(UnicodeRanges.SundaneseSupplement) }, new object[] { '\u1CD0', '\u1CFF', nameof(UnicodeRanges.VedicExtensions) }, new object[] { '\u1D00', '\u1D7F', nameof(UnicodeRanges.PhoneticExtensions) }, new object[] { '\u1D80', '\u1DBF', nameof(UnicodeRanges.PhoneticExtensionsSupplement) }, new object[] { '\u1DC0', '\u1DFF', nameof(UnicodeRanges.CombiningDiacriticalMarksSupplement) }, new object[] { '\u1E00', '\u1EFF', nameof(UnicodeRanges.LatinExtendedAdditional) }, new object[] { '\u1F00', '\u1FFF', nameof(UnicodeRanges.GreekExtended) }, new object[] { '\u2000', '\u206F', nameof(UnicodeRanges.GeneralPunctuation) }, new object[] { '\u2070', '\u209F', nameof(UnicodeRanges.SuperscriptsandSubscripts) }, new object[] { '\u20A0', '\u20CF', nameof(UnicodeRanges.CurrencySymbols) }, new object[] { '\u20D0', '\u20FF', nameof(UnicodeRanges.CombiningDiacriticalMarksforSymbols) }, new object[] { '\u2100', '\u214F', nameof(UnicodeRanges.LetterlikeSymbols) }, new object[] { '\u2150', '\u218F', nameof(UnicodeRanges.NumberForms) }, new object[] { '\u2190', '\u21FF', nameof(UnicodeRanges.Arrows) }, new object[] { '\u2200', '\u22FF', nameof(UnicodeRanges.MathematicalOperators) }, new object[] { '\u2300', '\u23FF', nameof(UnicodeRanges.MiscellaneousTechnical) }, new object[] { '\u2400', '\u243F', nameof(UnicodeRanges.ControlPictures) }, new object[] { '\u2440', '\u245F', nameof(UnicodeRanges.OpticalCharacterRecognition) }, new object[] { '\u2460', '\u24FF', nameof(UnicodeRanges.EnclosedAlphanumerics) }, new object[] { '\u2500', '\u257F', nameof(UnicodeRanges.BoxDrawing) }, new object[] { '\u2580', '\u259F', nameof(UnicodeRanges.BlockElements) }, new object[] { '\u25A0', '\u25FF', nameof(UnicodeRanges.GeometricShapes) }, new object[] { '\u2600', '\u26FF', nameof(UnicodeRanges.MiscellaneousSymbols) }, new object[] { '\u2700', '\u27BF', nameof(UnicodeRanges.Dingbats) }, new object[] { '\u27C0', '\u27EF', nameof(UnicodeRanges.MiscellaneousMathematicalSymbolsA) }, new 
object[] { '\u27F0', '\u27FF', nameof(UnicodeRanges.SupplementalArrowsA) }, new object[] { '\u2800', '\u28FF', nameof(UnicodeRanges.BraillePatterns) }, new object[] { '\u2900', '\u297F', nameof(UnicodeRanges.SupplementalArrowsB) }, new object[] { '\u2980', '\u29FF', nameof(UnicodeRanges.MiscellaneousMathematicalSymbolsB) }, new object[] { '\u2A00', '\u2AFF', nameof(UnicodeRanges.SupplementalMathematicalOperators) }, new object[] { '\u2B00', '\u2BFF', nameof(UnicodeRanges.MiscellaneousSymbolsandArrows) }, new object[] { '\u2C00', '\u2C5F', nameof(UnicodeRanges.Glagolitic) }, new object[] { '\u2C60', '\u2C7F', nameof(UnicodeRanges.LatinExtendedC) }, new object[] { '\u2C80', '\u2CFF', nameof(UnicodeRanges.Coptic) }, new object[] { '\u2D00', '\u2D2F', nameof(UnicodeRanges.GeorgianSupplement) }, new object[] { '\u2D30', '\u2D7F', nameof(UnicodeRanges.Tifinagh) }, new object[] { '\u2D80', '\u2DDF', nameof(UnicodeRanges.EthiopicExtended) }, new object[] { '\u2DE0', '\u2DFF', nameof(UnicodeRanges.CyrillicExtendedA) }, new object[] { '\u2E00', '\u2E7F', nameof(UnicodeRanges.SupplementalPunctuation) }, new object[] { '\u2E80', '\u2EFF', nameof(UnicodeRanges.CjkRadicalsSupplement) }, new object[] { '\u2F00', '\u2FDF', nameof(UnicodeRanges.KangxiRadicals) }, new object[] { '\u2FF0', '\u2FFF', nameof(UnicodeRanges.IdeographicDescriptionCharacters) }, new object[] { '\u3000', '\u303F', nameof(UnicodeRanges.CjkSymbolsandPunctuation) }, new object[] { '\u3040', '\u309F', nameof(UnicodeRanges.Hiragana) }, new object[] { '\u30A0', '\u30FF', nameof(UnicodeRanges.Katakana) }, new object[] { '\u3100', '\u312F', nameof(UnicodeRanges.Bopomofo) }, new object[] { '\u3130', '\u318F', nameof(UnicodeRanges.HangulCompatibilityJamo) }, new object[] { '\u3190', '\u319F', nameof(UnicodeRanges.Kanbun) }, new object[] { '\u31A0', '\u31BF', nameof(UnicodeRanges.BopomofoExtended) }, new object[] { '\u31C0', '\u31EF', nameof(UnicodeRanges.CjkStrokes) }, new object[] { '\u31F0', '\u31FF', nameof(UnicodeRanges.KatakanaPhoneticExtensions) }, new object[] { '\u3200', '\u32FF', nameof(UnicodeRanges.EnclosedCjkLettersandMonths) }, new object[] { '\u3300', '\u33FF', nameof(UnicodeRanges.CjkCompatibility) }, new object[] { '\u3400', '\u4DBF', nameof(UnicodeRanges.CjkUnifiedIdeographsExtensionA) }, new object[] { '\u4DC0', '\u4DFF', nameof(UnicodeRanges.YijingHexagramSymbols) }, new object[] { '\u4E00', '\u9FFF', nameof(UnicodeRanges.CjkUnifiedIdeographs) }, new object[] { '\uA000', '\uA48F', nameof(UnicodeRanges.YiSyllables) }, new object[] { '\uA490', '\uA4CF', nameof(UnicodeRanges.YiRadicals) }, new object[] { '\uA4D0', '\uA4FF', nameof(UnicodeRanges.Lisu) }, new object[] { '\uA500', '\uA63F', nameof(UnicodeRanges.Vai) }, new object[] { '\uA640', '\uA69F', nameof(UnicodeRanges.CyrillicExtendedB) }, new object[] { '\uA6A0', '\uA6FF', nameof(UnicodeRanges.Bamum) }, new object[] { '\uA700', '\uA71F', nameof(UnicodeRanges.ModifierToneLetters) }, new object[] { '\uA720', '\uA7FF', nameof(UnicodeRanges.LatinExtendedD) }, new object[] { '\uA800', '\uA82F', nameof(UnicodeRanges.SylotiNagri) }, new object[] { '\uA830', '\uA83F', nameof(UnicodeRanges.CommonIndicNumberForms) }, new object[] { '\uA840', '\uA87F', nameof(UnicodeRanges.Phagspa) }, new object[] { '\uA880', '\uA8DF', nameof(UnicodeRanges.Saurashtra) }, new object[] { '\uA8E0', '\uA8FF', nameof(UnicodeRanges.DevanagariExtended) }, new object[] { '\uA900', '\uA92F', nameof(UnicodeRanges.KayahLi) }, new object[] { '\uA930', '\uA95F', nameof(UnicodeRanges.Rejang) }, new object[] { 
'\uA960', '\uA97F', nameof(UnicodeRanges.HangulJamoExtendedA) }, new object[] { '\uA980', '\uA9DF', nameof(UnicodeRanges.Javanese) }, new object[] { '\uA9E0', '\uA9FF', nameof(UnicodeRanges.MyanmarExtendedB) }, new object[] { '\uAA00', '\uAA5F', nameof(UnicodeRanges.Cham) }, new object[] { '\uAA60', '\uAA7F', nameof(UnicodeRanges.MyanmarExtendedA) }, new object[] { '\uAA80', '\uAADF', nameof(UnicodeRanges.TaiViet) }, new object[] { '\uAAE0', '\uAAFF', nameof(UnicodeRanges.MeeteiMayekExtensions) }, new object[] { '\uAB00', '\uAB2F', nameof(UnicodeRanges.EthiopicExtendedA) }, new object[] { '\uAB30', '\uAB6F', nameof(UnicodeRanges.LatinExtendedE) }, new object[] { '\uAB70', '\uABBF', nameof(UnicodeRanges.CherokeeSupplement) }, new object[] { '\uABC0', '\uABFF', nameof(UnicodeRanges.MeeteiMayek) }, new object[] { '\uAC00', '\uD7AF', nameof(UnicodeRanges.HangulSyllables) }, new object[] { '\uD7B0', '\uD7FF', nameof(UnicodeRanges.HangulJamoExtendedB) }, new object[] { '\uF900', '\uFAFF', nameof(UnicodeRanges.CjkCompatibilityIdeographs) }, new object[] { '\uFB00', '\uFB4F', nameof(UnicodeRanges.AlphabeticPresentationForms) }, new object[] { '\uFB50', '\uFDFF', nameof(UnicodeRanges.ArabicPresentationFormsA) }, new object[] { '\uFE00', '\uFE0F', nameof(UnicodeRanges.VariationSelectors) }, new object[] { '\uFE10', '\uFE1F', nameof(UnicodeRanges.VerticalForms) }, new object[] { '\uFE20', '\uFE2F', nameof(UnicodeRanges.CombiningHalfMarks) }, new object[] { '\uFE30', '\uFE4F', nameof(UnicodeRanges.CjkCompatibilityForms) }, new object[] { '\uFE50', '\uFE6F', nameof(UnicodeRanges.SmallFormVariants) }, new object[] { '\uFE70', '\uFEFF', nameof(UnicodeRanges.ArabicPresentationFormsB) }, new object[] { '\uFF00', '\uFFEF', nameof(UnicodeRanges.HalfwidthandFullwidthForms) }, new object[] { '\uFFF0', '\uFFFF', nameof(UnicodeRanges.Specials) }, }; } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // This file was generated by a tool. // See src/System.Text.Encodings.Web/tools/GenUnicodeRanges using System.Collections.Generic; namespace System.Text.Unicode.Tests { public static partial class UnicodeRangesTests { public static IEnumerable<object[]> UnicodeRanges_GeneratedData => new[] { new object[] { '\u0000', '\u007F', nameof(UnicodeRanges.BasicLatin) }, new object[] { '\u0080', '\u00FF', nameof(UnicodeRanges.Latin1Supplement) }, new object[] { '\u0100', '\u017F', nameof(UnicodeRanges.LatinExtendedA) }, new object[] { '\u0180', '\u024F', nameof(UnicodeRanges.LatinExtendedB) }, new object[] { '\u0250', '\u02AF', nameof(UnicodeRanges.IpaExtensions) }, new object[] { '\u02B0', '\u02FF', nameof(UnicodeRanges.SpacingModifierLetters) }, new object[] { '\u0300', '\u036F', nameof(UnicodeRanges.CombiningDiacriticalMarks) }, new object[] { '\u0370', '\u03FF', nameof(UnicodeRanges.GreekandCoptic) }, new object[] { '\u0400', '\u04FF', nameof(UnicodeRanges.Cyrillic) }, new object[] { '\u0500', '\u052F', nameof(UnicodeRanges.CyrillicSupplement) }, new object[] { '\u0530', '\u058F', nameof(UnicodeRanges.Armenian) }, new object[] { '\u0590', '\u05FF', nameof(UnicodeRanges.Hebrew) }, new object[] { '\u0600', '\u06FF', nameof(UnicodeRanges.Arabic) }, new object[] { '\u0700', '\u074F', nameof(UnicodeRanges.Syriac) }, new object[] { '\u0750', '\u077F', nameof(UnicodeRanges.ArabicSupplement) }, new object[] { '\u0780', '\u07BF', nameof(UnicodeRanges.Thaana) }, new object[] { '\u07C0', '\u07FF', nameof(UnicodeRanges.NKo) }, new object[] { '\u0800', '\u083F', nameof(UnicodeRanges.Samaritan) }, new object[] { '\u0840', '\u085F', nameof(UnicodeRanges.Mandaic) }, new object[] { '\u0860', '\u086F', nameof(UnicodeRanges.SyriacSupplement) }, new object[] { '\u0870', '\u089F', nameof(UnicodeRanges.ArabicExtendedB) }, new object[] { '\u08A0', '\u08FF', nameof(UnicodeRanges.ArabicExtendedA) }, new object[] { '\u0900', '\u097F', nameof(UnicodeRanges.Devanagari) }, new object[] { '\u0980', '\u09FF', nameof(UnicodeRanges.Bengali) }, new object[] { '\u0A00', '\u0A7F', nameof(UnicodeRanges.Gurmukhi) }, new object[] { '\u0A80', '\u0AFF', nameof(UnicodeRanges.Gujarati) }, new object[] { '\u0B00', '\u0B7F', nameof(UnicodeRanges.Oriya) }, new object[] { '\u0B80', '\u0BFF', nameof(UnicodeRanges.Tamil) }, new object[] { '\u0C00', '\u0C7F', nameof(UnicodeRanges.Telugu) }, new object[] { '\u0C80', '\u0CFF', nameof(UnicodeRanges.Kannada) }, new object[] { '\u0D00', '\u0D7F', nameof(UnicodeRanges.Malayalam) }, new object[] { '\u0D80', '\u0DFF', nameof(UnicodeRanges.Sinhala) }, new object[] { '\u0E00', '\u0E7F', nameof(UnicodeRanges.Thai) }, new object[] { '\u0E80', '\u0EFF', nameof(UnicodeRanges.Lao) }, new object[] { '\u0F00', '\u0FFF', nameof(UnicodeRanges.Tibetan) }, new object[] { '\u1000', '\u109F', nameof(UnicodeRanges.Myanmar) }, new object[] { '\u10A0', '\u10FF', nameof(UnicodeRanges.Georgian) }, new object[] { '\u1100', '\u11FF', nameof(UnicodeRanges.HangulJamo) }, new object[] { '\u1200', '\u137F', nameof(UnicodeRanges.Ethiopic) }, new object[] { '\u1380', '\u139F', nameof(UnicodeRanges.EthiopicSupplement) }, new object[] { '\u13A0', '\u13FF', nameof(UnicodeRanges.Cherokee) }, new object[] { '\u1400', '\u167F', nameof(UnicodeRanges.UnifiedCanadianAboriginalSyllabics) }, new object[] { '\u1680', '\u169F', nameof(UnicodeRanges.Ogham) }, new object[] { '\u16A0', '\u16FF', 
nameof(UnicodeRanges.Runic) }, new object[] { '\u1700', '\u171F', nameof(UnicodeRanges.Tagalog) }, new object[] { '\u1720', '\u173F', nameof(UnicodeRanges.Hanunoo) }, new object[] { '\u1740', '\u175F', nameof(UnicodeRanges.Buhid) }, new object[] { '\u1760', '\u177F', nameof(UnicodeRanges.Tagbanwa) }, new object[] { '\u1780', '\u17FF', nameof(UnicodeRanges.Khmer) }, new object[] { '\u1800', '\u18AF', nameof(UnicodeRanges.Mongolian) }, new object[] { '\u18B0', '\u18FF', nameof(UnicodeRanges.UnifiedCanadianAboriginalSyllabicsExtended) }, new object[] { '\u1900', '\u194F', nameof(UnicodeRanges.Limbu) }, new object[] { '\u1950', '\u197F', nameof(UnicodeRanges.TaiLe) }, new object[] { '\u1980', '\u19DF', nameof(UnicodeRanges.NewTaiLue) }, new object[] { '\u19E0', '\u19FF', nameof(UnicodeRanges.KhmerSymbols) }, new object[] { '\u1A00', '\u1A1F', nameof(UnicodeRanges.Buginese) }, new object[] { '\u1A20', '\u1AAF', nameof(UnicodeRanges.TaiTham) }, new object[] { '\u1AB0', '\u1AFF', nameof(UnicodeRanges.CombiningDiacriticalMarksExtended) }, new object[] { '\u1B00', '\u1B7F', nameof(UnicodeRanges.Balinese) }, new object[] { '\u1B80', '\u1BBF', nameof(UnicodeRanges.Sundanese) }, new object[] { '\u1BC0', '\u1BFF', nameof(UnicodeRanges.Batak) }, new object[] { '\u1C00', '\u1C4F', nameof(UnicodeRanges.Lepcha) }, new object[] { '\u1C50', '\u1C7F', nameof(UnicodeRanges.OlChiki) }, new object[] { '\u1C80', '\u1C8F', nameof(UnicodeRanges.CyrillicExtendedC) }, new object[] { '\u1C90', '\u1CBF', nameof(UnicodeRanges.GeorgianExtended) }, new object[] { '\u1CC0', '\u1CCF', nameof(UnicodeRanges.SundaneseSupplement) }, new object[] { '\u1CD0', '\u1CFF', nameof(UnicodeRanges.VedicExtensions) }, new object[] { '\u1D00', '\u1D7F', nameof(UnicodeRanges.PhoneticExtensions) }, new object[] { '\u1D80', '\u1DBF', nameof(UnicodeRanges.PhoneticExtensionsSupplement) }, new object[] { '\u1DC0', '\u1DFF', nameof(UnicodeRanges.CombiningDiacriticalMarksSupplement) }, new object[] { '\u1E00', '\u1EFF', nameof(UnicodeRanges.LatinExtendedAdditional) }, new object[] { '\u1F00', '\u1FFF', nameof(UnicodeRanges.GreekExtended) }, new object[] { '\u2000', '\u206F', nameof(UnicodeRanges.GeneralPunctuation) }, new object[] { '\u2070', '\u209F', nameof(UnicodeRanges.SuperscriptsandSubscripts) }, new object[] { '\u20A0', '\u20CF', nameof(UnicodeRanges.CurrencySymbols) }, new object[] { '\u20D0', '\u20FF', nameof(UnicodeRanges.CombiningDiacriticalMarksforSymbols) }, new object[] { '\u2100', '\u214F', nameof(UnicodeRanges.LetterlikeSymbols) }, new object[] { '\u2150', '\u218F', nameof(UnicodeRanges.NumberForms) }, new object[] { '\u2190', '\u21FF', nameof(UnicodeRanges.Arrows) }, new object[] { '\u2200', '\u22FF', nameof(UnicodeRanges.MathematicalOperators) }, new object[] { '\u2300', '\u23FF', nameof(UnicodeRanges.MiscellaneousTechnical) }, new object[] { '\u2400', '\u243F', nameof(UnicodeRanges.ControlPictures) }, new object[] { '\u2440', '\u245F', nameof(UnicodeRanges.OpticalCharacterRecognition) }, new object[] { '\u2460', '\u24FF', nameof(UnicodeRanges.EnclosedAlphanumerics) }, new object[] { '\u2500', '\u257F', nameof(UnicodeRanges.BoxDrawing) }, new object[] { '\u2580', '\u259F', nameof(UnicodeRanges.BlockElements) }, new object[] { '\u25A0', '\u25FF', nameof(UnicodeRanges.GeometricShapes) }, new object[] { '\u2600', '\u26FF', nameof(UnicodeRanges.MiscellaneousSymbols) }, new object[] { '\u2700', '\u27BF', nameof(UnicodeRanges.Dingbats) }, new object[] { '\u27C0', '\u27EF', nameof(UnicodeRanges.MiscellaneousMathematicalSymbolsA) }, new 
object[] { '\u27F0', '\u27FF', nameof(UnicodeRanges.SupplementalArrowsA) }, new object[] { '\u2800', '\u28FF', nameof(UnicodeRanges.BraillePatterns) }, new object[] { '\u2900', '\u297F', nameof(UnicodeRanges.SupplementalArrowsB) }, new object[] { '\u2980', '\u29FF', nameof(UnicodeRanges.MiscellaneousMathematicalSymbolsB) }, new object[] { '\u2A00', '\u2AFF', nameof(UnicodeRanges.SupplementalMathematicalOperators) }, new object[] { '\u2B00', '\u2BFF', nameof(UnicodeRanges.MiscellaneousSymbolsandArrows) }, new object[] { '\u2C00', '\u2C5F', nameof(UnicodeRanges.Glagolitic) }, new object[] { '\u2C60', '\u2C7F', nameof(UnicodeRanges.LatinExtendedC) }, new object[] { '\u2C80', '\u2CFF', nameof(UnicodeRanges.Coptic) }, new object[] { '\u2D00', '\u2D2F', nameof(UnicodeRanges.GeorgianSupplement) }, new object[] { '\u2D30', '\u2D7F', nameof(UnicodeRanges.Tifinagh) }, new object[] { '\u2D80', '\u2DDF', nameof(UnicodeRanges.EthiopicExtended) }, new object[] { '\u2DE0', '\u2DFF', nameof(UnicodeRanges.CyrillicExtendedA) }, new object[] { '\u2E00', '\u2E7F', nameof(UnicodeRanges.SupplementalPunctuation) }, new object[] { '\u2E80', '\u2EFF', nameof(UnicodeRanges.CjkRadicalsSupplement) }, new object[] { '\u2F00', '\u2FDF', nameof(UnicodeRanges.KangxiRadicals) }, new object[] { '\u2FF0', '\u2FFF', nameof(UnicodeRanges.IdeographicDescriptionCharacters) }, new object[] { '\u3000', '\u303F', nameof(UnicodeRanges.CjkSymbolsandPunctuation) }, new object[] { '\u3040', '\u309F', nameof(UnicodeRanges.Hiragana) }, new object[] { '\u30A0', '\u30FF', nameof(UnicodeRanges.Katakana) }, new object[] { '\u3100', '\u312F', nameof(UnicodeRanges.Bopomofo) }, new object[] { '\u3130', '\u318F', nameof(UnicodeRanges.HangulCompatibilityJamo) }, new object[] { '\u3190', '\u319F', nameof(UnicodeRanges.Kanbun) }, new object[] { '\u31A0', '\u31BF', nameof(UnicodeRanges.BopomofoExtended) }, new object[] { '\u31C0', '\u31EF', nameof(UnicodeRanges.CjkStrokes) }, new object[] { '\u31F0', '\u31FF', nameof(UnicodeRanges.KatakanaPhoneticExtensions) }, new object[] { '\u3200', '\u32FF', nameof(UnicodeRanges.EnclosedCjkLettersandMonths) }, new object[] { '\u3300', '\u33FF', nameof(UnicodeRanges.CjkCompatibility) }, new object[] { '\u3400', '\u4DBF', nameof(UnicodeRanges.CjkUnifiedIdeographsExtensionA) }, new object[] { '\u4DC0', '\u4DFF', nameof(UnicodeRanges.YijingHexagramSymbols) }, new object[] { '\u4E00', '\u9FFF', nameof(UnicodeRanges.CjkUnifiedIdeographs) }, new object[] { '\uA000', '\uA48F', nameof(UnicodeRanges.YiSyllables) }, new object[] { '\uA490', '\uA4CF', nameof(UnicodeRanges.YiRadicals) }, new object[] { '\uA4D0', '\uA4FF', nameof(UnicodeRanges.Lisu) }, new object[] { '\uA500', '\uA63F', nameof(UnicodeRanges.Vai) }, new object[] { '\uA640', '\uA69F', nameof(UnicodeRanges.CyrillicExtendedB) }, new object[] { '\uA6A0', '\uA6FF', nameof(UnicodeRanges.Bamum) }, new object[] { '\uA700', '\uA71F', nameof(UnicodeRanges.ModifierToneLetters) }, new object[] { '\uA720', '\uA7FF', nameof(UnicodeRanges.LatinExtendedD) }, new object[] { '\uA800', '\uA82F', nameof(UnicodeRanges.SylotiNagri) }, new object[] { '\uA830', '\uA83F', nameof(UnicodeRanges.CommonIndicNumberForms) }, new object[] { '\uA840', '\uA87F', nameof(UnicodeRanges.Phagspa) }, new object[] { '\uA880', '\uA8DF', nameof(UnicodeRanges.Saurashtra) }, new object[] { '\uA8E0', '\uA8FF', nameof(UnicodeRanges.DevanagariExtended) }, new object[] { '\uA900', '\uA92F', nameof(UnicodeRanges.KayahLi) }, new object[] { '\uA930', '\uA95F', nameof(UnicodeRanges.Rejang) }, new object[] { 
'\uA960', '\uA97F', nameof(UnicodeRanges.HangulJamoExtendedA) }, new object[] { '\uA980', '\uA9DF', nameof(UnicodeRanges.Javanese) }, new object[] { '\uA9E0', '\uA9FF', nameof(UnicodeRanges.MyanmarExtendedB) }, new object[] { '\uAA00', '\uAA5F', nameof(UnicodeRanges.Cham) }, new object[] { '\uAA60', '\uAA7F', nameof(UnicodeRanges.MyanmarExtendedA) }, new object[] { '\uAA80', '\uAADF', nameof(UnicodeRanges.TaiViet) }, new object[] { '\uAAE0', '\uAAFF', nameof(UnicodeRanges.MeeteiMayekExtensions) }, new object[] { '\uAB00', '\uAB2F', nameof(UnicodeRanges.EthiopicExtendedA) }, new object[] { '\uAB30', '\uAB6F', nameof(UnicodeRanges.LatinExtendedE) }, new object[] { '\uAB70', '\uABBF', nameof(UnicodeRanges.CherokeeSupplement) }, new object[] { '\uABC0', '\uABFF', nameof(UnicodeRanges.MeeteiMayek) }, new object[] { '\uAC00', '\uD7AF', nameof(UnicodeRanges.HangulSyllables) }, new object[] { '\uD7B0', '\uD7FF', nameof(UnicodeRanges.HangulJamoExtendedB) }, new object[] { '\uF900', '\uFAFF', nameof(UnicodeRanges.CjkCompatibilityIdeographs) }, new object[] { '\uFB00', '\uFB4F', nameof(UnicodeRanges.AlphabeticPresentationForms) }, new object[] { '\uFB50', '\uFDFF', nameof(UnicodeRanges.ArabicPresentationFormsA) }, new object[] { '\uFE00', '\uFE0F', nameof(UnicodeRanges.VariationSelectors) }, new object[] { '\uFE10', '\uFE1F', nameof(UnicodeRanges.VerticalForms) }, new object[] { '\uFE20', '\uFE2F', nameof(UnicodeRanges.CombiningHalfMarks) }, new object[] { '\uFE30', '\uFE4F', nameof(UnicodeRanges.CjkCompatibilityForms) }, new object[] { '\uFE50', '\uFE6F', nameof(UnicodeRanges.SmallFormVariants) }, new object[] { '\uFE70', '\uFEFF', nameof(UnicodeRanges.ArabicPresentationFormsB) }, new object[] { '\uFF00', '\uFFEF', nameof(UnicodeRanges.HalfwidthandFullwidthForms) }, new object[] { '\uFFF0', '\uFFFF', nameof(UnicodeRanges.Specials) }, }; } }
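The generated pairs are typically consumed by an xUnit theory; a hypothetical consumer of `UnicodeRanges_GeneratedData` (the assertion shape is an assumption, not the repo's actual test):

```csharp
using System.Reflection;
using System.Text.Unicode;
using System.Text.Unicode.Tests;
using Xunit;

public static class UnicodeRangesBoundsExample
{
    [Theory]
    [MemberData(nameof(UnicodeRangesTests.UnicodeRanges_GeneratedData), MemberType = typeof(UnicodeRangesTests))]
    public static void Range_MatchesGeneratedBounds(char first, char last, string propertyName)
    {
        // Resolve the named static property, e.g. UnicodeRanges.BasicLatin.
        UnicodeRange range = (UnicodeRange)typeof(UnicodeRanges)
            .GetProperty(propertyName, BindingFlags.Public | BindingFlags.Static)!
            .GetValue(null)!;

        // The table stores the first and last code point of each named block.
        Assert.Equal(first, (char)range.FirstCodePoint);
        Assert.Equal(last, (char)(range.FirstCodePoint + range.Length - 1));
    }
}
```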
-1
dotnet/runtime
65,899
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync
I was unable to repro #65835 locally for a few hours, but I believe that it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`, and my recent changes from #64997 have just exposed the problem by adding the finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync`, but not calling `GC.SuppressFinalize(this)`. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I guess (I was not able to repro it) that it's caused by a flush implemented for the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flushing should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I've tried really hard to write a failing unit test, but I've failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base impl and is bug-free.
adamsitnik
"2022-02-25T17:17:25Z"
"2022-03-01T13:15:47Z"
097d9ea3c1584eb8745bd0a72ebf9cd3a31f1618
3cbed4b71800709a8121e50e964ae5b02bd80b94
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync. I was unable to repro #65835 locally for a few hours, but I believe that it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`, and my recent changes from #64997 have just exposed the problem by adding the finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync`, but not calling `GC.SuppressFinalize(this)`. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I guess (I was not able to repro it) that it's caused by a flush implemented for the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flushing should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I've tried really hard to write a failing unit test, but I've failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base impl and is bug-free.
./src/libraries/Common/src/Interop/Windows/WtsApi32/Interop.WTSUnRegisterSessionNotification.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Runtime.InteropServices; internal static partial class Interop { internal static partial class Wtsapi32 { [GeneratedDllImport(Libraries.Wtsapi32)] public static partial bool WTSUnRegisterSessionNotification(IntPtr hWnd); } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Runtime.InteropServices; internal static partial class Interop { internal static partial class Wtsapi32 { [GeneratedDllImport(Libraries.Wtsapi32)] public static partial bool WTSUnRegisterSessionNotification(IntPtr hWnd); } }
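For context, the Win32 API pairs this with a register call; a hypothetical counterpart declaration in the same style (the signature follows the public Win32 prototype and is an assumption, not code from this repo):

```csharp
using System;
using System.Runtime.InteropServices;

internal static partial class Interop
{
    internal static partial class Wtsapi32
    {
        internal const int NOTIFY_FOR_THIS_SESSION = 0;

        // Registers hWnd to receive WM_WTSSESSION_CHANGE notifications;
        // balanced later by WTSUnRegisterSessionNotification above.
        [GeneratedDllImport(Libraries.Wtsapi32)]
        public static partial bool WTSRegisterSessionNotification(IntPtr hWnd, int dwFlags);
    }
}
```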
-1
dotnet/runtime
65,899
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync
I was unable to repro #65835 locally for a few hours, but I believe that it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`, and my recent changes from #64997 have just exposed the problem by adding the finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync`, but not calling `GC.SuppressFinalize(this)`. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I guess (I was not able to repro it) that it's caused by a flush implemented for the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flushing should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I've tried really hard to write a failing unit test, but I've failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base impl and is bug-free.
adamsitnik
"2022-02-25T17:17:25Z"
"2022-03-01T13:15:47Z"
097d9ea3c1584eb8745bd0a72ebf9cd3a31f1618
3cbed4b71800709a8121e50e964ae5b02bd80b94
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync. I was unable to repro #65835 locally for a few hours, but I believe that it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`, and my recent changes from #64997 have just exposed the problem by adding the finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync`, but not calling `GC.SuppressFinalize(this)`. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I guess (I was not able to repro it) that it's caused by a flush implemented for the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flushing should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I've tried really hard to write a failing unit test, but I've failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base impl and is bug-free.
./src/tests/JIT/Methodical/explicit/coverage/expl_gc_float_1.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Runtime.InteropServices; using Xunit; [StructLayout(LayoutKind.Explicit)] internal class AA { [FieldOffset(13)] public long tmp1; [FieldOffset(7)] public long tmp2; [FieldOffset(7)] public long tmp3; [FieldOffset(8)] public float q; [FieldOffset(36)] public int tmp4; [FieldOffset(44)] public long tmp5; [FieldOffset(39)] public uint tmp6; public AA(float qq) { tmp1 = 0; tmp2 = 0; tmp3 = 0; tmp4 = 0; tmp5 = 0; tmp6 = 0; q = qq; } public static AA[] a_init = new AA[101]; public static AA[] a_zero = new AA[101]; public static AA[,,] aa_init = new AA[1, 101, 2]; public static AA[,,] aa_zero = new AA[1, 101, 2]; public static object b_init = new AA(100); public static AA _init, _zero; public static float call_target(float arg) { return arg; } public static float call_target_ref(ref float arg) { return arg; } public void verify() { } public static void verify_all() { a_init[100].verify(); a_zero[100].verify(); aa_init[0, 99, 1].verify(); aa_zero[0, 99, 1].verify(); _init.verify(); _zero.verify(); BB.f_init.verify(); BB.f_zero.verify(); } public static void reset() { a_init[100] = new AA(100); a_zero[100] = new AA(0); aa_init[0, 99, 1] = new AA(100); aa_zero[0, 99, 1] = new AA(0); _init = new AA(100); _zero = new AA(0); BB.f_init = new AA(100); BB.f_zero = new AA(0); } } internal struct BB { public static AA f_init, f_zero; } public static class Test_expl_gc_float_1 { [Fact] public static int TestEntrypoint() { return TestApp.RunAllTests(); } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Runtime.InteropServices; using Xunit; [StructLayout(LayoutKind.Explicit)] internal class AA { [FieldOffset(13)] public long tmp1; [FieldOffset(7)] public long tmp2; [FieldOffset(7)] public long tmp3; [FieldOffset(8)] public float q; [FieldOffset(36)] public int tmp4; [FieldOffset(44)] public long tmp5; [FieldOffset(39)] public uint tmp6; public AA(float qq) { tmp1 = 0; tmp2 = 0; tmp3 = 0; tmp4 = 0; tmp5 = 0; tmp6 = 0; q = qq; } public static AA[] a_init = new AA[101]; public static AA[] a_zero = new AA[101]; public static AA[,,] aa_init = new AA[1, 101, 2]; public static AA[,,] aa_zero = new AA[1, 101, 2]; public static object b_init = new AA(100); public static AA _init, _zero; public static float call_target(float arg) { return arg; } public static float call_target_ref(ref float arg) { return arg; } public void verify() { } public static void verify_all() { a_init[100].verify(); a_zero[100].verify(); aa_init[0, 99, 1].verify(); aa_zero[0, 99, 1].verify(); _init.verify(); _zero.verify(); BB.f_init.verify(); BB.f_zero.verify(); } public static void reset() { a_init[100] = new AA(100); a_zero[100] = new AA(0); aa_init[0, 99, 1] = new AA(100); aa_zero[0, 99, 1] = new AA(0); _init = new AA(100); _zero = new AA(0); BB.f_init = new AA(100); BB.f_zero = new AA(0); } } internal struct BB { public static AA f_init, f_zero; } public static class Test_expl_gc_float_1 { [Fact] public static int TestEntrypoint() { return TestApp.RunAllTests(); } }
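The before/after content above is an unchanged JIT test (label -1 marks it as irrelevant to the PR) built around `[StructLayout(LayoutKind.Explicit)]` with overlapping `[FieldOffset]` fields. For readers unfamiliar with that attribute pair, a minimal standalone illustration — hypothetical, not part of the test suite — of how overlapping offsets alias the same storage:

```csharp
using System;
using System.Runtime.InteropServices;

[StructLayout(LayoutKind.Explicit)]
struct Overlay
{
    [FieldOffset(0)] public float F;   // both fields start at byte 0,
    [FieldOffset(0)] public uint Bits; // so Bits reads F's raw IEEE 754 encoding
}

public static class Demo
{
    public static void Main()
    {
        var o = new Overlay { F = 1.0f };
        Console.WriteLine($"0x{o.Bits:X8}"); // prints 0x3F800000
    }
}
```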
-1
dotnet/runtime
65,899
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync
I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have just exposed the problem by adding a finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which in turn calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I suspect (I was not able to repro it) that this is caused by the flush performed by the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flush should, in theory, see that there is nothing to flush. Moreover, it should also observe that the handle was already closed: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I tried really hard to write a failing unit test, but failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base implementation and is bug-free.
adamsitnik
"2022-02-25T17:17:25Z"
"2022-03-01T13:15:47Z"
097d9ea3c1584eb8745bd0a72ebf9cd3a31f1618
3cbed4b71800709a8121e50e964ae5b02bd80b94
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync. I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have just exposed the problem by adding a finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which in turn calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I suspect (I was not able to repro it) that this is caused by the flush performed by the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flush should, in theory, see that there is nothing to flush. Moreover, it should also observe that the handle was already closed: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I tried really hard to write a failing unit test, but failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base implementation and is bug-free.
./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd/ShiftLogicalRounded.Vector128.Byte.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void ShiftLogicalRounded_Vector128_Byte() { var test = new SimpleBinaryOpTest__ShiftLogicalRounded_Vector128_Byte(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleBinaryOpTest__ShiftLogicalRounded_Vector128_Byte { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(Byte[] inArray1, SByte[] inArray2, Byte[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Byte>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<SByte>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Byte>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < 
sizeOfinArray2 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Byte, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<SByte, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<Byte> _fld1; public Vector128<SByte> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Byte>, byte>(ref testStruct._fld1), ref Unsafe.As<Byte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Byte>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref testStruct._fld2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>()); return testStruct; } public void RunStructFldScenario(SimpleBinaryOpTest__ShiftLogicalRounded_Vector128_Byte testClass) { var result = AdvSimd.ShiftLogicalRounded(_fld1, _fld2); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(SimpleBinaryOpTest__ShiftLogicalRounded_Vector128_Byte testClass) { fixed (Vector128<Byte>* pFld1 = &_fld1) fixed (Vector128<SByte>* pFld2 = &_fld2) { var result = AdvSimd.ShiftLogicalRounded( AdvSimd.LoadVector128((Byte*)(pFld1)), AdvSimd.LoadVector128((SByte*)(pFld2)) ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Byte>>() / sizeof(Byte); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<SByte>>() / sizeof(SByte); private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Byte>>() / sizeof(Byte); private static Byte[] _data1 = new Byte[Op1ElementCount]; private static SByte[] _data2 = new SByte[Op2ElementCount]; private static Vector128<Byte> _clsVar1; private static Vector128<SByte> _clsVar2; private Vector128<Byte> _fld1; private Vector128<SByte> _fld2; private DataTable _dataTable; static SimpleBinaryOpTest__ShiftLogicalRounded_Vector128_Byte() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); } Unsafe.CopyBlockUnaligned(ref 
Unsafe.As<Vector128<Byte>, byte>(ref _clsVar1), ref Unsafe.As<Byte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Byte>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref _clsVar2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>()); } public SimpleBinaryOpTest__ShiftLogicalRounded_Vector128_Byte() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Byte>, byte>(ref _fld1), ref Unsafe.As<Byte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Byte>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref _fld2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } _dataTable = new DataTable(_data1, _data2, new Byte[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.ShiftLogicalRounded( Unsafe.Read<Vector128<Byte>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<SByte>>(_dataTable.inArray2Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.ShiftLogicalRounded( AdvSimd.LoadVector128((Byte*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector128((SByte*)(_dataTable.inArray2Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ShiftLogicalRounded), new Type[] { typeof(Vector128<Byte>), typeof(Vector128<SByte>) }) .Invoke(null, new object[] { Unsafe.Read<Vector128<Byte>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<SByte>>(_dataTable.inArray2Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Byte>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ShiftLogicalRounded), new Type[] { typeof(Vector128<Byte>), typeof(Vector128<SByte>) }) .Invoke(null, new object[] { AdvSimd.LoadVector128((Byte*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector128((SByte*)(_dataTable.inArray2Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Byte>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.ShiftLogicalRounded( _clsVar1, _clsVar2 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } 
public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector128<Byte>* pClsVar1 = &_clsVar1) fixed (Vector128<SByte>* pClsVar2 = &_clsVar2) { var result = AdvSimd.ShiftLogicalRounded( AdvSimd.LoadVector128((Byte*)(pClsVar1)), AdvSimd.LoadVector128((SByte*)(pClsVar2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<Byte>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector128<SByte>>(_dataTable.inArray2Ptr); var result = AdvSimd.ShiftLogicalRounded(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector128((Byte*)(_dataTable.inArray1Ptr)); var op2 = AdvSimd.LoadVector128((SByte*)(_dataTable.inArray2Ptr)); var result = AdvSimd.ShiftLogicalRounded(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleBinaryOpTest__ShiftLogicalRounded_Vector128_Byte(); var result = AdvSimd.ShiftLogicalRounded(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new SimpleBinaryOpTest__ShiftLogicalRounded_Vector128_Byte(); fixed (Vector128<Byte>* pFld1 = &test._fld1) fixed (Vector128<SByte>* pFld2 = &test._fld2) { var result = AdvSimd.ShiftLogicalRounded( AdvSimd.LoadVector128((Byte*)(pFld1)), AdvSimd.LoadVector128((SByte*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.ShiftLogicalRounded(_fld1, _fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector128<Byte>* pFld1 = &_fld1) fixed (Vector128<SByte>* pFld2 = &_fld2) { var result = AdvSimd.ShiftLogicalRounded( AdvSimd.LoadVector128((Byte*)(pFld1)), AdvSimd.LoadVector128((SByte*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.ShiftLogicalRounded(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.ShiftLogicalRounded( AdvSimd.LoadVector128((Byte*)(&test._fld1)), AdvSimd.LoadVector128((SByte*)(&test._fld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } 
public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector128<Byte> op1, Vector128<SByte> op2, void* result, [CallerMemberName] string method = "") { Byte[] inArray1 = new Byte[Op1ElementCount]; SByte[] inArray2 = new SByte[Op2ElementCount]; Byte[] outArray = new Byte[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Byte, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref inArray2[0]), op2); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Byte>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "") { Byte[] inArray1 = new Byte[Op1ElementCount]; SByte[] inArray2 = new SByte[Op2ElementCount]; Byte[] outArray = new Byte[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<Byte>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<SByte>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Byte>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(Byte[] left, SByte[] right, Byte[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.ShiftLogicalRounded(left[i], right[i]) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.ShiftLogicalRounded)}<Byte>(Vector128<Byte>, Vector128<SByte>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void ShiftLogicalRounded_Vector128_Byte() { var test = new SimpleBinaryOpTest__ShiftLogicalRounded_Vector128_Byte(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleBinaryOpTest__ShiftLogicalRounded_Vector128_Byte { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(Byte[] inArray1, SByte[] inArray2, Byte[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Byte>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<SByte>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Byte>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < 
sizeOfinArray2 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Byte, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<SByte, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<Byte> _fld1; public Vector128<SByte> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Byte>, byte>(ref testStruct._fld1), ref Unsafe.As<Byte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Byte>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref testStruct._fld2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>()); return testStruct; } public void RunStructFldScenario(SimpleBinaryOpTest__ShiftLogicalRounded_Vector128_Byte testClass) { var result = AdvSimd.ShiftLogicalRounded(_fld1, _fld2); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(SimpleBinaryOpTest__ShiftLogicalRounded_Vector128_Byte testClass) { fixed (Vector128<Byte>* pFld1 = &_fld1) fixed (Vector128<SByte>* pFld2 = &_fld2) { var result = AdvSimd.ShiftLogicalRounded( AdvSimd.LoadVector128((Byte*)(pFld1)), AdvSimd.LoadVector128((SByte*)(pFld2)) ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Byte>>() / sizeof(Byte); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<SByte>>() / sizeof(SByte); private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Byte>>() / sizeof(Byte); private static Byte[] _data1 = new Byte[Op1ElementCount]; private static SByte[] _data2 = new SByte[Op2ElementCount]; private static Vector128<Byte> _clsVar1; private static Vector128<SByte> _clsVar2; private Vector128<Byte> _fld1; private Vector128<SByte> _fld2; private DataTable _dataTable; static SimpleBinaryOpTest__ShiftLogicalRounded_Vector128_Byte() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); } Unsafe.CopyBlockUnaligned(ref 
Unsafe.As<Vector128<Byte>, byte>(ref _clsVar1), ref Unsafe.As<Byte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Byte>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref _clsVar2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>()); } public SimpleBinaryOpTest__ShiftLogicalRounded_Vector128_Byte() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Byte>, byte>(ref _fld1), ref Unsafe.As<Byte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Byte>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<SByte>, byte>(ref _fld2), ref Unsafe.As<SByte, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<SByte>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetSByte(); } _dataTable = new DataTable(_data1, _data2, new Byte[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.ShiftLogicalRounded( Unsafe.Read<Vector128<Byte>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<SByte>>(_dataTable.inArray2Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.ShiftLogicalRounded( AdvSimd.LoadVector128((Byte*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector128((SByte*)(_dataTable.inArray2Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ShiftLogicalRounded), new Type[] { typeof(Vector128<Byte>), typeof(Vector128<SByte>) }) .Invoke(null, new object[] { Unsafe.Read<Vector128<Byte>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<SByte>>(_dataTable.inArray2Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Byte>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.ShiftLogicalRounded), new Type[] { typeof(Vector128<Byte>), typeof(Vector128<SByte>) }) .Invoke(null, new object[] { AdvSimd.LoadVector128((Byte*)(_dataTable.inArray1Ptr)), AdvSimd.LoadVector128((SByte*)(_dataTable.inArray2Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Byte>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.ShiftLogicalRounded( _clsVar1, _clsVar2 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } 
public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector128<Byte>* pClsVar1 = &_clsVar1) fixed (Vector128<SByte>* pClsVar2 = &_clsVar2) { var result = AdvSimd.ShiftLogicalRounded( AdvSimd.LoadVector128((Byte*)(pClsVar1)), AdvSimd.LoadVector128((SByte*)(pClsVar2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<Byte>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector128<SByte>>(_dataTable.inArray2Ptr); var result = AdvSimd.ShiftLogicalRounded(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector128((Byte*)(_dataTable.inArray1Ptr)); var op2 = AdvSimd.LoadVector128((SByte*)(_dataTable.inArray2Ptr)); var result = AdvSimd.ShiftLogicalRounded(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleBinaryOpTest__ShiftLogicalRounded_Vector128_Byte(); var result = AdvSimd.ShiftLogicalRounded(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new SimpleBinaryOpTest__ShiftLogicalRounded_Vector128_Byte(); fixed (Vector128<Byte>* pFld1 = &test._fld1) fixed (Vector128<SByte>* pFld2 = &test._fld2) { var result = AdvSimd.ShiftLogicalRounded( AdvSimd.LoadVector128((Byte*)(pFld1)), AdvSimd.LoadVector128((SByte*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.ShiftLogicalRounded(_fld1, _fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector128<Byte>* pFld1 = &_fld1) fixed (Vector128<SByte>* pFld2 = &_fld2) { var result = AdvSimd.ShiftLogicalRounded( AdvSimd.LoadVector128((Byte*)(pFld1)), AdvSimd.LoadVector128((SByte*)(pFld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.ShiftLogicalRounded(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.ShiftLogicalRounded( AdvSimd.LoadVector128((Byte*)(&test._fld1)), AdvSimd.LoadVector128((SByte*)(&test._fld2)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } 
public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector128<Byte> op1, Vector128<SByte> op2, void* result, [CallerMemberName] string method = "") { Byte[] inArray1 = new Byte[Op1ElementCount]; SByte[] inArray2 = new SByte[Op2ElementCount]; Byte[] outArray = new Byte[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Byte, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<SByte, byte>(ref inArray2[0]), op2); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Byte>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "") { Byte[] inArray1 = new Byte[Op1ElementCount]; SByte[] inArray2 = new SByte[Op2ElementCount]; Byte[] outArray = new Byte[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<Byte>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<SByte, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<SByte>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Byte>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(Byte[] left, SByte[] right, Byte[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.ShiftLogicalRounded(left[i], right[i]) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.ShiftLogicalRounded)}<Byte>(Vector128<Byte>, Vector128<SByte>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
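The `ValidateResult` loop in the test above checks every lane against `Helpers.ShiftLogicalRounded`. A plausible scalar model of the AArch64 `URSHL` (unsigned rounding shift left) semantics this intrinsic maps to — my reading of the instruction, not the test library's actual helper — is:

```csharp
// Scalar model of a rounding logical shift for one byte lane.
// The shift count is signed: positive shifts left, negative shifts right
// with rounding (half of the first discarded bit's weight is added first).
// Shift magnitudes are assumed to stay within the element width (|shift| <= 8).
static byte ShiftLogicalRounded(byte value, sbyte shift)
{
    if (shift >= 0)
        return (byte)(value << shift);         // plain logical left shift

    int amount = -shift;
    int rounded = value + (1 << (amount - 1)); // add 0.5 ULP before shifting
    return (byte)(rounded >> amount);
}
```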
-1
dotnet/runtime
65,899
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync
I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have just exposed the problem by adding a finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which in turn calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I suspect (I was not able to repro it) that this is caused by the flush performed by the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flush should, in theory, see that there is nothing to flush. Moreover, it should also observe that the handle was already closed: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I tried really hard to write a failing unit test, but failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base implementation and is bug-free.
adamsitnik
"2022-02-25T17:17:25Z"
"2022-03-01T13:15:47Z"
097d9ea3c1584eb8745bd0a72ebf9cd3a31f1618
3cbed4b71800709a8121e50e964ae5b02bd80b94
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync. I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have just exposed the problem by adding a finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which in turn calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I suspect (I was not able to repro it) that this is caused by the flush performed by the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flush should, in theory, see that there is nothing to flush. Moreover, it should also observe that the handle was already closed: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I tried really hard to write a failing unit test, but failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base implementation and is bug-free.
./src/libraries/System.Runtime/tests/System/Diagnostics/CodeAnalysis/DynamicallyAccessedMembersAttributeTests.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using Xunit; namespace System.Diagnostics.CodeAnalysis.Tests { public class DynamicallyAccessedMembersAttributeTests { [Theory] [InlineData(DynamicallyAccessedMemberTypes.None)] [InlineData(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties)] [InlineData(DynamicallyAccessedMemberTypes.All)] public void TestConstructor(DynamicallyAccessedMemberTypes memberTypes) { var dama = new DynamicallyAccessedMembersAttribute(memberTypes); Assert.Equal(memberTypes, dama.MemberTypes); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using Xunit; namespace System.Diagnostics.CodeAnalysis.Tests { public class DynamicallyAccessedMembersAttributeTests { [Theory] [InlineData(DynamicallyAccessedMemberTypes.None)] [InlineData(DynamicallyAccessedMemberTypes.PublicConstructors | DynamicallyAccessedMemberTypes.PublicProperties)] [InlineData(DynamicallyAccessedMemberTypes.All)] public void TestConstructor(DynamicallyAccessedMemberTypes memberTypes) { var dama = new DynamicallyAccessedMembersAttribute(memberTypes); Assert.Equal(memberTypes, dama.MemberTypes); } } }
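The attribute exercised by the test above is consumed by trimming tools rather than at runtime. A typical (hypothetical) usage, which promises the trimmer which members reflection will need so they survive trimming:

```csharp
using System;
using System.Diagnostics.CodeAnalysis;

public static class Factory
{
    // The annotation tells the trimmer to keep the public parameterless
    // constructor of whatever Type flows into this parameter.
    public static object Create(
        [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicParameterlessConstructor)]
        Type type)
        => Activator.CreateInstance(type)!;
}
```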
-1
dotnet/runtime
65,899
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync
I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have just exposed the problem by adding a finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which in turn calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I suspect (I was not able to repro it) that this is caused by the flush performed by the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flush should, in theory, see that there is nothing to flush. Moreover, it should also observe that the handle was already closed: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I tried really hard to write a failing unit test, but failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base implementation and is bug-free.
adamsitnik
"2022-02-25T17:17:25Z"
"2022-03-01T13:15:47Z"
097d9ea3c1584eb8745bd0a72ebf9cd3a31f1618
3cbed4b71800709a8121e50e964ae5b02bd80b94
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync. I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have just exposed the problem by adding a finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which in turn calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I suspect (I was not able to repro it) that this is caused by the flush performed by the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flush should, in theory, see that there is nothing to flush. Moreover, it should also observe that the handle was already closed: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I tried really hard to write a failing unit test, but failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base implementation and is bug-free.
./src/libraries/Common/src/Interop/Unix/System.Native/Interop.FAllocate.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Runtime.InteropServices; using Microsoft.Win32.SafeHandles; internal static partial class Interop { internal static partial class Sys { /// <summary> /// Returns -1 on error, 0 on success. /// </summary> [GeneratedDllImport(Libraries.SystemNative, EntryPoint = "SystemNative_FAllocate", SetLastError = true)] internal static partial int FAllocate(SafeFileHandle fd, long offset, long length); } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Runtime.InteropServices; using Microsoft.Win32.SafeHandles; internal static partial class Interop { internal static partial class Sys { /// <summary> /// Returns -1 on error, 0 on success. /// </summary> [GeneratedDllImport(Libraries.SystemNative, EntryPoint = "SystemNative_FAllocate", SetLastError = true)] internal static partial int FAllocate(SafeFileHandle fd, long offset, long length); } }
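`Interop.Sys.FAllocate` is internal to the libraries, so it cannot be called directly; on Unix it is, to my understanding, what backs file preallocation for newly created files. A sketch of exercising it through public API via `File.OpenHandle`'s `preallocationSize` parameter:

```csharp
using System.IO;
using System.Text;
using Microsoft.Win32.SafeHandles;

public static class Demo
{
    public static void Main()
    {
        // On Unix, a non-zero preallocationSize for a newly created file
        // should flow down to SystemNative_FAllocate for this handle.
        using SafeFileHandle handle = File.OpenHandle(
            "data.bin", FileMode.Create, FileAccess.Write,
            FileShare.None, FileOptions.None, preallocationSize: 1024 * 1024);

        RandomAccess.Write(handle, Encoding.UTF8.GetBytes("hello"), fileOffset: 0);
    }
}
```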
-1
dotnet/runtime
65,899
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync
I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have just exposed the problem by adding a finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which in turn calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I suspect (I was not able to repro it) that this is caused by the flush performed by the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flush should, in theory, see that there is nothing to flush. Moreover, it should also observe that the handle was already closed: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I tried really hard to write a failing unit test, but failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base implementation and is bug-free.
adamsitnik
"2022-02-25T17:17:25Z"
"2022-03-01T13:15:47Z"
097d9ea3c1584eb8745bd0a72ebf9cd3a31f1618
3cbed4b71800709a8121e50e964ae5b02bd80b94
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync. I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have just exposed the problem by adding a finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which in turn calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I suspect (I was not able to repro it) that this is caused by the flush performed by the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flush should, in theory, see that there is nothing to flush. Moreover, it should also observe that the handle was already closed: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I tried really hard to write a failing unit test, but failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base implementation and is bug-free.
./src/tests/JIT/HardwareIntrinsics/General/Vector128/BitwiseOr.UInt64.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; namespace JIT.HardwareIntrinsics.General { public static partial class Program { private static void BitwiseOrUInt64() { var test = new VectorBinaryOpTest__BitwiseOrUInt64(); // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); // Validates passing the field of a local class works test.RunClassLclFldScenario(); // Validates passing an instance member of a class works test.RunClassFldScenario(); // Validates passing the field of a local struct works test.RunStructLclFldScenario(); // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class VectorBinaryOpTest__BitwiseOrUInt64 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(UInt64[] inArray1, UInt64[] inArray2, UInt64[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<UInt64>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<UInt64>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<UInt64>(); if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<UInt64, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<UInt64, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return 
(void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<UInt64> _fld1; public Vector128<UInt64> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt64>, byte>(ref testStruct._fld1), ref Unsafe.As<UInt64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<UInt64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt64>, byte>(ref testStruct._fld2), ref Unsafe.As<UInt64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt64>>()); return testStruct; } public void RunStructFldScenario(VectorBinaryOpTest__BitwiseOrUInt64 testClass) { var result = Vector128.BitwiseOr(_fld1, _fld2); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<UInt64>>() / sizeof(UInt64); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<UInt64>>() / sizeof(UInt64); private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<UInt64>>() / sizeof(UInt64); private static UInt64[] _data1 = new UInt64[Op1ElementCount]; private static UInt64[] _data2 = new UInt64[Op2ElementCount]; private static Vector128<UInt64> _clsVar1; private static Vector128<UInt64> _clsVar2; private Vector128<UInt64> _fld1; private Vector128<UInt64> _fld2; private DataTable _dataTable; static VectorBinaryOpTest__BitwiseOrUInt64() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt64>, byte>(ref _clsVar1), ref Unsafe.As<UInt64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<UInt64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt64>, byte>(ref _clsVar2), ref Unsafe.As<UInt64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt64>>()); } public VectorBinaryOpTest__BitwiseOrUInt64() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt64>, byte>(ref _fld1), ref Unsafe.As<UInt64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<UInt64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt64>, byte>(ref _fld2), ref Unsafe.As<UInt64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt64>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt64(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt64(); } _dataTable = new DataTable(_data1, _data2, new UInt64[RetElementCount], LargestVectorSize); } public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = Vector128.BitwiseOr( Unsafe.Read<Vector128<UInt64>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<UInt64>>(_dataTable.inArray2Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, 
_dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var method = typeof(Vector128).GetMethod(nameof(Vector128.BitwiseOr), new Type[] { typeof(Vector128<UInt64>), typeof(Vector128<UInt64>) }); if (method is null) { method = typeof(Vector128).GetMethod(nameof(Vector128.BitwiseOr), 1, new Type[] { typeof(Vector128<>).MakeGenericType(Type.MakeGenericMethodParameter(0)), typeof(Vector128<>).MakeGenericType(Type.MakeGenericMethodParameter(0)) }); } if (method.IsGenericMethodDefinition) { method = method.MakeGenericMethod(typeof(UInt64)); } var result = method.Invoke(null, new object[] { Unsafe.Read<Vector128<UInt64>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<UInt64>>(_dataTable.inArray2Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<UInt64>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = Vector128.BitwiseOr( _clsVar1, _clsVar2 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<UInt64>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector128<UInt64>>(_dataTable.inArray2Ptr); var result = Vector128.BitwiseOr(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new VectorBinaryOpTest__BitwiseOrUInt64(); var result = Vector128.BitwiseOr(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = Vector128.BitwiseOr(_fld1, _fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = Vector128.BitwiseOr(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } private void ValidateResult(Vector128<UInt64> op1, Vector128<UInt64> op2, void* result, [CallerMemberName] string method = "") { UInt64[] inArray1 = new UInt64[Op1ElementCount]; UInt64[] inArray2 = new UInt64[Op2ElementCount]; UInt64[] outArray = new UInt64[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<UInt64, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<UInt64, byte>(ref inArray2[0]), op2); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<UInt64>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "") { UInt64[] inArray1 = new UInt64[Op1ElementCount]; UInt64[] inArray2 = new 
UInt64[Op2ElementCount]; UInt64[] outArray = new UInt64[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt64, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<UInt64>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt64, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<UInt64>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<UInt64>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(UInt64[] left, UInt64[] right, UInt64[] result, [CallerMemberName] string method = "") { bool succeeded = true; if (result[0] != (ulong)(left[0] | right[0])) { succeeded = false; } else { for (var i = 1; i < RetElementCount; i++) { if (result[i] != (ulong)(left[i] | right[i])) { succeeded = false; break; } } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Vector128)}.{nameof(Vector128.BitwiseOr)}<UInt64>(Vector128<UInt64>, Vector128<UInt64>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics\X86\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; namespace JIT.HardwareIntrinsics.General { public static partial class Program { private static void BitwiseOrUInt64() { var test = new VectorBinaryOpTest__BitwiseOrUInt64(); // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); // Validates passing a static member works test.RunClsVarScenario(); // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); // Validates passing the field of a local class works test.RunClassLclFldScenario(); // Validates passing an instance member of a class works test.RunClassFldScenario(); // Validates passing the field of a local struct works test.RunStructLclFldScenario(); // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class VectorBinaryOpTest__BitwiseOrUInt64 { private struct DataTable { private byte[] inArray1; private byte[] inArray2; private byte[] outArray; private GCHandle inHandle1; private GCHandle inHandle2; private GCHandle outHandle; private ulong alignment; public DataTable(UInt64[] inArray1, UInt64[] inArray2, UInt64[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<UInt64>(); int sizeOfinArray2 = inArray2.Length * Unsafe.SizeOf<UInt64>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<UInt64>(); if ((alignment != 32 && alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfinArray2 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.inArray2 = new byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.inHandle2 = GCHandle.Alloc(this.inArray2, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<UInt64, byte>(ref inArray1[0]), (uint)sizeOfinArray1); Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray2Ptr), ref Unsafe.As<UInt64, byte>(ref inArray2[0]), (uint)sizeOfinArray2); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* inArray2Ptr => Align((byte*)(inHandle2.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); inHandle2.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return 
(void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<UInt64> _fld1; public Vector128<UInt64> _fld2; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt64>, byte>(ref testStruct._fld1), ref Unsafe.As<UInt64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<UInt64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt64>, byte>(ref testStruct._fld2), ref Unsafe.As<UInt64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt64>>()); return testStruct; } public void RunStructFldScenario(VectorBinaryOpTest__BitwiseOrUInt64 testClass) { var result = Vector128.BitwiseOr(_fld1, _fld2); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld2, testClass._dataTable.outArrayPtr); } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<UInt64>>() / sizeof(UInt64); private static readonly int Op2ElementCount = Unsafe.SizeOf<Vector128<UInt64>>() / sizeof(UInt64); private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<UInt64>>() / sizeof(UInt64); private static UInt64[] _data1 = new UInt64[Op1ElementCount]; private static UInt64[] _data2 = new UInt64[Op2ElementCount]; private static Vector128<UInt64> _clsVar1; private static Vector128<UInt64> _clsVar2; private Vector128<UInt64> _fld1; private Vector128<UInt64> _fld2; private DataTable _dataTable; static VectorBinaryOpTest__BitwiseOrUInt64() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt64>, byte>(ref _clsVar1), ref Unsafe.As<UInt64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<UInt64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt64>, byte>(ref _clsVar2), ref Unsafe.As<UInt64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt64>>()); } public VectorBinaryOpTest__BitwiseOrUInt64() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt64>, byte>(ref _fld1), ref Unsafe.As<UInt64, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<UInt64>>()); for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt64(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<UInt64>, byte>(ref _fld2), ref Unsafe.As<UInt64, byte>(ref _data2[0]), (uint)Unsafe.SizeOf<Vector128<UInt64>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetUInt64(); } for (var i = 0; i < Op2ElementCount; i++) { _data2[i] = TestLibrary.Generator.GetUInt64(); } _dataTable = new DataTable(_data1, _data2, new UInt64[RetElementCount], LargestVectorSize); } public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = Vector128.BitwiseOr( Unsafe.Read<Vector128<UInt64>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<UInt64>>(_dataTable.inArray2Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, 
_dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var method = typeof(Vector128).GetMethod(nameof(Vector128.BitwiseOr), new Type[] { typeof(Vector128<UInt64>), typeof(Vector128<UInt64>) }); if (method is null) { method = typeof(Vector128).GetMethod(nameof(Vector128.BitwiseOr), 1, new Type[] { typeof(Vector128<>).MakeGenericType(Type.MakeGenericMethodParameter(0)), typeof(Vector128<>).MakeGenericType(Type.MakeGenericMethodParameter(0)) }); } if (method.IsGenericMethodDefinition) { method = method.MakeGenericMethod(typeof(UInt64)); } var result = method.Invoke(null, new object[] { Unsafe.Read<Vector128<UInt64>>(_dataTable.inArray1Ptr), Unsafe.Read<Vector128<UInt64>>(_dataTable.inArray2Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<UInt64>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.inArray2Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = Vector128.BitwiseOr( _clsVar1, _clsVar2 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _clsVar2, _dataTable.outArrayPtr); } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<UInt64>>(_dataTable.inArray1Ptr); var op2 = Unsafe.Read<Vector128<UInt64>>(_dataTable.inArray2Ptr); var result = Vector128.BitwiseOr(op1, op2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op2, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new VectorBinaryOpTest__BitwiseOrUInt64(); var result = Vector128.BitwiseOr(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = Vector128.BitwiseOr(_fld1, _fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld2, _dataTable.outArrayPtr); } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = Vector128.BitwiseOr(test._fld1, test._fld2); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld2, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } private void ValidateResult(Vector128<UInt64> op1, Vector128<UInt64> op2, void* result, [CallerMemberName] string method = "") { UInt64[] inArray1 = new UInt64[Op1ElementCount]; UInt64[] inArray2 = new UInt64[Op2ElementCount]; UInt64[] outArray = new UInt64[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<UInt64, byte>(ref inArray1[0]), op1); Unsafe.WriteUnaligned(ref Unsafe.As<UInt64, byte>(ref inArray2[0]), op2); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<UInt64>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(void* op1, void* op2, void* result, [CallerMemberName] string method = "") { UInt64[] inArray1 = new UInt64[Op1ElementCount]; UInt64[] inArray2 = new 
UInt64[Op2ElementCount]; UInt64[] outArray = new UInt64[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt64, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector128<UInt64>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt64, byte>(ref inArray2[0]), ref Unsafe.AsRef<byte>(op2), (uint)Unsafe.SizeOf<Vector128<UInt64>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<UInt64, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<UInt64>>()); ValidateResult(inArray1, inArray2, outArray, method); } private void ValidateResult(UInt64[] left, UInt64[] right, UInt64[] result, [CallerMemberName] string method = "") { bool succeeded = true; if (result[0] != (ulong)(left[0] | right[0])) { succeeded = false; } else { for (var i = 1; i < RetElementCount; i++) { if (result[i] != (ulong)(left[i] | right[i])) { succeeded = false; break; } } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(Vector128)}.{nameof(Vector128.BitwiseOr)}<UInt64>(Vector128<UInt64>, Vector128<UInt64>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" left: ({string.Join(", ", left)})"); TestLibrary.TestFramework.LogInformation($" right: ({string.Join(", ", right)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
-1
dotnet/runtime
65,899
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync
I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have just exposed the problem by adding a finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which in turn calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I suspect (I was not able to repro it) that this is caused by the flush performed by the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 so after `DisposeAsync` the flush should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I tried hard to write a failing unit test, but failed: all the custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base implementation and is bug-free.
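To make the cause-and-effect concrete, here is a self-contained demo (a hypothetical type, not a repro of #65835) showing that when `DisposeAsync` omits `GC.SuppressFinalize(this)`, the finalizer still runs afterwards, so cleanup happens twice:

```csharp
using System;
using System.Threading.Tasks;

// Hypothetical demo type: DisposeAsync runs, but because finalization is
// never suppressed, the finalizer also runs at the next collection.
public sealed class Leaky : IAsyncDisposable
{
    ~Leaky() => Console.WriteLine("finalizer ran: second cleanup!");

    public ValueTask DisposeAsync()
    {
        Console.WriteLine("DisposeAsync ran");
        // Missing: GC.SuppressFinalize(this);
        return default; // a completed ValueTask
    }

    public static async Task Main()
    {
        await using (new Leaky()) { }
        GC.Collect();
        GC.WaitForPendingFinalizers(); // "finalizer ran" is printed too
    }
}
```

In `FileStream`'s case the "second cleanup" is a finalizer-driven flush, which is why the buffer in #65835 could plausibly reach the disk twice.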
adamsitnik
"2022-02-25T17:17:25Z"
"2022-03-01T13:15:47Z"
097d9ea3c1584eb8745bd0a72ebf9cd3a31f1618
3cbed4b71800709a8121e50e964ae5b02bd80b94
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync. I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have just exposed the problem by adding a finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which in turn calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I suspect (I was not able to repro it) that this is caused by the flush performed by the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 so after `DisposeAsync` the flush should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I tried hard to write a failing unit test, but failed: all the custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base implementation and is bug-free.
./src/libraries/System.Memory/tests/ReadOnlySpan/Empty.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using Xunit; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; namespace System.SpanTests { public static partial class ReadOnlySpanTests { [Fact] public static void Empty() { ReadOnlySpan<int> empty = ReadOnlySpan<int>.Empty; Assert.True(empty.IsEmpty); Assert.Equal(0, empty.Length); unsafe { ref int expected = ref Unsafe.AsRef<int>(null); ref int actual = ref Unsafe.AsRef(in MemoryMarshal.GetReference(empty)); Assert.True(Unsafe.AreSame(ref expected, ref actual)); } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using Xunit; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; namespace System.SpanTests { public static partial class ReadOnlySpanTests { [Fact] public static void Empty() { ReadOnlySpan<int> empty = ReadOnlySpan<int>.Empty; Assert.True(empty.IsEmpty); Assert.Equal(0, empty.Length); unsafe { ref int expected = ref Unsafe.AsRef<int>(null); ref int actual = ref Unsafe.AsRef(in MemoryMarshal.GetReference(empty)); Assert.True(Unsafe.AreSame(ref expected, ref actual)); } } } }
-1
dotnet/runtime
65,899
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync
I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have just exposed the problem by adding a finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which in turn calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I suspect (I was not able to repro it) that this is caused by the flush performed by the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 so after `DisposeAsync` the flush should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I tried hard to write a failing unit test, but failed: all the custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base implementation and is bug-free.
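The description's hedged reasoning about why a second flush should in theory be a no-op can be sketched as the two guards a finalizer-driven flush would check. This is a sketch under assumed names (`_writePos`, `_handleClosed`), not the real `BufferedFileStreamStrategy` source:

```csharp
// Sketch of the guards the description reasons about: after a normal
// dispose the write position is reset to 0 and the handle is closed,
// so a finalizer-driven flush should find nothing to do.
internal sealed class BufferedStrategySketch
{
    private int _writePos;      // bytes pending in the write buffer
    private bool _handleClosed; // set once the OS handle is released

    internal void FlushFromFinalizer()
    {
        if (_writePos == 0)
        {
            return; // the finally block already reset the buffer position
        }
        if (_handleClosed)
        {
            return; // the handle was already closed by Dispose/DisposeAsync
        }
        // ... write the pending bytes to disk; omitted in this sketch ...
    }
}
```

If both guards behave as sketched, the finalizer would be harmless even without suppression, which is why the author is not 100% sure of the double-write mechanism; adding `GC.SuppressFinalize(this)` removes the question entirely.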
adamsitnik
"2022-02-25T17:17:25Z"
"2022-03-01T13:15:47Z"
097d9ea3c1584eb8745bd0a72ebf9cd3a31f1618
3cbed4b71800709a8121e50e964ae5b02bd80b94
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync. I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have just exposed the problem by adding a finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which in turn calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I suspect (I was not able to repro it) that this is caused by the flush performed by the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 so after `DisposeAsync` the flush should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I tried hard to write a failing unit test, but failed: all the custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base implementation and is bug-free.
./src/tests/baseservices/threading/generics/WaitCallback/thread15.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Threading; interface IGen<T> { void Target(object p); T Dummy(T t); } class GenInt : IGen<int> { public int Dummy(int t) { return t; } public virtual void Target(object p) { ManualResetEvent evt = (ManualResetEvent) p; Interlocked.Increment(ref Test_thread15.Xcounter); evt.Set(); } public static void ThreadPoolTest() { ManualResetEvent[] evts = new ManualResetEvent[Test_thread15.nThreads]; WaitHandle[] hdls = new WaitHandle[Test_thread15.nThreads]; for (int i=0; i<Test_thread15.nThreads; i++) { evts[i] = new ManualResetEvent(false); hdls[i] = (WaitHandle) evts[i]; } IGen<int> obj = new GenInt(); for (int i = 0; i <Test_thread15.nThreads; i++) { WaitCallback cb = new WaitCallback(obj.Target); ThreadPool.QueueUserWorkItem(cb,evts[i]); } WaitHandle.WaitAll(hdls); Test_thread15.Eval(Test_thread15.Xcounter==Test_thread15.nThreads); Test_thread15.Xcounter = 0; } } class GenDouble : IGen<double> { public double Dummy(double t) { return t; } public virtual void Target(object p) { ManualResetEvent evt = (ManualResetEvent) p; Interlocked.Increment(ref Test_thread15.Xcounter); evt.Set(); } public static void ThreadPoolTest() { ManualResetEvent[] evts = new ManualResetEvent[Test_thread15.nThreads]; WaitHandle[] hdls = new WaitHandle[Test_thread15.nThreads]; for (int i=0; i<Test_thread15.nThreads; i++) { evts[i] = new ManualResetEvent(false); hdls[i] = (WaitHandle) evts[i]; } IGen<double> obj = new GenDouble(); for (int i = 0; i <Test_thread15.nThreads; i++) { WaitCallback cb = new WaitCallback(obj.Target); ThreadPool.QueueUserWorkItem(cb,evts[i]); } WaitHandle.WaitAll(hdls); Test_thread15.Eval(Test_thread15.Xcounter==Test_thread15.nThreads); Test_thread15.Xcounter = 0; } } class GenString : IGen<string> { public string Dummy(string t) { return t; } public virtual void Target(object p) { ManualResetEvent evt = (ManualResetEvent) p; Interlocked.Increment(ref Test_thread15.Xcounter); evt.Set(); } public static void ThreadPoolTest() { ManualResetEvent[] evts = new ManualResetEvent[Test_thread15.nThreads]; WaitHandle[] hdls = new WaitHandle[Test_thread15.nThreads]; for (int i=0; i<Test_thread15.nThreads; i++) { evts[i] = new ManualResetEvent(false); hdls[i] = (WaitHandle) evts[i]; } IGen<string> obj = new GenString(); for (int i = 0; i <Test_thread15.nThreads; i++) { WaitCallback cb = new WaitCallback(obj.Target); ThreadPool.QueueUserWorkItem(cb,evts[i]); } WaitHandle.WaitAll(hdls); Test_thread15.Eval(Test_thread15.Xcounter==Test_thread15.nThreads); Test_thread15.Xcounter = 0; } } class GenObject : IGen<object> { public object Dummy(object t) { return t; } public virtual void Target(object p) { ManualResetEvent evt = (ManualResetEvent) p; Interlocked.Increment(ref Test_thread15.Xcounter); evt.Set(); } public static void ThreadPoolTest() { ManualResetEvent[] evts = new ManualResetEvent[Test_thread15.nThreads]; WaitHandle[] hdls = new WaitHandle[Test_thread15.nThreads]; for (int i=0; i<Test_thread15.nThreads; i++) { evts[i] = new ManualResetEvent(false); hdls[i] = (WaitHandle) evts[i]; } IGen<object> obj = new GenObject(); for (int i = 0; i <Test_thread15.nThreads; i++) { WaitCallback cb = new WaitCallback(obj.Target); ThreadPool.QueueUserWorkItem(cb,evts[i]); } WaitHandle.WaitAll(hdls); Test_thread15.Eval(Test_thread15.Xcounter==Test_thread15.nThreads); Test_thread15.Xcounter = 0; } } class GenGuid : IGen<Guid> { public Guid Dummy(Guid t) { 
return t; } public virtual void Target(object p) { ManualResetEvent evt = (ManualResetEvent) p; Interlocked.Increment(ref Test_thread15.Xcounter); evt.Set(); } public static void ThreadPoolTest() { ManualResetEvent[] evts = new ManualResetEvent[Test_thread15.nThreads]; WaitHandle[] hdls = new WaitHandle[Test_thread15.nThreads]; for (int i=0; i<Test_thread15.nThreads; i++) { evts[i] = new ManualResetEvent(false); hdls[i] = (WaitHandle) evts[i]; } IGen<Guid> obj = new GenGuid(); for (int i = 0; i <Test_thread15.nThreads; i++) { WaitCallback cb = new WaitCallback(obj.Target); ThreadPool.QueueUserWorkItem(cb,evts[i]); } WaitHandle.WaitAll(hdls); Test_thread15.Eval(Test_thread15.Xcounter==Test_thread15.nThreads); Test_thread15.Xcounter = 0; } } public class Test_thread15 { public static int nThreads =50; public static int counter = 0; public static int Xcounter = 0; public static bool result = true; public static void Eval(bool exp) { counter++; if (!exp) { result = exp; Console.WriteLine("Test Failed at location: " + counter); } } public static int Main() { GenInt.ThreadPoolTest(); GenDouble.ThreadPoolTest(); GenString.ThreadPoolTest(); GenObject.ThreadPoolTest(); GenGuid.ThreadPoolTest(); if (result) { Console.WriteLine("Test Passed"); return 100; } else { Console.WriteLine("Test Failed"); return 1; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System; using System.Threading; interface IGen<T> { void Target(object p); T Dummy(T t); } class GenInt : IGen<int> { public int Dummy(int t) { return t; } public virtual void Target(object p) { ManualResetEvent evt = (ManualResetEvent) p; Interlocked.Increment(ref Test_thread15.Xcounter); evt.Set(); } public static void ThreadPoolTest() { ManualResetEvent[] evts = new ManualResetEvent[Test_thread15.nThreads]; WaitHandle[] hdls = new WaitHandle[Test_thread15.nThreads]; for (int i=0; i<Test_thread15.nThreads; i++) { evts[i] = new ManualResetEvent(false); hdls[i] = (WaitHandle) evts[i]; } IGen<int> obj = new GenInt(); for (int i = 0; i <Test_thread15.nThreads; i++) { WaitCallback cb = new WaitCallback(obj.Target); ThreadPool.QueueUserWorkItem(cb,evts[i]); } WaitHandle.WaitAll(hdls); Test_thread15.Eval(Test_thread15.Xcounter==Test_thread15.nThreads); Test_thread15.Xcounter = 0; } } class GenDouble : IGen<double> { public double Dummy(double t) { return t; } public virtual void Target(object p) { ManualResetEvent evt = (ManualResetEvent) p; Interlocked.Increment(ref Test_thread15.Xcounter); evt.Set(); } public static void ThreadPoolTest() { ManualResetEvent[] evts = new ManualResetEvent[Test_thread15.nThreads]; WaitHandle[] hdls = new WaitHandle[Test_thread15.nThreads]; for (int i=0; i<Test_thread15.nThreads; i++) { evts[i] = new ManualResetEvent(false); hdls[i] = (WaitHandle) evts[i]; } IGen<double> obj = new GenDouble(); for (int i = 0; i <Test_thread15.nThreads; i++) { WaitCallback cb = new WaitCallback(obj.Target); ThreadPool.QueueUserWorkItem(cb,evts[i]); } WaitHandle.WaitAll(hdls); Test_thread15.Eval(Test_thread15.Xcounter==Test_thread15.nThreads); Test_thread15.Xcounter = 0; } } class GenString : IGen<string> { public string Dummy(string t) { return t; } public virtual void Target(object p) { ManualResetEvent evt = (ManualResetEvent) p; Interlocked.Increment(ref Test_thread15.Xcounter); evt.Set(); } public static void ThreadPoolTest() { ManualResetEvent[] evts = new ManualResetEvent[Test_thread15.nThreads]; WaitHandle[] hdls = new WaitHandle[Test_thread15.nThreads]; for (int i=0; i<Test_thread15.nThreads; i++) { evts[i] = new ManualResetEvent(false); hdls[i] = (WaitHandle) evts[i]; } IGen<string> obj = new GenString(); for (int i = 0; i <Test_thread15.nThreads; i++) { WaitCallback cb = new WaitCallback(obj.Target); ThreadPool.QueueUserWorkItem(cb,evts[i]); } WaitHandle.WaitAll(hdls); Test_thread15.Eval(Test_thread15.Xcounter==Test_thread15.nThreads); Test_thread15.Xcounter = 0; } } class GenObject : IGen<object> { public object Dummy(object t) { return t; } public virtual void Target(object p) { ManualResetEvent evt = (ManualResetEvent) p; Interlocked.Increment(ref Test_thread15.Xcounter); evt.Set(); } public static void ThreadPoolTest() { ManualResetEvent[] evts = new ManualResetEvent[Test_thread15.nThreads]; WaitHandle[] hdls = new WaitHandle[Test_thread15.nThreads]; for (int i=0; i<Test_thread15.nThreads; i++) { evts[i] = new ManualResetEvent(false); hdls[i] = (WaitHandle) evts[i]; } IGen<object> obj = new GenObject(); for (int i = 0; i <Test_thread15.nThreads; i++) { WaitCallback cb = new WaitCallback(obj.Target); ThreadPool.QueueUserWorkItem(cb,evts[i]); } WaitHandle.WaitAll(hdls); Test_thread15.Eval(Test_thread15.Xcounter==Test_thread15.nThreads); Test_thread15.Xcounter = 0; } } class GenGuid : IGen<Guid> { public Guid Dummy(Guid t) { 
return t; } public virtual void Target(object p) { ManualResetEvent evt = (ManualResetEvent) p; Interlocked.Increment(ref Test_thread15.Xcounter); evt.Set(); } public static void ThreadPoolTest() { ManualResetEvent[] evts = new ManualResetEvent[Test_thread15.nThreads]; WaitHandle[] hdls = new WaitHandle[Test_thread15.nThreads]; for (int i=0; i<Test_thread15.nThreads; i++) { evts[i] = new ManualResetEvent(false); hdls[i] = (WaitHandle) evts[i]; } IGen<Guid> obj = new GenGuid(); for (int i = 0; i <Test_thread15.nThreads; i++) { WaitCallback cb = new WaitCallback(obj.Target); ThreadPool.QueueUserWorkItem(cb,evts[i]); } WaitHandle.WaitAll(hdls); Test_thread15.Eval(Test_thread15.Xcounter==Test_thread15.nThreads); Test_thread15.Xcounter = 0; } } public class Test_thread15 { public static int nThreads =50; public static int counter = 0; public static int Xcounter = 0; public static bool result = true; public static void Eval(bool exp) { counter++; if (!exp) { result = exp; Console.WriteLine("Test Failed at location: " + counter); } } public static int Main() { GenInt.ThreadPoolTest(); GenDouble.ThreadPoolTest(); GenString.ThreadPoolTest(); GenObject.ThreadPoolTest(); GenGuid.ThreadPoolTest(); if (result) { Console.WriteLine("Test Passed"); return 100; } else { Console.WriteLine("Test Failed"); return 1; } } }
-1
dotnet/runtime
65,899
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync
I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have just exposed the problem by adding a finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which in turn calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I suspect (I was not able to repro it) that this is caused by the flush performed by the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 so after `DisposeAsync` the flush should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I tried hard to write a failing unit test, but failed: all the custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base implementation and is bug-free.
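The last point of the description, that derived types are bug-free because they fall through to the base implementation, can be sketched as follows. The subclass here is hypothetical; the claim is only that delegating to `base.DisposeAsync()` inherits whatever suppression the base performs:

```csharp
using System.IO;
using System.Threading.Tasks;

// Hypothetical FileStream subclass: its DisposeAsync delegates to the
// base implementation, which (with this PR) suppresses finalization
// itself, so such types never exhibit the double-cleanup bug.
public class DerivedFileStream : FileStream
{
    public DerivedFileStream(string path, FileMode mode)
        : base(path, mode)
    {
    }

    public override ValueTask DisposeAsync()
    {
        // Delegating keeps the base GC.SuppressFinalize(this) behavior.
        return base.DisposeAsync();
    }
}
```

This is why a failing unit test was hard to write: every test type in the hierarchy takes this delegating path rather than the raw `FileStream.DisposeAsync` path that had the bug.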
adamsitnik
"2022-02-25T17:17:25Z"
"2022-03-01T13:15:47Z"
097d9ea3c1584eb8745bd0a72ebf9cd3a31f1618
3cbed4b71800709a8121e50e964ae5b02bd80b94
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync. I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have just exposed the problem by adding a finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which in turn calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I suspect (I was not able to repro it) that this is caused by the flush performed by the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 so after `DisposeAsync` the flush should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I tried hard to write a failing unit test, but failed: all the custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base implementation and is bug-free.
./src/tests/JIT/Generics/Fields/instance_passing_class01.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // using System; public struct ValX0 { } public struct ValY0 { } public struct ValX1<T> { } public struct ValY1<T> { } public struct ValX2<T, U> { } public struct ValY2<T, U> { } public struct ValX3<T, U, V> { } public struct ValY3<T, U, V> { } public class RefX0 { } public class RefY0 { } public class RefX1<T> { } public class RefY1<T> { } public class RefX2<T, U> { } public class RefY2<T, U> { } public class RefX3<T, U, V> { } public class RefY3<T, U, V> { } public class Gen<T> { public T Fld1; public T Fld2; public T PassAsIn(T t) { return t; } public T PassAsRef(ref T t) { T temp = t; t = Fld2; return temp; } public void PassAsOut(out T t) { t = Fld2; } public void PassAsParameter(T t1, T t2) { Fld1 = t1; Fld2 = t2; T temp = t1; Test_instance_passing_class01.Eval(Fld1.Equals(PassAsIn(temp))); Test_instance_passing_class01.Eval(Fld1.Equals(PassAsRef(ref temp))); Test_instance_passing_class01.Eval(Fld2.Equals(temp)); temp = t1; PassAsOut(out temp); Test_instance_passing_class01.Eval(Fld2.Equals(temp)); } } public class Test_instance_passing_class01 { public static int counter = 0; public static bool result = true; public static void Eval(bool exp) { counter++; if (!exp) { result = exp; Console.WriteLine("Test Failed at location: " + counter); } } public static int Main() { int _int1 = 1; int _int2 = -1; new Gen<int>().PassAsParameter(_int1, _int2); double _double1 = 1; double _double2 = -1; new Gen<double>().PassAsParameter(_double1, _double2); string _string1 = "string1"; string _string2 = "string2"; new Gen<string>().PassAsParameter(_string1, _string2); object _object1 = (object)_string1; object _object2 = (object)_string2; new Gen<object>().PassAsParameter(_object1, _object2); Guid _Guid1 = new Guid(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1); Guid _Guid2 = new Guid(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11); new Gen<Guid>().PassAsParameter(_Guid1, _Guid2); if (result) { Console.WriteLine("Test Passed"); return 100; } else { Console.WriteLine("Test Failed"); return 1; } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. // using System; public struct ValX0 { } public struct ValY0 { } public struct ValX1<T> { } public struct ValY1<T> { } public struct ValX2<T, U> { } public struct ValY2<T, U> { } public struct ValX3<T, U, V> { } public struct ValY3<T, U, V> { } public class RefX0 { } public class RefY0 { } public class RefX1<T> { } public class RefY1<T> { } public class RefX2<T, U> { } public class RefY2<T, U> { } public class RefX3<T, U, V> { } public class RefY3<T, U, V> { } public class Gen<T> { public T Fld1; public T Fld2; public T PassAsIn(T t) { return t; } public T PassAsRef(ref T t) { T temp = t; t = Fld2; return temp; } public void PassAsOut(out T t) { t = Fld2; } public void PassAsParameter(T t1, T t2) { Fld1 = t1; Fld2 = t2; T temp = t1; Test_instance_passing_class01.Eval(Fld1.Equals(PassAsIn(temp))); Test_instance_passing_class01.Eval(Fld1.Equals(PassAsRef(ref temp))); Test_instance_passing_class01.Eval(Fld2.Equals(temp)); temp = t1; PassAsOut(out temp); Test_instance_passing_class01.Eval(Fld2.Equals(temp)); } } public class Test_instance_passing_class01 { public static int counter = 0; public static bool result = true; public static void Eval(bool exp) { counter++; if (!exp) { result = exp; Console.WriteLine("Test Failed at location: " + counter); } } public static int Main() { int _int1 = 1; int _int2 = -1; new Gen<int>().PassAsParameter(_int1, _int2); double _double1 = 1; double _double2 = -1; new Gen<double>().PassAsParameter(_double1, _double2); string _string1 = "string1"; string _string2 = "string2"; new Gen<string>().PassAsParameter(_string1, _string2); object _object1 = (object)_string1; object _object2 = (object)_string2; new Gen<object>().PassAsParameter(_object1, _object2); Guid _Guid1 = new Guid(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1); Guid _Guid2 = new Guid(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11); new Gen<Guid>().PassAsParameter(_Guid1, _Guid2); if (result) { Console.WriteLine("Test Passed"); return 100; } else { Console.WriteLine("Test Failed"); return 1; } } }
-1
dotnet/runtime
65,899
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync
I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have just exposed the problem by adding a finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which in turn calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I suspect (I was not able to repro it) that this is caused by the flush performed by the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 so after `DisposeAsync` the flush should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I tried hard to write a failing unit test, but failed: all the custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base implementation and is bug-free.
adamsitnik
"2022-02-25T17:17:25Z"
"2022-03-01T13:15:47Z"
097d9ea3c1584eb8745bd0a72ebf9cd3a31f1618
3cbed4b71800709a8121e50e964ae5b02bd80b94
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync. I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have just exposed the problem by adding a finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which in turn calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I suspect (I was not able to repro it) that this is caused by the flush performed by the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 so after `DisposeAsync` the flush should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I tried hard to write a failing unit test, but failed: all the custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base implementation and is bug-free.
./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd/LoadAndInsertScalar.Vector128.Byte.15.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Reflection; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void LoadAndInsertScalar_Vector128_Byte_15() { var test = new LoadAndInsertTest__LoadAndInsertScalar_Vector128_Byte_15(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class LoadAndInsertTest__LoadAndInsertScalar_Vector128_Byte_15 { private struct DataTable { private byte[] inArray1; private byte[] outArray; private GCHandle inHandle1; private GCHandle outHandle; private ulong alignment; public DataTable(Byte[] inArray1, Byte[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Byte>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Byte>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new 
byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Byte, byte>(ref inArray1[0]), (uint)sizeOfinArray1); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<Byte> _fld1; public Byte _fld3; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Byte>, byte>(ref testStruct._fld1), ref Unsafe.As<Byte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Byte>>()); testStruct._fld3 = TestLibrary.Generator.GetByte(); return testStruct; } public void RunStructFldScenario(LoadAndInsertTest__LoadAndInsertScalar_Vector128_Byte_15 testClass) { fixed (Byte* pFld3 = &_fld3) { var result = AdvSimd.LoadAndInsertScalar(_fld1, 15, pFld3); Unsafe.Write(testClass._dataTable.outArrayPtr, result); } testClass.ValidateResult(_fld1, _fld3, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(LoadAndInsertTest__LoadAndInsertScalar_Vector128_Byte_15 testClass) { fixed (Vector128<Byte>* pFld1 = &_fld1) fixed (Byte* pFld3 = &_fld3) { var result = AdvSimd.LoadAndInsertScalar( AdvSimd.LoadVector128((Byte*)pFld1), 15, pFld3 ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld3, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Byte>>() / sizeof(Byte); private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Byte>>() / sizeof(Byte); private static readonly byte ElementIndex = 15; private static Byte[] _data1 = new Byte[Op1ElementCount]; private static Vector128<Byte> _clsVar1; private static Byte _clsVar3; private Vector128<Byte> _fld1; private Byte _fld3; private DataTable _dataTable; static LoadAndInsertTest__LoadAndInsertScalar_Vector128_Byte_15() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Byte>, byte>(ref _clsVar1), ref Unsafe.As<Byte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Byte>>()); _clsVar3 = TestLibrary.Generator.GetByte(); } public LoadAndInsertTest__LoadAndInsertScalar_Vector128_Byte_15() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Byte>, byte>(ref _fld1), ref Unsafe.As<Byte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Byte>>()); _fld3 = TestLibrary.Generator.GetByte(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); } _dataTable = new DataTable(_data1, new Byte[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); Byte op3 = TestLibrary.Generator.GetByte(); var result = AdvSimd.LoadAndInsertScalar( Unsafe.Read<Vector128<Byte>>(_dataTable.inArray1Ptr), 15, &op3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, op3, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); Byte op3 = TestLibrary.Generator.GetByte(); var result = AdvSimd.LoadAndInsertScalar( AdvSimd.LoadVector128((Byte*)(_dataTable.inArray1Ptr)), 15, &op3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, op3, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); Byte op3 = TestLibrary.Generator.GetByte(); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.LoadAndInsertScalar), new Type[] { typeof(Vector128<Byte>), typeof(byte), typeof(Byte*) }) .Invoke(null, new object[] { Unsafe.Read<Vector128<Byte>>(_dataTable.inArray1Ptr), ElementIndex, Pointer.Box(&op3, typeof(Byte*)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Byte>)(result)); ValidateResult(_dataTable.inArray1Ptr, op3, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); Byte op3 = TestLibrary.Generator.GetByte(); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.LoadAndInsertScalar), new Type[] { typeof(Vector128<Byte>), typeof(byte), typeof(Byte*) }) .Invoke(null, new object[] { AdvSimd.LoadVector128((Byte*)(_dataTable.inArray1Ptr)), ElementIndex, Pointer.Box(&op3, typeof(Byte*)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Byte>)(result)); ValidateResult(_dataTable.inArray1Ptr, op3, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); fixed (Byte* pClsVar3 = &_clsVar3) { var result = AdvSimd.LoadAndInsertScalar( _clsVar1, 15, pClsVar3 ); Unsafe.Write(_dataTable.outArrayPtr, result); } ValidateResult(_clsVar1, _clsVar3, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector128<Byte>* pClsVar1 = &_clsVar1) fixed (Byte* pClsVar3 = &_clsVar3) { var result = AdvSimd.LoadAndInsertScalar( AdvSimd.LoadVector128((Byte*)pClsVar1), 15, pClsVar3 ); Unsafe.Write(_dataTable.outArrayPtr, result); } ValidateResult(_clsVar1, _clsVar3, _dataTable.outArrayPtr); } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<Byte>>(_dataTable.inArray1Ptr); var op3 = TestLibrary.Generator.GetByte(); var result = AdvSimd.LoadAndInsertScalar(op1, 15, &op3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op3, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector128((Byte*)(_dataTable.inArray1Ptr)); var op3 = TestLibrary.Generator.GetByte(); var result = AdvSimd.LoadAndInsertScalar(op1, 15, &op3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op3, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new 
LoadAndInsertTest__LoadAndInsertScalar_Vector128_Byte_15(); fixed (Byte* pFld3 = &test._fld3) { var result = AdvSimd.LoadAndInsertScalar(test._fld1, 15, pFld3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld3, _dataTable.outArrayPtr); } } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new LoadAndInsertTest__LoadAndInsertScalar_Vector128_Byte_15(); fixed (Vector128<Byte>* pFld1 = &test._fld1) fixed (Byte* pFld3 = &test._fld3) { var result = AdvSimd.LoadAndInsertScalar( AdvSimd.LoadVector128((Byte*)pFld1), 15, pFld3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld3, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); fixed (Byte* pFld3 = &_fld3) { var result = AdvSimd.LoadAndInsertScalar(_fld1, 15, pFld3); Unsafe.Write(_dataTable.outArrayPtr, result); } ValidateResult(_fld1, _fld3, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector128<Byte>* pFld1 = &_fld1) fixed (Byte* pFld3 = &_fld3) { var result = AdvSimd.LoadAndInsertScalar( AdvSimd.LoadVector128((Byte*)pFld1), 15, pFld3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld3, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.LoadAndInsertScalar(test._fld1, 15, &test._fld3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld3, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.LoadAndInsertScalar( AdvSimd.LoadVector128((Byte*)(&test._fld1)), 15, &test._fld3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld3, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector128<Byte> op1, Byte op3, void* result, [CallerMemberName] string method = "") { Byte[] inArray1 = new Byte[Op1ElementCount]; Byte[] outArray = new Byte[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Byte, byte>(ref inArray1[0]), op1); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Byte>>()); ValidateResult(inArray1, op3, outArray, method); } private void ValidateResult(void* op1, Byte op3, void* result, [CallerMemberName] string method = "") { Byte[] inArray1 = new Byte[Op1ElementCount]; Byte[] outArray = new Byte[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), 
(uint)Unsafe.SizeOf<Vector128<Byte>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Byte>>()); ValidateResult(inArray1, op3, outArray, method); } private void ValidateResult(Byte[] firstOp, Byte thirdOp, Byte[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.Insert(firstOp, ElementIndex, thirdOp, i) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.LoadAndInsertScalar)}<Byte>(Vector128<Byte>, 15, Byte*): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($" thirdOp: {thirdOp}"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Reflection; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void LoadAndInsertScalar_Vector128_Byte_15() { var test = new LoadAndInsertTest__LoadAndInsertScalar_Vector128_Byte_15(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class LoadAndInsertTest__LoadAndInsertScalar_Vector128_Byte_15 { private struct DataTable { private byte[] inArray1; private byte[] outArray; private GCHandle inHandle1; private GCHandle outHandle; private ulong alignment; public DataTable(Byte[] inArray1, Byte[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Byte>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Byte>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new 
byte[alignment * 2]; this.outArray = new byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Byte, byte>(ref inArray1[0]), (uint)sizeOfinArray1); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector128<Byte> _fld1; public Byte _fld3; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Byte>, byte>(ref testStruct._fld1), ref Unsafe.As<Byte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Byte>>()); testStruct._fld3 = TestLibrary.Generator.GetByte(); return testStruct; } public void RunStructFldScenario(LoadAndInsertTest__LoadAndInsertScalar_Vector128_Byte_15 testClass) { fixed (Byte* pFld3 = &_fld3) { var result = AdvSimd.LoadAndInsertScalar(_fld1, 15, pFld3); Unsafe.Write(testClass._dataTable.outArrayPtr, result); } testClass.ValidateResult(_fld1, _fld3, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(LoadAndInsertTest__LoadAndInsertScalar_Vector128_Byte_15 testClass) { fixed (Vector128<Byte>* pFld1 = &_fld1) fixed (Byte* pFld3 = &_fld3) { var result = AdvSimd.LoadAndInsertScalar( AdvSimd.LoadVector128((Byte*)pFld1), 15, pFld3 ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, _fld3, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 16; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector128<Byte>>() / sizeof(Byte); private static readonly int RetElementCount = Unsafe.SizeOf<Vector128<Byte>>() / sizeof(Byte); private static readonly byte ElementIndex = 15; private static Byte[] _data1 = new Byte[Op1ElementCount]; private static Vector128<Byte> _clsVar1; private static Byte _clsVar3; private Vector128<Byte> _fld1; private Byte _fld3; private DataTable _dataTable; static LoadAndInsertTest__LoadAndInsertScalar_Vector128_Byte_15() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Byte>, byte>(ref _clsVar1), ref Unsafe.As<Byte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Byte>>()); _clsVar3 = TestLibrary.Generator.GetByte(); } public LoadAndInsertTest__LoadAndInsertScalar_Vector128_Byte_15() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector128<Byte>, byte>(ref _fld1), ref Unsafe.As<Byte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector128<Byte>>()); _fld3 = TestLibrary.Generator.GetByte(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); } _dataTable = new DataTable(_data1, new Byte[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { 
TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); Byte op3 = TestLibrary.Generator.GetByte(); var result = AdvSimd.LoadAndInsertScalar( Unsafe.Read<Vector128<Byte>>(_dataTable.inArray1Ptr), 15, &op3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, op3, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); Byte op3 = TestLibrary.Generator.GetByte(); var result = AdvSimd.LoadAndInsertScalar( AdvSimd.LoadVector128((Byte*)(_dataTable.inArray1Ptr)), 15, &op3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, op3, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); Byte op3 = TestLibrary.Generator.GetByte(); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.LoadAndInsertScalar), new Type[] { typeof(Vector128<Byte>), typeof(byte), typeof(Byte*) }) .Invoke(null, new object[] { Unsafe.Read<Vector128<Byte>>(_dataTable.inArray1Ptr), ElementIndex, Pointer.Box(&op3, typeof(Byte*)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Byte>)(result)); ValidateResult(_dataTable.inArray1Ptr, op3, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); Byte op3 = TestLibrary.Generator.GetByte(); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.LoadAndInsertScalar), new Type[] { typeof(Vector128<Byte>), typeof(byte), typeof(Byte*) }) .Invoke(null, new object[] { AdvSimd.LoadVector128((Byte*)(_dataTable.inArray1Ptr)), ElementIndex, Pointer.Box(&op3, typeof(Byte*)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector128<Byte>)(result)); ValidateResult(_dataTable.inArray1Ptr, op3, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); fixed (Byte* pClsVar3 = &_clsVar3) { var result = AdvSimd.LoadAndInsertScalar( _clsVar1, 15, pClsVar3 ); Unsafe.Write(_dataTable.outArrayPtr, result); } ValidateResult(_clsVar1, _clsVar3, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector128<Byte>* pClsVar1 = &_clsVar1) fixed (Byte* pClsVar3 = &_clsVar3) { var result = AdvSimd.LoadAndInsertScalar( AdvSimd.LoadVector128((Byte*)pClsVar1), 15, pClsVar3 ); Unsafe.Write(_dataTable.outArrayPtr, result); } ValidateResult(_clsVar1, _clsVar3, _dataTable.outArrayPtr); } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector128<Byte>>(_dataTable.inArray1Ptr); var op3 = TestLibrary.Generator.GetByte(); var result = AdvSimd.LoadAndInsertScalar(op1, 15, &op3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op3, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector128((Byte*)(_dataTable.inArray1Ptr)); var op3 = TestLibrary.Generator.GetByte(); var result = AdvSimd.LoadAndInsertScalar(op1, 15, &op3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, op3, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new 
LoadAndInsertTest__LoadAndInsertScalar_Vector128_Byte_15(); fixed (Byte* pFld3 = &test._fld3) { var result = AdvSimd.LoadAndInsertScalar(test._fld1, 15, pFld3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld3, _dataTable.outArrayPtr); } } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new LoadAndInsertTest__LoadAndInsertScalar_Vector128_Byte_15(); fixed (Vector128<Byte>* pFld1 = &test._fld1) fixed (Byte* pFld3 = &test._fld3) { var result = AdvSimd.LoadAndInsertScalar( AdvSimd.LoadVector128((Byte*)pFld1), 15, pFld3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld3, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); fixed (Byte* pFld3 = &_fld3) { var result = AdvSimd.LoadAndInsertScalar(_fld1, 15, pFld3); Unsafe.Write(_dataTable.outArrayPtr, result); } ValidateResult(_fld1, _fld3, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector128<Byte>* pFld1 = &_fld1) fixed (Byte* pFld3 = &_fld3) { var result = AdvSimd.LoadAndInsertScalar( AdvSimd.LoadVector128((Byte*)pFld1), 15, pFld3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _fld3, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.LoadAndInsertScalar(test._fld1, 15, &test._fld3); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld3, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.LoadAndInsertScalar( AdvSimd.LoadVector128((Byte*)(&test._fld1)), 15, &test._fld3 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, test._fld3, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector128<Byte> op1, Byte op3, void* result, [CallerMemberName] string method = "") { Byte[] inArray1 = new Byte[Op1ElementCount]; Byte[] outArray = new Byte[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Byte, byte>(ref inArray1[0]), op1); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Byte>>()); ValidateResult(inArray1, op3, outArray, method); } private void ValidateResult(void* op1, Byte op3, void* result, [CallerMemberName] string method = "") { Byte[] inArray1 = new Byte[Op1ElementCount]; Byte[] outArray = new Byte[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), 
(uint)Unsafe.SizeOf<Vector128<Byte>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector128<Byte>>()); ValidateResult(inArray1, op3, outArray, method); } private void ValidateResult(Byte[] firstOp, Byte thirdOp, Byte[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.Insert(firstOp, ElementIndex, thirdOp, i) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.LoadAndInsertScalar)}<Byte>(Vector128<Byte>, 15, Byte*): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($" thirdOp: {thirdOp}"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
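The generated `DataTable` helper above rounds its pinned buffers up to the requested power-of-two alignment with the classic idiom `(addr + alignment - 1) & ~(alignment - 1)`. A standalone sketch of that arithmetic, with illustrative names not taken from the test:

```csharp
using System;

class AlignUpDemo
{
    // Round an address up to the next multiple of a power-of-two alignment,
    // the same bit trick as the tests' private Align helper.
    static ulong AlignUp(ulong address, ulong alignment)
        => (address + alignment - 1) & ~(alignment - 1);

    static void Main()
    {
        Console.WriteLine($"0x{AlignUp(0x1001, 16):X}"); // 0x1010 (rounded up)
        Console.WriteLine($"0x{AlignUp(0x1000, 16):X}"); // 0x1000 (already aligned)
    }
}
```

Allocating `alignment * 2` bytes, as the `DataTable` constructor does, leaves enough slack for the rounded-up pointer to still cover the vector payload inside the pinned array.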
-1
dotnet/runtime
65,899
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync
I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have merely exposed the problem by adding the finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I suspect (I was not able to repro it) that this is caused by the flush implemented for the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flushing should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I tried really hard to write a failing unit test, but failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base impl and is bug-free.
adamsitnik
"2022-02-25T17:17:25Z"
"2022-03-01T13:15:47Z"
097d9ea3c1584eb8745bd0a72ebf9cd3a31f1618
3cbed4b71800709a8121e50e964ae5b02bd80b94
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync. I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have merely exposed the problem by adding the finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I suspect (I was not able to repro it) that this is caused by the flush implemented for the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flushing should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I tried really hard to write a failing unit test, but failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base impl and is bug-free.
./src/libraries/System.Private.CoreLib/src/System/Reflection/ResourceLocation.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.Reflection { [Flags] public enum ResourceLocation { ContainedInAnotherAssembly = 2, ContainedInManifestFile = 4, Embedded = 1, } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. namespace System.Reflection { [Flags] public enum ResourceLocation { ContainedInAnotherAssembly = 2, ContainedInManifestFile = 4, Embedded = 1, } }
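Since `ResourceLocation` is marked `[Flags]` with power-of-two values, its members are meant to be combined and tested bitwise. A small illustrative snippet (the local names are made up):

```csharp
using System;
using System.Reflection;

class ResourceLocationDemo
{
    static void Main()
    {
        // Combine flags: an embedded resource also contained in the manifest file.
        ResourceLocation loc =
            ResourceLocation.Embedded | ResourceLocation.ContainedInManifestFile;

        // Test a single flag without disturbing the others.
        bool embedded = (loc & ResourceLocation.Embedded) != 0;

        Console.WriteLine(loc);      // Embedded, ContainedInManifestFile
        Console.WriteLine(embedded); // True
    }
}
```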
-1
dotnet/runtime
65,899
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync
I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have merely exposed the problem by adding the finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I suspect (I was not able to repro it) that this is caused by the flush implemented for the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flushing should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I tried really hard to write a failing unit test, but failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base impl and is bug-free.
adamsitnik
"2022-02-25T17:17:25Z"
"2022-03-01T13:15:47Z"
097d9ea3c1584eb8745bd0a72ebf9cd3a31f1618
3cbed4b71800709a8121e50e964ae5b02bd80b94
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync. I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have merely exposed the problem by adding the finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I suspect (I was not able to repro it) that this is caused by the flush implemented for the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flushing should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I tried really hard to write a failing unit test, but failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base impl and is bug-free.
./src/tests/JIT/HardwareIntrinsics/Arm/AdvSimd/LeadingZeroCount.Vector64.Byte.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void LeadingZeroCount_Vector64_Byte() { var test = new SimpleUnaryOpTest__LeadingZeroCount_Vector64_Byte(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleUnaryOpTest__LeadingZeroCount_Vector64_Byte { private struct DataTable { private byte[] inArray1; private byte[] outArray; private GCHandle inHandle1; private GCHandle outHandle; private ulong alignment; public DataTable(Byte[] inArray1, Byte[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Byte>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Byte>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.outArray = new 
byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Byte, byte>(ref inArray1[0]), (uint)sizeOfinArray1); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector64<Byte> _fld1; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Byte>, byte>(ref testStruct._fld1), ref Unsafe.As<Byte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Byte>>()); return testStruct; } public void RunStructFldScenario(SimpleUnaryOpTest__LeadingZeroCount_Vector64_Byte testClass) { var result = AdvSimd.LeadingZeroCount(_fld1); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(SimpleUnaryOpTest__LeadingZeroCount_Vector64_Byte testClass) { fixed (Vector64<Byte>* pFld1 = &_fld1) { var result = AdvSimd.LeadingZeroCount( AdvSimd.LoadVector64((Byte*)(pFld1)) ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 8; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<Byte>>() / sizeof(Byte); private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<Byte>>() / sizeof(Byte); private static Byte[] _data1 = new Byte[Op1ElementCount]; private static Vector64<Byte> _clsVar1; private Vector64<Byte> _fld1; private DataTable _dataTable; static SimpleUnaryOpTest__LeadingZeroCount_Vector64_Byte() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Byte>, byte>(ref _clsVar1), ref Unsafe.As<Byte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Byte>>()); } public SimpleUnaryOpTest__LeadingZeroCount_Vector64_Byte() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Byte>, byte>(ref _fld1), ref Unsafe.As<Byte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Byte>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); } _dataTable = new DataTable(_data1, new Byte[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.LeadingZeroCount( Unsafe.Read<Vector64<Byte>>(_dataTable.inArray1Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.LeadingZeroCount( 
AdvSimd.LoadVector64((Byte*)(_dataTable.inArray1Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.LeadingZeroCount), new Type[] { typeof(Vector64<Byte>) }) .Invoke(null, new object[] { Unsafe.Read<Vector64<Byte>>(_dataTable.inArray1Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Byte>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.LeadingZeroCount), new Type[] { typeof(Vector64<Byte>) }) .Invoke(null, new object[] { AdvSimd.LoadVector64((Byte*)(_dataTable.inArray1Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Byte>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.LeadingZeroCount( _clsVar1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector64<Byte>* pClsVar1 = &_clsVar1) { var result = AdvSimd.LeadingZeroCount( AdvSimd.LoadVector64((Byte*)(pClsVar1)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector64<Byte>>(_dataTable.inArray1Ptr); var result = AdvSimd.LeadingZeroCount(op1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector64((Byte*)(_dataTable.inArray1Ptr)); var result = AdvSimd.LeadingZeroCount(op1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleUnaryOpTest__LeadingZeroCount_Vector64_Byte(); var result = AdvSimd.LeadingZeroCount(test._fld1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new SimpleUnaryOpTest__LeadingZeroCount_Vector64_Byte(); fixed (Vector64<Byte>* pFld1 = &test._fld1) { var result = AdvSimd.LeadingZeroCount( AdvSimd.LoadVector64((Byte*)(pFld1)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.LeadingZeroCount(_fld1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector64<Byte>* pFld1 = &_fld1) { var result = AdvSimd.LeadingZeroCount( AdvSimd.LoadVector64((Byte*)(pFld1)) ); 
Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.LeadingZeroCount(test._fld1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.LeadingZeroCount( AdvSimd.LoadVector64((Byte*)(&test._fld1)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector64<Byte> op1, void* result, [CallerMemberName] string method = "") { Byte[] inArray1 = new Byte[Op1ElementCount]; Byte[] outArray = new Byte[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Byte, byte>(ref inArray1[0]), op1); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Byte>>()); ValidateResult(inArray1, outArray, method); } private void ValidateResult(void* op1, void* result, [CallerMemberName] string method = "") { Byte[] inArray1 = new Byte[Op1ElementCount]; Byte[] outArray = new Byte[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector64<Byte>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Byte>>()); ValidateResult(inArray1, outArray, method); } private void ValidateResult(Byte[] firstOp, Byte[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.CountLeadingZeroBits(firstOp[i]) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.LeadingZeroCount)}<Byte>(Vector64<Byte>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
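The scalar oracle this LeadingZeroCount test validates against is `Helpers.CountLeadingZeroBits`. A plausible reference for the byte case, assuming the shared helper does the obvious top-down bit scan (a sketch, not the actual `Helpers` source):

```csharp
using System;

class LeadingZeroCountDemo
{
    // Count zero bits above the most significant set bit of a byte;
    // returns 8 for zero. Assumed equivalent of Helpers.CountLeadingZeroBits.
    static int CountLeadingZeroBits(byte value)
    {
        int count = 0;
        for (int bit = 7; bit >= 0 && ((value >> bit) & 1) == 0; bit--)
        {
            count++;
        }
        return count;
    }

    static void Main()
    {
        Console.WriteLine(CountLeadingZeroBits(0x00)); // 8
        Console.WriteLine(CountLeadingZeroBits(0x01)); // 7
        Console.WriteLine(CountLeadingZeroBits(0x80)); // 0
    }
}
```

The test then checks each lane of the AdvSimd result against this per-element scalar value.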
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. /****************************************************************************** * This file is auto-generated from a template file by the GenerateTests.csx * * script in tests\src\JIT\HardwareIntrinsics.Arm\Shared. In order to make * * changes, please update the corresponding template and run according to the * * directions listed in the file. * ******************************************************************************/ using System; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; using System.Runtime.Intrinsics; using System.Runtime.Intrinsics.Arm; namespace JIT.HardwareIntrinsics.Arm { public static partial class Program { private static void LeadingZeroCount_Vector64_Byte() { var test = new SimpleUnaryOpTest__LeadingZeroCount_Vector64_Byte(); if (test.IsSupported) { // Validates basic functionality works, using Unsafe.Read test.RunBasicScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates basic functionality works, using Load test.RunBasicScenario_Load(); } // Validates calling via reflection works, using Unsafe.Read test.RunReflectionScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates calling via reflection works, using Load test.RunReflectionScenario_Load(); } // Validates passing a static member works test.RunClsVarScenario(); if (AdvSimd.IsSupported) { // Validates passing a static member works, using pinning and Load test.RunClsVarScenario_Load(); } // Validates passing a local works, using Unsafe.Read test.RunLclVarScenario_UnsafeRead(); if (AdvSimd.IsSupported) { // Validates passing a local works, using Load test.RunLclVarScenario_Load(); } // Validates passing the field of a local class works test.RunClassLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local class works, using pinning and Load test.RunClassLclFldScenario_Load(); } // Validates passing an instance member of a class works test.RunClassFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a class works, using pinning and Load test.RunClassFldScenario_Load(); } // Validates passing the field of a local struct works test.RunStructLclFldScenario(); if (AdvSimd.IsSupported) { // Validates passing the field of a local struct works, using pinning and Load test.RunStructLclFldScenario_Load(); } // Validates passing an instance member of a struct works test.RunStructFldScenario(); if (AdvSimd.IsSupported) { // Validates passing an instance member of a struct works, using pinning and Load test.RunStructFldScenario_Load(); } } else { // Validates we throw on unsupported hardware test.RunUnsupportedScenario(); } if (!test.Succeeded) { throw new Exception("One or more scenarios did not complete as expected."); } } } public sealed unsafe class SimpleUnaryOpTest__LeadingZeroCount_Vector64_Byte { private struct DataTable { private byte[] inArray1; private byte[] outArray; private GCHandle inHandle1; private GCHandle outHandle; private ulong alignment; public DataTable(Byte[] inArray1, Byte[] outArray, int alignment) { int sizeOfinArray1 = inArray1.Length * Unsafe.SizeOf<Byte>(); int sizeOfoutArray = outArray.Length * Unsafe.SizeOf<Byte>(); if ((alignment != 16 && alignment != 8) || (alignment * 2) < sizeOfinArray1 || (alignment * 2) < sizeOfoutArray) { throw new ArgumentException("Invalid value of alignment"); } this.inArray1 = new byte[alignment * 2]; this.outArray = new 
byte[alignment * 2]; this.inHandle1 = GCHandle.Alloc(this.inArray1, GCHandleType.Pinned); this.outHandle = GCHandle.Alloc(this.outArray, GCHandleType.Pinned); this.alignment = (ulong)alignment; Unsafe.CopyBlockUnaligned(ref Unsafe.AsRef<byte>(inArray1Ptr), ref Unsafe.As<Byte, byte>(ref inArray1[0]), (uint)sizeOfinArray1); } public void* inArray1Ptr => Align((byte*)(inHandle1.AddrOfPinnedObject().ToPointer()), alignment); public void* outArrayPtr => Align((byte*)(outHandle.AddrOfPinnedObject().ToPointer()), alignment); public void Dispose() { inHandle1.Free(); outHandle.Free(); } private static unsafe void* Align(byte* buffer, ulong expectedAlignment) { return (void*)(((ulong)buffer + expectedAlignment - 1) & ~(expectedAlignment - 1)); } } private struct TestStruct { public Vector64<Byte> _fld1; public static TestStruct Create() { var testStruct = new TestStruct(); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Byte>, byte>(ref testStruct._fld1), ref Unsafe.As<Byte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Byte>>()); return testStruct; } public void RunStructFldScenario(SimpleUnaryOpTest__LeadingZeroCount_Vector64_Byte testClass) { var result = AdvSimd.LeadingZeroCount(_fld1); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr); } public void RunStructFldScenario_Load(SimpleUnaryOpTest__LeadingZeroCount_Vector64_Byte testClass) { fixed (Vector64<Byte>* pFld1 = &_fld1) { var result = AdvSimd.LeadingZeroCount( AdvSimd.LoadVector64((Byte*)(pFld1)) ); Unsafe.Write(testClass._dataTable.outArrayPtr, result); testClass.ValidateResult(_fld1, testClass._dataTable.outArrayPtr); } } } private static readonly int LargestVectorSize = 8; private static readonly int Op1ElementCount = Unsafe.SizeOf<Vector64<Byte>>() / sizeof(Byte); private static readonly int RetElementCount = Unsafe.SizeOf<Vector64<Byte>>() / sizeof(Byte); private static Byte[] _data1 = new Byte[Op1ElementCount]; private static Vector64<Byte> _clsVar1; private Vector64<Byte> _fld1; private DataTable _dataTable; static SimpleUnaryOpTest__LeadingZeroCount_Vector64_Byte() { for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Byte>, byte>(ref _clsVar1), ref Unsafe.As<Byte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Byte>>()); } public SimpleUnaryOpTest__LeadingZeroCount_Vector64_Byte() { Succeeded = true; for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); } Unsafe.CopyBlockUnaligned(ref Unsafe.As<Vector64<Byte>, byte>(ref _fld1), ref Unsafe.As<Byte, byte>(ref _data1[0]), (uint)Unsafe.SizeOf<Vector64<Byte>>()); for (var i = 0; i < Op1ElementCount; i++) { _data1[i] = TestLibrary.Generator.GetByte(); } _dataTable = new DataTable(_data1, new Byte[RetElementCount], LargestVectorSize); } public bool IsSupported => AdvSimd.IsSupported; public bool Succeeded { get; set; } public void RunBasicScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_UnsafeRead)); var result = AdvSimd.LeadingZeroCount( Unsafe.Read<Vector64<Byte>>(_dataTable.inArray1Ptr) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunBasicScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunBasicScenario_Load)); var result = AdvSimd.LeadingZeroCount( 
AdvSimd.LoadVector64((Byte*)(_dataTable.inArray1Ptr)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_UnsafeRead)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.LeadingZeroCount), new Type[] { typeof(Vector64<Byte>) }) .Invoke(null, new object[] { Unsafe.Read<Vector64<Byte>>(_dataTable.inArray1Ptr) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Byte>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunReflectionScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunReflectionScenario_Load)); var result = typeof(AdvSimd).GetMethod(nameof(AdvSimd.LeadingZeroCount), new Type[] { typeof(Vector64<Byte>) }) .Invoke(null, new object[] { AdvSimd.LoadVector64((Byte*)(_dataTable.inArray1Ptr)) }); Unsafe.Write(_dataTable.outArrayPtr, (Vector64<Byte>)(result)); ValidateResult(_dataTable.inArray1Ptr, _dataTable.outArrayPtr); } public void RunClsVarScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario)); var result = AdvSimd.LeadingZeroCount( _clsVar1 ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _dataTable.outArrayPtr); } public void RunClsVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClsVarScenario_Load)); fixed (Vector64<Byte>* pClsVar1 = &_clsVar1) { var result = AdvSimd.LeadingZeroCount( AdvSimd.LoadVector64((Byte*)(pClsVar1)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_clsVar1, _dataTable.outArrayPtr); } } public void RunLclVarScenario_UnsafeRead() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_UnsafeRead)); var op1 = Unsafe.Read<Vector64<Byte>>(_dataTable.inArray1Ptr); var result = AdvSimd.LeadingZeroCount(op1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, _dataTable.outArrayPtr); } public void RunLclVarScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunLclVarScenario_Load)); var op1 = AdvSimd.LoadVector64((Byte*)(_dataTable.inArray1Ptr)); var result = AdvSimd.LeadingZeroCount(op1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(op1, _dataTable.outArrayPtr); } public void RunClassLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario)); var test = new SimpleUnaryOpTest__LeadingZeroCount_Vector64_Byte(); var result = AdvSimd.LeadingZeroCount(test._fld1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunClassLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassLclFldScenario_Load)); var test = new SimpleUnaryOpTest__LeadingZeroCount_Vector64_Byte(); fixed (Vector64<Byte>* pFld1 = &test._fld1) { var result = AdvSimd.LeadingZeroCount( AdvSimd.LoadVector64((Byte*)(pFld1)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, _dataTable.outArrayPtr); } } public void RunClassFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario)); var result = AdvSimd.LeadingZeroCount(_fld1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _dataTable.outArrayPtr); } public void RunClassFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunClassFldScenario_Load)); fixed (Vector64<Byte>* pFld1 = &_fld1) { var result = AdvSimd.LeadingZeroCount( AdvSimd.LoadVector64((Byte*)(pFld1)) ); 
Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(_fld1, _dataTable.outArrayPtr); } } public void RunStructLclFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario)); var test = TestStruct.Create(); var result = AdvSimd.LeadingZeroCount(test._fld1); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunStructLclFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructLclFldScenario_Load)); var test = TestStruct.Create(); var result = AdvSimd.LeadingZeroCount( AdvSimd.LoadVector64((Byte*)(&test._fld1)) ); Unsafe.Write(_dataTable.outArrayPtr, result); ValidateResult(test._fld1, _dataTable.outArrayPtr); } public void RunStructFldScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario)); var test = TestStruct.Create(); test.RunStructFldScenario(this); } public void RunStructFldScenario_Load() { TestLibrary.TestFramework.BeginScenario(nameof(RunStructFldScenario_Load)); var test = TestStruct.Create(); test.RunStructFldScenario_Load(this); } public void RunUnsupportedScenario() { TestLibrary.TestFramework.BeginScenario(nameof(RunUnsupportedScenario)); bool succeeded = false; try { RunBasicScenario_UnsafeRead(); } catch (PlatformNotSupportedException) { succeeded = true; } if (!succeeded) { Succeeded = false; } } private void ValidateResult(Vector64<Byte> op1, void* result, [CallerMemberName] string method = "") { Byte[] inArray1 = new Byte[Op1ElementCount]; Byte[] outArray = new Byte[RetElementCount]; Unsafe.WriteUnaligned(ref Unsafe.As<Byte, byte>(ref inArray1[0]), op1); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Byte>>()); ValidateResult(inArray1, outArray, method); } private void ValidateResult(void* op1, void* result, [CallerMemberName] string method = "") { Byte[] inArray1 = new Byte[Op1ElementCount]; Byte[] outArray = new Byte[RetElementCount]; Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref inArray1[0]), ref Unsafe.AsRef<byte>(op1), (uint)Unsafe.SizeOf<Vector64<Byte>>()); Unsafe.CopyBlockUnaligned(ref Unsafe.As<Byte, byte>(ref outArray[0]), ref Unsafe.AsRef<byte>(result), (uint)Unsafe.SizeOf<Vector64<Byte>>()); ValidateResult(inArray1, outArray, method); } private void ValidateResult(Byte[] firstOp, Byte[] result, [CallerMemberName] string method = "") { bool succeeded = true; for (var i = 0; i < RetElementCount; i++) { if (Helpers.CountLeadingZeroBits(firstOp[i]) != result[i]) { succeeded = false; break; } } if (!succeeded) { TestLibrary.TestFramework.LogInformation($"{nameof(AdvSimd)}.{nameof(AdvSimd.LeadingZeroCount)}<Byte>(Vector64<Byte>): {method} failed:"); TestLibrary.TestFramework.LogInformation($" firstOp: ({string.Join(", ", firstOp)})"); TestLibrary.TestFramework.LogInformation($" result: ({string.Join(", ", result)})"); TestLibrary.TestFramework.LogInformation(string.Empty); Succeeded = false; } } } }
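The `DataTable.Align` helper in the test file above rounds a pinned buffer pointer up to the requested power-of-two alignment. A minimal standalone sketch of that align-up trick (the `AlignUp` name and sample addresses are mine, not from the test):

```csharp
using System;

// Align-up: round an address to the next multiple of a power-of-two alignment,
// the same `(p + a - 1) & ~(a - 1)` expression used by DataTable.Align above.
static ulong AlignUp(ulong address, ulong alignment)
    => (address + alignment - 1) & ~(alignment - 1);

Console.WriteLine(AlignUp(13, 8)); // 16 (rounds up to the next multiple of 8)
Console.WriteLine(AlignUp(16, 8)); // 16 (already aligned, unchanged)
```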
-1
dotnet/runtime
65,899
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync
I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have just exposed the problem by adding the finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I suspect (I was not able to repro it) that it's caused by the flush implemented for the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flushing should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I've tried really hard to write a failing unit test, but I've failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base impl and is bug-free.
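A minimal sketch of the pattern the description above is about (a hypothetical `BufferedResource` type, not the actual `FileStream` code): a `DisposeAsync` override has to call `GC.SuppressFinalize(this)` itself, because it no longer flows through `Stream.Close`, which would otherwise do it.

```csharp
using System;
using System.Threading.Tasks;

// Hypothetical type illustrating the bug pattern; not the real FileStream.
public sealed class BufferedResource : IDisposable, IAsyncDisposable
{
    private bool _disposed;

    // Safety-net finalizer, analogous to the one #64997 added to FileStream.
    ~BufferedResource() => DisposeCore();

    public void Dispose()
    {
        DisposeCore();
        GC.SuppressFinalize(this); // the sync path suppresses the finalizer
    }

    public async ValueTask DisposeAsync()
    {
        await FlushAsync().ConfigureAwait(false);
        DisposeCore();
        GC.SuppressFinalize(this); // without this line, the finalizer can still run later
    }

    private void DisposeCore()
    {
        if (_disposed) return;
        _disposed = true;
        // close handles / return buffers here
    }

    private Task FlushAsync() => Task.CompletedTask; // stand-in for a real flush
}
```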
adamsitnik
"2022-02-25T17:17:25Z"
"2022-03-01T13:15:47Z"
097d9ea3c1584eb8745bd0a72ebf9cd3a31f1618
3cbed4b71800709a8121e50e964ae5b02bd80b94
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync. I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have just exposed the problem by adding the finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I suspect (I was not able to repro it) that it's caused by the flush implemented for the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flushing should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I've tried really hard to write a failing unit test, but I've failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base impl and is bug-free.
./src/tests/BuildWasmApps/Wasm.Build.Tests/data/nuget6.config
<?xml version="1.0" encoding="utf-8"?> <configuration> <!-- Don't use any higher level config files. --> <fallbackPackageFolders> <clear /> <add key="local" value=".nuget" /> </fallbackPackageFolders> <packageSources> <clear /> <add key="dotnet6" value="https://pkgs.dev.azure.com/dnceng/public/_packaging/dotnet6/nuget/v3/index.json" /> <add key="nuget.org" value="https://api.nuget.org/v3/index.json" protocolVersion="3" /> </packageSources> <disabledPackageSources> <clear /> </disabledPackageSources> </configuration>
<?xml version="1.0" encoding="utf-8"?> <configuration> <!-- Don't use any higher level config files. --> <fallbackPackageFolders> <clear /> <add key="local" value=".nuget" /> </fallbackPackageFolders> <packageSources> <clear /> <add key="dotnet6" value="https://pkgs.dev.azure.com/dnceng/public/_packaging/dotnet6/nuget/v3/index.json" /> <add key="nuget.org" value="https://api.nuget.org/v3/index.json" protocolVersion="3" /> </packageSources> <disabledPackageSources> <clear /> </disabledPackageSources> </configuration>
-1
dotnet/runtime
65,899
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync
I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have just exposed the problem by adding the finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I suspect (I was not able to repro it) that it's caused by the flush implemented for the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flushing should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I've tried really hard to write a failing unit test, but I've failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base impl and is bug-free.
adamsitnik
"2022-02-25T17:17:25Z"
"2022-03-01T13:15:47Z"
097d9ea3c1584eb8745bd0a72ebf9cd3a31f1618
3cbed4b71800709a8121e50e964ae5b02bd80b94
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync. I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have just exposed the problem by adding the finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I suspect (I was not able to repro it) that it's caused by the flush implemented for the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flushing should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I've tried really hard to write a failing unit test, but I've failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base impl and is bug-free.
./src/libraries/System.Text.Json/tests/System.Text.Json.FSharp.Tests/RecordTests.fs
module System.Text.Json.Tests.FSharp.RecordTests open System.Text.Json open System.Text.Json.Serialization open System.Text.Json.Tests.FSharp.Helpers open Xunit type MyRecord = { Name : string MiddleName : string option LastName : string Age : int IsActive : bool } with static member Value = { Name = "John" ; MiddleName = None ; LastName = "Doe" ; Age = 34 ; IsActive = true } static member ExpectedJson = """{"Name":"John","MiddleName":null,"LastName":"Doe","Age":34,"IsActive":true}""" [<Fact>] let ``Support F# record serialization``() = let actualJson = JsonSerializer.Serialize(MyRecord.Value) Assert.Equal(MyRecord.ExpectedJson, actualJson) [<Fact>] let ``Support F# record deserialization``() = let result = JsonSerializer.Deserialize<MyRecord>(MyRecord.ExpectedJson) Assert.Equal(MyRecord.Value, result) [<Struct>] type MyStructRecord = { Name : string MiddleName : string option LastName : string Age : int IsActive : bool } with static member Value = { Name = "John" ; MiddleName = None ; LastName = "Doe" ; Age = 34 ; IsActive = true } static member ExpectedJson = """{"Name":"John","MiddleName":null,"LastName":"Doe","Age":34,"IsActive":true}""" [<Fact>] let ``Support F# struct record serialization``() = let actualJson = JsonSerializer.Serialize(MyStructRecord.Value) Assert.Equal(MyStructRecord.ExpectedJson, actualJson) [<Fact>] let ``Support F# struct record deserialization``() = let result = JsonSerializer.Deserialize<MyStructRecord>(MyStructRecord.ExpectedJson) Assert.Equal(MyStructRecord.Value, result)
module System.Text.Json.Tests.FSharp.RecordTests open System.Text.Json open System.Text.Json.Serialization open System.Text.Json.Tests.FSharp.Helpers open Xunit type MyRecord = { Name : string MiddleName : string option LastName : string Age : int IsActive : bool } with static member Value = { Name = "John" ; MiddleName = None ; LastName = "Doe" ; Age = 34 ; IsActive = true } static member ExpectedJson = """{"Name":"John","MiddleName":null,"LastName":"Doe","Age":34,"IsActive":true}""" [<Fact>] let ``Support F# record serialization``() = let actualJson = JsonSerializer.Serialize(MyRecord.Value) Assert.Equal(MyRecord.ExpectedJson, actualJson) [<Fact>] let ``Support F# record deserialization``() = let result = JsonSerializer.Deserialize<MyRecord>(MyRecord.ExpectedJson) Assert.Equal(MyRecord.Value, result) [<Struct>] type MyStructRecord = { Name : string MiddleName : string option LastName : string Age : int IsActive : bool } with static member Value = { Name = "John" ; MiddleName = None ; LastName = "Doe" ; Age = 34 ; IsActive = true } static member ExpectedJson = """{"Name":"John","MiddleName":null,"LastName":"Doe","Age":34,"IsActive":true}""" [<Fact>] let ``Support F# struct record serialization``() = let actualJson = JsonSerializer.Serialize(MyStructRecord.Value) Assert.Equal(MyStructRecord.ExpectedJson, actualJson) [<Fact>] let ``Support F# struct record deserialization``() = let result = JsonSerializer.Deserialize<MyStructRecord>(MyStructRecord.ExpectedJson) Assert.Equal(MyStructRecord.Value, result)
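For comparison, a C# sketch of the same round-trip the F# tests above assert (my own `Person` record, not part of the test file); an F# `option` field that is `None` serializes to `null`, just as a C# nullable reference does.

```csharp
using System;
using System.Text.Json;

// C# analogue of the F# record round-trip above.
public record Person(string Name, string? MiddleName, string LastName, int Age, bool IsActive);

public static class RecordRoundTrip
{
    public static void Main()
    {
        var value = new Person("John", null, "Doe", 34, true);
        string json = JsonSerializer.Serialize(value);
        // {"Name":"John","MiddleName":null,"LastName":"Doe","Age":34,"IsActive":true}
        Person roundTripped = JsonSerializer.Deserialize<Person>(json)!;
        Console.WriteLine(value == roundTripped); // True: records compare by value
    }
}
```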
-1
dotnet/runtime
65,899
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync
I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have just exposed the problem by adding the finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I suspect (I was not able to repro it) that it's caused by the flush implemented for the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flushing should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I've tried really hard to write a failing unit test, but I've failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base impl and is bug-free.
adamsitnik
"2022-02-25T17:17:25Z"
"2022-03-01T13:15:47Z"
097d9ea3c1584eb8745bd0a72ebf9cd3a31f1618
3cbed4b71800709a8121e50e964ae5b02bd80b94
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync. I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have just exposed the problem by adding the finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I suspect (I was not able to repro it) that it's caused by the flush implemented for the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flushing should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I've tried really hard to write a failing unit test, but I've failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base impl and is bug-free.
./src/libraries/System.Memory/tests/ParsersAndFormatters/Parser/ParserTests.Negative.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using Xunit; namespace System.Buffers.Text.Tests { public static partial class ParserTests { [Theory] [MemberData(nameof(TestData.TypesThatCanBeParsed), MemberType = typeof(TestData))] public static void TestParserBadFormat(Type type) { Assert.Throws<FormatException>(() => TryParseUtf8(type, Array.Empty<byte>(), out object value, out int bytesConsumed, '$')); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using Xunit; namespace System.Buffers.Text.Tests { public static partial class ParserTests { [Theory] [MemberData(nameof(TestData.TypesThatCanBeParsed), MemberType = typeof(TestData))] public static void TestParserBadFormat(Type type) { Assert.Throws<FormatException>(() => TryParseUtf8(type, Array.Empty<byte>(), out object value, out int bytesConsumed, '$')); } } }
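The negative test above drives a shared `TryParseUtf8` helper; the behavior it asserts is that the UTF-8 parser rejects an unsupported standard-format character by throwing rather than returning false. A hedged sketch of the direct call for one of the parsed types (`int`):

```csharp
using System;
using System.Buffers.Text;

// '$' is not a valid standard format character for Utf8Parser, so the call
// throws FormatException instead of returning false.
try
{
    Utf8Parser.TryParse(ReadOnlySpan<byte>.Empty, out int value, out int bytesConsumed, '$');
}
catch (FormatException)
{
    Console.WriteLine("unsupported standard format character");
}
```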
-1
dotnet/runtime
65,899
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync
I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have just exposed the problem by adding the finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I suspect (I was not able to repro it) that it's caused by the flush implemented for the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flushing should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I've tried really hard to write a failing unit test, but I've failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base impl and is bug-free.
adamsitnik
"2022-02-25T17:17:25Z"
"2022-03-01T13:15:47Z"
097d9ea3c1584eb8745bd0a72ebf9cd3a31f1618
3cbed4b71800709a8121e50e964ae5b02bd80b94
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync. I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have just exposed the problem by adding the finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I suspect (I was not able to repro it) that it's caused by the flush implemented for the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flushing should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I've tried really hard to write a failing unit test, but I've failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base impl and is bug-free.
./src/libraries/System.Private.CoreLib/src/System/Collections/Concurrent/IProducerConsumerCollection.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Diagnostics.CodeAnalysis; namespace System.Collections.Concurrent { /// <summary> /// A common interface for all concurrent collections. /// Defines methods to manipulate thread-safe collections intended for producer/consumer usage. /// </summary> /// <typeparam name="T">Specifies the type of elements in the collection.</typeparam> /// <remarks> /// All implementations of this interface must enable all members of this interface /// to be used concurrently from multiple threads. /// </remarks> public interface IProducerConsumerCollection<T> : IEnumerable<T>, ICollection { /// <summary> /// Copies the elements of the <see cref="IProducerConsumerCollection{T}"/> to /// an /// <see cref="System.Array"/>, starting at a specified index. /// </summary> /// <param name="array">The one-dimensional <see cref="System.Array"/> that is the destination of /// the elements copied from the <see cref="IProducerConsumerCollection{T}"/>. /// The array must have zero-based indexing.</param> /// <param name="index">The zero-based index in <paramref name="array"/> at which copying /// begins.</param> /// <exception cref="ArgumentNullException"><paramref name="array"/> is a null reference (Nothing in /// Visual Basic).</exception> /// <exception cref="ArgumentOutOfRangeException"><paramref name="index"/> is less than /// zero.</exception> /// <exception cref="ArgumentException"><paramref name="index"/> is equal to or greater than the /// length of the <paramref name="array"/> /// -or- The number of elements in the source <see cref="ConcurrentQueue{T}"/> is greater than the /// available space from <paramref name="index"/> to the end of the destination <paramref /// name="array"/>. /// </exception> void CopyTo(T[] array, int index); /// <summary> /// Attempts to add an object to the <see /// cref="IProducerConsumerCollection{T}"/>. /// </summary> /// <param name="item">The object to add to the <see /// cref="IProducerConsumerCollection{T}"/>.</param> /// <returns>true if the object was added successfully; otherwise, false.</returns> /// <exception cref="System.ArgumentException">The <paramref name="item"/> was invalid for this collection.</exception> bool TryAdd(T item); /// <summary> /// Attempts to remove and return an object from the <see cref="IProducerConsumerCollection{T}"/>. /// </summary> /// <param name="item"> /// When this method returns, if the object was removed and returned successfully, <paramref /// name="item"/> contains the removed object. If no object was available to be removed, the value is /// unspecified. /// </param> /// <returns>true if an object was removed and returned successfully; otherwise, false.</returns> bool TryTake([MaybeNullWhen(false)] out T item); /// <summary> /// Copies the elements contained in the <see cref="IProducerConsumerCollection{T}"/> to a new array. /// </summary> /// <returns>A new array containing the elements copied from the <see cref="IProducerConsumerCollection{T}"/>.</returns> T[] ToArray(); } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Diagnostics.CodeAnalysis; namespace System.Collections.Concurrent { /// <summary> /// A common interface for all concurrent collections. /// Defines methods to manipulate thread-safe collections intended for producer/consumer usage. /// </summary> /// <typeparam name="T">Specifies the type of elements in the collection.</typeparam> /// <remarks> /// All implementations of this interface must enable all members of this interface /// to be used concurrently from multiple threads. /// </remarks> public interface IProducerConsumerCollection<T> : IEnumerable<T>, ICollection { /// <summary> /// Copies the elements of the <see cref="IProducerConsumerCollection{T}"/> to /// an /// <see cref="System.Array"/>, starting at a specified index. /// </summary> /// <param name="array">The one-dimensional <see cref="System.Array"/> that is the destination of /// the elements copied from the <see cref="IProducerConsumerCollection{T}"/>. /// The array must have zero-based indexing.</param> /// <param name="index">The zero-based index in <paramref name="array"/> at which copying /// begins.</param> /// <exception cref="ArgumentNullException"><paramref name="array"/> is a null reference (Nothing in /// Visual Basic).</exception> /// <exception cref="ArgumentOutOfRangeException"><paramref name="index"/> is less than /// zero.</exception> /// <exception cref="ArgumentException"><paramref name="index"/> is equal to or greater than the /// length of the <paramref name="array"/> /// -or- The number of elements in the source <see cref="ConcurrentQueue{T}"/> is greater than the /// available space from <paramref name="index"/> to the end of the destination <paramref /// name="array"/>. /// </exception> void CopyTo(T[] array, int index); /// <summary> /// Attempts to add an object to the <see /// cref="IProducerConsumerCollection{T}"/>. /// </summary> /// <param name="item">The object to add to the <see /// cref="IProducerConsumerCollection{T}"/>.</param> /// <returns>true if the object was added successfully; otherwise, false.</returns> /// <exception cref="System.ArgumentException">The <paramref name="item"/> was invalid for this collection.</exception> bool TryAdd(T item); /// <summary> /// Attempts to remove and return an object from the <see cref="IProducerConsumerCollection{T}"/>. /// </summary> /// <param name="item"> /// When this method returns, if the object was removed and returned successfully, <paramref /// name="item"/> contains the removed object. If no object was available to be removed, the value is /// unspecified. /// </param> /// <returns>true if an object was removed and returned successfully; otherwise, false.</returns> bool TryTake([MaybeNullWhen(false)] out T item); /// <summary> /// Copies the elements contained in the <see cref="IProducerConsumerCollection{T}"/> to a new array. /// </summary> /// <returns>A new array containing the elements copied from the <see cref="IProducerConsumerCollection{T}"/>.</returns> T[] ToArray(); } }
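A short usage sketch of the interface documented above, exercised through `ConcurrentQueue<T>` (one of the concrete implementations in this namespace):

```csharp
using System;
using System.Collections.Concurrent;

IProducerConsumerCollection<int> collection = new ConcurrentQueue<int>();

collection.TryAdd(42);                 // always succeeds for a queue
if (collection.TryTake(out int item))  // removes and returns 42 (FIFO order)
    Console.WriteLine(item);

int[] snapshot = collection.ToArray(); // moment-in-time copy; empty again here
Console.WriteLine(snapshot.Length);    // 0
```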
-1
dotnet/runtime
65,899
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync
I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have just exposed the problem by adding the finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I suspect (I was not able to repro it) that it's caused by the flush implemented for the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flushing should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I've tried really hard to write a failing unit test, but I've failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base impl and is bug-free.
adamsitnik
"2022-02-25T17:17:25Z"
"2022-03-01T13:15:47Z"
097d9ea3c1584eb8745bd0a72ebf9cd3a31f1618
3cbed4b71800709a8121e50e964ae5b02bd80b94
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync. I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have just exposed the problem by adding the finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I suspect (I was not able to repro it) that it's caused by the flush implemented for the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flushing should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I've tried really hard to write a failing unit test, but I've failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base impl and is bug-free.
./src/libraries/System.Text.Encoding/tests/Encoding/Encoding.cs
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Text; using Xunit; namespace System.Text.Encodings.Tests { public class EncodingMiscTests { public static IEnumerable<object[]> Encoding_TestData() { // CodePage Name BodyName HeaderName IsBrowserDisplay IsBrowserSave IsMailNewsDisplay IsMailNewsSave WindowsCodePage yield return new object[] { 20127, "us-ascii", "us-ascii", "us-ascii", false, false, true, true, 1252 }; yield return new object[] { 28591, "iso-8859-1", "iso-8859-1", "iso-8859-1", true, true, true, true, 1252 }; yield return new object[] { 65000, "utf-7", "utf-7", "utf-7", false, false, true, true, 1200 }; yield return new object[] { 65001, "utf-8", "utf-8", "utf-8", true, true, true, true, 1200 }; yield return new object[] { 1200, "utf-16", "utf-16", "utf-16", false, true, false, false, 1200 }; yield return new object[] { 1201, "utf-16BE", "utf-16BE", "utf-16BE", false, false, false, false, 1200 }; yield return new object[] { 12000, "utf-32", "utf-32", "utf-32", false, false, false, false, 1200 }; yield return new object[] { 12001, "utf-32BE", "utf-32BE", "utf-32BE", false, false, false, false, 1200 }; } public static IEnumerable<object[]> Normalization_TestData() { // codepage isNormalized IsNormalized(FormC) IsNormalized(FormD) IsNormalized(FormKC) IsNormalized(FormKD) /* us-ascii */ yield return new object[] { 20127, false, false, false, false, false }; /* iso-8859-1 */ yield return new object[] { 28591, true, true, false, false, false }; /* utf-7 */ yield return new object[] { 65000, false, false, false, false, false }; /* utf-8 */ yield return new object[] { 65001, false, false, false, false, false }; /* utf-16 */ yield return new object[] { 1200, false, false, false, false, false }; /* utf-16BE */ yield return new object[] { 1201, false, false, false, false, false }; /* utf-32 */ yield return new object[] { 12000, false, false, false, false, false }; /* utf-32BE */ yield return new object[] { 12001, false, false, false, false, false }; } [Fact] public static void DefaultEncodingTest() { Encoding enc = (Encoding) Encoding.Default.Clone(); Assert.Equal(enc.WebName, Encoding.Default.WebName); Assert.Equal(enc.GetBytes("Some string"), Encoding.Default.GetBytes("Some string")); } [Fact] public static void DefaultEncodingBOMTest() { UTF8Encoding defaultEncoding = Encoding.Default as UTF8Encoding; Assert.True(defaultEncoding != null); Assert.Equal(0, defaultEncoding.GetPreamble().Length); } [Fact] public static void GetEncodingsTest() { EncodingInfo [] encodingList = Encoding.GetEncodings(); foreach (var info in encodingList) { Encoding encoding = Encoding.GetEncoding(info.CodePage); Assert.Equal(encoding, info.GetEncoding()); Assert.Equal(encoding.WebName, info.Name); Assert.False(string.IsNullOrEmpty(info.DisplayName)); } } [Theory] [MemberData(nameof(Encoding_TestData))] public static void VerifyCodePageAttributes(int codepage, string name, string bodyName, string headerName, bool isBrowserDisplay, bool isBrowserSave, bool isMailNewsDisplay, bool isMailNewsSave, int windowsCodePage) { Encoding encoding = Encoding.GetEncoding(codepage); Assert.Equal(name, encoding.WebName); Assert.Equal(bodyName, encoding.BodyName); Assert.Equal(headerName, encoding.HeaderName); Assert.Equal(isBrowserDisplay, encoding.IsBrowserDisplay); Assert.Equal(isBrowserSave, encoding.IsBrowserSave); Assert.Equal(isMailNewsDisplay, encoding.IsMailNewsDisplay); 
Assert.Equal(isMailNewsSave, encoding.IsMailNewsSave); Assert.Equal(windowsCodePage, encoding.WindowsCodePage); } [Theory] [MemberData(nameof(Normalization_TestData))] public static void NormalizationTest(int codepage, bool normalized, bool normalizedC, bool normalizedD, bool normalizedKC, bool normalizedKD) { Encoding encoding = Encoding.GetEncoding(codepage); Assert.True(encoding.IsReadOnly); Assert.Equal(normalized, encoding.IsAlwaysNormalized()); Assert.Equal(normalizedC, encoding.IsAlwaysNormalized(NormalizationForm.FormC)); Assert.Equal(normalizedD, encoding.IsAlwaysNormalized(NormalizationForm.FormD)); Assert.Equal(normalizedKC, encoding.IsAlwaysNormalized(NormalizationForm.FormKC)); Assert.Equal(normalizedKD, encoding.IsAlwaysNormalized(NormalizationForm.FormKD)); } } }
// Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. using System.Collections.Generic; using System.Text; using Xunit; namespace System.Text.Encodings.Tests { public class EncodingMiscTests { public static IEnumerable<object[]> Encoding_TestData() { // CodePage Name BodyName HeaderName IsBrowserDisplay IsBrowserSave IsMailNewsDisplay IsMailNewsSave WindowsCodePage yield return new object[] { 20127, "us-ascii", "us-ascii", "us-ascii", false, false, true, true, 1252 }; yield return new object[] { 28591, "iso-8859-1", "iso-8859-1", "iso-8859-1", true, true, true, true, 1252 }; yield return new object[] { 65000, "utf-7", "utf-7", "utf-7", false, false, true, true, 1200 }; yield return new object[] { 65001, "utf-8", "utf-8", "utf-8", true, true, true, true, 1200 }; yield return new object[] { 1200, "utf-16", "utf-16", "utf-16", false, true, false, false, 1200 }; yield return new object[] { 1201, "utf-16BE", "utf-16BE", "utf-16BE", false, false, false, false, 1200 }; yield return new object[] { 12000, "utf-32", "utf-32", "utf-32", false, false, false, false, 1200 }; yield return new object[] { 12001, "utf-32BE", "utf-32BE", "utf-32BE", false, false, false, false, 1200 }; } public static IEnumerable<object[]> Normalization_TestData() { // codepage isNormalized IsNormalized(FormC) IsNormalized(FormD) IsNormalized(FormKC) IsNormalized(FormKD) /* us-ascii */ yield return new object[] { 20127, false, false, false, false, false }; /* iso-8859-1 */ yield return new object[] { 28591, true, true, false, false, false }; /* utf-7 */ yield return new object[] { 65000, false, false, false, false, false }; /* utf-8 */ yield return new object[] { 65001, false, false, false, false, false }; /* utf-16 */ yield return new object[] { 1200, false, false, false, false, false }; /* utf-16BE */ yield return new object[] { 1201, false, false, false, false, false }; /* utf-32 */ yield return new object[] { 12000, false, false, false, false, false }; /* utf-32BE */ yield return new object[] { 12001, false, false, false, false, false }; } [Fact] public static void DefaultEncodingTest() { Encoding enc = (Encoding) Encoding.Default.Clone(); Assert.Equal(enc.WebName, Encoding.Default.WebName); Assert.Equal(enc.GetBytes("Some string"), Encoding.Default.GetBytes("Some string")); } [Fact] public static void DefaultEncodingBOMTest() { UTF8Encoding defaultEncoding = Encoding.Default as UTF8Encoding; Assert.True(defaultEncoding != null); Assert.Equal(0, defaultEncoding.GetPreamble().Length); } [Fact] public static void GetEncodingsTest() { EncodingInfo [] encodingList = Encoding.GetEncodings(); foreach (var info in encodingList) { Encoding encoding = Encoding.GetEncoding(info.CodePage); Assert.Equal(encoding, info.GetEncoding()); Assert.Equal(encoding.WebName, info.Name); Assert.False(string.IsNullOrEmpty(info.DisplayName)); } } [Theory] [MemberData(nameof(Encoding_TestData))] public static void VerifyCodePageAttributes(int codepage, string name, string bodyName, string headerName, bool isBrowserDisplay, bool isBrowserSave, bool isMailNewsDisplay, bool isMailNewsSave, int windowsCodePage) { Encoding encoding = Encoding.GetEncoding(codepage); Assert.Equal(name, encoding.WebName); Assert.Equal(bodyName, encoding.BodyName); Assert.Equal(headerName, encoding.HeaderName); Assert.Equal(isBrowserDisplay, encoding.IsBrowserDisplay); Assert.Equal(isBrowserSave, encoding.IsBrowserSave); Assert.Equal(isMailNewsDisplay, encoding.IsMailNewsDisplay); 
Assert.Equal(isMailNewsSave, encoding.IsMailNewsSave); Assert.Equal(windowsCodePage, encoding.WindowsCodePage); } [Theory] [MemberData(nameof(Normalization_TestData))] public static void NormalizationTest(int codepage, bool normalized, bool normalizedC, bool normalizedD, bool normalizedKC, bool normalizedKD) { Encoding encoding = Encoding.GetEncoding(codepage); Assert.True(encoding.IsReadOnly); Assert.Equal(normalized, encoding.IsAlwaysNormalized()); Assert.Equal(normalizedC, encoding.IsAlwaysNormalized(NormalizationForm.FormC)); Assert.Equal(normalizedD, encoding.IsAlwaysNormalized(NormalizationForm.FormD)); Assert.Equal(normalizedKC, encoding.IsAlwaysNormalized(NormalizationForm.FormKC)); Assert.Equal(normalizedKD, encoding.IsAlwaysNormalized(NormalizationForm.FormKD)); } } }
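A quick usage sketch of the attributes the table-driven test above asserts, using the UTF-8 (code page 65001) row of its test data:

```csharp
using System;
using System.Text;

// Fetch an encoding by code page and read the attributes the test checks.
Encoding utf8 = Encoding.GetEncoding(65001);

Console.WriteLine(utf8.WebName);          // "utf-8"
Console.WriteLine(utf8.IsBrowserDisplay); // True  (per the 65001 row above)
Console.WriteLine(utf8.WindowsCodePage);  // 1200
```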
-1
dotnet/runtime
65,899
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync
I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have just exposed the problem by adding the finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I suspect (I was not able to repro it) that it's caused by the flush implemented for the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flushing should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I've tried really hard to write a failing unit test, but I've failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base impl and is bug-free.
adamsitnik
"2022-02-25T17:17:25Z"
"2022-03-01T13:15:47Z"
097d9ea3c1584eb8745bd0a72ebf9cd3a31f1618
3cbed4b71800709a8121e50e964ae5b02bd80b94
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync. I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have just exposed the problem by adding the finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I suspect (I was not able to repro it) that it's caused by the flush implemented for the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flushing should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed. https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I've tried really hard to write a failing unit test, but I've failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base impl and is bug-free.
./src/libraries/System.Private.Xml/tests/Xslt/TestFiles/TestData/xsltc/baseline/cnt23.xsl
<xsl:stylesheet version= '1.0' xmlns:xsl='http://www.w3.org/1999/XSL/Transform' > <xsl:template match="/"> <xsl:for-each select="//foo"> <xsl:value-of select="."/> </xsl:for-each> </xsl:template> </xsl:stylesheet>
<xsl:stylesheet version= '1.0' xmlns:xsl='http://www.w3.org/1999/XSL/Transform' > <xsl:template match="/"> <xsl:for-each select="//foo"> <xsl:value-of select="."/> </xsl:for-each> </xsl:template> </xsl:stylesheet>
-1
dotnet/runtime
65,899
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync
I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have merely exposed the problem by adding a finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which in turn calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. My guess (I was not able to repro it) is that this is caused by the flush performed by the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 so after `DisposeAsync` the finalizer's flush should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I tried really hard to write a failing unit test, but failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base implementation and is bug-free.
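For reference, the async-dispose pattern documented for .NET types that implement both `IDisposable` and `IAsyncDisposable` ends `DisposeAsync` with exactly the call this PR adds. Below is a sketch of that general pattern; the class is illustrative and not taken from this PR, and `DisposeAsyncCore` follows the documented naming convention rather than anything in `FileStream`:

```csharp
using System;
using System.Threading.Tasks;

// Illustrative resource following the documented dual-dispose pattern.
public class AsyncDisposableResource : IDisposable, IAsyncDisposable
{
    private bool _disposed;

    public void Dispose()
    {
        Dispose(disposing: true);
        GC.SuppressFinalize(this); // what Stream.Close does for the sync path
    }

    public async ValueTask DisposeAsync()
    {
        await DisposeAsyncCore().ConfigureAwait(false);
        Dispose(disposing: false); // release unmanaged state only
        GC.SuppressFinalize(this); // the step FileStream.DisposeAsync missed
    }

    protected virtual void Dispose(bool disposing)
    {
        if (_disposed) return;
        // free managed state when disposing == true; unmanaged state always
        _disposed = true;
    }

    protected virtual ValueTask DisposeAsyncCore() => ValueTask.CompletedTask;

    ~AsyncDisposableResource() => Dispose(disposing: false); // safety net
}
```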
adamsitnik
"2022-02-25T17:17:25Z"
"2022-03-01T13:15:47Z"
097d9ea3c1584eb8745bd0a72ebf9cd3a31f1618
3cbed4b71800709a8121e50e964ae5b02bd80b94
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync. I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`; my recent changes from #64997 have merely exposed the problem by adding a finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which in turn calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync` but not calling `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. My guess (I was not able to repro it) is that this is caused by the flush performed by the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 so after `DisposeAsync` the finalizer's flush should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I tried really hard to write a failing unit test, but failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base implementation and is bug-free.
./src/libraries/Microsoft.VisualBasic.Core/src/Resources/SR.vb
' Do not edit this file manually it is auto-generated during the build based on the .resx file for this project. Namespace System Friend Partial Class SR Private Const s_resourcesName As String = "Microsoft.VisualBasic.resources" ' assembly Name + .resources #If Not DEBUGRESOURCES Then Friend Shared ReadOnly Property ID91 As String Get Return SR.GetResourceString("ID91", Nothing) End Get End Property Friend Shared ReadOnly Property ID92 As String Get Return SR.GetResourceString("ID92", Nothing) End Get End Property Friend Shared ReadOnly Property Argument_GEZero1 As String Get Return SR.GetResourceString("Argument_GEZero1", Nothing) End Get End Property Friend Shared ReadOnly Property Argument_GTZero1 As String Get Return SR.GetResourceString("Argument_GTZero1", Nothing) End Get End Property Friend Shared ReadOnly Property Argument_LengthGTZero1 As String Get Return SR.GetResourceString("Argument_LengthGTZero1", Nothing) End Get End Property Friend Shared ReadOnly Property Argument_RangeTwoBytes1 As String Get Return SR.GetResourceString("Argument_RangeTwoBytes1", Nothing) End Get End Property Friend Shared ReadOnly Property Argument_MinusOneOrGTZero1 As String Get Return SR.GetResourceString("Argument_MinusOneOrGTZero1", Nothing) End Get End Property Friend Shared ReadOnly Property Argument_GEMinusOne1 As String Get Return SR.GetResourceString("Argument_GEMinusOne1", Nothing) End Get End Property Friend Shared ReadOnly Property Argument_GEOne1 As String Get Return SR.GetResourceString("Argument_GEOne1", Nothing) End Get End Property Friend Shared ReadOnly Property Argument_RankEQOne1 As String Get Return SR.GetResourceString("Argument_RankEQOne1", Nothing) End Get End Property Friend Shared ReadOnly Property Argument_IComparable2 As String Get Return SR.GetResourceString("Argument_IComparable2", Nothing) End Get End Property Friend Shared ReadOnly Property Argument_NotNumericType2 As String Get Return SR.GetResourceString("Argument_NotNumericType2", Nothing) End Get End Property Friend Shared ReadOnly Property Argument_InvalidValue1 As String Get Return SR.GetResourceString("Argument_InvalidValue1", Nothing) End Get End Property Friend Shared ReadOnly Property Argument_InvalidValueType2 As String Get Return SR.GetResourceString("Argument_InvalidValueType2", Nothing) End Get End Property Friend Shared ReadOnly Property Argument_InvalidValue As String Get Return SR.GetResourceString("Argument_InvalidValue", Nothing) End Get End Property Friend Shared ReadOnly Property Collection_BeforeAfterExclusive As String Get Return SR.GetResourceString("Collection_BeforeAfterExclusive", Nothing) End Get End Property Friend Shared ReadOnly Property Collection_DuplicateKey As String Get Return SR.GetResourceString("Collection_DuplicateKey", Nothing) End Get End Property Friend Shared ReadOnly Property ForLoop_CommonType2 As String Get Return SR.GetResourceString("ForLoop_CommonType2", Nothing) End Get End Property Friend Shared ReadOnly Property ForLoop_CommonType3 As String Get Return SR.GetResourceString("ForLoop_CommonType3", Nothing) End Get End Property Friend Shared ReadOnly Property ForLoop_ConvertToType3 As String Get Return SR.GetResourceString("ForLoop_ConvertToType3", Nothing) End Get End Property Friend Shared ReadOnly Property ForLoop_OperatorRequired2 As String Get Return SR.GetResourceString("ForLoop_OperatorRequired2", Nothing) End Get End Property Friend Shared ReadOnly Property ForLoop_UnacceptableOperator2 As String Get Return SR.GetResourceString("ForLoop_UnacceptableOperator2", 
Nothing) End Get End Property Friend Shared ReadOnly Property ForLoop_UnacceptableRelOperator2 As String Get Return SR.GetResourceString("ForLoop_UnacceptableRelOperator2", Nothing) End Get End Property Friend Shared ReadOnly Property InternalError As String Get Return SR.GetResourceString("InternalError", Nothing) End Get End Property Friend Shared ReadOnly Property MaxErrNumber As String Get Return SR.GetResourceString("MaxErrNumber", Nothing) End Get End Property Friend Shared ReadOnly Property Argument_InvalidNullValue1 As String Get Return SR.GetResourceString("Argument_InvalidNullValue1", Nothing) End Get End Property Friend Shared ReadOnly Property Argument_InvalidRank1 As String Get Return SR.GetResourceString("Argument_InvalidRank1", Nothing) End Get End Property Friend Shared ReadOnly Property Argument_Range0to99_1 As String Get Return SR.GetResourceString("Argument_Range0to99_1", Nothing) End Get End Property Friend Shared ReadOnly Property Array_RankMismatch As String Get Return SR.GetResourceString("Array_RankMismatch", Nothing) End Get End Property Friend Shared ReadOnly Property Array_TypeMismatch As String Get Return SR.GetResourceString("Array_TypeMismatch", Nothing) End Get End Property Friend Shared ReadOnly Property InvalidCast_FromTo As String Get Return SR.GetResourceString("InvalidCast_FromTo", Nothing) End Get End Property Friend Shared ReadOnly Property InvalidCast_FromStringTo As String Get Return SR.GetResourceString("InvalidCast_FromStringTo", Nothing) End Get End Property Friend Shared ReadOnly Property Argument_InvalidDateValue1 As String Get Return SR.GetResourceString("Argument_InvalidDateValue1", Nothing) End Get End Property Friend Shared ReadOnly Property ArgumentNotNumeric1 As String Get Return SR.GetResourceString("ArgumentNotNumeric1", Nothing) End Get End Property Friend Shared ReadOnly Property Argument_IndexLELength2 As String Get Return SR.GetResourceString("Argument_IndexLELength2", Nothing) End Get End Property Friend Shared ReadOnly Property MissingMember_NoDefaultMemberFound1 As String Get Return SR.GetResourceString("MissingMember_NoDefaultMemberFound1", Nothing) End Get End Property Friend Shared ReadOnly Property MissingMember_MemberNotFoundOnType2 As String Get Return SR.GetResourceString("MissingMember_MemberNotFoundOnType2", Nothing) End Get End Property Friend Shared ReadOnly Property IntermediateLateBoundNothingResult1 As String Get Return SR.GetResourceString("IntermediateLateBoundNothingResult1", Nothing) End Get End Property Friend Shared ReadOnly Property Argument_CollectionIndex As String Get Return SR.GetResourceString("Argument_CollectionIndex", Nothing) End Get End Property Friend Shared ReadOnly Property RValueBaseForValueType As String Get Return SR.GetResourceString("RValueBaseForValueType", Nothing) End Get End Property Friend Shared ReadOnly Property ExpressionNotProcedure As String Get Return SR.GetResourceString("ExpressionNotProcedure", Nothing) End Get End Property Friend Shared ReadOnly Property LateboundCallToInheritedComClass As String Get Return SR.GetResourceString("LateboundCallToInheritedComClass", Nothing) End Get End Property Friend Shared ReadOnly Property MissingMember_ReadOnlyField2 As String Get Return SR.GetResourceString("MissingMember_ReadOnlyField2", Nothing) End Get End Property Friend Shared ReadOnly Property Argument_InvalidNamedArgs As String Get Return SR.GetResourceString("Argument_InvalidNamedArgs", Nothing) End Get End Property Friend Shared ReadOnly Property SyncLockRequiresReferenceType1 As 
String Get Return SR.GetResourceString("SyncLockRequiresReferenceType1", Nothing) End Get End Property Friend Shared ReadOnly Property NullReference_InstanceReqToAccessMember1 As String Get Return SR.GetResourceString("NullReference_InstanceReqToAccessMember1", Nothing) End Get End Property Friend Shared ReadOnly Property MatchArgumentFailure2 As String Get Return SR.GetResourceString("MatchArgumentFailure2", Nothing) End Get End Property Friend Shared ReadOnly Property NoGetProperty1 As String Get Return SR.GetResourceString("NoGetProperty1", Nothing) End Get End Property Friend Shared ReadOnly Property NoSetProperty1 As String Get Return SR.GetResourceString("NoSetProperty1", Nothing) End Get End Property Friend Shared ReadOnly Property MethodAssignment1 As String Get Return SR.GetResourceString("MethodAssignment1", Nothing) End Get End Property Friend Shared ReadOnly Property NoViableOverloadCandidates1 As String Get Return SR.GetResourceString("NoViableOverloadCandidates1", Nothing) End Get End Property Friend Shared ReadOnly Property NoArgumentCountOverloadCandidates1 As String Get Return SR.GetResourceString("NoArgumentCountOverloadCandidates1", Nothing) End Get End Property Friend Shared ReadOnly Property NoTypeArgumentCountOverloadCandidates1 As String Get Return SR.GetResourceString("NoTypeArgumentCountOverloadCandidates1", Nothing) End Get End Property Friend Shared ReadOnly Property NoCallableOverloadCandidates2 As String Get Return SR.GetResourceString("NoCallableOverloadCandidates2", Nothing) End Get End Property Friend Shared ReadOnly Property NoNonNarrowingOverloadCandidates2 As String Get Return SR.GetResourceString("NoNonNarrowingOverloadCandidates2", Nothing) End Get End Property Friend Shared ReadOnly Property NoMostSpecificOverload2 As String Get Return SR.GetResourceString("NoMostSpecificOverload2", Nothing) End Get End Property Friend Shared ReadOnly Property AmbiguousCast2 As String Get Return SR.GetResourceString("AmbiguousCast2", Nothing) End Get End Property Friend Shared ReadOnly Property NotMostSpecificOverload As String Get Return SR.GetResourceString("NotMostSpecificOverload", Nothing) End Get End Property Friend Shared ReadOnly Property NamedParamNotFound2 As String Get Return SR.GetResourceString("NamedParamNotFound2", Nothing) End Get End Property Friend Shared ReadOnly Property NamedParamArrayArgument1 As String Get Return SR.GetResourceString("NamedParamArrayArgument1", Nothing) End Get End Property Friend Shared ReadOnly Property NamedArgUsedTwice2 As String Get Return SR.GetResourceString("NamedArgUsedTwice2", Nothing) End Get End Property Friend Shared ReadOnly Property OmittedArgument1 As String Get Return SR.GetResourceString("OmittedArgument1", Nothing) End Get End Property Friend Shared ReadOnly Property OmittedParamArrayArgument As String Get Return SR.GetResourceString("OmittedParamArrayArgument", Nothing) End Get End Property Friend Shared ReadOnly Property ArgumentMismatch3 As String Get Return SR.GetResourceString("ArgumentMismatch3", Nothing) End Get End Property Friend Shared ReadOnly Property ArgumentMismatchAmbiguous3 As String Get Return SR.GetResourceString("ArgumentMismatchAmbiguous3", Nothing) End Get End Property Friend Shared ReadOnly Property ArgumentNarrowing3 As String Get Return SR.GetResourceString("ArgumentNarrowing3", Nothing) End Get End Property Friend Shared ReadOnly Property ArgumentMismatchCopyBack3 As String Get Return SR.GetResourceString("ArgumentMismatchCopyBack3", Nothing) End Get End Property Friend Shared ReadOnly 
Property ArgumentMismatchAmbiguousCopyBack3 As String Get Return SR.GetResourceString("ArgumentMismatchAmbiguousCopyBack3", Nothing) End Get End Property Friend Shared ReadOnly Property ArgumentNarrowingCopyBack3 As String Get Return SR.GetResourceString("ArgumentNarrowingCopyBack3", Nothing) End Get End Property Friend Shared ReadOnly Property UnboundTypeParam1 As String Get Return SR.GetResourceString("UnboundTypeParam1", Nothing) End Get End Property Friend Shared ReadOnly Property TypeInferenceFails1 As String Get Return SR.GetResourceString("TypeInferenceFails1", Nothing) End Get End Property Friend Shared ReadOnly Property FailedTypeArgumentBinding As String Get Return SR.GetResourceString("FailedTypeArgumentBinding", Nothing) End Get End Property Friend Shared ReadOnly Property UnaryOperand2 As String Get Return SR.GetResourceString("UnaryOperand2", Nothing) End Get End Property Friend Shared ReadOnly Property BinaryOperands3 As String Get Return SR.GetResourceString("BinaryOperands3", Nothing) End Get End Property Friend Shared ReadOnly Property NoValidOperator_StringType1 As String Get Return SR.GetResourceString("NoValidOperator_StringType1", Nothing) End Get End Property Friend Shared ReadOnly Property NoValidOperator_NonStringType1 As String Get Return SR.GetResourceString("NoValidOperator_NonStringType1", Nothing) End Get End Property Friend Shared ReadOnly Property PropertySetMissingArgument1 As String Get Return SR.GetResourceString("PropertySetMissingArgument1", Nothing) End Get End Property #Else Friend Shared ReadOnly Property ID91 As String Get Return SR.GetResourceString("ID91", "Object variable or With block variable not set.") End Get End Property Friend Shared ReadOnly Property ID92 As String Get Return SR.GetResourceString("ID92", "For loop not initialized.") End Get End Property Friend Shared ReadOnly Property Argument_GEZero1 As String Get Return SR.GetResourceString("Argument_GEZero1", "Argument '{0}' must be greater or equal to zero.") End Get End Property Friend Shared ReadOnly Property Argument_GTZero1 As String Get Return SR.GetResourceString("Argument_GTZero1", "Argument '{0}' must be greater than zero.") End Get End Property Friend Shared ReadOnly Property Argument_LengthGTZero1 As String Get Return SR.GetResourceString("Argument_LengthGTZero1", "Length of argument '{0}' must be greater than zero.") End Get End Property Friend Shared ReadOnly Property Argument_RangeTwoBytes1 As String Get Return SR.GetResourceString("Argument_RangeTwoBytes1", "Argument '{0}' must be within the range of -32768 to 65535.") End Get End Property Friend Shared ReadOnly Property Argument_MinusOneOrGTZero1 As String Get Return SR.GetResourceString("Argument_MinusOneOrGTZero1", "Argument '{0}' must be greater than 0 or equal to -1.") End Get End Property Friend Shared ReadOnly Property Argument_GEMinusOne1 As String Get Return SR.GetResourceString("Argument_GEMinusOne1", "Argument '{0}' must be greater than or equal to -1.") End Get End Property Friend Shared ReadOnly Property Argument_GEOne1 As String Get Return SR.GetResourceString("Argument_GEOne1", "Argument '{0}' must be greater than or equal to 1.") End Get End Property Friend Shared ReadOnly Property Argument_RankEQOne1 As String Get Return SR.GetResourceString("Argument_RankEQOne1", "Argument '{0}' cannot be a multi-dimensional array.") End Get End Property Friend Shared ReadOnly Property Argument_IComparable2 As String Get Return SR.GetResourceString("Argument_IComparable2", "Loop control variable of type '{1}' does not 
implement the 'System.IComparable' interface.") End Get End Property Friend Shared ReadOnly Property Argument_NotNumericType2 As String Get Return SR.GetResourceString("Argument_NotNumericType2", "Type of argument '{0}' is '{1}', which is not numeric.") End Get End Property Friend Shared ReadOnly Property Argument_InvalidValue1 As String Get Return SR.GetResourceString("Argument_InvalidValue1", "Argument '{0}' is not a valid value.") End Get End Property Friend Shared ReadOnly Property Argument_InvalidValueType2 As String Get Return SR.GetResourceString("Argument_InvalidValueType2", "Argument '{0}' cannot be converted to type '{1}'.") End Get End Property Friend Shared ReadOnly Property Argument_InvalidValue As String Get Return SR.GetResourceString("Argument_InvalidValue", "Arguments are not valid.") End Get End Property Friend Shared ReadOnly Property Collection_BeforeAfterExclusive As String Get Return SR.GetResourceString("Collection_BeforeAfterExclusive", "'Before' and 'After' arguments cannot be combined.") End Get End Property Friend Shared ReadOnly Property Collection_DuplicateKey As String Get Return SR.GetResourceString("Collection_DuplicateKey", "Add failed. Duplicate key value supplied.") End Get End Property Friend Shared ReadOnly Property ForLoop_CommonType2 As String Get Return SR.GetResourceString("ForLoop_CommonType2", "Cannot convert start value of type '{0}' and step value of type '{1}' to a common numeric type.") End Get End Property Friend Shared ReadOnly Property ForLoop_CommonType3 As String Get Return SR.GetResourceString("ForLoop_CommonType3", "Cannot convert start value of type '{0}', limit value of type '{1}', and step value of type '{2}' to a common numeric type.") End Get End Property Friend Shared ReadOnly Property ForLoop_ConvertToType3 As String Get Return SR.GetResourceString("ForLoop_ConvertToType3", "Cannot convert argument '{0}' of type '{1}' to type '{2}'.") End Get End Property Friend Shared ReadOnly Property ForLoop_OperatorRequired2 As String Get Return SR.GetResourceString("ForLoop_OperatorRequired2", "Type '{0}' must define an operator '{1}', with parameters of type '{0}', to be used in a 'For' statement.") End Get End Property Friend Shared ReadOnly Property ForLoop_UnacceptableOperator2 As String Get Return SR.GetResourceString("ForLoop_UnacceptableOperator2", "Return and parameter types of '{0}' must be of type '{1}' to be used in a 'For' statement.") End Get End Property Friend Shared ReadOnly Property ForLoop_UnacceptableRelOperator2 As String Get Return SR.GetResourceString("ForLoop_UnacceptableRelOperator2", "Parameter types of '{0}' must be of type '{1}' to be used in a 'For' statement.") End Get End Property Friend Shared ReadOnly Property InternalError As String Get Return SR.GetResourceString("InternalError", "Internal error in the Microsoft Visual Basic runtime.") End Get End Property Friend Shared ReadOnly Property MaxErrNumber As String Get Return SR.GetResourceString("MaxErrNumber", "Error number must be within the range 0 to 65535.") End Get End Property Friend Shared ReadOnly Property Argument_InvalidNullValue1 As String Get Return SR.GetResourceString("Argument_InvalidNullValue1", "Argument '{0}' is Nothing.") End Get End Property Friend Shared ReadOnly Property Argument_InvalidRank1 As String Get Return SR.GetResourceString("Argument_InvalidRank1", "Argument '{0}' is not valid for the array.") End Get End Property Friend Shared ReadOnly Property Argument_Range0to99_1 As String Get Return 
SR.GetResourceString("Argument_Range0to99_1", "Argument '{0}' must be within the range 0 to 99.") End Get End Property Friend Shared ReadOnly Property Array_RankMismatch As String Get Return SR.GetResourceString("Array_RankMismatch", "'ReDim' cannot change the number of dimensions.") End Get End Property Friend Shared ReadOnly Property Array_TypeMismatch As String Get Return SR.GetResourceString("Array_TypeMismatch", "'ReDim' can only change the rightmost dimension.") End Get End Property Friend Shared ReadOnly Property InvalidCast_FromTo As String Get Return SR.GetResourceString("InvalidCast_FromTo", "Conversion from type '{0}' to type '{1}' is not valid.") End Get End Property Friend Shared ReadOnly Property InvalidCast_FromStringTo As String Get Return SR.GetResourceString("InvalidCast_FromStringTo", "Conversion from string \""{0}\"" to type '{1}' is not valid.") End Get End Property Friend Shared ReadOnly Property Argument_InvalidDateValue1 As String Get Return SR.GetResourceString("Argument_InvalidDateValue1", "Argument '{0}' cannot be converted to type 'Date'.") End Get End Property Friend Shared ReadOnly Property ArgumentNotNumeric1 As String Get Return SR.GetResourceString("ArgumentNotNumeric1", "Argument '{0}' cannot be converted to a numeric value.") End Get End Property Friend Shared ReadOnly Property Argument_IndexLELength2 As String Get Return SR.GetResourceString("Argument_IndexLELength2", "Argument '{0}' must be less than or equal to the length of argument '{1}'.") End Get End Property Friend Shared ReadOnly Property MissingMember_NoDefaultMemberFound1 As String Get Return SR.GetResourceString("MissingMember_NoDefaultMemberFound1", "No default member found for type '{0}'.") End Get End Property Friend Shared ReadOnly Property MissingMember_MemberNotFoundOnType2 As String Get Return SR.GetResourceString("MissingMember_MemberNotFoundOnType2", "Public member '{0}' on type '{1}' not found.") End Get End Property Friend Shared ReadOnly Property IntermediateLateBoundNothingResult1 As String Get Return SR.GetResourceString("IntermediateLateBoundNothingResult1", "Invocation of '{0}' on type '{1}' returned Nothing.") End Get End Property Friend Shared ReadOnly Property Argument_CollectionIndex As String Get Return SR.GetResourceString("Argument_CollectionIndex", "Collection index must be in the range 1 to the size of the collection.") End Get End Property Friend Shared ReadOnly Property RValueBaseForValueType As String Get Return SR.GetResourceString("RValueBaseForValueType", "Late-bound assignment to a field of value type '{0}' is not valid when '{1}' is the result of a late-bound expression.") End Get End Property Friend Shared ReadOnly Property ExpressionNotProcedure As String Get Return SR.GetResourceString("ExpressionNotProcedure", "Expression '{0}' is not a procedure, but occurs as the target of a procedure call.") End Get End Property Friend Shared ReadOnly Property LateboundCallToInheritedComClass As String Get Return SR.GetResourceString("LateboundCallToInheritedComClass", "Managed classes derived from a COM class cannot be called late bound.") End Get End Property Friend Shared ReadOnly Property MissingMember_ReadOnlyField2 As String Get Return SR.GetResourceString("MissingMember_ReadOnlyField2", "Field '{0}' of type '{1}' is 'ReadOnly'.") End Get End Property Friend Shared ReadOnly Property Argument_InvalidNamedArgs As String Get Return SR.GetResourceString("Argument_InvalidNamedArgs", "Named arguments are not valid as array subscripts.") End Get End Property Friend Shared 
ReadOnly Property SyncLockRequiresReferenceType1 As String Get Return SR.GetResourceString("SyncLockRequiresReferenceType1", "'SyncLock' operand cannot be of type '{0}' because '{0}' is not a reference type.") End Get End Property Friend Shared ReadOnly Property NullReference_InstanceReqToAccessMember1 As String Get Return SR.GetResourceString("NullReference_InstanceReqToAccessMember1", "Reference to non-shared member '{0}' requires an object reference.") End Get End Property Friend Shared ReadOnly Property MatchArgumentFailure2 As String Get Return SR.GetResourceString("MatchArgumentFailure2", "Method invocation failed because '{0}' cannot be called with these arguments:{1}") End Get End Property Friend Shared ReadOnly Property NoGetProperty1 As String Get Return SR.GetResourceString("NoGetProperty1", "Property '{0}' is WriteOnly.") End Get End Property Friend Shared ReadOnly Property NoSetProperty1 As String Get Return SR.GetResourceString("NoSetProperty1", "Property '{0}' is ReadOnly.") End Get End Property Friend Shared ReadOnly Property MethodAssignment1 As String Get Return SR.GetResourceString("MethodAssignment1", "Method '{0}' cannot be the target of an assignment.") End Get End Property Friend Shared ReadOnly Property NoViableOverloadCandidates1 As String Get Return SR.GetResourceString("NoViableOverloadCandidates1", "Overload resolution failed because no '{0}' is Public.") End Get End Property Friend Shared ReadOnly Property NoArgumentCountOverloadCandidates1 As String Get Return SR.GetResourceString("NoArgumentCountOverloadCandidates1", "Overload resolution failed because no accessible '{0}' accepts this number of arguments.") End Get End Property Friend Shared ReadOnly Property NoTypeArgumentCountOverloadCandidates1 As String Get Return SR.GetResourceString("NoTypeArgumentCountOverloadCandidates1", "Overload resolution failed because no accessible '{0}' accepts this number of type arguments.") End Get End Property Friend Shared ReadOnly Property NoCallableOverloadCandidates2 As String Get Return SR.GetResourceString("NoCallableOverloadCandidates2", "Overload resolution failed because no Public '{0}' can be called with these arguments:{1}") End Get End Property Friend Shared ReadOnly Property NoNonNarrowingOverloadCandidates2 As String Get Return SR.GetResourceString("NoNonNarrowingOverloadCandidates2", "Overload resolution failed because no Public '{0}' can be called without a narrowing conversion:{1}") End Get End Property Friend Shared ReadOnly Property NoMostSpecificOverload2 As String Get Return SR.GetResourceString("NoMostSpecificOverload2", "Overload resolution failed because no Public '{0}' is most specific for these arguments:{1}") End Get End Property Friend Shared ReadOnly Property AmbiguousCast2 As String Get Return SR.GetResourceString("AmbiguousCast2", "Conversion from type '{0}' to type '{1}' is ambiguous.") End Get End Property Friend Shared ReadOnly Property NotMostSpecificOverload As String Get Return SR.GetResourceString("NotMostSpecificOverload", "Not most specific.") End Get End Property Friend Shared ReadOnly Property NamedParamNotFound2 As String Get Return SR.GetResourceString("NamedParamNotFound2", "Named argument '{0}' matches no parameter of '{1}'.") End Get End Property Friend Shared ReadOnly Property NamedParamArrayArgument1 As String Get Return SR.GetResourceString("NamedParamArrayArgument1", "Named argument '{0}' cannot match a ParamArray parameter.") End Get End Property Friend Shared ReadOnly Property NamedArgUsedTwice2 As String Get Return 
SR.GetResourceString("NamedArgUsedTwice2", "Parameter '{0}' of '{1}' already has a matching argument.") End Get End Property Friend Shared ReadOnly Property OmittedArgument1 As String Get Return SR.GetResourceString("OmittedArgument1", "Argument not specified for parameter '{0}'.") End Get End Property Friend Shared ReadOnly Property OmittedParamArrayArgument As String Get Return SR.GetResourceString("OmittedParamArrayArgument", "Omitted argument cannot match a ParamArray parameter.") End Get End Property Friend Shared ReadOnly Property ArgumentMismatch3 As String Get Return SR.GetResourceString("ArgumentMismatch3", "Argument matching parameter '{0}' cannot convert from '{1}' to '{2}'.") End Get End Property Friend Shared ReadOnly Property ArgumentMismatchAmbiguous3 As String Get Return SR.GetResourceString("ArgumentMismatchAmbiguous3", "Argument matching parameter '{0}' cannot convert from '{1}' to '{2}' because the conversion is ambiguous.") End Get End Property Friend Shared ReadOnly Property ArgumentNarrowing3 As String Get Return SR.GetResourceString("ArgumentNarrowing3", "Argument matching parameter '{0}' narrows from '{1}' to '{2}'.") End Get End Property Friend Shared ReadOnly Property ArgumentMismatchCopyBack3 As String Get Return SR.GetResourceString("ArgumentMismatchCopyBack3", "ByRef parameter '{0}' cannot convert from '{1}' to '{2}' when assigning back to the matching argument.") End Get End Property Friend Shared ReadOnly Property ArgumentMismatchAmbiguousCopyBack3 As String Get Return SR.GetResourceString("ArgumentMismatchAmbiguousCopyBack3", "ByRef parameter '{0}' cannot convert from '{1}' to '{2}' when assigning back to the matching argument because the conversion is ambiguous.") End Get End Property Friend Shared ReadOnly Property ArgumentNarrowingCopyBack3 As String Get Return SR.GetResourceString("ArgumentNarrowingCopyBack3", "ByRef parameter '{0}' narrows from '{1}' to '{2}' when assigning back to the matching argument.") End Get End Property Friend Shared ReadOnly Property UnboundTypeParam1 As String Get Return SR.GetResourceString("UnboundTypeParam1", "Type parameter '{0}' cannot be determined.") End Get End Property Friend Shared ReadOnly Property TypeInferenceFails1 As String Get Return SR.GetResourceString("TypeInferenceFails1", "Type argument inference fails for argument matching parameter '{0}'.") End Get End Property Friend Shared ReadOnly Property FailedTypeArgumentBinding As String Get Return SR.GetResourceString("FailedTypeArgumentBinding", "Substitution of type arguments failed.") End Get End Property Friend Shared ReadOnly Property UnaryOperand2 As String Get Return SR.GetResourceString("UnaryOperand2", "Operator '{0}' is not defined for type '{1}'.") End Get End Property Friend Shared ReadOnly Property BinaryOperands3 As String Get Return SR.GetResourceString("BinaryOperands3", "Operator '{0}' is not defined for {1} and {2}.") End Get End Property Friend Shared ReadOnly Property NoValidOperator_StringType1 As String Get Return SR.GetResourceString("NoValidOperator_StringType1", "string \""{0}\""") End Get End Property Friend Shared ReadOnly Property NoValidOperator_NonStringType1 As String Get Return SR.GetResourceString("NoValidOperator_NonStringType1", "type '{0}'") End Get End Property Friend Shared ReadOnly Property PropertySetMissingArgument1 As String Get Return SR.GetResourceString("PropertySetMissingArgument1", "Call to set property '{0}' requires at least one argument.") End Get End Property #End If End Class End Namespace
' Do not edit this file manually it is auto-generated during the build based on the .resx file for this project. Namespace System Friend Partial Class SR Private Const s_resourcesName As String = "Microsoft.VisualBasic.resources" ' assembly Name + .resources #If Not DEBUGRESOURCES Then Friend Shared ReadOnly Property ID91 As String Get Return SR.GetResourceString("ID91", Nothing) End Get End Property Friend Shared ReadOnly Property ID92 As String Get Return SR.GetResourceString("ID92", Nothing) End Get End Property Friend Shared ReadOnly Property Argument_GEZero1 As String Get Return SR.GetResourceString("Argument_GEZero1", Nothing) End Get End Property Friend Shared ReadOnly Property Argument_GTZero1 As String Get Return SR.GetResourceString("Argument_GTZero1", Nothing) End Get End Property Friend Shared ReadOnly Property Argument_LengthGTZero1 As String Get Return SR.GetResourceString("Argument_LengthGTZero1", Nothing) End Get End Property Friend Shared ReadOnly Property Argument_RangeTwoBytes1 As String Get Return SR.GetResourceString("Argument_RangeTwoBytes1", Nothing) End Get End Property Friend Shared ReadOnly Property Argument_MinusOneOrGTZero1 As String Get Return SR.GetResourceString("Argument_MinusOneOrGTZero1", Nothing) End Get End Property Friend Shared ReadOnly Property Argument_GEMinusOne1 As String Get Return SR.GetResourceString("Argument_GEMinusOne1", Nothing) End Get End Property Friend Shared ReadOnly Property Argument_GEOne1 As String Get Return SR.GetResourceString("Argument_GEOne1", Nothing) End Get End Property Friend Shared ReadOnly Property Argument_RankEQOne1 As String Get Return SR.GetResourceString("Argument_RankEQOne1", Nothing) End Get End Property Friend Shared ReadOnly Property Argument_IComparable2 As String Get Return SR.GetResourceString("Argument_IComparable2", Nothing) End Get End Property Friend Shared ReadOnly Property Argument_NotNumericType2 As String Get Return SR.GetResourceString("Argument_NotNumericType2", Nothing) End Get End Property Friend Shared ReadOnly Property Argument_InvalidValue1 As String Get Return SR.GetResourceString("Argument_InvalidValue1", Nothing) End Get End Property Friend Shared ReadOnly Property Argument_InvalidValueType2 As String Get Return SR.GetResourceString("Argument_InvalidValueType2", Nothing) End Get End Property Friend Shared ReadOnly Property Argument_InvalidValue As String Get Return SR.GetResourceString("Argument_InvalidValue", Nothing) End Get End Property Friend Shared ReadOnly Property Collection_BeforeAfterExclusive As String Get Return SR.GetResourceString("Collection_BeforeAfterExclusive", Nothing) End Get End Property Friend Shared ReadOnly Property Collection_DuplicateKey As String Get Return SR.GetResourceString("Collection_DuplicateKey", Nothing) End Get End Property Friend Shared ReadOnly Property ForLoop_CommonType2 As String Get Return SR.GetResourceString("ForLoop_CommonType2", Nothing) End Get End Property Friend Shared ReadOnly Property ForLoop_CommonType3 As String Get Return SR.GetResourceString("ForLoop_CommonType3", Nothing) End Get End Property Friend Shared ReadOnly Property ForLoop_ConvertToType3 As String Get Return SR.GetResourceString("ForLoop_ConvertToType3", Nothing) End Get End Property Friend Shared ReadOnly Property ForLoop_OperatorRequired2 As String Get Return SR.GetResourceString("ForLoop_OperatorRequired2", Nothing) End Get End Property Friend Shared ReadOnly Property ForLoop_UnacceptableOperator2 As String Get Return SR.GetResourceString("ForLoop_UnacceptableOperator2", 
Nothing) End Get End Property Friend Shared ReadOnly Property ForLoop_UnacceptableRelOperator2 As String Get Return SR.GetResourceString("ForLoop_UnacceptableRelOperator2", Nothing) End Get End Property Friend Shared ReadOnly Property InternalError As String Get Return SR.GetResourceString("InternalError", Nothing) End Get End Property Friend Shared ReadOnly Property MaxErrNumber As String Get Return SR.GetResourceString("MaxErrNumber", Nothing) End Get End Property Friend Shared ReadOnly Property Argument_InvalidNullValue1 As String Get Return SR.GetResourceString("Argument_InvalidNullValue1", Nothing) End Get End Property Friend Shared ReadOnly Property Argument_InvalidRank1 As String Get Return SR.GetResourceString("Argument_InvalidRank1", Nothing) End Get End Property Friend Shared ReadOnly Property Argument_Range0to99_1 As String Get Return SR.GetResourceString("Argument_Range0to99_1", Nothing) End Get End Property Friend Shared ReadOnly Property Array_RankMismatch As String Get Return SR.GetResourceString("Array_RankMismatch", Nothing) End Get End Property Friend Shared ReadOnly Property Array_TypeMismatch As String Get Return SR.GetResourceString("Array_TypeMismatch", Nothing) End Get End Property Friend Shared ReadOnly Property InvalidCast_FromTo As String Get Return SR.GetResourceString("InvalidCast_FromTo", Nothing) End Get End Property Friend Shared ReadOnly Property InvalidCast_FromStringTo As String Get Return SR.GetResourceString("InvalidCast_FromStringTo", Nothing) End Get End Property Friend Shared ReadOnly Property Argument_InvalidDateValue1 As String Get Return SR.GetResourceString("Argument_InvalidDateValue1", Nothing) End Get End Property Friend Shared ReadOnly Property ArgumentNotNumeric1 As String Get Return SR.GetResourceString("ArgumentNotNumeric1", Nothing) End Get End Property Friend Shared ReadOnly Property Argument_IndexLELength2 As String Get Return SR.GetResourceString("Argument_IndexLELength2", Nothing) End Get End Property Friend Shared ReadOnly Property MissingMember_NoDefaultMemberFound1 As String Get Return SR.GetResourceString("MissingMember_NoDefaultMemberFound1", Nothing) End Get End Property Friend Shared ReadOnly Property MissingMember_MemberNotFoundOnType2 As String Get Return SR.GetResourceString("MissingMember_MemberNotFoundOnType2", Nothing) End Get End Property Friend Shared ReadOnly Property IntermediateLateBoundNothingResult1 As String Get Return SR.GetResourceString("IntermediateLateBoundNothingResult1", Nothing) End Get End Property Friend Shared ReadOnly Property Argument_CollectionIndex As String Get Return SR.GetResourceString("Argument_CollectionIndex", Nothing) End Get End Property Friend Shared ReadOnly Property RValueBaseForValueType As String Get Return SR.GetResourceString("RValueBaseForValueType", Nothing) End Get End Property Friend Shared ReadOnly Property ExpressionNotProcedure As String Get Return SR.GetResourceString("ExpressionNotProcedure", Nothing) End Get End Property Friend Shared ReadOnly Property LateboundCallToInheritedComClass As String Get Return SR.GetResourceString("LateboundCallToInheritedComClass", Nothing) End Get End Property Friend Shared ReadOnly Property MissingMember_ReadOnlyField2 As String Get Return SR.GetResourceString("MissingMember_ReadOnlyField2", Nothing) End Get End Property Friend Shared ReadOnly Property Argument_InvalidNamedArgs As String Get Return SR.GetResourceString("Argument_InvalidNamedArgs", Nothing) End Get End Property Friend Shared ReadOnly Property SyncLockRequiresReferenceType1 As 
String Get Return SR.GetResourceString("SyncLockRequiresReferenceType1", Nothing) End Get End Property Friend Shared ReadOnly Property NullReference_InstanceReqToAccessMember1 As String Get Return SR.GetResourceString("NullReference_InstanceReqToAccessMember1", Nothing) End Get End Property Friend Shared ReadOnly Property MatchArgumentFailure2 As String Get Return SR.GetResourceString("MatchArgumentFailure2", Nothing) End Get End Property Friend Shared ReadOnly Property NoGetProperty1 As String Get Return SR.GetResourceString("NoGetProperty1", Nothing) End Get End Property Friend Shared ReadOnly Property NoSetProperty1 As String Get Return SR.GetResourceString("NoSetProperty1", Nothing) End Get End Property Friend Shared ReadOnly Property MethodAssignment1 As String Get Return SR.GetResourceString("MethodAssignment1", Nothing) End Get End Property Friend Shared ReadOnly Property NoViableOverloadCandidates1 As String Get Return SR.GetResourceString("NoViableOverloadCandidates1", Nothing) End Get End Property Friend Shared ReadOnly Property NoArgumentCountOverloadCandidates1 As String Get Return SR.GetResourceString("NoArgumentCountOverloadCandidates1", Nothing) End Get End Property Friend Shared ReadOnly Property NoTypeArgumentCountOverloadCandidates1 As String Get Return SR.GetResourceString("NoTypeArgumentCountOverloadCandidates1", Nothing) End Get End Property Friend Shared ReadOnly Property NoCallableOverloadCandidates2 As String Get Return SR.GetResourceString("NoCallableOverloadCandidates2", Nothing) End Get End Property Friend Shared ReadOnly Property NoNonNarrowingOverloadCandidates2 As String Get Return SR.GetResourceString("NoNonNarrowingOverloadCandidates2", Nothing) End Get End Property Friend Shared ReadOnly Property NoMostSpecificOverload2 As String Get Return SR.GetResourceString("NoMostSpecificOverload2", Nothing) End Get End Property Friend Shared ReadOnly Property AmbiguousCast2 As String Get Return SR.GetResourceString("AmbiguousCast2", Nothing) End Get End Property Friend Shared ReadOnly Property NotMostSpecificOverload As String Get Return SR.GetResourceString("NotMostSpecificOverload", Nothing) End Get End Property Friend Shared ReadOnly Property NamedParamNotFound2 As String Get Return SR.GetResourceString("NamedParamNotFound2", Nothing) End Get End Property Friend Shared ReadOnly Property NamedParamArrayArgument1 As String Get Return SR.GetResourceString("NamedParamArrayArgument1", Nothing) End Get End Property Friend Shared ReadOnly Property NamedArgUsedTwice2 As String Get Return SR.GetResourceString("NamedArgUsedTwice2", Nothing) End Get End Property Friend Shared ReadOnly Property OmittedArgument1 As String Get Return SR.GetResourceString("OmittedArgument1", Nothing) End Get End Property Friend Shared ReadOnly Property OmittedParamArrayArgument As String Get Return SR.GetResourceString("OmittedParamArrayArgument", Nothing) End Get End Property Friend Shared ReadOnly Property ArgumentMismatch3 As String Get Return SR.GetResourceString("ArgumentMismatch3", Nothing) End Get End Property Friend Shared ReadOnly Property ArgumentMismatchAmbiguous3 As String Get Return SR.GetResourceString("ArgumentMismatchAmbiguous3", Nothing) End Get End Property Friend Shared ReadOnly Property ArgumentNarrowing3 As String Get Return SR.GetResourceString("ArgumentNarrowing3", Nothing) End Get End Property Friend Shared ReadOnly Property ArgumentMismatchCopyBack3 As String Get Return SR.GetResourceString("ArgumentMismatchCopyBack3", Nothing) End Get End Property Friend Shared ReadOnly 
Property ArgumentMismatchAmbiguousCopyBack3 As String Get Return SR.GetResourceString("ArgumentMismatchAmbiguousCopyBack3", Nothing) End Get End Property Friend Shared ReadOnly Property ArgumentNarrowingCopyBack3 As String Get Return SR.GetResourceString("ArgumentNarrowingCopyBack3", Nothing) End Get End Property Friend Shared ReadOnly Property UnboundTypeParam1 As String Get Return SR.GetResourceString("UnboundTypeParam1", Nothing) End Get End Property Friend Shared ReadOnly Property TypeInferenceFails1 As String Get Return SR.GetResourceString("TypeInferenceFails1", Nothing) End Get End Property Friend Shared ReadOnly Property FailedTypeArgumentBinding As String Get Return SR.GetResourceString("FailedTypeArgumentBinding", Nothing) End Get End Property Friend Shared ReadOnly Property UnaryOperand2 As String Get Return SR.GetResourceString("UnaryOperand2", Nothing) End Get End Property Friend Shared ReadOnly Property BinaryOperands3 As String Get Return SR.GetResourceString("BinaryOperands3", Nothing) End Get End Property Friend Shared ReadOnly Property NoValidOperator_StringType1 As String Get Return SR.GetResourceString("NoValidOperator_StringType1", Nothing) End Get End Property Friend Shared ReadOnly Property NoValidOperator_NonStringType1 As String Get Return SR.GetResourceString("NoValidOperator_NonStringType1", Nothing) End Get End Property Friend Shared ReadOnly Property PropertySetMissingArgument1 As String Get Return SR.GetResourceString("PropertySetMissingArgument1", Nothing) End Get End Property #Else Friend Shared ReadOnly Property ID91 As String Get Return SR.GetResourceString("ID91", "Object variable or With block variable not set.") End Get End Property Friend Shared ReadOnly Property ID92 As String Get Return SR.GetResourceString("ID92", "For loop not initialized.") End Get End Property Friend Shared ReadOnly Property Argument_GEZero1 As String Get Return SR.GetResourceString("Argument_GEZero1", "Argument '{0}' must be greater or equal to zero.") End Get End Property Friend Shared ReadOnly Property Argument_GTZero1 As String Get Return SR.GetResourceString("Argument_GTZero1", "Argument '{0}' must be greater than zero.") End Get End Property Friend Shared ReadOnly Property Argument_LengthGTZero1 As String Get Return SR.GetResourceString("Argument_LengthGTZero1", "Length of argument '{0}' must be greater than zero.") End Get End Property Friend Shared ReadOnly Property Argument_RangeTwoBytes1 As String Get Return SR.GetResourceString("Argument_RangeTwoBytes1", "Argument '{0}' must be within the range of -32768 to 65535.") End Get End Property Friend Shared ReadOnly Property Argument_MinusOneOrGTZero1 As String Get Return SR.GetResourceString("Argument_MinusOneOrGTZero1", "Argument '{0}' must be greater than 0 or equal to -1.") End Get End Property Friend Shared ReadOnly Property Argument_GEMinusOne1 As String Get Return SR.GetResourceString("Argument_GEMinusOne1", "Argument '{0}' must be greater than or equal to -1.") End Get End Property Friend Shared ReadOnly Property Argument_GEOne1 As String Get Return SR.GetResourceString("Argument_GEOne1", "Argument '{0}' must be greater than or equal to 1.") End Get End Property Friend Shared ReadOnly Property Argument_RankEQOne1 As String Get Return SR.GetResourceString("Argument_RankEQOne1", "Argument '{0}' cannot be a multi-dimensional array.") End Get End Property Friend Shared ReadOnly Property Argument_IComparable2 As String Get Return SR.GetResourceString("Argument_IComparable2", "Loop control variable of type '{1}' does not 
implement the 'System.IComparable' interface.") End Get End Property Friend Shared ReadOnly Property Argument_NotNumericType2 As String Get Return SR.GetResourceString("Argument_NotNumericType2", "Type of argument '{0}' is '{1}', which is not numeric.") End Get End Property Friend Shared ReadOnly Property Argument_InvalidValue1 As String Get Return SR.GetResourceString("Argument_InvalidValue1", "Argument '{0}' is not a valid value.") End Get End Property Friend Shared ReadOnly Property Argument_InvalidValueType2 As String Get Return SR.GetResourceString("Argument_InvalidValueType2", "Argument '{0}' cannot be converted to type '{1}'.") End Get End Property Friend Shared ReadOnly Property Argument_InvalidValue As String Get Return SR.GetResourceString("Argument_InvalidValue", "Arguments are not valid.") End Get End Property Friend Shared ReadOnly Property Collection_BeforeAfterExclusive As String Get Return SR.GetResourceString("Collection_BeforeAfterExclusive", "'Before' and 'After' arguments cannot be combined.") End Get End Property Friend Shared ReadOnly Property Collection_DuplicateKey As String Get Return SR.GetResourceString("Collection_DuplicateKey", "Add failed. Duplicate key value supplied.") End Get End Property Friend Shared ReadOnly Property ForLoop_CommonType2 As String Get Return SR.GetResourceString("ForLoop_CommonType2", "Cannot convert start value of type '{0}' and step value of type '{1}' to a common numeric type.") End Get End Property Friend Shared ReadOnly Property ForLoop_CommonType3 As String Get Return SR.GetResourceString("ForLoop_CommonType3", "Cannot convert start value of type '{0}', limit value of type '{1}', and step value of type '{2}' to a common numeric type.") End Get End Property Friend Shared ReadOnly Property ForLoop_ConvertToType3 As String Get Return SR.GetResourceString("ForLoop_ConvertToType3", "Cannot convert argument '{0}' of type '{1}' to type '{2}'.") End Get End Property Friend Shared ReadOnly Property ForLoop_OperatorRequired2 As String Get Return SR.GetResourceString("ForLoop_OperatorRequired2", "Type '{0}' must define an operator '{1}', with parameters of type '{0}', to be used in a 'For' statement.") End Get End Property Friend Shared ReadOnly Property ForLoop_UnacceptableOperator2 As String Get Return SR.GetResourceString("ForLoop_UnacceptableOperator2", "Return and parameter types of '{0}' must be of type '{1}' to be used in a 'For' statement.") End Get End Property Friend Shared ReadOnly Property ForLoop_UnacceptableRelOperator2 As String Get Return SR.GetResourceString("ForLoop_UnacceptableRelOperator2", "Parameter types of '{0}' must be of type '{1}' to be used in a 'For' statement.") End Get End Property Friend Shared ReadOnly Property InternalError As String Get Return SR.GetResourceString("InternalError", "Internal error in the Microsoft Visual Basic runtime.") End Get End Property Friend Shared ReadOnly Property MaxErrNumber As String Get Return SR.GetResourceString("MaxErrNumber", "Error number must be within the range 0 to 65535.") End Get End Property Friend Shared ReadOnly Property Argument_InvalidNullValue1 As String Get Return SR.GetResourceString("Argument_InvalidNullValue1", "Argument '{0}' is Nothing.") End Get End Property Friend Shared ReadOnly Property Argument_InvalidRank1 As String Get Return SR.GetResourceString("Argument_InvalidRank1", "Argument '{0}' is not valid for the array.") End Get End Property Friend Shared ReadOnly Property Argument_Range0to99_1 As String Get Return 
SR.GetResourceString("Argument_Range0to99_1", "Argument '{0}' must be within the range 0 to 99.")
            End Get
        End Property
        Friend Shared ReadOnly Property Array_RankMismatch As String
            Get
                Return SR.GetResourceString("Array_RankMismatch", "'ReDim' cannot change the number of dimensions.")
            End Get
        End Property
        Friend Shared ReadOnly Property Array_TypeMismatch As String
            Get
                Return SR.GetResourceString("Array_TypeMismatch", "'ReDim' can only change the rightmost dimension.")
            End Get
        End Property
        Friend Shared ReadOnly Property InvalidCast_FromTo As String
            Get
                Return SR.GetResourceString("InvalidCast_FromTo", "Conversion from type '{0}' to type '{1}' is not valid.")
            End Get
        End Property
        Friend Shared ReadOnly Property InvalidCast_FromStringTo As String
            Get
                Return SR.GetResourceString("InvalidCast_FromStringTo", "Conversion from string ""{0}"" to type '{1}' is not valid.")
            End Get
        End Property
        Friend Shared ReadOnly Property Argument_InvalidDateValue1 As String
            Get
                Return SR.GetResourceString("Argument_InvalidDateValue1", "Argument '{0}' cannot be converted to type 'Date'.")
            End Get
        End Property
        Friend Shared ReadOnly Property ArgumentNotNumeric1 As String
            Get
                Return SR.GetResourceString("ArgumentNotNumeric1", "Argument '{0}' cannot be converted to a numeric value.")
            End Get
        End Property
        Friend Shared ReadOnly Property Argument_IndexLELength2 As String
            Get
                Return SR.GetResourceString("Argument_IndexLELength2", "Argument '{0}' must be less than or equal to the length of argument '{1}'.")
            End Get
        End Property
        Friend Shared ReadOnly Property MissingMember_NoDefaultMemberFound1 As String
            Get
                Return SR.GetResourceString("MissingMember_NoDefaultMemberFound1", "No default member found for type '{0}'.")
            End Get
        End Property
        Friend Shared ReadOnly Property MissingMember_MemberNotFoundOnType2 As String
            Get
                Return SR.GetResourceString("MissingMember_MemberNotFoundOnType2", "Public member '{0}' on type '{1}' not found.")
            End Get
        End Property
        Friend Shared ReadOnly Property IntermediateLateBoundNothingResult1 As String
            Get
                Return SR.GetResourceString("IntermediateLateBoundNothingResult1", "Invocation of '{0}' on type '{1}' returned Nothing.")
            End Get
        End Property
        Friend Shared ReadOnly Property Argument_CollectionIndex As String
            Get
                Return SR.GetResourceString("Argument_CollectionIndex", "Collection index must be in the range 1 to the size of the collection.")
            End Get
        End Property
        Friend Shared ReadOnly Property RValueBaseForValueType As String
            Get
                Return SR.GetResourceString("RValueBaseForValueType", "Late-bound assignment to a field of value type '{0}' is not valid when '{1}' is the result of a late-bound expression.")
            End Get
        End Property
        Friend Shared ReadOnly Property ExpressionNotProcedure As String
            Get
                Return SR.GetResourceString("ExpressionNotProcedure", "Expression '{0}' is not a procedure, but occurs as the target of a procedure call.")
            End Get
        End Property
        Friend Shared ReadOnly Property LateboundCallToInheritedComClass As String
            Get
                Return SR.GetResourceString("LateboundCallToInheritedComClass", "Managed classes derived from a COM class cannot be called late bound.")
            End Get
        End Property
        Friend Shared ReadOnly Property MissingMember_ReadOnlyField2 As String
            Get
                Return SR.GetResourceString("MissingMember_ReadOnlyField2", "Field '{0}' of type '{1}' is 'ReadOnly'.")
            End Get
        End Property
        Friend Shared ReadOnly Property Argument_InvalidNamedArgs As String
            Get
                Return SR.GetResourceString("Argument_InvalidNamedArgs", "Named arguments are not valid as array subscripts.")
            End Get
        End Property
        Friend Shared ReadOnly Property SyncLockRequiresReferenceType1 As String
            Get
                Return SR.GetResourceString("SyncLockRequiresReferenceType1", "'SyncLock' operand cannot be of type '{0}' because '{0}' is not a reference type.")
            End Get
        End Property
        Friend Shared ReadOnly Property NullReference_InstanceReqToAccessMember1 As String
            Get
                Return SR.GetResourceString("NullReference_InstanceReqToAccessMember1", "Reference to non-shared member '{0}' requires an object reference.")
            End Get
        End Property
        Friend Shared ReadOnly Property MatchArgumentFailure2 As String
            Get
                Return SR.GetResourceString("MatchArgumentFailure2", "Method invocation failed because '{0}' cannot be called with these arguments:{1}")
            End Get
        End Property
        Friend Shared ReadOnly Property NoGetProperty1 As String
            Get
                Return SR.GetResourceString("NoGetProperty1", "Property '{0}' is WriteOnly.")
            End Get
        End Property
        Friend Shared ReadOnly Property NoSetProperty1 As String
            Get
                Return SR.GetResourceString("NoSetProperty1", "Property '{0}' is ReadOnly.")
            End Get
        End Property
        Friend Shared ReadOnly Property MethodAssignment1 As String
            Get
                Return SR.GetResourceString("MethodAssignment1", "Method '{0}' cannot be the target of an assignment.")
            End Get
        End Property
        Friend Shared ReadOnly Property NoViableOverloadCandidates1 As String
            Get
                Return SR.GetResourceString("NoViableOverloadCandidates1", "Overload resolution failed because no '{0}' is Public.")
            End Get
        End Property
        Friend Shared ReadOnly Property NoArgumentCountOverloadCandidates1 As String
            Get
                Return SR.GetResourceString("NoArgumentCountOverloadCandidates1", "Overload resolution failed because no accessible '{0}' accepts this number of arguments.")
            End Get
        End Property
        Friend Shared ReadOnly Property NoTypeArgumentCountOverloadCandidates1 As String
            Get
                Return SR.GetResourceString("NoTypeArgumentCountOverloadCandidates1", "Overload resolution failed because no accessible '{0}' accepts this number of type arguments.")
            End Get
        End Property
        Friend Shared ReadOnly Property NoCallableOverloadCandidates2 As String
            Get
                Return SR.GetResourceString("NoCallableOverloadCandidates2", "Overload resolution failed because no Public '{0}' can be called with these arguments:{1}")
            End Get
        End Property
        Friend Shared ReadOnly Property NoNonNarrowingOverloadCandidates2 As String
            Get
                Return SR.GetResourceString("NoNonNarrowingOverloadCandidates2", "Overload resolution failed because no Public '{0}' can be called without a narrowing conversion:{1}")
            End Get
        End Property
        Friend Shared ReadOnly Property NoMostSpecificOverload2 As String
            Get
                Return SR.GetResourceString("NoMostSpecificOverload2", "Overload resolution failed because no Public '{0}' is most specific for these arguments:{1}")
            End Get
        End Property
        Friend Shared ReadOnly Property AmbiguousCast2 As String
            Get
                Return SR.GetResourceString("AmbiguousCast2", "Conversion from type '{0}' to type '{1}' is ambiguous.")
            End Get
        End Property
        Friend Shared ReadOnly Property NotMostSpecificOverload As String
            Get
                Return SR.GetResourceString("NotMostSpecificOverload", "Not most specific.")
            End Get
        End Property
        Friend Shared ReadOnly Property NamedParamNotFound2 As String
            Get
                Return SR.GetResourceString("NamedParamNotFound2", "Named argument '{0}' matches no parameter of '{1}'.")
            End Get
        End Property
        Friend Shared ReadOnly Property NamedParamArrayArgument1 As String
            Get
                Return SR.GetResourceString("NamedParamArrayArgument1", "Named argument '{0}' cannot match a ParamArray parameter.")
            End Get
        End Property
        Friend Shared ReadOnly Property NamedArgUsedTwice2 As String
            Get
                Return SR.GetResourceString("NamedArgUsedTwice2", "Parameter '{0}' of '{1}' already has a matching argument.")
            End Get
        End Property
        Friend Shared ReadOnly Property OmittedArgument1 As String
            Get
                Return SR.GetResourceString("OmittedArgument1", "Argument not specified for parameter '{0}'.")
            End Get
        End Property
        Friend Shared ReadOnly Property OmittedParamArrayArgument As String
            Get
                Return SR.GetResourceString("OmittedParamArrayArgument", "Omitted argument cannot match a ParamArray parameter.")
            End Get
        End Property
        Friend Shared ReadOnly Property ArgumentMismatch3 As String
            Get
                Return SR.GetResourceString("ArgumentMismatch3", "Argument matching parameter '{0}' cannot convert from '{1}' to '{2}'.")
            End Get
        End Property
        Friend Shared ReadOnly Property ArgumentMismatchAmbiguous3 As String
            Get
                Return SR.GetResourceString("ArgumentMismatchAmbiguous3", "Argument matching parameter '{0}' cannot convert from '{1}' to '{2}' because the conversion is ambiguous.")
            End Get
        End Property
        Friend Shared ReadOnly Property ArgumentNarrowing3 As String
            Get
                Return SR.GetResourceString("ArgumentNarrowing3", "Argument matching parameter '{0}' narrows from '{1}' to '{2}'.")
            End Get
        End Property
        Friend Shared ReadOnly Property ArgumentMismatchCopyBack3 As String
            Get
                Return SR.GetResourceString("ArgumentMismatchCopyBack3", "ByRef parameter '{0}' cannot convert from '{1}' to '{2}' when assigning back to the matching argument.")
            End Get
        End Property
        Friend Shared ReadOnly Property ArgumentMismatchAmbiguousCopyBack3 As String
            Get
                Return SR.GetResourceString("ArgumentMismatchAmbiguousCopyBack3", "ByRef parameter '{0}' cannot convert from '{1}' to '{2}' when assigning back to the matching argument because the conversion is ambiguous.")
            End Get
        End Property
        Friend Shared ReadOnly Property ArgumentNarrowingCopyBack3 As String
            Get
                Return SR.GetResourceString("ArgumentNarrowingCopyBack3", "ByRef parameter '{0}' narrows from '{1}' to '{2}' when assigning back to the matching argument.")
            End Get
        End Property
        Friend Shared ReadOnly Property UnboundTypeParam1 As String
            Get
                Return SR.GetResourceString("UnboundTypeParam1", "Type parameter '{0}' cannot be determined.")
            End Get
        End Property
        Friend Shared ReadOnly Property TypeInferenceFails1 As String
            Get
                Return SR.GetResourceString("TypeInferenceFails1", "Type argument inference fails for argument matching parameter '{0}'.")
            End Get
        End Property
        Friend Shared ReadOnly Property FailedTypeArgumentBinding As String
            Get
                Return SR.GetResourceString("FailedTypeArgumentBinding", "Substitution of type arguments failed.")
            End Get
        End Property
        Friend Shared ReadOnly Property UnaryOperand2 As String
            Get
                Return SR.GetResourceString("UnaryOperand2", "Operator '{0}' is not defined for type '{1}'.")
            End Get
        End Property
        Friend Shared ReadOnly Property BinaryOperands3 As String
            Get
                Return SR.GetResourceString("BinaryOperands3", "Operator '{0}' is not defined for {1} and {2}.")
            End Get
        End Property
        Friend Shared ReadOnly Property NoValidOperator_StringType1 As String
            Get
                Return SR.GetResourceString("NoValidOperator_StringType1", "string ""{0}""")
            End Get
        End Property
        Friend Shared ReadOnly Property NoValidOperator_NonStringType1 As String
            Get
                Return SR.GetResourceString("NoValidOperator_NonStringType1", "type '{0}'")
            End Get
        End Property
        Friend Shared ReadOnly Property PropertySetMissingArgument1 As String
            Get
                Return SR.GetResourceString("PropertySetMissingArgument1", "Call to set property '{0}' requires at least one argument.")
            End Get
        End Property
#End If
    End Class
End Namespace
-1
dotnet/runtime
65,899
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync
I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`, and my recent changes from #64997 have just exposed the problem by adding a finalizer to `FileStream` itself.

Explanation: the default `Stream.DisposeAsync` calls `Dispose`:

https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180

which calls `Close`, which calls `GC.SuppressFinalize(this)`:

https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167

`FileStream` was overriding `DisposeAsync`, but not calling `GC.SuppressFinalize(this)`:

https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500

In #65835 we can see that the buffer was actually written to disk twice. I suspect (I was not able to repro it) that it's caused by the flush implemented for the finalizer. I am not 100% sure, because the finally block sets the write position to 0:

https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121

So after `DisposeAsync` the flushing should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed:

https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130

I've tried really hard to write a failing unit test, but I've failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`:

https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579

which does call the base impl and is bug-free.
adamsitnik
"2022-02-25T17:17:25Z"
"2022-03-01T13:15:47Z"
097d9ea3c1584eb8745bd0a72ebf9cd3a31f1618
3cbed4b71800709a8121e50e964ae5b02bd80b94
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync. I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`, and my recent changes from #64997 have just exposed the problem by adding a finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync`, but not calling `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I suspect (I was not able to repro it) that it's caused by the flush implemented for the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flushing should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I've tried really hard to write a failing unit test, but I've failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base impl and is bug-free.
./src/tests/Loader/classloader/TypeGeneratorTests/TypeGeneratorTest147/Generated147.ilproj
<Project Sdk="Microsoft.NET.Sdk.IL">
  <PropertyGroup>
    <CLRTestPriority>1</CLRTestPriority>
  </PropertyGroup>
  <ItemGroup>
    <Compile Include="Generated147.il" />
  </ItemGroup>
  <ItemGroup>
    <ProjectReference Include="..\TestFramework\TestFramework.csproj" />
  </ItemGroup>
</Project>
<Project Sdk="Microsoft.NET.Sdk.IL">
  <PropertyGroup>
    <CLRTestPriority>1</CLRTestPriority>
  </PropertyGroup>
  <ItemGroup>
    <Compile Include="Generated147.il" />
  </ItemGroup>
  <ItemGroup>
    <ProjectReference Include="..\TestFramework\TestFramework.csproj" />
  </ItemGroup>
</Project>
-1
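To make the fix described in the PR record above concrete, here is a minimal C# sketch of the pattern: a finalizable type whose `DisposeAsync` override must call `GC.SuppressFinalize(this)` itself, because it bypasses the `Dispose`/`Close` path that normally suppresses finalization. The type and member names (`BufferedWriterLike`, `Flush`) are hypothetical stand-ins for illustration, not the actual `FileStream` implementation.

```csharp
using System;
using System.Threading.Tasks;

// Hypothetical stand-in for a buffered, finalizable type; illustrative only.
public sealed class BufferedWriterLike : IDisposable, IAsyncDisposable
{
    private int _flushCount;

    // Last-chance flush in case the consumer never disposed the instance.
    ~BufferedWriterLike() => Flush();

    public void Flush() => Console.WriteLine($"flush #{++_flushCount}");

    public void Dispose()
    {
        Flush();
        // The synchronous path suppresses finalization, mirroring what
        // the base Stream.Dispose/Close path does per the PR description.
        GC.SuppressFinalize(this);
    }

    public ValueTask DisposeAsync()
    {
        Flush();
        // The fix this PR makes: without this call, an instance disposed
        // only via DisposeAsync stays on the finalizer queue, and the
        // finalizer later performs a second, unwanted flush.
        GC.SuppressFinalize(this);
        return ValueTask.CompletedTask;
    }
}
```

Disposing via `Dispose()` gets finalization suppression for free through the `Close` path; the async override has to do it explicitly, which is exactly the one-line change the PR adds.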
dotnet/runtime
65,899
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync
I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`, and my recent changes from #64997 have just exposed the problem by adding a finalizer to `FileStream` itself.

Explanation: the default `Stream.DisposeAsync` calls `Dispose`:

https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180

which calls `Close`, which calls `GC.SuppressFinalize(this)`:

https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167

`FileStream` was overriding `DisposeAsync`, but not calling `GC.SuppressFinalize(this)`:

https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500

In #65835 we can see that the buffer was actually written to disk twice. I suspect (I was not able to repro it) that it's caused by the flush implemented for the finalizer. I am not 100% sure, because the finally block sets the write position to 0:

https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121

So after `DisposeAsync` the flushing should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed:

https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130

I've tried really hard to write a failing unit test, but I've failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`:

https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579

which does call the base impl and is bug-free.
adamsitnik
"2022-02-25T17:17:25Z"
"2022-03-01T13:15:47Z"
097d9ea3c1584eb8745bd0a72ebf9cd3a31f1618
3cbed4b71800709a8121e50e964ae5b02bd80b94
add missing GC.SuppressFinalize(this) to FileStream.DisposeAsync. I was unable to repro #65835 locally for a few hours, but I believe it's caused by the lack of `GC.SuppressFinalize(this)` in `FileStream.DisposeAsync`, and my recent changes from #64997 have just exposed the problem by adding a finalizer to `FileStream` itself. Explanation: the default `Stream.DisposeAsync` calls `Dispose`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L176-L180 which calls `Close`, which calls `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Stream.cs#L158-L167 `FileStream` was overriding `DisposeAsync`, but not calling `GC.SuppressFinalize(this)`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L500 In #65835 we can see that the buffer was actually written to disk twice. I suspect (I was not able to repro it) that it's caused by the flush implemented for the finalizer. I am not 100% sure, because the finally block sets the write position to 0: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L115-L121 So after `DisposeAsync` the flushing should in theory see that there is nothing to flush. Moreover, it should also observe that the handle was already closed: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/Strategies/BufferedFileStreamStrategy.cs#L125-L130 I've tried really hard to write a failing unit test, but I've failed. The reason is that all custom types deriving from `FileStream` call `BaseDisposeAsync`: https://github.com/dotnet/runtime/blob/95f7f7a026d74e0720d0dfdf2de799933b832df2/src/libraries/System.Private.CoreLib/src/System/IO/FileStream.cs#L579 which does call the base impl and is bug-free.
./src/tests/JIT/Generics/ConstrainedCall/vt1_cs_ro.csproj
<Project Sdk="Microsoft.NET.Sdk">
  <PropertyGroup>
    <OutputType>Exe</OutputType>
    <CLRTestPriority>1</CLRTestPriority>
  </PropertyGroup>
  <PropertyGroup>
    <DebugType>None</DebugType>
    <Optimize>True</Optimize>
  </PropertyGroup>
  <ItemGroup>
    <Compile Include="vt1.cs" />
  </ItemGroup>
</Project>
<Project Sdk="Microsoft.NET.Sdk">
  <PropertyGroup>
    <OutputType>Exe</OutputType>
    <CLRTestPriority>1</CLRTestPriority>
  </PropertyGroup>
  <PropertyGroup>
    <DebugType>None</DebugType>
    <Optimize>True</Optimize>
  </PropertyGroup>
  <ItemGroup>
    <Compile Include="vt1.cs" />
  </ItemGroup>
</Project>
-1
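As a hedged illustration of the double-flush symptom mentioned in the description (the author could not build a reliable repro), the sketch below drains the finalizer queue after an async-only dispose. It builds on the hypothetical `BufferedWriterLike` type sketched earlier, so it is not the real `FileStream` scenario; run it in Release, since Debug builds may extend object lifetimes.

```csharp
using System;
using System.Threading.Tasks;

// Demo harness for the BufferedWriterLike sketch shown earlier.
public static class DoubleFlushDemo
{
    public static async Task Main()
    {
        await new BufferedWriterLike().DisposeAsync();

        // Drain the finalizer queue. With GC.SuppressFinalize in
        // DisposeAsync, the object was removed from the queue and only
        // "flush #1" is printed; without it, the finalizer would run
        // Flush again and print "flush #2" here.
        GC.Collect();
        GC.WaitForPendingFinalizers();
        GC.Collect();

        Console.WriteLine("done");
    }
}
```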