diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile
index d76e325e8b6cae..1dfc556cd6b423 100644
--- a/.devcontainer/Dockerfile
+++ b/.devcontainer/Dockerfile
@@ -5,12 +5,19 @@
ARG VARIANT="6.0-jammy"
FROM mcr.microsoft.com/devcontainers/dotnet:0-${VARIANT}
+# Set up machine requirements to build the repo and the gh CLI
+# Clang-16 or newer is required, but Ubuntu 22.04 ships clang-14 at most, so add the clang-18 apt sources
+RUN apt-get update \
+ && wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add - \
+ && apt-get install software-properties-common -y \
+ && add-apt-repository "deb http://apt.llvm.org/$(lsb_release -s -c)/ llvm-toolchain-$(lsb_release -s -c)-18 main" -y \
+ && apt-get update \
+ && apt-get install clang-18 -y
+
# Set up machine requirements to build the repo and the gh CLI
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install --no-install-recommends \
cmake \
- llvm \
- clang \
build-essential \
python3 \
curl \
diff --git a/.devcontainer/wasm-multiThreaded/Dockerfile b/.devcontainer/wasm-multiThreaded/Dockerfile
index 75f2465b391b3c..9062a8b8570725 100644
--- a/.devcontainer/wasm-multiThreaded/Dockerfile
+++ b/.devcontainer/wasm-multiThreaded/Dockerfile
@@ -5,12 +5,19 @@
ARG VARIANT="6.0-jammy"
FROM mcr.microsoft.com/devcontainers/dotnet:0-${VARIANT}
+# Set up machine requirements to build the repo and the gh CLI
+# Clang-16 or newer is required, but Ubuntu 22.04 ships clang-14 at most, so add the clang-18 apt sources
+RUN apt-get update \
+ && wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add - \
+ && apt-get install software-properties-common -y \
+ && add-apt-repository "deb http://apt.llvm.org/$(lsb_release -s -c)/ llvm-toolchain-$(lsb_release -s -c)-18 main" -y \
+ && apt-get update \
+ && apt-get install clang-18 -y
+
# Set up machine requirements to build the repo and the gh CLI
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install --no-install-recommends \
cmake \
- llvm \
- clang \
build-essential \
python3 \
curl \
diff --git a/.devcontainer/wasm/Dockerfile b/.devcontainer/wasm/Dockerfile
index 75f2465b391b3c..1dcab2652c6db5 100644
--- a/.devcontainer/wasm/Dockerfile
+++ b/.devcontainer/wasm/Dockerfile
@@ -6,11 +6,17 @@ ARG VARIANT="6.0-jammy"
FROM mcr.microsoft.com/devcontainers/dotnet:0-${VARIANT}
# Set up machine requirements to build the repo and the gh CLI
+# Clang-16 or newer is required, but Ubuntu 22.04 ships clang-14 at most, so add the clang-18 apt sources
+RUN apt-get update \
+ && wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add - \
+ && apt-get install software-properties-common -y \
+ && add-apt-repository "deb http://apt.llvm.org/$(lsb_release -s -c)/ llvm-toolchain-$(lsb_release -s -c)-18 main" -y \
+ && apt-get update \
+ && apt-get install clang-18 -y
+
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
&& apt-get -y install --no-install-recommends \
cmake \
- llvm \
- clang \
build-essential \
python3 \
curl \
diff --git a/Directory.Build.targets b/Directory.Build.targets
index 1161d409dec1ab..bc5b847efc614c 100644
--- a/Directory.Build.targets
+++ b/Directory.Build.targets
@@ -97,18 +97,10 @@
-
-
- $(IsReferenceAssemblyProject)
-
-
-
-
-
diff --git a/THIRD-PARTY-NOTICES.TXT b/THIRD-PARTY-NOTICES.TXT
index 1f17cf6bc11065..18e149ccfd170d 100644
--- a/THIRD-PARTY-NOTICES.TXT
+++ b/THIRD-PARTY-NOTICES.TXT
@@ -66,38 +66,6 @@ shall not be used in advertising or otherwise to promote the sale,
use or other dealings in these Data Files or Software without prior
written authorization of the copyright holder.
-License notice for Zlib
------------------------
-
-https://github.com/madler/zlib
-https://zlib.net/zlib_license.html
-
-/* zlib.h -- interface of the 'zlib' general purpose compression library
- version 1.3.1, January 22nd, 2024
-
- Copyright (C) 1995-2022 Jean-loup Gailly and Mark Adler
-
- This software is provided 'as-is', without any express or implied
- warranty. In no event will the authors be held liable for any damages
- arising from the use of this software.
-
- Permission is granted to anyone to use this software for any purpose,
- including commercial applications, and to alter it and redistribute it
- freely, subject to the following restrictions:
-
- 1. The origin of this software must not be misrepresented; you must not
- claim that you wrote the original software. If you use this software
- in a product, an acknowledgment in the product documentation would be
- appreciated but is not required.
- 2. Altered source versions must be plainly marked as such, and must not be
- misrepresented as being the original software.
- 3. This notice may not be removed or altered from any source distribution.
-
- Jean-loup Gailly Mark Adler
- jloup@gzip.org madler@alumni.caltech.edu
-
-*/
-
License notice for zlib-ng
-----------------------
diff --git a/docs/coding-guidelines/api-guidelines/nullability.md b/docs/coding-guidelines/api-guidelines/nullability.md
index 7e7a6a6e83bfcb..fed947f07d4881 100644
--- a/docs/coding-guidelines/api-guidelines/nullability.md
+++ b/docs/coding-guidelines/api-guidelines/nullability.md
@@ -67,7 +67,7 @@ However, for existing virtual APIs that do not have any such strong guarantee do
4. How common is it in the case of (3) for such invocations to then dereference the result rather than passing it off to something else that accepts a `T?`?
`Object.ToString` is arguably the most extreme case. Answering the above questions:
-1. It is fairly easy in any reasonably-sized code base to find cases, intentional or otherwise, where `ToString` returns `null` in some cases (we've found examples in dotnet/corefx, dotnet/roslyn, NuGet/NuGet.Client, dotnet/aspnetcore, and so on). One of the most prevalent conditions for this are types that just return the value in a string field which may contain its default value of `null`, and in particular for structs where a ctor may not have even had a chance to run and validate an input. Guidance in the docs suggests that `ToString` shouldn't return `null` or `string.Empty`, but even the docs don't follow its own guidance.
+1. It is fairly easy in any reasonably-sized code base to find cases, intentional or otherwise, where `ToString` returns `null` in some cases (we've found examples in dotnet/runtime, dotnet/roslyn, NuGet/NuGet.Client, dotnet/aspnetcore, and so on). One of the most prevalent conditions for this are types that just return the value in a string field which may contain its default value of `null`, and in particular for structs where a ctor may not have even had a chance to run and validate an input. Guidance in the docs suggests that `ToString` shouldn't return `null` or `string.Empty`, but even the docs don't follow its own guidance.
2. Thousands upon thousands of types we don't control override this method today.
3. It's common for helper routines to invoke via the base `object.ToString`, but many `ToString` uses are actually on derived types. This is particularly true when working in a code base that both defines a type and consumes its `ToString`.
4. Based on examination of several large code bases, we believe it to be relatively rare that the result of an `Object.ToString` call (made on the base) to be directly dereferenced. It's much more common to pass it to another method that accepts `string?`, such as `String.Concat`, `String.Format`, `Console.WriteLine`, logging utilities, and so on. And while we advocate that `ToString` results shouldn't be assumed to be in a particular machine-readable format and parsed, it's certainly the case that code bases do, such as using `Substring` on the result, but in such cases, the caller needs to understand the format of what's being rendered, which generally means they're working with a derived type rather than calling through the base `Object.ToString`.
diff --git a/docs/design/coreclr/jit/JitOptimizerTodoAssessment.md b/docs/design/coreclr/jit/JitOptimizerTodoAssessment.md
index 1c1eec65a4b66b..1e40195a3ac149 100644
--- a/docs/design/coreclr/jit/JitOptimizerTodoAssessment.md
+++ b/docs/design/coreclr/jit/JitOptimizerTodoAssessment.md
@@ -49,7 +49,7 @@ the code quality improvements, though most have issues associated with them.
We may well be able to find some additional benchmarks or real-world-code with some looking,
though it may be the case that current performance-sensitive code avoids structs.
-There's also work going on in corefx to use `Span` more broadly. We should
+There's also work going on to use `Span` more broadly. We should
make sure we are expanding our span benchmarks appropriately to track and
respond to any particular issues that come out of that work.
diff --git a/docs/design/coreclr/jit/object-stack-allocation.md b/docs/design/coreclr/jit/object-stack-allocation.md
index 0101d5b4425e0c..7457087caf4154 100644
--- a/docs/design/coreclr/jit/object-stack-allocation.md
+++ b/docs/design/coreclr/jit/object-stack-allocation.md
@@ -83,7 +83,7 @@ with version reseliency.
**Pros:**
* ILLInk can afford to spend more time for escape analysis.
* For self-contained apps, ILLink has access to all of application's code and can do full interprocedural analysis.
-* ILLink is already a part of System.Private.CoreLib and CoreFX build toolchain so the assemblies built there can benefit
+* ILLink is already a part of System.Private.CoreLib and core libraries build toolchain so the assemblies built there can benefit
from this.
**Cons:**
diff --git a/docs/design/coreclr/jit/profile-count-reconstruction.md b/docs/design/coreclr/jit/profile-count-reconstruction.md
index f5f4c8006eb5e1..1a66382d010454 100644
--- a/docs/design/coreclr/jit/profile-count-reconstruction.md
+++ b/docs/design/coreclr/jit/profile-count-reconstruction.md
@@ -111,7 +111,7 @@ So solution techniques that can leverage sparseness are of particular interest.
Note the matrix $\boldsymbol I - \boldsymbol P$ has non-negative diagonal elements and negative non-diagonal elements, since all entries of $\boldsymbol P$ are in the range [0,1].
-If we further restrict ourselves to the case where $p_{i,i} \lt 1$ (meaning there are are no infinite self-loops) then all the diagonal entries are positive and the matrix has an inverse with no negative elements.
+If we further restrict ourselves to the case where $p_{i,i} \lt 1$ (meaning there are no infinite self-loops) then all the diagonal entries are positive and the matrix has an inverse with no negative elements.
Such matrices are known as M-matrices.
diff --git a/docs/design/features/AssemblyLoadContext.ContextualReflection.md b/docs/design/features/AssemblyLoadContext.ContextualReflection.md
index 456bc1bb2dd238..ff8c9d74a203ee 100644
--- a/docs/design/features/AssemblyLoadContext.ContextualReflection.md
+++ b/docs/design/features/AssemblyLoadContext.ContextualReflection.md
@@ -22,7 +22,7 @@ Using `pluginDependency` to determine the `AssemblyLoadContext` used for loading
### Failing Scenarios
#### Xunit story
-We have been working on building a test harness in Xunit for running the CoreFX test suite inside `AssemblyLoadContext`s (each test case in its own context). This has proven to be somewhat difficult due to Xunit being a very reflection heavy codebase with tons of instances of types, assemblies, etc. being converted to strings and then fed through `Activator`. One of the main learnings is that it is not always obvious what will stay inside the “bounds” of an `AssemblyLoadContext` and what won’t. The basic rule of thumb is that any `Assembly.Load()` will result in the assembly being loaded onto the `AssemblyLoadContext` of the calling code, so if code loaded by an ALC calls `Assembly.Load(...)`, the resulting assembly will be within the “bounds” of the ALC. This unfortunately breaks down in some cases, specifically when code calls `Activator` which lives in `System.Private.CoreLib` which is always shared.
+We have been working on building a test harness in Xunit for running the core libraries test suite inside `AssemblyLoadContext`s (each test case in its own context). This has proven to be somewhat difficult due to Xunit being a very reflection heavy codebase with tons of instances of types, assemblies, etc. being converted to strings and then fed through `Activator`. One of the main learnings is that it is not always obvious what will stay inside the “bounds” of an `AssemblyLoadContext` and what won’t. The basic rule of thumb is that any `Assembly.Load()` will result in the assembly being loaded onto the `AssemblyLoadContext` of the calling code, so if code loaded by an ALC calls `Assembly.Load(...)`, the resulting assembly will be within the “bounds” of the ALC. This unfortunately breaks down in some cases, specifically when code calls `Activator` which lives in `System.Private.CoreLib` which is always shared.
#### System.Xaml
This problem also manifests when using an `Object` deserialization framework which allows specifying assembly qualified type names.
diff --git a/docs/design/features/arm64-intrinsics.md b/docs/design/features/arm64-intrinsics.md
index 5e4ddabb0dcfd8..65d9ae0fa14648 100644
--- a/docs/design/features/arm64-intrinsics.md
+++ b/docs/design/features/arm64-intrinsics.md
@@ -263,9 +263,6 @@ To facilitate incremental progress, initial intrinsic API for a given `static cl
As intrinsic support is added test coverage must be extended to provide basic testing.
-Tests should be added as soon as practical. CoreCLR Implementation and CoreFX API will need to be merged before tests
-can be merged.
-
## LSRA changes to allocate contiguous register ranges
Some ARM64 instructions will require allocation of contiguous blocks of registers. These are likely limited to load and
diff --git a/docs/design/features/byreflike-generics.md b/docs/design/features/byreflike-generics.md
index d529f18f140dfb..4bccfa85ef1b17 100644
--- a/docs/design/features/byreflike-generics.md
+++ b/docs/design/features/byreflike-generics.md
@@ -7,22 +7,26 @@ Using ByRefLike types in Generic parameters is possible by building upon support
## Runtime impact
-Supporting ByRefLike type as Generic parameters will impact the following IL instructions:
+Supporting ByRefLike types as Generic parameters will impact the following IL instructions.
-- `box` – Types with ByRefLike parameters used in fields cannot be boxed.
+The `constrained. callvirt` sequence is valid if a ByRefLike type is provided. A `NotSupportedException` will be thrown at the call-site, if the target resolves to a method implemented on `object` or a default interface method.
+
+Throws `InvalidProgramException` when passed a ByRefLike type:
+- `box` – ByRefLike types cannot be allocated on the heap.
+
+Throws `TypeLoadException` when passed a ByRefLike type:
- `stsfld` / `ldsfld` – Type fields of a ByRefLike parameter cannot be marked `static`.
- `newarr` / `stelem` / `ldelem` / `ldelema` – Arrays are not able to contain ByRefLike types.
- `newobj` – For multi-dimensional array construction.
-- `constrained.callvirt` – If this IL sequence resolves to a method implemented on `object` or default interface method, an error will occur during the attempt to box the instance.
-
-If any of the above instructions are attempted to be used with a ByRefLike type, the runtime will throw an `InvalidProgramException`. Sequences involving some of the above instructions are considered optimizations and represent cases that will remain valid regardless of a `T` being ByRefLike. See "Special IL Sequences" section below for details.
The following instructions are already set up to support this feature since their behavior will fail as currently defined due to the inability to box a ByRefLike type.
-- `throw` – Requires an object reference to be on stack, which can never be a ByRefLike type.
-- `unbox` / `unbox.any` – Requires an object reference to be on stack, which can never be a ByRefLike type.
-- `isinst` – Will always place `null` on stack.
-- `castclass` – Will always throw `InvalidCastException`.
+- `throw`
+- `unbox` / `unbox.any`
+- `isinst`
+- `castclass`
+
+**NOTE** There are sequences involving some of the above instructions that may remain valid regardless of a `T` being ByRefLike—see ["Options for invalid IL" section](#invalid_il_options) below for details.
The expansion of ByRefLike types as Generic parameters does not relax restrictions on where ByRefLike types can be used. When `T` is ByRefLike, the use of `T` as a field will require the enclosing type to be ByRefLike.
@@ -110,23 +114,123 @@ throw
Adding `gpAcceptByRefLike` to the metadata of a Generic parameter will be considered a non-breaking binary change.
-Enumerating of constructors/methods on `Span` and `ReadOnlySpan` may throw `TypeLoadException` if `T` is a ByRefLike type. See "Troublesome APIs" above for the list of APIs that cause this condition.
+Enumerating of constructors/methods on `Span` and `ReadOnlySpan` may throw `TypeLoadException` if `T` is a ByRefLike type. See "Troublesome API mitigation" above for the list of APIs that cause this condition.
+
+## Options for invalid IL
+
+There are two potential options below for how to address this issue. Based on communication with the Roslyn team, option (1) is the current plan of record for .NET 10.
+
+The first indented IL sequence below represents the `is-type` sequence. Combining the first with the second indented section represents the "type pattern matching" scenario in C#. The below sequence performs a type check and then, if successful, consumes the unboxed instance.
+
+```IL
+// Type check
+ldarg.0
+ box
+ isinst
+ brfalse.s NOT_INST
+
+// Unbox and store unboxed instance
+ldarg.0
+ box
+ isinst
+ unbox.any
+stloc.X
+
+NOT_INST:
+ret
+```
+
+With the above IL composition implemented, the following C# describes the "type pattern matching" scenarios and what one might expect given current C# semantics.
+
+```csharp
+struct S {}
+struct S<T> {}
+ref struct RS {}
+ref struct RS<T> {}
+interface I {}
+class C {}
+class C<T> {}
+
+// Not currently valid C#
+void M<T, U>(T t) where T: allows ref struct
+{
+ // Valid
+ if (t is int i)
+
+ if (t is S s)
+ if (t is S<C> sc)
+ if (t is S<U> su)
+
+ if (t is RS rs)
+ if (t is RS<C> rsc)
+ if (t is RS<U> rsu)
+
+ if (t is string str)
+ if (t is C c)
+ if (t is C<I> ci)
+ if (t is C<U> cu)
+
+ // Can be made to work in IL.
+ if (t is I itf) // A new local "I" would not be used for ByRefLike scenarios.
+ // The local would be the ByRefLike type, not "I".
+
+ // Invalid
+ if (t is object o) // ByRefLike types evaluate "true" for object.
+ if (t is U u)
+}
+```
+
+### Option 1) Compiler helpers
+
+The following two helper functions could be introduced and would replace currently invalid `is-type` IL sequences when ByRefLike types are involved. Their behavior would broadly be defined to operate as if the ByRefLike aspect of either `TFrom` or `TTo` is not present. An alternative approach would be to consult with the Roslyn team and define the semantics of these functions to adhere to C# language rules.
+
+```csharp
+namespace System.Runtime.CompilerServices
+{
+ public static class RuntimeHelpers
+ {
+ // Replacement for the [box; isinst; brfalse/true] sequence.
+ public static bool IsInstanceOf<TFrom, TTo>(TFrom source)
+ where TFrom: allows ref struct
+ where TTo: allows ref struct;
+
+ // Replacement for the [box; isinst; unbox.any] sequence.
+ // Would throw InvalidCastException for invalid use at run-time.
+ // For example:
+ // TFrom: RS, TTo: object => always throws
+ // TFrom: RS, TTo: => always throws
+ public static TTo CastTo<TFrom, TTo>(TFrom source)
+ where TFrom: allows ref struct
+ where TTo: allows ref struct;
+ }
+}
+```
+
+Example usage of the above methods.
+
+```csharp
+TTo result;
+if (RuntimeHelpers.IsInstanceOf<TFrom, TTo>(source))
+{
+ result = RuntimeHelpers.CastTo<TFrom, TTo>(source);
+}
+```
-## Special IL Sequences
+### Option 2) Special IL sequences
-The following are IL sequences involving the `box` instruction. They are used for common C# language constructs and shall continue to be valid, even with ByRefLike types, in cases where the result can be computed at JIT time and elided safely. These sequences must now be elided when the target type is ByRefLike. The conditions where each sequence is elided are described below and each condition will be added to the ECMA-335 addendum.
+The following are IL sequences involving the `box` instruction. They are used for common C# language constructs and would continue to be valid, even with ByRefLike types. These sequences would be **required** to be valid when the target type is ByRefLike. Each sequence would be added to the ECMA-335 addendum.
-`box` ; `unbox.any` – The box target type is equal to the unboxed target type.
+`box` ; `isinst` ; `br_true/false` – Passing a ByRefLike type as the argument to the `box` instruction is permitted to accomplish a type check, in C# `x is Y`. **Note** ByRefLike types would evaluate to `true` when compared against `System.Object`.
-`box` ; `br_true/false` – The box target type is non-`Nullable`.
+`box` ; `isinst` ; `unbox.any` – In order to permit "type pattern matching", in C# `x is Y y`, this sequence will permit use of a ByRefLike type on any instruction, but does not permit the use of generic parameters being exposed to `isinst` or `unbox.any`.
-`box` ; `isinst` ; `unbox.any` – The box, `isint`, and unbox target types are all equal.
+`box` ; `unbox.any` – Valid to use ByRefLike types.
-`box` ; `isinst` ; `br_true/false` – The box target type is equal to the unboxed target type or the box target type is `Nullable` and target type equalities can be computed.
+`box` ; `br_true/false` – Valid to use ByRefLike types.
## Examples
-Below are valid and invalid examples of ByRefLike as Generic parameters. All examples use the **not official** syntax, `allows ref struct`, for indicating the Generic permits ByRefLike types.
+Below are currently (.NET 9) valid and invalid examples of ByRefLike as Generic parameters.
**1) Valid**
```csharp
diff --git a/docs/design/features/hw-intrinsics.md b/docs/design/features/hw-intrinsics.md
index 3ed120d5142e5e..5234b366528ec9 100644
--- a/docs/design/features/hw-intrinsics.md
+++ b/docs/design/features/hw-intrinsics.md
@@ -10,11 +10,7 @@ There is a design document for the Arm64 intrinsics: https://github.com/dotnet/r
## Overview
-The reference assemblies for the hardware intrinsics live in corefx, but all of the implementation is in the coreclr repo:
-
-* The C# implementation lives in coreclr/System.Private.CoreLib/shared/System/Runtime/Intrinsics. These are little more than skeleton methods that are only compiled if needed for indirect invocation.
-
- * Note that they are mirrored to other repositories, including corefx, corert and mono.
+* The C# implementation lives in src/libraries/System.Private.CoreLib/shared/System/Runtime/Intrinsics. These are little more than skeleton methods that are only compiled if needed for indirect invocation.
## C# Implementation
diff --git a/docs/design/mono/web/aot.md b/docs/design/mono/web/aot.md
index ffa14737f3ee3c..b2be0bcbd4a390 100644
--- a/docs/design/mono/web/aot.md
+++ b/docs/design/mono/web/aot.md
@@ -105,7 +105,7 @@ It is possible to use LLVM in AOT mode. This is implemented by compiling methods
### Full AOT mode
-Some platforms like the iphone prohibit JITted code, using technical and/or legal means. This is a significant problem for the mono runtime, since it generates a lot of code dynamically, using either the JIT or more low-level code generation macros. To solve this, the AOT compiler is able to function in full-aot or aot-only mode, where it generates and saves all the neccesary code in the aot image, so at runtime, no code needs to be generated. There are two kinds of code which needs to be considered:
+Some platforms like the iphone prohibit JITted code, using technical and/or legal means. This is a significant problem for the mono runtime, since it generates a lot of code dynamically, using either the JIT or more low-level code generation macros. To solve this, the AOT compiler is able to function in full-aot or aot-only mode, where it generates and saves all the necessary code in the aot image, so at runtime, no code needs to be generated. There are two kinds of code which needs to be considered:
- wrapper methods, that is methods whose IL is generated dynamically by the runtime. They are handled by generating them in the add_wrappers () function, then emitting them as 'extra' methods.
- trampolines and other small hand generated pieces of code. They are handled in an ad-hoc way in the emit_trampolines () function.
diff --git a/docs/design/mono/web/exception-handling.md b/docs/design/mono/web/exception-handling.md
index 2561c9245e8960..ab5f79d9595719 100644
--- a/docs/design/mono/web/exception-handling.md
+++ b/docs/design/mono/web/exception-handling.md
@@ -114,7 +114,7 @@ Currently, exceptions are raised by calling mono_raise_exception () in the middl
- To allow mono_raise_exception () to unwind through native code, we need to save the LMF structures which can add a lot of overhead even in the common case when no exception is thrown. So this is not zero-cost exception handling.
-An alternative might be to use a JNI style set-pending-exception API. Runtime code could call mono_set_pending_exception (), then return to its caller with an error indication allowing the caller to clean up. When execution returns to managed code, then managed-\>native wrapper could check whenever there is a pending exception and throw it if neccesary. Since we already check for pending thread interruption, this would have no overhead, allowing us to drop the LMF saving/restoring code, or significant parts of it.
+An alternative might be to use a JNI style set-pending-exception API. Runtime code could call mono_set_pending_exception (), then return to its caller with an error indication allowing the caller to clean up. When execution returns to managed code, then managed-\>native wrapper could check whenever there is a pending exception and throw it if necessary. Since we already check for pending thread interruption, this would have no overhead, allowing us to drop the LMF saving/restoring code, or significant parts of it.
### libunwind
diff --git a/docs/design/mono/web/linear-ir.md b/docs/design/mono/web/linear-ir.md
index af650f57a9c79f..b72e6c28c911c1 100644
--- a/docs/design/mono/web/linear-ir.md
+++ b/docs/design/mono/web/linear-ir.md
@@ -96,7 +96,7 @@ The JIT allocates a large number of vregs. Most of these are created during the
### Transitioning between the two states
- Most vregs start out being local. Others, like the ones representing the arguments and locals of a method, start out being global.
-- Some transformations done by the JIT can break the invariant that an lvreg is local to a basic block. There is a separate pass, mono_handle_global_vregs (), which verifies this invariant and transforms lvregs into global vregs if neccesary. This pass also does the opposite transformation, by transforming global vregs used only in one bblock into an lvreg.
+- Some transformations done by the JIT can break the invariant that an lvreg is local to a basic block. There is a separate pass, mono_handle_global_vregs (), which verifies this invariant and transforms lvregs into global vregs if necessary. This pass also does the opposite transformation, by transforming global vregs used only in one bblock into an lvreg.
- If an address of a vreg needs to be taken, the vreg is transformed into a global vreg.
JIT Passes
diff --git a/docs/design/mono/web/register-allocation.md b/docs/design/mono/web/register-allocation.md
index e6247d8eb95875..3dc7ae1a1a669c 100644
--- a/docs/design/mono/web/register-allocation.md
+++ b/docs/design/mono/web/register-allocation.md
@@ -56,7 +56,7 @@ Some variables might already be allocated to hardware registers during the globa
#### Floating point stack
-The x86 architecture uses a floating point register stack instead of a set of fp registers. The allocator supports this by a post-processing pass which keeps track of the height of the fp stack, and spills/loads values from the stack as neccesary.
+The x86 architecture uses a floating point register stack instead of a set of fp registers. The allocator supports this by a post-processing pass which keeps track of the height of the fp stack, and spills/loads values from the stack as necessary.
#### Calls
diff --git a/docs/design/specs/Ecma-335-Augments.md b/docs/design/specs/Ecma-335-Augments.md
index f6b8f9b964eff9..fd8a0fd93ca87f 100644
--- a/docs/design/specs/Ecma-335-Augments.md
+++ b/docs/design/specs/Ecma-335-Augments.md
@@ -14,7 +14,8 @@ This is a list of additions and edits to be made in ECMA-335 specifications. It
- [Covariant Return Types](#covariant-return-types)
- [Function Pointer Type Identity](#function-pointer-type-identity)
- [Unsigned data conversion with overflow detection](#unsigned-data-conversion-with-overflow-detection)
-- [Ref field support](#ref-fields)
+- [Ref fields support](#ref-fields)
+- [ByRefLike types in generics](#byreflike-generics)
- [Rules for IL rewriters](#rules-for-il-rewriters)
- [Checked user-defined operators](#checked-user-defined-operators)
- [Atomic reads and writes](#atomic-reads-and-writes)
@@ -1026,6 +1027,31 @@ Changes to signatures:
- Add a bullet point
- Managed pointers which point at null, the address just past the end of an object, or the address where an element just past the end of an array would be stored, are permitted but not dereferenceable.
+## ByRefLike types in generics
+
+ByRefLike types, defined in C# with the `ref struct` syntax, represent types that cannot escape to the managed heap and must remain on the stack. It is possible for these types to be used as generic parameters, but in order to improve utility certain affordances are required.
+
+### II.10.1.7
+An additional IL keyword, `byreflike`, is introduced to indicate use of ByRefLike types is permitted. This expands the set of permissible types used by this parameter, but limits the potential instructions that can be used on instances of this generic parameter type.
+
+### II.23.1.7
+Update the `SpecialConstraintMask` flag value and description, and add a new flag, `AllowByRefLike`.
+
+| Flag | Value | Description |
+| --- | ----- | ----------- |
+| `SpecialConstraintMask` | `0x3C` | These 4 bits contain one of the following values: |
+| ... | ... | ... |
+| `AllowByRefLike` | `0x20` | The generic parameter is allowed to be ByRefLike |
+
+### III.2.1
+The following case is added as the **third** case in the "if _thisType_" sequence.
+
+> If _thisType_ is ByRefLike and _thisType_ does not implement _method_, then a `NotSupportedException` is thrown at the callsite.
+
+The following is added to the paragraph starting with "This last case can only occur when _method_ was defined on `System.Object`, `System.ValueType`, or `System.Enum`".
+
+> The third case can only occur when _method_ was defined on `System.Object` or is a Default Interface Method.
+
## Rules for IL Rewriters
There are apis such as `System.Runtime.CompilerServices.RuntimeHelpers.CreateSpan(...)` which require that the PE file have a particular structure. In particular, that api requires that the associated RVA of a FieldDef which is used to create a span must be naturally aligned over the data type that `CreateSpan` is instantiated over. There are 2 major concerns.
diff --git a/docs/project/dotnet-standards.md b/docs/project/dotnet-standards.md
index 310589ef2462df..c373aa8f39ef6f 100644
--- a/docs/project/dotnet-standards.md
+++ b/docs/project/dotnet-standards.md
@@ -33,12 +33,12 @@ ECMA 335 - CLI
**ECMA 335 Partitions with added Microsoft Specific Implementation Notes**
-- [Partition I: Concepts and Architecture](http://download.microsoft.com/download/7/3/3/733AD403-90B2-4064-A81E-01035A7FE13C/MS%20Partition%20I.pdf)
-- [Partition II: Meta Data Definition and Semantics](http://download.microsoft.com/download/7/3/3/733AD403-90B2-4064-A81E-01035A7FE13C/MS%20Partition%20II.pdf)
-- [Partition III: CIL Instruction Set](http://download.microsoft.com/download/7/3/3/733AD403-90B2-4064-A81E-01035A7FE13C/MS%20Partition%20III.pdf)
-- [Partition IV: Profiles and Libraries](http://download.microsoft.com/download/7/3/3/733AD403-90B2-4064-A81E-01035A7FE13C/MS%20Partition%20IV.pdf)
-- [Partition V: Debug Interchange Format](http://download.microsoft.com/download/7/3/3/733AD403-90B2-4064-A81E-01035A7FE13C/MS%20Partition%20V.pdf)
-- [Partition VI: Annexes](http://download.microsoft.com/download/7/3/3/733AD403-90B2-4064-A81E-01035A7FE13C/MS%20Partition%20VI.pdf)
+- [Partition I: Concepts and Architecture](https://download.microsoft.com/download/7/3/3/733AD403-90B2-4064-A81E-01035A7FE13C/MS%20Partition%20I.pdf)
+- [Partition II: Meta Data Definition and Semantics](https://download.microsoft.com/download/7/3/3/733AD403-90B2-4064-A81E-01035A7FE13C/MS%20Partition%20II.pdf)
+- [Partition III: CIL Instruction Set](https://download.microsoft.com/download/7/3/3/733AD403-90B2-4064-A81E-01035A7FE13C/MS%20Partition%20III.pdf)
+- [Partition IV: Profiles and Libraries](https://download.microsoft.com/download/7/3/3/733AD403-90B2-4064-A81E-01035A7FE13C/MS%20Partition%20IV.pdf)
+- [Partition V: Debug Interchange Format](https://download.microsoft.com/download/7/3/3/733AD403-90B2-4064-A81E-01035A7FE13C/MS%20Partition%20V.pdf)
+- [Partition VI: Annexes](https://download.microsoft.com/download/7/3/3/733AD403-90B2-4064-A81E-01035A7FE13C/MS%20Partition%20VI.pdf)
**ECMA Technical Report 084: Information Derived from Partition IV XML File**
diff --git a/docs/project/glossary.md b/docs/project/glossary.md
index 55d59730bd5720..a8567f8bc1e7a2 100644
--- a/docs/project/glossary.md
+++ b/docs/project/glossary.md
@@ -14,13 +14,13 @@ terminology.
| AOT | Ahead-of-time compiler. Converts the MSIL bytecode to native machine code for a specific target CPU architecture. |
| BBT | Microsoft internal early version of C/C++ PGO. See https://www.microsoft.com/windows/cse/bit_projects.mspx. |
| BOTR | Book Of The Runtime. |
-| BCL | Base Class Library. A set of `System.*` (and to a limited extent `Microsoft.*`) libraries that make up the lower layer of the .NET library stack. |
+| BCL | Base Class Library. A set of `System.*` (and to a limited extent `Microsoft.*`) libraries that make up the lower layer of the .NET library stack. See CoreFX. |
| CIL | Common Intermediate Language. Equivalent to IL, also equivalent to [MSIL](https://learn.microsoft.com/dotnet/standard/managed-execution-process#compiling-to-msil). |
| CLI | Command Line Interface, or Common Language Infastructure. |
| CLR | [Common Language Runtime](https://learn.microsoft.com/dotnet/standard/clr). |
| COMPlus | An early name for the .NET platform, back when it was envisioned as a successor to the COM platform (hence, "COM+"). Used in various places in the CLR infrastructure, most prominently as a common prefix for the names of internal configuration settings. Note that this is different from the product that eventually ended up being named [COM+](https://msdn.microsoft.com/library/windows/desktop/ms685978.aspx). |
| COR | [Common Object Runtime](http://www.danielmoth.com/Blog/mscorlibdll.aspx). The name of .NET before it was named .NET. |
-| CoreFX | Core Framework. Original project name for open source and cross-platform version of [.NET runtime libraries](https://github.com/dotnet/runtime/tree/main/src/libraries) |
+| CoreFX | Core Framework. Original project name for open source and cross-platform version of [.NET runtime libraries](https://github.com/dotnet/runtime/tree/main/src/libraries). Where we want to distinguish the foundational .NET libraries - those in the dotnet/runtime repo - instead of CoreFX or BCL we call them the core libraries. |
| DAC | Data Access Component. An abstraction layer over the internal structures in the runtime. |
| EE | [Execution Engine](https://learn.microsoft.com/dotnet/standard/managed-execution-process#running_code). |
| GC | [Garbage Collector](https://github.com/dotnet/runtime/blob/main/docs/design/coreclr/botr/garbage-collection.md). |
@@ -285,8 +285,8 @@ and enabling support for running WPF on .NET Core (Windows Only).
## Engineering system
* **Helix**. It's a massively-parallel, general-purpose job processing and
- result aggregation system running in the cloud. The work items that corefx
- sends to Helix are [xunit][xunit] tests. Test results are shown through the
+ result aggregation system running in the cloud. The work items that we
+ send to Helix are [xunit][xunit] tests. Test results are shown through the
[*Mission Control* reporting site][mc.dot.net]; to go to the test results in a
PR from Azure DevOps, you can click on the *Send to Helix* step in the build,
and the logs will have the URL.
diff --git a/docs/project/issue-guide.md b/docs/project/issue-guide.md
index eef40d65dcee08..b055327f012506 100644
--- a/docs/project/issue-guide.md
+++ b/docs/project/issue-guide.md
@@ -1,14 +1,14 @@
Issue Guide
===========
-This page outlines how the CoreFx team thinks about and handles issues. For us, issues on GitHub represent actionable work that should be done at some future point. It may be as simple as a small product or test bug or as large as the work tracking the design of a new feature. However, it should be work that falls under the charter of CoreFx, which is a collection of foundational libraries that make up the .NET Core development stack. We will keep issues open even if the CoreFx team internally has no plans to address them in an upcoming release, as long as we consider the issue to fall under our purview.
+This page outlines how the team thinks about and handles issues in this repo. For us, issues on GitHub represent actionable work that should be done at some future point. It may be as simple as a small product or test bug or as large as the work tracking the design of a new feature. However, it should be work that falls under the charter of the team. We will keep issues open even if the team internally has no plans to address them in an upcoming release, as long as we consider the issue to fall under our purview and something we would potentially take a change for.
### When we close issues
As noted above, we don't close issues just because we don't plan to address them in an upcoming release. So why do we close issues? There are few major reasons:
-1. Issues unrelated to CoreFx. When possible, we'll try to find a better home for the issue and point you to it.
+1. Issues unrelated to code in this repo. When possible, we'll try to find a better home for the issue and point you to it.
2. Cross cutting work better suited for another team. Sometimes the line between the framework, languages and runtime blurs. For some issues, we may feel that the work is better suited for the runtime team, language team or other partner. In these cases, we'll close the issue and open it with the partner team. If they end up not deciding to take on the issue, we can reconsider it here.
-3. Nebulous and Large open issues. Large open issues are sometimes better suited for [User Voice](http://visualstudio.uservoice.com/forums/121579-visual-studio/category/31481--net), especially when the work will cross the boundaries of the framework, language and runtime. A good example of this is the SIMD support we recently added to CoreFx. This started as a [User Voice request](https://visualstudio.uservoice.com/forums/121579-visual-studio-2015/suggestions/2212443-c-and-simd), and eventually turned into work for both the core libraries and runtime.
+3. Nebulous and Large open issues. Large open issues are sometimes better suited for [User Voice](http://visualstudio.uservoice.com/forums/121579-visual-studio/category/31481--net), especially when the work will cross the boundaries of the framework, language and runtime. A good example of this is the SIMD support we recently added to the core libraries. This started as a [User Voice request](https://visualstudio.uservoice.com/forums/121579-visual-studio-2015/suggestions/2212443-c-and-simd), and eventually turned into work for both the core libraries and runtime.
Sometimes after debate, we'll decide an issue isn't a good fit for the .NET runtime codebase. In that case, we'll also close it. Because of this, we ask that you don't start working on an issue until it's tagged with [help wanted](https://github.com/dotnet/runtime/labels/help%20wanted) or [api-approved](https://github.com/dotnet/runtime/labels/api-approved). Both you and the team will be unhappy if you spend time and effort working on a change we'll ultimately be unable to take. We try to avoid that.
@@ -78,8 +78,8 @@ Feel free to use other labels if it helps your triage efforts (e.g. **needs more
1. Each PR has exactly one **area-\*** label
* Movitation: Area owners will get email notification about new issue in their area.
-1. PR has **Assignee** set to author of the PR, if it is non-CoreFX engineer, then area owners are co-assignees
- * Motivation #1: Area owners are responsible to do code reviews for PRs from external contributors. CoreFX engineers know how to get code reviews from others.
+1. PR has **Assignee** set to author of the PR, if it is an engineer who does not work in this repo, then area owners are co-assignees
+ * Motivation #1: Area owners are responsible to do code reviews for PRs from external contributors. Committers know how to get code reviews from others.
* Motivation #2: Assignees will get notifications for anything happening on the PR.
1. [Optional] Set milestone according to the branch the PR is against (main = 6.0, release/5.0 = 5.0)
* Motivation: Easier to track and audit where which fix ended up and if it needs to be ported into another branch (hence reflecting branch the specific PR ended up and not the abstract issue).
diff --git a/docs/project/jit-testing.md b/docs/project/jit-testing.md
index dadc221537efec..d1e273e191f3b1 100644
--- a/docs/project/jit-testing.md
+++ b/docs/project/jit-testing.md
@@ -112,7 +112,7 @@ Not yet clear what porting this entails.
### Leverage peer repo test suites.
We should be able to directly leverage tests provided in peer repo suites, once
-they can run on top of CoreCLR. In particular CoreFx and Roslyn test cases
+they can run on top of CoreCLR. In particular libraries and Roslyn test cases
could be good initial targets.
Note LLILC is currently working through the remaining issues that prevent it
diff --git a/docs/project/list-of-diagnostics.md b/docs/project/list-of-diagnostics.md
index c8295e99c70b69..6f3c52063df440 100644
--- a/docs/project/list-of-diagnostics.md
+++ b/docs/project/list-of-diagnostics.md
@@ -276,3 +276,20 @@ The diagnostic id values reserved for .NET Libraries analyzer warnings are `SYSL
| __`SYSLIBSUPPRESS0001`__ | CA1822 | Do not offer to make methods static when the methods need to be instance methods for a custom marshaller shape. |
| __`SYSLIBSUPPRESS0002`__ | IL2026 | ConfigurationBindingGenerator: suppress RequiresUnreferencedCode diagnostic for binding call that has been intercepted by a generated static variant. |
| __`SYSLIBSUPPRESS0003`__ | IL3050 | ConfigurationBindingGenerator: suppress RequiresDynamicCode diagnostic for binding call that has been intercepted by a generated static variant. |
+
+## Experimental APIs
+
+APIs can be marked as `[Experimental]` if their shape or functionality is included in a release but not yet officially supported. Experimental APIs offer the opportunity to collect customer feedback on these APIs in a major release, usually refining the APIs and removing the `[Experimental]` attribute in the next release. The `[Experimental]` attribute differs from `[RequiresPreviewFeatures]`, wherein:
+
+* `[RequiresPreviewFeatures]` APIs require a corresponding preview feature in another product area such as the compiler or SDK
+* `[Experimental]` APIs are entirely self-contained within the libraries and do not require preview features in other parts of the product
+
+The diagnostic id values reserved for experimental APIs are `SYSLIB5001` through `SYSLIB5999`. When marking an API as `[Experimental]`, claim the next three-digit identifier in the `SYSLIB5###` sequence and add it to the list below. The URL template for all experimental APIs is `https://aka.ms/dotnet-warnings/{0}`. The `{0}` placeholder is replaced by the compiler with the `SYSLIB5###` identifier.
+
+### Experimental Diagnostics (`SYSLIB5001` - `SYSLIB5999`)
+
+Diagnostic id values for experimental APIs must not be recycled, as that could silently opt customers into new experimental APIs where they had previously suppressed the ID for a previous usage of the value.
+
+| Diagnostic ID | Introduced | Removed | Description |
+| :---------------- | ---------: | ------: | :---------- |
+| __`SYSLIB5001`__ | .NET 9 | TBD | `Tensor` and related APIs in System.Numerics.Tensors are experimental in .NET 9 |
diff --git a/docs/workflow/debugging/libraries/unix-instructions.md b/docs/workflow/debugging/libraries/unix-instructions.md
index 5063b1f4147194..b217740c3e59d5 100644
--- a/docs/workflow/debugging/libraries/unix-instructions.md
+++ b/docs/workflow/debugging/libraries/unix-instructions.md
@@ -1,7 +1,7 @@
-Debugging CoreFX on Unix
-==========================
+Debugging core .NET libraries on Unix
+=====================================
-CoreFX can be debugged on unix using both lldb and visual studio code
+.NET can be debugged on Unix using both lldb and Visual Studio Code.
## Using lldb and SOS
diff --git a/docs/workflow/debugging/libraries/windows-instructions.md b/docs/workflow/debugging/libraries/windows-instructions.md
index 16c48cdc4d4dce..543da53dcd3e45 100644
--- a/docs/workflow/debugging/libraries/windows-instructions.md
+++ b/docs/workflow/debugging/libraries/windows-instructions.md
@@ -1,5 +1,5 @@
-Debugging CoreFX on Windows
-==========================
+Debugging core .NET libraries on Windows
+========================================
You can Debug .NET via Visual Studio or WinDBG.
@@ -37,11 +37,11 @@ For example: `src\System.Net.Sockets\tests\Functional\System.Net.Sockets.Tests.c
* Execute the test
-Assuming that your repo is at `C:\corefx`:
+Assuming that your repo is at `C:\root`:
```
-cd C:\corefx\bin\tests\windows.AnyCPU.Debug\System.Net.Sockets.Tests\netcoreapp1.0
-C:\corefx\bin\tests\windows.AnyCPU.Debug\System.Net.Sockets.Tests\netcoreapp1.0\CoreRun.exe xunit.console.dll System.Net.Sockets.Tests.dll -xml testResults.xml -notrait category=nonwindowstests -notrait category=OuterLoop -notrait category=failing
+cd C:\root\bin\tests\windows.AnyCPU.Debug\System.Net.Sockets.Tests\netcoreapp1.0
+C:\root\bin\tests\windows.AnyCPU.Debug\System.Net.Sockets.Tests\netcoreapp1.0\CoreRun.exe xunit.console.dll System.Net.Sockets.Tests.dll -xml testResults.xml -notrait category=nonwindowstests -notrait category=OuterLoop -notrait category=failing
```
* If the test crashes or encounters a `Debugger.Launch()` method call, WinDBG will automatically start and attach to the `CoreRun.exe` process
@@ -52,7 +52,7 @@ The following commands will properly configure the debugging extension and fix s
.symfix
.srcfix
.reload
-!load C:\corefx\packages\runtime.win7-x64.Microsoft.NETCore.Runtime.CoreCLR\\tools\sos
+!load C:\root\packages\runtime.win7-x64.Microsoft.NETCore.Runtime.CoreCLR\\tools\sos
```
_Important_: Pass in the correct path to your SOS extension discovered during the Prerequisites, step 2.
@@ -137,7 +137,7 @@ Logs are going to be placed in %SYSTEMDRIVE%\sockets.etl.
### Built-in EventSource tracing
-The following EventSources are built-in to CoreFX. The ones that are not marked as [__TestCode__] can be enabled in production scenarios for log collection.
+The following EventSources are built-in to the .NET platform. The ones that are not marked as [__TestCode__] can be enabled in production scenarios for log collection.
#### Global
* `*System.Diagnostics.Eventing.FrameworkEventSource {8E9F5090-2D75-4d03-8A81-E5AFBF85DAF1}`: Global EventSource used by multiple namespaces.
diff --git a/docs/workflow/requirements/linux-requirements.md b/docs/workflow/requirements/linux-requirements.md
index 6b4aa1ee65ec3f..2aa1794ef1ac81 100644
--- a/docs/workflow/requirements/linux-requirements.md
+++ b/docs/workflow/requirements/linux-requirements.md
@@ -37,7 +37,7 @@ Install the following packages for the toolchain:
* CMake 3.20 or newer
* llvm
* lld
-* clang
+* clang (for WASM 16 or newer)
* build-essential
* python-is-python3
* curl
@@ -59,6 +59,7 @@ sudo apt install -y cmake llvm lld clang build-essential \
```
**NOTE**: As of now, Ubuntu's `apt` only has until CMake version 3.16.3 if you're using Ubuntu 20.04 LTS (less in older Ubuntu versions), and version 3.18.4 in Debian 11 (less in older Debian versions). This is lower than the required 3.20, which in turn makes it incompatible with the repo. For this case, we can use the `snap` package manager or the _Kitware APT feed_ to get a new enough version of CMake.
+**NOTE**: If you have Ubuntu 22.04 LTS or older and your `apt` does not have clang version 16, you can add `"deb http://apt.llvm.org/$(lsb_release -s -c)/ llvm-toolchain-$(lsb_release -s -c)-18 main"` repository to your `apt`. See how we do it for linux-based containers [here](./../../../.devcontainer/Dockerfile).
For snap:
diff --git a/docs/workflow/trimming/feature-switches.md b/docs/workflow/trimming/feature-switches.md
index 40a168d7c25bcb..92d04e897a2da1 100644
--- a/docs/workflow/trimming/feature-switches.md
+++ b/docs/workflow/trimming/feature-switches.md
@@ -2,41 +2,22 @@
Starting with .NET 5 there are several [feature-switches](https://github.com/dotnet/designs/blob/master/accepted/2020/feature-switch.md) available which
can be used to control the size of the final binary. They are available in all
-configurations but their defaults might vary as any SDK can set the defaults differently.
+configurations but their defaults might vary as any SDK can set the defaults differently. Publicly documented feature switches can be found on the [official docs](https://learn.microsoft.com/en-us/dotnet/core/deploying/trimming/trimming-options#trimming-framework-library-features). Non-public feature switches that impact the runtime libraries can be found in the following table.
## Available Feature Switches
| MSBuild Property Name | AppContext Setting | Description |
|-|-|-|
-| DebuggerSupport | System.Diagnostics.Debugger.IsSupported | Any dependency that enables better debugging experience to be trimmed when set to false |
-| EnableUnsafeUTF7Encoding | System.Text.Encoding.EnableUnsafeUTF7Encoding | Insecure UTF-7 encoding is trimmed when set to false |
-| EnableUnsafeBinaryFormatterSerialization | System.Runtime.Serialization.EnableUnsafeBinaryFormatterSerialization | BinaryFormatter serialization support is trimmed when set to false |
-| EventSourceSupport | System.Diagnostics.Tracing.EventSource.IsSupported | Any EventSource related code or logic is trimmed when set to false |
-| InvariantGlobalization | System.Globalization.Invariant | All globalization specific code and data is trimmed when set to true |
-| MetricsSupport | System.Diagnostics.Metrics.Meter.IsSupported | Any Metrics related code or logic is trimmed when set to false |
-| PredefinedCulturesOnly | System.Globalization.PredefinedCulturesOnly | Don't allow creating a culture for which the platform does not have data |
-| HybridGlobalization | System.Globalization.Hybrid | Properties connected with the mixed: platform-specific + icu-based globalization will be trimmed |
-| UseSystemResourceKeys | System.Resources.UseSystemResourceKeys | Any localizable resources for system assemblies is trimmed when set to true |
-| HttpActivityPropagationSupport | System.Net.Http.EnableActivityPropagation | Any dependency related to diagnostics support for System.Net.Http is trimmed when set to false |
-| UseNativeHttpHandler | System.Net.Http.UseNativeHttpHandler | HttpClient uses by default platform native implementation of HttpMessageHandler if set to true. |
-| StartupHookSupport | System.StartupHookProvider.IsSupported | Startup hooks are disabled when set to false. Startup hook related functionality can be trimmed. |
-| AutoreleasePoolSupport | System.Threading.Thread.EnableAutoreleasePool | When set to true, creates an NSAutoreleasePool for each thread and thread pool work item on applicable platforms. |
-| CustomResourceTypesSupport | System.Resources.ResourceManager.AllowCustomResourceTypes | Use of custom resource types is disabled when set to false. ResourceManager code paths that use reflection for custom types can be trimmed. |
-| EnableUnsafeBinaryFormatterInDesigntimeLicenseContextSerialization | System.ComponentModel.TypeConverter.EnableUnsafeBinaryFormatterInDesigntimeLicenseContextSerialization | BinaryFormatter serialization support is trimmed when set to false. |
-| BuiltInComInteropSupport | System.Runtime.InteropServices.BuiltInComInterop.IsSupported | Built-in COM support is trimmed when set to false. |
-| EnableCppCLIHostActivation | System.Runtime.InteropServices.EnableCppCLIHostActivation | C++/CLI host activation code is disabled when set to false and related functionality can be trimmed. |
-| MetadataUpdaterSupport | System.Reflection.Metadata.MetadataUpdater.IsSupported | Metadata update related code to be trimmed when set to false |
-| _EnableConsumingManagedCodeFromNativeHosting | System.Runtime.InteropServices.EnableConsumingManagedCodeFromNativeHosting | Getting a managed function from native hosting is disabled when set to false and related functionality can be trimmed. |
-| VerifyDependencyInjectionOpenGenericServiceTrimmability | Microsoft.Extensions.DependencyInjection.VerifyOpenGenericServiceTrimmability | When set to true, DependencyInjection will verify trimming annotations applied to open generic services are correct |
-| DisableDependencyInjectionDynamicEngine | Microsoft.Extensions.DependencyInjection.DisableDynamicEngine | When set to true, DependencyInjection will avoid using System.Reflection.Emit when realizing services |
+| DisableDependencyInjectionDynamicEngine | Microsoft.Extensions.DependencyInjection.DisableDynamicEngine | When set to true, DependencyInjection will avoid using System.Reflection.Emit when realizing services. |
| DynamicCodeSupport | System.Runtime.CompilerServices.RuntimeFeature.IsDynamicCodeSupported | Changes RuntimeFeature.IsDynamicCodeSupported to false to allow testing AOT-safe fallback code without publishing for Native AOT. |
-| _AggressiveAttributeTrimming | System.AggressiveAttributeTrimming | When set to true, aggressively trims attributes to allow for the most size savings possible, even if it could result in runtime behavior changes |
-| JsonSerializerIsReflectionEnabledByDefault | System.Text.Json.JsonSerializer.IsReflectionEnabledByDefault | When set to false, disables using reflection as the default contract resolver in System.Text.Json |
| EnableGeneratedComInterfaceComImportInterop | System.Runtime.InteropServices.Marshalling.EnableGeneratedComInterfaceComImportInterop | When set to true, enables casting source-generated COM object wrappers to built-in COM-based COM interfaces. |
-| _UseManagedNtlm | System.Net.Security.UseManagedNtlm | When set to true, uses built-in managed implementation of NTLM and SPNEGO algorithm for HTTP, SMTP authentication, and NegotiateAuthentication API instead of system provided GSSAPI implementation. |
+| VerifyDependencyInjectionOpenGenericServiceTrimmability | Microsoft.Extensions.DependencyInjection.VerifyOpenGenericServiceTrimmability | When set to true, DependencyInjection will verify trimming annotations applied to open generic services are correct. |
+| _AggressiveAttributeTrimming | System.AggressiveAttributeTrimming | When set to true, aggressively trims attributes to allow for the most size savings possible, even if it could result in runtime behavior changes |
| _ComObjectDescriptorSupport | System.ComponentModel.TypeDescriptor.IsComObjectDescriptorSupported | When set to true, supports creating a TypeDescriptor based view of COM objects. |
-| _DesignerHostSupport | System.ComponentModel.Design.IDesignerHost.IsSupported | When set to true, supports creating design components at runtime. |
| _DefaultValueAttributeSupport | System.ComponentModel.DefaultValueAttribute.IsSupported | When set to true, supports creating a DefaultValueAttribute at runtime. |
+| _DesignerHostSupport | System.ComponentModel.Design.IDesignerHost.IsSupported | When set to true, supports creating design components at runtime. |
+| _EnableConsumingManagedCodeFromNativeHosting | System.Runtime.InteropServices.EnableConsumingManagedCodeFromNativeHosting | Getting a managed function from native hosting is disabled when set to false and related functionality can be trimmed. |
+| _UseManagedNtlm | System.Net.Security.UseManagedNtlm | When set to true, uses built-in managed implementation of NTLM and SPNEGO algorithm for HTTP, SMTP authentication, and NegotiateAuthentication API instead of system provided GSSAPI implementation. |
Any feature-switch which defines property can be set in csproj file or
on the command line as any other MSBuild property. Those without predefined property name
diff --git a/eng/SourceBuildPrebuiltBaseline.xml b/eng/SourceBuildPrebuiltBaseline.xml
index 960a504ffa0e59..4e366ddb46918d 100644
--- a/eng/SourceBuildPrebuiltBaseline.xml
+++ b/eng/SourceBuildPrebuiltBaseline.xml
@@ -28,5 +28,11 @@
+
+
+
+
+
+
diff --git a/eng/Version.Details.xml b/eng/Version.Details.xml
index e188a4fe12a6a0..fffb15f6f3b72f 100644
--- a/eng/Version.Details.xml
+++ b/eng/Version.Details.xml
@@ -12,41 +12,37 @@
https://github.com/dotnet/wcf
7f504aabb1988e9a093c1e74d8040bd52feb2f01
-
- https://github.com/dotnet/emsdk
- d3583522209829d1ed0440662ba136c7b7700b16
-
-
+
https://github.com/dotnet/llvm-project
- 26f8c30340764cfa7fa9090dc01a36c222bf09c1
+ 3358dfd351b424698f3f2cd67432dc62c333a64d
-
+
https://github.com/dotnet/llvm-project
- 26f8c30340764cfa7fa9090dc01a36c222bf09c1
+ 3358dfd351b424698f3f2cd67432dc62c333a64d
-
+
https://github.com/dotnet/llvm-project
- 26f8c30340764cfa7fa9090dc01a36c222bf09c1
+ 3358dfd351b424698f3f2cd67432dc62c333a64d
-
+
https://github.com/dotnet/llvm-project
- 26f8c30340764cfa7fa9090dc01a36c222bf09c1
+ 3358dfd351b424698f3f2cd67432dc62c333a64d
-
+
https://github.com/dotnet/llvm-project
- 26f8c30340764cfa7fa9090dc01a36c222bf09c1
+ 3358dfd351b424698f3f2cd67432dc62c333a64d
-
+
https://github.com/dotnet/llvm-project
- 26f8c30340764cfa7fa9090dc01a36c222bf09c1
+ 3358dfd351b424698f3f2cd67432dc62c333a64d
-
+
https://github.com/dotnet/llvm-project
- 26f8c30340764cfa7fa9090dc01a36c222bf09c1
+ 3358dfd351b424698f3f2cd67432dc62c333a64d
-
+
https://github.com/dotnet/llvm-project
- 26f8c30340764cfa7fa9090dc01a36c222bf09c1
+ 3358dfd351b424698f3f2cd67432dc62c333a64d
https://github.com/dotnet/command-line-api
@@ -58,267 +54,267 @@
803d8598f98fb4efd94604b32627ee9407f246db
-
+
https://github.com/dotnet/cecil
- 7e4af02521473d89d6144b3da58fef253e498974
+ e05101e694b7c86f03f767014fc203cec5dc7f18
-
+
https://github.com/dotnet/cecil
- 7e4af02521473d89d6144b3da58fef253e498974
+ e05101e694b7c86f03f767014fc203cec5dc7f18
-
+
https://github.com/dotnet/emsdk
- 99ea0c06b84d3084d090da537080dd35d2a193cf
+ 40781ca2fc6be1dd81143d8773105a0f9f77d923
-
+
https://github.com/dotnet/emsdk
- 99ea0c06b84d3084d090da537080dd35d2a193cf
+ 40781ca2fc6be1dd81143d8773105a0f9f77d923
-
+
https://github.com/dotnet/source-build-reference-packages
- 9ae78a4e6412926d19ba97cfed159bf9de70b538
+ 97ffbaec397634584a11218e5a29e82601764226
-
+
https://github.com/dotnet/source-build-externals
- 311ef7fef52828f4a70a94d13e32c394fd3292ee
+ 26c52d02b67816269e647cc584f6b5db9a91970f
-
+
https://github.com/dotnet/arcade
- 731d793be2d0a66bafc96b1a79dc96b4d1f0301b
+ 4dc25182f9153b892628ef0d4e98663774ad2f01
-
+
https://github.com/dotnet/arcade
- 731d793be2d0a66bafc96b1a79dc96b4d1f0301b
+ 4dc25182f9153b892628ef0d4e98663774ad2f01
-
+
https://github.com/dotnet/arcade
- 731d793be2d0a66bafc96b1a79dc96b4d1f0301b
+ 4dc25182f9153b892628ef0d4e98663774ad2f01
-
+
https://github.com/dotnet/arcade
- 731d793be2d0a66bafc96b1a79dc96b4d1f0301b
+ 4dc25182f9153b892628ef0d4e98663774ad2f01
-
+
https://github.com/dotnet/arcade
- 731d793be2d0a66bafc96b1a79dc96b4d1f0301b
+ 4dc25182f9153b892628ef0d4e98663774ad2f01
-
+
https://github.com/dotnet/arcade
- 731d793be2d0a66bafc96b1a79dc96b4d1f0301b
+ 4dc25182f9153b892628ef0d4e98663774ad2f01
-
+
https://github.com/dotnet/arcade
- 731d793be2d0a66bafc96b1a79dc96b4d1f0301b
+ 4dc25182f9153b892628ef0d4e98663774ad2f01
-
+
https://github.com/dotnet/arcade
- 731d793be2d0a66bafc96b1a79dc96b4d1f0301b
+ 4dc25182f9153b892628ef0d4e98663774ad2f01
-
+
https://github.com/dotnet/arcade
- 731d793be2d0a66bafc96b1a79dc96b4d1f0301b
+ 4dc25182f9153b892628ef0d4e98663774ad2f01
-
+
https://github.com/dotnet/arcade
- 731d793be2d0a66bafc96b1a79dc96b4d1f0301b
+ 4dc25182f9153b892628ef0d4e98663774ad2f01
-
+
https://github.com/dotnet/arcade
- 731d793be2d0a66bafc96b1a79dc96b4d1f0301b
+ 4dc25182f9153b892628ef0d4e98663774ad2f01
-
+
https://github.com/dotnet/arcade
- 731d793be2d0a66bafc96b1a79dc96b4d1f0301b
+ 4dc25182f9153b892628ef0d4e98663774ad2f01
-
+
https://github.com/dotnet/arcade
- 731d793be2d0a66bafc96b1a79dc96b4d1f0301b
+ 4dc25182f9153b892628ef0d4e98663774ad2f01
-
+
https://github.com/dotnet/arcade
- 731d793be2d0a66bafc96b1a79dc96b4d1f0301b
+ 4dc25182f9153b892628ef0d4e98663774ad2f01
-
+
https://github.com/dotnet/arcade
- 731d793be2d0a66bafc96b1a79dc96b4d1f0301b
+ 4dc25182f9153b892628ef0d4e98663774ad2f01
-
+
https://github.com/dotnet/arcade
- 731d793be2d0a66bafc96b1a79dc96b4d1f0301b
+ 4dc25182f9153b892628ef0d4e98663774ad2f01
-
+
https://github.com/dotnet/arcade
- 731d793be2d0a66bafc96b1a79dc96b4d1f0301b
+ 4dc25182f9153b892628ef0d4e98663774ad2f01
-
+
https://github.com/dotnet/arcade
- 731d793be2d0a66bafc96b1a79dc96b4d1f0301b
+ 4dc25182f9153b892628ef0d4e98663774ad2f01
-
+
https://github.com/dotnet/arcade
- 731d793be2d0a66bafc96b1a79dc96b4d1f0301b
+ 4dc25182f9153b892628ef0d4e98663774ad2f01
-
+
https://github.com/dotnet/arcade
- 731d793be2d0a66bafc96b1a79dc96b4d1f0301b
+ 4dc25182f9153b892628ef0d4e98663774ad2f01
-
+
https://github.com/dotnet/runtime-assets
- 0cab6ca16f49b666163d4e1c0e3c080faf5a4e05
+ 2bea5264b7f62818ec8a4b7ff4a36906110f118f
-
+
https://github.com/dotnet/runtime-assets
- 0cab6ca16f49b666163d4e1c0e3c080faf5a4e05
+ 2bea5264b7f62818ec8a4b7ff4a36906110f118f
-
+
https://github.com/dotnet/runtime-assets
- 0cab6ca16f49b666163d4e1c0e3c080faf5a4e05
+ 2bea5264b7f62818ec8a4b7ff4a36906110f118f
-
+
https://github.com/dotnet/runtime-assets
- 0cab6ca16f49b666163d4e1c0e3c080faf5a4e05
+ 2bea5264b7f62818ec8a4b7ff4a36906110f118f
-
+
https://github.com/dotnet/runtime-assets
- 0cab6ca16f49b666163d4e1c0e3c080faf5a4e05
+ 2bea5264b7f62818ec8a4b7ff4a36906110f118f
-
+
https://github.com/dotnet/runtime-assets
- 0cab6ca16f49b666163d4e1c0e3c080faf5a4e05
+ 2bea5264b7f62818ec8a4b7ff4a36906110f118f
-
+
https://github.com/dotnet/runtime-assets
- 0cab6ca16f49b666163d4e1c0e3c080faf5a4e05
+ 2bea5264b7f62818ec8a4b7ff4a36906110f118f
-
+
https://github.com/dotnet/runtime-assets
- 0cab6ca16f49b666163d4e1c0e3c080faf5a4e05
+ 2bea5264b7f62818ec8a4b7ff4a36906110f118f
-
+
https://github.com/dotnet/runtime-assets
- 0cab6ca16f49b666163d4e1c0e3c080faf5a4e05
+ 2bea5264b7f62818ec8a4b7ff4a36906110f118f
-
+
https://github.com/dotnet/runtime-assets
- 0cab6ca16f49b666163d4e1c0e3c080faf5a4e05
+ 2bea5264b7f62818ec8a4b7ff4a36906110f118f
-
+
https://github.com/dotnet/runtime-assets
- 0cab6ca16f49b666163d4e1c0e3c080faf5a4e05
+ 2bea5264b7f62818ec8a4b7ff4a36906110f118f
-
+
https://github.com/dotnet/runtime-assets
- 0cab6ca16f49b666163d4e1c0e3c080faf5a4e05
+ 2bea5264b7f62818ec8a4b7ff4a36906110f118f
-
+
https://github.com/dotnet/runtime-assets
- 0cab6ca16f49b666163d4e1c0e3c080faf5a4e05
+ 2bea5264b7f62818ec8a4b7ff4a36906110f118f
-
+
https://github.com/dotnet/llvm-project
- 26f8c30340764cfa7fa9090dc01a36c222bf09c1
+ 3358dfd351b424698f3f2cd67432dc62c333a64d
-
+
https://github.com/dotnet/llvm-project
- 26f8c30340764cfa7fa9090dc01a36c222bf09c1
+ 3358dfd351b424698f3f2cd67432dc62c333a64d
-
+
https://github.com/dotnet/llvm-project
- 26f8c30340764cfa7fa9090dc01a36c222bf09c1
+ 3358dfd351b424698f3f2cd67432dc62c333a64d
-
+
https://github.com/dotnet/llvm-project
- 26f8c30340764cfa7fa9090dc01a36c222bf09c1
+ 3358dfd351b424698f3f2cd67432dc62c333a64d
-
+
https://github.com/dotnet/llvm-project
- 26f8c30340764cfa7fa9090dc01a36c222bf09c1
+ 3358dfd351b424698f3f2cd67432dc62c333a64d
-
+
https://github.com/dotnet/llvm-project
- 26f8c30340764cfa7fa9090dc01a36c222bf09c1
+ 3358dfd351b424698f3f2cd67432dc62c333a64d
-
+
https://github.com/dotnet/llvm-project
- 26f8c30340764cfa7fa9090dc01a36c222bf09c1
+ 3358dfd351b424698f3f2cd67432dc62c333a64d
-
+
https://github.com/dotnet/llvm-project
- 26f8c30340764cfa7fa9090dc01a36c222bf09c1
+ 3358dfd351b424698f3f2cd67432dc62c333a64d
-
+
https://github.com/dotnet/llvm-project
- 26f8c30340764cfa7fa9090dc01a36c222bf09c1
+ 3358dfd351b424698f3f2cd67432dc62c333a64d
-
+
https://github.com/dotnet/llvm-project
- 26f8c30340764cfa7fa9090dc01a36c222bf09c1
+ 3358dfd351b424698f3f2cd67432dc62c333a64d
-
+
https://github.com/dotnet/llvm-project
- 26f8c30340764cfa7fa9090dc01a36c222bf09c1
+ 3358dfd351b424698f3f2cd67432dc62c333a64d
-
+
https://github.com/dotnet/llvm-project
- 26f8c30340764cfa7fa9090dc01a36c222bf09c1
+ 3358dfd351b424698f3f2cd67432dc62c333a64d
-
+
https://github.com/dotnet/llvm-project
- 26f8c30340764cfa7fa9090dc01a36c222bf09c1
+ 3358dfd351b424698f3f2cd67432dc62c333a64d
-
+
https://github.com/dotnet/llvm-project
- 26f8c30340764cfa7fa9090dc01a36c222bf09c1
+ 3358dfd351b424698f3f2cd67432dc62c333a64d
-
+
https://github.com/dotnet/runtime
- 4e278fe17f69ea31fbdcbab74ac47ec6fa84914b
+ db32911d71075a81b50ad07bfcf10194212bda20
-
+
https://github.com/dotnet/runtime
- 4e278fe17f69ea31fbdcbab74ac47ec6fa84914b
+ db32911d71075a81b50ad07bfcf10194212bda20
-
+
https://github.com/dotnet/runtime
- 4e278fe17f69ea31fbdcbab74ac47ec6fa84914b
+ db32911d71075a81b50ad07bfcf10194212bda20
-
+
https://github.com/dotnet/runtime
- 4e278fe17f69ea31fbdcbab74ac47ec6fa84914b
+ db32911d71075a81b50ad07bfcf10194212bda20
-
+
https://github.com/dotnet/runtime
- 4e278fe17f69ea31fbdcbab74ac47ec6fa84914b
+ db32911d71075a81b50ad07bfcf10194212bda20
-
+
https://github.com/dotnet/runtime
- 4e278fe17f69ea31fbdcbab74ac47ec6fa84914b
+ db32911d71075a81b50ad07bfcf10194212bda20
-
+
https://github.com/dotnet/runtime
- 4e278fe17f69ea31fbdcbab74ac47ec6fa84914b
+ db32911d71075a81b50ad07bfcf10194212bda20
-
+
https://github.com/dotnet/runtime
- 4e278fe17f69ea31fbdcbab74ac47ec6fa84914b
+ db32911d71075a81b50ad07bfcf10194212bda20
-
+
https://github.com/dotnet/runtime
- 4e278fe17f69ea31fbdcbab74ac47ec6fa84914b
+ db32911d71075a81b50ad07bfcf10194212bda20
https://github.com/dotnet/xharness
@@ -332,45 +328,45 @@
https://github.com/dotnet/xharness
65d0584b517952962b7a79195b5d7606b52fcbfe
-
+
https://github.com/dotnet/arcade
- 731d793be2d0a66bafc96b1a79dc96b4d1f0301b
+ 4dc25182f9153b892628ef0d4e98663774ad2f01
-
+
https://dev.azure.com/dnceng/internal/_git/dotnet-optimization
- c3acfd159662959ff09f3a0d7663023db48bb78a
+ 5553d120d993730b2dbb7a1d086eec81017f3d66
-
+
https://dev.azure.com/dnceng/internal/_git/dotnet-optimization
- c3acfd159662959ff09f3a0d7663023db48bb78a
+ 5553d120d993730b2dbb7a1d086eec81017f3d66
-
+
https://dev.azure.com/dnceng/internal/_git/dotnet-optimization
- c3acfd159662959ff09f3a0d7663023db48bb78a
+ 5553d120d993730b2dbb7a1d086eec81017f3d66
-
+
https://dev.azure.com/dnceng/internal/_git/dotnet-optimization
- c3acfd159662959ff09f3a0d7663023db48bb78a
+ 5553d120d993730b2dbb7a1d086eec81017f3d66
-
+
https://github.com/dotnet/hotreload-utils
- 48812ad620b54916f684b7e7ff885de8ad675ebf
+ 97873b855f5817bedeb70a03ed066e6ec7686823
-
+
https://github.com/dotnet/runtime-assets
- 0cab6ca16f49b666163d4e1c0e3c080faf5a4e05
+ 2bea5264b7f62818ec8a4b7ff4a36906110f118f
-
+
https://github.com/dotnet/roslyn
- cada394f99c521861c39e2a5334678e6aba1ac62
+ aea9e82da403c397265f7fd0fefee5ebbb886179
-
+
https://github.com/dotnet/roslyn
- cada394f99c521861c39e2a5334678e6aba1ac62
+ aea9e82da403c397265f7fd0fefee5ebbb886179
-
+
https://github.com/dotnet/roslyn
- cada394f99c521861c39e2a5334678e6aba1ac62
+ aea9e82da403c397265f7fd0fefee5ebbb886179
https://github.com/dotnet/roslyn-analyzers
@@ -381,28 +377,28 @@
43709af7570da7140fb3e9a5237f55ffb24677e7
-
+
https://github.com/dotnet/roslyn
- cada394f99c521861c39e2a5334678e6aba1ac62
+ aea9e82da403c397265f7fd0fefee5ebbb886179
-
+
https://github.com/dotnet/sdk
- 5e03abbcf74bdef38ca67f04fbd4982e333d1f58
+ 29904b25a99c8984bacb86473df7d98735fb3207
-
+
https://github.com/dotnet/sdk
- 5e03abbcf74bdef38ca67f04fbd4982e333d1f58
+ 29904b25a99c8984bacb86473df7d98735fb3207
-
+
https://dev.azure.com/dnceng/internal/_git/dotnet-optimization
- c3acfd159662959ff09f3a0d7663023db48bb78a
+ 5553d120d993730b2dbb7a1d086eec81017f3d66
-
+
https://dev.azure.com/dnceng/internal/_git/dotnet-optimization
- c3acfd159662959ff09f3a0d7663023db48bb78a
+ 5553d120d993730b2dbb7a1d086eec81017f3d66
diff --git a/eng/Versions.props b/eng/Versions.props
index ae8234d2840f94..254683a3cc2880 100644
--- a/eng/Versions.props
+++ b/eng/Versions.props
@@ -10,8 +10,8 @@
8.0.7
7.0.20
6.0.$([MSBuild]::Add($([System.Version]::Parse('$(PackageVersionNet8)').Build),25))
- preview
- 7
+ rc
+ 1
false
release
@@ -42,9 +42,9 @@
Any tools that contribute to the design-time experience should use the MicrosoftCodeAnalysisVersion_LatestVS property above to ensure
they do not break the local dev experience.
-->
- 4.12.0-1.24362.11
- 4.12.0-1.24362.11
- 4.12.0-1.24362.11
+ 4.12.0-1.24372.18
+ 4.12.0-1.24372.18
+ 4.12.0-1.24372.18
- 9.0.100-preview.7.24358.3
+ 9.0.100-preview.7.24371.4
- 9.0.0-beta.24360.4
- 9.0.0-beta.24360.4
- 9.0.0-beta.24360.4
- 9.0.0-beta.24360.4
- 2.8.1-beta.24360.4
- 9.0.0-beta.24360.4
- 2.8.1-beta.24360.4
- 9.0.0-beta.24360.4
- 9.0.0-beta.24360.4
- 9.0.0-beta.24360.4
- 9.0.0-beta.24360.4
- 9.0.0-beta.24360.4
- 9.0.0-beta.24360.4
- 9.0.0-beta.24360.4
- 9.0.0-beta.24360.4
- 9.0.0-beta.24360.4
+ 9.0.0-beta.24372.7
+ 9.0.0-beta.24372.7
+ 9.0.0-beta.24372.7
+ 9.0.0-beta.24372.7
+ 2.9.0-beta.24372.7
+ 9.0.0-beta.24372.7
+ 2.9.0-beta.24372.7
+ 9.0.0-beta.24372.7
+ 9.0.0-beta.24372.7
+ 9.0.0-beta.24372.7
+ 9.0.0-beta.24372.7
+ 9.0.0-beta.24372.7
+ 9.0.0-beta.24372.7
+ 9.0.0-beta.24372.7
+ 9.0.0-beta.24372.7
+ 9.0.0-beta.24372.7
1.4.0
6.0.0-preview.1.102
- 9.0.0-preview.7.24357.2
+ 9.0.0-preview.7.24371.6
6.0.0
- 9.0.0-preview.7.24357.2
+ 9.0.0-preview.7.24371.6
6.0.0
1.1.1
@@ -119,46 +119,46 @@
8.0.0
5.0.0
4.5.5
- 9.0.0-preview.7.24357.2
- 9.0.0-preview.7.24357.2
+ 9.0.0-preview.7.24371.6
+ 9.0.0-preview.7.24371.6
6.0.0
5.0.0
5.0.0
5.0.0
7.0.0
- 9.0.0-preview.7.24357.2
+ 9.0.0-preview.7.24371.6
6.0.0
7.0.0
4.5.4
4.5.0
- 9.0.0-preview.7.24357.2
+ 9.0.0-preview.7.24371.6
8.0.0
8.0.0
8.0.0
8.0.0
- 9.0.0-beta.24358.1
- 9.0.0-beta.24358.1
- 9.0.0-beta.24358.1
- 9.0.0-beta.24358.1
- 9.0.0-beta.24358.1
- 9.0.0-beta.24358.1
- 9.0.0-beta.24358.1
- 9.0.0-beta.24358.1
- 9.0.0-beta.24358.1
- 9.0.0-beta.24358.1
- 9.0.0-beta.24358.1
- 9.0.0-beta.24358.1
- 9.0.0-beta.24358.1
- 9.0.0-beta.24358.1
+ 9.0.0-beta.24365.1
+ 9.0.0-beta.24365.1
+ 9.0.0-beta.24365.1
+ 9.0.0-beta.24365.1
+ 9.0.0-beta.24365.1
+ 9.0.0-beta.24365.1
+ 9.0.0-beta.24365.1
+ 9.0.0-beta.24365.1
+ 9.0.0-beta.24365.1
+ 9.0.0-beta.24365.1
+ 9.0.0-beta.24365.1
+ 9.0.0-beta.24365.1
+ 9.0.0-beta.24365.1
+ 9.0.0-beta.24365.1
- 1.0.0-prerelease.24223.3
- 1.0.0-prerelease.24223.3
- 1.0.0-prerelease.24223.3
- 1.0.0-prerelease.24223.3
- 1.0.0-prerelease.24223.3
- 1.0.0-prerelease.24223.3
+ 1.0.0-prerelease.24369.3
+ 1.0.0-prerelease.24369.3
+ 1.0.0-prerelease.24369.3
+ 1.0.0-prerelease.24369.3
+ 1.0.0-prerelease.24369.3
+ 1.0.0-prerelease.24369.3
2.0.0
17.10.0-beta1.24272.1
@@ -184,7 +184,7 @@
9.0.0-prerelease.24317.3
9.0.0-prerelease.24317.3
9.0.0-prerelease.24317.3
- 9.0.0-alpha.0.24351.1
+ 9.0.0-alpha.0.24365.2
3.12.0
4.5.0
6.0.0
@@ -212,48 +212,47 @@
8.0.0-preview-20230918.1
- 0.11.5-alpha.24324.1
+ 0.11.5-alpha.24365.1
- 9.0.0-preview.7.24357.2
+ 9.0.0-preview.7.24371.6
9.0.0-preview.7.24365.2
2.3.6
9.0.0-alpha.1.24167.3
- 16.0.5-alpha.1.24204.1
- 16.0.5-alpha.1.24204.1
- 16.0.5-alpha.1.24204.1
- 16.0.5-alpha.1.24204.1
- 16.0.5-alpha.1.24204.1
- 16.0.5-alpha.1.24204.1
- 16.0.5-alpha.1.24204.1
- 16.0.5-alpha.1.24204.1
- 16.0.5-alpha.1.24204.1
- 16.0.5-alpha.1.24204.1
- 16.0.5-alpha.1.24204.1
- 16.0.5-alpha.1.24204.1
- 16.0.5-alpha.1.24204.1
- 16.0.5-alpha.1.24204.1
+ 19.0.0-alpha.1.24370.2
+ 19.0.0-alpha.1.24370.2
+ 19.0.0-alpha.1.24370.2
+ 19.0.0-alpha.1.24370.2
+ 19.0.0-alpha.1.24370.2
+ 19.0.0-alpha.1.24370.2
+ 19.0.0-alpha.1.24370.2
+ 19.0.0-alpha.1.24370.2
+ 19.0.0-alpha.1.24370.2
+ 19.0.0-alpha.1.24370.2
+ 19.0.0-alpha.1.24370.2
+ 19.0.0-alpha.1.24370.2
+ 19.0.0-alpha.1.24370.2
+ 19.0.0-alpha.1.24370.2
- 9.0.0-preview.7.24365.1
+ 9.0.0-rc.1.24373.3
$(MicrosoftNETWorkloadEmscriptenCurrentManifest90100TransportVersion)
- 9.0.0-preview.7.24352.2
1.1.87-gba258badda
1.0.0-v3.14.0.5722
- 16.0.5-alpha.1.24204.1
- 16.0.5-alpha.1.24204.1
- 16.0.5-alpha.1.24204.1
- 16.0.5-alpha.1.24204.1
- 16.0.5-alpha.1.24204.1
- 16.0.5-alpha.1.24204.1
- 16.0.5-alpha.1.24204.1
- 16.0.5-alpha.1.24204.1
+ 19.0.0-alpha.1.24370.2
+ 19.0.0-alpha.1.24370.2
+ 19.0.0-alpha.1.24370.2
+ 19.0.0-alpha.1.24370.2
+ 19.0.0-alpha.1.24370.2
+ 19.0.0-alpha.1.24370.2
+ 19.0.0-alpha.1.24370.2
+ 19.0.0-alpha.1.24370.2
3.1.7
1.0.406601
@@ -262,6 +261,7 @@
9.0.0-alpha.1.24175.1
$(MicrosoftNETRuntimeEmscriptenVersion)
$(runtimewinx64MicrosoftNETCoreRuntimeWasmNodeTransportPackageVersion)
+
3.1.56
diff --git a/eng/common/core-templates/job/publish-build-assets.yml b/eng/common/core-templates/job/publish-build-assets.yml
index ec3cd14191abed..3d3356e3196727 100644
--- a/eng/common/core-templates/job/publish-build-assets.yml
+++ b/eng/common/core-templates/job/publish-build-assets.yml
@@ -143,9 +143,10 @@ jobs:
scriptType: ps
scriptLocation: scriptPath
scriptPath: $(Build.SourcesDirectory)/eng/common/post-build/publish-using-darc.ps1
- arguments: -BuildId $(BARBuildId)
+ arguments: >
+ -BuildId $(BARBuildId)
-PublishingInfraVersion 3
- -AzdoToken '$(publishing-dnceng-devdiv-code-r-build-re)'
+ -AzdoToken '$(System.AccessToken)'
-WaitPublishingFinish true
-ArtifactsPublishingAdditionalParameters '${{ parameters.artifactsPublishingAdditionalParameters }}'
-SymbolPublishingAdditionalParameters '${{ parameters.symbolPublishingAdditionalParameters }}'
diff --git a/eng/common/core-templates/job/source-index-stage1.yml b/eng/common/core-templates/job/source-index-stage1.yml
index 945c1c19e82495..205fb5b3a39563 100644
--- a/eng/common/core-templates/job/source-index-stage1.yml
+++ b/eng/common/core-templates/job/source-index-stage1.yml
@@ -34,10 +34,12 @@ jobs:
pool:
${{ if eq(variables['System.TeamProject'], 'public') }}:
name: $(DncEngPublicBuildPool)
- image: windows.vs2022.amd64.open
+ image: 1es-windows-2022-open
+ os: windows
${{ if eq(variables['System.TeamProject'], 'internal') }}:
name: $(DncEngInternalBuildPool)
- image: windows.vs2022.amd64
+ image: 1es-windows-2022
+ os: windows
steps:
- ${{ if eq(parameters.is1ESPipeline, '') }}:
diff --git a/eng/common/core-templates/post-build/post-build.yml b/eng/common/core-templates/post-build/post-build.yml
index 20924366b8a43c..454fd75c7aff19 100644
--- a/eng/common/core-templates/post-build/post-build.yml
+++ b/eng/common/core-templates/post-build/post-build.yml
@@ -307,9 +307,10 @@ stages:
scriptType: ps
scriptLocation: scriptPath
scriptPath: $(Build.SourcesDirectory)/eng/common/post-build/publish-using-darc.ps1
- arguments: -BuildId $(BARBuildId)
+ arguments: >
+ -BuildId $(BARBuildId)
-PublishingInfraVersion ${{ parameters.publishingInfraVersion }}
- -AzdoToken '$(publishing-dnceng-devdiv-code-r-build-re)'
+ -AzdoToken '$(System.AccessToken)'
-WaitPublishingFinish true
-ArtifactsPublishingAdditionalParameters '${{ parameters.artifactsPublishingAdditionalParameters }}'
-SymbolPublishingAdditionalParameters '${{ parameters.symbolPublishingAdditionalParameters }}'
diff --git a/eng/common/core-templates/steps/get-federated-access-token.yml b/eng/common/core-templates/steps/get-federated-access-token.yml
index c8c49cc0e8f0fc..3a4d4410c48294 100644
--- a/eng/common/core-templates/steps/get-federated-access-token.yml
+++ b/eng/common/core-templates/steps/get-federated-access-token.yml
@@ -3,6 +3,14 @@ parameters:
type: string
- name: outputVariableName
type: string
+- name: is1ESPipeline
+ type: boolean
+- name: stepName
+ type: string
+ default: 'getFederatedAccessToken'
+- name: condition
+ type: string
+ default: ''
# Resource to get a token for. Common values include:
# - '499b84ac-1321-427f-aa17-267ca6975798' for Azure DevOps
# - 'https://storage.azure.com/' for storage
@@ -10,10 +18,16 @@ parameters:
- name: resource
type: string
default: '499b84ac-1321-427f-aa17-267ca6975798'
+- name: isStepOutputVariable
+ type: boolean
+ default: false
steps:
- task: AzureCLI@2
displayName: 'Getting federated access token for feeds'
+ name: ${{ parameters.stepName }}
+ ${{ if ne(parameters.condition, '') }}:
+ condition: ${{ parameters.condition }}
inputs:
azureSubscription: ${{ parameters.federatedServiceConnection }}
scriptType: 'pscore'
@@ -25,4 +39,4 @@ steps:
exit 1
}
Write-Host "Setting '${{ parameters.outputVariableName }}' with the access token value"
- Write-Host "##vso[task.setvariable variable=${{ parameters.outputVariableName }};issecret=true]$accessToken"
\ No newline at end of file
+ Write-Host "##vso[task.setvariable variable=${{ parameters.outputVariableName }};issecret=true;isOutput=${{ parameters.isStepOutputVariable }}]$accessToken"
\ No newline at end of file
diff --git a/eng/common/core-templates/steps/publish-logs.yml b/eng/common/core-templates/steps/publish-logs.yml
index 8c5ea77b586d27..80788c5231912f 100644
--- a/eng/common/core-templates/steps/publish-logs.yml
+++ b/eng/common/core-templates/steps/publish-logs.yml
@@ -32,7 +32,6 @@ steps:
'$(MaestroAccessToken)'
'$(dn-bot-all-orgs-artifact-feeds-rw)'
'$(akams-client-id)'
- '$(akams-client-secret)'
'$(microsoft-symbol-server-pat)'
'$(symweb-symbol-server-pat)'
'$(dn-bot-all-orgs-build-rw-code-rw)'
diff --git a/eng/common/native/init-compiler.sh b/eng/common/native/init-compiler.sh
index 62900e12b21c10..14175343cc67dc 100644
--- a/eng/common/native/init-compiler.sh
+++ b/eng/common/native/init-compiler.sh
@@ -19,11 +19,9 @@ case "$compiler" in
# clangx.y or clang-x.y
version="$(echo "$compiler" | tr -d '[:alpha:]-=')"
majorVersion="${version%%.*}"
- [ -z "${version##*.*}" ] && minorVersion="${version#*.}"
- if [ -z "$minorVersion" ] && [ -n "$majorVersion" ] && [ "$majorVersion" -le 6 ]; then
- minorVersion=0;
- fi
+ # LLVM based on v18 released in early 2024, with two releases per year
+ maxVersion="$((18 + ((($(date +%Y) - 2024) * 12 + $(expr "$(date +%m)" + 0) - 3) / 6)))"
compiler=clang
;;
@@ -31,7 +29,9 @@ case "$compiler" in
# gccx.y or gcc-x.y
version="$(echo "$compiler" | tr -d '[:alpha:]-=')"
majorVersion="${version%%.*}"
- [ -z "${version##*.*}" ] && minorVersion="${version#*.}"
+
+ # GCC based on v14 released in early 2024, with one release per year
+ maxVersion="$((14 + ((($(date +%Y) - 2024) * 12 + $(expr "$(date +%m)" + 0) - 3) / 12)))"
compiler=gcc
;;
esac
@@ -49,12 +49,10 @@ check_version_exists() {
desired_version=-1
# Set up the environment to be used for building with the desired compiler.
- if command -v "$compiler-$1.$2" > /dev/null; then
- desired_version="-$1.$2"
- elif command -v "$compiler$1$2" > /dev/null; then
- desired_version="$1$2"
- elif command -v "$compiler-$1$2" > /dev/null; then
- desired_version="-$1$2"
+ if command -v "$compiler-$1" > /dev/null; then
+ desired_version="-$1"
+ elif command -v "$compiler$1" > /dev/null; then
+ desired_version="$1"
fi
echo "$desired_version"
@@ -75,7 +73,7 @@ set_compiler_version_from_CC() {
fi
# gcc and clang often display 3 part versions. However, gcc can show only 1 part in some environments.
- IFS=. read -r majorVersion minorVersion _ < /dev/null; then
- echo "Error: No usable version of $compiler found."
+ echo "Error: No compatible version of $compiler was found within the range of $minVersion to $maxVersion. Please upgrade your toolchain or specify the compiler explicitly using CLR_CC and CLR_CXX environment variables."
exit 1
fi
CC="$(command -v "$compiler" 2> /dev/null)"
CXX="$(command -v "$cxxCompiler" 2> /dev/null)"
set_compiler_version_from_CC
- else
- if [ "$compiler" = "clang" ] && [ "$majorVersion" -lt 5 ] && { [ "$build_arch" = "arm" ] || [ "$build_arch" = "armel" ]; }; then
- # If a major version was provided explicitly, and it was too old, find a newer compiler instead
- if ! command -v "$compiler" > /dev/null; then
- echo "Error: Found clang version $majorVersion which is not supported on arm/armel architectures, and there is no clang in PATH."
- exit 1
- fi
-
- CC="$(command -v "$compiler" 2> /dev/null)"
- CXX="$(command -v "$cxxCompiler" 2> /dev/null)"
- set_compiler_version_from_CC
- fi
fi
else
- desired_version="$(check_version_exists "$majorVersion" "$minorVersion")"
+ desired_version="$(check_version_exists "$majorVersion")"
if [ "$desired_version" = "-1" ]; then
- echo "Error: Could not find specific version of $compiler: $majorVersion $minorVersion."
+ echo "Error: Could not find specific version of $compiler: $majorVersion."
exit 1
fi
fi
diff --git a/eng/common/post-build/publish-using-darc.ps1 b/eng/common/post-build/publish-using-darc.ps1
index 4ff587ca46a99c..90b58e32a87bfb 100644
--- a/eng/common/post-build/publish-using-darc.ps1
+++ b/eng/common/post-build/publish-using-darc.ps1
@@ -42,6 +42,7 @@ try {
--azdev-pat "$AzdoToken" `
--bar-uri "$MaestroApiEndPoint" `
--ci `
+ --verbose `
@optionalParams
if ($LastExitCode -ne 0) {
diff --git a/eng/intellisense.targets b/eng/intellisense.targets
index 12aaad834a9929..d4e360957f206b 100644
--- a/eng/intellisense.targets
+++ b/eng/intellisense.targets
@@ -7,6 +7,8 @@
$(IntellisensePackageXmlFilePathFromNetFolder)
$(IntellisensePackageXmlFilePathFromDotNetPlatExtFolder)
+ $(IntermediateOutputPath)$(TargetName).intellisense-package.xml
+
$(NoWarn);1591
@@ -25,13 +27,26 @@
+
+
+
+
+
+
+
-
+
diff --git a/eng/native/configurecompiler.cmake b/eng/native/configurecompiler.cmake
index 6b7cd6b2cbc8a2..ebb4a427f2896f 100644
--- a/eng/native/configurecompiler.cmake
+++ b/eng/native/configurecompiler.cmake
@@ -670,22 +670,22 @@ if (CLR_CMAKE_HOST_UNIX)
set(DISABLE_OVERRIDING_MIN_VERSION_ERROR -Wno-overriding-t-option)
add_link_options(-Wno-overriding-t-option)
if(CLR_CMAKE_HOST_ARCH_ARM64)
- set(MACOS_VERSION_MIN_FLAGS "-target arm64-apple-ios15.0-macabi")
- add_link_options(-target arm64-apple-ios15.0-macabi)
+ set(CLR_CMAKE_MACCATALYST_COMPILER_TARGET "arm64-apple-ios15.0-macabi")
+ add_link_options(-target ${CLR_CMAKE_MACCATALYST_COMPILER_TARGET})
elseif(CLR_CMAKE_HOST_ARCH_AMD64)
- set(MACOS_VERSION_MIN_FLAGS "-target x86_64-apple-ios15.0-macabi")
- add_link_options(-target x86_64-apple-ios15.0-macabi)
+ set(CLR_CMAKE_MACCATALYST_COMPILER_TARGET "x86_64-apple-ios15.0-macabi")
+ add_link_options(-target ${CLR_CMAKE_MACCATALYST_COMPILER_TARGET})
else()
clr_unknown_arch()
endif()
# These options are intentionally set using the CMAKE_XXX_FLAGS instead of
# add_compile_options so that they take effect on the configuration functions
# in various configure.cmake files.
- set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${MACOS_VERSION_MIN_FLAGS} ${DISABLE_OVERRIDING_MIN_VERSION_ERROR}")
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${MACOS_VERSION_MIN_FLAGS} ${DISABLE_OVERRIDING_MIN_VERSION_ERROR}")
- set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} ${MACOS_VERSION_MIN_FLAGS} ${DISABLE_OVERRIDING_MIN_VERSION_ERROR}")
- set(CMAKE_OBJC_FLAGS "${CMAKE_OBJC_FLAGS} ${MACOS_VERSION_MIN_FLAGS} ${DISABLE_OVERRIDING_MIN_VERSION_ERROR}")
- set(CMAKE_OBJCXX_FLAGS "${CMAKE_OBJCXX_FLAGS} ${MACOS_VERSION_MIN_FLAGS} ${DISABLE_OVERRIDING_MIN_VERSION_ERROR}")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -target ${CLR_CMAKE_MACCATALYST_COMPILER_TARGET} ${DISABLE_OVERRIDING_MIN_VERSION_ERROR}")
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -target ${CLR_CMAKE_MACCATALYST_COMPILER_TARGET} ${DISABLE_OVERRIDING_MIN_VERSION_ERROR}")
+ set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -target ${CLR_CMAKE_MACCATALYST_COMPILER_TARGET} ${DISABLE_OVERRIDING_MIN_VERSION_ERROR}")
+ set(CMAKE_OBJC_FLAGS "${CMAKE_OBJC_FLAGS} -target ${CLR_CMAKE_MACCATALYST_COMPILER_TARGET} ${DISABLE_OVERRIDING_MIN_VERSION_ERROR}")
+ set(CMAKE_OBJCXX_FLAGS "${CMAKE_OBJCXX_FLAGS} -target ${CLR_CMAKE_MACCATALYST_COMPILER_TARGET} ${DISABLE_OVERRIDING_MIN_VERSION_ERROR}")
elseif(CLR_CMAKE_HOST_OSX)
set(CMAKE_OSX_DEPLOYMENT_TARGET "12.0")
if(CLR_CMAKE_HOST_ARCH_ARM64)
diff --git a/eng/pipelines/coreclr/templates/build-perf-sample-apps.yml b/eng/pipelines/coreclr/templates/build-perf-sample-apps.yml
index fd55342e1a0552..901078393fab8e 100644
--- a/eng/pipelines/coreclr/templates/build-perf-sample-apps.yml
+++ b/eng/pipelines/coreclr/templates/build-perf-sample-apps.yml
@@ -38,7 +38,7 @@ steps:
displayName: clean bindir
- ${{ if and(eq(parameters.osGroup, 'ios'), eq(parameters.nameSuffix, 'iOSMono')) }}:
- - script: make build-appbundle TARGET=ios MONO_ARCH=arm64 MONO_CONFIG=Release AOT=True USE_LLVM=False DEPLOY_AND_RUN=false STRIP_DEBUG_SYMBOLS=false HYBRID_GLOBALIZATION=${{ parameters.hybridGlobalization }}
+ - script: make build-appbundle TARGET_OS=ios TARGET_ARCH=arm64 BUILD_CONFIG=Release AOT=True USE_LLVM=False DEPLOY_AND_RUN=false STRIP_DEBUG_SYMBOLS=false HYBRID_GLOBALIZATION=${{ parameters.hybridGlobalization }}
env:
DevTeamProvisioning: '-'
workingDirectory: $(Build.SourcesDirectory)/src/mono/sample/iOS
@@ -60,7 +60,7 @@ steps:
- script: rm -r -f $(Build.SourcesDirectory)/src/mono/sample/iOS/bin
workingDirectory: $(Build.SourcesDirectory)/src/mono/sample/iOS
displayName: Clean bindir
- - script: make build-appbundle TARGET=ios MONO_ARCH=arm64 MONO_CONFIG=Release AOT=True USE_LLVM=False DEPLOY_AND_RUN=false STRIP_DEBUG_SYMBOLS=true HYBRID_GLOBALIZATION=${{ parameters.hybridGlobalization }}
+ - script: make build-appbundle TARGET_OS=ios TARGET_ARCH=arm64 BUILD_CONFIG=Release AOT=True USE_LLVM=False DEPLOY_AND_RUN=false STRIP_DEBUG_SYMBOLS=true HYBRID_GLOBALIZATION=${{ parameters.hybridGlobalization }}
env:
DevTeamProvisioning: '-'
workingDirectory: $(Build.SourcesDirectory)/src/mono/sample/iOS
@@ -82,7 +82,7 @@ steps:
- script: rm -r -f $(Build.SourcesDirectory)/src/mono/sample/iOS/bin
workingDirectory: $(Build.SourcesDirectory)/src/mono/sample/iOS
displayName: Clean bindir
- - script: make build-appbundle TARGET=ios MONO_ARCH=arm64 MONO_CONFIG=Release AOT=True USE_LLVM=True DEPLOY_AND_RUN=false STRIP_DEBUG_SYMBOLS=false HYBRID_GLOBALIZATION=${{ parameters.hybridGlobalization }}
+ - script: make build-appbundle TARGET_OS=ios TARGET_ARCH=arm64 BUILD_CONFIG=Release AOT=True USE_LLVM=True DEPLOY_AND_RUN=false STRIP_DEBUG_SYMBOLS=false HYBRID_GLOBALIZATION=${{ parameters.hybridGlobalization }}
env:
DevTeamProvisioning: '-'
workingDirectory: $(Build.SourcesDirectory)/src/mono/sample/iOS
@@ -104,7 +104,7 @@ steps:
- script: rm -r -f $(Build.SourcesDirectory)/src/mono/sample/iOS/bin
workingDirectory: $(Build.SourcesDirectory)/src/mono/sample/iOS
displayName: Clean bindir
- - script: make build-appbundle TARGET=ios MONO_ARCH=arm64 MONO_CONFIG=Release AOT=True USE_LLVM=True DEPLOY_AND_RUN=false STRIP_DEBUG_SYMBOLS=true HYBRID_GLOBALIZATION=${{ parameters.hybridGlobalization }}
+ - script: make build-appbundle TARGET_OS=ios TARGET_ARCH=arm64 BUILD_CONFIG=Release AOT=True USE_LLVM=True DEPLOY_AND_RUN=false STRIP_DEBUG_SYMBOLS=true HYBRID_GLOBALIZATION=${{ parameters.hybridGlobalization }}
env:
DevTeamProvisioning: '-'
workingDirectory: $(Build.SourcesDirectory)/src/mono/sample/iOS
diff --git a/eng/pipelines/coreclr/templates/run-performance-job.yml b/eng/pipelines/coreclr/templates/run-performance-job.yml
index 73bfbb7c14900c..6b910496d9b00a 100644
--- a/eng/pipelines/coreclr/templates/run-performance-job.yml
+++ b/eng/pipelines/coreclr/templates/run-performance-job.yml
@@ -86,7 +86,7 @@ jobs:
${V8_ENGINE_PATH} -e 'console.log(`V8 version: ${this.version()}`)'
- ${{ if ne(parameters.runtimeType, 'wasm') }}:
- HelixPreCommandsWasmOnLinux: echo
- - HelixPreCommandStemWindows: 'set ORIGPYPATH=%PYTHONPATH%;py -m pip install -U pip;py -3 -m venv %HELIX_WORKITEM_PAYLOAD%\.venv;call %HELIX_WORKITEM_PAYLOAD%\.venv\Scripts\activate.bat;set PYTHONPATH=;py -3 -m pip install -U pip;py -3 -m pip install urllib3==1.26.15;py -3 -m pip install azure.storage.blob==12.13.0;py -3 -m pip install azure.storage.queue==12.4.0;py -3 -m pip install azure.identity==1.16.1;set "PERFLAB_UPLOAD_TOKEN=$(HelixPerfUploadTokenValue)"'
+ - HelixPreCommandStemWindows: 'set ORIGPYPATH=%PYTHONPATH%;py -m pip install -U pip;py -3 -m venv %HELIX_WORKITEM_PAYLOAD%\.venv;call %HELIX_WORKITEM_PAYLOAD%\.venv\Scripts\activate.bat;echo on;set PYTHONPATH=;python -m pip install -U pip;python -m pip install urllib3==1.26.15;python -m pip install azure.storage.blob==12.13.0;python -m pip install azure.storage.queue==12.4.0;python -m pip install azure.identity==1.16.1;set "PERFLAB_UPLOAD_TOKEN=$(HelixPerfUploadTokenValue)"'
- HelixPreCommandStemLinux: >-
export ORIGPYPATH=$PYTHONPATH
export CRYPTOGRAPHY_ALLOW_OPENSSL_102=true;
diff --git a/eng/pipelines/coreclr/templates/run-scenarios-job.yml b/eng/pipelines/coreclr/templates/run-scenarios-job.yml
index 60c773ae9db0ca..84525834164ac0 100644
--- a/eng/pipelines/coreclr/templates/run-scenarios-job.yml
+++ b/eng/pipelines/coreclr/templates/run-scenarios-job.yml
@@ -60,7 +60,7 @@ jobs:
- SharedHelixPreCommands: 'chmod +x $HELIX_WORKITEM_PAYLOAD/machine-setup.sh;. $HELIX_WORKITEM_PAYLOAD/machine-setup.sh;export PYTHONPATH=$HELIX_WORKITEM_PAYLOAD/scripts:$HELIX_WORKITEM_PAYLOAD'
- ${{ if eq(parameters.osGroup, 'windows') }}:
- - HelixPreCommandWindows: 'set ORIGPYPATH=%PYTHONPATH%;py -3 -m venv %HELIX_WORKITEM_PAYLOAD%\.venv;call %HELIX_WORKITEM_PAYLOAD%\.venv\Scripts\activate.bat;set PYTHONPATH=;py -3 -m pip install -U pip;py -3 -m pip install --user azure.storage.blob==12.13.0;py -3 -m pip install --user azure.storage.queue==12.4.0;py -3 -m pip install --user urllib3==1.26.15;py -3 -m pip install --user azure.identity==1.16.1;set "PERFLAB_UPLOAD_TOKEN=$(PerfCommandUploadToken)"'
+ - HelixPreCommandWindows: 'set ORIGPYPATH=%PYTHONPATH%;py -3 -m venv %HELIX_WORKITEM_PAYLOAD%\.venv;call %HELIX_WORKITEM_PAYLOAD%\.venv\Scripts\activate.bat;echo on;set PYTHONPATH=;python -m pip install -U pip;python -m pip install --user azure.storage.blob==12.13.0;python -m pip install --user azure.storage.queue==12.4.0;python -m pip install --user urllib3==1.26.15;python -m pip install --user azure.identity==1.16.1;set "PERFLAB_UPLOAD_TOKEN=$(PerfCommandUploadToken)"'
- HelixPostCommandsWindows: 'set PYTHONPATH=%ORIGPYPATH%'
- ${{ if and(ne(parameters.osGroup, 'windows'), ne(parameters.osGroup, 'osx'), ne(parameters.osSubGroup, '_musl')) }}:
- HelixPreCommandLinux: 'export ORIGPYPATH=$PYTHONPATH;export CRYPTOGRAPHY_ALLOW_OPENSSL_102=true;sudo apt-get -y install python3-venv;python3 -m venv $HELIX_WORKITEM_PAYLOAD/.venv;source $HELIX_WORKITEM_PAYLOAD/.venv/bin/activate;export PYTHONPATH=;python3 -m pip install -U pip;pip3 install --user azure.storage.blob==12.13.0;pip3 install --user azure.storage.queue==12.4.0;pip3 install --user azure.identity==1.16.1;pip3 install --user urllib3==1.26.15;export PERFLAB_UPLOAD_TOKEN="$(PerfCommandUploadTokenLinux)"'
diff --git a/eng/pipelines/extra-platforms/runtime-extra-platforms-maccatalyst.yml b/eng/pipelines/extra-platforms/runtime-extra-platforms-maccatalyst.yml
index d21dd56df2647e..8c9d16d9701ae1 100644
--- a/eng/pipelines/extra-platforms/runtime-extra-platforms-maccatalyst.yml
+++ b/eng/pipelines/extra-platforms/runtime-extra-platforms-maccatalyst.yml
@@ -77,3 +77,77 @@ jobs:
creator: dotnet-bot
interpreter: true
testRunNamePrefixSuffix: Mono_$(_BuildConfig)
+
+#
+# Build the whole product using Native AOT and run runtime tests
+#
+- template: /eng/pipelines/common/platform-matrix.yml
+ parameters:
+ jobTemplate: /eng/pipelines/common/global-build-job.yml
+ helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml
+ buildConfig: Release
+ runtimeFlavor: coreclr
+ isExtraPlatformsBuild: ${{ parameters.isExtraPlatformsBuild }}
+ isMacCatalystOnlyBuild: ${{ parameters.isMacCatalystOnlyBuild }}
+ platforms:
+ - maccatalyst_x64
+ - maccatalyst_arm64
+ variables:
+ # map dependencies variables to local variables
+ - name: librariesContainsChange
+ value: $[ stageDependencies.EvaluatePaths.evaluate_paths.outputs['SetPathVars_libraries.containsChange'] ]
+ - name: monoContainsChange
+ value: $[ stageDependencies.EvaluatePaths.evaluate_paths.outputs['SetPathVars_mono_excluding_wasm.containsChange'] ]
+ jobParameters:
+ testGroup: innerloop
+ nameSuffix: AllSubsets_NativeAOT_RuntimeTests
+ buildArgs: --cross -s clr.alljits+clr.tools+clr.nativeaotruntime+clr.nativeaotlibs+libs -c $(_BuildConfig)
+ timeoutInMinutes: 180
+ # extra steps, run tests
+ extraVariablesTemplates:
+ - template: /eng/pipelines/common/templates/runtimes/test-variables.yml
+ parameters:
+ testGroup: innerloop
+ postBuildSteps:
+ - template: /eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml
+ parameters:
+ creator: dotnet-bot
+ testBuildArgs: tree nativeaot/SmokeTests /p:BuildNativeAOTRuntimePack=true
+ testRunNamePrefixSuffix: NativeAOT_$(_BuildConfig)
+
+#
+# Build the whole product using Native AOT with the App Sandbox entitlement and run runtime tests
+#
+- template: /eng/pipelines/common/platform-matrix.yml
+ parameters:
+ jobTemplate: /eng/pipelines/common/global-build-job.yml
+ helixQueuesTemplate: /eng/pipelines/coreclr/templates/helix-queues-setup.yml
+ buildConfig: Release
+ runtimeFlavor: coreclr
+ isExtraPlatformsBuild: ${{ parameters.isExtraPlatformsBuild }}
+ isMacCatalystOnlyBuild: ${{ parameters.isMacCatalystOnlyBuild }}
+ platforms:
+ - maccatalyst_x64
+ - maccatalyst_arm64
+ variables:
+ # map dependencies variables to local variables
+ - name: librariesContainsChange
+ value: $[ stageDependencies.EvaluatePaths.evaluate_paths.outputs['SetPathVars_libraries.containsChange'] ]
+ - name: monoContainsChange
+ value: $[ stageDependencies.EvaluatePaths.evaluate_paths.outputs['SetPathVars_mono_excluding_wasm.containsChange'] ]
+ jobParameters:
+ testGroup: innerloop
+ nameSuffix: AllSubsets_NativeAOT_RuntimeTests_AppSandbox
+ buildArgs: --cross -s clr.alljits+clr.tools+clr.nativeaotruntime+clr.nativeaotlibs+libs -c $(_BuildConfig)
+ timeoutInMinutes: 180
+ # extra steps, run tests
+ extraVariablesTemplates:
+ - template: /eng/pipelines/common/templates/runtimes/test-variables.yml
+ parameters:
+ testGroup: innerloop
+ postBuildSteps:
+ - template: /eng/pipelines/common/templates/runtimes/build-runtime-tests-and-send-to-helix.yml
+ parameters:
+ creator: dotnet-bot
+ testBuildArgs: tree nativeaot/SmokeTests /p:BuildNativeAOTRuntimePack=true /p:DevTeamProvisioning=adhoc /p:EnableAppSandbox=true
+ testRunNamePrefixSuffix: NativeAOT_$(_BuildConfig)
diff --git a/eng/testing/.runsettings b/eng/testing/.runsettings
index 1dfbe23e6501bd..5a7ae2e2b5ca2b 100644
--- a/eng/testing/.runsettings
+++ b/eng/testing/.runsettings
@@ -18,6 +18,10 @@
$$TESTCASEFILTER$$
$$DOTNETHOSTPATH$$
+
+
+ 1
+
diff --git a/eng/testing/linker/project.csproj.template b/eng/testing/linker/project.csproj.template
index d31c8df9c9271e..f62317e7eba16f 100644
--- a/eng/testing/linker/project.csproj.template
+++ b/eng/testing/linker/project.csproj.template
@@ -77,7 +77,7 @@
- <_BoolPropertiesThatTriggerRelinking Remove="InvariantGlobalization" />
+ <_PropertiesThatTriggerRelinking Remove="InvariantGlobalization" />
diff --git a/eng/testing/performance/crossgen_perf.proj b/eng/testing/performance/crossgen_perf.proj
index 4ed0fad94877f1..252ec7eaedb198 100644
--- a/eng/testing/performance/crossgen_perf.proj
+++ b/eng/testing/performance/crossgen_perf.proj
@@ -10,7 +10,7 @@
Crossgen and Crossgen2 Scenario WorkItems
-->
- py -3
+ python
$(HelixPreCommands)
%HELIX_CORRELATION_PAYLOAD%\Core_Root
%HELIX_CORRELATION_PAYLOAD%\performance\src\scenarios\
diff --git a/eng/testing/performance/microbenchmarks.proj b/eng/testing/performance/microbenchmarks.proj
index 556d4cfc7ddcb7..8453f469cd0af6 100644
--- a/eng/testing/performance/microbenchmarks.proj
+++ b/eng/testing/performance/microbenchmarks.proj
@@ -5,7 +5,7 @@
$(HelixPreCommands) && robocopy /np /nfl /e %HELIX_CORRELATION_PAYLOAD%\performance $(PerformanceDirectory) /XD %HELIX_CORRELATION_PAYLOAD%\performance\.git
$(PerformanceDirectory)\scripts\benchmarks_ci.py --csproj $(PerformanceDirectory)\$(TargetCsproj)
--dotnet-versions %DOTNET_VERSION% --cli-source-info args --cli-branch %PERFLAB_BRANCH% --cli-commit-sha %PERFLAB_HASH% --cli-repository https://github.com/%PERFLAB_REPO% --cli-source-timestamp %PERFLAB_BUILDTIMESTAMP%
- py -3
+ python
%HELIX_CORRELATION_PAYLOAD%\Core_Root\CoreRun.exe
%HELIX_CORRELATION_PAYLOAD%\Baseline_Core_Root\CoreRun.exe
$(HelixPreCommands);call $(PerformanceDirectory)\tools\machine-setup.cmd;set PYTHONPATH=%HELIX_WORKITEM_PAYLOAD%\scripts%3B%HELIX_WORKITEM_PAYLOAD%
diff --git a/eng/testing/xunit/xunit.console.targets b/eng/testing/xunit/xunit.console.targets
index 175d149c96a914..82f90c258c5275 100644
--- a/eng/testing/xunit/xunit.console.targets
+++ b/eng/testing/xunit/xunit.console.targets
@@ -1,10 +1,17 @@
+
true
testResults.xml
true
+
+
+
+
+
+
<_depsFileArgument Condition="'$(GenerateDependencyFile)' == 'true'">--depsfile $(AssemblyName).deps.json
"$(RunScriptHost)" exec --runtimeconfig $(AssemblyName).runtimeconfig.json $(_depsFileArgument) xunit.console.dll
@@ -85,13 +92,6 @@
CopyToOutputDirectory="PreserveNewest"
Visible="false" />
-
-
-
-
-
-
-
-
+
diff --git a/global.json b/global.json
index 1ce506fa636f8a..510f47b5aed98c 100644
--- a/global.json
+++ b/global.json
@@ -1,18 +1,18 @@
{
"sdk": {
- "version": "9.0.100-preview.5.24307.3",
+ "version": "9.0.100-preview.6.24328.19",
"allowPrerelease": true,
"rollForward": "major"
},
"tools": {
- "dotnet": "9.0.100-preview.5.24307.3"
+ "dotnet": "9.0.100-preview.6.24328.19"
},
"msbuild-sdks": {
- "Microsoft.DotNet.Arcade.Sdk": "9.0.0-beta.24360.4",
- "Microsoft.DotNet.Helix.Sdk": "9.0.0-beta.24360.4",
- "Microsoft.DotNet.SharedFramework.Sdk": "9.0.0-beta.24360.4",
+ "Microsoft.DotNet.Arcade.Sdk": "9.0.0-beta.24372.7",
+ "Microsoft.DotNet.Helix.Sdk": "9.0.0-beta.24372.7",
+ "Microsoft.DotNet.SharedFramework.Sdk": "9.0.0-beta.24372.7",
"Microsoft.Build.NoTargets": "3.7.0",
"Microsoft.Build.Traversal": "3.4.0",
- "Microsoft.NET.Sdk.IL": "9.0.0-preview.7.24357.2"
+ "Microsoft.NET.Sdk.IL": "9.0.0-preview.7.24371.6"
}
}
diff --git a/src/coreclr/System.Private.CoreLib/src/Internal/Runtime/InteropServices/ComActivator.cs b/src/coreclr/System.Private.CoreLib/src/Internal/Runtime/InteropServices/ComActivator.cs
index 9c32a327a28972..97065b11c2b0ae 100644
--- a/src/coreclr/System.Private.CoreLib/src/Internal/Runtime/InteropServices/ComActivator.cs
+++ b/src/coreclr/System.Private.CoreLib/src/Internal/Runtime/InteropServices/ComActivator.cs
@@ -842,11 +842,7 @@ public string RequestLicKey(Type type)
((IDisposable?)parameters[2])?.Dispose();
- var licenseKey = (string?)parameters[3];
- if (licenseKey == null)
- {
- throw new COMException(); // E_FAIL
- }
+ var licenseKey = (string?)parameters[3] ?? throw new COMException(); // E_FAIL
return licenseKey;
}
diff --git a/src/coreclr/System.Private.CoreLib/src/Internal/Runtime/InteropServices/InMemoryAssemblyLoader.cs b/src/coreclr/System.Private.CoreLib/src/Internal/Runtime/InteropServices/InMemoryAssemblyLoader.cs
index dc8d8ccb4c7cca..4a7d0a88c19962 100644
--- a/src/coreclr/System.Private.CoreLib/src/Internal/Runtime/InteropServices/InMemoryAssemblyLoader.cs
+++ b/src/coreclr/System.Private.CoreLib/src/Internal/Runtime/InteropServices/InMemoryAssemblyLoader.cs
@@ -49,7 +49,7 @@ private static unsafe void LoadInMemoryAssemblyInContextWhenSupported(IntPtr mod
///
/// The native module handle for the assembly.
/// The path to the assembly (as a pointer to a UTF-16 C string).
- /// Load context (currently must be IntPtr.Zero)
+ /// Load context (currently must be either IntPtr.Zero for default ALC or -1 for isolated ALC)
[UnmanagedCallersOnly]
[UnconditionalSuppressMessage("ReflectionAnalysis", "IL2026:RequiresUnreferencedCode",
Justification = "The same C++/CLI feature switch applies to LoadInMemoryAssembly and this function. We rely on the warning from LoadInMemoryAssembly.")]
@@ -58,16 +58,18 @@ public static unsafe void LoadInMemoryAssemblyInContext(IntPtr moduleHandle, Int
if (!IsSupported)
throw new NotSupportedException(SR.NotSupported_CppCli);
- ArgumentOutOfRangeException.ThrowIfNotEqual(loadContext, IntPtr.Zero);
+ if ((loadContext != IntPtr.Zero) && (loadContext != -1))
+ {
+ throw new ArgumentOutOfRangeException(nameof(loadContext));
+ }
- LoadInMemoryAssemblyInContextImpl(moduleHandle, assemblyPath, AssemblyLoadContext.Default);
+ LoadInMemoryAssemblyInContextImpl(moduleHandle, assemblyPath, (loadContext == IntPtr.Zero) ? AssemblyLoadContext.Default : null);
}
[RequiresUnreferencedCode("C++/CLI is not trim-compatible", Url = "https://aka.ms/dotnet-illink/nativehost")]
private static void LoadInMemoryAssemblyInContextImpl(IntPtr moduleHandle, IntPtr assemblyPath, AssemblyLoadContext? alc = null)
{
- string? assemblyPathString = Marshal.PtrToStringUni(assemblyPath);
- if (assemblyPathString == null)
+ string assemblyPathString = Marshal.PtrToStringUni(assemblyPath) ??
throw new ArgumentOutOfRangeException(nameof(assemblyPath));
// We don't cache the ALCs or resolvers here since each IJW assembly will call this method at most once
diff --git a/src/coreclr/System.Private.CoreLib/src/System/Delegate.CoreCLR.cs b/src/coreclr/System.Private.CoreLib/src/System/Delegate.CoreCLR.cs
index 1168784b30563b..ee48dc77e94fe2 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/Delegate.CoreCLR.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/Delegate.CoreCLR.cs
@@ -61,7 +61,7 @@ protected Delegate([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.Al
if (target.ContainsGenericParameters)
throw new ArgumentException(SR.Arg_UnboundGenParam, nameof(target));
- if (!(target is RuntimeType rtTarget))
+ if (target is not RuntimeType rtTarget)
throw new ArgumentException(SR.Argument_MustBeRuntimeType, nameof(target));
// This API existed in v1/v1.1 and only expected to create open
@@ -85,7 +85,6 @@ protected Delegate([DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.Al
return invoke.Invoke(this, BindingFlags.Default, null, args, null);
}
-
public override bool Equals([NotNullWhen(true)] object? obj)
{
if (obj == null || !InternalEqualTypes(this, obj))
@@ -107,9 +106,11 @@ public override bool Equals([NotNullWhen(true)] object? obj)
{
if (d._methodPtrAux != IntPtr.Zero)
return false; // different delegate kind
+
// they are both closed over the first arg
if (_target != d._target)
return false;
+
// fall through method handle check
}
else
@@ -121,19 +122,20 @@ public override bool Equals([NotNullWhen(true)] object? obj)
/*
if (_methodPtr != d._methodPtr)
return false;
- */
+ */
if (_methodPtrAux == d._methodPtrAux)
return true;
+
// fall through method handle check
}
// method ptrs don't match, go down long path
- //
- if (_methodBase == null || d._methodBase == null || !(_methodBase is MethodInfo) || !(d._methodBase is MethodInfo))
- return InternalEqualMethodHandles(this, d);
- else
+
+ if (_methodBase is MethodInfo && d._methodBase is MethodInfo)
return _methodBase.Equals(d._methodBase);
+ else
+ return InternalEqualMethodHandles(this, d);
}
public override int GetHashCode()
@@ -156,56 +158,63 @@ public override int GetHashCode()
protected virtual MethodInfo GetMethodImpl()
{
- if ((_methodBase == null) || !(_methodBase is MethodInfo))
+ if (_methodBase is MethodInfo methodInfo)
{
- IRuntimeMethodInfo method = FindMethodHandle();
- RuntimeType? declaringType = RuntimeMethodHandle.GetDeclaringType(method);
- // need a proper declaring type instance method on a generic type
- if (declaringType.IsGenericType)
+ return methodInfo;
+ }
+
+ IRuntimeMethodInfo method = FindMethodHandle();
+ RuntimeType? declaringType = RuntimeMethodHandle.GetDeclaringType(method);
+
+ // need a proper declaring type instance method on a generic type
+ if (declaringType.IsGenericType)
+ {
+ bool isStatic = (RuntimeMethodHandle.GetAttributes(method) & MethodAttributes.Static) != (MethodAttributes)0;
+ if (!isStatic)
{
- bool isStatic = (RuntimeMethodHandle.GetAttributes(method) & MethodAttributes.Static) != (MethodAttributes)0;
- if (!isStatic)
+ if (_methodPtrAux == IntPtr.Zero)
{
- if (_methodPtrAux == IntPtr.Zero)
+ // The target may be of a derived type that doesn't have visibility onto the
+ // target method. We don't want to call RuntimeType.GetMethodBase below with that
+ // or reflection can end up generating a MethodInfo where the ReflectedType cannot
+ // see the MethodInfo itself and that breaks an important invariant. But the
+ // target type could include important generic type information we need in order
+ // to work out what the exact instantiation of the method's declaring type is. So
+ // we'll walk up the inheritance chain (which will yield exactly instantiated
+ // types at each step) until we find the declaring type. Since the declaring type
+ // we get from the method is probably shared and those in the hierarchy we're
+ // walking won't be we compare using the generic type definition forms instead.
+ Type targetType = declaringType.GetGenericTypeDefinition();
+ Type? currentType;
+ for (currentType = _target!.GetType(); currentType != null; currentType = currentType.BaseType)
{
- // The target may be of a derived type that doesn't have visibility onto the
- // target method. We don't want to call RuntimeType.GetMethodBase below with that
- // or reflection can end up generating a MethodInfo where the ReflectedType cannot
- // see the MethodInfo itself and that breaks an important invariant. But the
- // target type could include important generic type information we need in order
- // to work out what the exact instantiation of the method's declaring type is. So
- // we'll walk up the inheritance chain (which will yield exactly instantiated
- // types at each step) until we find the declaring type. Since the declaring type
- // we get from the method is probably shared and those in the hierarchy we're
- // walking won't be we compare using the generic type definition forms instead.
- Type? currentType = _target!.GetType();
- Type targetType = declaringType.GetGenericTypeDefinition();
- while (currentType != null)
+ if (currentType.IsGenericType &&
+ currentType.GetGenericTypeDefinition() == targetType)
{
- if (currentType.IsGenericType &&
- currentType.GetGenericTypeDefinition() == targetType)
- {
- declaringType = currentType as RuntimeType;
- break;
- }
- currentType = currentType.BaseType;
+ declaringType = currentType as RuntimeType;
+ break;
}
-
- // RCWs don't need to be "strongly-typed" in which case we don't find a base type
- // that matches the declaring type of the method. This is fine because interop needs
- // to work with exact methods anyway so declaringType is never shared at this point.
- Debug.Assert(currentType != null || _target.GetType().IsCOMObject, "The class hierarchy should declare the method");
- }
- else
- {
- // it's an open one, need to fetch the first arg of the instantiation
- MethodInfo invoke = this.GetType().GetMethod("Invoke")!;
- declaringType = (RuntimeType)invoke.GetParametersAsSpan()[0].ParameterType;
}
+
+ // RCWs don't need to be "strongly-typed" in which case we don't find a base type
+ // that matches the declaring type of the method. This is fine because interop needs
+ // to work with exact methods anyway so declaringType is never shared at this point.
+ // The targetType may also be an interface with a Default interface method (DIM).
+ Debug.Assert(
+ currentType != null
+ || _target.GetType().IsCOMObject
+ || targetType.IsInterface, "The class hierarchy should declare the method or be a DIM");
+ }
+ else
+ {
+ // it's an open one, need to fetch the first arg of the instantiation
+ MethodInfo invoke = this.GetType().GetMethod("Invoke")!;
+ declaringType = (RuntimeType)invoke.GetParametersAsSpan()[0].ParameterType;
}
}
- _methodBase = (MethodInfo)RuntimeType.GetMethodBase(declaringType, method)!;
}
+
+ _methodBase = (MethodInfo)RuntimeType.GetMethodBase(declaringType, method)!;
return (MethodInfo)_methodBase;
}
@@ -219,7 +228,7 @@ protected virtual MethodInfo GetMethodImpl()
ArgumentNullException.ThrowIfNull(target);
ArgumentNullException.ThrowIfNull(method);
- if (!(type is RuntimeType rtType))
+ if (type is not RuntimeType rtType)
throw new ArgumentException(SR.Argument_MustBeRuntimeType, nameof(type));
if (!rtType.IsDelegate())
throw new ArgumentException(SR.Arg_MustBeDelegate, nameof(type));
@@ -256,9 +265,9 @@ protected virtual MethodInfo GetMethodImpl()
if (target.ContainsGenericParameters)
throw new ArgumentException(SR.Arg_UnboundGenParam, nameof(target));
- if (!(type is RuntimeType rtType))
+ if (type is not RuntimeType rtType)
throw new ArgumentException(SR.Argument_MustBeRuntimeType, nameof(type));
- if (!(target is RuntimeType rtTarget))
+ if (target is not RuntimeType rtTarget)
throw new ArgumentException(SR.Argument_MustBeRuntimeType, nameof(target));
if (!rtType.IsDelegate())
@@ -289,10 +298,10 @@ protected virtual MethodInfo GetMethodImpl()
ArgumentNullException.ThrowIfNull(type);
ArgumentNullException.ThrowIfNull(method);
- if (!(type is RuntimeType rtType))
+ if (type is not RuntimeType rtType)
throw new ArgumentException(SR.Argument_MustBeRuntimeType, nameof(type));
- if (!(method is RuntimeMethodInfo rmi))
+ if (method is not RuntimeMethodInfo rmi)
throw new ArgumentException(SR.Argument_MustBeRuntimeMethodInfo, nameof(method));
if (!rtType.IsDelegate())
@@ -324,10 +333,10 @@ protected virtual MethodInfo GetMethodImpl()
ArgumentNullException.ThrowIfNull(type);
ArgumentNullException.ThrowIfNull(method);
- if (!(type is RuntimeType rtType))
+ if (type is not RuntimeType rtType)
throw new ArgumentException(SR.Argument_MustBeRuntimeType, nameof(type));
- if (!(method is RuntimeMethodInfo rmi))
+ if (method is not RuntimeMethodInfo rmi)
throw new ArgumentException(SR.Argument_MustBeRuntimeMethodInfo, nameof(method));
if (!rtType.IsDelegate())
@@ -362,7 +371,7 @@ internal static Delegate CreateDelegateNoSecurityCheck(Type type, object? target
if (method.IsNullHandle())
throw new ArgumentNullException(nameof(method));
- if (!(type is RuntimeType rtType))
+ if (type is not RuntimeType rtType)
throw new ArgumentException(SR.Argument_MustBeRuntimeType, nameof(type));
if (!rtType.IsDelegate())
@@ -473,10 +482,35 @@ internal static unsafe bool InternalEqualTypes(object a, object b)
private extern void DelegateConstruct(object target, IntPtr slot);
[MethodImpl(MethodImplOptions.InternalCall)]
- internal extern IntPtr GetMulticastInvoke();
+ private static extern unsafe void* GetMulticastInvoke(MethodTable* pMT);
+
+ [LibraryImport(RuntimeHelpers.QCall, EntryPoint = "Delegate_GetMulticastInvokeSlow")]
+ private static unsafe partial void* GetMulticastInvokeSlow(MethodTable* pMT);
+
+ internal unsafe IntPtr GetMulticastInvoke()
+ {
+ MethodTable* pMT = RuntimeHelpers.GetMethodTable(this);
+ void* ptr = GetMulticastInvoke(pMT);
+ if (ptr == null)
+ {
+ ptr = GetMulticastInvokeSlow(pMT);
+ Debug.Assert(ptr != null);
+ Debug.Assert(ptr == GetMulticastInvoke(pMT));
+ }
+ // No GC.KeepAlive() since the caller must keep instance alive to use returned pointer.
+ return (IntPtr)ptr;
+ }
[MethodImpl(MethodImplOptions.InternalCall)]
- internal extern IntPtr GetInvokeMethod();
+ private static extern unsafe void* GetInvokeMethod(MethodTable* pMT);
+
+ internal unsafe IntPtr GetInvokeMethod()
+ {
+ MethodTable* pMT = RuntimeHelpers.GetMethodTable(this);
+ void* ptr = GetInvokeMethod(pMT);
+ // No GC.KeepAlive() since the caller must keep instance alive to use returned pointer.
+ return (IntPtr)ptr;
+ }
internal IRuntimeMethodInfo FindMethodHandle()
{
diff --git a/src/coreclr/System.Private.CoreLib/src/System/MulticastDelegate.CoreCLR.cs b/src/coreclr/System.Private.CoreLib/src/System/MulticastDelegate.CoreCLR.cs
index ab63d4cf83b74d..0a850691d68678 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/MulticastDelegate.CoreCLR.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/MulticastDelegate.CoreCLR.cs
@@ -225,7 +225,7 @@ protected sealed override Delegate CombineImpl(Delegate? follow)
followCount = (int)dFollow._invocationCount;
int resultCount;
- if (!(_invocationList is object[] invocationList))
+ if (_invocationList is not object[] invocationList)
{
resultCount = 1 + followCount;
resultList = new object[resultCount];
@@ -335,9 +335,9 @@ private static bool EqualInvocationLists(object[] a, object[] b, int start, int
if (v == null)
return this;
- if (!(v._invocationList is object[]))
+ if (v._invocationList is not object[])
{
- if (!(_invocationList is object[] invocationList))
+ if (_invocationList is not object[] invocationList)
{
// they are both not real Multicast
if (this.Equals(value))
@@ -401,7 +401,7 @@ private static bool EqualInvocationLists(object[] a, object[] b, int start, int
public sealed override Delegate[] GetInvocationList()
{
Delegate[] del;
- if (!(_invocationList is object[] invocationList))
+ if (_invocationList is not object[] invocationList)
{
del = new Delegate[1];
del[0] = this;
@@ -418,12 +418,12 @@ public sealed override Delegate[] GetInvocationList()
return del;
}
- internal new bool HasSingleTarget => !(_invocationList is object[]);
+ internal new bool HasSingleTarget => _invocationList is not object[];
// Used by delegate invocation list enumerator
internal object? /* Delegate? */ TryGetAt(int index)
{
- if (!(_invocationList is object[] invocationList))
+ if (_invocationList is not object[] invocationList)
{
return (index == 0) ? this : null;
}
@@ -447,7 +447,7 @@ public sealed override int GetHashCode()
}
}
- if (!(_invocationList is object[] invocationList))
+ if (_invocationList is not object[] invocationList)
{
return base.GetHashCode();
}
@@ -515,20 +515,23 @@ protected override MethodInfo GetMethodImpl()
{
// we handle unmanaged function pointers here because the generic ones (used for WinRT) would otherwise
// be treated as open delegates by the base implementation, resulting in failure to get the MethodInfo
- if ((_methodBase == null) || !(_methodBase is MethodInfo))
+ if (_methodBase is MethodInfo methodInfo)
{
- IRuntimeMethodInfo method = FindMethodHandle();
- RuntimeType declaringType = RuntimeMethodHandle.GetDeclaringType(method);
+ return methodInfo;
+ }
- // need a proper declaring type instance method on a generic type
- if (declaringType.IsGenericType)
- {
- // we are returning the 'Invoke' method of this delegate so use this.GetType() for the exact type
- RuntimeType reflectedType = (RuntimeType)GetType();
- declaringType = reflectedType;
- }
- _methodBase = (MethodInfo)RuntimeType.GetMethodBase(declaringType, method)!;
+ IRuntimeMethodInfo method = FindMethodHandle();
+ RuntimeType declaringType = RuntimeMethodHandle.GetDeclaringType(method);
+
+ // need a proper declaring type instance method on a generic type
+ if (declaringType.IsGenericType)
+ {
+ // we are returning the 'Invoke' method of this delegate so use this.GetType() for the exact type
+ RuntimeType reflectedType = (RuntimeType)GetType();
+ declaringType = reflectedType;
}
+
+ _methodBase = (MethodInfo)RuntimeType.GetMethodBase(declaringType, method)!;
return (MethodInfo)_methodBase;
}
diff --git a/src/coreclr/System.Private.CoreLib/src/System/Reflection/Emit/CustomAttributeBuilder.cs b/src/coreclr/System.Private.CoreLib/src/System/Reflection/Emit/CustomAttributeBuilder.cs
index b7ec740a7d3371..81733fbcea6561 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/Reflection/Emit/CustomAttributeBuilder.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/Reflection/Emit/CustomAttributeBuilder.cs
@@ -130,9 +130,7 @@ public CustomAttributeBuilder(ConstructorInfo con, object?[] constructorArgs, Pr
for (i = 0; i < namedProperties.Length; i++)
{
// Validate the property.
- PropertyInfo property = namedProperties[i];
- if (property == null)
- throw new ArgumentNullException("namedProperties[" + i + "]");
+ PropertyInfo property = namedProperties[i] ?? throw new ArgumentNullException($"namedProperties[{i}]");
// Allow null for non-primitive types only.
Type propType = property.PropertyType;
@@ -150,7 +148,7 @@ public CustomAttributeBuilder(ConstructorInfo con, object?[] constructorArgs, Pr
// Property has to be from the same class or base class as ConstructorInfo.
if (property.DeclaringType != con.DeclaringType
- && (!(con.DeclaringType is TypeBuilderInstantiation))
+ && (con.DeclaringType is not TypeBuilderInstantiation)
&& !con.DeclaringType!.IsSubclassOf(property.DeclaringType!))
{
// Might have failed check because one type is a XXXBuilder
@@ -162,7 +160,7 @@ public CustomAttributeBuilder(ConstructorInfo con, object?[] constructorArgs, Pr
// the constructor is a TypeBuilder, but we still need
// to deal with the case where the property's declaring
// type is one.
- if (!(property.DeclaringType is TypeBuilder) ||
+ if (property.DeclaringType is not TypeBuilder ||
!con.DeclaringType.IsSubclassOf(((RuntimeTypeBuilder)property.DeclaringType).BakedRuntimeType))
throw new ArgumentException(SR.Argument_BadPropertyForConstructorBuilder);
}
@@ -188,9 +186,7 @@ public CustomAttributeBuilder(ConstructorInfo con, object?[] constructorArgs, Pr
for (i = 0; i < namedFields.Length; i++)
{
// Validate the field.
- FieldInfo namedField = namedFields[i];
- if (namedField == null)
- throw new ArgumentNullException("namedFields[" + i + "]");
+ FieldInfo namedField = namedFields[i] ?? throw new ArgumentNullException($"namedFields[{i}]");
// Allow null for non-primitive types only.
Type fldType = namedField.FieldType;
@@ -204,7 +200,7 @@ public CustomAttributeBuilder(ConstructorInfo con, object?[] constructorArgs, Pr
// Field has to be from the same class or base class as ConstructorInfo.
if (namedField.DeclaringType != con.DeclaringType
- && (!(con.DeclaringType is TypeBuilderInstantiation))
+ && (con.DeclaringType is not TypeBuilderInstantiation)
&& !con.DeclaringType!.IsSubclassOf(namedField.DeclaringType!))
{
// Might have failed check because one type is a XXXBuilder
@@ -216,7 +212,7 @@ public CustomAttributeBuilder(ConstructorInfo con, object?[] constructorArgs, Pr
// the constructor is a TypeBuilder, but we still need
// to deal with the case where the field's declaring
// type is one.
- if (!(namedField.DeclaringType is TypeBuilder) ||
+ if (namedField.DeclaringType is not TypeBuilder ||
!con.DeclaringType.IsSubclassOf(((RuntimeTypeBuilder)namedFields[i].DeclaringType!).BakedRuntimeType))
throw new ArgumentException(SR.Argument_BadFieldForConstructorBuilder);
}
@@ -255,20 +251,11 @@ private static bool ValidateType(Type t)
}
if (t.IsEnum)
{
- switch (Type.GetTypeCode(Enum.GetUnderlyingType(t)))
- {
- case TypeCode.SByte:
- case TypeCode.Byte:
- case TypeCode.Int16:
- case TypeCode.UInt16:
- case TypeCode.Int32:
- case TypeCode.UInt32:
- case TypeCode.Int64:
- case TypeCode.UInt64:
- return true;
- default:
- return false;
- }
+ return Type.GetTypeCode(Enum.GetUnderlyingType(t)) is
+ TypeCode.SByte or TypeCode.Byte or
+ TypeCode.Int16 or TypeCode.UInt16 or
+ TypeCode.Int32 or TypeCode.UInt32 or
+ TypeCode.Int64 or TypeCode.UInt64;
}
if (t.IsArray)
{
@@ -429,8 +416,7 @@ private static void EmitValue(BinaryWriter writer, Type type, object? value)
writer.Write((byte)0xff);
else
{
- string? typeName = TypeNameBuilder.ToString((Type)value, TypeNameBuilder.Format.AssemblyQualifiedName);
- if (typeName == null)
+ string typeName = TypeNameBuilder.ToString((Type)value, TypeNameBuilder.Format.AssemblyQualifiedName) ??
throw new ArgumentException(SR.Format(SR.Argument_InvalidTypeForCA, value.GetType()));
EmitString(writer, typeName);
}
diff --git a/src/coreclr/System.Private.CoreLib/src/System/Reflection/Emit/DynamicILGenerator.cs b/src/coreclr/System.Private.CoreLib/src/System/Reflection/Emit/DynamicILGenerator.cs
index 327113c63f9a3c..d432b00c1ebf6c 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/Reflection/Emit/DynamicILGenerator.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/Reflection/Emit/DynamicILGenerator.cs
@@ -35,16 +35,13 @@ internal void GetCallableMethod(RuntimeModule module, DynamicMethod dm)
public override LocalBuilder DeclareLocal(Type localType, bool pinned)
{
ArgumentNullException.ThrowIfNull(localType);
-
- RuntimeLocalBuilder localBuilder;
-
- RuntimeType? rtType = localType as RuntimeType;
-
- if (rtType == null)
+ if (localType is not RuntimeType)
+ {
throw new ArgumentException(SR.Argument_MustBeRuntimeType);
+ }
- localBuilder = new RuntimeLocalBuilder(m_localCount, localType, m_methodBuilder, pinned);
// add the localType to local signature
+ RuntimeLocalBuilder localBuilder = new RuntimeLocalBuilder(m_localCount, localType, m_methodBuilder, pinned);
m_localSignature.AddArgument(localType, pinned);
m_localCount++;
return localBuilder;
@@ -64,23 +61,24 @@ public override void Emit(OpCode opcode, MethodInfo meth)
DynamicMethod? dynMeth = meth as DynamicMethod;
if (dynMeth == null)
{
- RuntimeMethodInfo? rtMeth = meth as RuntimeMethodInfo;
- if (rtMeth == null)
+ RuntimeMethodInfo rtMeth = meth as RuntimeMethodInfo ??
throw new ArgumentException(SR.Argument_MustBeRuntimeMethodInfo, nameof(meth));
RuntimeType declaringType = rtMeth.GetRuntimeType();
- if (declaringType != null && (declaringType.IsGenericType || declaringType.IsArray))
- token = GetTokenFor(rtMeth, declaringType);
- else
- token = GetTokenFor(rtMeth);
+ token = declaringType != null && (declaringType.IsGenericType || declaringType.IsArray) ?
+ GetTokenFor(rtMeth, declaringType) :
+ GetTokenFor(rtMeth);
}
else
{
// rule out not allowed operations on DynamicMethods
- if (opcode.Equals(OpCodes.Ldtoken) || opcode.Equals(OpCodes.Ldftn) || opcode.Equals(OpCodes.Ldvirtftn))
+ if (opcode.Equals(OpCodes.Ldtoken) ||
+ opcode.Equals(OpCodes.Ldftn) ||
+ opcode.Equals(OpCodes.Ldvirtftn))
{
throw new ArgumentException(SR.Argument_InvalidOpCodeOnDynamicMethod);
}
+
token = GetTokenFor(dynMeth);
}
@@ -112,18 +110,13 @@ public override void Emit(OpCode opcode, ConstructorInfo con)
{
ArgumentNullException.ThrowIfNull(con);
- RuntimeConstructorInfo? rtConstructor = con as RuntimeConstructorInfo;
- if (rtConstructor == null)
+ RuntimeConstructorInfo rtConstructor = con as RuntimeConstructorInfo ??
throw new ArgumentException(SR.Argument_MustBeRuntimeMethodInfo, nameof(con));
RuntimeType declaringType = rtConstructor.GetRuntimeType();
- int token;
-
- if (declaringType != null && (declaringType.IsGenericType || declaringType.IsArray))
- // need to sort out the stack size story
- token = GetTokenFor(rtConstructor, declaringType);
- else
- token = GetTokenFor(rtConstructor);
+ int token = declaringType != null && (declaringType.IsGenericType || declaringType.IsArray) ?
+ GetTokenFor(rtConstructor, declaringType) : // need to sort out the stack size story
+ GetTokenFor(rtConstructor);
EnsureCapacity(7);
InternalEmit(opcode);
@@ -137,11 +130,7 @@ public override void Emit(OpCode opcode, Type type)
{
ArgumentNullException.ThrowIfNull(type);
- RuntimeType? rtType = type as RuntimeType;
-
- if (rtType == null)
- throw new ArgumentException(SR.Argument_MustBeRuntimeType);
-
+ RuntimeType rtType = type as RuntimeType ?? throw new ArgumentException(SR.Argument_MustBeRuntimeType);
int token = GetTokenFor(rtType);
EnsureCapacity(7);
InternalEmit(opcode);
@@ -152,15 +141,12 @@ public override void Emit(OpCode opcode, FieldInfo field)
{
ArgumentNullException.ThrowIfNull(field);
- RuntimeFieldInfo? runtimeField = field as RuntimeFieldInfo;
- if (runtimeField == null)
+ RuntimeFieldInfo runtimeField = field as RuntimeFieldInfo ??
throw new ArgumentException(SR.Argument_MustBeRuntimeFieldInfo, nameof(field));
- int token;
- if (field.DeclaringType == null)
- token = GetTokenFor(runtimeField);
- else
- token = GetTokenFor(runtimeField, runtimeField.GetRuntimeType());
+ int token = field.DeclaringType == null ?
+ GetTokenFor(runtimeField) :
+ GetTokenFor(runtimeField, runtimeField.GetRuntimeType());
EnsureCapacity(7);
InternalEmit(opcode);
@@ -280,7 +266,7 @@ public override void EmitCall(OpCode opcode, MethodInfo methodInfo, Type[]? opti
stackchange -= methodInfo.GetParameterTypes().Length;
// Pop the this parameter if the method is non-static and the
// instruction is not newobj.
- if (!(methodInfo is SymbolMethod) && !methodInfo.IsStatic && !opcode.Equals(OpCodes.Newobj))
+ if (methodInfo is not SymbolMethod && !methodInfo.IsStatic && !opcode.Equals(OpCodes.Newobj))
stackchange--;
// Pop the optional parameters off the stack.
if (optionalParameterTypes != null)
@@ -783,11 +769,7 @@ internal override void ResolveToken(int token, out IntPtr typeHandle, out IntPtr
methodHandle = default;
fieldHandle = default;
- object? handle = m_scope[token];
-
- if (handle == null)
- throw new InvalidProgramException();
-
+ object handle = m_scope[token] ?? throw new InvalidProgramException();
if (handle is RuntimeTypeHandle)
{
typeHandle = ((RuntimeTypeHandle)handle).Value;
@@ -1010,7 +992,7 @@ internal int GetTokenFor(VarArgMethod varArgMethod)
if (fromMethod == 0)
return (byte[]?)this[token];
- if (!(this[token] is VarArgMethod vaMethod))
+ if (this[token] is not VarArgMethod vaMethod)
return null;
return vaMethod.m_signature.GetSignature(true);
diff --git a/src/coreclr/System.Private.CoreLib/src/System/Reflection/Emit/DynamicMethod.CoreCLR.cs b/src/coreclr/System.Private.CoreLib/src/System/Reflection/Emit/DynamicMethod.CoreCLR.cs
index 1e3242cc0fbe1c..abdb8be14b27a3 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/Reflection/Emit/DynamicMethod.CoreCLR.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/Reflection/Emit/DynamicMethod.CoreCLR.cs
@@ -132,26 +132,13 @@ Signature LazyCreateSignature()
int argCount = (parameters != null) ? parameters.Length : 0;
if (Signature.Arguments.Length != argCount)
throw new TargetParameterCountException(SR.Arg_ParmCnt);
-
- object? retValue;
- switch (argCount)
+ object? retValue = argCount switch
{
- case 0:
- retValue = Invoker.InvokeWithNoArgs(obj, invokeAttr);
- break;
- case 1:
- retValue = Invoker.InvokeWithOneArg(obj, invokeAttr, binder, parameters!, culture);
- break;
- case 2:
- case 3:
- case 4:
- retValue = Invoker.InvokeWithFewArgs(obj, invokeAttr, binder, parameters!, culture);
- break;
- default:
- retValue = Invoker.InvokeWithManyArgs(obj, invokeAttr, binder, parameters!, culture);
- break;
- }
-
+ 0 => Invoker.InvokeWithNoArgs(obj, invokeAttr),
+ 1 => Invoker.InvokeWithOneArg(obj, invokeAttr, binder, parameters!, culture),
+ 2 or 3 or 4 => Invoker.InvokeWithFewArgs(obj, invokeAttr, binder, parameters!, culture),
+ _ => Invoker.InvokeWithManyArgs(obj, invokeAttr, binder, parameters!, culture),
+ };
GC.KeepAlive(this);
return retValue;
}
diff --git a/src/coreclr/System.Private.CoreLib/src/System/Reflection/Emit/RuntimeAssemblyBuilder.cs b/src/coreclr/System.Private.CoreLib/src/System/Reflection/Emit/RuntimeAssemblyBuilder.cs
index fa3bfd25dac2f8..474e2c68292709 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/Reflection/Emit/RuntimeAssemblyBuilder.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/Reflection/Emit/RuntimeAssemblyBuilder.cs
@@ -54,14 +54,9 @@ private static RuntimeAssemblyBuilder DefineDynamicAssembly(
EnsureDynamicCodeSupported();
- AssemblyLoadContext? assemblyLoadContext =
- AssemblyLoadContext.CurrentContextualReflectionContext ?? AssemblyLoadContext.GetLoadContext(callingAssembly);
-
- if (assemblyLoadContext == null)
- {
+ AssemblyLoadContext assemblyLoadContext =
+ (AssemblyLoadContext.CurrentContextualReflectionContext ?? AssemblyLoadContext.GetLoadContext(callingAssembly)) ??
throw new InvalidOperationException();
- }
-
return new RuntimeAssemblyBuilder(name, access, assemblyLoadContext, assemblyAttributes);
}
}
diff --git a/src/coreclr/System.Private.CoreLib/src/System/Reflection/Emit/RuntimeILGenerator.cs b/src/coreclr/System.Private.CoreLib/src/System/Reflection/Emit/RuntimeILGenerator.cs
index c2d08f7e85af8e..8976805080d145 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/Reflection/Emit/RuntimeILGenerator.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/Reflection/Emit/RuntimeILGenerator.cs
@@ -619,7 +619,7 @@ public override void EmitCall(OpCode opcode, MethodInfo methodInfo, Type[]? opti
// Pop the this parameter if the method is non-static and the
// instruction is not newobj.
- if (!(methodInfo is SymbolMethod) && !methodInfo.IsStatic && !opcode.Equals(OpCodes.Newobj))
+ if (methodInfo is not SymbolMethod && !methodInfo.IsStatic && !opcode.Equals(OpCodes.Newobj))
stackchange--;
// Pop the optional parameters off the stack.
if (optionalParameterTypes != null)
diff --git a/src/coreclr/System.Private.CoreLib/src/System/Reflection/Emit/RuntimeMethodBuilder.cs b/src/coreclr/System.Private.CoreLib/src/System/Reflection/Emit/RuntimeMethodBuilder.cs
index fbc3136840f372..6adb12f0012e69 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/Reflection/Emit/RuntimeMethodBuilder.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/Reflection/Emit/RuntimeMethodBuilder.cs
@@ -881,20 +881,12 @@ internal ExceptionHandler(int tryStartOffset, int tryEndOffset, int filterOffset
m_exceptionClass = exceptionTypeToken;
}
- private static bool IsValidKind(ExceptionHandlingClauseOptions kind)
- {
- switch (kind)
- {
- case ExceptionHandlingClauseOptions.Clause:
- case ExceptionHandlingClauseOptions.Filter:
- case ExceptionHandlingClauseOptions.Finally:
- case ExceptionHandlingClauseOptions.Fault:
- return true;
-
- default:
- return false;
- }
- }
+ private static bool IsValidKind(ExceptionHandlingClauseOptions kind) =>
+ kind is
+ ExceptionHandlingClauseOptions.Clause or
+ ExceptionHandlingClauseOptions.Filter or
+ ExceptionHandlingClauseOptions.Finally or
+ ExceptionHandlingClauseOptions.Fault;
#endregion
diff --git a/src/coreclr/System.Private.CoreLib/src/System/Reflection/Emit/RuntimeModuleBuilder.cs b/src/coreclr/System.Private.CoreLib/src/System/Reflection/Emit/RuntimeModuleBuilder.cs
index c78d8da5a9a07f..2aad5096f25f05 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/Reflection/Emit/RuntimeModuleBuilder.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/Reflection/Emit/RuntimeModuleBuilder.cs
@@ -252,11 +252,8 @@ public override int GetMethodMetadataToken(ConstructorInfo constructor)
{
// some user derived ConstructorInfo
// go through the slower code path, i.e. retrieve parameters and form signature helper.
- ParameterInfo[] parameters = constructor.GetParameters();
- if (parameters == null)
- {
+ ParameterInfo[] parameters = constructor.GetParameters() ??
throw new ArgumentException(SR.Argument_InvalidConstructorInfo);
- }
Type[] parameterTypes = new Type[parameters.Length];
Type[][] requiredCustomModifiers = new Type[parameters.Length][];
@@ -995,13 +992,8 @@ private int GetMethodTokenNoLock(MethodInfo method, bool getGenericTypeDefinitio
}
else
{
- Type? declaringType = method.DeclaringType;
-
- // We need to get the TypeRef tokens
- if (declaringType == null)
- {
+ Type declaringType = method.DeclaringType ??
throw new InvalidOperationException(SR.InvalidOperation_CannotImportGlobalFromDifferentModule);
- }
if (declaringType.IsArray)
{
diff --git a/src/coreclr/System.Private.CoreLib/src/System/Reflection/Emit/SignatureHelper.cs b/src/coreclr/System.Private.CoreLib/src/System/Reflection/Emit/SignatureHelper.cs
index 0e730bf25fbd4d..2ba8e7a1388f07 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/Reflection/Emit/SignatureHelper.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/Reflection/Emit/SignatureHelper.cs
@@ -276,9 +276,7 @@ private void AddOneArgTypeHelper(Type clsArgument, Type[]? requiredCustomModifiers, Type[]? optionalCustomModifiers)
{
for (int i = 0; i < optionalCustomModifiers.Length; i++)
{
- Type t = optionalCustomModifiers[i];
-
- if (t == null)
+ Type t = optionalCustomModifiers[i] ??
throw new ArgumentNullException(nameof(optionalCustomModifiers));
if (t.HasElementType)
diff --git a/src/coreclr/System.Private.CoreLib/src/System/Reflection/MdImport.cs b/src/coreclr/System.Private.CoreLib/src/System/Reflection/MdImport.cs
index c95e1630a04a1f..ad960b1bb45631 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/Reflection/MdImport.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/Reflection/MdImport.cs
@@ -214,7 +214,7 @@ public override int GetHashCode()
public override bool Equals(object? obj)
{
- if (!(obj is MetadataImport))
+ if (obj is not MetadataImport)
return false;
return Equals((MetadataImport)obj);
}
diff --git a/src/coreclr/System.Private.CoreLib/src/System/Reflection/MemberInfo.Internal.cs b/src/coreclr/System.Private.CoreLib/src/System/Reflection/MemberInfo.Internal.cs
index 7b4a21e157cd64..590b91b4edf7c5 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/Reflection/MemberInfo.Internal.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/Reflection/MemberInfo.Internal.cs
@@ -12,7 +12,7 @@ internal bool HasSameMetadataDefinitionAsCore<TOther>(MemberInfo other) where TOther : MemberInfo
ArgumentNullException.ThrowIfNull(other);
// Ensure that "other" is a runtime-implemented MemberInfo. Do this check before calling any methods on it!
- if (!(other is TOther))
+ if (other is not TOther)
return false;
if (MetadataToken != other.MetadataToken)
diff --git a/src/coreclr/System.Private.CoreLib/src/System/Reflection/RuntimeAssembly.cs b/src/coreclr/System.Private.CoreLib/src/System/Reflection/RuntimeAssembly.cs
index 3af48057a870e0..ccd3d9aa9efc5e 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/Reflection/RuntimeAssembly.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/Reflection/RuntimeAssembly.cs
@@ -103,12 +103,7 @@ public override string? CodeBase
throw new NotSupportedException(SR.NotSupported_DynamicAssembly);
}
- string? codeBase = GetCodeBase();
- if (codeBase is null)
- {
- // Not supported if the assembly was loaded from single-file bundle.
- throw new NotSupportedException(SR.NotSupported_CodeBase);
- }
+ string? codeBase = GetCodeBase() ?? throw new NotSupportedException(SR.NotSupported_CodeBase);
if (codeBase.Length == 0)
{
// For backward compatibility, return CoreLib codebase for assemblies loaded from memory.
@@ -598,12 +593,12 @@ private CultureInfo GetLocale()
[LibraryImport(RuntimeHelpers.QCall, EntryPoint = "AssemblyNative_GetSimpleName")]
private static partial void GetSimpleName(QCallAssembly assembly, StringHandleOnStack retSimpleName);
- internal string? GetSimpleName()
+ internal string GetSimpleName()
{
RuntimeAssembly runtimeAssembly = this;
string? name = null;
GetSimpleName(new QCallAssembly(ref runtimeAssembly), new StringHandleOnStack(ref name));
- return name;
+ return name!;
}
[LibraryImport(RuntimeHelpers.QCall, EntryPoint = "AssemblyNative_GetHashAlgorithm")]
diff --git a/src/coreclr/System.Private.CoreLib/src/System/Reflection/RuntimeCustomAttributeData.cs b/src/coreclr/System.Private.CoreLib/src/System/Reflection/RuntimeCustomAttributeData.cs
index 72003736ff230a..6a4f639d8eb083 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/Reflection/RuntimeCustomAttributeData.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/Reflection/RuntimeCustomAttributeData.cs
@@ -892,12 +892,7 @@ private static CustomAttributeType ParseCustomAttributeType(ref CustomAttributeD
&& arrayTag is CustomAttributeEncoding.Enum))
{
// We cannot determine the underlying type without loading the enum.
- string? enumTypeMaybe = parser.GetString();
- if (enumTypeMaybe is null)
- {
- throw new BadImageFormatException();
- }
-
+ string enumTypeMaybe = parser.GetString() ?? throw new BadImageFormatException();
enumType = TypeNameResolver.GetTypeReferencedByCustomAttribute(enumTypeMaybe, module);
if (!enumType.IsEnum)
{
@@ -1580,14 +1575,8 @@ private static void AddCustomAttributes(
RuntimePropertyInfo? property = (RuntimePropertyInfo?)(type is null ?
attributeType.GetProperty(name) :
- attributeType.GetProperty(name, type, Type.EmptyTypes));
-
- // Did we get a valid property reference?
- if (property is null)
- {
+ attributeType.GetProperty(name, type, Type.EmptyTypes)) ??
throw new CustomAttributeFormatException(SR.Format(SR.RFLCT_InvalidPropFail, name));
- }
-
RuntimeMethodInfo setMethod = property.GetSetMethod(true)!;
// Public properties may have non-public setter methods
diff --git a/src/coreclr/System.Private.CoreLib/src/System/Reflection/RuntimeMethodInfo.CoreCLR.cs b/src/coreclr/System.Private.CoreLib/src/System/Reflection/RuntimeMethodInfo.CoreCLR.cs
index 9c999912382a63..a4bd430b620082 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/Reflection/RuntimeMethodInfo.CoreCLR.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/Reflection/RuntimeMethodInfo.CoreCLR.cs
@@ -380,20 +380,14 @@ private Delegate CreateDelegateInternal(Type delegateType, object? firstArgument
{
ArgumentNullException.ThrowIfNull(delegateType);
- RuntimeType? rtType = delegateType as RuntimeType;
- if (rtType == null)
+ RuntimeType rtType = delegateType as RuntimeType ??
throw new ArgumentException(SR.Argument_MustBeRuntimeType, nameof(delegateType));
if (!rtType.IsDelegate())
throw new ArgumentException(SR.Arg_MustBeDelegate, nameof(delegateType));
- Delegate? d = Delegate.CreateDelegateInternal(rtType, this, firstArgument, bindingFlags);
- if (d == null)
- {
+ return Delegate.CreateDelegateInternal(rtType, this, firstArgument, bindingFlags) ??
throw new ArgumentException(SR.Arg_DlgtTargMeth);
- }
-
- return d;
}
#endregion
diff --git a/src/coreclr/System.Private.CoreLib/src/System/Reflection/RuntimeModule.cs b/src/coreclr/System.Private.CoreLib/src/System/Reflection/RuntimeModule.cs
index 43760e17b4d987..9fed8f5838b2fe 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/Reflection/RuntimeModule.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/Reflection/RuntimeModule.cs
@@ -33,14 +33,11 @@ internal sealed partial class RuntimeModule : Module
RuntimeTypeHandle[] typeHandleArgs = new RuntimeTypeHandle[size];
for (int i = 0; i < size; i++)
{
- Type typeArg = genericArguments[i];
- if (typeArg == null)
- throw new ArgumentException(SR.Argument_InvalidGenericInstArray);
- typeArg = typeArg.UnderlyingSystemType;
- if (typeArg == null)
- throw new ArgumentException(SR.Argument_InvalidGenericInstArray);
- if (!(typeArg is RuntimeType))
+ Type? typeArg = genericArguments[i]?.UnderlyingSystemType;
+
+ if (typeArg is not System.RuntimeType)
throw new ArgumentException(SR.Argument_InvalidGenericInstArray);
+
typeHandleArgs[i] = typeArg.TypeHandle;
}
return typeHandleArgs;
@@ -307,12 +304,8 @@ public override string ResolveString(int metadataToken)
throw new ArgumentOutOfRangeException(nameof(metadataToken),
SR.Format(SR.Argument_InvalidToken, tk, this));
- string? str = MetadataImport.GetUserString(metadataToken);
-
- if (str == null)
- throw new ArgumentException(
- SR.Format(SR.Argument_ResolveString, metadataToken, this));
-
+ string str = MetadataImport.GetUserString(metadataToken) ??
+ throw new ArgumentException(SR.Format(SR.Argument_ResolveString, metadataToken, this));
return str;
}
diff --git a/src/coreclr/System.Private.CoreLib/src/System/Reflection/RuntimePropertyInfo.cs b/src/coreclr/System.Private.CoreLib/src/System/Reflection/RuntimePropertyInfo.cs
index d49ac821e684d9..2d542fb78ee101 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/Reflection/RuntimePropertyInfo.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/Reflection/RuntimePropertyInfo.cs
@@ -333,9 +333,7 @@ internal ReadOnlySpan<ParameterInfo> GetIndexParametersSpan()
[DebuggerHidden]
public override object? GetValue(object? obj, BindingFlags invokeAttr, Binder? binder, object?[]? index, CultureInfo? culture)
{
- RuntimeMethodInfo? m = GetGetMethod(true);
- if (m == null)
- throw new ArgumentException(SR.Arg_GetMethNotFnd);
+ RuntimeMethodInfo m = GetGetMethod(true) ?? throw new ArgumentException(SR.Arg_GetMethNotFnd);
return m.Invoke(obj, invokeAttr, binder, index, null);
}
@@ -355,11 +353,7 @@ public override void SetValue(object? obj, object? value, object?[]? index)
[DebuggerHidden]
public override void SetValue(object? obj, object? value, BindingFlags invokeAttr, Binder? binder, object?[]? index, CultureInfo? culture)
{
- RuntimeMethodInfo? m = GetSetMethod(true);
-
- if (m == null)
- throw new ArgumentException(SR.Arg_SetMethNotFnd);
-
+ RuntimeMethodInfo m = GetSetMethod(true) ?? throw new ArgumentException(SR.Arg_SetMethNotFnd);
if (index is null)
{
m.InvokePropertySetter(obj, invokeAttr, binder, value, culture);
diff --git a/src/coreclr/System.Private.CoreLib/src/System/Runtime/CompilerServices/RuntimeHelpers.CoreCLR.cs b/src/coreclr/System.Private.CoreLib/src/System/Runtime/CompilerServices/RuntimeHelpers.CoreCLR.cs
index a0af2fedefac53..e155b109886d08 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/Runtime/CompilerServices/RuntimeHelpers.CoreCLR.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/Runtime/CompilerServices/RuntimeHelpers.CoreCLR.cs
@@ -167,8 +167,7 @@ private static unsafe ref byte GetSpanDataFrom(
[RequiresUnreferencedCode("Trimmer can't guarantee existence of class constructor")]
public static void RunClassConstructor(RuntimeTypeHandle type)
{
- RuntimeType rt = type.GetRuntimeType();
- if (rt is null)
+ RuntimeType rt = type.GetRuntimeType() ??
throw new ArgumentException(SR.InvalidOperation_HandleIsNotInitialized, nameof(type));
RunClassConstructor(new QCallTypeHandle(ref rt));
@@ -187,8 +186,7 @@ public static void RunClassConstructor(RuntimeTypeHandle type)
public static void RunModuleConstructor(ModuleHandle module)
{
- RuntimeModule rm = module.GetRuntimeModule();
- if (rm is null)
+ RuntimeModule rm = module.GetRuntimeModule() ??
throw new ArgumentException(SR.InvalidOperation_HandleIsNotInitialized, nameof(module));
RunModuleConstructor(new QCallModule(ref rm));
@@ -204,8 +202,7 @@ public static void RunModuleConstructor(ModuleHandle module)
public static unsafe void PrepareMethod(RuntimeMethodHandle method, RuntimeTypeHandle[]? instantiation)
{
- IRuntimeMethodInfo methodInfo = method.GetMethodInfo();
- if (methodInfo == null)
+ IRuntimeMethodInfo methodInfo = method.GetMethodInfo() ??
throw new ArgumentException(SR.InvalidOperation_HandleIsNotInitialized, nameof(method));
// defensive copy of user-provided array, per CopyRuntimeTypeHandles contract
diff --git a/src/coreclr/System.Private.CoreLib/src/System/Runtime/InteropServices/ComWrappers.cs b/src/coreclr/System.Private.CoreLib/src/System/Runtime/InteropServices/ComWrappers.cs
index ebe5db7df86a64..7e98a13fcc5393 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/Runtime/InteropServices/ComWrappers.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/Runtime/InteropServices/ComWrappers.cs
@@ -384,14 +384,13 @@ public static void GetIUnknownImpl(out IntPtr fpQueryInterface, out IntPtr fpAdd
internal static int CallICustomQueryInterface(object customQueryInterfaceMaybe, ref Guid iid, out IntPtr ppObject)
{
- var customQueryInterface = customQueryInterfaceMaybe as ICustomQueryInterface;
- if (customQueryInterface is null)
+ if (customQueryInterfaceMaybe is ICustomQueryInterface customQueryInterface)
{
- ppObject = IntPtr.Zero;
- return -1; // See TryInvokeICustomQueryInterfaceResult
+ return (int)customQueryInterface.GetInterface(ref iid, out ppObject);
}
- return (int)customQueryInterface.GetInterface(ref iid, out ppObject);
+ ppObject = IntPtr.Zero;
+ return -1; // See TryInvokeICustomQueryInterfaceResult
}
}
}
diff --git a/src/coreclr/System.Private.CoreLib/src/System/Runtime/InteropServices/CustomMarshalers/EnumerableViewOfDispatch.cs b/src/coreclr/System.Private.CoreLib/src/System/Runtime/InteropServices/CustomMarshalers/EnumerableViewOfDispatch.cs
index 3c152d5a3a7d36..4c23a4e3207719 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/Runtime/InteropServices/CustomMarshalers/EnumerableViewOfDispatch.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/Runtime/InteropServices/CustomMarshalers/EnumerableViewOfDispatch.cs
@@ -45,7 +45,7 @@ public Collections.IEnumerator GetEnumerator()
try
{
object? resultAsObject = result.ToObject();
- if (!(resultAsObject is IEnumVARIANT enumVariant))
+ if (resultAsObject is not IEnumVARIANT enumVariant)
{
throw new InvalidOperationException(SR.InvalidOp_InvalidNewEnumVariant);
}
diff --git a/src/coreclr/System.Private.CoreLib/src/System/Runtime/InteropServices/Marshal.CoreCLR.cs b/src/coreclr/System.Private.CoreLib/src/System/Runtime/InteropServices/Marshal.CoreCLR.cs
index 64669dd5458f78..d9913b6b6d9b80 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/Runtime/InteropServices/Marshal.CoreCLR.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/Runtime/InteropServices/Marshal.CoreCLR.cs
@@ -37,14 +37,10 @@ public static IntPtr OffsetOf(Type t, string fieldName)
{
ArgumentNullException.ThrowIfNull(t);
- FieldInfo? f = t.GetField(fieldName, BindingFlags.Instance | BindingFlags.Public | BindingFlags.NonPublic);
-
- if (f is null)
- {
+ FieldInfo f = t.GetField(fieldName, BindingFlags.Instance | BindingFlags.Public | BindingFlags.NonPublic) ??
throw new ArgumentException(SR.Format(SR.Argument_OffsetOfFieldNotFound, t.FullName), nameof(fieldName));
- }
- if (!(f is RtFieldInfo rtField))
+ if (f is not RtFieldInfo rtField)
{
throw new ArgumentException(SR.Argument_MustBeRuntimeFieldInfo, nameof(fieldName));
}
@@ -213,7 +209,7 @@ private static unsafe void WriteValueSlow<T>(object ptr, int ofs, T val, Action<object, int, T> writeValueHelper)
private static void PrelinkCore(MethodInfo m)
{
- if (!(m is RuntimeMethodInfo rmi))
+ if (m is not RuntimeMethodInfo rmi)
{
throw new ArgumentException(SR.Argument_MustBeRuntimeMethodInfo, nameof(m));
}
@@ -602,7 +598,7 @@ public static int ReleaseComObject(object o)
// Match .NET Framework behaviour.
throw new NullReferenceException();
}
- if (!(o is __ComObject co))
+ if (o is not __ComObject co)
{
throw new ArgumentException(SR.Argument_ObjNotComObject, nameof(o));
}
@@ -626,7 +622,7 @@ public static int FinalReleaseComObject(object o)
}
ArgumentNullException.ThrowIfNull(o);
- if (!(o is __ComObject co))
+ if (o is not __ComObject co)
{
throw new ArgumentException(SR.Argument_ObjNotComObject, nameof(o));
}
@@ -648,7 +644,7 @@ public static int FinalReleaseComObject(object o)
ArgumentNullException.ThrowIfNull(obj);
ArgumentNullException.ThrowIfNull(key);
- if (!(obj is __ComObject co))
+ if (obj is not __ComObject co)
{
throw new ArgumentException(SR.Argument_ObjNotComObject, nameof(obj));
}
@@ -673,7 +669,7 @@ public static bool SetComObjectData(object obj, object key, object? data)
ArgumentNullException.ThrowIfNull(obj);
ArgumentNullException.ThrowIfNull(key);
- if (!(obj is __ComObject co))
+ if (obj is not __ComObject co)
{
throw new ArgumentException(SR.Argument_ObjNotComObject, nameof(obj));
}
diff --git a/src/coreclr/System.Private.CoreLib/src/System/Runtime/Loader/AssemblyLoadContext.CoreCLR.cs b/src/coreclr/System.Private.CoreLib/src/System/Runtime/Loader/AssemblyLoadContext.CoreCLR.cs
index 27debf8d2a2718..bd2a5414a719aa 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/Runtime/Loader/AssemblyLoadContext.CoreCLR.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/Runtime/Loader/AssemblyLoadContext.CoreCLR.cs
@@ -110,7 +110,7 @@ internal Assembly LoadFromInMemoryModule(IntPtr moduleHandle)
// This method is invoked by the VM to resolve a satellite assembly reference
// after trying assembly resolution via Load override without success.
- private static Assembly? ResolveSatelliteAssembly(IntPtr gchManagedAssemblyLoadContext, AssemblyName assemblyName)
+ private static RuntimeAssembly? ResolveSatelliteAssembly(IntPtr gchManagedAssemblyLoadContext, AssemblyName assemblyName)
{
AssemblyLoadContext context = (AssemblyLoadContext)(GCHandle.FromIntPtr(gchManagedAssemblyLoadContext).Target)!;
@@ -136,7 +136,7 @@ private static IntPtr ResolveUnmanagedDllUsingEvent(string unmanagedDllName, Ass
// This method is invoked by the VM to resolve an assembly reference using the Resolving event
// after trying assembly resolution via Load override and TPA load context without success.
- private static Assembly? ResolveUsingResolvingEvent(IntPtr gchManagedAssemblyLoadContext, AssemblyName assemblyName)
+ private static RuntimeAssembly? ResolveUsingResolvingEvent(IntPtr gchManagedAssemblyLoadContext, AssemblyName assemblyName)
{
AssemblyLoadContext context = (AssemblyLoadContext)(GCHandle.FromIntPtr(gchManagedAssemblyLoadContext).Target)!;
// Invoke the AssemblyResolve event callbacks if wired up
diff --git a/src/coreclr/System.Private.CoreLib/src/System/RuntimeHandles.cs b/src/coreclr/System.Private.CoreLib/src/System/RuntimeHandles.cs
index a7fcf364f36d99..3d230536519cea 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/RuntimeHandles.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/RuntimeHandles.cs
@@ -18,24 +18,12 @@ namespace System
public unsafe partial struct RuntimeTypeHandle : IEquatable<RuntimeTypeHandle>, ISerializable
{
// Returns handle for interop with EE. The handle is guaranteed to be non-null.
- internal RuntimeTypeHandle GetNativeHandle()
- {
- // Create local copy to avoid a race condition
- RuntimeType type = m_type;
- if (type == null)
- throw new ArgumentNullException(null, SR.Arg_InvalidHandle);
- return new RuntimeTypeHandle(type);
- }
+ internal RuntimeTypeHandle GetNativeHandle() =>
+ new RuntimeTypeHandle(m_type ?? throw new ArgumentNullException(null, SR.Arg_InvalidHandle));
// Returns type for interop with EE. The type is guaranteed to be non-null.
- internal RuntimeType GetTypeChecked()
- {
- // Create local copy to avoid a race condition
- RuntimeType type = m_type;
- if (type == null)
- throw new ArgumentNullException(null, SR.Arg_InvalidHandle);
- return type;
- }
+ internal RuntimeType GetTypeChecked() =>
+ m_type ?? throw new ArgumentNullException(null, SR.Arg_InvalidHandle);
[MethodImpl(MethodImplOptions.InternalCall)]
internal static extern bool IsInstanceOfType(RuntimeType type, [NotNullWhen(true)] object? o);
@@ -441,8 +429,14 @@ internal IntPtr FreeGCHandle(IntPtr objHandle)
[MethodImpl(MethodImplOptions.InternalCall)]
internal static extern int GetNumVirtuals(RuntimeType type);
- [MethodImpl(MethodImplOptions.InternalCall)]
- internal static extern int GetNumVirtualsAndStaticVirtuals(RuntimeType type);
+ [LibraryImport(RuntimeHelpers.QCall, EntryPoint = "RuntimeTypeHandle_GetNumVirtualsAndStaticVirtuals")]
+ private static partial int GetNumVirtualsAndStaticVirtuals(QCallTypeHandle type);
+
+ internal static int GetNumVirtualsAndStaticVirtuals(RuntimeType type)
+ {
+ Debug.Assert(type != null);
+ return GetNumVirtualsAndStaticVirtuals(new QCallTypeHandle(ref type));
+ }
[LibraryImport(RuntimeHelpers.QCall, EntryPoint = "RuntimeTypeHandle_VerifyInterfaceIsImplemented")]
private static partial void VerifyInterfaceIsImplemented(QCallTypeHandle handle, QCallTypeHandle interfaceHandle);
@@ -784,7 +778,7 @@ public override int GetHashCode()
public override bool Equals(object? obj)
{
- if (!(obj is RuntimeMethodHandle))
+ if (obj is not RuntimeMethodHandle)
return false;
RuntimeMethodHandle handle = (RuntimeMethodHandle)obj;
@@ -1092,14 +1086,8 @@ public RuntimeFieldInfoStub(RuntimeFieldHandleInternal fieldHandle, object keepa
public unsafe partial struct RuntimeFieldHandle : IEquatable<RuntimeFieldHandle>, ISerializable
{
// Returns handle for interop with EE. The handle is guaranteed to be non-null.
- internal RuntimeFieldHandle GetNativeHandle()
- {
- // Create local copy to avoid a race condition
- IRuntimeFieldInfo field = m_ptr;
- if (field == null)
- throw new ArgumentNullException(null, SR.Arg_InvalidHandle);
- return new RuntimeFieldHandle(field);
- }
+ internal RuntimeFieldHandle GetNativeHandle() =>
+ new RuntimeFieldHandle(m_ptr ?? throw new ArgumentNullException(null, SR.Arg_InvalidHandle));
private readonly IRuntimeFieldInfo m_ptr;
@@ -1127,7 +1115,7 @@ public override int GetHashCode()
public override bool Equals(object? obj)
{
- if (!(obj is RuntimeFieldHandle))
+ if (obj is not RuntimeFieldHandle)
return false;
RuntimeFieldHandle handle = (RuntimeFieldHandle)obj;
@@ -1259,7 +1247,7 @@ public override int GetHashCode()
public override bool Equals([NotNullWhen(true)] object? obj)
{
- if (!(obj is ModuleHandle))
+ if (obj is not ModuleHandle)
return false;
ModuleHandle handle = (ModuleHandle)obj;
diff --git a/src/coreclr/System.Private.CoreLib/src/System/RuntimeType.CoreCLR.cs b/src/coreclr/System.Private.CoreLib/src/System/RuntimeType.CoreCLR.cs
index 90ad4bd92a01b8..c476619a463f76 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/RuntimeType.CoreCLR.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/RuntimeType.CoreCLR.cs
@@ -2730,9 +2730,7 @@ public override InterfaceMapping GetInterfaceMap([DynamicallyAccessedMembers(Dyn
ArgumentNullException.ThrowIfNull(interfaceType);
- RuntimeType? ifaceRtType = interfaceType as RuntimeType;
-
- if (ifaceRtType == null)
+ RuntimeType ifaceRtType = interfaceType as RuntimeType ??
throw new ArgumentException(SR.Argument_MustBeRuntimeType, nameof(interfaceType));
RuntimeTypeHandle ifaceRtTypeHandle = ifaceRtType.TypeHandle;
@@ -3312,7 +3310,7 @@ public override bool IsSubclassOf(Type type)
// Reflexive, symmetric, transitive.
public override bool IsEquivalentTo([NotNullWhen(true)] Type? other)
{
- if (!(other is RuntimeType otherRtType))
+ if (other is not RuntimeType otherRtType)
{
return false;
}
@@ -3580,10 +3578,7 @@ public override Type MakeGenericType(Type[] instantiation)
bool foundNonRuntimeType = false;
for (int i = 0; i < instantiation.Length; i++)
{
- Type instantiationElem = instantiation[i];
- if (instantiationElem == null)
- throw new ArgumentNullException();
-
+ Type instantiationElem = instantiation[i] ?? throw new ArgumentNullException();
RuntimeType? rtInstantiationElem = instantiationElem as RuntimeType;
if (rtInstantiationElem == null)
@@ -4327,7 +4322,7 @@ internal void Insert(K key, V value)
private static int GetHashCodeHelper(K key)
{
// For strings we don't want the key to differ across domains as CerHashtable might be shared.
- if (!(key is string sKey))
+ if (key is not string sKey)
{
return key.GetHashCode();
}
diff --git a/src/coreclr/System.Private.CoreLib/src/System/Variant.cs b/src/coreclr/System.Private.CoreLib/src/System/Variant.cs
index fe6e216881d6d0..1ee6acc5be1c47 100644
--- a/src/coreclr/System.Private.CoreLib/src/System/Variant.cs
+++ b/src/coreclr/System.Private.CoreLib/src/System/Variant.cs
@@ -370,7 +370,7 @@ internal static void MarshalHelperConvertObjectToVariant(object o, ref Variant v
// updated object back to the original type.
internal static void MarshalHelperCastVariant(object pValue, int vt, ref Variant v)
{
- if (!(pValue is IConvertible iv))
+ if (pValue is not IConvertible iv)
{
switch (vt)
{
diff --git a/src/coreclr/debug/daccess/request.cpp b/src/coreclr/debug/daccess/request.cpp
index 3731f9ad799a8a..33a8ad66354359 100644
--- a/src/coreclr/debug/daccess/request.cpp
+++ b/src/coreclr/debug/daccess/request.cpp
@@ -490,20 +490,31 @@ ClrDataAccess::GetMethodTableSlotEnumerator(CLRDATA_ADDRESS mt, ISOSMethodEnum *
HRESULT DacMethodTableSlotEnumerator::Init(PTR_MethodTable mTable)
{
- unsigned int slot = 0;
-
WORD numVtableSlots = mTable->GetNumVtableSlots();
- while (slot < numVtableSlots)
+ for (WORD slot = 0; slot < numVtableSlots; slot++)
{
- MethodDesc* pMD = mTable->GetMethodDescForSlot_NoThrow(slot);
- SOSMethodData methodData = {0};
- methodData.MethodDesc = HOST_CDADDR(pMD);
- methodData.Entrypoint = mTable->GetSlot(slot);
- methodData.DefininingMethodTable = PTR_CDADDR(pMD->GetMethodTable());
- methodData.DefiningModule = HOST_CDADDR(pMD->GetModule());
- methodData.Token = pMD->GetMemberDef();
+ SOSMethodData methodData = {0, 0, 0, 0, 0, 0};
+ MethodDesc* pMD = nullptr;
+
+ EX_TRY
+ {
+ pMD = mTable->GetMethodDescForSlot_NoThrow(slot);
+ }
+ EX_CATCH
+ {
+ }
+ EX_END_CATCH(SwallowAllExceptions)
- methodData.Slot = slot++;
+ if (pMD != nullptr)
+ {
+ methodData.MethodDesc = HOST_CDADDR(pMD);
+ methodData.DefininingMethodTable = PTR_CDADDR(pMD->GetMethodTable());
+ methodData.DefiningModule = HOST_CDADDR(pMD->GetModule());
+ methodData.Token = pMD->GetMemberDef();
+ }
+
+ methodData.Entrypoint = mTable->GetSlot(slot);
+ methodData.Slot = slot;
if (!mMethods.Add(methodData))
return E_OUTOFMEMORY;
@@ -2408,7 +2419,7 @@ ClrDataAccess::GetMethodTableForEEClass(CLRDATA_ADDRESS eeClassReallyCanonMT, CL
}
else
{
- hr = GetMethodTableForEEClassImpl (eeClassReallyCanonMT, value);
+ hr = GetMethodTableForEEClassImpl(eeClassReallyCanonMT, value);
}
SOSDacLeave();
return hr;
@@ -3535,7 +3546,7 @@ ClrDataAccess::GetUsefulGlobals(struct DacpUsefulGlobalsData* globalsData)
hr = m_cdacSos->GetUsefulGlobals(globalsData);
if (FAILED(hr))
{
- hr = GetUsefulGlobals(globalsData);
+ hr = GetUsefulGlobalsImpl(globalsData);
}
#ifdef _DEBUG
else
@@ -3554,7 +3565,7 @@ ClrDataAccess::GetUsefulGlobals(struct DacpUsefulGlobalsData* globalsData)
}
else
{
- hr = GetUsefulGlobals(globalsData);;
+ hr = GetUsefulGlobalsImpl(globalsData);
}
SOSDacLeave();
diff --git a/src/coreclr/debug/di/divalue.cpp b/src/coreclr/debug/di/divalue.cpp
index 853bac550e4554..53ba0f6e9d9885 100644
--- a/src/coreclr/debug/di/divalue.cpp
+++ b/src/coreclr/debug/di/divalue.cpp
@@ -2691,22 +2691,22 @@ HRESULT CordbObjectValue::GetFunctionHelper(ICorDebugFunction **ppFunction)
if (hr != S_OK)
return hr;
- mdMethodDef functionMethodDef = 0;
- VMPTR_DomainAssembly functionDomainAssembly;
- NativeCodeFunctionData nativeCodeForDelFunc;
-
- hr = pDAC->GetDelegateFunctionData(delType, pDelegateObj, &functionDomainAssembly, &functionMethodDef);
- if (hr != S_OK)
- return hr;
-
- // TODO: How to ensure results are sanitized?
- // Also, this is expensive. Do we really care that much about this?
- pDAC->GetNativeCodeInfo(functionDomainAssembly, functionMethodDef, &nativeCodeForDelFunc);
-
- RSSmartPtr<CordbModule> funcModule(GetProcess()->LookupOrCreateModule(functionDomainAssembly));
RSSmartPtr<CordbFunction> func;
{
RSLockHolder lockHolder(GetProcess()->GetProcessLock());
+
+ VMPTR_DomainAssembly functionDomainAssembly;
+ mdMethodDef functionMethodDef = 0;
+ hr = pDAC->GetDelegateFunctionData(delType, pDelegateObj, &functionDomainAssembly, &functionMethodDef);
+ if (hr != S_OK)
+ return hr;
+
+ // TODO: How to ensure results are sanitized?
+ // Also, this is expensive. Do we really care that much about this?
+ NativeCodeFunctionData nativeCodeForDelFunc;
+ pDAC->GetNativeCodeInfo(functionDomainAssembly, functionMethodDef, &nativeCodeForDelFunc);
+
+ RSSmartPtr<CordbModule> funcModule(GetProcess()->LookupOrCreateModule(functionDomainAssembly));
func.Assign(funcModule->LookupOrCreateFunction(functionMethodDef, nativeCodeForDelFunc.encVersion));
}
diff --git a/src/coreclr/debug/di/process.cpp b/src/coreclr/debug/di/process.cpp
index 805dc9996e31a9..aae787486adbc9 100644
--- a/src/coreclr/debug/di/process.cpp
+++ b/src/coreclr/debug/di/process.cpp
@@ -13503,6 +13503,40 @@ bool CordbProcess::IsSpecialStackOverflowCase(CordbUnmanagedThread *pUThread, co
return true;
}
+#ifdef FEATURE_INTEROP_DEBUGGING
+bool CordbProcess::IsUnmanagedThreadHijacked(ICorDebugThread * pICorDebugThread)
+{
+ PUBLIC_REENTRANT_API_ENTRY_FOR_SHIM(this);
+
+ if (GetShim() == NULL || !IsInteropDebugging())
+ {
+ return false;
+ }
+
+ {
+ RSLockHolder lockHolder(GetProcessLock());
+ CordbThread * pCordbThread = static_cast<CordbThread *>(pICorDebugThread);
+ HRESULT hr = pCordbThread->EnsureThreadIsAlive();
+ if (FAILED(hr))
+ {
+ return false;
+ }
+
+ // And only if we have a CordbUnmanagedThread and we are hijacked to code:Debugger::GenericHijackFunc
+ CordbUnmanagedThread * pUT = GetUnmanagedThread(pCordbThread->GetVolatileOSThreadID());
+ if (pUT != NULL)
+ {
+ if (pUT->IsFirstChanceHijacked() || pUT->IsGenericHijacked())
+ {
+ return true;
+ }
+ }
+ }
+ return false;
+}
+#endif // FEATURE_INTEROP_DEBUGGING
+
+
//-----------------------------------------------------------------------------
// Longhorn broke ContinueDebugEvent.
// In previous OS releases, DBG_CONTINUE would continue a non-continuable exception.
diff --git a/src/coreclr/debug/di/rspriv.h b/src/coreclr/debug/di/rspriv.h
index ee9f4e9849a47a..4f5acbbdc1c0b6 100644
--- a/src/coreclr/debug/di/rspriv.h
+++ b/src/coreclr/debug/di/rspriv.h
@@ -2879,6 +2879,10 @@ class IProcessShimHooks
virtual void RequestSyncAtEvent()= 0;
virtual bool IsThreadSuspendedOrHijacked(ICorDebugThread * pThread) = 0;
+
+#ifdef FEATURE_INTEROP_DEBUGGING
+ virtual bool IsUnmanagedThreadHijacked(ICorDebugThread * pICorDebugThread) = 0;
+#endif
};
@@ -3462,6 +3466,8 @@ class CordbProcess :
_ASSERTE(ThreadHoldsProcessLock());
return m_unmanagedThreads.GetBase(dwThreadId);
}
+
+ virtual bool IsUnmanagedThreadHijacked(ICorDebugThread * pICorDebugThread);
#endif // FEATURE_INTEROP_DEBUGGING
/*
diff --git a/src/coreclr/debug/di/shimpriv.h b/src/coreclr/debug/di/shimpriv.h
index ff0f16436a1f2c..2bd2fd5b45bd9a 100644
--- a/src/coreclr/debug/di/shimpriv.h
+++ b/src/coreclr/debug/di/shimpriv.h
@@ -431,6 +431,8 @@ class ShimProcess
bool IsThreadSuspendedOrHijacked(ICorDebugThread * pThread);
+ bool IsUnmanagedThreadHijacked(ICorDebugThread * pThread);
+
// Expose m_attached to CordbProcess.
bool GetAttached();
diff --git a/src/coreclr/debug/di/shimprocess.cpp b/src/coreclr/debug/di/shimprocess.cpp
index ac47a2e70e3aeb..c44be066370a5e 100644
--- a/src/coreclr/debug/di/shimprocess.cpp
+++ b/src/coreclr/debug/di/shimprocess.cpp
@@ -1654,3 +1654,12 @@ bool ShimProcess::IsThreadSuspendedOrHijacked(ICorDebugThread * pThread)
{
return m_pProcess->IsThreadSuspendedOrHijacked(pThread);
}
+
+bool ShimProcess::IsUnmanagedThreadHijacked(ICorDebugThread * pThread)
+{
+#ifdef FEATURE_INTEROP_DEBUGGING
+ return m_pProcess->IsUnmanagedThreadHijacked(pThread);
+#else
+ return false;
+#endif
+}
diff --git a/src/coreclr/debug/di/shimstackwalk.cpp b/src/coreclr/debug/di/shimstackwalk.cpp
index a0d84f1a5ecb89..984e0d0a113739 100644
--- a/src/coreclr/debug/di/shimstackwalk.cpp
+++ b/src/coreclr/debug/di/shimstackwalk.cpp
@@ -155,7 +155,8 @@ BOOL ShimStackWalk::ShouldTrackUMChain(StackWalkInfo * pswInfo)
// returning false above. We need to check the exception state to make sure we don't
// track the chain in this case. Since we know the type of Frame we are dealing with,
// we can make a more accurate determination of whether we should track the chain.
- if (GetInternalFrameType(pswInfo->GetCurrentInternalFrame()) == STUBFRAME_EXCEPTION)
+ // However if we are interop debugging and the thread is hijacked, we should track the chain.
+ if (!m_pProcess->IsUnmanagedThreadHijacked(m_pThread) && GetInternalFrameType(pswInfo->GetCurrentInternalFrame()) == STUBFRAME_EXCEPTION)
return FALSE;
return TRUE;
diff --git a/src/coreclr/dlls/mscordac/update.pl b/src/coreclr/dlls/mscordac/update.pl
deleted file mode 100644
index 353e1a177d8720..00000000000000
--- a/src/coreclr/dlls/mscordac/update.pl
+++ /dev/null
@@ -1,34 +0,0 @@
-#!perl -w
-
-#
-# Renames the DAC to a long name form that windbg looks for
-#
-
-my $sSrcFile = shift or &Usage();
-my $sDestName = shift or &Usage();
-my $sHostMach = shift or &Usage();
-my $sTargMach = shift or &Usage();
-my $sVersion = shift or &Usage();
-my $sDestDir = shift or &Usage();
-
-my $sName = "$sDestDir\\${sDestName}_${sHostMach}_${sTargMach}_" .
- "$sVersion";
-
-if ($ENV{'_BuildType'} eq "dbg" ||
- $ENV{'_BuildType'} eq "chk") {
- $sName .= "." . $ENV{'_BuildType'};
-}
-
-$sName .= ".dll";
-
-if (system("copy $sSrcFile $sName") / 256) {
- die("$0: Unable to copy $sSrcFile to $sName\n");
-}
-
-exit 0;
-
-sub Usage
-{
- die("usage: $0 " .
-     "<srcFile> <destName> <hostMach> <targMach> <version> <destDir>\n");
-}
diff --git a/src/coreclr/dlls/mscorrc/mscorrc.rc b/src/coreclr/dlls/mscorrc/mscorrc.rc
index 0088949d317126..2f6d8aae5906f7 100644
--- a/src/coreclr/dlls/mscorrc/mscorrc.rc
+++ b/src/coreclr/dlls/mscorrc/mscorrc.rc
@@ -702,7 +702,7 @@ END
STRINGTABLE DISCARDABLE
BEGIN
IDS_HOST_ASSEMBLY_RESOLVER_ASSEMBLY_ALREADY_LOADED_IN_CONTEXT "Assembly with same name is already loaded"
- IDS_HOST_ASSEMBLY_RESOLVER_DYNAMICALLY_EMITTED_ASSEMBLIES_UNSUPPORTED "Dynamically emitted assemblies are unsupported during host-based resolution."
+ IDS_HOST_ASSEMBLY_RESOLVER_DYNAMICALLY_EMITTED_ASSEMBLIES_UNSUPPORTED "Dynamically emitted assemblies are unsupported for AssemblyLoadContext resolution."
END
STRINGTABLE DISCARDABLE
diff --git a/src/coreclr/gc/env/gcenv.h b/src/coreclr/gc/env/gcenv.h
index f90efe93241da6..8a796a0ab9a8ec 100644
--- a/src/coreclr/gc/env/gcenv.h
+++ b/src/coreclr/gc/env/gcenv.h
@@ -55,72 +55,38 @@
LOGALWAYS(msg); \
} while(0)
+#define STRESS_LOG_WRITE(facility, level, msg, ...) do { \
+ if (StressLog::LogOn(facility, level)) \
+ StressLog::LogMsg(level, facility, StressLogMsg(msg, __VA_ARGS__)); \
+ LOG((facility, level, msg, __VA_ARGS__)); \
+ } while(0)
+
#define STRESS_LOG0(facility, level, msg) do { \
if (StressLog::LogOn(facility, level)) \
StressLog::LogMsg(level, facility, StressLogMsg(msg)); \
LOG((facility, level, msg)); \
} while(0)
-#define STRESS_LOG1(facility, level, msg, data1) do { \
- if (StressLog::LogOn(facility, level)) \
- StressLog::LogMsg(level, facility, \
- StressLogMsg(msg, (void*)(size_t)(data1))); \
- LOG((facility, level, msg, data1)); \
- } while(0)
+#define STRESS_LOG1(facility, level, msg, data1) \
+ STRESS_LOG_WRITE(facility, level, msg, data1)
-#define STRESS_LOG2(facility, level, msg, data1, data2) do { \
- if (StressLog::LogOn(facility, level)) \
- StressLog::LogMsg(level, facility, \
- StressLogMsg(msg, (void*)(size_t)(data1), (void*)(size_t)(data2))); \
- LOG((facility, level, msg, data1, data2)); \
- } while(0)
+#define STRESS_LOG2(facility, level, msg, data1, data2) \
+ STRESS_LOG_WRITE(facility, level, msg, data1, data2)
-#define STRESS_LOG2_CHECK_EE_STARTED(facility, level, msg, data1, data2) do { \
- if (g_fEEStarted) \
- STRESS_LOG2(facility, level, msg, data1, data2); \
- else \
- LOG((facility, level, msg, data1, data2)); \
- } while(0)
-
-#define STRESS_LOG3(facility, level, msg, data1, data2, data3) do { \
- if (StressLog::LogOn(facility, level)) \
- StressLog::LogMsg(level, facility, \
- StressLogMsg(msg, \
- (void*)(size_t)(data1),(void*)(size_t)(data2),(void*)(size_t)(data3))); \
- LOG((facility, level, msg, data1, data2, data3)); \
- } while(0)
+#define STRESS_LOG3(facility, level, msg, data1, data2, data3) \
+ STRESS_LOG_WRITE(facility, level, msg, data1, data2, data3)
-#define STRESS_LOG4(facility, level, msg, data1, data2, data3, data4) do { \
- if (StressLog::LogOn(facility, level)) \
- StressLog::LogMsg(level, facility, \
- StressLogMsg(msg, (void*)(size_t)(data1),(void*)(size_t)(data2), \
- (void*)(size_t)(data3),(void*)(size_t)(data4))); \
- LOG((facility, level, msg, data1, data2, data3, data4)); \
- } while(0)
+#define STRESS_LOG4(facility, level, msg, data1, data2, data3, data4) \
+ STRESS_LOG_WRITE(facility, level, msg, data1, data2, data3, data4)
-#define STRESS_LOG5(facility, level, msg, data1, data2, data3, data4, data5) do { \
- if (StressLog::LogOn(facility, level)) \
- StressLog::LogMsg(level, facility, 5, msg, (void*)(size_t)(data1), \
- (void*)(size_t)(data2),(void*)(size_t)(data3),(void*)(size_t)(data4), \
- (void*)(size_t)(data5)); \
- LOG((facility, level, msg, data1, data2, data3, data4, data5)); \
- } while(0)
+#define STRESS_LOG5(facility, level, msg, data1, data2, data3, data4, data5) \
+ STRESS_LOG_WRITE(facility, level, msg, data1, data2, data3, data4, data5)
-#define STRESS_LOG6(facility, level, msg, data1, data2, data3, data4, data5, data6) do { \
- if (StressLog::LogOn(facility, level)) \
- StressLog::LogMsg(level, facility, 6, msg, (void*)(size_t)(data1), \
- (void*)(size_t)(data2),(void*)(size_t)(data3),(void*)(size_t)(data4), \
- (void*)(size_t)(data5), (void*)(size_t)(data6)); \
- LOG((facility, level, msg, data1, data2, data3, data4, data5, data6)); \
- } while(0)
+#define STRESS_LOG6(facility, level, msg, data1, data2, data3, data4, data5, data6) \
+ STRESS_LOG_WRITE(facility, level, msg, data1, data2, data3, data4, data5, data6)
-#define STRESS_LOG7(facility, level, msg, data1, data2, data3, data4, data5, data6, data7) do { \
- if (StressLog::LogOn(facility, level)) \
- StressLog::LogMsg(level, facility, 7, msg, (void*)(size_t)(data1), \
- (void*)(size_t)(data2),(void*)(size_t)(data3),(void*)(size_t)(data4), \
- (void*)(size_t)(data5), (void*)(size_t)(data6), (void*)(size_t)(data7)); \
- LOG((facility, level, msg, data1, data2, data3, data4, data5, data6, data7)); \
- } while(0)
+#define STRESS_LOG7(facility, level, msg, data1, data2, data3, data4, data5, data6, data7) \
+ STRESS_LOG_WRITE(facility, level, msg, data1, data2, data3, data4, data5, data6, data7)
#define LOGALWAYS(msg)
@@ -205,200 +171,53 @@ struct StressLogMsg
const char* m_format;
void* m_args[16];
- StressLogMsg(const char* format) : m_cArgs(0), m_format(format)
- {
- }
-
- template < typename T1 >
- StressLogMsg(const char* format, T1 data1) : m_cArgs(1), m_format(format)
- {
- static_assert_no_msg(sizeof(T1) <= sizeof(void*));
- m_args[0] = (void*)(size_t)data1;
- }
-
- template < typename T1, typename T2 >
- StressLogMsg(const char* format, T1 data1, T2 data2) : m_cArgs(2), m_format(format)
- {
- static_assert_no_msg(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*));
- m_args[0] = (void*)(size_t)data1;
- m_args[1] = (void*)(size_t)data2;
- }
-
- template < typename T1, typename T2, typename T3 >
- StressLogMsg(const char* format, T1 data1, T2 data2, T3 data3) : m_cArgs(3), m_format(format)
- {
- static_assert_no_msg(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*));
- m_args[0] = (void*)(size_t)data1;
- m_args[1] = (void*)(size_t)data2;
- m_args[2] = (void*)(size_t)data3;
- }
-
- template < typename T1, typename T2, typename T3, typename T4 >
- StressLogMsg(const char* format, T1 data1, T2 data2, T3 data3, T4 data4) : m_cArgs(4), m_format(format)
- {
- static_assert_no_msg(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*));
- m_args[0] = (void*)(size_t)data1;
- m_args[1] = (void*)(size_t)data2;
- m_args[2] = (void*)(size_t)data3;
- m_args[3] = (void*)(size_t)data4;
- }
-
- template < typename T1, typename T2, typename T3, typename T4, typename T5 >
- StressLogMsg(const char* format, T1 data1, T2 data2, T3 data3, T4 data4, T5 data5) : m_cArgs(5), m_format(format)
+ template <typename T>
+ static void* ConvertArgument(T arg)
{
- static_assert_no_msg(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*) && sizeof(T5) <= sizeof(void*));
- m_args[0] = (void*)(size_t)data1;
- m_args[1] = (void*)(size_t)data2;
- m_args[2] = (void*)(size_t)data3;
- m_args[3] = (void*)(size_t)data4;
- m_args[4] = (void*)(size_t)data5;
+ static_assert_no_msg(sizeof(T) <= sizeof(void*));
+ return (void*)(size_t)arg;
}
- template < typename T1, typename T2, typename T3, typename T4, typename T5, typename T6 >
- StressLogMsg(const char* format, T1 data1, T2 data2, T3 data3, T4 data4, T5 data5, T6 data6) : m_cArgs(6), m_format(format)
- {
- static_assert_no_msg(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*) && sizeof(T5) <= sizeof(void*) && sizeof(T6) <= sizeof(void*));
- m_args[0] = (void*)(size_t)data1;
- m_args[1] = (void*)(size_t)data2;
- m_args[2] = (void*)(size_t)data3;
- m_args[3] = (void*)(size_t)data4;
- m_args[4] = (void*)(size_t)data5;
- m_args[5] = (void*)(size_t)data6;
- }
-
- template < typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7 >
- StressLogMsg(const char* format, T1 data1, T2 data2, T3 data3, T4 data4, T5 data5, T6 data6, T7 data7) : m_cArgs(7), m_format(format)
- {
- static_assert_no_msg(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*) && sizeof(T5) <= sizeof(void*) && sizeof(T6) <= sizeof(void*) && sizeof(T7) <= sizeof(void*));
- m_args[0] = (void*)(size_t)data1;
- m_args[1] = (void*)(size_t)data2;
- m_args[2] = (void*)(size_t)data3;
- m_args[3] = (void*)(size_t)data4;
- m_args[4] = (void*)(size_t)data5;
- m_args[5] = (void*)(size_t)data6;
- m_args[6] = (void*)(size_t)data7;
- }
-
- template < typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8 >
- StressLogMsg(const char* format, T1 data1, T2 data2, T3 data3, T4 data4, T5 data5, T6 data6, T7 data7, T8 data8) : m_cArgs(8), m_format(format)
- {
- static_assert_no_msg(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*) && sizeof(T5) <= sizeof(void*) && sizeof(T6) <= sizeof(void*) && sizeof(T7) <= sizeof(void*) && sizeof(T8) <= sizeof(void*));
- m_args[0] = (void*)(size_t)data1;
- m_args[1] = (void*)(size_t)data2;
- m_args[2] = (void*)(size_t)data3;
- m_args[3] = (void*)(size_t)data4;
- m_args[4] = (void*)(size_t)data5;
- m_args[5] = (void*)(size_t)data6;
- m_args[6] = (void*)(size_t)data7;
- m_args[7] = (void*)(size_t)data8;
- }
-
- template < typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9 >
- StressLogMsg(const char* format, T1 data1, T2 data2, T3 data3, T4 data4, T5 data5, T6 data6, T7 data7, T8 data8, T9 data9) : m_cArgs(9), m_format(format)
- {
- static_assert_no_msg(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*) && sizeof(T5) <= sizeof(void*) && sizeof(T6) <= sizeof(void*) && sizeof(T7) <= sizeof(void*) && sizeof(T8) <= sizeof(void*) && sizeof(T9) <= sizeof(void*));
- m_args[0] = (void*)(size_t)data1;
- m_args[1] = (void*)(size_t)data2;
- m_args[2] = (void*)(size_t)data3;
- m_args[3] = (void*)(size_t)data4;
- m_args[4] = (void*)(size_t)data5;
- m_args[5] = (void*)(size_t)data6;
- m_args[6] = (void*)(size_t)data7;
- m_args[7] = (void*)(size_t)data8;
- m_args[8] = (void*)(size_t)data9;
- }
-
- template < typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10 >
- StressLogMsg(const char* format, T1 data1, T2 data2, T3 data3, T4 data4, T5 data5, T6 data6, T7 data7, T8 data8, T9 data9, T10 data10) : m_cArgs(10), m_format(format)
+ StressLogMsg(const char* format) : m_cArgs(0), m_format(format)
{
- static_assert_no_msg(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*) && sizeof(T5) <= sizeof(void*) && sizeof(T6) <= sizeof(void*) && sizeof(T7) <= sizeof(void*) && sizeof(T8) <= sizeof(void*) && sizeof(T9) <= sizeof(void*) && sizeof(T10) <= sizeof(void*));
- m_args[0] = (void*)(size_t)data1;
- m_args[1] = (void*)(size_t)data2;
- m_args[2] = (void*)(size_t)data3;
- m_args[3] = (void*)(size_t)data4;
- m_args[4] = (void*)(size_t)data5;
- m_args[5] = (void*)(size_t)data6;
- m_args[6] = (void*)(size_t)data7;
- m_args[7] = (void*)(size_t)data8;
- m_args[8] = (void*)(size_t)data9;
- m_args[9] = (void*)(size_t)data10;
}
- template < typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10, typename T11 >
- StressLogMsg(const char* format, T1 data1, T2 data2, T3 data3, T4 data4, T5 data5, T6 data6, T7 data7, T8 data8, T9 data9, T10 data10, T11 data11) : m_cArgs(11), m_format(format)
+ template <typename... Ts>
+ StressLogMsg(const char* format, Ts... args)
+ : m_cArgs(sizeof...(args))
+ , m_format(format)
+ , m_args{ ConvertArgument(args)... }
{
- static_assert_no_msg(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*) && sizeof(T5) <= sizeof(void*) && sizeof(T6) <= sizeof(void*) && sizeof(T7) <= sizeof(void*) && sizeof(T8) <= sizeof(void*) && sizeof(T9) <= sizeof(void*) && sizeof(T10) <= sizeof(void*) && sizeof(T11) <= sizeof(void*));
- m_args[0] = (void*)(size_t)data1;
- m_args[1] = (void*)(size_t)data2;
- m_args[2] = (void*)(size_t)data3;
- m_args[3] = (void*)(size_t)data4;
- m_args[4] = (void*)(size_t)data5;
- m_args[5] = (void*)(size_t)data6;
- m_args[6] = (void*)(size_t)data7;
- m_args[7] = (void*)(size_t)data8;
- m_args[8] = (void*)(size_t)data9;
- m_args[9] = (void*)(size_t)data10;
- m_args[10] = (void*)(size_t)data11;
+ static_assert_no_msg(sizeof...(args) <= ARRAY_SIZE(m_args));
}
+};
- template < typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10, typename T11, typename T12 >
- StressLogMsg(const char* format, T1 data1, T2 data2, T3 data3, T4 data4, T5 data5, T6 data6, T7 data7, T8 data8, T9 data9, T10 data10, T11 data11, T12 data12) : m_cArgs(12), m_format(format)
- {
- static_assert_no_msg(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*) && sizeof(T5) <= sizeof(void*) && sizeof(T6) <= sizeof(void*) && sizeof(T7) <= sizeof(void*) && sizeof(T8) <= sizeof(void*) && sizeof(T9) <= sizeof(void*) && sizeof(T10) <= sizeof(void*) && sizeof(T11) <= sizeof(void*) && sizeof(T12) <= sizeof(void*));
- m_args[0] = (void*)(size_t)data1;
- m_args[1] = (void*)(size_t)data2;
- m_args[2] = (void*)(size_t)data3;
- m_args[3] = (void*)(size_t)data4;
- m_args[4] = (void*)(size_t)data5;
- m_args[5] = (void*)(size_t)data6;
- m_args[6] = (void*)(size_t)data7;
- m_args[7] = (void*)(size_t)data8;
- m_args[8] = (void*)(size_t)data9;
- m_args[9] = (void*)(size_t)data10;
- m_args[10] = (void*)(size_t)data11;
- m_args[11] = (void*)(size_t)data12;
- }
+template<>
+void* StressLogMsg::ConvertArgument(float arg) = delete;
+#ifdef TARGET_64BIT
+template<>
+inline void* StressLogMsg::ConvertArgument(double arg)
+{
+ return (void*)(size_t)(*((uint64_t*)&arg));
+}
+#else
+template<>
+void* StressLogMsg::ConvertArgument(double arg) = delete;
- template < typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10, typename T11, typename T12, typename T13 >
- StressLogMsg(const char* format, T1 data1, T2 data2, T3 data3, T4 data4, T5 data5, T6 data6, T7 data7, T8 data8, T9 data9, T10 data10, T11 data11, T12 data12, T13 data13) : m_cArgs(13), m_format(format)
- {
- static_assert_no_msg(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*) && sizeof(T5) <= sizeof(void*) && sizeof(T6) <= sizeof(void*) && sizeof(T7) <= sizeof(void*) && sizeof(T8) <= sizeof(void*) && sizeof(T9) <= sizeof(void*) && sizeof(T10) <= sizeof(void*) && sizeof(T11) <= sizeof(void*) && sizeof(T12) <= sizeof(void*) && sizeof(T13) <= sizeof(void*));
- m_args[0] = (void*)(size_t)data1;
- m_args[1] = (void*)(size_t)data2;
- m_args[2] = (void*)(size_t)data3;
- m_args[3] = (void*)(size_t)data4;
- m_args[4] = (void*)(size_t)data5;
- m_args[5] = (void*)(size_t)data6;
- m_args[6] = (void*)(size_t)data7;
- m_args[7] = (void*)(size_t)data8;
- m_args[8] = (void*)(size_t)data9;
- m_args[9] = (void*)(size_t)data10;
- m_args[10] = (void*)(size_t)data11;
- m_args[11] = (void*)(size_t)data12;
- m_args[12] = (void*)(size_t)data13;
- }
+// COMPAT: Truncate 64-bit integer arguments to 32-bit
+template<>
+inline void* StressLogMsg::ConvertArgument(uint64_t arg)
+{
+ return (void*)(size_t)arg;
+}
- template < typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10, typename T11, typename T12, typename T13, typename T14 >
- StressLogMsg(const char* format, T1 data1, T2 data2, T3 data3, T4 data4, T5 data5, T6 data6, T7 data7, T8 data8, T9 data9, T10 data10, T11 data11, T12 data12, T13 data13, T14 data14) : m_cArgs(14), m_format(format)
- {
- static_assert_no_msg(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*) && sizeof(T5) <= sizeof(void*) && sizeof(T6) <= sizeof(void*) && sizeof(T7) <= sizeof(void*) && sizeof(T8) <= sizeof(void*) && sizeof(T9) <= sizeof(void*) && sizeof(T10) <= sizeof(void*) && sizeof(T11) <= sizeof(void*) && sizeof(T12) <= sizeof(void*) && sizeof(T13) <= sizeof(void*) && sizeof(T14) <= sizeof(void*));
- m_args[0] = (void*)(size_t)data1;
- m_args[1] = (void*)(size_t)data2;
- m_args[2] = (void*)(size_t)data3;
- m_args[3] = (void*)(size_t)data4;
- m_args[4] = (void*)(size_t)data5;
- m_args[5] = (void*)(size_t)data6;
- m_args[6] = (void*)(size_t)data7;
- m_args[7] = (void*)(size_t)data8;
- m_args[8] = (void*)(size_t)data9;
- m_args[9] = (void*)(size_t)data10;
- m_args[10] = (void*)(size_t)data11;
- m_args[11] = (void*)(size_t)data12;
- m_args[12] = (void*)(size_t)data13;
- m_args[13] = (void*)(size_t)data14;
- }
-};
+template<>
+inline void* StressLogMsg::ConvertArgument(int64_t arg)
+{
+ return (void*)(size_t)arg;
+}
+#endif
class StressLog
{
diff --git a/src/coreclr/gc/gc.cpp b/src/coreclr/gc/gc.cpp
index 7f41750d553366..05e4ffb9cd0b84 100644
--- a/src/coreclr/gc/gc.cpp
+++ b/src/coreclr/gc/gc.cpp
@@ -25402,17 +25402,21 @@ void gc_heap::calculate_new_heap_count ()
// If there was a blocking gen2 GC, the overhead would be very large and most likely we would not pick it. So we
// rely on the gen2 sample's overhead calculated above.
float throughput_cost_percents[dynamic_heap_count_data_t::sample_size];
- for (int i = 0; i < dynamic_heap_count_data_t::sample_size; i++)
+
+ if (process_eph_samples_p)
{
- dynamic_heap_count_data_t::sample& sample = dynamic_heap_count_data.samples[i];
- assert (sample.elapsed_between_gcs > 0);
- throughput_cost_percents[i] = (sample.elapsed_between_gcs ? (((float)sample.msl_wait_time / n_heaps + sample.gc_pause_time) * 100.0f / (float)sample.elapsed_between_gcs) : 0.0f);
- assert (throughput_cost_percents[i] >= 0.0);
- if (throughput_cost_percents[i] > 100.0)
- throughput_cost_percents[i] = 100.0;
- dprintf (6666, ("sample %d in GC#%Id msl %I64d / %d + pause %I64d / elapsed %I64d = tcp: %.3f, surv %zd, gc speed %zd/ms", i,
- sample.gc_index, sample.msl_wait_time, n_heaps, sample.gc_pause_time, sample.elapsed_between_gcs, throughput_cost_percents[i],
- sample.gc_survived_size, (sample.gc_pause_time ? (sample.gc_survived_size * 1000 / sample.gc_pause_time) : 0)));
+ for (int i = 0; i < dynamic_heap_count_data_t::sample_size; i++)
+ {
+ dynamic_heap_count_data_t::sample& sample = dynamic_heap_count_data.samples[i];
+ assert (sample.elapsed_between_gcs > 0);
+ throughput_cost_percents[i] = (sample.elapsed_between_gcs ? (((float)sample.msl_wait_time / n_heaps + sample.gc_pause_time) * 100.0f / (float)sample.elapsed_between_gcs) : 0.0f);
+ assert (throughput_cost_percents[i] >= 0.0);
+ if (throughput_cost_percents[i] > 100.0)
+ throughput_cost_percents[i] = 100.0;
+ dprintf (6666, ("sample %d in GC#%Id msl %I64d / %d + pause %I64d / elapsed %I64d = tcp: %.3f, surv %zd, gc speed %zd/ms", i,
+ sample.gc_index, sample.msl_wait_time, n_heaps, sample.gc_pause_time, sample.elapsed_between_gcs, throughput_cost_percents[i],
+ sample.gc_survived_size, (sample.gc_pause_time ? (sample.gc_survived_size * 1000 / sample.gc_pause_time) : 0)));
+ }
}
float median_throughput_cost_percent = median_of_3 (throughput_cost_percents[0], throughput_cost_percents[1], throughput_cost_percents[2]);
@@ -33890,8 +33894,8 @@ void gc_heap::plan_phase (int condemned_gen_number)
}
#endif //FEATURE_EVENT_TRACE
-#if defined(_DEBUG)
- verify_committed_bytes ();
+#ifdef _DEBUG
+ verify_committed_bytes ();
#endif // _DEBUG
#ifdef MULTIPLE_HEAPS
@@ -47426,6 +47430,10 @@ void gc_heap::verify_committed_bytes_per_heap()
void gc_heap::verify_committed_bytes()
{
+#ifndef USE_REGIONS
+ // TODO, https://github.com/dotnet/runtime/issues/102706, re-enable the testing after fixing this bug
+ return;
+#endif //!USE_REGIONS
size_t total_committed = 0;
size_t committed_decommit; // unused
size_t committed_free; // unused
diff --git a/src/coreclr/gcinfo/gcinfoencoder.cpp b/src/coreclr/gcinfo/gcinfoencoder.cpp
index c116d6d0dbc7b8..0abf65047b8d9a 100644
--- a/src/coreclr/gcinfo/gcinfoencoder.cpp
+++ b/src/coreclr/gcinfo/gcinfoencoder.cpp
@@ -1216,19 +1216,42 @@ void GcInfoEncoder::Build()
///////////////////////////////////////////////////////////////////////
// Normalize call sites
+ // Eliminate call sites that fall inside interruptible ranges
///////////////////////////////////////////////////////////////////////
- _ASSERTE(m_NumCallSites == 0 || numInterruptibleRanges == 0);
-
UINT32 numCallSites = 0;
for(UINT32 callSiteIndex = 0; callSiteIndex < m_NumCallSites; callSiteIndex++)
{
UINT32 callSite = m_pCallSites[callSiteIndex];
- callSite += m_pCallSiteSizes[callSiteIndex];
+ // There's a contract with the EE that says for non-leaf stack frames, where the
+ // method is stopped at a call site, the EE will not query with the return PC, but
+ // rather the return PC *minus 1*.
+ // The reason is that variable/register liveness may change at the instruction immediately after the
+ // call, so we want such frames to appear as if they are "within" the call.
+ // Since we use "callSite" as the "key" when we search for the matching descriptor, also subtract 1 here
+ // (after, of course, adding the size of the call instruction to get the return PC).
+ callSite += m_pCallSiteSizes[callSiteIndex] - 1;
_ASSERTE(DENORMALIZE_CODE_OFFSET(NORMALIZE_CODE_OFFSET(callSite)) == callSite);
UINT32 normOffset = NORMALIZE_CODE_OFFSET(callSite);
- m_pCallSites[numCallSites++] = normOffset;
+
+ BOOL keepIt = TRUE;
+
+ for(UINT32 intRangeIndex = 0; intRangeIndex < numInterruptibleRanges; intRangeIndex++)
+ {
+ InterruptibleRange *pRange = &pRanges[intRangeIndex];
+ if(pRange->NormStopOffset > normOffset)
+ {
+ if(pRange->NormStartOffset <= normOffset)
+ {
+ keepIt = FALSE;
+ }
+ break;
+ }
+ }
+
+ if(keepIt)
+ m_pCallSites[numCallSites++] = normOffset;
}
GCINFO_WRITE_VARL_U(m_Info1, NORMALIZE_NUM_SAFE_POINTS(numCallSites), NUM_SAFE_POINTS_ENCBASE, NumCallSitesSize);
@@ -1395,7 +1418,7 @@ void GcInfoEncoder::Build()
for(pCurrent = pTransitions; pCurrent < pEndTransitions; )
{
- if(pCurrent->CodeOffset >= callSite)
+ if(pCurrent->CodeOffset > callSite)
{
couldBeLive |= liveState;
@@ -1750,7 +1773,7 @@ void GcInfoEncoder::Build()
{
for(pCurrent = pTransitions; pCurrent < pEndTransitions; )
{
- if(pCurrent->CodeOffset >= callSite)
+ if(pCurrent->CodeOffset > callSite)
{
// Time to record the call site
@@ -1849,7 +1872,7 @@ void GcInfoEncoder::Build()
for(pCurrent = pTransitions; pCurrent < pEndTransitions; )
{
- if(pCurrent->CodeOffset >= callSite)
+ if(pCurrent->CodeOffset > callSite)
{
// Time to encode the call site
@@ -1896,7 +1919,7 @@ void GcInfoEncoder::Build()
for(pCurrent = pTransitions; pCurrent < pEndTransitions; )
{
- if(pCurrent->CodeOffset >= callSite)
+ if(pCurrent->CodeOffset > callSite)
{
// Time to encode the call site
GCINFO_WRITE_VECTOR(m_Info1, liveState, CallSiteStateSize);
diff --git a/src/coreclr/ilasm/GrammarExtractor/GrammarExtractor.csproj b/src/coreclr/ilasm/GrammarExtractor/GrammarExtractor.csproj
new file mode 100644
index 00000000000000..f36d5e8fc38f5d
--- /dev/null
+++ b/src/coreclr/ilasm/GrammarExtractor/GrammarExtractor.csproj
@@ -0,0 +1,9 @@
+<Project Sdk="Microsoft.NET.Sdk">
+
+  <PropertyGroup>
+    <OutputType>Exe</OutputType>
+    <TargetFramework>$(NetCoreAppToolCurrent)</TargetFramework>
+    <Nullable>enable</Nullable>
+  </PropertyGroup>
+
+</Project>
diff --git a/src/coreclr/ilasm/GrammarExtractor/Program.cs b/src/coreclr/ilasm/GrammarExtractor/Program.cs
new file mode 100644
index 00000000000000..a993e32fcda00a
--- /dev/null
+++ b/src/coreclr/ilasm/GrammarExtractor/Program.cs
@@ -0,0 +1,98 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+using System;
+using System.IO;
+using System.Text.RegularExpressions;
+using static Patterns;
+
+if (args.Length != 1)
+{
+ Console.Error.WriteLine("Usage: <path-to-yacc-file>");
+ return 1;
+}
+
+string filePath = args[0];
+
+string fileContent = File.ReadAllText(filePath);
+
+var match = GetRegexExtractMarkers().Match(fileContent);
+if (!match.Success)
+{
+ Console.Error.WriteLine("Could not find %% markers");
+ return 1;
+}
+
+//string prefix = match.Groups[1].Value;
+string grammar = match.Groups[2].Value;
+
+// Remove any text in {}
+var regexRemoveTextInBraces = GetRegexRemoveTextInBraces();
+string previousGrammar;
+
+do
+{
+ previousGrammar = grammar;
+ grammar = regexRemoveTextInBraces.Replace(grammar, "$1");
+} while (grammar != previousGrammar);
+
+// Change keyword identifiers into the string they represent (lowercase)
+grammar = GetRegexKeywordIdentifiers().Replace(grammar, m => $"'{m.Groups[1].Value.ToLowerInvariant()}'");
+
+// Change assembler directives into their string (lowercase with a period)
+grammar = GetRegexAssemblerDirectives().Replace(grammar, m => $"'.{m.Groups[1].Value.ToLowerInvariant()}'");
+
+// Handle special punctuation
+grammar = GetRegexEllipsis().Replace(grammar, "'...'");
+grammar = GetRegexDcolon().Replace(grammar, "'::'");
+
+// Print the output header
+Console.Write(@"// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+
+Lexical tokens
+ ID - C style alphaNumeric identifier (e.g. Hello_There2)
+ DOTTEDNAME - Sequence of dot-separated IDs (e.g. System.Object)
+ QSTRING - C style quoted string (e.g. ""hi\n"")
+ SQSTRING - C style singlely quoted string(e.g. 'hi')
+ INT32 - C style 32 bit integer (e.g. 235, 03423, 0x34FFF)
+ INT64 - C style 64 bit integer (e.g. -2353453636235234, 0x34FFFFFFFFFF)
+ FLOAT64 - C style floating point number (e.g. -0.2323, 354.3423, 3435.34E-5)
+ INSTR_* - IL instructions of a particular class (see opcode.def).
+ HEXBYTE - 1- or 2-digit hexadecimal number (e.g., A2, F0).
+Auxiliary lexical tokens
+ TYPEDEF_T - Aliased class (TypeDef or TypeRef).
+ TYPEDEF_M - Aliased method.
+ TYPEDEF_F - Aliased field.
+ TYPEDEF_TS - Aliased type specification (TypeSpec).
+ TYPEDEF_MR - Aliased field/method reference (MemberRef).
+ TYPEDEF_CA - Aliased Custom Attribute.
+----------------------------------------------------------------------------------
+START : decls
+ ;");
+
+// Print the output
+Console.Write(grammar);
+
+return 0;
+
+internal static partial class Patterns
+{
+ [GeneratedRegex(@"^(.*)%%(.*)%%", RegexOptions.Singleline)]
+ internal static partial Regex GetRegexExtractMarkers();
+
+ [GeneratedRegex(@"\s*([^'])\{[^{}]*\}", RegexOptions.Singleline)]
+ internal static partial Regex GetRegexRemoveTextInBraces();
+
+ [GeneratedRegex(@"\b([A-Z0-9_]+)_\b", RegexOptions.Singleline)]
+ internal static partial Regex GetRegexKeywordIdentifiers();
+
+ [GeneratedRegex(@"\b_([A-Z0-9]+)\b", RegexOptions.Singleline)]
+ internal static partial Regex GetRegexAssemblerDirectives();
+
+ [GeneratedRegex(@"\bELLIPSIS\b", RegexOptions.Singleline)]
+ internal static partial Regex GetRegexEllipsis();
+
+ [GeneratedRegex(@"\bDCOLON\b", RegexOptions.Singleline)]
+ internal static partial Regex GetRegexDcolon();
+}
diff --git a/src/coreclr/ilasm/GrammarExtractor/README.md b/src/coreclr/ilasm/GrammarExtractor/README.md
new file mode 100644
index 00000000000000..caba704369262e
--- /dev/null
+++ b/src/coreclr/ilasm/GrammarExtractor/README.md
@@ -0,0 +1,10 @@
+# Grammar extractor for IL tools
+
+Tool to extract IL grammar in `Backus-Naur Form (BNF)` from `Yet Another Compiler-Compiler (Yacc)`.
+
+Usage:
+
+```sh
+cd runtime
+./dotnet.sh run --project src/coreclr/ilasm/GrammarExtractor src/coreclr/ilasm/asmparse.y > src/coreclr/ilasm/prebuilt/asmparse.grammar
+```
diff --git a/src/coreclr/ilasm/README.md b/src/coreclr/ilasm/README.md
index 35f621b6df9a1b..759eef939e59ae 100644
--- a/src/coreclr/ilasm/README.md
+++ b/src/coreclr/ilasm/README.md
@@ -16,3 +16,4 @@ $ docker run --rm -v$(pwd):/runtime -w /runtime/src/coreclr/ilasm alpine \
sh -c 'apk add bison && yacc asmparse.y -o prebuilt/asmparse.cpp'
```
+To generate grammar, see [GrammarExtractor README](GrammarExtractor/README.md).
diff --git a/src/coreclr/ilasm/extractGrammar.pl b/src/coreclr/ilasm/extractGrammar.pl
deleted file mode 100644
index 0e1c80e9f90104..00000000000000
--- a/src/coreclr/ilasm/extractGrammar.pl
+++ /dev/null
@@ -1,59 +0,0 @@
-# Licensed to the .NET Foundation under one or more agreements.
-# The .NET Foundation licenses this file to you under the MIT license.
-#
-# a simple script that extracts the grammar from a yacc file
-
-undef $/; # read in the whole file
-my $file = <>;
-$file =~ /^(.*)%%(.*)%%/s || die "Could not find %% markers";
-my $prefix = $1;
-my $grammar = $2;
-
-#my $line;
-#foreach $line (split /\n/s, $prefix) {
-# if ($line =~ /^\s*%token/) {
-# $line =~ s/\s*<.*>//g;
-# print "$line\n"
-# }
-#}
-
- # remove any text in {}
-while ($grammar =~ s/\s*([^']){[^{}]*}/$1/sg) {}
-
- # change keyword identifiers into the string they represent
-$grammar =~ s/\b([A-Z0-9_]+)_\b/'\L$1\E'/sg;
-
- # change assembler directives into their string
-$grammar =~ s/\b_([A-Z0-9]+)\b/'\L.$1\E'/sg;
-
- # do the special punctuation by hand
-$grammar =~ s/\bELLIPSIS\b/'...'/sg;
-$grammar =~ s/\bDCOLON\b/'::'/sg;
-
-#
- # remove TODO comments
-$grammar =~ s/\n\s*\/\*[^\n]*TODO[^\n]*\*\/\s*\n/\n/sg;
-#
-
-print "Lexical tokens\n";
-print " ID - C style alphaNumeric identifier (e.g. Hello_There2)\n";
-print " DOTTEDNAME - Sequence of dot-separated IDs (e.g. System.Object)\n";
-print " QSTRING - C style quoted string (e.g. \"hi\\n\")\n";
-print " SQSTRING - C style singlely quoted string(e.g. 'hi')\n";
-print " INT32 - C style 32 bit integer (e.g. 235, 03423, 0x34FFF)\n";
-print " INT64 - C style 64 bit integer (e.g. -2353453636235234, 0x34FFFFFFFFFF)\n";
-print " FLOAT64 - C style floating point number (e.g. -0.2323, 354.3423, 3435.34E-5)\n";
-print " INSTR_* - IL instructions of a particular class (see opcode.def).\n";
-print " HEXBYTE - 1- or 2-digit hexadecimal number (e.g., A2, F0).\n";
-print "Auxiliary lexical tokens\n";
-print " TYPEDEF_T - Aliased class (TypeDef or TypeRef).\n";
-print " TYPEDEF_M - Aliased method.\n";
-print " TYPEDEF_F - Aliased field.\n";
-print " TYPEDEF_TS - Aliased type specification (TypeSpec).\n";
-print " TYPEDEF_MR - Aliased field/method reference (MemberRef).\n";
-print " TYPEDEF_CA - Aliased Custom Attribute.\n";
-print "----------------------------------------------------------------------------------\n";
-print "START : decls\n";
-print " ;";
-
-print $grammar;
diff --git a/src/coreclr/ilasm/prebuilt/asmparse.grammar b/src/coreclr/ilasm/prebuilt/asmparse.grammar
index 6d90815f8d28b3..544a442ed5605b 100644
--- a/src/coreclr/ilasm/prebuilt/asmparse.grammar
+++ b/src/coreclr/ilasm/prebuilt/asmparse.grammar
@@ -76,11 +76,11 @@ dottedName : id
| dottedName '.' dottedName
;
-int32 : INT32
+int32 : INT32_V
;
-int64 : INT64
- | INT32
+int64 : INT64_V
+ | INT32_V
;
float64 : FLOAT64
diff --git a/src/coreclr/inc/OpCodeGen.pl b/src/coreclr/inc/OpCodeGen.pl
deleted file mode 100644
index 952dd477ef681e..00000000000000
--- a/src/coreclr/inc/OpCodeGen.pl
+++ /dev/null
@@ -1,481 +0,0 @@
-# Licensed to the .NET Foundation under one or more agreements.
-# The .NET Foundation licenses this file to you under the MIT license.
-#
-# OpCodeGen.pl
-#
-# PERL script used to generate the numbering of the reference opcodes
-#
-#use strict 'vars';
-#use strict 'subs';
-#use strict 'refs';
-
-
-my $ret = 0;
-my %opcodeEnum;
-my %oneByte;
-my %twoByte;
-my %controlFlow;
-my @singleByteArg;
-my %stackbehav;
-my %opcodetype;
-my %operandtype;
-my %opcodes;
-my $popstate;
-my $pushstate;
-
-$ctrlflowcount = 0;
-
-$count = 0;
-
-my @lowercaseAlphabet = ('a'..'z','0'..'9');
-my %upcaseAlphabet = ();
-
-foreach $letter (@lowercaseAlphabet) {
- $j = $letter;
- $j=~tr/a-z/A-Z/;
- $upcaseAlphabet{$letter}=$j;
-}
-
-$license = "// Licensed to the .NET Foundation under one or more agreements.\n";
-$license .= "// The .NET Foundation licenses this file to you under the MIT license.\n";
-
-$startHeaderComment = "/*============================================================\n**\n";
-$endHeaderComment = "**\n** THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT BY HAND!\n";
-$endHeaderComment .= "** See \$(RepoRoot)\\src\\inc\\OpCodeGen.pl for more information.**\n";
-$endHeaderComment .= "==============================================================*/\n\n";
-
-$usingAndRefEmitNmsp = "namespace System.Reflection.Emit\n{\n\n";
-$obsoleteAttr = " [Obsolete(\"This API has been deprecated. https://go.microsoft.com/fwlink/?linkid=14202\")]\n";
-
-# Open source file and target files
-
-open (OPCODE, "opcode.def") or die "Couldn't open opcode.def: $!\n";
-open (OUTPUT, ">OpCodes.cs") or die "Couldn't open OpCodes.cs: $!\n";
-open (FCOUTPUT, ">FlowControl.cs") or die "Couldn't open FlowControl.cs: $!\n";
-open (SOUTPUT, ">StackBehaviour.cs") or die "Couldn't open StackBehaviour.cs: $!\n";
-open (OCOUTPUT, ">OpCodeType.cs") or die "Couldn't open OpCodeType.cs: $!\n";
-open (OPOUTPUT, ">OperandType.cs") or die "Couldn't open OperandType.cs: $!\n";
-
-print OUTPUT $license;
-print OUTPUT $startHeaderComment;
-print OUTPUT "** Class: OpCodes\n";
-print OUTPUT "**\n";
-print OUTPUT "** Purpose: Exposes all of the IL instructions supported by the runtime.\n";
-print OUTPUT $endHeaderComment;
-
-print OUTPUT $usingAndRefEmitNmsp;
-
-print FCOUTPUT $license;
-print FCOUTPUT $startHeaderComment;
-print FCOUTPUT "** Enumeration: FlowControl\n";
-print FCOUTPUT "**\n";
-print FCOUTPUT "** Purpose: Exposes FlowControl Attribute of IL.\n";
-print FCOUTPUT $endHeaderComment;
-
-print FCOUTPUT $usingAndRefEmitNmsp;
-print FCOUTPUT " public enum FlowControl\n {\n";
-
-print SOUTPUT $license;
-print SOUTPUT $startHeaderComment;
-print SOUTPUT "** Enumeration: StackBehaviour\n";
-print SOUTPUT "**\n";
-print SOUTPUT "** Purpose: Exposes StackBehaviour Attribute of IL.\n";
-print SOUTPUT $endHeaderComment;
-
-print SOUTPUT $usingAndRefEmitNmsp;
-print SOUTPUT " public enum StackBehaviour\n {\n";
-
-print OCOUTPUT $license;
-print OCOUTPUT $startHeaderComment;
-print OCOUTPUT "** Enumeration: OpCodeType\n";
-print OCOUTPUT "**\n";
-print OCOUTPUT "** Purpose: Exposes OpCodeType Attribute of IL.\n";
-print OCOUTPUT $endHeaderComment;
-
-print OCOUTPUT $usingAndRefEmitNmsp;
-print OCOUTPUT " public enum OpCodeType\n {\n";
-
-print OPOUTPUT $license;
-print OPOUTPUT $startHeaderComment;
-print OPOUTPUT "** Enumeration: OperandType\n";
-print OPOUTPUT "**\n";
-print OPOUTPUT "** Purpose: Exposes OperandType Attribute of IL.\n";
-print OPOUTPUT $endHeaderComment;
-
-print OPOUTPUT $usingAndRefEmitNmsp;
-print OPOUTPUT " public enum OperandType\n {\n";
-
-while ()
-{
- # Process only OPDEF(....) lines
- if (/OPDEF\(\s*/)
- {
- chop; # Strip off trailing CR
- s/^OPDEF\(\s*//; # Strip off "OP("
- s/,\s*/,/g; # Remove whitespace
- s/\).*$//; # Strip off ")" and everything behind it at end
-
- # Split the line up into its basic parts
- ($enumname, $stringname, $pop, $push, $operand, $type, $size, $s1, $s2, $ctrl) = split(/,/);
- $s1 =~ s/0x//;
- $s1 = hex($s1);
- $s2 =~ s/0x//;
- $s2 = hex($s2);
-
- if ($size == 0)
- {
- next;
- }
-
- next if ($enumname =~ /UNUSED/);
-
- #Remove the prefix
- $enumname=~s/CEE_//g;
-
- #Convert name to our casing convention
- $enumname=~tr/A-Z/a-z/;
- $enumname=~s/^(.)/\u$1/g;
- $enumname=~s/_(.)/_\u$1/g;
-
- #Convert pop to our casing convention
- $pop=~tr/A-Z/a-z/;
- $pop=~s/^(.)/\u$1/g;
- $pop=~s/_(.)/_\u$1/g;
-
- #Convert push to our casing convention
- $push=~tr/A-Z/a-z/;
- $push=~s/^(.)/\u$1/g;
- $push=~s/_(.)/_\u$1/g;
-
- #Convert operand to our casing convention
- #$operand=~tr/A-Z/a-z/;
- #$operand=~s/^(.)/\u$1/g;
- #$operand=~s/_(.)/_\u$1/g;
-
- #Remove the I prefix on type
- $type=~s/I//g;
-
- #Convert Type to our casing convention
- $type=~tr/A-Z/a-z/;
- $type=~s/^(.)/\u$1/g;
- $type=~s/_(.)/_\u$1/g;
-
- #Convert ctrl to our casing convention
- $ctrl=~tr/A-Z/a-z/;
- $ctrl=~s/^(.)/\u$1/g;
- $ctrl=~s/_(.)/_\u$1/g;
-
- # Make a list of the flow Control type
-
- # Make a list of the opcodes and their values
- if ($opcodes{$enumname})
- {
- }
- elsif ($size == 1)
- {
- $opcodes{$enumname} = $s2;
- }
- elsif ($size == 2)
- {
- $opcodes{$enumname} = ($s2 + 256 * $s1);
- }
-
- #Make a list of the instructions which only take one-byte arguments
- if ($enumname =~ /^.*_S$/) {
- #but exclude the deprecated expressions (sometimes spelled "depricated")
- if (!($enumname=~/^Depr.cated.*/)) {
- my $caseStatement = sprintf(" case %-20s: \n", $enumname);
- push(@singleByteArg, $caseStatement);
- }
- }
-
- #make a list of the control Flow Types
- if ($controlFlow{$ctrl})
- {
- #printf("DUPE Control Flow\n");
- }
- else
- {
- $controlFlow{$ctrl} = $ctrlflowcount;
- $ctrlflowcount++;
- }
-
- $ctrlflowcount = 0;
- #make a list of the StackBehaviour Types
- $pop=~s/\+/_/g;
- if ($stackbehav{$pop})
- {
- #printf("DUPE stack behaviour pop\n");
- }
- else
- {
- $stackbehav{$pop} = $ctrlflowcount;
- $ctrlflowcount++;
- }
-
- #make a list of the StackBehaviour Types
- $push=~s/\+/_/g;
- if ($stackbehav{$push})
- {
- #printf("DUPE stack behaviour push\n");
- }
- else
- {
- $stackbehav{$push} = $ctrlflowcount;
- $ctrlflowcount++;
- }
- #make a list of operand types
- if ($operandtype{$operand})
- {
- #printf("DUPE operand type\n");
- }
- else
- {
- $operandtype{$operand} = $ctrlflowcount;
- $ctrlflowcount++;
- }
-
-
- #make a list of opcode types
- if ($opcodetype{$type})
- {
- #printf("DUPE opcode type\n");
- }
- else
- {
- $opcodetype{$type} = $ctrlflowcount;
- $ctrlflowcount++;
- }
-
- my $opcodeName = $enumname;
-
- # Tailcall OpCode enum name does not comply with convention
- # that all enum names are exactly the same as names in opcode.def
- # file less leading CEE_ and changed casing convention
- $enumname = substr $enumname, 0, 4 unless $enumname !~ m/Tailcall$/;
-
- # If string name ends with dot OpCode enum name ends with underscore
- $enumname .= "_" unless $stringname !~ m/\."$/;
-
- printf(" OpCode name:%20s,\t\tEnum label:%20s,\t\tString name:%20s\n", $opcodeName, $enumname, $stringname);
- if ($stringname eq "arglist")
- {
- print "This is arglist----------\n";
- }
-
- my $lineEnum;
- if ($size == 1)
- {
- $lineEnum = sprintf(" %s = 0x%.2x,\n", $enumname, $s2);
- $opcodeEnum{$s2} = $lineEnum;
- }
- elsif ($size == 2)
- {
- $lineEnum = sprintf(" %s = 0x%.4x,\n", $enumname, $s2 + 256 * $s1);
- $opcodeEnum{$s2 + 256 * $s1} = $lineEnum;
- }
-
- my $line;
- $line = sprintf(" public static readonly OpCode %s = new OpCode(OpCodeValues.%s,\n", $opcodeName, $enumname);
- $line .= sprintf(" ((int)OperandType.%s) |\n", $operand);
- $line .= sprintf(" ((int)FlowControl.%s << OpCode.FlowControlShift) |\n", $ctrl);
- $line .= sprintf(" ((int)OpCodeType.%s << OpCode.OpCodeTypeShift) |\n", $type);
- $line .= sprintf(" ((int)StackBehaviour.%s << OpCode.StackBehaviourPopShift) |\n", $pop);
- $line .= sprintf(" ((int)StackBehaviour.%s << OpCode.StackBehaviourPushShift) |\n", $push);
-
- $popstate = 0;
- if($pop eq "Pop0" || $pop eq "Varpop")
- {
- $popstate = 0;
- }
- elsif ($pop eq "Pop1" || $pop eq "Popi" || $pop eq "Popref")
- {
- $popstate = $popstate -1;
- }
- elsif ($pop eq "Pop1_pop1" || $pop eq "Popi_pop1" || $pop eq "Popi_popi" || $pop eq "Popi_popi8" || $pop eq "Popi_popr4" || $pop eq "Popi_popr8" || $pop eq "Popref_pop1" || $pop eq "Popref_popi")
- {
- $popstate = $popstate -2;
- }
- elsif ($pop eq "Popi_popi_popi" || $pop eq "Popref_popi_popi" || $pop eq "Popref_popi_popi8" || $pop eq "Popref_popi_popr4" || $pop eq "Popref_popi_popr8" || $pop eq "Popref_popi_popref" || $pop eq "Popref_popi_pop1")
- {
- $popstate = $popstate -3;
- }
-
- if ($push eq "Push1" || $push eq "Pushi" ||$push eq "Pushi8" ||$push eq "Pushr4" ||$push eq "Pushr8" ||$push eq "Pushref")
- {
- $popstate = $popstate + 1;
- }
- elsif($push eq "Push1_push1")
- {
- $popstate = $popstate + 2;
- }
-
- $line .= sprintf(" (%s << OpCode.SizeShift) |\n", $size);
- if ($ctrl =~ m/Return/ || $ctrl =~ m/^Branch/ || $ctrl =~ m/^Throw/ || $enumname =~ m/Jmp/){
- $line .= sprintf(" OpCode.EndsUncondJmpBlkFlag |\n", $size);
- }
- $line .= sprintf(" (%d << OpCode.StackChangeShift)\n", $popstate);
- $line .= sprintf(" );\n\n");
-
- if ($size == 1)
- {
- if ($oneByte{$s2})
- {
- printf("Error opcode 0x%x already defined!\n", $s2);
- print " Old = $oneByte{$s2}";
- print " New = $line";
- $ret = -1;
- }
- $oneByte{$s2} = $line;
- }
- elsif ($size == 2)
- {
- if ($twoByte{$s2})
- {
- printf("Error opcode 0x%x%x already defined!\n", $s1, $s2);
- print " Old = $oneByte{$s2}";
- print " New = $line";
- $ret = -1;
- }
-
- $twoByte{$s2 + 256 * $s1} = $line;
- }
- else
- {
- $line .= "\n";
- push(@deprecated, $line);
- printf("deprecated code!\n");
- }
- $count++;
- }
-}
-
-# Generate the Flow Control enum
-$ctrlflowcount = 0;
-foreach $key (sort {$a cmp $b} keys (%controlFlow))
-{
- print FCOUTPUT " $key";
- print FCOUTPUT " = $ctrlflowcount,\n";
- $ctrlflowcount++;
- if ($key =~ m/Next/){
- print FCOUTPUT $obsoleteAttr;
- print FCOUTPUT " Phi";
- print FCOUTPUT " = $ctrlflowcount,\n";
- $ctrlflowcount++;
- }
-}
-#end the flowcontrol enum
-print FCOUTPUT " }\n}\n";
-
-# Generate the StackBehaviour enum
-$ctrlflowcount = 0;
-foreach $key (sort {$a cmp $b} keys (%stackbehav))
-{
- if ($key !~ m/Popref_popi_pop1/){
- print SOUTPUT " $key";
- print SOUTPUT " = $ctrlflowcount,\n";
- $ctrlflowcount++;
- }
-}
-print SOUTPUT " Popref_popi_pop1 = $ctrlflowcount,\n";
-#end the StackBehaviour enum
-print SOUTPUT " }\n}\n";
-
-# Generate OpCodeType enum
-$ctrlflowcount = 0;
-foreach $key (sort {$a cmp $b} keys (%opcodetype))
-{
- if ($ctrlflowcount == 0){
- print OCOUTPUT $obsoleteAttr;
- print OCOUTPUT " Annotation = 0,\n";
- $ctrlflowcount++;
- }
- print OCOUTPUT " $key";
- print OCOUTPUT " = $ctrlflowcount,\n";
- $ctrlflowcount++;
-}
-# end the OpCodeType enum
-print OCOUTPUT " }\n}\n";
-
-# Generate OperandType enum
-$ctrlflowcount = 0;
-foreach $key (sort {$a cmp $b} keys (%operandtype))
-{
- print OPOUTPUT " $key";
- print OPOUTPUT " = $ctrlflowcount,\n";
- $ctrlflowcount++;
- if ($key =~ m/InlineNone/){
- print OPOUTPUT $obsoleteAttr;
- print OPOUTPUT " InlinePhi = 6,\n";
- $ctrlflowcount++;
- }
- if ($key =~ m/^InlineR$/){
- $ctrlflowcount++;
- }
-}
-#end the OperandType enum
-print OPOUTPUT " }\n}\n";
-
-# Generate OpCodeValues internal enum
-print OUTPUT " ///\n";
-print OUTPUT " /// Internal enum OpCodeValues for opcode values.\n";
-print OUTPUT " ///\n";
-print OUTPUT " ///\n";
-print OUTPUT " /// Note that the value names are used to construct publicly visible\n";
-print OUTPUT " /// ilasm-compatible opcode names, so their exact form is important!\n";
-print OUTPUT " ///\n";
-print OUTPUT " internal enum OpCodeValues\n";
-print OUTPUT " {\n";
-
-foreach $opcodeValue (sort {$a <=> $b} keys(%opcodeEnum)) {
- print OUTPUT $opcodeEnum{$opcodeValue};
-}
-
-# End generating OpCodeValues internal enum
-print OUTPUT " }\n\n";
-
-
-# Generate public OpCodes class
-print OUTPUT " /// \n";
-print OUTPUT " /// \n";
-print OUTPUT " /// The IL instruction opcodes supported by the runtime.\n";
-print OUTPUT " /// The Specification of IL Instruction describes each Opcode.\n";
-print OUTPUT " /// \n";
-print OUTPUT " /// \n";
-print OUTPUT " /// \n";
-print OUTPUT " public class OpCodes\n";
-print OUTPUT " {\n\n";;
-print OUTPUT " private OpCodes()\n {\n }\n\n";
-
-my $opcode;
-my $lastOp = -1;
-foreach $opcode (sort {$a <=> $b} keys(%oneByte)) {
- printf("***** GAP %d instrs ****\n", $opcode - $lastOp) if ($lastOp + 1 != $opcode && $lastOp > 0);
- print OUTPUT $oneByte{$opcode};
- $lastOp = $opcode;
-}
-
-$lastOp = -1;
-foreach $opcode (sort {$a <=> $b} keys(%twoByte)) {
- printf("***** GAP %d instrs ****\n", $opcode - $lastOp) if ($lastOp + 1 != $opcode && $lastOp > 0);
- print OUTPUT $twoByte{$opcode};
- $lastOp = $opcode;
-}
-
-print OUTPUT "\n";;
-print OUTPUT " public static bool TakesSingleByteArgument(OpCode inst)\n";
-print OUTPUT " {\n";
-print OUTPUT " switch (inst.OperandType)\n";
-print OUTPUT " {\n";
-print OUTPUT " case OperandType.ShortInlineBrTarget:\n";
-print OUTPUT " case OperandType.ShortInlineI:\n";
-print OUTPUT " case OperandType.ShortInlineVar:\n";
-print OUTPUT " return true;\n";
-print OUTPUT " }\n";
-print OUTPUT " return false;\n";
-print OUTPUT " }\n";
-
-# End Generate public OpCodes class and close namespace
-print OUTPUT " }\n}\n";
-
-exit($ret);
diff --git a/src/coreclr/inc/clrnt.h b/src/coreclr/inc/clrnt.h
index cacc865b715f02..bfea4fdf6cae7a 100644
--- a/src/coreclr/inc/clrnt.h
+++ b/src/coreclr/inc/clrnt.h
@@ -369,6 +369,24 @@ RtlVirtualUnwind(
IN OUT PKNONVOLATILE_CONTEXT_POINTERS ContextPointers OPTIONAL
);
+// Mirror the XSTATE_ARM64_SVE flags from winnt.h
+
+#ifndef XSTATE_ARM64_SVE
+#define XSTATE_ARM64_SVE (2)
+#endif // XSTATE_ARM64_SVE
+
+#ifndef XSTATE_MASK_ARM64_SVE
+#define XSTATE_MASK_ARM64_SVE (1ui64 << (XSTATE_ARM64_SVE))
+#endif // XSTATE_MASK_ARM64_SVE
+
+#ifndef CONTEXT_ARM64_XSTATE
+#define CONTEXT_ARM64_XSTATE (CONTEXT_ARM64 | 0x20L)
+#endif // CONTEXT_ARM64_XSTATE
+
+#ifndef CONTEXT_XSTATE
+#define CONTEXT_XSTATE CONTEXT_ARM64_XSTATE
+#endif // CONTEXT_XSTATE
+
#endif
#ifdef TARGET_LOONGARCH64
diff --git a/src/coreclr/inc/contract.h b/src/coreclr/inc/contract.h
index 6658d4a999cda3..a8f3f6f47c2263 100644
--- a/src/coreclr/inc/contract.h
+++ b/src/coreclr/inc/contract.h
@@ -1981,21 +1981,6 @@ inline ClrDebugState *GetClrDebugState(BOOL fAlloc)
#define LOCK_RELEASED_MULTIPLE(dbgStateLockType, cExits, pvLock) \
::GetClrDebugState()->LockReleased((dbgStateLockType), (cExits), (void*) (pvLock))
-// Use these only if you need to force multiple entrances or exits in a single
-// line (e.g., to restore the lock to a previous state). CRWLock in vm\rwlock.cpp does this
-#define EE_LOCK_TAKEN_MULTIPLE(cEntrances, pvLock) \
- LOCK_TAKEN_MULTIPLE(kDbgStateLockType_EE, cEntrances, pvLock)
-#define EE_LOCK_RELEASED_MULTIPLE(cExits, pvLock) \
- LOCK_RELEASED_MULTIPLE(kDbgStateLockType_EE, cExits, pvLock)
-#define HOST_BREAKABLE_CRST_TAKEN_MULTIPLE(cEntrances, pvLock) \
- LOCK_TAKEN_MULTIPLE(kDbgStateLockType_HostBreakableCrst, cEntrances, pvLock)
-#define HOST_BREAKABLE_CRST_RELEASED_MULTIPLE(cExits, pvLock) \
- LOCK_RELEASED_MULTIPLE(kDbgStateLockType_HostBreakableCrst, cExits, pvLock)
-#define USER_LOCK_TAKEN_MULTIPLE(cEntrances, pvLock) \
- LOCK_TAKEN_MULTIPLE(kDbgStateLockType_User, cEntrances, pvLock)
-#define USER_LOCK_RELEASED_MULTIPLE(cExits, pvLock) \
- LOCK_RELEASED_MULTIPLE(kDbgStateLockType_User, cExits, pvLock)
-
// These are most typically used
#define EE_LOCK_TAKEN(pvLock) \
LOCK_TAKEN_MULTIPLE(kDbgStateLockType_EE, 1, pvLock)
@@ -2014,12 +1999,6 @@ inline ClrDebugState *GetClrDebugState(BOOL fAlloc)
#define LOCK_TAKEN_MULTIPLE(dbgStateLockType, cEntrances, pvLock)
#define LOCK_RELEASED_MULTIPLE(dbgStateLockType, cExits, pvLock)
-#define EE_LOCK_TAKEN_MULTIPLE(cEntrances, pvLock)
-#define EE_LOCK_RELEASED_MULTIPLE(cExits, pvLock)
-#define HOST_BREAKABLE_CRST_TAKEN_MULTIPLE(cEntrances, pvLock)
-#define HOST_BREAKABLE_CRST_RELEASED_MULTIPLE(cExits, pvLock)
-#define USER_LOCK_TAKEN_MULTIPLE(cEntrances, pvLock)
-#define USER_LOCK_RELEASED_MULTIPLE(cExits, pvLock)
#define EE_LOCK_TAKEN(pvLock)
#define EE_LOCK_RELEASED(pvLock)
#define HOST_BREAKABLE_CRST_TAKEN(pvLock)
diff --git a/src/coreclr/inc/corinfo.h b/src/coreclr/inc/corinfo.h
index 7333b15c7d5df6..16a9fa1b3007c7 100644
--- a/src/coreclr/inc/corinfo.h
+++ b/src/coreclr/inc/corinfo.h
@@ -307,45 +307,6 @@ struct SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR
}
};
-// StructFloadFieldInfoFlags: used on LoongArch64 architecture by `getLoongArch64PassStructInRegisterFlags` and
-// `getRISCV64PassStructInRegisterFlags` API to convey struct argument passing information.
-//
-// `STRUCT_NO_FLOAT_FIELD` means structs are not passed using the float register(s).
-//
-// Otherwise, and only for structs with no more than two fields and a total struct size no larger
-// than two pointers:
-//
-// The lowest four bits denote the floating-point info:
-// bit 0: `1` means there is only one float or double field within the struct.
-// bit 1: `1` means only the first field is floating-point type.
-// bit 2: `1` means only the second field is floating-point type.
-// bit 3: `1` means the two fields are both floating-point type.
-// The bits[5:4] denoting whether the field size is 8-bytes:
-// bit 4: `1` means the first field's size is 8.
-// bit 5: `1` means the second field's size is 8.
-//
-// Note that bit 0 and 3 cannot both be set.
-enum StructFloatFieldInfoFlags
-{
- STRUCT_NO_FLOAT_FIELD = 0x0,
- STRUCT_FLOAT_FIELD_ONLY_ONE = 0x1,
- STRUCT_FLOAT_FIELD_ONLY_TWO = 0x8,
- STRUCT_FLOAT_FIELD_FIRST = 0x2,
- STRUCT_FLOAT_FIELD_SECOND = 0x4,
- STRUCT_FIRST_FIELD_SIZE_IS8 = 0x10,
- STRUCT_SECOND_FIELD_SIZE_IS8 = 0x20,
-
- STRUCT_FIRST_FIELD_DOUBLE = (STRUCT_FLOAT_FIELD_FIRST | STRUCT_FIRST_FIELD_SIZE_IS8),
- STRUCT_SECOND_FIELD_DOUBLE = (STRUCT_FLOAT_FIELD_SECOND | STRUCT_SECOND_FIELD_SIZE_IS8),
- STRUCT_FIELD_TWO_DOUBLES = (STRUCT_FIRST_FIELD_SIZE_IS8 | STRUCT_SECOND_FIELD_SIZE_IS8 | STRUCT_FLOAT_FIELD_ONLY_TWO),
-
- STRUCT_MERGE_FIRST_SECOND = (STRUCT_FLOAT_FIELD_FIRST | STRUCT_FLOAT_FIELD_ONLY_TWO),
- STRUCT_MERGE_FIRST_SECOND_8 = (STRUCT_FLOAT_FIELD_FIRST | STRUCT_FLOAT_FIELD_ONLY_TWO | STRUCT_SECOND_FIELD_SIZE_IS8),
-
- STRUCT_HAS_FLOAT_FIELDS_MASK = (STRUCT_FLOAT_FIELD_FIRST | STRUCT_FLOAT_FIELD_SECOND | STRUCT_FLOAT_FIELD_ONLY_TWO | STRUCT_FLOAT_FIELD_ONLY_ONE),
- STRUCT_HAS_8BYTES_FIELDS_MASK = (STRUCT_FIRST_FIELD_SIZE_IS8 | STRUCT_SECOND_FIELD_SIZE_IS8),
-};
-
#include "corinfoinstructionset.h"
// CorInfoHelpFunc defines the set of helpers (accessed via the ICorDynamicInfo::getHelperFtn())
@@ -1940,6 +1901,23 @@ struct CORINFO_SWIFT_LOWERING
size_t numLoweredElements;
};
+#define MAX_FPSTRUCT_LOWERED_ELEMENTS 2
+
+// Lowering information on fields of a struct passed by hardware floating-point calling convention on RISC-V and LoongArch
+struct CORINFO_FPSTRUCT_LOWERING
+{
+ // Whether the struct should be passed by integer calling convention (cannot be passed by FP calling convention).
+ bool byIntegerCallConv;
+ // Types of lowered struct fields.
+ // Note: the integer field is denoted with a signed type reflecting size only so e.g. ushort is reported
+ // as CORINFO_TYPE_SHORT and object or string is reported as CORINFO_TYPE_LONG.
+ CorInfoType loweredElements[MAX_FPSTRUCT_LOWERED_ELEMENTS];
+ // Offsets of lowered struct fields.
+ uint32_t offsets[MAX_FPSTRUCT_LOWERED_ELEMENTS];
+ // Number of lowered struct fields.
+ size_t numLoweredElements;
+};
+
#define SIZEOF__CORINFO_Object TARGET_POINTER_SIZE /* methTable */
#define CORINFO_Array_MaxLength 0x7FFFFFC7
@@ -2402,7 +2380,7 @@ class ICorStaticInfo
virtual size_t getClassThreadStaticDynamicInfo (
CORINFO_CLASS_HANDLE cls
) = 0;
-
+
virtual bool getStaticBaseAddress(
CORINFO_CLASS_HANDLE cls,
bool isGc,
@@ -3065,8 +3043,9 @@ class ICorStaticInfo
// Classifies a swift structure into primitives or an implicit byref for ABI purposes.
virtual void getSwiftLowering(CORINFO_CLASS_HANDLE structHnd, CORINFO_SWIFT_LOWERING* pLowering) = 0;
- virtual uint32_t getLoongArch64PassStructInRegisterFlags(CORINFO_CLASS_HANDLE cls) = 0;
- virtual uint32_t getRISCV64PassStructInRegisterFlags(CORINFO_CLASS_HANDLE cls) = 0;
+ // Returns lowering info for fields of a RISC-V/LoongArch struct passed in registers according to
+ // hardware floating-point calling convention.
+ virtual void getFpStructLowering(CORINFO_CLASS_HANDLE structHnd, CORINFO_FPSTRUCT_LOWERING* pLowering) = 0;
};
/*****************************************************************************
diff --git a/src/coreclr/inc/gcinfotypes.h b/src/coreclr/inc/gcinfotypes.h
index 0727e5e1a7b3e4..7457063d47eb3f 100644
--- a/src/coreclr/inc/gcinfotypes.h
+++ b/src/coreclr/inc/gcinfotypes.h
@@ -678,8 +678,8 @@ void FASTCALL decodeCallPattern(int pattern,
#define NORMALIZE_SIZE_OF_STACK_AREA(x) ((x)>>2)
#define DENORMALIZE_SIZE_OF_STACK_AREA(x) ((x)<<2)
#define CODE_OFFSETS_NEED_NORMALIZATION 1
-#define NORMALIZE_CODE_OFFSET(x) ((x)>>1) // Instructions are 2/4 bytes long in Thumb/ARM states,
-#define DENORMALIZE_CODE_OFFSET(x) ((x)<<1)
+#define NORMALIZE_CODE_OFFSET(x) (x) // Instructions are 2/4 bytes long in Thumb/ARM states,
+#define DENORMALIZE_CODE_OFFSET(x) (x) // but the safe-point offsets are encoded with a -1 adjustment.
#define NORMALIZE_REGISTER(x) (x)
#define DENORMALIZE_REGISTER(x) (x)
#define NORMALIZE_NUM_SAFE_POINTS(x) (x)
@@ -734,9 +734,9 @@ void FASTCALL decodeCallPattern(int pattern,
#define DENORMALIZE_STACK_BASE_REGISTER(x) ((x)^29)
#define NORMALIZE_SIZE_OF_STACK_AREA(x) ((x)>>3)
#define DENORMALIZE_SIZE_OF_STACK_AREA(x) ((x)<<3)
-#define CODE_OFFSETS_NEED_NORMALIZATION 1
-#define NORMALIZE_CODE_OFFSET(x) ((x)>>2) // Instructions are 4 bytes long
-#define DENORMALIZE_CODE_OFFSET(x) ((x)<<2)
+#define CODE_OFFSETS_NEED_NORMALIZATION 0
+#define NORMALIZE_CODE_OFFSET(x) (x) // Instructions are 4 bytes long, but the safe-point
+#define DENORMALIZE_CODE_OFFSET(x) (x) // offsets are encoded with a -1 adjustment.
#define NORMALIZE_REGISTER(x) (x)
#define DENORMALIZE_REGISTER(x) (x)
#define NORMALIZE_NUM_SAFE_POINTS(x) (x)
@@ -789,9 +789,9 @@ void FASTCALL decodeCallPattern(int pattern,
#define DENORMALIZE_STACK_BASE_REGISTER(x) ((x) == 0 ? 22 : 3)
#define NORMALIZE_SIZE_OF_STACK_AREA(x) ((x)>>3)
#define DENORMALIZE_SIZE_OF_STACK_AREA(x) ((x)<<3)
-#define CODE_OFFSETS_NEED_NORMALIZATION 1
-#define NORMALIZE_CODE_OFFSET(x) ((x)>>2) // Instructions are 4 bytes long
-#define DENORMALIZE_CODE_OFFSET(x) ((x)<<2)
+#define CODE_OFFSETS_NEED_NORMALIZATION 0
+#define NORMALIZE_CODE_OFFSET(x) (x) // Instructions are 4 bytes long, but the safe-point
+#define DENORMALIZE_CODE_OFFSET(x) (x) // offsets are encoded with a -1 adjustment.
#define NORMALIZE_REGISTER(x) (x)
#define DENORMALIZE_REGISTER(x) (x)
#define NORMALIZE_NUM_SAFE_POINTS(x) (x)
@@ -844,9 +844,9 @@ void FASTCALL decodeCallPattern(int pattern,
#define DENORMALIZE_STACK_BASE_REGISTER(x) ((x) == 0 ? 8 : 2)
#define NORMALIZE_SIZE_OF_STACK_AREA(x) ((x)>>3)
#define DENORMALIZE_SIZE_OF_STACK_AREA(x) ((x)<<3)
-#define CODE_OFFSETS_NEED_NORMALIZATION 1
-#define NORMALIZE_CODE_OFFSET(x) ((x)>>2) // Instructions are 4 bytes long
-#define DENORMALIZE_CODE_OFFSET(x) ((x)<<2)
+#define CODE_OFFSETS_NEED_NORMALIZATION 0
+#define NORMALIZE_CODE_OFFSET(x) (x) // Instructions are 4 bytes long, but the safe-point
+#define DENORMALIZE_CODE_OFFSET(x) (x) // offsets are encoded with a -1 adjustment.
#define NORMALIZE_REGISTER(x) (x)
#define DENORMALIZE_REGISTER(x) (x)
#define NORMALIZE_NUM_SAFE_POINTS(x) (x)
diff --git a/src/coreclr/inc/genrops.pl b/src/coreclr/inc/genrops.pl
deleted file mode 100644
index f90aee2817f3b6..00000000000000
--- a/src/coreclr/inc/genrops.pl
+++ /dev/null
@@ -1,90 +0,0 @@
-# Licensed to the .NET Foundation under one or more agreements.
-# The .NET Foundation licenses this file to you under the MIT license.
-#
-# GENREFOPS.PL
-#
-# PERL script used to generate the numbering of the reference opcodes
-#
-#use strict 'vars';
-#use strict 'subs';
-#use strict 'refs';
-
-print "Reference opcodes\n";
-print "This file is presently only for human consumption\n";
-print "This file is generated from opcode.def using the genrops.pl script\n\n";
-print "Name String Name refop encode\n";
-print "-----------------------------------------------------------------\n";
-
-my $ret = 0;
-my %oneByte;
-my %twoByte;
-$count = 0;
-while (<>)
-{
- # Process only OPDEF(....) lines
- if (/OPDEF\(\s*/)
- {
- chop; # Strip off trailing CR
- s/^OPDEF\(\s*//; # Strip off "OP("
- s/\)$//; # Strip off ")" at end
- s/,\s*/,/g; # Remove whitespace
-
- # Split the line up into its basic parts
- ($enumname, $stringname, $pop, $push, $operand, $type, $size, $s1, $s2, $ctrl) = split(/,/);
- $s1 =~ s/0x//;
- $s1 = hex($s1);
- $s2 =~ s/0x//;
- $s2 = hex($s2);
-
-
- my $line = sprintf("%-24s %-24s 0x%03x",
- $enumname, $stringname, $count);
- if ($size == 1) {
- $line .= sprintf(" 0x%02x\n", $s2);
- if ($oneByte{$s2}) {
- printf("Error opcode 0x%x already defined!\n", $s2);
- print " Old = $oneByte{$s2}";
- print " New = $line";
- $ret = -1;
- }
- $oneByte{$s2} = $line;
- }
- elsif ($size == 2) {
- if ($twoByte{$s2}) {
- printf("Error opcode 0x%x 0x%x already defined!\n", $s1, $s2);
- print " Old = $twoByte{$s2}";
- print " New = $line";
- $ret = -1;
- }
- $line .= sprintf(" 0x%02x 0x%02x\n", $s1, $s2);
- $twoByte{$s2 + 256 * $s1} = $line;
- }
- else {
- $line .= "\n";
- push(@deprecated, $line);
- }
- $count++;
- }
-}
-
-my $opcode;
-my $lastOp = -1;
-foreach $opcode (sort {$a <=> $b} keys(%oneByte)) {
- printf("***** GAP %d instrs ****\n", $opcode - $lastOp) if ($lastOp + 1 != $opcode && $lastOp > 0);
- print $oneByte{$opcode};
- $lastOp = $opcode;
-}
-
-$lastOp = -1;
-foreach $opcode (sort {$a <=> $b} keys(%twoByte)) {
- printf("***** GAP %d instrs ****\n", $opcode - $lastOp) if ($lastOp + 1 != $opcode && $lastOp > 0);
- print $twoByte{$opcode};
- $lastOp = $opcode;
-}
-
-print @deprecated;
-
-exit($ret);
-
-
-
diff --git a/src/coreclr/inc/icorjitinfoimpl_generated.h b/src/coreclr/inc/icorjitinfoimpl_generated.h
index fc415320cad835..a8d1923f1971d8 100644
--- a/src/coreclr/inc/icorjitinfoimpl_generated.h
+++ b/src/coreclr/inc/icorjitinfoimpl_generated.h
@@ -520,11 +520,9 @@ void getSwiftLowering(
CORINFO_CLASS_HANDLE structHnd,
CORINFO_SWIFT_LOWERING* pLowering) override;
-uint32_t getLoongArch64PassStructInRegisterFlags(
- CORINFO_CLASS_HANDLE structHnd) override;
-
-uint32_t getRISCV64PassStructInRegisterFlags(
- CORINFO_CLASS_HANDLE structHnd) override;
+void getFpStructLowering(
+ CORINFO_CLASS_HANDLE structHnd,
+ CORINFO_FPSTRUCT_LOWERING* pLowering) override;
uint32_t getThreadTLSIndex(
void** ppIndirection) override;
diff --git a/src/coreclr/inc/jiteeversionguid.h b/src/coreclr/inc/jiteeversionguid.h
index a6ad06fade3389..6ec44578ba2aeb 100644
--- a/src/coreclr/inc/jiteeversionguid.h
+++ b/src/coreclr/inc/jiteeversionguid.h
@@ -43,11 +43,11 @@ typedef const GUID *LPCGUID;
#define GUID_DEFINED
#endif // !GUID_DEFINED
-constexpr GUID JITEEVersionIdentifier = { /* 488a17ce-26c9-4ad0-a7b7-79bf320ea4d1 */
- 0x488a17ce,
- 0x26c9,
- 0x4ad0,
- {0xa7, 0xb7, 0x79, 0xbf, 0x32, 0x0e, 0xa4, 0xd1}
+constexpr GUID JITEEVersionIdentifier = { /* e770e8ad-50d5-4511-a435-a3ed3a847a47 */
+ 0xe770e8ad,
+ 0x50d5,
+ 0x4511,
+ {0xa4, 0x35, 0xa3, 0xed, 0x3a, 0x84, 0x7a, 0x47}
};
//////////////////////////////////////////////////////////////////////////////////////////////////////////
diff --git a/src/coreclr/inc/stresslog.h b/src/coreclr/inc/stresslog.h
index ef8ecb1462986d..39a0d6c5be3705 100644
--- a/src/coreclr/inc/stresslog.h
+++ b/src/coreclr/inc/stresslog.h
@@ -71,126 +71,36 @@
LOGALWAYS(msg); \
} while(0)
-#define STRESS_LOG0(facility, level, msg) do { \
- if (StressLog::LogOn(facility, level)) \
- StressLog::LogMsg(level, facility, 0, msg); \
- LOG((facility, level, msg)); \
+#define STRESS_LOG_WRITE(facility, level, msg, ...) do { \
+ if (StressLog::StressLogOn(facility, level)) \
+ StressLog::LogMsgOL(facility, level, msg, __VA_ARGS__); \
} while(0)
-#define STRESS_LOG1(facility, level, msg, data1) do { \
- if (StressLog::LogOn(facility, level)) \
- StressLog::LogMsg(level, facility, 1, msg, (void*)(size_t)(data1));\
- LOG((facility, level, msg, data1)); \
+#define STRESS_LOG0(facility, level, msg) do { \
+ if (StressLog::StressLogOn(facility, level)) \
+ StressLog::LogMsg(facility, level, 0, msg); \
} while(0)
-#define STRESS_LOG2(facility, level, msg, data1, data2) do { \
- if (StressLog::LogOn(facility, level)) \
- StressLog::LogMsg(level, facility, 2, msg, \
- (void*)(size_t)(data1), (void*)(size_t)(data2)); \
- LOG((facility, level, msg, data1, data2)); \
- } while(0)
-
-#define STRESS_LOG2_CHECK_EE_STARTED(facility, level, msg, data1, data2) do { \
- if (g_fEEStarted) \
- STRESS_LOG2(facility, level, msg, data1, data2); \
- else \
- LOG((facility, level, msg, data1, data2)); \
- } while(0)
+#define STRESS_LOG1(facility, level, msg, data1) \
+ STRESS_LOG_WRITE(facility, level, msg, data1)
-#define STRESS_LOG3(facility, level, msg, data1, data2, data3) do { \
- if (StressLog::LogOn(facility, level)) \
- StressLog::LogMsg(level, facility, 3, msg, \
- (void*)(size_t)(data1),(void*)(size_t)(data2),(void*)(size_t)(data3)); \
- LOG((facility, level, msg, data1, data2, data3)); \
- } while(0)
+#define STRESS_LOG2(facility, level, msg, data1, data2) \
+ STRESS_LOG_WRITE(facility, level, msg, data1, data2)
-#define STRESS_LOG4(facility, level, msg, data1, data2, data3, data4) do { \
- if (StressLog::LogOn(facility, level)) \
- StressLog::LogMsg(level, facility, 4, msg, (void*)(size_t)(data1), \
- (void*)(size_t)(data2),(void*)(size_t)(data3),(void*)(size_t)(data4)); \
- LOG((facility, level, msg, data1, data2, data3, data4)); \
- } while(0)
+#define STRESS_LOG3(facility, level, msg, data1, data2, data3) \
+ STRESS_LOG_WRITE(facility, level, msg, data1, data2, data3)
-#define STRESS_LOG5(facility, level, msg, data1, data2, data3, data4, data5) do { \
- if (StressLog::LogOn(facility, level)) \
- StressLog::LogMsg(level, facility, 5, msg, (void*)(size_t)(data1), \
- (void*)(size_t)(data2),(void*)(size_t)(data3),(void*)(size_t)(data4), \
- (void*)(size_t)(data5)); \
- LOG((facility, level, msg, data1, data2, data3, data4, data5)); \
- } while(0)
+#define STRESS_LOG4(facility, level, msg, data1, data2, data3, data4) \
+ STRESS_LOG_WRITE(facility, level, msg, data1, data2, data3, data4)
-#define STRESS_LOG6(facility, level, msg, data1, data2, data3, data4, data5, data6) do { \
- if (StressLog::LogOn(facility, level)) \
- StressLog::LogMsg(level, facility, 6, msg, (void*)(size_t)(data1), \
- (void*)(size_t)(data2),(void*)(size_t)(data3),(void*)(size_t)(data4), \
- (void*)(size_t)(data5), (void*)(size_t)(data6)); \
- LOG((facility, level, msg, data1, data2, data3, data4, data5, data6)); \
- } while(0)
+#define STRESS_LOG5(facility, level, msg, data1, data2, data3, data4, data5) \
+ STRESS_LOG_WRITE(facility, level, msg, data1, data2, data3, data4, data5)
-#define STRESS_LOG7(facility, level, msg, data1, data2, data3, data4, data5, data6, data7) do { \
- if (StressLog::LogOn(facility, level)) \
- StressLog::LogMsg(level, facility, 7, msg, (void*)(size_t)(data1), \
- (void*)(size_t)(data2),(void*)(size_t)(data3),(void*)(size_t)(data4), \
- (void*)(size_t)(data5), (void*)(size_t)(data6), (void*)(size_t)(data7)); \
- LOG((facility, level, msg, data1, data2, data3, data4, data5, data6, data7)); \
- } while(0)
+#define STRESS_LOG6(facility, level, msg, data1, data2, data3, data4, data5, data6) \
+ STRESS_LOG_WRITE(facility, level, msg, data1, data2, data3, data4, data5, data6)
-#define STRESS_LOG_COND0(facility, level, cond, msg) do { \
- if (StressLog::LogOn(facility, level) && (cond)) \
- StressLog::LogMsg(level, facility, 0, msg); \
- LOG((facility, level, msg)); \
- } while(0)
-
-#define STRESS_LOG_COND1(facility, level, cond, msg, data1) do { \
- if (StressLog::LogOn(facility, level) && (cond)) \
- StressLog::LogMsg(level, facility, 1, msg, (void*)(size_t)(data1)); \
- LOG((facility, level, msg, data1)); \
- } while(0)
-
-#define STRESS_LOG_COND2(facility, level, cond, msg, data1, data2) do { \
- if (StressLog::LogOn(facility, level) && (cond)) \
- StressLog::LogMsg(level, facility, 2, msg, \
- (void*)(size_t)(data1), (void*)(size_t)(data2)); \
- LOG((facility, level, msg, data1, data2)); \
- } while(0)
-
-#define STRESS_LOG_COND3(facility, level, cond, msg, data1, data2, data3) do { \
- if (StressLog::LogOn(facility, level) && (cond)) \
- StressLog::LogMsg(level, facility, 3, msg, \
- (void*)(size_t)(data1),(void*)(size_t)(data2),(void*)(size_t)(data3)); \
- LOG((facility, level, msg, data1, data2, data3)); \
- } while(0)
-
-#define STRESS_LOG_COND4(facility, level, cond, msg, data1, data2, data3, data4) do { \
- if (StressLog::LogOn(facility, level) && (cond)) \
- StressLog::LogMsg(level, facility, 4, msg, (void*)(size_t)(data1), \
- (void*)(size_t)(data2),(void*)(size_t)(data3),(void*)(size_t)(data4)); \
- LOG((facility, level, msg, data1, data2, data3, data4)); \
- } while(0)
-
-#define STRESS_LOG_COND5(facility, level, cond, msg, data1, data2, data3, data4, data5) do { \
- if (StressLog::LogOn(facility, level) && (cond)) \
- StressLog::LogMsg(level, facility, 5, msg, (void*)(size_t)(data1), \
- (void*)(size_t)(data2),(void*)(size_t)(data3),(void*)(size_t)(data4), \
- (void*)(size_t)(data5)); \
- LOG((facility, level, msg, data1, data2, data3, data4, data5)); \
- } while(0)
-
-#define STRESS_LOG_COND6(facility, level, cond, msg, data1, data2, data3, data4, data5, data6) do { \
- if (StressLog::LogOn(facility, level) && (cond)) \
- StressLog::LogMsg(level, facility, 6, msg, (void*)(size_t)(data1), \
- (void*)(size_t)(data2),(void*)(size_t)(data3),(void*)(size_t)(data4), \
- (void*)(size_t)(data5), (void*)(size_t)(data6)); \
- LOG((facility, level, msg, data1, data2, data3, data4, data5, data6)); \
- } while(0)
-
-#define STRESS_LOG_COND7(facility, level, cond, msg, data1, data2, data3, data4, data5, data6, data7) do { \
- if (StressLog::LogOn(facility, level) && (cond)) \
- StressLog::LogMsg(level, facility, 7, msg, (void*)(size_t)(data1), \
- (void*)(size_t)(data2),(void*)(size_t)(data3),(void*)(size_t)(data4), \
- (void*)(size_t)(data5), (void*)(size_t)(data6), (void*)(size_t)(data7)); \
- LOG((facility, level, msg, data1, data2, data3, data4, data5, data6, data7)); \
- } while(0)
+#define STRESS_LOG7(facility, level, msg, data1, data2, data3, data4, data5, data6, data7) \
+ STRESS_LOG_WRITE(facility, level, msg, data1, data2, data3, data4, data5, data6, data7)
#define STRESS_LOG_RESERVE_MEM(numChunks) do { \
if (StressLog::StressLogOn(LF_ALL, LL_ALWAYS)) \
@@ -408,6 +318,13 @@ class StressLog {
static void AddModule(uint8_t* moduleBase);
+ template<typename T>
+ static void* ConvertArgument(T arg)
+ {
+ static_assert_no_msg(sizeof(T) <= sizeof(void*));
+ return (void*)(size_t)arg;
+ }
+
// Support functions for STRESS_LOG_VA
// We disable the warning "conversion from 'type' to 'type' of greater size" since everything will
// end up on the stack, and LogMsg will know the size of the variable based on the format string.
@@ -415,105 +332,20 @@ class StressLog {
#pragma warning( push )
#pragma warning( disable : 4312 )
#endif
+
static void LogMsgOL(const char* format)
{ LogMsg(LL_ALWAYS, LF_GC, 0, format); }
- template < typename T1 >
- static void LogMsgOL(const char* format, T1 data1)
- {
- static_assert_no_msg(sizeof(T1) <= sizeof(void*));
- LogMsg(LL_ALWAYS, LF_GC, 1, format, (void*)(size_t)data1);
- }
-
- template < typename T1, typename T2 >
- static void LogMsgOL(const char* format, T1 data1, T2 data2)
- {
- static_assert_no_msg(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*));
- LogMsg(LL_ALWAYS, LF_GC, 2, format, (void*)(size_t)data1, (void*)(size_t)data2);
- }
-
- template < typename T1, typename T2, typename T3 >
- static void LogMsgOL(const char* format, T1 data1, T2 data2, T3 data3)
- {
- static_assert_no_msg(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*));
- LogMsg(LL_ALWAYS, LF_GC, 3, format, (void*)(size_t)data1, (void*)(size_t)data2, (void*)(size_t)data3);
- }
-
- template < typename T1, typename T2, typename T3, typename T4 >
- static void LogMsgOL(const char* format, T1 data1, T2 data2, T3 data3, T4 data4)
- {
- static_assert_no_msg(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*));
- LogMsg(LL_ALWAYS, LF_GC, 4, format, (void*)(size_t)data1, (void*)(size_t)data2, (void*)(size_t)data3, (void*)(size_t)data4);
- }
-
- template < typename T1, typename T2, typename T3, typename T4, typename T5 >
- static void LogMsgOL(const char* format, T1 data1, T2 data2, T3 data3, T4 data4, T5 data5)
- {
- static_assert_no_msg(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*) && sizeof(T5) <= sizeof(void*));
- LogMsg(LL_ALWAYS, LF_GC, 5, format, (void*)(size_t)data1, (void*)(size_t)data2, (void*)(size_t)data3, (void*)(size_t)data4, (void*)(size_t)data5);
- }
-
- template < typename T1, typename T2, typename T3, typename T4, typename T5, typename T6 >
- static void LogMsgOL(const char* format, T1 data1, T2 data2, T3 data3, T4 data4, T5 data5, T6 data6)
- {
- static_assert_no_msg(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*) && sizeof(T5) <= sizeof(void*) && sizeof(T6) <= sizeof(void*));
- LogMsg(LL_ALWAYS, LF_GC, 6, format, (void*)(size_t)data1, (void*)(size_t)data2, (void*)(size_t)data3, (void*)(size_t)data4, (void*)(size_t)data5, (void*)(size_t)data6);
- }
-
- template < typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7 >
- static void LogMsgOL(const char* format, T1 data1, T2 data2, T3 data3, T4 data4, T5 data5, T6 data6, T7 data7)
- {
- static_assert_no_msg(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*) && sizeof(T5) <= sizeof(void*) && sizeof(T6) <= sizeof(void*) && sizeof(T7) <= sizeof(void*));
- LogMsg(LL_ALWAYS, LF_GC, 7, format, (void*)(size_t)data1, (void*)(size_t)data2, (void*)(size_t)data3, (void*)(size_t)data4, (void*)(size_t)data5, (void*)(size_t)data6, (void*)(size_t)data7);
- }
-
- template < typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8 >
- static void LogMsgOL(const char* format, T1 data1, T2 data2, T3 data3, T4 data4, T5 data5, T6 data6, T7 data7, T8 data8)
- {
- static_assert_no_msg(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*) && sizeof(T5) <= sizeof(void*) && sizeof(T6) <= sizeof(void*) && sizeof(T7) <= sizeof(void*) && sizeof(T8) <= sizeof(void*));
- LogMsg(LL_ALWAYS, LF_GC, 8, format, (void*)(size_t)data1, (void*)(size_t)data2, (void*)(size_t)data3, (void*)(size_t)data4, (void*)(size_t)data5, (void*)(size_t)data6, (void*)(size_t)data7, (void*)(size_t)data8);
- }
-
- template < typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9 >
- static void LogMsgOL(const char* format, T1 data1, T2 data2, T3 data3, T4 data4, T5 data5, T6 data6, T7 data7, T8 data8, T9 data9)
- {
- static_assert_no_msg(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*) && sizeof(T5) <= sizeof(void*) && sizeof(T6) <= sizeof(void*) && sizeof(T7) <= sizeof(void*) && sizeof(T8) <= sizeof(void*) && sizeof(T9) <= sizeof(void*));
- LogMsg(LL_ALWAYS, LF_GC, 9, format, (void*)(size_t)data1, (void*)(size_t)data2, (void*)(size_t)data3, (void*)(size_t)data4, (void*)(size_t)data5, (void*)(size_t)data6, (void*)(size_t)data7, (void*)(size_t)data8, (void*)(size_t)data9);
- }
-
- template < typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10 >
- static void LogMsgOL(const char* format, T1 data1, T2 data2, T3 data3, T4 data4, T5 data5, T6 data6, T7 data7, T8 data8, T9 data9, T10 data10)
- {
- static_assert_no_msg(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*) && sizeof(T5) <= sizeof(void*) && sizeof(T6) <= sizeof(void*) && sizeof(T7) <= sizeof(void*) && sizeof(T8) <= sizeof(void*) && sizeof(T9) <= sizeof(void*) && sizeof(T10) <= sizeof(void*));
- LogMsg(LL_ALWAYS, LF_GC, 10, format, (void*)(size_t)data1, (void*)(size_t)data2, (void*)(size_t)data3, (void*)(size_t)data4, (void*)(size_t)data5, (void*)(size_t)data6, (void*)(size_t)data7, (void*)(size_t)data8, (void*)(size_t)data9, (void*)(size_t)data10);
- }
-
- template < typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10, typename T11 >
- static void LogMsgOL(const char* format, T1 data1, T2 data2, T3 data3, T4 data4, T5 data5, T6 data6, T7 data7, T8 data8, T9 data9, T10 data10, T11 data11)
+ template<typename... Ts>
+ static void LogMsgOL(const char* format, Ts... args)
{
- static_assert_no_msg(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*) && sizeof(T5) <= sizeof(void*) && sizeof(T6) <= sizeof(void*) && sizeof(T7) <= sizeof(void*) && sizeof(T8) <= sizeof(void*) && sizeof(T9) <= sizeof(void*) && sizeof(T10) <= sizeof(void*) && sizeof(T11) <= sizeof(void*));
- LogMsg(LL_ALWAYS, LF_GC, 11, format, (void*)(size_t)data1, (void*)(size_t)data2, (void*)(size_t)data3, (void*)(size_t)data4, (void*)(size_t)data5, (void*)(size_t)data6, (void*)(size_t)data7, (void*)(size_t)data8, (void*)(size_t)data9, (void*)(size_t)data10, (void*)(size_t)data11);
+ LogMsg(LL_ALWAYS, LF_GC, sizeof...(args), format, ConvertArgument(args)...);
}
- template < typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10, typename T11, typename T12 >
- static void LogMsgOL(const char* format, T1 data1, T2 data2, T3 data3, T4 data4, T5 data5, T6 data6, T7 data7, T8 data8, T9 data9, T10 data10, T11 data11, T12 data12)
+ template<typename... Ts>
+ static void LogMsgOL(unsigned facility, unsigned level, const char* format, Ts... args)
{
- static_assert_no_msg(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*) && sizeof(T5) <= sizeof(void*) && sizeof(T6) <= sizeof(void*) && sizeof(T7) <= sizeof(void*) && sizeof(T8) <= sizeof(void*) && sizeof(T9) <= sizeof(void*) && sizeof(T10) <= sizeof(void*) && sizeof(T11) <= sizeof(void*) && sizeof(T12) <= sizeof(void*));
- LogMsg(LL_ALWAYS, LF_GC, 12, format, (void*)(size_t)data1, (void*)(size_t)data2, (void*)(size_t)data3, (void*)(size_t)data4, (void*)(size_t)data5, (void*)(size_t)data6, (void*)(size_t)data7, (void*)(size_t)data8, (void*)(size_t)data9, (void*)(size_t)data10, (void*)(size_t)data11, (void*)(size_t)data12);
- }
-
- template < typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10, typename T11, typename T12, typename T13 >
- static void LogMsgOL(const char* format, T1 data1, T2 data2, T3 data3, T4 data4, T5 data5, T6 data6, T7 data7, T8 data8, T9 data9, T10 data10, T11 data11, T12 data12, T13 data13)
- {
- static_assert_no_msg(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*) && sizeof(T5) <= sizeof(void*) && sizeof(T6) <= sizeof(void*) && sizeof(T7) <= sizeof(void*) && sizeof(T8) <= sizeof(void*) && sizeof(T9) <= sizeof(void*) && sizeof(T10) <= sizeof(void*) && sizeof(T11) <= sizeof(void*) && sizeof(T12) <= sizeof(void*) && sizeof(T13) <= sizeof(void*));
- LogMsg(LL_ALWAYS, LF_GC, 13, format, (void*)(size_t)data1, (void*)(size_t)data2, (void*)(size_t)data3, (void*)(size_t)data4, (void*)(size_t)data5, (void*)(size_t)data6, (void*)(size_t)data7, (void*)(size_t)data8, (void*)(size_t)data9, (void*)(size_t)data10, (void*)(size_t)data11, (void*)(size_t)data12, (void*)(size_t)data13);
- }
-
- template < typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10, typename T11, typename T12, typename T13, typename T14 >
- static void LogMsgOL(const char* format, T1 data1, T2 data2, T3 data3, T4 data4, T5 data5, T6 data6, T7 data7, T8 data8, T9 data9, T10 data10, T11 data11, T12 data12, T13 data13, T14 data14)
- {
- static_assert_no_msg(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*) && sizeof(T5) <= sizeof(void*) && sizeof(T6) <= sizeof(void*) && sizeof(T7) <= sizeof(void*) && sizeof(T8) <= sizeof(void*) && sizeof(T9) <= sizeof(void*) && sizeof(T10) <= sizeof(void*) && sizeof(T11) <= sizeof(void*) && sizeof(T12) <= sizeof(void*) && sizeof(T13) <= sizeof(void*) && sizeof(T14) <= sizeof(void*));
- LogMsg(LL_ALWAYS, LF_GC, 14, format, (void*)(size_t)data1, (void*)(size_t)data2, (void*)(size_t)data3, (void*)(size_t)data4, (void*)(size_t)data5, (void*)(size_t)data6, (void*)(size_t)data7, (void*)(size_t)data8, (void*)(size_t)data9, (void*)(size_t)data10, (void*)(size_t)data11, (void*)(size_t)data12, (void*)(size_t)data13, (void*)(size_t)data14);
+ LogMsg(level, facility, sizeof...(args), format, ConvertArgument(args)...);
}
#ifdef _MSC_VER
@@ -538,6 +370,34 @@ typedef USHORT
static StressLog theLog; // We only have one log, and this is it
};
+
+template<>
+void* StressLog::ConvertArgument(float arg) = delete;
+
+#if TARGET_64BIT
+template<>
+inline void* StressLog::ConvertArgument(double arg)
+{
+ return (void*)(size_t)(*((uint64_t*)&arg));
+}
+#else
+template<>
+void* StressLog::ConvertArgument(double arg) = delete;
+
+// COMPAT: Truncate 64-bit integer arguments to 32-bit
+template<>
+inline void* StressLog::ConvertArgument(uint64_t arg)
+{
+ return (void*)(size_t)arg;
+}
+
+template<>
+inline void* StressLog::ConvertArgument(int64_t arg)
+{
+ return (void*)(size_t)arg;
+}
+#endif
+
#ifndef STRESS_LOG_ANALYZER
typedef Holder> StressLogLockHolder;
#endif //!STRESS_LOG_ANALYZER
@@ -1009,193 +869,13 @@ struct StressLogMsg
{
}
- template < typename T1 >
- StressLogMsg(const char* format, T1 data1) : m_cArgs(1), m_format(format)
- {
- static_assert_no_msg(sizeof(T1) <= sizeof(void*));
- m_args[0] = (void*)(size_t)data1;
- }
-
- template < typename T1, typename T2 >
- StressLogMsg(const char* format, T1 data1, T2 data2) : m_cArgs(2), m_format(format)
- {
- static_assert_no_msg(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*));
- m_args[0] = (void*)(size_t)data1;
- m_args[1] = (void*)(size_t)data2;
- }
-
- template < typename T1, typename T2, typename T3 >
- StressLogMsg(const char* format, T1 data1, T2 data2, T3 data3) : m_cArgs(3), m_format(format)
- {
- static_assert_no_msg(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*));
- m_args[0] = (void*)(size_t)data1;
- m_args[1] = (void*)(size_t)data2;
- m_args[2] = (void*)(size_t)data3;
- }
-
- template < typename T1, typename T2, typename T3, typename T4 >
- StressLogMsg(const char* format, T1 data1, T2 data2, T3 data3, T4 data4) : m_cArgs(4), m_format(format)
- {
- static_assert_no_msg(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*));
- m_args[0] = (void*)(size_t)data1;
- m_args[1] = (void*)(size_t)data2;
- m_args[2] = (void*)(size_t)data3;
- m_args[3] = (void*)(size_t)data4;
- }
-
- template < typename T1, typename T2, typename T3, typename T4, typename T5 >
- StressLogMsg(const char* format, T1 data1, T2 data2, T3 data3, T4 data4, T5 data5) : m_cArgs(5), m_format(format)
- {
- static_assert_no_msg(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*) && sizeof(T5) <= sizeof(void*));
- m_args[0] = (void*)(size_t)data1;
- m_args[1] = (void*)(size_t)data2;
- m_args[2] = (void*)(size_t)data3;
- m_args[3] = (void*)(size_t)data4;
- m_args[4] = (void*)(size_t)data5;
- }
-
- template < typename T1, typename T2, typename T3, typename T4, typename T5, typename T6 >
- StressLogMsg(const char* format, T1 data1, T2 data2, T3 data3, T4 data4, T5 data5, T6 data6) : m_cArgs(6), m_format(format)
- {
- static_assert_no_msg(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*) && sizeof(T5) <= sizeof(void*) && sizeof(T6) <= sizeof(void*));
- m_args[0] = (void*)(size_t)data1;
- m_args[1] = (void*)(size_t)data2;
- m_args[2] = (void*)(size_t)data3;
- m_args[3] = (void*)(size_t)data4;
- m_args[4] = (void*)(size_t)data5;
- m_args[5] = (void*)(size_t)data6;
- }
-
- template < typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7 >
- StressLogMsg(const char* format, T1 data1, T2 data2, T3 data3, T4 data4, T5 data5, T6 data6, T7 data7) : m_cArgs(7), m_format(format)
- {
- static_assert_no_msg(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*) && sizeof(T5) <= sizeof(void*) && sizeof(T6) <= sizeof(void*) && sizeof(T7) <= sizeof(void*));
- m_args[0] = (void*)(size_t)data1;
- m_args[1] = (void*)(size_t)data2;
- m_args[2] = (void*)(size_t)data3;
- m_args[3] = (void*)(size_t)data4;
- m_args[4] = (void*)(size_t)data5;
- m_args[5] = (void*)(size_t)data6;
- m_args[6] = (void*)(size_t)data7;
- }
-
- template < typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8 >
- StressLogMsg(const char* format, T1 data1, T2 data2, T3 data3, T4 data4, T5 data5, T6 data6, T7 data7, T8 data8) : m_cArgs(8), m_format(format)
- {
- static_assert_no_msg(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*) && sizeof(T5) <= sizeof(void*) && sizeof(T6) <= sizeof(void*) && sizeof(T7) <= sizeof(void*) && sizeof(T8) <= sizeof(void*));
- m_args[0] = (void*)(size_t)data1;
- m_args[1] = (void*)(size_t)data2;
- m_args[2] = (void*)(size_t)data3;
- m_args[3] = (void*)(size_t)data4;
- m_args[4] = (void*)(size_t)data5;
- m_args[5] = (void*)(size_t)data6;
- m_args[6] = (void*)(size_t)data7;
- m_args[7] = (void*)(size_t)data8;
- }
-
- template < typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9 >
- StressLogMsg(const char* format, T1 data1, T2 data2, T3 data3, T4 data4, T5 data5, T6 data6, T7 data7, T8 data8, T9 data9) : m_cArgs(9), m_format(format)
- {
- static_assert_no_msg(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*) && sizeof(T5) <= sizeof(void*) && sizeof(T6) <= sizeof(void*) && sizeof(T7) <= sizeof(void*) && sizeof(T8) <= sizeof(void*) && sizeof(T9) <= sizeof(void*));
- m_args[0] = (void*)(size_t)data1;
- m_args[1] = (void*)(size_t)data2;
- m_args[2] = (void*)(size_t)data3;
- m_args[3] = (void*)(size_t)data4;
- m_args[4] = (void*)(size_t)data5;
- m_args[5] = (void*)(size_t)data6;
- m_args[6] = (void*)(size_t)data7;
- m_args[7] = (void*)(size_t)data8;
- m_args[8] = (void*)(size_t)data9;
- }
-
- template < typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10 >
- StressLogMsg(const char* format, T1 data1, T2 data2, T3 data3, T4 data4, T5 data5, T6 data6, T7 data7, T8 data8, T9 data9, T10 data10) : m_cArgs(10), m_format(format)
- {
- static_assert_no_msg(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*) && sizeof(T5) <= sizeof(void*) && sizeof(T6) <= sizeof(void*) && sizeof(T7) <= sizeof(void*) && sizeof(T8) <= sizeof(void*) && sizeof(T9) <= sizeof(void*) && sizeof(T10) <= sizeof(void*));
- m_args[0] = (void*)(size_t)data1;
- m_args[1] = (void*)(size_t)data2;
- m_args[2] = (void*)(size_t)data3;
- m_args[3] = (void*)(size_t)data4;
- m_args[4] = (void*)(size_t)data5;
- m_args[5] = (void*)(size_t)data6;
- m_args[6] = (void*)(size_t)data7;
- m_args[7] = (void*)(size_t)data8;
- m_args[8] = (void*)(size_t)data9;
- m_args[9] = (void*)(size_t)data10;
- }
-
- template < typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10, typename T11 >
- StressLogMsg(const char* format, T1 data1, T2 data2, T3 data3, T4 data4, T5 data5, T6 data6, T7 data7, T8 data8, T9 data9, T10 data10, T11 data11) : m_cArgs(11), m_format(format)
- {
- static_assert_no_msg(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*) && sizeof(T5) <= sizeof(void*) && sizeof(T6) <= sizeof(void*) && sizeof(T7) <= sizeof(void*) && sizeof(T8) <= sizeof(void*) && sizeof(T9) <= sizeof(void*) && sizeof(T10) <= sizeof(void*) && sizeof(T11) <= sizeof(void*));
- m_args[0] = (void*)(size_t)data1;
- m_args[1] = (void*)(size_t)data2;
- m_args[2] = (void*)(size_t)data3;
- m_args[3] = (void*)(size_t)data4;
- m_args[4] = (void*)(size_t)data5;
- m_args[5] = (void*)(size_t)data6;
- m_args[6] = (void*)(size_t)data7;
- m_args[7] = (void*)(size_t)data8;
- m_args[8] = (void*)(size_t)data9;
- m_args[9] = (void*)(size_t)data10;
- m_args[10] = (void*)(size_t)data11;
- }
-
- template < typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10, typename T11, typename T12 >
- StressLogMsg(const char* format, T1 data1, T2 data2, T3 data3, T4 data4, T5 data5, T6 data6, T7 data7, T8 data8, T9 data9, T10 data10, T11 data11, T12 data12) : m_cArgs(12), m_format(format)
- {
- static_assert_no_msg(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*) && sizeof(T5) <= sizeof(void*) && sizeof(T6) <= sizeof(void*) && sizeof(T7) <= sizeof(void*) && sizeof(T8) <= sizeof(void*) && sizeof(T9) <= sizeof(void*) && sizeof(T10) <= sizeof(void*) && sizeof(T11) <= sizeof(void*) && sizeof(T12) <= sizeof(void*));
- m_args[0] = (void*)(size_t)data1;
- m_args[1] = (void*)(size_t)data2;
- m_args[2] = (void*)(size_t)data3;
- m_args[3] = (void*)(size_t)data4;
- m_args[4] = (void*)(size_t)data5;
- m_args[5] = (void*)(size_t)data6;
- m_args[6] = (void*)(size_t)data7;
- m_args[7] = (void*)(size_t)data8;
- m_args[8] = (void*)(size_t)data9;
- m_args[9] = (void*)(size_t)data10;
- m_args[10] = (void*)(size_t)data11;
- m_args[11] = (void*)(size_t)data12;
- }
-
- template < typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10, typename T11, typename T12, typename T13 >
- StressLogMsg(const char* format, T1 data1, T2 data2, T3 data3, T4 data4, T5 data5, T6 data6, T7 data7, T8 data8, T9 data9, T10 data10, T11 data11, T12 data12, T13 data13) : m_cArgs(13), m_format(format)
- {
- static_assert_no_msg(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*) && sizeof(T5) <= sizeof(void*) && sizeof(T6) <= sizeof(void*) && sizeof(T7) <= sizeof(void*) && sizeof(T8) <= sizeof(void*) && sizeof(T9) <= sizeof(void*) && sizeof(T10) <= sizeof(void*) && sizeof(T11) <= sizeof(void*) && sizeof(T12) <= sizeof(void*) && sizeof(T13) <= sizeof(void*));
- m_args[0] = (void*)(size_t)data1;
- m_args[1] = (void*)(size_t)data2;
- m_args[2] = (void*)(size_t)data3;
- m_args[3] = (void*)(size_t)data4;
- m_args[4] = (void*)(size_t)data5;
- m_args[5] = (void*)(size_t)data6;
- m_args[6] = (void*)(size_t)data7;
- m_args[7] = (void*)(size_t)data8;
- m_args[8] = (void*)(size_t)data9;
- m_args[9] = (void*)(size_t)data10;
- m_args[10] = (void*)(size_t)data11;
- m_args[11] = (void*)(size_t)data12;
- m_args[12] = (void*)(size_t)data13;
- }
-
- template < typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10, typename T11, typename T12, typename T13, typename T14 >
- StressLogMsg(const char* format, T1 data1, T2 data2, T3 data3, T4 data4, T5 data5, T6 data6, T7 data7, T8 data8, T9 data9, T10 data10, T11 data11, T12 data12, T13 data13, T14 data14) : m_cArgs(14), m_format(format)
+ template<typename... Ts>
+ StressLogMsg(const char* format, Ts... args)
+ : m_cArgs(sizeof...(args))
+ , m_format(format)
+ , m_args{ StressLog::ConvertArgument(args)... }
{
- static_assert_no_msg(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*) && sizeof(T5) <= sizeof(void*) && sizeof(T6) <= sizeof(void*) && sizeof(T7) <= sizeof(void*) && sizeof(T8) <= sizeof(void*) && sizeof(T9) <= sizeof(void*) && sizeof(T10) <= sizeof(void*) && sizeof(T11) <= sizeof(void*) && sizeof(T12) <= sizeof(void*) && sizeof(T13) <= sizeof(void*) && sizeof(T14) <= sizeof(void*));
- m_args[0] = (void*)(size_t)data1;
- m_args[1] = (void*)(size_t)data2;
- m_args[2] = (void*)(size_t)data3;
- m_args[3] = (void*)(size_t)data4;
- m_args[4] = (void*)(size_t)data5;
- m_args[5] = (void*)(size_t)data6;
- m_args[6] = (void*)(size_t)data7;
- m_args[7] = (void*)(size_t)data8;
- m_args[8] = (void*)(size_t)data9;
- m_args[9] = (void*)(size_t)data10;
- m_args[10] = (void*)(size_t)data11;
- m_args[11] = (void*)(size_t)data12;
- m_args[12] = (void*)(size_t)data13;
- m_args[13] = (void*)(size_t)data14;
+ static_assert_no_msg(sizeof...(args) <= ARRAY_SIZE(m_args));
}
};
@@ -1205,7 +885,6 @@ struct StressLogMsg
#define STRESS_LOG0(facility, level, msg) do { } while(0)
#define STRESS_LOG1(facility, level, msg, data1) do { } while(0)
#define STRESS_LOG2(facility, level, msg, data1, data2) do { } while(0)
-#define STRESS_LOG2_CHECK_EE_STARTED(facility, level, msg, data1, data2)do { } while(0)
#define STRESS_LOG3(facility, level, msg, data1, data2, data3) do { } while(0)
#define STRESS_LOG4(facility, level, msg, data1, data2, data3, data4) do { } while(0)
#define STRESS_LOG5(facility, level, msg, data1, data2, data3, data4, data5) do { } while(0)
diff --git a/src/coreclr/jit/ICorJitInfo_names_generated.h b/src/coreclr/jit/ICorJitInfo_names_generated.h
index d6526b94ee9ee2..4779f13a029b84 100644
--- a/src/coreclr/jit/ICorJitInfo_names_generated.h
+++ b/src/coreclr/jit/ICorJitInfo_names_generated.h
@@ -130,8 +130,7 @@ DEF_CLR_API(getMethodNameFromMetadata)
DEF_CLR_API(getMethodHash)
DEF_CLR_API(getSystemVAmd64PassStructInRegisterDescriptor)
DEF_CLR_API(getSwiftLowering)
-DEF_CLR_API(getLoongArch64PassStructInRegisterFlags)
-DEF_CLR_API(getRISCV64PassStructInRegisterFlags)
+DEF_CLR_API(getFpStructLowering)
DEF_CLR_API(getThreadTLSIndex)
DEF_CLR_API(getAddrOfCaptureThreadGlobal)
DEF_CLR_API(getHelperFtn)
diff --git a/src/coreclr/jit/ICorJitInfo_wrapper_generated.hpp b/src/coreclr/jit/ICorJitInfo_wrapper_generated.hpp
index 3410d97d1fc565..9c81be10f41f7b 100644
--- a/src/coreclr/jit/ICorJitInfo_wrapper_generated.hpp
+++ b/src/coreclr/jit/ICorJitInfo_wrapper_generated.hpp
@@ -1238,22 +1238,13 @@ void WrapICorJitInfo::getSwiftLowering(
API_LEAVE(getSwiftLowering);
}
-uint32_t WrapICorJitInfo::getLoongArch64PassStructInRegisterFlags(
- CORINFO_CLASS_HANDLE structHnd)
-{
- API_ENTER(getLoongArch64PassStructInRegisterFlags);
- uint32_t temp = wrapHnd->getLoongArch64PassStructInRegisterFlags(structHnd);
- API_LEAVE(getLoongArch64PassStructInRegisterFlags);
- return temp;
-}
-
-uint32_t WrapICorJitInfo::getRISCV64PassStructInRegisterFlags(
- CORINFO_CLASS_HANDLE structHnd)
+void WrapICorJitInfo::getFpStructLowering(
+ CORINFO_CLASS_HANDLE structHnd,
+ CORINFO_FPSTRUCT_LOWERING* pLowering)
{
- API_ENTER(getRISCV64PassStructInRegisterFlags);
- uint32_t temp = wrapHnd->getRISCV64PassStructInRegisterFlags(structHnd);
- API_LEAVE(getRISCV64PassStructInRegisterFlags);
- return temp;
+ API_ENTER(getFpStructLowering);
+ wrapHnd->getFpStructLowering(structHnd, pLowering);
+ API_LEAVE(getFpStructLowering);
}
uint32_t WrapICorJitInfo::getThreadTLSIndex(
diff --git a/src/coreclr/jit/buildstring.cpp b/src/coreclr/jit/buildstring.cpp
index 3f0222ad2649ac..9843c9fcf516f2 100644
--- a/src/coreclr/jit/buildstring.cpp
+++ b/src/coreclr/jit/buildstring.cpp
@@ -1,17 +1,15 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
-#define STRINGIFY(L) #L
-#define MAKESTRING(M, L) M(L)
-#define STRINGIZE(X) MAKESTRING(STRINGIFY, X)
+#include "utils.h"
#if defined(__clang__)
#define BUILD_COMPILER \
- "Clang " STRINGIZE(__clang_major__) "." STRINGIZE(__clang_minor__) "." STRINGIZE(__clang_patchlevel__)
+ "Clang " STRINGIFY(__clang_major__) "." STRINGIFY(__clang_minor__) "." STRINGIFY(__clang_patchlevel__)
#elif defined(_MSC_VER)
-#define BUILD_COMPILER "MSVC " STRINGIZE(_MSC_FULL_VER)
+#define BUILD_COMPILER "MSVC " STRINGIFY(_MSC_FULL_VER)
#elif defined(__GNUC__)
-#define BUILD_COMPILER "GCC " STRINGIZE(__GNUC__) "." STRINGIZE(__GNUC_MINOR__) "." STRINGIZE(__GNUC_PATCHLEVEL__)
+#define BUILD_COMPILER "GCC " STRINGIFY(__GNUC__) "." STRINGIFY(__GNUC_MINOR__) "." STRINGIFY(__GNUC_PATCHLEVEL__)
#else
#define BUILD_COMPILER "Unknown"
#endif
@@ -26,6 +24,8 @@
#define TARGET_ARCH_STRING "arm64"
#elif defined(TARGET_LOONGARCH64)
#define TARGET_ARCH_STRING "loongarch64"
+#elif defined(TARGET_RISCV64)
+#define TARGET_ARCH_STRING "riscv64"
#else
#define TARGET_ARCH_STRING "Unknown"
#endif
diff --git a/src/coreclr/jit/codegenarmarch.cpp b/src/coreclr/jit/codegenarmarch.cpp
index 16cad5618112b7..b0d5d5727fc50d 100644
--- a/src/coreclr/jit/codegenarmarch.cpp
+++ b/src/coreclr/jit/codegenarmarch.cpp
@@ -1508,8 +1508,18 @@ void CodeGen::genCodeForPhysReg(GenTreePhysReg* tree)
var_types targetType = tree->TypeGet();
regNumber targetReg = tree->GetRegNum();
- inst_Mov(targetType, targetReg, tree->gtSrcReg, /* canSkip */ true);
- genTransferRegGCState(targetReg, tree->gtSrcReg);
+#ifdef TARGET_ARM64
+ if (varTypeIsMask(targetType))
+ {
+ assert(tree->gtSrcReg == REG_FFR);
+ GetEmitter()->emitIns_R(INS_sve_rdffr, EA_SCALABLE, targetReg);
+ }
+ else
+#endif
+ {
+ inst_Mov(targetType, targetReg, tree->gtSrcReg, /* canSkip */ true);
+ genTransferRegGCState(targetReg, tree->gtSrcReg);
+ }
genProduceReg(tree);
}
diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp
index b2bdd295adf54a..dad1ba0b929b89 100644
--- a/src/coreclr/jit/codegencommon.cpp
+++ b/src/coreclr/jit/codegencommon.cpp
@@ -1517,6 +1517,29 @@ void CodeGen::genExitCode(BasicBlock* block)
if (compiler->getNeedsGSSecurityCookie())
{
genEmitGSCookieCheck(jmpEpilog);
+
+ if (jmpEpilog)
+ {
+ // Dev10 642944 -
+ // The GS cookie check created a temp label that has no live
+ // incoming GC registers, we need to fix that
+
+ unsigned varNum;
+ LclVarDsc* varDsc;
+
+ /* Figure out which register parameters hold pointers */
+
+ for (varNum = 0, varDsc = compiler->lvaTable; varNum < compiler->lvaCount && varDsc->lvIsRegArg;
+ varNum++, varDsc++)
+ {
+ noway_assert(varDsc->lvIsParam);
+
+ gcInfo.gcMarkRegPtrVal(varDsc->GetArgReg(), varDsc->TypeGet());
+ }
+
+ GetEmitter()->emitThisGCrefRegs = GetEmitter()->emitInitGCrefRegs = gcInfo.gcRegGCrefSetCur;
+ GetEmitter()->emitThisByrefRegs = GetEmitter()->emitInitByrefRegs = gcInfo.gcRegByrefSetCur;
+ }
}
genReserveEpilog(block);
diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp
index e11d3cd30bbc17..a9b9b68517b7aa 100644
--- a/src/coreclr/jit/compiler.cpp
+++ b/src/coreclr/jit/compiler.cpp
@@ -927,37 +927,24 @@ var_types Compiler::getReturnTypeForStruct(CORINFO_CLASS_HANDLE clsHnd,
howToReturnStruct = SPK_ByReference;
useType = TYP_UNKNOWN;
}
-#elif defined(TARGET_LOONGARCH64)
+#elif defined(TARGET_RISCV64) || defined(TARGET_LOONGARCH64)
if (structSize <= (TARGET_POINTER_SIZE * 2))
{
- uint32_t floatFieldFlags = info.compCompHnd->getLoongArch64PassStructInRegisterFlags(clsHnd);
-
- if ((floatFieldFlags & STRUCT_FLOAT_FIELD_ONLY_ONE) != 0)
- {
- howToReturnStruct = SPK_PrimitiveType;
- useType = (structSize > 4) ? TYP_DOUBLE : TYP_FLOAT;
- }
- else if (floatFieldFlags & (STRUCT_HAS_FLOAT_FIELDS_MASK ^ STRUCT_FLOAT_FIELD_ONLY_ONE))
- {
- howToReturnStruct = SPK_ByValue;
- useType = TYP_STRUCT;
- }
- }
-
-#elif defined(TARGET_RISCV64)
- if (structSize <= (TARGET_POINTER_SIZE * 2))
- {
- uint32_t floatFieldFlags = info.compCompHnd->getRISCV64PassStructInRegisterFlags(clsHnd);
-
- if ((floatFieldFlags & STRUCT_FLOAT_FIELD_ONLY_ONE) != 0)
- {
- howToReturnStruct = SPK_PrimitiveType;
- useType = (structSize > 4) ? TYP_DOUBLE : TYP_FLOAT;
- }
- else if (floatFieldFlags & (STRUCT_HAS_FLOAT_FIELDS_MASK ^ STRUCT_FLOAT_FIELD_ONLY_ONE))
+ const CORINFO_FPSTRUCT_LOWERING* lowering = GetFpStructLowering(clsHnd);
+ if (!lowering->byIntegerCallConv)
{
- howToReturnStruct = SPK_ByValue;
- useType = TYP_STRUCT;
+ if (lowering->numLoweredElements == 1)
+ {
+ useType = JITtype2varType(lowering->loweredElements[0]);
+ assert(varTypeIsFloating(useType));
+ howToReturnStruct = SPK_PrimitiveType;
+ }
+ else
+ {
+ assert(lowering->numLoweredElements == 2);
+ howToReturnStruct = SPK_ByValue;
+ useType = TYP_STRUCT;
+ }
}
}
@@ -1998,6 +1985,9 @@ void Compiler::compInit(ArenaAllocator* pAlloc,
#ifdef SWIFT_SUPPORT
m_swiftLoweringCache = nullptr;
#endif
+#if defined(TARGET_RISCV64) || defined(TARGET_LOONGARCH64)
+ m_fpStructLoweringCache = nullptr;
+#endif
// check that HelperCallProperties are initialized
@@ -5273,7 +5263,7 @@ void Compiler::compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFl
#ifdef DEBUG
// Stash the current estimate of the function's size if necessary.
- if (verbose)
+ if (verbose && opts.OptimizationEnabled())
{
compSizeEstimate = 0;
compCycleEstimate = 0;
@@ -8301,6 +8291,53 @@ void Compiler::GetStructTypeOffset(
GetStructTypeOffset(structDesc, type0, type1, offset0, offset1);
}
+#elif defined(TARGET_RISCV64) || defined(TARGET_LOONGARCH64)
+//------------------------------------------------------------------------
+// GetFpStructLowering: Gets the information on passing of a struct according to hardware floating-point
+// calling convention, i.e. the types and offsets of struct fields lowered for passing.
+//
+// Arguments:
+// structHandle - type handle
+//
+// Return value:
+// Lowering info for the struct fields
+const CORINFO_FPSTRUCT_LOWERING* Compiler::GetFpStructLowering(CORINFO_CLASS_HANDLE structHandle)
+{
+ if (m_fpStructLoweringCache == nullptr)
+ m_fpStructLoweringCache = new (this, CMK_CallArgs) FpStructLoweringMap(getAllocator(CMK_CallArgs));
+
+ CORINFO_FPSTRUCT_LOWERING* lowering;
+ if (!m_fpStructLoweringCache->Lookup(structHandle, &lowering))
+ {
+ lowering = new (this, CMK_CallArgs) CORINFO_FPSTRUCT_LOWERING;
+ info.compCompHnd->getFpStructLowering(structHandle, lowering);
+ m_fpStructLoweringCache->Set(structHandle, lowering);
+#ifdef DEBUG
+ if (verbose)
+ {
+ printf("**** getFpStructInRegistersInfo(0x%x (%s, %u bytes)) =>\n", dspPtr(structHandle),
+ eeGetClassName(structHandle), info.compCompHnd->getClassSize(structHandle));
+
+ if (lowering->byIntegerCallConv)
+ {
+ printf(" pass by integer calling convention\n");
+ }
+ else
+ {
+ printf(" may be passed by floating-point calling convention (%zu fields):\n",
+ lowering->numLoweredElements);
+ for (size_t i = 0; i < lowering->numLoweredElements; ++i)
+ {
+ const char* type = varTypeName(JITtype2varType(lowering->loweredElements[i]));
+ printf(" * field[%zu]: type %s at offset %u\n", i, type, lowering->offsets[i]);
+ }
+ }
+ }
+#endif // DEBUG
+ }
+ return lowering;
+}
+
#endif // defined(UNIX_AMD64_ABI)
/*****************************************************************************/
diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h
index ce8cfe9f57f0d6..24fd48b2ef3b78 100644
--- a/src/coreclr/jit/compiler.h
+++ b/src/coreclr/jit/compiler.h
@@ -561,17 +561,9 @@ class LclVarDsc
unsigned char lvIsLastUseCopyOmissionCandidate : 1;
#endif // FEATURE_IMPLICIT_BYREFS
-#if defined(TARGET_LOONGARCH64)
- unsigned char lvIs4Field1 : 1; // Set if the 1st field is int or float within struct for LA-ABI64.
- unsigned char lvIs4Field2 : 1; // Set if the 2nd field is int or float within struct for LA-ABI64.
- unsigned char lvIsSplit : 1; // Set if the argument is splited.
-#endif // defined(TARGET_LOONGARCH64)
-
-#if defined(TARGET_RISCV64)
- unsigned char lvIs4Field1 : 1; // Set if the 1st field is int or float within struct for RISCV64.
- unsigned char lvIs4Field2 : 1; // Set if the 2nd field is int or float within struct for RISCV64.
- unsigned char lvIsSplit : 1; // Set if the argument is splited.
-#endif // defined(TARGET_RISCV64)
+#if defined(TARGET_RISCV64) || defined(TARGET_LOONGARCH64)
+ unsigned char lvIsSplit : 1; // Set if the argument is split across last integer register and stack.
+#endif // defined(TARGET_RISCV64) || defined(TARGET_LOONGARCH64)
unsigned char lvSingleDef : 1; // variable has a single def. Used to identify ref type locals that can get type
// updates
@@ -3655,6 +3647,7 @@ class Compiler
bool gtMarkAddrMode(GenTree* addr, int* costEx, int* costSz, var_types type);
unsigned gtSetEvalOrder(GenTree* tree);
+ unsigned gtSetEvalOrderMinOpts(GenTree* tree);
bool gtMayHaveStoreInterference(GenTree* treeWithStores, GenTree* tree);
bool gtTreeHasLocalRead(GenTree* tree, unsigned lclNum);
@@ -4344,6 +4337,10 @@ class Compiler
#endif // defined(FEATURE_SIMD)
unsigned lvaGSSecurityCookie; // LclVar number
+#ifdef TARGET_ARM64
+ unsigned lvaFfrRegister; // LclVar number
+ unsigned getFFRegisterVarNum();
+#endif
bool lvaTempsHaveLargerOffsetThanVars();
// Returns "true" iff local variable "lclNum" is in SSA form.
@@ -9985,6 +9982,8 @@ class Compiler
// Maximum number of locals before turning off the inlining
#define MAX_LV_NUM_COUNT_FOR_INLINING 512
+ bool canUseTier0Opts;
+ bool canUseAllOpts;
bool compMinOpts;
bool compMinOptsIsSet;
#ifdef DEBUG
@@ -10011,13 +10010,22 @@ class Compiler
}
#endif // !DEBUG
+ // TODO: we should convert these into a single OptimizationLevel
+
bool OptimizationDisabled() const
{
- return MinOpts() || compDbgCode;
+ assert(compMinOptsIsSet);
+ return !canUseAllOpts;
}
bool OptimizationEnabled() const
{
- return !OptimizationDisabled();
+ assert(compMinOptsIsSet);
+ return canUseAllOpts;
+ }
+ bool Tier0OptimizationEnabled() const
+ {
+ assert(compMinOptsIsSet);
+ return canUseTier0Opts;
}
void SetMinOpts(bool val)
@@ -10026,6 +10034,9 @@ class Compiler
assert(!compMinOptsIsSet || (compMinOpts == val));
compMinOpts = val;
compMinOptsIsSet = true;
+
+ canUseTier0Opts = !compDbgCode && !jitFlags->IsSet(JitFlags::JIT_FLAG_MIN_OPT);
+ canUseAllOpts = canUseTier0Opts && !val;
}
// true if the CLFLG_* for an optimization is set.
@@ -11477,6 +11488,11 @@ class Compiler
void GetStructTypeOffset(
CORINFO_CLASS_HANDLE typeHnd, var_types* type0, var_types* type1, uint8_t* offset0, uint8_t* offset1);
+#elif defined(TARGET_RISCV64) || defined(TARGET_LOONGARCH64)
+    typedef JitHashTable<CORINFO_CLASS_HANDLE, JitPtrKeyFuncs<struct CORINFO_CLASS_STRUCT_>, CORINFO_FPSTRUCT_LOWERING*>
+        FpStructLoweringMap;
+ FpStructLoweringMap* m_fpStructLoweringCache;
+ const CORINFO_FPSTRUCT_LOWERING* GetFpStructLowering(CORINFO_CLASS_HANDLE structHandle);
#endif // defined(UNIX_AMD64_ABI)
void fgMorphMultiregStructArgs(GenTreeCall* call);
diff --git a/src/coreclr/jit/emit.cpp b/src/coreclr/jit/emit.cpp
index c82d2df22b9e12..d91b822afa11a9 100644
--- a/src/coreclr/jit/emit.cpp
+++ b/src/coreclr/jit/emit.cpp
@@ -10427,9 +10427,9 @@ regMaskTP emitter::emitGetGCRegsKilledByNoGCCall(CorInfoHelpFunc helper)
// of the last instruction in the region makes GC safe again.
// In particular - once the IP is on the first instruction, but not executed it yet,
// it is still safe to do GC.
-// The only special case is when NoGC region is used for prologs.
-// In such case the GC info could be incorrect until the prolog completes, so the first
-// instruction cannot have GC.
+// The only special case is when NoGC region is used for prologs/epilogs.
+// In such case the GC info could be incorrect until the prolog completes and epilogs
+// may have unwindability restrictions, so the first instruction cannot have GC.
void emitter::emitDisableGC()
{
diff --git a/src/coreclr/jit/emitarm64.cpp b/src/coreclr/jit/emitarm64.cpp
index 422d539d3b8c03..ed24d4894beecf 100644
--- a/src/coreclr/jit/emitarm64.cpp
+++ b/src/coreclr/jit/emitarm64.cpp
@@ -14360,6 +14360,7 @@ void emitter::emitDispInsHelp(
break;
}
+#ifdef DEBUG
if (id->idIsLclVar())
{
printf("\t// ");
@@ -14373,6 +14374,7 @@ void emitter::emitDispInsHelp(
asmfm);
}
}
+#endif
printf("\n");
}
diff --git a/src/coreclr/jit/emitinl.h b/src/coreclr/jit/emitinl.h
index a586193dd5b714..022064073d908d 100644
--- a/src/coreclr/jit/emitinl.h
+++ b/src/coreclr/jit/emitinl.h
@@ -594,7 +594,8 @@ bool emitter::emitGenNoGCLst(Callback& cb)
emitter::instrDesc* id = emitFirstInstrDesc(ig->igData);
assert(id != nullptr);
assert(id->idCodeSize() > 0);
- if (!cb(ig->igFuncIdx, ig->igOffs, ig->igSize, id->idCodeSize(), ig->igFlags & (IGF_FUNCLET_PROLOG)))
+ if (!cb(ig->igFuncIdx, ig->igOffs, ig->igSize, id->idCodeSize(),
+ ig->igFlags & (IGF_FUNCLET_PROLOG | IGF_FUNCLET_EPILOG | IGF_EPILOG)))
{
return false;
}
diff --git a/src/coreclr/jit/fginline.cpp b/src/coreclr/jit/fginline.cpp
index 3d9e40e9fc22e4..3bebd027138c1a 100644
--- a/src/coreclr/jit/fginline.cpp
+++ b/src/coreclr/jit/fginline.cpp
@@ -1975,7 +1975,7 @@ void Compiler::fgInsertInlineeArgument(
// * Passing of call arguments via temps
//
// Newly added statements are placed just after the original call
-// and are are given the same inline context as the call any calls
+// and are given the same inline context as the call; any calls
// added here will appear to have been part of the immediate caller.
//
Statement* Compiler::fgInlinePrependStatements(InlineInfo* inlineInfo)
diff --git a/src/coreclr/jit/gcencode.cpp b/src/coreclr/jit/gcencode.cpp
index f99407e90b4e28..4863c95a7f59dd 100644
--- a/src/coreclr/jit/gcencode.cpp
+++ b/src/coreclr/jit/gcencode.cpp
@@ -4027,7 +4027,8 @@ class InterruptibleRangeReporter
// Report everything between the previous region and the current
// region as interruptible.
- bool operator()(unsigned igFuncIdx, unsigned igOffs, unsigned igSize, unsigned firstInstrSize, bool isInProlog)
+ bool operator()(
+ unsigned igFuncIdx, unsigned igOffs, unsigned igSize, unsigned firstInstrSize, bool isInPrologOrEpilog)
{
if (igOffs < m_uninterruptibleEnd)
{
@@ -4043,7 +4044,7 @@ class InterruptibleRangeReporter
// Once the first instruction in IG executes, we cannot have GC.
// But it is ok to have GC while the IP is on the first instruction, unless we are in prolog/epilog.
unsigned interruptibleEnd = igOffs;
- if (!isInProlog)
+ if (!isInPrologOrEpilog)
{
interruptibleEnd += firstInstrSize;
}
diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp
index 5e05856d8d9e6c..11b51d8629cfab 100644
--- a/src/coreclr/jit/gentree.cpp
+++ b/src/coreclr/jit/gentree.cpp
@@ -3931,8 +3931,10 @@ unsigned Compiler::gtSetMultiOpOrder(GenTreeMultiOp* multiOp)
int costSz = 1;
unsigned level = 0;
+ bool optsEnabled = opts.OptimizationEnabled();
+
#if defined(FEATURE_HW_INTRINSICS)
- if (multiOp->OperIs(GT_HWINTRINSIC))
+ if (multiOp->OperIs(GT_HWINTRINSIC) && optsEnabled)
{
GenTreeHWIntrinsic* hwTree = multiOp->AsHWIntrinsic();
#if defined(TARGET_XARCH)
@@ -4052,8 +4054,12 @@ unsigned Compiler::gtSetMultiOpOrder(GenTreeMultiOp* multiOp)
level += 1;
}
- costEx += (multiOp->Op(1)->GetCostEx() + multiOp->Op(2)->GetCostEx());
- costSz += (multiOp->Op(1)->GetCostSz() + multiOp->Op(2)->GetCostSz());
+ if (optsEnabled)
+ {
+ // We don't need/have costs in MinOpts
+ costEx += (multiOp->Op(1)->GetCostEx() + multiOp->Op(2)->GetCostEx());
+ costSz += (multiOp->Op(1)->GetCostSz() + multiOp->Op(2)->GetCostSz());
+ }
}
else
{
@@ -4064,12 +4070,19 @@ unsigned Compiler::gtSetMultiOpOrder(GenTreeMultiOp* multiOp)
level = max(lvl, level + 1);
- costEx += op->GetCostEx();
- costSz += op->GetCostSz();
+ if (optsEnabled)
+ {
+ // We don't need/have costs in MinOpts
+ costEx += op->GetCostEx();
+ costSz += op->GetCostSz();
+ }
}
}
- multiOp->SetCosts(costEx, costSz);
+ if (optsEnabled)
+ {
+ multiOp->SetCosts(costEx, costSz);
+ }
return level;
}
#endif
@@ -4823,6 +4836,44 @@ bool Compiler::gtMarkAddrMode(GenTree* addr, int* pCostEx, int* pCostSz, var_typ
return false;
}
+static void SetIndirectStoreEvalOrder(Compiler* comp, GenTreeIndir* store, bool* allowReversal)
+{
+ assert(store->OperIs(GT_STORE_BLK, GT_STOREIND));
+
+ GenTree* addr = store->Addr();
+ GenTree* data = store->Data();
+ *allowReversal = true;
+
+ if (addr->IsInvariant())
+ {
+ *allowReversal = false;
+ store->SetReverseOp();
+ return;
+ }
+
+ if ((addr->gtFlags & GTF_ALL_EFFECT) != 0)
+ {
+ return;
+ }
+
+ // In case op2 assigns to a local var that is used in op1, we have to evaluate op1 first.
+ if (comp->gtMayHaveStoreInterference(data, addr))
+ {
+ // TODO-ASG-Cleanup: move this guard to "gtCanSwapOrder".
+ *allowReversal = false;
+ return;
+ }
+
+ // If op2 is simple then evaluate op1 first
+ if (data->OperIsLeaf())
+ {
+ return;
+ }
+
+ *allowReversal = false;
+ store->SetReverseOp();
+}
+
/*****************************************************************************
*
* Given a tree, figure out the order in which its sub-operands should be
@@ -4848,6 +4899,11 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree)
{
assert(tree);
+ if (opts.OptimizationDisabled())
+ {
+ return gtSetEvalOrderMinOpts(tree);
+ }
+
#ifdef DEBUG
/* Clear the GTF_DEBUG_NODE_MORPHED flag as well */
tree->gtDebugFlags &= ~GTF_DEBUG_NODE_MORPHED;
@@ -5838,33 +5894,7 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree)
// TODO-ASG-Cleanup: this logic emulated the ASG case below. See how of much of it can be deleted.
if (!optValnumCSE_phase || optCSE_canSwap(op1, op2))
{
- if (op1->IsInvariant())
- {
- allowReversal = false;
- tree->SetReverseOp();
- break;
- }
- if ((op1->gtFlags & GTF_ALL_EFFECT) != 0)
- {
- break;
- }
-
- // In case op2 assigns to a local var that is used in op1, we have to evaluate op1 first.
- if (gtMayHaveStoreInterference(op2, op1))
- {
- // TODO-ASG-Cleanup: move this guard to "gtCanSwapOrder".
- allowReversal = false;
- break;
- }
-
- // If op2 is simple then evaluate op1 first
- if (op2->OperIsLeaf())
- {
- break;
- }
-
- allowReversal = false;
- tree->SetReverseOp();
+ SetIndirectStoreEvalOrder(this, tree->AsIndir(), &allowReversal);
}
break;
@@ -6212,6 +6242,149 @@ unsigned Compiler::gtSetEvalOrder(GenTree* tree)
#pragma warning(pop)
#endif
+//------------------------------------------------------------------------
+// gtSetEvalOrderMinOpts: A MinOpts specific version of gtSetEvalOrder. We don't
+// need to set costs, but we're looking for opportunities to swap operands.
+//
+// Arguments:
+// tree - The tree for which we are setting the evaluation order.
+//
+// Return Value:
+// the Sethi 'complexity' estimate for this tree (the higher
+// the number, the higher is the tree's resources requirement)
+//
+unsigned Compiler::gtSetEvalOrderMinOpts(GenTree* tree)
+{
+ assert(tree);
+ if (fgOrder == FGOrderLinear)
+ {
+ // We don't re-order operands in LIR anyway.
+ return 0;
+ }
+
+ if (tree->OperIsLeaf())
+ {
+ // Nothing to do for leaves, report as having Sethi 'complexity' of 0
+ return 0;
+ }
+
+ unsigned level = 1;
+ if (tree->OperIsSimple())
+ {
+ GenTree* op1 = tree->AsOp()->gtOp1;
+ GenTree* op2 = tree->gtGetOp2IfPresent();
+
+ // Only GT_LEA may have a nullptr op1 and a non-nullptr op2
+ if (tree->OperIs(GT_LEA) && (op1 == nullptr))
+ {
+ std::swap(op1, op2);
+ }
+
+ // Check for a nilary operator
+ if (op1 == nullptr)
+ {
+            // E.g. void GT_RETURN, GT_RETFILT
+ assert(op2 == nullptr);
+ return 0;
+ }
+
+ if (op2 == nullptr)
+ {
+ gtSetEvalOrderMinOpts(op1);
+ return 1;
+ }
+
+ level = gtSetEvalOrderMinOpts(op1);
+ unsigned levelOp2 = gtSetEvalOrderMinOpts(op2);
+
+ bool allowSwap = true;
+ // TODO: Introduce a function to check whether we can swap the order of its operands or not.
+ switch (tree->OperGet())
+ {
+ case GT_COMMA:
+ case GT_BOUNDS_CHECK:
+ case GT_INTRINSIC:
+ case GT_QMARK:
+ case GT_COLON:
+ // We're not going to swap operands in these
+ allowSwap = false;
+ break;
+
+ case GT_STORE_BLK:
+ case GT_STOREIND:
+ SetIndirectStoreEvalOrder(this, tree->AsIndir(), &allowSwap);
+ break;
+
+ default:
+ break;
+ }
+
+ const bool shouldSwap = tree->IsReverseOp() ? level > levelOp2 : level < levelOp2;
+ if (shouldSwap && allowSwap)
+ {
+ // Can we swap the order by commuting the operands?
+ const bool canSwap = tree->IsReverseOp() ? gtCanSwapOrder(op2, op1) : gtCanSwapOrder(op1, op2);
+ if (canSwap)
+ {
+ if (tree->OperIsCmpCompare())
+ {
+ genTreeOps oper = tree->OperGet();
+ if (GenTree::SwapRelop(oper) != oper)
+ {
+ tree->SetOper(GenTree::SwapRelop(oper));
+ }
+ std::swap(tree->AsOp()->gtOp1, tree->AsOp()->gtOp2);
+ }
+ else if (tree->OperIsCommutative())
+ {
+ std::swap(tree->AsOp()->gtOp1, tree->AsOp()->gtOp2);
+ }
+ else
+ {
+ // Mark the operand's evaluation order to be swapped.
+ tree->gtFlags ^= GTF_REVERSE_OPS;
+ }
+ }
+ }
+
+ // Swap the level counts
+ if (tree->IsReverseOp())
+ {
+ std::swap(level, levelOp2);
+ }
+
+ // Compute the sethi number for this binary operator
+ if (level < 1)
+ {
+ level = levelOp2;
+ }
+ else if (level == levelOp2)
+ {
+ level++;
+ }
+ }
+ else if (tree->IsCall())
+ {
+ // We ignore late args - they don't bring any noticeable benefits
+ // according to asmdiffs/tpdiff
+ for (CallArg& arg : tree->AsCall()->gtArgs.EarlyArgs())
+ {
+ gtSetEvalOrderMinOpts(arg.GetEarlyNode());
+ }
+ level = 3;
+ }
+#if defined(FEATURE_HW_INTRINSICS)
+ else if (tree->OperIsHWIntrinsic())
+ {
+ return gtSetMultiOpOrder(tree->AsMultiOp());
+ }
+#endif // FEATURE_HW_INTRINSICS
+
+ // NOTE: we skip many operators here in order to maintain a good trade-off between CQ and TP.
+
+ return level;
+}
+
//------------------------------------------------------------------------
// gtMayHaveStoreInterference: Check if two trees may interfere because of a
// store in one of the trees.
@@ -7028,6 +7201,8 @@ ExceptionSetFlags GenTree::OperExceptions(Compiler* comp)
#ifdef FEATURE_HW_INTRINSICS
case GT_HWINTRINSIC:
{
+ assert((gtFlags & GTF_HW_USER_CALL) == 0);
+
GenTreeHWIntrinsic* hwIntrinsicNode = this->AsHWIntrinsic();
if (hwIntrinsicNode->OperIsMemoryLoadOrStore())
@@ -7066,6 +7241,15 @@ bool GenTree::OperMayThrow(Compiler* comp)
helper = comp->eeGetHelperNum(this->AsCall()->gtCallMethHnd);
return ((helper == CORINFO_HELP_UNDEF) || !comp->s_helperCallProperties.NoThrow(helper));
}
+#ifdef FEATURE_HW_INTRINSICS
+ else if (OperIsHWIntrinsic())
+ {
+ if ((gtFlags & GTF_HW_USER_CALL) != 0)
+ {
+ return true;
+ }
+ }
+#endif // FEATURE_HW_INTRINSICS
return OperExceptions(comp) != ExceptionSetFlags::None;
}
@@ -7451,7 +7635,11 @@ GenTreeIntCon* Compiler::gtNewFalse()
// return a new node representing the value in a physical register
GenTree* Compiler::gtNewPhysRegNode(regNumber reg, var_types type)
{
+#ifdef TARGET_ARM64
+ assert(genIsValidIntReg(reg) || (reg == REG_SPBASE) || (reg == REG_FFR));
+#else
assert(genIsValidIntReg(reg) || (reg == REG_SPBASE));
+#endif
GenTree* result = new (this, GT_PHYSREG) GenTreePhysReg(reg, type);
return result;
}
@@ -11536,6 +11724,14 @@ void Compiler::gtGetLclVarNameInfo(unsigned lclNum, const char** ilKindOut, cons
ilKind = "cse";
ilNum = lclNum - optCSEstart;
}
+#ifdef TARGET_ARM64
+ else if (lclNum == lvaFfrRegister)
+ {
+ // We introduce this LclVar in lowering, hence special case the printing of
+ // it instead of handling it in "rationalizer" below.
+ ilName = "FFReg";
+ }
+#endif
else if (lclNum >= optCSEstart)
{
// Currently any new LclVar's introduced after the CSE phase
@@ -13340,10 +13536,7 @@ GenTree* Compiler::gtFoldExpr(GenTree* tree)
return tree;
}
- // NOTE: MinOpts() is always true for Tier0 so we have to check explicit flags instead.
- // To be fixed in https://github.com/dotnet/runtime/pull/77465
- const bool tier0opts = !opts.compDbgCode && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_MIN_OPT);
- if (!tier0opts)
+ if (!opts.Tier0OptimizationEnabled())
{
return tree;
}
@@ -13402,9 +13595,15 @@ GenTree* Compiler::gtFoldExpr(GenTree* tree)
}
else if (op1->OperIsConst() || op2->OperIsConst())
{
- /* at least one is a constant - see if we have a
- * special operator that can use only one constant
- * to fold - e.g. booleans */
+ // At least one is a constant - see if we have a
+ // special operator that can use only one constant
+ // to fold - e.g. booleans
+
+ if (opts.OptimizationDisabled())
+ {
+ // Too heavy for tier0
+ return tree;
+ }
return gtFoldExprSpecial(tree);
}
@@ -15191,10 +15390,7 @@ GenTree* Compiler::gtFoldExprConst(GenTree* tree)
GenTree* op1 = tree->gtGetOp1();
GenTree* op2 = tree->gtGetOp2IfPresent();
- // NOTE: MinOpts() is always true for Tier0 so we have to check explicit flags instead.
- // To be fixed in https://github.com/dotnet/runtime/pull/77465
- const bool tier0opts = !opts.compDbgCode && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_MIN_OPT);
- if (!tier0opts)
+ if (!opts.Tier0OptimizationEnabled())
{
return tree;
}
@@ -19933,7 +20129,7 @@ void GenTreeJitIntrinsic::SetMethodHandle(Compiler* com
CORINFO_METHOD_HANDLE methodHandle R2RARG(CORINFO_CONST_LOOKUP entryPoint))
{
assert(OperIsHWIntrinsic() && !IsUserCall());
- gtFlags |= GTF_HW_USER_CALL;
+ gtFlags |= (GTF_HW_USER_CALL | GTF_EXCEPT | GTF_CALL);
size_t operandCount = GetOperandCount();
@@ -29359,37 +29555,29 @@ void ReturnTypeDesc::InitializeStructReturnType(Compiler* comp,
#elif defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
assert((structSize >= TARGET_POINTER_SIZE) && (structSize <= (2 * TARGET_POINTER_SIZE)));
-
-#ifdef TARGET_LOONGARCH64
- uint32_t floatFieldFlags = comp->info.compCompHnd->getLoongArch64PassStructInRegisterFlags(retClsHnd);
-#else
- uint32_t floatFieldFlags = comp->info.compCompHnd->getRISCV64PassStructInRegisterFlags(retClsHnd);
-#endif
BYTE gcPtrs[2] = {TYPE_GC_NONE, TYPE_GC_NONE};
comp->info.compCompHnd->getClassGClayout(retClsHnd, &gcPtrs[0]);
-
- if (floatFieldFlags & STRUCT_FLOAT_FIELD_ONLY_TWO)
- {
- comp->compFloatingPointUsed = true;
- assert((structSize > 8) == ((floatFieldFlags & STRUCT_HAS_8BYTES_FIELDS_MASK) > 0));
- m_regType[0] = (floatFieldFlags & STRUCT_FIRST_FIELD_SIZE_IS8) ? TYP_DOUBLE : TYP_FLOAT;
- m_regType[1] = (floatFieldFlags & STRUCT_SECOND_FIELD_SIZE_IS8) ? TYP_DOUBLE : TYP_FLOAT;
- }
- else if (floatFieldFlags & STRUCT_FLOAT_FIELD_FIRST)
- {
- comp->compFloatingPointUsed = true;
- assert((structSize > 8) == ((floatFieldFlags & STRUCT_HAS_8BYTES_FIELDS_MASK) > 0));
- m_regType[0] = (floatFieldFlags & STRUCT_FIRST_FIELD_SIZE_IS8) ? TYP_DOUBLE : TYP_FLOAT;
- m_regType[1] =
- (floatFieldFlags & STRUCT_SECOND_FIELD_SIZE_IS8) ? comp->getJitGCType(gcPtrs[1]) : TYP_INT;
- }
- else if (floatFieldFlags & STRUCT_FLOAT_FIELD_SECOND)
+ const CORINFO_FPSTRUCT_LOWERING* lowering = comp->GetFpStructLowering(retClsHnd);
+ if (!lowering->byIntegerCallConv)
{
comp->compFloatingPointUsed = true;
- assert((structSize > 8) == ((floatFieldFlags & STRUCT_HAS_8BYTES_FIELDS_MASK) > 0));
- m_regType[0] =
- (floatFieldFlags & STRUCT_FIRST_FIELD_SIZE_IS8) ? comp->getJitGCType(gcPtrs[0]) : TYP_INT;
- m_regType[1] = (floatFieldFlags & STRUCT_SECOND_FIELD_SIZE_IS8) ? TYP_DOUBLE : TYP_FLOAT;
+ assert(lowering->numLoweredElements == MAX_RET_REG_COUNT);
+ var_types types[MAX_RET_REG_COUNT] = {JITtype2varType(lowering->loweredElements[0]),
+ JITtype2varType(lowering->loweredElements[1])};
+ assert(varTypeIsFloating(types[0]) || varTypeIsFloating(types[1]));
+ assert((structSize > 8) == ((genTypeSize(types[0]) == 8) || (genTypeSize(types[1]) == 8)));
+ for (unsigned i = 0; i < MAX_RET_REG_COUNT; ++i)
+ {
+ if (varTypeIsFloating(types[i]))
+ {
+ m_regType[i] = types[i];
+ }
+ else
+ {
+ assert(varTypeIsIntegralOrI(types[i]));
+ m_regType[i] = (genTypeSize(types[i]) == 8) ? comp->getJitGCType(gcPtrs[i]) : TYP_INT;
+ }
+ }
}
else
{
@@ -30261,10 +30449,7 @@ GenTree* Compiler::gtFoldExprHWIntrinsic(GenTreeHWIntrinsic* tree)
{
assert(tree->OperIsHWIntrinsic());
- // NOTE: MinOpts() is always true for Tier0 so we have to check explicit flags instead.
- // To be fixed in https://github.com/dotnet/runtime/pull/77465
- const bool tier0opts = !opts.compDbgCode && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_MIN_OPT);
- if (!tier0opts)
+ if (!opts.Tier0OptimizationEnabled())
{
return tree;
}
@@ -30354,8 +30539,11 @@ GenTree* Compiler::gtFoldExprHWIntrinsic(GenTreeHWIntrinsic* tree)
#endif // !TARGET_XARCH && !TARGET_ARM64
DEBUG_DESTROY_NODE(op, tree);
- INDEBUG(vectorNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
+ if (fgGlobalMorph)
+ {
+ INDEBUG(vectorNode->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED);
+ }
return vectorNode;
}
}
@@ -30740,6 +30928,10 @@ GenTree* Compiler::gtFoldExprHWIntrinsic(GenTreeHWIntrinsic* tree)
// Ensure we don't lose track the the amount is an overshift
shiftAmount = -1;
}
+
+ // Ensure we broadcast to the right vector size
+ otherNode->gtType = retType;
+
otherNode->AsVecCon()->EvaluateBroadcastInPlace(simdBaseType, shiftAmount);
}
}
diff --git a/src/coreclr/jit/gentree.h b/src/coreclr/jit/gentree.h
index c7b5caa4b38aef..7b893e1c39dfb4 100644
--- a/src/coreclr/jit/gentree.h
+++ b/src/coreclr/jit/gentree.h
@@ -4544,7 +4544,7 @@ struct CallArgABIInformation
SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR StructDesc;
#endif // UNIX_AMD64_ABI
#if defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
- // For LoongArch64's ABI, the struct which has float field(s) and no more than two fields
+ // For LoongArch64's and RISC-V 64's ABI, the struct which has float field(s) and no more than two fields
// may be passed by float register(s).
// e.g `struct {int a; float b;}` passed by an integer register and a float register.
var_types StructFloatFieldType[2];
diff --git a/src/coreclr/jit/hwintrinsic.cpp b/src/coreclr/jit/hwintrinsic.cpp
index 63455226ed2fbe..e977b649dc5a21 100644
--- a/src/coreclr/jit/hwintrinsic.cpp
+++ b/src/coreclr/jit/hwintrinsic.cpp
@@ -2297,10 +2297,15 @@ GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic,
switch (intrinsic)
{
- case NI_Sve_CreateBreakAfterMask:
case NI_Sve_CreateBreakAfterPropagateMask:
- case NI_Sve_CreateBreakBeforeMask:
case NI_Sve_CreateBreakBeforePropagateMask:
+ {
+            // HWIntrinsic requires a mask for op3
+ convertToMaskIfNeeded(retNode->AsHWIntrinsic()->Op(3));
+ FALLTHROUGH;
+ }
+ case NI_Sve_CreateBreakAfterMask:
+ case NI_Sve_CreateBreakBeforeMask:
case NI_Sve_CreateMaskForFirstActiveElement:
case NI_Sve_CreateMaskForNextActiveElement:
case NI_Sve_GetActiveElementCount:
@@ -2310,30 +2315,16 @@ GenTree* Compiler::impHWIntrinsic(NamedIntrinsic intrinsic,
{
// HWInstrinsic requires a mask for op2
convertToMaskIfNeeded(retNode->AsHWIntrinsic()->Op(2));
- break;
+ FALLTHROUGH;
}
-
default:
- break;
- }
-
- switch (intrinsic)
- {
- case NI_Sve_CreateBreakAfterPropagateMask:
- case NI_Sve_CreateBreakBeforePropagateMask:
{
- // HWInstrinsic requires a mask for op3
- convertToMaskIfNeeded(retNode->AsHWIntrinsic()->Op(3));
+            // HWIntrinsic requires a mask for op1
+ convertToMaskIfNeeded(retNode->AsHWIntrinsic()->Op(1));
break;
}
-
- default:
- break;
}
- // HWInstrinsic requires a mask for op1
- convertToMaskIfNeeded(retNode->AsHWIntrinsic()->Op(1));
-
if (HWIntrinsicInfo::IsMultiReg(intrinsic))
{
assert(HWIntrinsicInfo::IsExplicitMaskedOperation(retNode->AsHWIntrinsic()->GetHWIntrinsicId()));
diff --git a/src/coreclr/jit/hwintrinsiccodegenarm64.cpp b/src/coreclr/jit/hwintrinsiccodegenarm64.cpp
index a93fa678156ca8..934ed32b81d75d 100644
--- a/src/coreclr/jit/hwintrinsiccodegenarm64.cpp
+++ b/src/coreclr/jit/hwintrinsiccodegenarm64.cpp
@@ -2366,6 +2366,26 @@ void CodeGen::genHWIntrinsic(GenTreeHWIntrinsic* node)
break;
}
+ case NI_Sve_LoadVectorFirstFaulting:
+ {
+ if (intrin.numOperands == 3)
+ {
+                // We have an extra argument, which means there is a "use" of FFR here. Restore it back into the FFR register.
+ assert(op3Reg != REG_NA);
+ GetEmitter()->emitIns_R(INS_sve_wrffr, emitSize, op3Reg, opt);
+ }
+
+ insScalableOpts sopt = (opt == INS_OPTS_SCALABLE_B) ? INS_SCALABLE_OPTS_NONE : INS_SCALABLE_OPTS_LSL_N;
+ GetEmitter()->emitIns_R_R_R_R(ins, emitSize, targetReg, op1Reg, op2Reg, REG_ZR, opt, sopt);
+ break;
+ }
+
+ case NI_Sve_SetFfr:
+ {
+ assert(targetReg == REG_NA);
+ GetEmitter()->emitIns_R(ins, emitSize, op1Reg, opt);
+ break;
+ }
case NI_Sve_ConditionalExtractAfterLastActiveElementScalar:
case NI_Sve_ConditionalExtractLastActiveElementScalar:
{
diff --git a/src/coreclr/jit/hwintrinsiclistarm64sve.h b/src/coreclr/jit/hwintrinsiclistarm64sve.h
index 46dd188785d197..8a531918261a3e 100644
--- a/src/coreclr/jit/hwintrinsiclistarm64sve.h
+++ b/src/coreclr/jit/hwintrinsiclistarm64sve.h
@@ -122,6 +122,14 @@ HARDWARE_INTRINSIC(Sve, GatherVectorUInt32WithByteOffsetsZeroExtend,
HARDWARE_INTRINSIC(Sve, GatherVectorUInt32ZeroExtend, -1, -1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1w, INS_sve_ld1w, INS_sve_ld1w, INS_sve_ld1w, INS_invalid, INS_invalid}, HW_Category_MemoryLoad, HW_Flag_Scalable|HW_Flag_SpecialCodeGen|HW_Flag_ExplicitMaskedOperation|HW_Flag_LowMaskedOperation)
HARDWARE_INTRINSIC(Sve, GatherVectorWithByteOffsets, -1, 3, true, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1w, INS_sve_ld1w, INS_sve_ld1d, INS_sve_ld1d, INS_sve_ld1w, INS_sve_ld1d}, HW_Category_MemoryLoad, HW_Flag_Scalable|HW_Flag_SpecialCodeGen|HW_Flag_ExplicitMaskedOperation|HW_Flag_LowMaskedOperation)
HARDWARE_INTRINSIC(Sve, GetActiveElementCount, -1, 2, true, {INS_sve_cntp, INS_sve_cntp, INS_sve_cntp, INS_sve_cntp, INS_sve_cntp, INS_sve_cntp, INS_sve_cntp, INS_sve_cntp, INS_sve_cntp, INS_sve_cntp}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_BaseTypeFromFirstArg|HW_Flag_ExplicitMaskedOperation)
+HARDWARE_INTRINSIC(Sve, GetFfrByte, -1, -1, false, {INS_invalid, INS_sve_rdffr, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_SpecialCodeGen|HW_Flag_ReturnsPerElementMask)
+HARDWARE_INTRINSIC(Sve, GetFfrInt16, -1, -1, false, {INS_invalid, INS_invalid, INS_sve_rdffr, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_SpecialCodeGen|HW_Flag_ReturnsPerElementMask)
+HARDWARE_INTRINSIC(Sve, GetFfrInt32, -1, -1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_rdffr, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_SpecialCodeGen|HW_Flag_ReturnsPerElementMask)
+HARDWARE_INTRINSIC(Sve, GetFfrInt64, -1, -1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_rdffr, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_SpecialCodeGen|HW_Flag_ReturnsPerElementMask)
+HARDWARE_INTRINSIC(Sve, GetFfrSByte, -1, -1, false, {INS_sve_rdffr, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_SpecialCodeGen|HW_Flag_ReturnsPerElementMask)
+HARDWARE_INTRINSIC(Sve, GetFfrUInt16, -1, -1, false, {INS_invalid, INS_invalid, INS_invalid, INS_sve_rdffr, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_SpecialCodeGen|HW_Flag_ReturnsPerElementMask)
+HARDWARE_INTRINSIC(Sve, GetFfrUInt32, -1, -1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_rdffr, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_SpecialCodeGen|HW_Flag_ReturnsPerElementMask)
+HARDWARE_INTRINSIC(Sve, GetFfrUInt64, -1, -1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_rdffr, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_SpecialCodeGen|HW_Flag_ReturnsPerElementMask)
HARDWARE_INTRINSIC(Sve, InsertIntoShiftedVector, -1, 2, true, {INS_sve_insr, INS_sve_insr, INS_sve_insr, INS_sve_insr, INS_sve_insr, INS_sve_insr, INS_sve_insr, INS_sve_insr, INS_sve_insr, INS_sve_insr}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_SpecialCodeGen|HW_Flag_HasRMWSemantics)
HARDWARE_INTRINSIC(Sve, LeadingSignCount, -1, -1, false, {INS_sve_cls, INS_invalid, INS_sve_cls, INS_invalid, INS_sve_cls, INS_invalid, INS_sve_cls, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_BaseTypeFromFirstArg|HW_Flag_EmbeddedMaskedOperation|HW_Flag_LowMaskedOperation)
HARDWARE_INTRINSIC(Sve, LeadingZeroCount, -1, -1, false, {INS_sve_clz, INS_sve_clz, INS_sve_clz, INS_sve_clz, INS_sve_clz, INS_sve_clz, INS_sve_clz, INS_sve_clz, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_BaseTypeFromFirstArg|HW_Flag_EmbeddedMaskedOperation|HW_Flag_LowMaskedOperation)
@@ -142,6 +150,7 @@ HARDWARE_INTRINSIC(Sve, LoadVectorByteZeroExtendToInt64,
HARDWARE_INTRINSIC(Sve, LoadVectorByteZeroExtendToUInt16, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1b, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_MemoryLoad, HW_Flag_Scalable|HW_Flag_ExplicitMaskedOperation|HW_Flag_LowMaskedOperation)
HARDWARE_INTRINSIC(Sve, LoadVectorByteZeroExtendToUInt32, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1b, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_MemoryLoad, HW_Flag_Scalable|HW_Flag_ExplicitMaskedOperation|HW_Flag_LowMaskedOperation)
HARDWARE_INTRINSIC(Sve, LoadVectorByteZeroExtendToUInt64, -1, 2, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ld1b, INS_invalid, INS_invalid}, HW_Category_MemoryLoad, HW_Flag_Scalable|HW_Flag_ExplicitMaskedOperation|HW_Flag_LowMaskedOperation)
+HARDWARE_INTRINSIC(Sve, LoadVectorFirstFaulting, -1, -1, false, {INS_sve_ldff1b, INS_sve_ldff1b, INS_sve_ldff1h, INS_sve_ldff1h, INS_sve_ldff1w, INS_sve_ldff1w, INS_sve_ldff1d, INS_sve_ldff1d, INS_sve_ldff1w, INS_sve_ldff1d}, HW_Category_MemoryLoad, HW_Flag_Scalable|HW_Flag_SpecialCodeGen|HW_Flag_ExplicitMaskedOperation|HW_Flag_LowMaskedOperation|HW_Flag_SpecialSideEffectMask)
HARDWARE_INTRINSIC(Sve, LoadVectorInt16NonFaultingSignExtendToInt32, -1, -1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnf1sh, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_MemoryLoad, HW_Flag_Scalable|HW_Flag_EmbeddedMaskedOperation|HW_Flag_LowMaskedOperation)
HARDWARE_INTRINSIC(Sve, LoadVectorInt16NonFaultingSignExtendToInt64, -1, -1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnf1sh, INS_invalid, INS_invalid, INS_invalid}, HW_Category_MemoryLoad, HW_Flag_Scalable|HW_Flag_EmbeddedMaskedOperation|HW_Flag_LowMaskedOperation)
HARDWARE_INTRINSIC(Sve, LoadVectorInt16NonFaultingSignExtendToUInt32, -1, -1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_ldnf1sh, INS_invalid, INS_invalid, INS_invalid, INS_invalid}, HW_Category_MemoryLoad, HW_Flag_Scalable|HW_Flag_EmbeddedMaskedOperation|HW_Flag_LowMaskedOperation)
@@ -237,6 +246,7 @@ HARDWARE_INTRINSIC(Sve, Scatter32BitNarrowing,
HARDWARE_INTRINSIC(Sve, Scatter32BitWithByteOffsetsNarrowing, -1, -1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_st1w, INS_sve_st1w, INS_invalid, INS_invalid}, HW_Category_MemoryStore, HW_Flag_Scalable|HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen|HW_Flag_ExplicitMaskedOperation|HW_Flag_LowMaskedOperation)
HARDWARE_INTRINSIC(Sve, Scatter8BitNarrowing, -1, -1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_st1b, INS_sve_st1b, INS_sve_st1b, INS_sve_st1b, INS_invalid, INS_invalid}, HW_Category_MemoryStore, HW_Flag_Scalable|HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen|HW_Flag_ExplicitMaskedOperation|HW_Flag_LowMaskedOperation)
HARDWARE_INTRINSIC(Sve, Scatter8BitWithByteOffsetsNarrowing, -1, -1, false, {INS_invalid, INS_invalid, INS_invalid, INS_invalid, INS_sve_st1b, INS_sve_st1b, INS_sve_st1b, INS_sve_st1b, INS_invalid, INS_invalid}, HW_Category_MemoryStore, HW_Flag_Scalable|HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialCodeGen|HW_Flag_ExplicitMaskedOperation|HW_Flag_LowMaskedOperation)
+HARDWARE_INTRINSIC(Sve, SetFfr, -1, 1, true, {INS_sve_wrffr, INS_sve_wrffr, INS_sve_wrffr, INS_sve_wrffr, INS_sve_wrffr, INS_sve_wrffr, INS_sve_wrffr, INS_sve_wrffr, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_ExplicitMaskedOperation|HW_Flag_BaseTypeFromFirstArg|HW_Flag_SpecialSideEffectMask|HW_Flag_SpecialCodeGen)
HARDWARE_INTRINSIC(Sve, ShiftLeftLogical, -1, -1, false, {INS_sve_lsl, INS_sve_lsl, INS_sve_lsl, INS_sve_lsl, INS_sve_lsl, INS_sve_lsl, INS_sve_lsl, INS_sve_lsl, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_BaseTypeFromFirstArg|HW_Flag_EmbeddedMaskedOperation|HW_Flag_LowMaskedOperation|HW_Flag_HasRMWSemantics)
HARDWARE_INTRINSIC(Sve, ShiftRightArithmetic, -1, -1, false, {INS_sve_asr, INS_invalid, INS_sve_asr, INS_invalid, INS_sve_asr, INS_invalid, INS_sve_asr, INS_invalid, INS_invalid, INS_invalid}, HW_Category_SIMD, HW_Flag_Scalable|HW_Flag_BaseTypeFromFirstArg|HW_Flag_EmbeddedMaskedOperation|HW_Flag_LowMaskedOperation|HW_Flag_HasRMWSemantics)
HARDWARE_INTRINSIC(Sve, ShiftRightArithmeticForDivide, -1, -1, false, {INS_sve_asrd, INS_invalid, INS_sve_asrd, INS_invalid, INS_sve_asrd, INS_invalid, INS_sve_asrd, INS_invalid, INS_invalid, INS_invalid}, HW_Category_ShiftRightByImmediate, HW_Flag_Scalable|HW_Flag_EmbeddedMaskedOperation|HW_Flag_LowMaskedOperation|HW_Flag_HasRMWSemantics|HW_Flag_HasImmediateOperand)
diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp
index 85682710ea1992..793bed41dd2e20 100644
--- a/src/coreclr/jit/importer.cpp
+++ b/src/coreclr/jit/importer.cpp
@@ -3012,6 +3012,12 @@ GenTree* Compiler::impStoreNullableFields(CORINFO_CLASS_HANDLE nullableCls, GenT
ClassLayout* layout = valueType == TYP_STRUCT ? typGetObjLayout(valueStructCls) : nullptr;
GenTree* valueStore = gtNewStoreLclFldNode(resultTmp, valueType, layout, valueOffset, value);
+ // ABI handling for struct values
+ if (varTypeIsStruct(valueStore))
+ {
+ valueStore = impStoreStruct(valueStore, CHECK_SPILL_ALL);
+ }
+
impAppendTree(hasValueStore, CHECK_SPILL_ALL, impCurStmtDI);
impAppendTree(valueStore, CHECK_SPILL_ALL, impCurStmtDI);
return gtNewLclvNode(resultTmp, TYP_STRUCT);
diff --git a/src/coreclr/jit/importercalls.cpp b/src/coreclr/jit/importercalls.cpp
index e8de998ebed2c4..566fdceb8fdf8c 100644
--- a/src/coreclr/jit/importercalls.cpp
+++ b/src/coreclr/jit/importercalls.cpp
@@ -1553,7 +1553,7 @@ GenTree* Compiler::impThrowIfNull(GenTreeCall* call)
assert(call->gtArgs.CountUserArgs() == 2);
assert(call->TypeIs(TYP_VOID));
- if (opts.compDbgCode || opts.jitFlags->IsSet(JitFlags::JIT_FLAG_MIN_OPT))
+ if (!opts.Tier0OptimizationEnabled())
{
// Don't fold it for debug code or forced MinOpts
return call;
@@ -3302,11 +3302,7 @@ GenTree* Compiler::impIntrinsic(CORINFO_CLASS_HANDLE clsHnd,
// Allow some lighweight intrinsics in Tier0 which can improve throughput
// we're fine if intrinsic decides to not expand itself in this case unlike mustExpand.
- // NOTE: MinOpts() is always true for Tier0 so we have to check explicit flags instead.
- // To be fixed in https://github.com/dotnet/runtime/pull/77465
- const bool tier0opts = !opts.compDbgCode && !opts.jitFlags->IsSet(JitFlags::JIT_FLAG_MIN_OPT);
-
- if (!mustExpand && tier0opts)
+ if (!mustExpand && opts.Tier0OptimizationEnabled())
{
switch (ni)
{
diff --git a/src/coreclr/jit/inductionvariableopts.cpp b/src/coreclr/jit/inductionvariableopts.cpp
index fa3d627feadcc8..990100ec8e3bbc 100644
--- a/src/coreclr/jit/inductionvariableopts.cpp
+++ b/src/coreclr/jit/inductionvariableopts.cpp
@@ -1341,7 +1341,11 @@ class StrengthReductionContext
bool CheckAdvancedCursors(ArrayStack* cursors, ScevAddRec** nextIV);
bool StaysWithinManagedObject(ArrayStack* cursors, ScevAddRec* addRec);
bool TryReplaceUsesWithNewPrimaryIV(ArrayStack* cursors, ScevAddRec* iv);
- BasicBlock* FindUpdateInsertionPoint(ArrayStack* cursors);
+ BasicBlock* FindUpdateInsertionPoint(ArrayStack* cursors, Statement** afterStmt);
+ BasicBlock* FindPostUseUpdateInsertionPoint(ArrayStack* cursors,
+ BasicBlock* backEdgeDominator,
+ Statement** afterStmt);
+ bool InsertionPointPostDominatesUses(BasicBlock* insertionPoint, ArrayStack* cursors);
bool StressProfitability()
{
@@ -2000,7 +2004,8 @@ bool StrengthReductionContext::TryReplaceUsesWithNewPrimaryIV(ArrayStackgtNewOperNode(GT_ADD, iv->Type, m_comp->gtNewLclVarNode(newPrimaryIV, iv->Type), stepValue);
GenTree* stepStore = m_comp->gtNewTempStore(newPrimaryIV, nextValue);
Statement* stepStmt = m_comp->fgNewStmtFromTree(stepStore);
- m_comp->fgInsertStmtNearEnd(insertionPoint, stepStmt);
+ if (afterStmt != nullptr)
+ {
+ m_comp->fgInsertStmtAfter(insertionPoint, afterStmt, stepStmt);
+ }
+ else
+ {
+ m_comp->fgInsertStmtNearEnd(insertionPoint, stepStmt);
+ }
JITDUMP(" Inserting step statement in " FMT_BB "\n", insertionPoint->bbNum);
DISPSTMT(stepStmt);
@@ -2084,22 +2096,27 @@ bool StrengthReductionContext::TryReplaceUsesWithNewPrimaryIV(ArrayStack* cursors)
+BasicBlock* StrengthReductionContext::FindUpdateInsertionPoint(ArrayStack* cursors, Statement** afterStmt)
{
+ *afterStmt = nullptr;
+
// Find insertion point. It needs to post-dominate all uses we are going to
// replace and it needs to dominate all backedges.
// TODO-CQ: Canonicalizing backedges would make this simpler and work in
// more cases.
BasicBlock* insertionPoint = nullptr;
+
for (FlowEdge* backEdge : m_loop->BackEdges())
{
if (insertionPoint == nullptr)
@@ -2112,6 +2129,18 @@ BasicBlock* StrengthReductionContext::FindUpdateInsertionPoint(ArrayStackbbNum, (*afterStmt)->GetID());
+ return postUseInsertionPoint;
+ }
+#endif
+
while ((insertionPoint != nullptr) && m_loop->ContainsBlock(insertionPoint) &&
m_loop->MayExecuteBlockMultipleTimesPerIteration(insertionPoint))
{
@@ -2123,6 +2152,124 @@ BasicBlock* StrengthReductionContext::FindUpdateInsertionPoint(ArrayStackbbNum);
+ return insertionPoint;
+}
+
+//------------------------------------------------------------------------
+// FindPostUseUpdateInsertionPoint: Try finding an insertion point for the IV
+// update that is right after one of the uses of it.
+//
+// Parameters:
+// cursors - The list of cursors pointing to uses that are being replaced by
+// the new IV
+// backEdgeDominator - A basic block that dominates all backedges
+// afterStmt - [out] Statement to insert the update after, if the
+// return value is non-null.
+//
+// Returns:
+// nullptr if no such insertion point could be found. Otherwise returns the
+// basic block and statement after which the update can be inserted.
+//
+BasicBlock* StrengthReductionContext::FindPostUseUpdateInsertionPoint(ArrayStack* cursors,
+ BasicBlock* backEdgeDominator,
+ Statement** afterStmt)
+{
+ BitVecTraits poTraits = m_loop->GetDfsTree()->PostOrderTraits();
+
+#ifdef DEBUG
+ // We will be relying on the fact that the cursors are ordered in a useful
+ // way here: loop locals are visited in post order within each basic block,
+ // meaning that "cursors" has the last uses first for each basic block.
+ // Assert that here.
+
+ BitVec seenBlocks(BitVecOps::MakeEmpty(&poTraits));
+ for (int i = 1; i < cursors->Height(); i++)
+ {
+ CursorInfo& prevCursor = cursors->BottomRef(i - 1);
+ CursorInfo& cursor = cursors->BottomRef(i);
+
+ if (cursor.Block != prevCursor.Block)
+ {
+ assert(BitVecOps::TryAddElemD(&poTraits, seenBlocks, prevCursor.Block->bbPostorderNum));
+ continue;
+ }
+
+ Statement* curStmt = cursor.Stmt;
+ while ((curStmt != nullptr) && (curStmt != prevCursor.Stmt))
+ {
+ curStmt = curStmt->GetNextStmt();
+ }
+
+ assert(curStmt == prevCursor.Stmt);
+ }
+#endif
+
+ BitVec blocksWithUses(BitVecOps::MakeEmpty(&poTraits));
+ for (int i = 0; i < cursors->Height(); i++)
+ {
+ CursorInfo& cursor = cursors->BottomRef(i);
+ BitVecOps::AddElemD(&poTraits, blocksWithUses, cursor.Block->bbPostorderNum);
+ }
+
+ while ((backEdgeDominator != nullptr) && m_loop->ContainsBlock(backEdgeDominator))
+ {
+ if (!BitVecOps::IsMember(&poTraits, blocksWithUses, backEdgeDominator->bbPostorderNum))
+ {
+ backEdgeDominator = backEdgeDominator->bbIDom;
+ continue;
+ }
+
+ if (m_loop->MayExecuteBlockMultipleTimesPerIteration(backEdgeDominator))
+ {
+ return nullptr;
+ }
+
+ for (int i = 0; i < cursors->Height(); i++)
+ {
+ CursorInfo& cursor = cursors->BottomRef(i);
+ if (cursor.Block != backEdgeDominator)
+ {
+ continue;
+ }
+
+ if (!InsertionPointPostDominatesUses(cursor.Block, cursors))
+ {
+ return nullptr;
+ }
+
+ *afterStmt = cursor.Stmt;
+ return cursor.Block;
+ }
+ }
+
+ return nullptr;
+}
+
+//------------------------------------------------------------------------
+// InsertionPointPostDominatesUses: Check if a basic block post-dominates all
+// locations specified by the cursors.
+//
+// Parameters:
+// insertionPoint - The insertion point
+// cursors - Cursors specifying locations
+//
+// Returns:
+// True if so.
+//
+// Remarks:
+// For cursors inside "insertionPoint", the function expects that the
+// insertion point is _after_ the use, except if the use is in a terminator
+// statement.
+//
+bool StrengthReductionContext::InsertionPointPostDominatesUses(BasicBlock* insertionPoint,
+ ArrayStack* cursors)
+{
for (int i = 0; i < cursors->Height(); i++)
{
CursorInfo& cursor = cursors->BottomRef(i);
@@ -2131,19 +2278,19 @@ BasicBlock* StrengthReductionContext::FindUpdateInsertionPoint(ArrayStackHasTerminator() && (cursor.Stmt == insertionPoint->lastStmt()))
{
- return nullptr;
+ return false;
}
}
else
{
if (!m_loop->IsPostDominatedOnLoopIteration(cursor.Block, insertionPoint))
{
- return nullptr;
+ return false;
}
}
}
- return insertionPoint;
+ return true;
}
//------------------------------------------------------------------------
diff --git a/src/coreclr/jit/instr.cpp b/src/coreclr/jit/instr.cpp
index f31e8b364c6ed1..bc0300c83a846e 100644
--- a/src/coreclr/jit/instr.cpp
+++ b/src/coreclr/jit/instr.cpp
@@ -721,7 +721,13 @@ void CodeGen::inst_TT_RV(instruction ins, emitAttr size, GenTree* tree, regNumbe
unsigned varNum = tree->AsLclVarCommon()->GetLclNum();
assert(varNum < compiler->lvaCount);
#if CPU_LOAD_STORE_ARCH
+#ifdef TARGET_ARM64
+ // Workaround until https://github.com/dotnet/runtime/issues/105512 is fixed.
+ assert(GetEmitter()->emitInsIsStore(ins) || (ins == INS_sve_str));
+#else
assert(GetEmitter()->emitInsIsStore(ins));
+#endif
+
#endif
GetEmitter()->emitIns_S_R(ins, size, reg, varNum, 0);
}
diff --git a/src/coreclr/jit/lclvars.cpp b/src/coreclr/jit/lclvars.cpp
index 62d3769879b20a..ad5926522e41d3 100644
--- a/src/coreclr/jit/lclvars.cpp
+++ b/src/coreclr/jit/lclvars.cpp
@@ -63,6 +63,9 @@ void Compiler::lvaInit()
#endif // JIT32_GCENCODER
lvaNewObjArrayArgs = BAD_VAR_NUM;
lvaGSSecurityCookie = BAD_VAR_NUM;
+#ifdef TARGET_ARM64
+ lvaFfrRegister = BAD_VAR_NUM;
+#endif
#ifdef TARGET_X86
lvaVarargsBaseOfStkArgs = BAD_VAR_NUM;
#endif // TARGET_X86
@@ -898,24 +901,21 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo, unsigned skipArgs, un
}
else
#elif defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
- uint32_t floatFlags = STRUCT_NO_FLOAT_FIELD;
+ const CORINFO_FPSTRUCT_LOWERING* lowering = nullptr;
+
var_types argRegTypeInStruct1 = TYP_UNKNOWN;
var_types argRegTypeInStruct2 = TYP_UNKNOWN;
if ((strip(corInfoType) == CORINFO_TYPE_VALUECLASS) && (argSize <= MAX_PASS_MULTIREG_BYTES))
{
-#if defined(TARGET_LOONGARCH64)
- floatFlags = info.compCompHnd->getLoongArch64PassStructInRegisterFlags(typeHnd);
-#else
- floatFlags = info.compCompHnd->getRISCV64PassStructInRegisterFlags(typeHnd);
-#endif
+ lowering = GetFpStructLowering(typeHnd);
}
- if ((floatFlags & STRUCT_HAS_FLOAT_FIELDS_MASK) != 0)
+ if ((lowering != nullptr) && !lowering->byIntegerCallConv)
{
assert(varTypeIsStruct(argType));
int floatNum = 0;
- if ((floatFlags & STRUCT_FLOAT_FIELD_ONLY_ONE) != 0)
+ if (lowering->numLoweredElements == 1)
{
assert(argSize <= 8);
assert(varDsc->lvExactSize() <= argSize);
@@ -923,41 +923,26 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo, unsigned skipArgs, un
floatNum = 1;
canPassArgInRegisters = varDscInfo->canEnreg(TYP_DOUBLE, 1);
- argRegTypeInStruct1 = (varDsc->lvExactSize() == 8) ? TYP_DOUBLE : TYP_FLOAT;
+ argRegTypeInStruct1 = JITtype2varType(lowering->loweredElements[0]);
+ assert(varTypeIsFloating(argRegTypeInStruct1));
}
- else if ((floatFlags & STRUCT_FLOAT_FIELD_ONLY_TWO) != 0)
- {
- floatNum = 2;
- canPassArgInRegisters = varDscInfo->canEnreg(TYP_DOUBLE, 2);
-
- argRegTypeInStruct1 = (floatFlags & STRUCT_FIRST_FIELD_SIZE_IS8) ? TYP_DOUBLE : TYP_FLOAT;
- argRegTypeInStruct2 = (floatFlags & STRUCT_SECOND_FIELD_SIZE_IS8) ? TYP_DOUBLE : TYP_FLOAT;
- }
- else if ((floatFlags & STRUCT_FLOAT_FIELD_FIRST) != 0)
- {
- floatNum = 1;
- canPassArgInRegisters = varDscInfo->canEnreg(TYP_DOUBLE, 1);
- canPassArgInRegisters = canPassArgInRegisters && varDscInfo->canEnreg(TYP_I_IMPL, 1);
-
- argRegTypeInStruct1 = (floatFlags & STRUCT_FIRST_FIELD_SIZE_IS8) ? TYP_DOUBLE : TYP_FLOAT;
- argRegTypeInStruct2 = (floatFlags & STRUCT_SECOND_FIELD_SIZE_IS8) ? TYP_LONG : TYP_INT;
- }
- else if ((floatFlags & STRUCT_FLOAT_FIELD_SECOND) != 0)
+ else
{
- floatNum = 1;
- canPassArgInRegisters = varDscInfo->canEnreg(TYP_DOUBLE, 1);
- canPassArgInRegisters = canPassArgInRegisters && varDscInfo->canEnreg(TYP_I_IMPL, 1);
-
- argRegTypeInStruct1 = (floatFlags & STRUCT_FIRST_FIELD_SIZE_IS8) ? TYP_LONG : TYP_INT;
- argRegTypeInStruct2 = (floatFlags & STRUCT_SECOND_FIELD_SIZE_IS8) ? TYP_DOUBLE : TYP_FLOAT;
+ assert(lowering->numLoweredElements == 2);
+ argRegTypeInStruct1 = genActualType(JITtype2varType(lowering->loweredElements[0]));
+ argRegTypeInStruct2 = genActualType(JITtype2varType(lowering->loweredElements[1]));
+ floatNum = (int)varTypeIsFloating(argRegTypeInStruct1) + (int)varTypeIsFloating(argRegTypeInStruct2);
+ canPassArgInRegisters = varDscInfo->canEnreg(TYP_DOUBLE, floatNum);
+ if (floatNum == 1)
+ canPassArgInRegisters = canPassArgInRegisters && varDscInfo->canEnreg(TYP_I_IMPL, 1);
}
assert((floatNum == 1) || (floatNum == 2));
if (!canPassArgInRegisters)
{
- // On LoongArch64, if there aren't any remaining floating-point registers to pass the argument,
- // integer registers (if any) are used instead.
+ // On LoongArch64 and RISCV64, if there aren't any remaining floating-point registers to pass the
+ // argument, integer registers (if any) are used instead.
canPassArgInRegisters = varDscInfo->canEnreg(argType, cSlotsToEnregister);
argRegTypeInStruct1 = TYP_UNKNOWN;
@@ -1091,13 +1076,11 @@ void Compiler::lvaInitUserArgs(InitVarDscInfo* varDscInfo, unsigned skipArgs, un
{
varDsc->SetArgReg(
genMapRegArgNumToRegNum(firstAllocatedRegArgNum, argRegTypeInStruct1, info.compCallConv));
- varDsc->lvIs4Field1 = (genTypeSize(argRegTypeInStruct1) == 4) ? 1 : 0;
if (argRegTypeInStruct2 != TYP_UNKNOWN)
{
secondAllocatedRegArgNum = varDscInfo->allocRegArg(argRegTypeInStruct2, 1);
varDsc->SetOtherArgReg(
genMapRegArgNumToRegNum(secondAllocatedRegArgNum, argRegTypeInStruct2, info.compCallConv));
- varDsc->lvIs4Field2 = (genTypeSize(argRegTypeInStruct2) == 4) ? 1 : 0;
}
else if (cSlots > 1)
{
@@ -1675,9 +1658,7 @@ void Compiler::lvaInitVarDsc(LclVarDsc* varDsc,
varDsc->lvIsImplicitByRef = 0;
#endif // FEATURE_IMPLICIT_BYREFS
#if defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
- varDsc->lvIs4Field1 = 0;
- varDsc->lvIs4Field2 = 0;
- varDsc->lvIsSplit = 0;
+ varDsc->lvIsSplit = 0;
#endif // TARGET_LOONGARCH64 || TARGET_RISCV64
// Set the lvType (before this point it is TYP_UNDEF).
diff --git a/src/coreclr/jit/lir.cpp b/src/coreclr/jit/lir.cpp
index feabec03d02cad..44570ea9864041 100644
--- a/src/coreclr/jit/lir.cpp
+++ b/src/coreclr/jit/lir.cpp
@@ -1872,6 +1872,22 @@ GenTree* LIR::LastNode(GenTree** nodes, size_t numNodes)
return lastNode;
}
+//------------------------------------------------------------------------
+// LIR::FirstNode:
+// Given two nodes in the same block range, find which node appears first.
+//
+// Arguments:
+// node1 - The first node
+// node2 - The second node
+//
+// Returns:
+// Node that appears first.
+//
+GenTree* LIR::FirstNode(GenTree* node1, GenTree* node2)
+{
+ return LastNode(node1, node2) == node1 ? node2 : node1;
+}
+
#ifdef DEBUG
void GenTree::dumpLIRFlags()
{
diff --git a/src/coreclr/jit/lir.h b/src/coreclr/jit/lir.h
index 8a3a9a507a38bb..a3271e832fa8de 100644
--- a/src/coreclr/jit/lir.h
+++ b/src/coreclr/jit/lir.h
@@ -317,6 +317,7 @@ class LIR final
static GenTree* LastNode(GenTree* node1, GenTree* node2);
static GenTree* LastNode(GenTree** nodes, size_t numNodes);
+ static GenTree* FirstNode(GenTree* node1, GenTree* node2);
};
inline void GenTree::SetUnusedValue()
diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp
index d57d1b893d68af..aa0581a8ceb397 100644
--- a/src/coreclr/jit/lower.cpp
+++ b/src/coreclr/jit/lower.cpp
@@ -479,6 +479,9 @@ GenTree* Lowering::LowerNode(GenTree* node)
{
return newNode;
}
+#ifdef TARGET_ARM64
+ m_ffrTrashed = true;
+#endif
}
break;
@@ -632,8 +635,7 @@ GenTree* Lowering::LowerNode(GenTree* node)
FALLTHROUGH;
case GT_STORE_LCL_FLD:
- LowerStoreLocCommon(node->AsLclVarCommon());
- break;
+ return LowerStoreLocCommon(node->AsLclVarCommon());
#if defined(TARGET_ARM64) || defined(TARGET_LOONGARCH64) || defined(TARGET_RISCV64)
case GT_CMPXCHG:
@@ -3058,52 +3060,22 @@ void Lowering::LowerFastTailCall(GenTreeCall* call)
// call could over-write the stack arg that is setup earlier.
ArrayStack putargs(comp->getAllocator(CMK_ArrayStack));
- for (CallArg& arg : call->gtArgs.EarlyArgs())
- {
- if (arg.GetEarlyNode()->OperIs(GT_PUTARG_STK))
- {
- putargs.Push(arg.GetEarlyNode());
- }
- }
-
- for (CallArg& arg : call->gtArgs.LateArgs())
+ for (CallArg& arg : call->gtArgs.Args())
{
- if (arg.GetLateNode()->OperIs(GT_PUTARG_STK))
+ if (arg.GetNode()->OperIs(GT_PUTARG_STK))
{
- putargs.Push(arg.GetLateNode());
+ putargs.Push(arg.GetNode());
}
}
GenTree* startNonGCNode = nullptr;
if (!putargs.Empty())
{
- // Get the earliest operand of the first PUTARG_STK node. We will make
- // the required copies of args before this node.
- bool unused;
- GenTree* insertionPoint = BlockRange().GetTreeRange(putargs.Bottom(), &unused).FirstNode();
- // Insert GT_START_NONGC node before we evaluate the PUTARG_STK args.
- // Note that if there are no args to be setup on stack, no need to
- // insert GT_START_NONGC node.
- startNonGCNode = new (comp, GT_START_NONGC) GenTree(GT_START_NONGC, TYP_VOID);
- BlockRange().InsertBefore(insertionPoint, startNonGCNode);
-
- // Gc-interruptability in the following case:
- // foo(a, b, c, d, e) { bar(a, b, c, d, e); }
- // bar(a, b, c, d, e) { foo(a, b, d, d, e); }
- //
- // Since the instruction group starting from the instruction that sets up first
- // stack arg to the end of the tail call is marked as non-gc interruptible,
- // this will form a non-interruptible tight loop causing gc-starvation. To fix
- // this we insert GT_NO_OP as embedded stmt before GT_START_NONGC, if the method
- // has a single basic block and is not a GC-safe point. The presence of a single
- // nop outside non-gc interruptible region will prevent gc starvation.
- if ((comp->fgBBcount == 1) && !comp->compCurBB->HasFlag(BBF_GC_SAFE_POINT))
+ GenTree* firstPutargStk = putargs.Bottom(0);
+ for (int i = 1; i < putargs.Height(); i++)
{
- assert(comp->fgFirstBB == comp->compCurBB);
- GenTree* noOp = new (comp, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
- BlockRange().InsertBefore(startNonGCNode, noOp);
+ firstPutargStk = LIR::FirstNode(firstPutargStk, putargs.Bottom(i));
}
-
// Since this is a fast tailcall each PUTARG_STK will place the argument in the
// _incoming_ arg space area. This will effectively overwrite our already existing
// incoming args that live in that area. If we have later uses of those args, this
@@ -3173,10 +3145,10 @@ void Lowering::LowerFastTailCall(GenTreeCall* call)
GenTree* lookForUsesFrom = put->gtNext;
if (overwrittenStart != argStart)
{
- lookForUsesFrom = insertionPoint;
+ lookForUsesFrom = firstPutargStk;
}
- RehomeArgForFastTailCall(callerArgLclNum, insertionPoint, lookForUsesFrom, call);
+ RehomeArgForFastTailCall(callerArgLclNum, firstPutargStk, lookForUsesFrom, call);
// The above call can introduce temps and invalidate the pointer.
callerArgDsc = comp->lvaGetDesc(callerArgLclNum);
@@ -3190,10 +3162,33 @@ void Lowering::LowerFastTailCall(GenTreeCall* call)
unsigned int fieldsEnd = fieldsFirst + callerArgDsc->lvFieldCnt;
for (unsigned int j = fieldsFirst; j < fieldsEnd; j++)
{
- RehomeArgForFastTailCall(j, insertionPoint, lookForUsesFrom, call);
+ RehomeArgForFastTailCall(j, firstPutargStk, lookForUsesFrom, call);
}
}
}
+
+ // Now insert GT_START_NONGC node before we evaluate the first PUTARG_STK.
+ // Note that if there are no args to be setup on stack, no need to
+ // insert GT_START_NONGC node.
+ startNonGCNode = new (comp, GT_START_NONGC) GenTree(GT_START_NONGC, TYP_VOID);
+ BlockRange().InsertBefore(firstPutargStk, startNonGCNode);
+
+ // Gc-interruptability in the following case:
+ // foo(a, b, c, d, e) { bar(a, b, c, d, e); }
+ // bar(a, b, c, d, e) { foo(a, b, d, d, e); }
+ //
+ // Since the instruction group starting from the instruction that sets up first
+ // stack arg to the end of the tail call is marked as non-gc interruptible,
+ // this will form a non-interruptible tight loop causing gc-starvation. To fix
+ // this we insert GT_NO_OP as embedded stmt before GT_START_NONGC, if the method
+ // has a single basic block and is not a GC-safe point. The presence of a single
+ // nop outside non-gc interruptible region will prevent gc starvation.
+ if ((comp->fgBBcount == 1) && !comp->compCurBB->HasFlag(BBF_GC_SAFE_POINT))
+ {
+ assert(comp->fgFirstBB == comp->compCurBB);
+ GenTree* noOp = new (comp, GT_NO_OP) GenTree(GT_NO_OP, TYP_VOID);
+ BlockRange().InsertBefore(startNonGCNode, noOp);
+ }
}
// Insert GT_PROF_HOOK node to emit profiler tail call hook. This should be
@@ -4783,7 +4778,10 @@ void Lowering::LowerRet(GenTreeOp* ret)
// Arguments:
// lclStore - The store lcl node to lower.
//
-void Lowering::LowerStoreLocCommon(GenTreeLclVarCommon* lclStore)
+// Returns:
+// Next node to lower.
+//
+GenTree* Lowering::LowerStoreLocCommon(GenTreeLclVarCommon* lclStore)
{
assert(lclStore->OperIs(GT_STORE_LCL_FLD, GT_STORE_LCL_VAR));
JITDUMP("lowering store lcl var/field (before):\n");
@@ -4870,8 +4868,7 @@ void Lowering::LowerStoreLocCommon(GenTreeLclVarCommon* lclStore)
lclStore->gtOp1 = spilledCall;
src = lclStore->gtOp1;
JITDUMP("lowering store lcl var/field has to spill call src.\n");
- LowerStoreLocCommon(lclStore);
- return;
+ return LowerStoreLocCommon(lclStore);
}
#endif // !WINDOWS_AMD64_ABI
convertToStoreObj = false;
@@ -4966,7 +4963,7 @@ void Lowering::LowerStoreLocCommon(GenTreeLclVarCommon* lclStore)
DISPTREERANGE(BlockRange(), objStore);
JITDUMP("\n");
- return;
+ return objStore->gtNext;
}
}
@@ -4984,11 +4981,13 @@ void Lowering::LowerStoreLocCommon(GenTreeLclVarCommon* lclStore)
ContainCheckBitCast(bitcast);
}
- LowerStoreLoc(lclStore);
+ GenTree* next = LowerStoreLoc(lclStore);
JITDUMP("lowering store lcl var/field (after):\n");
DISPTREERANGE(BlockRange(), lclStore);
JITDUMP("\n");
+
+ return next;
}
//----------------------------------------------------------------------------------------------
@@ -7902,6 +7901,7 @@ void Lowering::LowerBlock(BasicBlock* block)
m_block = block;
#ifdef TARGET_ARM64
m_blockIndirs.Reset();
+ m_ffrTrashed = true;
#endif
// NOTE: some of the lowering methods insert calls before the node being
diff --git a/src/coreclr/jit/lower.h b/src/coreclr/jit/lower.h
index 81e337abdaf668..ccfe965ed6d88e 100644
--- a/src/coreclr/jit/lower.h
+++ b/src/coreclr/jit/lower.h
@@ -157,7 +157,7 @@ class Lowering final : public Phase
GenTreeCC* LowerNodeCC(GenTree* node, GenCondition condition);
void LowerJmpMethod(GenTree* jmp);
void LowerRet(GenTreeOp* ret);
- void LowerStoreLocCommon(GenTreeLclVarCommon* lclVar);
+ GenTree* LowerStoreLocCommon(GenTreeLclVarCommon* lclVar);
void LowerRetStruct(GenTreeUnOp* ret);
void LowerRetSingleRegStructLclVar(GenTreeUnOp* ret);
void LowerCallStruct(GenTreeCall* call);
@@ -353,6 +353,8 @@ class Lowering final : public Phase
GenTree* LowerIndir(GenTreeIndir* ind);
bool OptimizeForLdpStp(GenTreeIndir* ind);
bool TryMakeIndirsAdjacent(GenTreeIndir* prevIndir, GenTreeIndir* indir);
+ bool TryMoveAddSubRMWAfterIndir(GenTreeLclVarCommon* store);
+ bool TryMakeIndirAndStoreAdjacent(GenTreeIndir* prevIndir, GenTreeLclVarCommon* store);
void MarkTree(GenTree* root);
void UnmarkTree(GenTree* root);
GenTree* LowerStoreIndir(GenTreeStoreInd* node);
@@ -401,11 +403,11 @@ class Lowering final : public Phase
bool LowerRMWMemOp(GenTreeIndir* storeInd);
#endif
- void WidenSIMD12IfNecessary(GenTreeLclVarCommon* node);
- bool CheckMultiRegLclVar(GenTreeLclVar* lclNode, int registerCount);
- void LowerStoreLoc(GenTreeLclVarCommon* tree);
- void LowerRotate(GenTree* tree);
- void LowerShift(GenTreeOp* shift);
+ void WidenSIMD12IfNecessary(GenTreeLclVarCommon* node);
+ bool CheckMultiRegLclVar(GenTreeLclVar* lclNode, int registerCount);
+ GenTree* LowerStoreLoc(GenTreeLclVarCommon* tree);
+ void LowerRotate(GenTree* tree);
+ void LowerShift(GenTreeOp* shift);
#ifdef FEATURE_HW_INTRINSICS
GenTree* LowerHWIntrinsic(GenTreeHWIntrinsic* node);
void LowerHWIntrinsicCC(GenTreeHWIntrinsic* node, NamedIntrinsic newIntrinsicId, GenCondition condition);
@@ -429,6 +431,7 @@ class Lowering final : public Phase
void LowerHWIntrinsicFusedMultiplyAddScalar(GenTreeHWIntrinsic* node);
void LowerModPow2(GenTree* node);
bool TryLowerAddForPossibleContainment(GenTreeOp* node, GenTree** next);
+ void StoreFFRValue(GenTreeHWIntrinsic* node);
#endif // !TARGET_XARCH && !TARGET_ARM64
GenTree* InsertNewSimdCreateScalarUnsafeNode(var_types type,
GenTree* op1,
@@ -629,6 +632,7 @@ class Lowering final : public Phase
}
};
ArrayStack m_blockIndirs;
+ bool m_ffrTrashed;
#endif
};
diff --git a/src/coreclr/jit/lowerarmarch.cpp b/src/coreclr/jit/lowerarmarch.cpp
index 855c4f21917e50..a227d8ac14148a 100644
--- a/src/coreclr/jit/lowerarmarch.cpp
+++ b/src/coreclr/jit/lowerarmarch.cpp
@@ -472,7 +472,10 @@ bool Lowering::IsContainableUnaryOrBinaryOp(GenTree* parentNode, GenTree* childN
// This involves:
// - Widening small stores (on ARM).
//
-void Lowering::LowerStoreLoc(GenTreeLclVarCommon* storeLoc)
+// Returns:
+// Next node to lower.
+//
+GenTree* Lowering::LowerStoreLoc(GenTreeLclVarCommon* storeLoc)
{
#ifdef TARGET_ARM
// On ARM, small stores can cost a bit more in terms of code size so we try to widen them. This is legal
@@ -495,6 +498,17 @@ void Lowering::LowerStoreLoc(GenTreeLclVarCommon* storeLoc)
}
ContainCheckStoreLoc(storeLoc);
+
+ GenTree* next = storeLoc->gtNext;
+
+#ifdef TARGET_ARM64
+ if (comp->opts.OptimizationEnabled())
+ {
+ TryMoveAddSubRMWAfterIndir(storeLoc);
+ }
+#endif
+
+ return next;
}
//------------------------------------------------------------------------
@@ -1053,6 +1067,203 @@ void Lowering::LowerModPow2(GenTree* node)
ContainCheckNode(mod);
}
+const int POST_INDEXED_ADDRESSING_MAX_DISTANCE = 16;
+
+//------------------------------------------------------------------------
+// TryMoveAddSubRMWAfterIndir: Try to move an RMW update of a local with an
+// ADD/SUB operand earlier to happen right after an indirection on the same
+// local, attempting to make these combinable into post-indexed addressing.
+//
+// Arguments:
+// store - The store to a local
+//
+// Return Value:
+// True if the store was moved; otherwise false.
+//
+bool Lowering::TryMoveAddSubRMWAfterIndir(GenTreeLclVarCommon* store)
+{
+ if (!store->OperIs(GT_STORE_LCL_VAR))
+ {
+ return false;
+ }
+
+ unsigned lclNum = store->GetLclNum();
+ if (comp->lvaGetDesc(lclNum)->lvDoNotEnregister)
+ {
+ return false;
+ }
+
+ GenTree* data = store->Data();
+ if (!data->OperIs(GT_ADD, GT_SUB) || data->gtOverflow())
+ {
+ return false;
+ }
+
+ GenTree* op1 = data->gtGetOp1();
+ GenTree* op2 = data->gtGetOp2();
+ if (!op1->OperIs(GT_LCL_VAR) || !op2->isContainedIntOrIImmed())
+ {
+ return false;
+ }
+
+ if (op1->AsLclVarCommon()->GetLclNum() != lclNum)
+ {
+ return false;
+ }
+
+ int maxCount = min(m_blockIndirs.Height(), POST_INDEXED_ADDRESSING_MAX_DISTANCE / 2);
+ for (int i = 0; i < maxCount; i++)
+ {
+ SavedIndir& prev = m_blockIndirs.TopRef(i);
+ if ((prev.AddrBase->GetLclNum() != lclNum) || (prev.Offset != 0))
+ {
+ continue;
+ }
+
+ GenTreeIndir* prevIndir = prev.Indir;
+ if ((prevIndir == nullptr) || (prevIndir->gtNext == nullptr))
+ {
+ continue;
+ }
+
+ JITDUMP(
+ "[%06u] is an RMW ADD/SUB on local V%02u which is used as the address to [%06u]. Trying to make them adjacent.\n",
+ Compiler::dspTreeID(store), lclNum, Compiler::dspTreeID(prevIndir));
+
+ if (TryMakeIndirAndStoreAdjacent(prevIndir, store))
+ {
+ prev.Indir = nullptr;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+//------------------------------------------------------------------------
+// TryMakeIndirAndStoreAdjacent: Try to move a store earlier, right after the
+// specified indirection.
+//
+// Arguments:
+// prevIndir - Indirection that comes before "store"
+// store - Store that we want to happen next to the indirection
+//
+// Return Value:
+// True if the store was moved; otherwise false.
+//
+bool Lowering::TryMakeIndirAndStoreAdjacent(GenTreeIndir* prevIndir, GenTreeLclVarCommon* store)
+{
+ GenTree* cur = prevIndir;
+ for (int i = 0; i < POST_INDEXED_ADDRESSING_MAX_DISTANCE; i++)
+ {
+ cur = cur->gtNext;
+ if (cur == store)
+ break;
+ }
+
+ if (cur != store)
+ {
+ JITDUMP(" Too far separated, giving up\n");
+ return false;
+ }
+
+ JITDUMP(" They are close. Trying to move the following range (where * are nodes part of the data flow):\n\n");
+#ifdef DEBUG
+ bool isClosed;
+ GenTree* startDumpNode = BlockRange().GetTreeRange(prevIndir, &isClosed).FirstNode();
+ GenTree* endDumpNode = store->gtNext;
+
+ auto dumpWithMarks = [=]() {
+ if (!comp->verbose)
+ {
+ return;
+ }
+
+ for (GenTree* node = startDumpNode; node != endDumpNode; node = node->gtNext)
+ {
+ const char* prefix;
+ if (node == prevIndir)
+ prefix = "1. ";
+ else if (node == store)
+ prefix = "2. ";
+ else if ((node->gtLIRFlags & LIR::Flags::Mark) != 0)
+ prefix = "* ";
+ else
+ prefix = " ";
+
+ comp->gtDispLIRNode(node, prefix);
+ }
+ };
+
+#endif
+
+ MarkTree(store);
+
+ INDEBUG(dumpWithMarks());
+ JITDUMP("\n");
+
+ assert((prevIndir->gtLIRFlags & LIR::Flags::Mark) == 0);
+ m_scratchSideEffects.Clear();
+
+ for (GenTree* cur = prevIndir->gtNext; cur != store; cur = cur->gtNext)
+ {
+ if ((cur->gtLIRFlags & LIR::Flags::Mark) != 0)
+ {
+ // 'cur' is part of data flow of 'store', so we will be moving the
+ // currently recorded effects past 'cur'.
+ if (m_scratchSideEffects.InterferesWith(comp, cur, true))
+ {
+ JITDUMP("Giving up due to interference with [%06u]\n", Compiler::dspTreeID(cur));
+ UnmarkTree(store);
+ return false;
+ }
+ }
+ else
+ {
+ // Not part of dataflow; add its effects that will move past
+ // 'store'.
+ m_scratchSideEffects.AddNode(comp, cur);
+ }
+ }
+
+ if (m_scratchSideEffects.InterferesWith(comp, store, true))
+ {
+ JITDUMP("Have interference. Giving up.\n");
+ UnmarkTree(store);
+ return false;
+ }
+
+ JITDUMP("Interference checks passed. Moving nodes that are not part of data flow of [%06u]\n\n",
+ Compiler::dspTreeID(store));
+
+ GenTree* previous = prevIndir;
+ for (GenTree* node = prevIndir->gtNext;;)
+ {
+ GenTree* next = node->gtNext;
+
+ if ((node->gtLIRFlags & LIR::Flags::Mark) != 0)
+ {
+ // Part of data flow. Move it to happen right after 'previous'.
+ BlockRange().Remove(node);
+ BlockRange().InsertAfter(previous, node);
+ previous = node;
+ }
+
+ if (node == store)
+ {
+ break;
+ }
+
+ node = next;
+ }
+
+ JITDUMP("Result:\n\n");
+ INDEBUG(dumpWithMarks());
+ JITDUMP("\n");
+ UnmarkTree(store);
+ return true;
+}
+
//------------------------------------------------------------------------
// LowerAddForPossibleContainment: Tries to lower GT_ADD in such a way
// that would allow one of its operands
@@ -1531,6 +1742,73 @@ GenTree* Lowering::LowerHWIntrinsic(GenTreeHWIntrinsic* node)
break;
case NI_Sve_ConditionalSelect:
return LowerHWIntrinsicCndSel(node);
+ case NI_Sve_SetFfr:
+ {
+ StoreFFRValue(node);
+ break;
+ }
+ case NI_Sve_GetFfrByte:
+ case NI_Sve_GetFfrInt16:
+ case NI_Sve_GetFfrInt32:
+ case NI_Sve_GetFfrInt64:
+ case NI_Sve_GetFfrSByte:
+ case NI_Sve_GetFfrUInt16:
+ case NI_Sve_GetFfrUInt32:
+ case NI_Sve_GetFfrUInt64:
+ {
+ LIR::Use use;
+ bool foundUse = BlockRange().TryGetUse(node, &use);
+ if (foundUse)
+ {
+ unsigned lclNum = comp->getFFRegisterVarNum();
+ GenTree* lclVar = comp->gtNewLclvNode(lclNum, TYP_MASK);
+ BlockRange().InsertBefore(node, lclVar);
+ use.ReplaceWith(lclVar);
+ GenTree* next = node->gtNext;
+ BlockRange().Remove(node);
+ return next;
+ }
+ else
+ {
+ node->SetUnusedValue();
+ }
+
+ break;
+ }
+ case NI_Sve_LoadVectorFirstFaulting:
+ {
+ LIR::Use use;
+ bool foundUse = BlockRange().TryGetUse(node, &use);
+
+ if (m_ffrTrashed)
+ {
+ // Consume the FFR register value from local variable to simulate "use" of FFR,
+ // only if it was trashed. If it was not trashed, we do not have to reload the
+ // contents of the FFR register.
+
+ GenTree* lclVar = comp->gtNewLclvNode(comp->lvaFfrRegister, TYP_MASK);
+ BlockRange().InsertBefore(node, lclVar);
+ LowerNode(lclVar);
+
+ node->ResetHWIntrinsicId(intrinsicId, comp, node->Op(1), node->Op(2), lclVar);
+ }
+
+ if (foundUse)
+ {
+ unsigned tmpNum = comp->lvaGrabTemp(true DEBUGARG("Return value result/FFR"));
+ LclVarDsc* tmpVarDsc = comp->lvaGetDesc(tmpNum);
+ tmpVarDsc->lvType = node->TypeGet();
+ GenTree* storeLclVar;
+ use.ReplaceWithLclVar(comp, tmpNum, &storeLclVar);
+ }
+ else
+ {
+ node->SetUnusedValue();
+ }
+
+ StoreFFRValue(node);
+ break;
+ }
default:
break;
}
@@ -3790,6 +4068,37 @@ GenTree* Lowering::LowerHWIntrinsicCndSel(GenTreeHWIntrinsic* cndSelNode)
ContainCheckHWIntrinsic(cndSelNode);
return cndSelNode->gtNext;
}
+
+#if defined(TARGET_ARM64)
+//----------------------------------------------------------------------------------------------
+// StoreFFRValue: For hwintrinsic that produce a first faulting register (FFR) value, create
+// nodes to save its value to a local variable.
+//
+// Arguments:
+// node - The node after which the FFR register value is stored to the local
+//
+void Lowering::StoreFFRValue(GenTreeHWIntrinsic* node)
+{
+#ifdef DEBUG
+ switch (node->GetHWIntrinsicId())
+ {
+ case NI_Sve_SetFfr:
+ case NI_Sve_LoadVectorFirstFaulting:
+ break;
+ default:
+ assert(!"Unexpected HWIntrinsicId");
+ }
+#endif
+
+ // Create physReg FFR definition to store FFR register.
+ unsigned lclNum = comp->getFFRegisterVarNum();
+ GenTree* ffrReg = comp->gtNewPhysRegNode(REG_FFR, TYP_MASK);
+ GenTree* storeLclVar = comp->gtNewStoreLclVarNode(lclNum, ffrReg);
+ BlockRange().InsertAfter(node, ffrReg, storeLclVar);
+ m_ffrTrashed = false;
+}
+#endif // TARGET_ARM64
+
#endif // FEATURE_HW_INTRINSICS
#endif // TARGET_ARMARCH
diff --git a/src/coreclr/jit/lowerloongarch64.cpp b/src/coreclr/jit/lowerloongarch64.cpp
index ffdf11c44ac37e..72435464dbb099 100644
--- a/src/coreclr/jit/lowerloongarch64.cpp
+++ b/src/coreclr/jit/lowerloongarch64.cpp
@@ -249,7 +249,10 @@ GenTree* Lowering::LowerBinaryArithmetic(GenTreeOp* binOp)
// This involves:
// - Widening operations of unsigneds.
//
-void Lowering::LowerStoreLoc(GenTreeLclVarCommon* storeLoc)
+// Returns:
+// Next node to lower.
+//
+GenTree* Lowering::LowerStoreLoc(GenTreeLclVarCommon* storeLoc)
{
if (storeLoc->OperIs(GT_STORE_LCL_FLD))
{
@@ -257,6 +260,7 @@ void Lowering::LowerStoreLoc(GenTreeLclVarCommon* storeLoc)
verifyLclFldDoNotEnregister(storeLoc->GetLclNum());
}
ContainCheckStoreLoc(storeLoc);
+ return storeLoc->gtNext;
}
//------------------------------------------------------------------------
diff --git a/src/coreclr/jit/lowerriscv64.cpp b/src/coreclr/jit/lowerriscv64.cpp
index 8b74b1cc317813..55c4182430461d 100644
--- a/src/coreclr/jit/lowerriscv64.cpp
+++ b/src/coreclr/jit/lowerriscv64.cpp
@@ -198,7 +198,10 @@ GenTree* Lowering::LowerBinaryArithmetic(GenTreeOp* binOp)
// This involves:
// - Widening operations of unsigneds.
//
-void Lowering::LowerStoreLoc(GenTreeLclVarCommon* storeLoc)
+// Returns:
+// Next node to lower.
+//
+GenTree* Lowering::LowerStoreLoc(GenTreeLclVarCommon* storeLoc)
{
if (storeLoc->OperIs(GT_STORE_LCL_FLD))
{
@@ -206,6 +209,7 @@ void Lowering::LowerStoreLoc(GenTreeLclVarCommon* storeLoc)
verifyLclFldDoNotEnregister(storeLoc->GetLclNum());
}
ContainCheckStoreLoc(storeLoc);
+ return storeLoc->gtNext;
}
//------------------------------------------------------------------------
diff --git a/src/coreclr/jit/lowerxarch.cpp b/src/coreclr/jit/lowerxarch.cpp
index db997aa3f426cb..3f441215e37123 100644
--- a/src/coreclr/jit/lowerxarch.cpp
+++ b/src/coreclr/jit/lowerxarch.cpp
@@ -43,7 +43,10 @@ void Lowering::LowerRotate(GenTree* tree)
// - Handling of contained immediates.
// - Widening some small stores.
//
-void Lowering::LowerStoreLoc(GenTreeLclVarCommon* storeLoc)
+// Returns:
+// Next tree to lower.
+//
+GenTree* Lowering::LowerStoreLoc(GenTreeLclVarCommon* storeLoc)
{
// Most small locals (the exception is dependently promoted fields) have 4 byte wide stack slots, so
// we can widen the store, if profitable. The widening is only (largely) profitable for 2 byte stores.
@@ -64,6 +67,7 @@ void Lowering::LowerStoreLoc(GenTreeLclVarCommon* storeLoc)
}
ContainCheckStoreLoc(storeLoc);
+ return storeLoc->gtNext;
}
//------------------------------------------------------------------------
@@ -1566,6 +1570,21 @@ GenTree* Lowering::LowerHWIntrinsic(GenTreeHWIntrinsic* node)
op2->SetUnusedValue();
}
+ // Since we have a double negation, it's possible that gtNext
+ // is op1 or user. If it is op1, then it's also possible the
+ // subsequent gtNext is user. We need to make sure to skip both
+ // in such a scenario since we're removing them.
+
+ if (nextNode == op1)
+ {
+ nextNode = nextNode->gtNext;
+ }
+
+ if (nextNode == user)
+ {
+ nextNode = nextNode->gtNext;
+ }
+
BlockRange().Remove(op3);
BlockRange().Remove(op1);
BlockRange().Remove(user);
diff --git a/src/coreclr/jit/lsra.h b/src/coreclr/jit/lsra.h
index 59963e02d7438f..b04af2082c90fe 100644
--- a/src/coreclr/jit/lsra.h
+++ b/src/coreclr/jit/lsra.h
@@ -511,7 +511,11 @@ class RegRecord : public Referenceable
#if defined(FEATURE_MASKED_HW_INTRINSICS)
else
{
+#ifdef TARGET_ARM64
+ assert(emitter::isMaskReg(reg) || (reg == REG_FFR));
+#else
assert(emitter::isMaskReg(reg));
+#endif
registerType = MaskRegisterType;
}
#endif // FEATURE_MASKED_HW_INTRINSICS
diff --git a/src/coreclr/jit/lsraarm64.cpp b/src/coreclr/jit/lsraarm64.cpp
index 5d6cf7f1945e4c..2352fa5ec0239a 100644
--- a/src/coreclr/jit/lsraarm64.cpp
+++ b/src/coreclr/jit/lsraarm64.cpp
@@ -607,9 +607,24 @@ int LinearScan::BuildNode(GenTree* tree)
switch (tree->OperGet())
{
default:
+ {
srcCount = BuildSimple(tree);
break;
-
+ }
+ case GT_PHYSREG:
+ {
+ srcCount = 0;
+ if (varTypeIsMask(tree))
+ {
+ assert(tree->AsPhysReg()->gtSrcReg == REG_FFR);
+ BuildDef(tree, getSingleTypeRegMask(tree->AsPhysReg()->gtSrcReg, TYP_MASK));
+ }
+ else
+ {
+ BuildSimple(tree);
+ }
+ break;
+ }
case GT_LCL_VAR:
// We make a final determination about whether a GT_LCL_VAR is a candidate or contained
// after liveness. In either case we don't build any uses or defs. Otherwise, this is a
diff --git a/src/coreclr/jit/lsrabuild.cpp b/src/coreclr/jit/lsrabuild.cpp
index 46577c8050fef3..ee6d423db46d9a 100644
--- a/src/coreclr/jit/lsrabuild.cpp
+++ b/src/coreclr/jit/lsrabuild.cpp
@@ -2841,10 +2841,7 @@ void LinearScan::buildIntervals()
#ifdef HAS_MORE_THAN_64_REGISTERS
else if (availableRegCount < (sizeof(regMaskTP) * 8))
{
- // Mask out the bits that are between (8 * regMaskTP) ~ availableRegCount
- // Subtract one extra for stack.
- unsigned topRegCount = availableRegCount - sizeof(regMaskSmall) * 8 - 1;
- actualRegistersMask = regMaskTP(~RBM_NONE, (1ULL << topRegCount) - 1);
+ actualRegistersMask = regMaskTP(~RBM_NONE, availableMaskRegs);
}
#endif
else
diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp
index 3b971780ff232e..d9ef8f039c90e2 100644
--- a/src/coreclr/jit/morph.cpp
+++ b/src/coreclr/jit/morph.cpp
@@ -1103,7 +1103,7 @@ void CallArgs::ArgsComplete(Compiler* comp, GenTreeCall* call)
// TODO-CQ: handle HWI/SIMD/COMMA nodes in multi-reg morphing.
SetNeedsTemp(&arg);
}
- else
+ else if (comp->opts.OptimizationEnabled())
{
// Finally, we call gtPrepareCost to measure the cost of evaluating this tree.
comp->gtPrepareCost(argx);
@@ -1476,7 +1476,7 @@ void CallArgs::SortArgs(Compiler* comp, GenTreeCall* call, CallArg** sortedArgs)
assert(begTab == endTab);
break;
}
- else
+ else if (comp->opts.OptimizationEnabled())
{
if (!costsPrepared)
{
@@ -1492,6 +1492,12 @@ void CallArgs::SortArgs(Compiler* comp, GenTreeCall* call, CallArg** sortedArgs)
expensiveArg = arg;
}
}
+ else
+ {
+ // We don't have cost information in MinOpts
+ expensiveArgIndex = curInx;
+ expensiveArg = arg;
+ }
}
}
@@ -2116,7 +2122,6 @@ void CallArgs::AddFinalArgsAndDetermineABIInfo(Compiler* comp, GenTreeCall* call
if (arg.NewAbiInfo.HasAnyFloatingRegisterSegment())
{
// Struct passed according to hardware floating-point calling convention
- assert(arg.NewAbiInfo.NumSegments <= 2);
assert(!arg.NewAbiInfo.HasAnyStackSegment());
if (arg.NewAbiInfo.NumSegments == 2)
{
@@ -3283,8 +3288,8 @@ void Compiler::fgMakeOutgoingStructArgCopy(GenTreeCall* call, CallArg* arg)
bool found = false;
// Attempt to find a local we have already used for an outgoing struct and reuse it.
- // We do not reuse within a statement.
- if (!opts.MinOpts())
+ // We do not reuse within a statement and we don't reuse if we're in LIR
+ if (!opts.MinOpts() && (fgOrder == FGOrderTree))
{
found = ForEachHbvBitSet(*fgAvailableOutgoingArgTemps, [&](indexType lclNum) {
LclVarDsc* varDsc = lvaGetDesc((unsigned)lclNum);
@@ -8028,10 +8033,7 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac, bool* optA
GenTree* oldTree = tree;
- if (opts.OptimizationEnabled())
- {
- tree = gtFoldExpr(tree);
- }
+ tree = gtFoldExpr(tree);
// Were we able to fold it ?
// Note that gtFoldExpr may return a non-leaf even if successful
@@ -8330,11 +8332,8 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac, bool* optA
qmarkOp2 = oldTree->AsOp()->gtOp2->AsOp()->gtOp2;
}
- if (opts.OptimizationEnabled())
- {
- // Try to fold it, maybe we get lucky,
- tree = gtFoldExpr(tree);
- }
+ // Try to fold it, maybe we get lucky,
+ tree = gtFoldExpr(tree);
if (oldTree != tree)
{
@@ -12360,11 +12359,8 @@ GenTree* Compiler::fgMorphTree(GenTree* tree, MorphAddrContext* mac)
tree->gtFlags |= tree->AsConditional()->gtOp1->gtFlags & GTF_ALL_EFFECT;
tree->gtFlags |= tree->AsConditional()->gtOp2->gtFlags & GTF_ALL_EFFECT;
- if (opts.OptimizationEnabled())
- {
- // Try to fold away any constants etc.
- tree = gtFoldExpr(tree);
- }
+ // Try to fold away any constants etc.
+ tree = gtFoldExpr(tree);
break;
diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp
index 8fb5ca04935662..b2f76f8e02b337 100644
--- a/src/coreclr/jit/optimizer.cpp
+++ b/src/coreclr/jit/optimizer.cpp
@@ -5162,24 +5162,13 @@ bool Compiler::optVNIsLoopInvariant(ValueNum vn, FlowGraphNaturalLoop* loop, VNS
return previousRes;
}
- bool res = true;
- VNFuncApp funcApp;
+ bool res = true;
+ VNFuncApp funcApp;
+ VNPhiDef phiDef;
+ VNMemoryPhiDef memoryPhiDef;
if (vnStore->GetVNFunc(vn, &funcApp))
{
- if (funcApp.m_func == VNF_PhiDef)
- {
- // Is the definition within the loop? If so, is not loop-invariant.
- unsigned lclNum = funcApp.m_args[0];
- unsigned ssaNum = funcApp.m_args[1];
- LclSsaVarDsc* ssaDef = lvaTable[lclNum].GetPerSsaData(ssaNum);
- res = !loop->ContainsBlock(ssaDef->GetBlock());
- }
- else if (funcApp.m_func == VNF_PhiMemoryDef)
- {
- BasicBlock* defnBlk = reinterpret_cast(vnStore->ConstantValue(funcApp.m_args[0]));
- res = !loop->ContainsBlock(defnBlk);
- }
- else if (funcApp.m_func == VNF_MemOpaque)
+ if (funcApp.m_func == VNF_MemOpaque)
{
const unsigned loopIndex = funcApp.m_args[0];
@@ -5239,6 +5228,16 @@ bool Compiler::optVNIsLoopInvariant(ValueNum vn, FlowGraphNaturalLoop* loop, VNS
}
}
}
+ else if (vnStore->GetPhiDef(vn, &phiDef))
+ {
+ // Is the definition within the loop? If so, is not loop-invariant.
+ LclSsaVarDsc* ssaDef = lvaTable[phiDef.LclNum].GetPerSsaData(phiDef.SsaDef);
+ res = !loop->ContainsBlock(ssaDef->GetBlock());
+ }
+ else if (vnStore->GetMemoryPhiDef(vn, &memoryPhiDef))
+ {
+ res = !loop->ContainsBlock(memoryPhiDef.Block);
+ }
loopVnInvariantCache->Set(vn, res);
return res;
diff --git a/src/coreclr/jit/rationalize.cpp b/src/coreclr/jit/rationalize.cpp
index 2e6acb23428fd9..708de9b8b8c341 100644
--- a/src/coreclr/jit/rationalize.cpp
+++ b/src/coreclr/jit/rationalize.cpp
@@ -505,7 +505,7 @@ void Rationalizer::RewriteHWIntrinsicAsUserCall(GenTree** use, ArrayStackgtFlags &= ~GTF_HW_USER_CALL;
+ hwintrinsic->gtFlags &= ~(GTF_HW_USER_CALL | GTF_EXCEPT | GTF_CALL);
return;
}
}
diff --git a/src/coreclr/jit/redundantbranchopts.cpp b/src/coreclr/jit/redundantbranchopts.cpp
index b12ab304c08574..f074fab8032f6a 100644
--- a/src/coreclr/jit/redundantbranchopts.cpp
+++ b/src/coreclr/jit/redundantbranchopts.cpp
@@ -1398,8 +1398,8 @@ bool Compiler::optJumpThreadPhi(BasicBlock* block, GenTree* tree, ValueNum treeN
for (int i = 0; i < 2; i++)
{
const ValueNum phiDefVN = treeNormVNFuncApp.m_args[i];
- VNFuncApp phiDefFuncApp;
- if (!vnStore->GetVNFunc(phiDefVN, &phiDefFuncApp) || (phiDefFuncApp.m_func != VNF_PhiDef))
+ VNPhiDef phiDef;
+ if (!vnStore->GetPhiDef(phiDefVN, &phiDef))
{
// This input is not a phi def. If it's a func app it might depend on
// transitively on a phi def; consider a general search utility.
@@ -1409,12 +1409,10 @@ bool Compiler::optJumpThreadPhi(BasicBlock* block, GenTree* tree, ValueNum treeN
// The PhiDef args tell us which local and which SSA def of that local.
//
- assert(phiDefFuncApp.m_arity == 3);
- const unsigned lclNum = unsigned(phiDefFuncApp.m_args[0]);
- const unsigned ssaDefNum = unsigned(phiDefFuncApp.m_args[1]);
- const ValueNum phiVN = ValueNum(phiDefFuncApp.m_args[2]);
- JITDUMP("... JT-PHI [interestingVN] in " FMT_BB " relop %s operand VN is PhiDef for V%02u:%u " FMT_VN "\n",
- block->bbNum, i == 0 ? "first" : "second", lclNum, ssaDefNum, phiVN);
+ const unsigned lclNum = phiDef.LclNum;
+ const unsigned ssaDefNum = phiDef.SsaDef;
+ JITDUMP("... JT-PHI [interestingVN] in " FMT_BB " relop %s operand VN is PhiDef for V%02u:%u\n", block->bbNum,
+ i == 0 ? "first" : "second", lclNum, ssaDefNum);
if (!foundPhiDef)
{
DISPTREE(tree);
diff --git a/src/coreclr/jit/registerarm64.h b/src/coreclr/jit/registerarm64.h
index d296ab9497858f..6b8091814251ee 100644
--- a/src/coreclr/jit/registerarm64.h
+++ b/src/coreclr/jit/registerarm64.h
@@ -116,13 +116,13 @@ REGDEF(P13, 13+PBASE, PMASK(13), "p13", "na")
REGDEF(P14, 14+PBASE, PMASK(14), "p14", "na")
REGDEF(P15, 15+PBASE, PMASK(15), "p15", "na")
-
// The registers with values 80 (NBASE) and above are not real register numbers
#define NBASE 80
REGDEF(SP, 0+NBASE, 0x0000, "sp", "wsp?")
+REGDEF(FFR, 1+NBASE, 0x0000, "ffr", "na")
// This must be last!
-REGDEF(STK, 1+NBASE, 0x0000, "STK", "STK")
+REGDEF(STK, 2+NBASE, 0x0000, "STK", "STK")
/*****************************************************************************/
#undef RMASK
diff --git a/src/coreclr/jit/simd.cpp b/src/coreclr/jit/simd.cpp
index 1157bf9e5bfc94..6256ee0c37799b 100644
--- a/src/coreclr/jit/simd.cpp
+++ b/src/coreclr/jit/simd.cpp
@@ -130,6 +130,29 @@ unsigned Compiler::getSIMDInitTempVarNum(var_types simdType)
return lvaSIMDInitTempVarNum;
}
+#ifdef TARGET_ARM64
+//------------------------------------------------------------------------
+// getFFRegisterVarNum: Get, and allocate if necessary, the local variable
+// used to save and restore the FFR (first faulting register) value.
+//
+// Arguments:
+// (none)
+//
+// Returns:
+// The local variable number
+//
+unsigned Compiler::getFFRegisterVarNum()
+{
+ if (lvaFfrRegister == BAD_VAR_NUM)
+ {
+ lvaFfrRegister = lvaGrabTemp(false DEBUGARG("Save the FFR value."));
+ lvaTable[lvaFfrRegister].lvType = TYP_MASK;
+ lvaTable[lvaFfrRegister].lvUsedInSIMDIntrinsic = true;
+ }
+ return lvaFfrRegister;
+}
+#endif
+
//----------------------------------------------------------------------------------
// Return the base type and size of SIMD vector type given its type handle.
//
diff --git a/src/coreclr/jit/targetloongarch64.cpp b/src/coreclr/jit/targetloongarch64.cpp
index 845a0e2991a40a..e31e06d5d3399b 100644
--- a/src/coreclr/jit/targetloongarch64.cpp
+++ b/src/coreclr/jit/targetloongarch64.cpp
@@ -78,45 +78,51 @@ ABIPassingInformation LoongArch64Classifier::Classify(Compiler* comp,
{
assert(!structLayout->IsBlockLayout());
- uint32_t floatFlags;
- CORINFO_CLASS_HANDLE typeHnd = structLayout->GetClassHandle();
+ CORINFO_CLASS_HANDLE typeHnd = structLayout->GetClassHandle();
+ const CORINFO_FPSTRUCT_LOWERING* lowering = comp->GetFpStructLowering(typeHnd);
- floatFlags = comp->info.compCompHnd->getLoongArch64PassStructInRegisterFlags(typeHnd);
-
- if ((floatFlags & STRUCT_HAS_FLOAT_FIELDS_MASK) != 0)
+ if (!lowering->byIntegerCallConv)
{
- if ((floatFlags & STRUCT_FLOAT_FIELD_ONLY_ONE) != 0)
+ slots = lowering->numLoweredElements;
+ if (lowering->numLoweredElements == 1)
{
assert(passedSize <= TARGET_POINTER_SIZE);
+ assert(varTypeIsFloating(JITtype2varType(lowering->loweredElements[0])));
- slots = 1;
canPassArgInRegisters = m_floatRegs.Count() > 0;
-
- argRegTypeInStruct1 = (passedSize == 8) ? TYP_DOUBLE : TYP_FLOAT;
- }
- else if ((floatFlags & STRUCT_FLOAT_FIELD_ONLY_TWO) != 0)
- {
- slots = 2;
- canPassArgInRegisters = m_floatRegs.Count() >= 2;
-
- argRegTypeInStruct1 = (floatFlags & STRUCT_FIRST_FIELD_SIZE_IS8) ? TYP_DOUBLE : TYP_FLOAT;
- argRegTypeInStruct2 = (floatFlags & STRUCT_SECOND_FIELD_SIZE_IS8) ? TYP_DOUBLE : TYP_FLOAT;
+ argRegTypeInStruct1 = (passedSize == 8) ? TYP_DOUBLE : TYP_FLOAT;
}
- else if ((floatFlags & STRUCT_FLOAT_FIELD_FIRST) != 0)
- {
- slots = 2;
- canPassArgInRegisters = (m_floatRegs.Count() > 0) && (m_intRegs.Count() > 0);
-
- argRegTypeInStruct1 = (floatFlags & STRUCT_FIRST_FIELD_SIZE_IS8) ? TYP_DOUBLE : TYP_FLOAT;
- argRegTypeInStruct2 = (floatFlags & STRUCT_SECOND_FIELD_SIZE_IS8) ? TYP_LONG : TYP_INT;
- }
- else if ((floatFlags & STRUCT_FLOAT_FIELD_SECOND) != 0)
+ else
{
- slots = 2;
- canPassArgInRegisters = (m_floatRegs.Count() > 0) && (m_intRegs.Count() > 0);
-
- argRegTypeInStruct1 = (floatFlags & STRUCT_FIRST_FIELD_SIZE_IS8) ? TYP_LONG : TYP_INT;
- argRegTypeInStruct2 = (floatFlags & STRUCT_SECOND_FIELD_SIZE_IS8) ? TYP_DOUBLE : TYP_FLOAT;
+ assert(lowering->numLoweredElements == 2);
+ var_types types[] = {
+ JITtype2varType(lowering->loweredElements[0]),
+ JITtype2varType(lowering->loweredElements[1]),
+ };
+ if (varTypeIsFloating(types[0]) && varTypeIsFloating(types[1]))
+ {
+ canPassArgInRegisters = m_floatRegs.Count() >= 2;
+
+ argRegTypeInStruct1 = types[0];
+ argRegTypeInStruct2 = types[1];
+ }
+ else if (!varTypeIsFloating(types[1]))
+ {
+ assert(varTypeIsFloating(types[0]));
+ canPassArgInRegisters = (m_floatRegs.Count() > 0) && (m_intRegs.Count() > 0);
+
+ argRegTypeInStruct1 = types[0];
+ argRegTypeInStruct2 = (genTypeSize(types[1]) == 8) ? TYP_LONG : TYP_INT;
+ }
+ else
+ {
+ assert(!varTypeIsFloating(types[0]));
+ assert(varTypeIsFloating(types[1]));
+ canPassArgInRegisters = (m_floatRegs.Count() > 0) && (m_intRegs.Count() > 0);
+
+ argRegTypeInStruct1 = (genTypeSize(types[0]) == 8) ? TYP_LONG : TYP_INT;
+ argRegTypeInStruct2 = types[1];
+ }
}
assert((slots == 1) || (slots == 2));
diff --git a/src/coreclr/jit/targetriscv64.cpp b/src/coreclr/jit/targetriscv64.cpp
index 6adb2b7b91ea1b..308df8c980f8d7 100644
--- a/src/coreclr/jit/targetriscv64.cpp
+++ b/src/coreclr/jit/targetriscv64.cpp
@@ -58,9 +58,10 @@ ABIPassingInformation RiscV64Classifier::Classify(Compiler* comp,
ClassLayout* structLayout,
WellKnownArg /*wellKnownParam*/)
{
- StructFloatFieldInfoFlags flags = STRUCT_NO_FLOAT_FIELD;
- unsigned intFields = 0, floatFields = 0;
- unsigned passedSize;
+ const CORINFO_FPSTRUCT_LOWERING* lowering = nullptr;
+
+ unsigned intFields = 0, floatFields = 0;
+ unsigned passedSize;
if (varTypeIsStruct(type))
{
@@ -71,22 +72,19 @@ ABIPassingInformation RiscV64Classifier::Classify(Compiler* comp,
}
else if (!structLayout->IsBlockLayout())
{
- flags = (StructFloatFieldInfoFlags)comp->info.compCompHnd->getRISCV64PassStructInRegisterFlags(
- structLayout->GetClassHandle());
-
- if ((flags & STRUCT_FLOAT_FIELD_ONLY_ONE) != 0)
- {
- floatFields = 1;
- }
- else if ((flags & STRUCT_FLOAT_FIELD_ONLY_TWO) != 0)
- {
- floatFields = 2;
- }
- else if (flags != STRUCT_NO_FLOAT_FIELD)
+ lowering = comp->GetFpStructLowering(structLayout->GetClassHandle());
+ if (!lowering->byIntegerCallConv)
{
- assert((flags & (STRUCT_FLOAT_FIELD_FIRST | STRUCT_FLOAT_FIELD_SECOND)) != 0);
- floatFields = 1;
- intFields = 1;
+ assert((lowering->numLoweredElements == 1) || (lowering->numLoweredElements == 2));
+ INDEBUG(unsigned debugIntFields = 0;)
+ for (size_t i = 0; i < lowering->numLoweredElements; ++i)
+ {
+ var_types type = JITtype2varType(lowering->loweredElements[i]);
+ floatFields += (unsigned)varTypeIsFloating(type);
+ INDEBUG(debugIntFields += (unsigned)varTypeIsIntegralOrI(type);)
+ }
+ intFields = lowering->numLoweredElements - floatFields;
+ assert(debugIntFields == intFields);
}
}
}
@@ -104,11 +102,15 @@ ABIPassingInformation RiscV64Classifier::Classify(Compiler* comp,
// Hardware floating-point calling convention
if ((floatFields == 1) && (intFields == 0))
{
- if (flags == STRUCT_NO_FLOAT_FIELD)
+ if (lowering == nullptr)
+ {
assert(varTypeIsFloating(type)); // standalone floating-point real
+ }
else
- assert((flags & STRUCT_FLOAT_FIELD_ONLY_ONE) != 0); // struct containing just one FP real
-
+ {
+ assert(lowering->numLoweredElements == 1); // struct containing just one FP real
+ assert(varTypeIsFloating(JITtype2varType(lowering->loweredElements[0])));
+ }
return ABIPassingInformation::FromSegment(comp, ABIPassingSegment::InRegister(m_floatRegs.Dequeue(), 0,
passedSize));
}
@@ -116,15 +118,20 @@ ABIPassingInformation RiscV64Classifier::Classify(Compiler* comp,
{
assert(varTypeIsStruct(type));
assert((floatFields + intFields) == 2);
- assert(flags != STRUCT_NO_FLOAT_FIELD);
- assert((flags & STRUCT_FLOAT_FIELD_ONLY_ONE) == 0);
-
- unsigned firstSize = ((flags & STRUCT_FIRST_FIELD_SIZE_IS8) != 0) ? 8 : 4;
- unsigned secondSize = ((flags & STRUCT_SECOND_FIELD_SIZE_IS8) != 0) ? 8 : 4;
+ assert(lowering != nullptr);
+ assert(!lowering->byIntegerCallConv);
+ assert(lowering->numLoweredElements == 2);
+
+ var_types types[] = {
+ JITtype2varType(lowering->loweredElements[0]),
+ JITtype2varType(lowering->loweredElements[1]),
+ };
+ unsigned firstSize = (genTypeSize(types[0]) == 8) ? 8 : 4;
+ unsigned secondSize = (genTypeSize(types[1]) == 8) ? 8 : 4;
unsigned offset = max(firstSize, secondSize); // TODO: cover empty fields and custom offsets / alignments
- bool isFirstFloat = (flags & (STRUCT_FLOAT_FIELD_ONLY_TWO | STRUCT_FLOAT_FIELD_FIRST)) != 0;
- bool isSecondFloat = (flags & (STRUCT_FLOAT_FIELD_ONLY_TWO | STRUCT_FLOAT_FIELD_SECOND)) != 0;
+ bool isFirstFloat = varTypeIsFloating(types[0]);
+ bool isSecondFloat = varTypeIsFloating(types[1]);
assert(isFirstFloat || isSecondFloat);
regNumber firstReg = (isFirstFloat ? m_floatRegs : m_intRegs).Dequeue();
diff --git a/src/coreclr/jit/utils.h b/src/coreclr/jit/utils.h
index b0b86736bc0b7b..b6b81ddd02029a 100644
--- a/src/coreclr/jit/utils.h
+++ b/src/coreclr/jit/utils.h
@@ -1190,4 +1190,7 @@ bool CastFromFloatOverflows(float fromValue, var_types toType);
bool CastFromDoubleOverflows(double fromValue, var_types toType);
} // namespace CheckedOps
+#define STRINGIFY_(x) #x
+#define STRINGIFY(x) STRINGIFY_(x)
+
#endif // _UTILS_H_
diff --git a/src/coreclr/jit/valuenum.cpp b/src/coreclr/jit/valuenum.cpp
index 7e00b61aabb25b..2e88aef9bb93d5 100644
--- a/src/coreclr/jit/valuenum.cpp
+++ b/src/coreclr/jit/valuenum.cpp
@@ -1635,7 +1635,20 @@ bool ValueNumStore::IsKnownNonNull(ValueNum vn)
}
VNFuncApp funcAttr;
- return GetVNFunc(vn, &funcAttr) && (s_vnfOpAttribs[funcAttr.m_func] & VNFOA_KnownNonNull) != 0;
+ if (!GetVNFunc(vn, &funcAttr))
+ {
+ return false;
+ }
+
+ if ((s_vnfOpAttribs[funcAttr.m_func] & VNFOA_KnownNonNull) != 0)
+ {
+ return true;
+ }
+
+ // TODO: we can recognize more non-null idioms here, e.g.
+ // ADD(IsKnownNonNull(op1), smallCns), etc.
+
+ return false;
}
bool ValueNumStore::IsSharedStatic(ValueNum vn)
@@ -1733,6 +1746,14 @@ ValueNumStore::Chunk::Chunk(CompAllocator alloc, ValueNum* pNextBaseVN, var_type
m_defs = new (alloc) VNHandle[ChunkSize];
break;
+ case CEA_PhiDef:
+ m_defs = new (alloc) VNPhiDef[ChunkSize];
+ break;
+
+ case CEA_MemoryPhiDef:
+ m_defs = new (alloc) VNMemoryPhiDef[ChunkSize];
+ break;
+
case CEA_Func0:
m_defs = new (alloc) VNFunc[ChunkSize];
break;
@@ -2860,8 +2881,7 @@ ValueNum ValueNumStore::VNForFuncNoFolding(var_types typ, VNFunc func, ValueNum
//
// Return Value: - Returns the ValueNum associated with 'func'('arg0VN','arg1VN','arg1VN)
//
-// Note: - This method only handles Trinary operations
-// We have to special case VNF_PhiDef, as it's first two arguments are not ValueNums
+// Note: - This method only handles ternary operations
//
ValueNum ValueNumStore::VNForFunc(var_types typ, VNFunc func, ValueNum arg0VN, ValueNum arg1VN, ValueNum arg2VN)
{
@@ -2874,15 +2894,8 @@ ValueNum ValueNumStore::VNForFunc(var_types typ, VNFunc func, ValueNum arg0VN, V
#ifdef DEBUG
// Function arguments carry no exceptions.
//
- if (func != VNF_PhiDef)
- {
- // For a phi definition first and second argument are "plain" local/ssa numbers.
- // (I don't know if having such non-VN arguments to a VN function is a good idea -- if we wanted to declare
- // ValueNum to be "short" it would be a problem, for example. But we'll leave it for now, with these explicit
- // exceptions.)
- assert(arg0VN == VNNormalValue(arg0VN));
- assert(arg1VN == VNNormalValue(arg1VN));
- }
+ assert(arg0VN == VNNormalValue(arg0VN));
+ assert(arg1VN == VNNormalValue(arg1VN));
assert(arg2VN == VNNormalValue(arg2VN));
#endif
@@ -2958,6 +2971,123 @@ ValueNum ValueNumStore::VNForFunc(
return *resultVN;
}
+// ----------------------------------------------------------------------------------------
+// VNForPhiDef - Return a new VN number for a phi definition.
+//
+// Arguments:
+// type - Type of the local
+// lclNum - Local corresponding to the phidef
+// ssaDef - SSA number representing the phi def
+// ssaArgs - SSA numbers from the predecessors
+//
+// Return Value:
+// New value number
+//
+ValueNum ValueNumStore::VNForPhiDef(var_types type, unsigned lclNum, unsigned ssaDef, ArrayStack& ssaArgs)
+{
+ unsigned* newSsaArgs = m_alloc.allocate((unsigned)ssaArgs.Height());
+ memcpy(newSsaArgs, ssaArgs.Data(), (unsigned)ssaArgs.Height() * sizeof(ValueNum));
+
+ Chunk* const c = GetAllocChunk(type, CEA_PhiDef);
+ unsigned index = c->AllocVN();
+ VNPhiDef* inserted = &static_cast(c->m_defs)[index];
+
+ inserted->LclNum = lclNum;
+ inserted->SsaDef = ssaDef;
+ inserted->SsaArgs = newSsaArgs;
+ inserted->NumArgs = (unsigned)ssaArgs.Height();
+
+ ValueNum newVN = c->m_baseVN + index;
+ return newVN;
+}
+
+// ----------------------------------------------------------------------------------------
+// GetPhiDef - Check if a VN represents a phi definition and if so, look up
+// information about it.
+//
+// Arguments:
+// vn - Value number
+// phiDef - [out] Information about the phi definition
+//
+// Return Value:
+// True if the VN is a phi def.
+//
+bool ValueNumStore::GetPhiDef(ValueNum vn, VNPhiDef* phiDef)
+{
+ if (vn == NoVN)
+ {
+ return false;
+ }
+
+ Chunk* c = m_chunks.GetNoExpand(GetChunkNum(vn));
+ unsigned offset = ChunkOffset(vn);
+ assert(offset < c->m_numUsed);
+ if (c->m_attribs == CEA_PhiDef)
+ {
+ *phiDef = static_cast(c->m_defs)[offset];
+ return true;
+ }
+
+ return false;
+}
+
+// ----------------------------------------------------------------------------------------
+// VNForMemoryPhiDef - Return a new VN number for a memory phi definition.
+//
+// Arguments:
+// block - Block with the definition
+// ssaArgs - SSA numbers from the predecessors
+//
+// Return Value:
+// New value number
+//
+ValueNum ValueNumStore::VNForMemoryPhiDef(BasicBlock* block, ArrayStack& ssaArgs)
+{
+ unsigned* newSsaArgs = m_alloc.allocate((unsigned)ssaArgs.Height());
+ memcpy(newSsaArgs, ssaArgs.Data(), (unsigned)ssaArgs.Height() * sizeof(ValueNum));
+
+ Chunk* const c = GetAllocChunk(TYP_HEAP, CEA_MemoryPhiDef);
+ unsigned index = c->AllocVN();
+ VNMemoryPhiDef* inserted = &static_cast(c->m_defs)[index];
+
+ inserted->Block = block;
+ inserted->SsaArgs = newSsaArgs;
+ inserted->NumArgs = (unsigned)ssaArgs.Height();
+
+ ValueNum newVN = c->m_baseVN + index;
+ return newVN;
+}
+
+// ----------------------------------------------------------------------------------------
+// GetMemoryPhiDef - Check if a VN represents a memory phi definition and if
+// so, look up information about it.
+//
+// Arguments:
+// vn - Value number
+// memoryPhiDef - [out] Information about the memory phi definition
+//
+// Return Value:
+// True if the VN is a memory phi def.
+//
+bool ValueNumStore::GetMemoryPhiDef(ValueNum vn, VNMemoryPhiDef* memoryPhiDef)
+{
+ if (vn == NoVN)
+ {
+ return false;
+ }
+
+ Chunk* c = m_chunks.GetNoExpand(GetChunkNum(vn));
+ unsigned offset = ChunkOffset(vn);
+ assert(offset < c->m_numUsed);
+ if (c->m_attribs == CEA_MemoryPhiDef)
+ {
+ *memoryPhiDef = static_cast(c->m_defs)[offset];
+ return true;
+ }
+
+ return false;
+}
+
//------------------------------------------------------------------------------
// VNForMapStore: Create the VN for a precise store (to a precise map).
//
@@ -3074,81 +3204,81 @@ ValueNum ValueNumStore::VNForMapPhysicalSelect(
return result;
}
-typedef JitHashTable, bool> ValueNumSet;
-
-class SmallValueNumSet
+bool ValueNumStore::SmallValueNumSet::Lookup(ValueNum vn)
{
- union
+ // O(N) lookup for inline elements
+ if (m_numElements <= ArrLen(m_inlineElements))
{
- ValueNum m_inlineElements[4];
- ValueNumSet* m_set;
- };
- unsigned m_numElements = 0;
-
-public:
- unsigned Count()
- {
- return m_numElements;
- }
-
- template
- void ForEach(Func func)
- {
- if (m_numElements <= ArrLen(m_inlineElements))
- {
- for (unsigned i = 0; i < m_numElements; i++)
- {
- func(m_inlineElements[i]);
- }
- }
- else
+ for (unsigned i = 0; i < m_numElements; i++)
{
- for (ValueNum vn : ValueNumSet::KeyIteration(m_set))
+ if (m_inlineElements[i] == vn)
{
- func(vn);
+ return true;
}
}
+
+ // Not found
+ return false;
}
- void Add(Compiler* comp, ValueNum vn)
+ return m_set->Lookup(vn);
+}
+
+// Returns false if the value already exists
+bool ValueNumStore::SmallValueNumSet::Add(Compiler* comp, ValueNum vn)
+{
+ if (m_numElements <= ArrLen(m_inlineElements))
{
- if (m_numElements <= ArrLen(m_inlineElements))
+ for (unsigned i = 0; i < m_numElements; i++)
{
- for (unsigned i = 0; i < m_numElements; i++)
+ if (m_inlineElements[i] == vn)
{
- if (m_inlineElements[i] == vn)
- {
- return;
- }
+ // Already exists
+ return false;
}
+ }
- if (m_numElements < ArrLen(m_inlineElements))
+ if (m_numElements < ArrLen(m_inlineElements))
+ {
+ m_inlineElements[m_numElements] = vn;
+ m_numElements++;
+ }
+ else
+ {
+ ValueNumSet* set = new (comp, CMK_ValueNumber) ValueNumSet(comp->getAllocator(CMK_ValueNumber));
+ for (ValueNum oldVn : m_inlineElements)
{
- m_inlineElements[m_numElements] = vn;
- m_numElements++;
+ set->Set(oldVn, true);
}
- else
- {
- ValueNumSet* set = new (comp, CMK_ValueNumber) ValueNumSet(comp->getAllocator(CMK_ValueNumber));
- for (ValueNum oldVn : m_inlineElements)
- {
- set->Set(oldVn, true);
- }
- set->Set(vn, true);
+ set->Set(vn, true);
- m_set = set;
- m_numElements++;
- assert(m_numElements == set->GetCount());
- }
- }
- else
- {
- m_set->Set(vn, true, ValueNumSet::SetKind::Overwrite);
- m_numElements = m_set->GetCount();
+ m_set = set;
+ m_numElements++;
+ assert(m_numElements == set->GetCount());
}
+ return true;
}
-};
+
+ bool exists = m_set->Set(vn, true, ValueNumSet::SetKind::Overwrite);
+ m_numElements = m_set->GetCount();
+ return !exists;
+}
+
+//------------------------------------------------------------------------------
+// VNPhiDefToVN: Extracts the VN for a specific argument of a phi definition.
+//
+// Arguments:
+// phiDef - The phi definition
+// ssaArgNum - The argument number to extract
+//
+// Return Value:
+// The VN for the specified argument of the phi definition.
+//
+ValueNum ValueNumStore::VNPhiDefToVN(const VNPhiDef& phiDef, unsigned ssaArgNum)
+{
+ return m_pComp->lvaGetDesc(phiDef.LclNum)->GetPerSsaData(phiDef.SsaArgs[ssaArgNum])->m_vnPair.Get(VNK_Conservative);
+}
//------------------------------------------------------------------------------
// VNForMapSelectInner: Select value from a map and record loop memory dependencies.
@@ -3440,141 +3570,110 @@ ValueNum ValueNumStore::VNForMapSelectWork(ValueNumKind vnk,
}
break;
- case VNF_PhiDef:
- case VNF_PhiMemoryDef:
+ default:
+ break;
+ }
+ }
+ else
+ {
+ unsigned* ssaArgs = nullptr;
+ unsigned numSsaArgs = 0;
+ unsigned lclNum = BAD_VAR_NUM;
+
+ VNPhiDef phiDef;
+ VNMemoryPhiDef memoryPhiDef;
+ if (GetPhiDef(map, &phiDef))
+ {
+ lclNum = phiDef.LclNum;
+ ssaArgs = phiDef.SsaArgs;
+ numSsaArgs = phiDef.NumArgs;
+ }
+ else if (GetMemoryPhiDef(map, &memoryPhiDef))
+ {
+ ssaArgs = memoryPhiDef.SsaArgs;
+ numSsaArgs = memoryPhiDef.NumArgs;
+ }
+
+ if (ssaArgs != nullptr)
+ {
+ // select(phi(m1, m2), x): if select(m1, x) == select(m2, x), return that, else new fresh.
+ // Get the first argument of the phi.
+
+ // We need to be careful about breaking infinite recursion. Record the outer select.
+ m_fixedPointMapSels.Push(VNDefFuncApp<2>(VNF_MapSelect, map, index));
+
+ ValueNum sameSelResult = ValueNumStore::RecursiveVN;
+ bool allSame = true;
+
+ for (unsigned i = 0; i < numSsaArgs; i++)
{
- unsigned lclNum = BAD_VAR_NUM;
- bool isMemory = false;
- VNFuncApp phiFuncApp;
- bool defArgIsFunc = false;
- if (funcApp.m_func == VNF_PhiDef)
+ if (*pBudget <= 0)
{
- lclNum = unsigned(funcApp.m_args[0]);
- defArgIsFunc = GetVNFunc(funcApp.m_args[2], &phiFuncApp);
+ allSame = false;
+ break;
+ }
+
+ ValueNum phiArgVN;
+ if (lclNum != BAD_VAR_NUM)
+ {
+ phiArgVN = m_pComp->lvaGetDesc(lclNum)->GetPerSsaData(ssaArgs[i])->m_vnPair.Get(vnk);
}
else
{
- assert(funcApp.m_func == VNF_PhiMemoryDef);
- isMemory = true;
- defArgIsFunc = GetVNFunc(funcApp.m_args[1], &phiFuncApp);
+ phiArgVN = m_pComp->GetMemoryPerSsaData(ssaArgs[i])->m_vnPair.Get(vnk);
}
- if (defArgIsFunc && phiFuncApp.m_func == VNF_Phi)
+
+ if (phiArgVN == ValueNumStore::NoVN)
{
- // select(phi(m1, m2), x): if select(m1, x) == select(m2, x), return that, else new fresh.
- // Get the first argument of the phi.
+ allSame = false;
+ break;
+ }
- // We need to be careful about breaking infinite recursion. Record the outer select.
- m_fixedPointMapSels.Push(VNDefFuncApp<2>(VNF_MapSelect, map, index));
+ bool usedRecursiveVN = false;
+ ValueNum curResult =
+ VNForMapSelectWork(vnk, type, phiArgVN, index, pBudget, &usedRecursiveVN, recMemoryDependencies);
- assert(IsVNConstant(phiFuncApp.m_args[0]));
- unsigned phiArgSsaNum = ConstantValue(phiFuncApp.m_args[0]);
- ValueNum phiArgVN;
- if (isMemory)
- {
- phiArgVN = m_pComp->GetMemoryPerSsaData(phiArgSsaNum)->m_vnPair.Get(vnk);
- }
- else
- {
- phiArgVN = m_pComp->lvaTable[lclNum].GetPerSsaData(phiArgSsaNum)->m_vnPair.Get(vnk);
- }
- if (phiArgVN != ValueNumStore::NoVN)
- {
- bool allSame = true;
- ValueNum argRest = phiFuncApp.m_args[1];
- ValueNum sameSelResult = VNForMapSelectWork(vnk, type, phiArgVN, index, pBudget,
- pUsedRecursiveVN, recMemoryDependencies);
-
- // It is possible that we just now exceeded our budget, if so we need to force an early exit
- // and stop calling VNForMapSelectWork
- if (*pBudget <= 0)
- {
- // We don't have any budget remaining to verify that all phiArgs are the same
- // so setup the default failure case now.
- allSame = false;
- }
+ *pUsedRecursiveVN |= usedRecursiveVN;
+ if (sameSelResult == ValueNumStore::RecursiveVN)
+ {
+ sameSelResult = curResult;
+ }
+ if ((curResult != ValueNumStore::RecursiveVN) && (curResult != sameSelResult))
+ {
+ allSame = false;
+ break;
+ }
+ }
- while (allSame && argRest != ValueNumStore::NoVN)
- {
- ValueNum cur = argRest;
- VNFuncApp phiArgFuncApp;
- if (GetVNFunc(argRest, &phiArgFuncApp) && phiArgFuncApp.m_func == VNF_Phi)
- {
- cur = phiArgFuncApp.m_args[0];
- argRest = phiArgFuncApp.m_args[1];
- }
- else
- {
- argRest = ValueNumStore::NoVN; // Cause the loop to terminate.
- }
- assert(IsVNConstant(cur));
- phiArgSsaNum = ConstantValue(cur);
- if (isMemory)
- {
- phiArgVN = m_pComp->GetMemoryPerSsaData(phiArgSsaNum)->m_vnPair.Get(vnk);
- }
- else
- {
- phiArgVN = m_pComp->lvaTable[lclNum].GetPerSsaData(phiArgSsaNum)->m_vnPair.Get(vnk);
- }
- if (phiArgVN == ValueNumStore::NoVN)
- {
- allSame = false;
- }
- else
- {
- bool usedRecursiveVN = false;
- ValueNum curResult = VNForMapSelectWork(vnk, type, phiArgVN, index, pBudget,
- &usedRecursiveVN, recMemoryDependencies);
+ // Make sure we're popping what we pushed.
+ assert(FixedPointMapSelsTopHasValue(map, index));
+ m_fixedPointMapSels.Pop();
- *pUsedRecursiveVN |= usedRecursiveVN;
- if (sameSelResult == ValueNumStore::RecursiveVN)
- {
- sameSelResult = curResult;
- }
- if (curResult != ValueNumStore::RecursiveVN && curResult != sameSelResult)
- {
- allSame = false;
- }
- }
- }
- if (allSame && sameSelResult != ValueNumStore::RecursiveVN)
- {
- // Make sure we're popping what we pushed.
- assert(FixedPointMapSelsTopHasValue(map, index));
- m_fixedPointMapSels.Pop();
-
- // To avoid exponential searches, we make sure that this result is memo-ized.
- // The result is always valid for memoization if we didn't rely on RecursiveVN to get
- // it.
- // If RecursiveVN was used, we are processing a loop and we can't memo-ize this
- // intermediate
- // result if, e.g., this block is in a multi-entry loop.
- if (!*pUsedRecursiveVN)
- {
- entry.Result = sameSelResult;
- entry.SetMemoryDependencies(m_pComp, recMemoryDependencies);
+ if (allSame && (sameSelResult != ValueNumStore::RecursiveVN))
+ {
+ // To avoid exponential searches, we make sure that this result is memo-ized.
+ // The result is always valid for memoization if we didn't rely on RecursiveVN to get
+ // it.
+ // If RecursiveVN was used, we are processing a loop and we can't memo-ize this
+ // intermediate
+ // result if, e.g., this block is in a multi-entry loop.
+ if (!*pUsedRecursiveVN)
+ {
+ entry.Result = sameSelResult;
+ entry.SetMemoryDependencies(m_pComp, recMemoryDependencies);
- GetMapSelectWorkCache()->Set(fstruct, entry);
- }
+ GetMapSelectWorkCache()->Set(fstruct, entry);
+ }
- recMemoryDependencies.ForEach([this, &memoryDependencies](ValueNum vn) {
- memoryDependencies.Add(m_pComp, vn);
- });
+ recMemoryDependencies.ForEach([this, &memoryDependencies](ValueNum vn) {
+ memoryDependencies.Add(m_pComp, vn);
+ });
- return sameSelResult;
- }
- // Otherwise, fall through to creating the select(phi(m1, m2), x) function application.
- }
- // Make sure we're popping what we pushed.
- assert(FixedPointMapSelsTopHasValue(map, index));
- m_fixedPointMapSels.Pop();
- }
+ return sameSelResult;
}
- break;
-
- default:
- break;
}
+
+ // Otherwise, fall through to creating the select(phi(m1, m2), x) function application.
}
// We may have run out of budget and already assigned a result
@@ -6360,7 +6459,8 @@ var_types ValueNumStore::TypeOfVN(ValueNum vn) const
//
FlowGraphNaturalLoop* ValueNumStore::LoopOfVN(ValueNum vn)
{
- VNFuncApp funcApp;
+ VNFuncApp funcApp;
+ VNMemoryPhiDef memoryPhiDef;
if (GetVNFunc(vn, &funcApp))
{
if (funcApp.m_func == VNF_MemOpaque)
@@ -6383,11 +6483,10 @@ FlowGraphNaturalLoop* ValueNumStore::LoopOfVN(ValueNum vn)
return m_pComp->m_loops->GetLoopByIndex(index);
}
- else if (funcApp.m_func == VNF_PhiMemoryDef)
- {
- BasicBlock* const block = reinterpret_cast(ConstantValue(funcApp.m_args[0]));
- return m_pComp->m_blockToLoop->GetLoop(block);
- }
+ }
+ else if (GetMemoryPhiDef(vn, &memoryPhiDef))
+ {
+ return m_pComp->m_blockToLoop->GetLoop(memoryPhiDef.Block);
}
return nullptr;
@@ -6427,68 +6526,75 @@ bool ValueNumStore::IsVNInt32Constant(ValueNum vn)
bool ValueNumStore::IsVNNeverNegative(ValueNum vn)
{
- assert(varTypeIsIntegral(TypeOfVN(vn)));
+ auto vnVisitor = [this](ValueNum vn) -> VNVisit {
+ if ((vn == NoVN) || !varTypeIsIntegral(TypeOfVN(vn)))
+ {
+ return VNVisit::Abort;
+ }
- if (IsVNConstant(vn))
- {
- var_types vnTy = TypeOfVN(vn);
- if (vnTy == TYP_INT)
+ if (IsVNConstant(vn))
{
- return GetConstantInt32(vn) >= 0;
+ var_types vnTy = TypeOfVN(vn);
+ if (vnTy == TYP_INT)
+ {
+ return GetConstantInt32(vn) >= 0 ? VNVisit::Continue : VNVisit::Abort;
+ }
+ if (vnTy == TYP_LONG)
+ {
+ return GetConstantInt64(vn) >= 0 ? VNVisit::Continue : VNVisit::Abort;
+ }
+ return VNVisit::Abort;
}
- else if (vnTy == TYP_LONG)
+
+ // Array length can never be negative.
+ if (IsVNArrLen(vn))
{
- return GetConstantInt64(vn) >= 0;
+ return VNVisit::Continue;
}
- return false;
- }
+ // TODO-VN: Recognize Span.Length
+ // Handle more intrinsics such as Math.Max(neverNegative1, neverNegative2)
- // Array length can never be negative.
- if (IsVNArrLen(vn))
- {
- return true;
- }
-
- VNFuncApp funcApp;
- if (GetVNFunc(vn, &funcApp))
- {
- switch (funcApp.m_func)
+ VNFuncApp funcApp;
+ if (GetVNFunc(vn, &funcApp))
{
- case VNF_GE_UN:
- case VNF_GT_UN:
- case VNF_LE_UN:
- case VNF_LT_UN:
- case VNF_COUNT:
- case VNF_ADD_UN_OVF:
- case VNF_SUB_UN_OVF:
- case VNF_MUL_UN_OVF:
+ switch (funcApp.m_func)
+ {
+ case VNF_GE_UN:
+ case VNF_GT_UN:
+ case VNF_LE_UN:
+ case VNF_LT_UN:
+ case VNF_COUNT:
+ case VNF_ADD_UN_OVF:
+ case VNF_SUB_UN_OVF:
+ case VNF_MUL_UN_OVF:
#ifdef FEATURE_HW_INTRINSICS
#ifdef TARGET_XARCH
- case VNF_HWI_POPCNT_PopCount:
- case VNF_HWI_POPCNT_X64_PopCount:
- case VNF_HWI_LZCNT_LeadingZeroCount:
- case VNF_HWI_LZCNT_X64_LeadingZeroCount:
- case VNF_HWI_BMI1_TrailingZeroCount:
- case VNF_HWI_BMI1_X64_TrailingZeroCount:
- return true;
+ case VNF_HWI_POPCNT_PopCount:
+ case VNF_HWI_POPCNT_X64_PopCount:
+ case VNF_HWI_LZCNT_LeadingZeroCount:
+ case VNF_HWI_LZCNT_X64_LeadingZeroCount:
+ case VNF_HWI_BMI1_TrailingZeroCount:
+ case VNF_HWI_BMI1_X64_TrailingZeroCount:
+ return VNVisit::Continue;
#elif defined(TARGET_ARM64)
- case VNF_HWI_AdvSimd_PopCount:
- case VNF_HWI_AdvSimd_LeadingZeroCount:
- case VNF_HWI_AdvSimd_LeadingSignCount:
- case VNF_HWI_ArmBase_LeadingZeroCount:
- case VNF_HWI_ArmBase_Arm64_LeadingZeroCount:
- case VNF_HWI_ArmBase_Arm64_LeadingSignCount:
- return true;
+ case VNF_HWI_AdvSimd_PopCount:
+ case VNF_HWI_AdvSimd_LeadingZeroCount:
+ case VNF_HWI_AdvSimd_LeadingSignCount:
+ case VNF_HWI_ArmBase_LeadingZeroCount:
+ case VNF_HWI_ArmBase_Arm64_LeadingZeroCount:
+ case VNF_HWI_ArmBase_Arm64_LeadingSignCount:
+ return VNVisit::Continue;
#endif
#endif // FEATURE_HW_INTRINSICS
- default:
- break;
+ default:
+ break;
+ }
}
- }
-
- return false;
+ return VNVisit::Abort;
+ };
+ return VNVisitReachingVNs(vn, vnVisitor) == VNVisit::Continue;
}
GenTreeFlags ValueNumStore::GetHandleFlags(ValueNum vn)
@@ -6718,16 +6824,6 @@ const char* ValueNumStore::VNRelationString(VN_RELATION_KIND vrk)
}
#endif
-bool ValueNumStore::IsVNPhiDef(ValueNum vn)
-{
- VNFuncApp funcAttr;
- if (!GetVNFunc(vn, &funcAttr))
- {
- return false;
- }
-
- return funcAttr.m_func == VNF_PhiDef;
-}
//------------------------------------------------------------------------
// AreVNsEquivalent: returns true iff VNs represent the same value
//
@@ -6748,24 +6844,14 @@ bool ValueNumStore::AreVNsEquivalent(ValueNum vn1, ValueNum vn2)
return true;
}
- VNFuncApp funcAttr1;
- if (!GetVNFunc(vn1, &funcAttr1))
- {
- return false;
- }
-
- if (funcAttr1.m_func != VNF_PhiDef)
+ VNPhiDef def1;
+ if (!GetPhiDef(vn1, &def1))
{
return false;
}
- VNFuncApp funcAttr2;
- if (!GetVNFunc(vn2, &funcAttr2))
- {
- return false;
- }
-
- if (funcAttr2.m_func != VNF_PhiDef)
+ VNPhiDef def2;
+ if (!GetPhiDef(vn2, &def2))
{
return false;
}
@@ -6773,16 +6859,16 @@ bool ValueNumStore::AreVNsEquivalent(ValueNum vn1, ValueNum vn2)
// We have two PhiDefs. They may be equivalent, if
// they come from Phis in the same block.
//
- const unsigned lclNum1 = unsigned(funcAttr1.m_args[0]);
- const unsigned ssaDefNum1 = unsigned(funcAttr1.m_args[1]);
+ const unsigned lclNum1 = def1.LclNum;
+ const unsigned ssaDefNum1 = def1.SsaDef;
LclVarDsc* const varDsc1 = m_pComp->lvaGetDesc(lclNum1);
LclSsaVarDsc* const varSsaDsc1 = varDsc1->GetPerSsaData(ssaDefNum1);
GenTree* const varDefTree1 = varSsaDsc1->GetDefNode();
BasicBlock* const varDefBlock1 = varSsaDsc1->GetBlock();
- const unsigned lclNum2 = unsigned(funcAttr2.m_args[0]);
- const unsigned ssaDefNum2 = unsigned(funcAttr2.m_args[1]);
+ const unsigned lclNum2 = def2.LclNum;
+ const unsigned ssaDefNum2 = def2.SsaDef;
LclVarDsc* const varDsc2 = m_pComp->lvaGetDesc(lclNum2);
LclSsaVarDsc* const varSsaDsc2 = varDsc2->GetPerSsaData(ssaDefNum2);
@@ -6810,6 +6896,8 @@ bool ValueNumStore::AreVNsEquivalent(ValueNum vn1, ValueNum vn2)
bool phiArgsAreEquivalent = true;
+ // TODO-CQ: This logic could walk the SSA nums in the VNPhiDef, which
+ // accounts for unreachable predecessors.
for (; (treeIter1 != treeEnd1) && (treeIter2 != treeEnd2); ++treeIter1, ++treeIter2)
{
GenTreePhiArg* const treePhiArg1 = treeIter1->GetNode()->AsPhiArg();
@@ -9819,6 +9907,8 @@ bool ValueNumStore::VNIsValid(ValueNum vn)
void ValueNumStore::vnDump(Compiler* comp, ValueNum vn, bool isPtr)
{
printf(" {");
+ VNPhiDef phiDef;
+ VNMemoryPhiDef memoryPhiDef;
if (vn == NoVN)
{
printf("NoVN");
@@ -10058,6 +10148,24 @@ void ValueNumStore::vnDump(Compiler* comp, ValueNum vn, bool isPtr)
printf(")");
}
}
+ else if (GetPhiDef(vn, &phiDef))
+ {
+ printf("PhiDef(V%02u d:%u", phiDef.LclNum, phiDef.SsaDef);
+ for (unsigned i = 0; i < phiDef.NumArgs; i++)
+ {
+ printf(", u:%u", phiDef.SsaArgs[i]);
+ }
+ printf(")");
+ }
+ else if (GetMemoryPhiDef(vn, &memoryPhiDef))
+ {
+ printf("MemoryPhiDef(" FMT_BB, memoryPhiDef.Block->bbNum);
+ for (unsigned i = 0; i < memoryPhiDef.NumArgs; i++)
+ {
+ printf(", m:%u", memoryPhiDef.SsaArgs[i]);
+ }
+ printf(")");
+ }
else
{
// Otherwise, just a VN with no structure; print just the VN.
@@ -10905,10 +11013,32 @@ void Compiler::fgValueNumberBlock(BasicBlock* blk)
// First: visit phis and check to see if all phi args have the same value.
for (; (stmt != nullptr) && stmt->IsPhiDefnStmt(); stmt = stmt->GetNextStmt())
{
- GenTreeLclVar* newSsaDef = stmt->GetRootNode()->AsLclVar();
- fgValueNumberPhiDef(newSsaDef, blk);
+#ifdef DEBUG
+ if (verbose)
+ {
+ printf("\n***** " FMT_BB ", " FMT_STMT "(before)\n", blk->bbNum, stmt->GetID());
+ gtDispTree(stmt->GetRootNode());
+ printf("\n");
+ }
+#endif
+
+ fgValueNumberPhiDef(stmt->GetRootNode()->AsLclVar(), blk);
+
+#ifdef DEBUG
+ if (verbose)
+ {
+ printf("\n***** " FMT_BB ", " FMT_STMT "(after)\n", blk->bbNum, stmt->GetID());
+ gtDispTree(stmt->GetRootNode());
+ printf("\n");
+ if (stmt->GetNextStmt() != nullptr)
+ {
+ printf("---------\n");
+ }
+ }
+#endif
}
+ ArrayStack phiArgSsaNums(getAllocator(CMK_ValueNumber));
// Now do the same for each MemoryKind.
for (MemoryKind memoryKind : allMemoryKinds())
{
@@ -10942,41 +11072,37 @@ void Compiler::fgValueNumberBlock(BasicBlock* blk)
// There should be > 1 args to a phi.
// But OSR might leave around "dead" try entry blocks...
assert((phiArgs->m_nextArg != nullptr) || opts.IsOSR());
- ValueNum phiAppVN = vnStore->VNForIntCon(phiArgs->GetSsaNum());
- JITDUMP(" Building phi application: $%x = SSA# %d.\n", phiAppVN, phiArgs->GetSsaNum());
- bool allSame = true;
- ValueNum sameVN = GetMemoryPerSsaData(phiArgs->GetSsaNum())->m_vnPair.GetLiberal();
- if (sameVN == ValueNumStore::NoVN)
- {
- allSame = false;
- }
- phiArgs = phiArgs->m_nextArg;
+ phiArgSsaNums.Reset();
+
+ JITDUMP(" Building memory phi def for block " FMT_BB ".\n", blk->bbNum);
+ ValueNum sameVN = ValueNumStore::NoVN;
+
while (phiArgs != nullptr)
{
ValueNum phiArgVN = GetMemoryPerSsaData(phiArgs->GetSsaNum())->m_vnPair.GetLiberal();
- if (phiArgVN == ValueNumStore::NoVN || phiArgVN != sameVN)
+
+ if (phiArgSsaNums.Height() == 0)
{
- allSame = false;
+ sameVN = phiArgVN;
}
-#ifdef DEBUG
- ValueNum oldPhiAppVN = phiAppVN;
-#endif
- unsigned phiArgSSANum = phiArgs->GetSsaNum();
- ValueNum phiArgSSANumVN = vnStore->VNForIntCon(phiArgSSANum);
- JITDUMP(" Building phi application: $%x = SSA# %d.\n", phiArgSSANumVN, phiArgSSANum);
- phiAppVN = vnStore->VNForFuncNoFolding(TYP_HEAP, VNF_Phi, phiArgSSANumVN, phiAppVN);
- JITDUMP(" Building phi application: $%x = phi($%x, $%x).\n", phiAppVN, phiArgSSANumVN,
- oldPhiAppVN);
+ else
+ {
+ if ((phiArgVN == ValueNumStore::NoVN) || (phiArgVN != sameVN))
+ {
+ sameVN = ValueNumStore::NoVN;
+ }
+ }
+
+ phiArgSsaNums.Push(phiArgs->GetSsaNum());
phiArgs = phiArgs->m_nextArg;
}
- if (allSame)
+ if (sameVN != ValueNumStore::NoVN)
{
newMemoryVN = sameVN;
}
else
{
- newMemoryVN = vnStore->VNForFuncNoFolding(TYP_HEAP, VNF_PhiMemoryDef,
- vnStore->VNForHandle(ssize_t(blk), GTF_EMPTY), phiAppVN);
+ newMemoryVN = vnStore->VNForMemoryPhiDef(blk, phiArgSsaNums);
}
}
GetMemoryPerSsaData(blk->bbMemorySsaNumIn[memoryKind])->m_vnPair.SetLiberal(newMemoryVN);
@@ -11054,7 +11180,7 @@ void Compiler::fgValueNumberBlock(BasicBlock* blk)
}
//------------------------------------------------------------------------
-// fgValueNumberRegisterConstFieldSeq: If a VN'd integer constant has a
+// fgValueNumberPhiDef: If a VN'd integer constant has a
// field sequence we want to keep track of, then register it in the side table.
//
// Arguments:
@@ -11064,8 +11190,9 @@ void Compiler::fgValueNumberBlock(BasicBlock* blk)
//
void Compiler::fgValueNumberPhiDef(GenTreeLclVar* newSsaDef, BasicBlock* blk, bool isUpdate)
{
+ ArrayStack phiArgSsaNums(getAllocator(CMK_ValueNumber));
+
GenTreePhi* phiNode = newSsaDef->AsLclVar()->Data()->AsPhi();
- ValueNumPair phiVNP;
ValueNumPair sameVNP;
for (GenTreePhi::Use& use : phiNode->Uses())
@@ -11076,7 +11203,7 @@ void Compiler::fgValueNumberPhiDef(GenTreeLclVar* newSsaDef, BasicBlock* blk, bo
JITDUMP(" Phi arg [%06u] is unnecessary; path through pred " FMT_BB " cannot be taken\n",
dspTreeID(phiArg), phiArg->gtPredBB->bbNum);
- if ((use.GetNext() != nullptr) || (phiVNP.GetLiberal() != ValueNumStore::NoVN))
+ if ((use.GetNext() != nullptr) || (phiArgSsaNums.Height() > 0))
{
continue;
}
@@ -11085,31 +11212,28 @@ void Compiler::fgValueNumberPhiDef(GenTreeLclVar* newSsaDef, BasicBlock* blk, bo
JITDUMP(" ..but no other path can, so we are using it anyway\n");
}
- ValueNum phiArgSsaNumVN = vnStore->VNForIntCon(phiArg->GetSsaNum());
- ValueNumPair phiArgVNP = lvaGetDesc(phiArg)->GetPerSsaData(phiArg->GetSsaNum())->m_vnPair;
+ ValueNumPair phiArgVNP = lvaGetDesc(phiArg)->GetPerSsaData(phiArg->GetSsaNum())->m_vnPair;
- if (isUpdate && (phiArgVNP != phiArg->gtVNPair))
+#ifdef DEBUG
+ if (verbose && isUpdate && (phiArgVNP != phiArg->gtVNPair))
{
- JITDUMP("Updating phi arg [%06u] VN from ", dspTreeID(phiArg));
- JITDUMPEXEC(vnpPrint(phiArg->gtVNPair, 0));
- JITDUMP(" to ");
- JITDUMPEXEC(vnpPrint(phiArgVNP, 0));
- JITDUMP("\n");
+ printf("Updating phi arg [%06u] VN from ", dspTreeID(phiArg));
+ vnpPrint(phiArg->gtVNPair, 0);
+ printf(" to ");
+ vnpPrint(phiArgVNP, 0);
+ printf("\n");
}
+#endif
phiArg->gtVNPair = phiArgVNP;
- if (phiVNP.GetLiberal() == ValueNumStore::NoVN)
+ if (phiArgSsaNums.Height() == 0)
{
// This is the first PHI argument
- phiVNP = ValueNumPair(phiArgSsaNumVN, phiArgSsaNumVN);
sameVNP = phiArgVNP;
}
else
{
- phiVNP = vnStore->VNPairForFuncNoFolding(newSsaDef->TypeGet(), VNF_Phi,
- ValueNumPair(phiArgSsaNumVN, phiArgSsaNumVN), phiVNP);
-
if ((sameVNP.GetLiberal() != phiArgVNP.GetLiberal()) ||
(sameVNP.GetConservative() != phiArgVNP.GetConservative()))
{
@@ -11119,13 +11243,12 @@ void Compiler::fgValueNumberPhiDef(GenTreeLclVar* newSsaDef, BasicBlock* blk, bo
sameVNP.SetBoth(ValueNumStore::NoVN);
}
}
- }
- // We should have visited at least one phi arg in the loop above
- assert(phiVNP.GetLiberal() != ValueNumStore::NoVN);
- assert(phiVNP.GetConservative() != ValueNumStore::NoVN);
+ phiArgSsaNums.Push(phiArg->GetSsaNum());
+ }
- ValueNumPair newSsaDefVNP;
+ LclSsaVarDsc* newSsaDefDsc = lvaGetDesc(newSsaDef)->GetPerSsaData(newSsaDef->GetSsaNum());
+ ValueNumPair newSsaDefVNP = newSsaDefDsc->m_vnPair;
if (sameVNP.BothDefined())
{
@@ -11136,14 +11259,28 @@ void Compiler::fgValueNumberPhiDef(GenTreeLclVar* newSsaDef, BasicBlock* blk, bo
else
{
// They were not the same; we need to create a phi definition.
- ValueNum lclNumVN = ValueNum(newSsaDef->GetLclNum());
- ValueNum ssaNumVN = ValueNum(newSsaDef->GetSsaNum());
- newSsaDefVNP = vnStore->VNPairForFunc(newSsaDef->TypeGet(), VNF_PhiDef, ValueNumPair(lclNumVN, lclNumVN),
- ValueNumPair(ssaNumVN, ssaNumVN), phiVNP);
- }
+ bool newPhiDef = true;
+ if (isUpdate)
+ {
+ // For an update, the phi-def usually does not have any new
+ // information -- it is still just the list of SSA args from preds.
+ // The exception is if we were now able to prove that the block is
+ // unreachable from one of the preds.
+ VNPhiDef prevPhiDef;
+ if (vnStore->GetPhiDef(newSsaDefDsc->m_vnPair.GetLiberal(), &prevPhiDef) &&
+ (prevPhiDef.NumArgs == (unsigned)phiArgSsaNums.Height()))
+ {
+ newPhiDef = false;
+ }
+ }
- LclSsaVarDsc* newSsaDefDsc = lvaGetDesc(newSsaDef)->GetPerSsaData(newSsaDef->GetSsaNum());
+ if (newPhiDef)
+ {
+ newSsaDefVNP.SetBoth(vnStore->VNForPhiDef(newSsaDef->TypeGet(), newSsaDef->GetLclNum(),
+ newSsaDef->GetSsaNum(), phiArgSsaNums));
+ }
+ }
#ifdef DEBUG
if (isUpdate)
diff --git a/src/coreclr/jit/valuenum.h b/src/coreclr/jit/valuenum.h
index b9592d8c8cae27..46d7378f185c61 100644
--- a/src/coreclr/jit/valuenum.h
+++ b/src/coreclr/jit/valuenum.h
@@ -206,6 +206,21 @@ struct VNFuncApp
}
};
+struct VNPhiDef
+{
+ unsigned LclNum;
+ unsigned SsaDef;
+ unsigned* SsaArgs;
+ unsigned NumArgs;
+};
+
+struct VNMemoryPhiDef
+{
+ BasicBlock* Block;
+ unsigned* SsaArgs;
+ unsigned NumArgs;
+};
+
// We use a unique prefix character when printing value numbers in dumps: i.e. $1c0
// This define is used with string concatenation to put this in printf format strings
#define FMT_VN "$%x"
@@ -514,6 +529,107 @@ class ValueNumStore
void PeelOffsets(ValueNum* vn, target_ssize_t* offset);
+ typedef JitHashTable, bool> ValueNumSet;
+
+ class SmallValueNumSet
+ {
+ union
+ {
+ ValueNum m_inlineElements[4];
+ ValueNumSet* m_set;
+ };
+ unsigned m_numElements = 0;
+
+ public:
+ unsigned Count()
+ {
+ return m_numElements;
+ }
+
+ template
+ void ForEach(Func func)
+ {
+ if (m_numElements <= ArrLen(m_inlineElements))
+ {
+ for (unsigned i = 0; i < m_numElements; i++)
+ {
+ func(m_inlineElements[i]);
+ }
+ }
+ else
+ {
+ for (ValueNum vn : ValueNumSet::KeyIteration(m_set))
+ {
+ func(vn);
+ }
+ }
+ }
+
+ // Returns false if the value wasn't found
+ bool Lookup(ValueNum vn);
+
+ // Returns false if the value already exists
+ bool Add(Compiler* comp, ValueNum vn);
+ };
+
+ enum class VNVisit
+ {
+ Continue,
+ Abort,
+ };
+
+ ValueNum VNPhiDefToVN(const VNPhiDef& phiDef, unsigned ssaArgNum);
+
+ //--------------------------------------------------------------------------------
+ // VNVisitReachingVNs: given a VN, call the specified callback function on it and all the VNs that reach it
+ // via PHI definitions if any.
+ //
+ // Arguments:
+ // vn - The VN to visit all the reaching VNs for
+ // argVisitor - The callback function to call on the vn and its PHI arguments if any
+ //
+ // Return Value:
+// VNVisit::Abort - an argVisitor returned VNVisit::Abort, we stop the walk and return
+ // VNVisit::Continue - all argVisitor returned VNVisit::Continue
+ //
+ template
+ VNVisit VNVisitReachingVNs(ValueNum vn, TArgVisitor argVisitor)
+ {
+ ArrayStack toVisit(m_alloc);
+ toVisit.Push(vn);
+
+ SmallValueNumSet visited;
+ visited.Add(m_pComp, vn);
+ while (toVisit.Height() > 0)
+ {
+ ValueNum vnToVisit = toVisit.Pop();
+
+ // We need to handle nested (and, potentially, recursive) phi definitions.
+ // For now, we ignore memory phi definitions.
+ VNPhiDef phiDef;
+ if (GetPhiDef(vnToVisit, &phiDef))
+ {
+ for (unsigned ssaArgNum = 0; ssaArgNum < phiDef.NumArgs; ssaArgNum++)
+ {
+ ValueNum childVN = VNPhiDefToVN(phiDef, ssaArgNum);
+ if (visited.Add(m_pComp, childVN))
+ {
+ toVisit.Push(childVN);
+ }
+ }
+ }
+ else
+ {
+ if (argVisitor(vnToVisit) == VNVisit::Abort)
+ {
+ // The visitor wants to abort the walk.
+ return VNVisit::Abort;
+ }
+ }
+ }
+ return VNVisit::Continue;
+ }
+
// And the single constant for an object reference type.
static ValueNum VNForNull()
{
@@ -700,6 +816,11 @@ class ValueNumStore
// Skip all folding checks.
ValueNum VNForFuncNoFolding(var_types typ, VNFunc func, ValueNum op1VNwx, ValueNum op2VNwx);
+ ValueNum VNForPhiDef(var_types type, unsigned lclNum, unsigned ssaDef, ArrayStack& ssaArgs);
+ bool GetPhiDef(ValueNum vn, VNPhiDef* phiDef);
+ ValueNum VNForMemoryPhiDef(BasicBlock* block, ArrayStack& vns);
+ bool GetMemoryPhiDef(ValueNum vn, VNMemoryPhiDef* memoryPhiDef);
+
ValueNum VNForCast(VNFunc func, ValueNum castToVN, ValueNum objVN);
ValueNum VNForMapSelect(ValueNumKind vnk, var_types type, ValueNum map, ValueNum index);
@@ -1068,9 +1189,6 @@ class ValueNumStore
// Returns true iff the VN represents a relop
bool IsVNRelop(ValueNum vn);
- // Returns true iff the VN is a phi definition
- bool IsVNPhiDef(ValueNum vn);
-
// Returns true if the two VNs represent the same value
// despite being different VNs. Useful for phi def VNs.
bool AreVNsEquivalent(ValueNum vn1, ValueNum vn2);
@@ -1398,13 +1516,15 @@ class ValueNumStore
enum ChunkExtraAttribs : BYTE
{
- CEA_Const, // This chunk contains constant values.
- CEA_Handle, // This chunk contains handle constants.
- CEA_Func0, // Represents functions of arity 0.
- CEA_Func1, // ...arity 1.
- CEA_Func2, // ...arity 2.
- CEA_Func3, // ...arity 3.
- CEA_Func4, // ...arity 4.
+ CEA_Const, // This chunk contains constant values.
+ CEA_Handle, // This chunk contains handle constants.
+ CEA_PhiDef, // This contains pointers to VNPhiDef.
+ CEA_MemoryPhiDef, // This contains pointers to VNMemoryPhiDef.
+ CEA_Func0, // Represents functions of arity 0.
+ CEA_Func1, // ...arity 1.
+ CEA_Func2, // ...arity 2.
+ CEA_Func3, // ...arity 3.
+ CEA_Func4, // ...arity 4.
CEA_Count
};
diff --git a/src/coreclr/jit/valuenumfuncs.h b/src/coreclr/jit/valuenumfuncs.h
index 70c950fccfc7e0..50cf4c6a274c3b 100644
--- a/src/coreclr/jit/valuenumfuncs.h
+++ b/src/coreclr/jit/valuenumfuncs.h
@@ -12,9 +12,6 @@ ValueNumFuncDef(MapStore, 4, false, false, false, false) // Args: 0: m
ValueNumFuncDef(MapPhysicalStore, 3, false, false, false, false) // Args: 0: map, 1: "physical selector": offset and size, 2: value being stored
ValueNumFuncDef(BitCast, 2, false, false, false, false) // Args: 0: VN of the arg, 1: VN of the target type
ValueNumFuncDef(ZeroObj, 1, false, false, false, false) // Args: 0: VN of the class handle.
-ValueNumFuncDef(PhiDef, 3, false, false, false, false) // Args: 0: local var # (or -1 for memory), 1: SSA #, 2: VN of definition.
-ValueNumFuncDef(PhiMemoryDef, 2, false, false, false, false) // Args: 0: VN for basic block pointer, 1: VN of definition
-ValueNumFuncDef(Phi, 2, false, false, false, false) // A phi function. Only occurs as arg of PhiDef or PhiMemoryDef. Arguments are SSA numbers of var being defined.
ValueNumFuncDef(PtrToLoc, 2, false, true, false, false) // Pointer (byref) to a local variable. Args: VN's of: 0: local's number, 1: offset.
ValueNumFuncDef(PtrToArrElem, 4, false, false, false, false) // Pointer (byref) to an array element. Args: 0: array elem type eq class var_types value, VN's of: 1: array, 2: index, 3: offset.
diff --git a/src/coreclr/jit/vartype.h b/src/coreclr/jit/vartype.h
index 34668fd545b713..c0cfa87775dab6 100644
--- a/src/coreclr/jit/vartype.h
+++ b/src/coreclr/jit/vartype.h
@@ -185,7 +185,7 @@ inline bool varTypeIsArithmetic(T vt)
template
inline bool varTypeIsGC(T vt)
{
- return ((varTypeClassification[TypeGet(vt)] & (VTF_GCR | VTF_BYR)) != 0);
+ return (TypeGet(vt) == TYP_REF) || (TypeGet(vt) == TYP_BYREF);
}
template
diff --git a/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.Unix.targets b/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.Unix.targets
index e9cdbde08e5fd8..a2271dfea6da13 100644
--- a/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.Unix.targets
+++ b/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.Unix.targets
@@ -174,7 +174,7 @@ The .NET Foundation licenses this file to you under the MIT license.
-
+
@@ -185,8 +185,8 @@ The .NET Foundation licenses this file to you under the MIT license.
-
-
+
+
@@ -211,7 +211,7 @@ The .NET Foundation licenses this file to you under the MIT license.
-
+
diff --git a/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.targets b/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.targets
index 9f210a00880b77..b89c23085f1db7 100644
--- a/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.targets
+++ b/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.targets
@@ -244,6 +244,7 @@ The .NET Foundation licenses this file to you under the MIT license.
+
diff --git a/src/coreclr/nativeaot/Common/src/Internal/Runtime/MethodTable.cs b/src/coreclr/nativeaot/Common/src/Internal/Runtime/MethodTable.cs
index 91ef0a286e356c..98d5f338899a6e 100644
--- a/src/coreclr/nativeaot/Common/src/Internal/Runtime/MethodTable.cs
+++ b/src/coreclr/nativeaot/Common/src/Internal/Runtime/MethodTable.cs
@@ -1362,7 +1362,7 @@ public uint GetFieldOffset(EETypeField eField)
return cbOffset;
}
- Debug.Assert(false, "Unknown MethodTable field type");
+ Debug.Fail("Unknown MethodTable field type");
return 0;
}
diff --git a/src/coreclr/nativeaot/Common/src/System/Collections/Generic/LowLevelList.cs b/src/coreclr/nativeaot/Common/src/System/Collections/Generic/LowLevelList.cs
index a6e5e3a3fe9752..bd3a854f39a64f 100644
--- a/src/coreclr/nativeaot/Common/src/System/Collections/Generic/LowLevelList.cs
+++ b/src/coreclr/nativeaot/Common/src/System/Collections/Generic/LowLevelList.cs
@@ -31,7 +31,6 @@ namespace System.Collections.Generic
// LowLevelList with no interface implementation minimizes both code and data size.
// Data size is smaller because there will be minimal virtual function table.
// Code size is smaller because only functions called will be in the binary.
- // Use LowLevelListWithIList for IList support
[DebuggerDisplay("Count = {Count}")]
#if TYPE_LOADER_IMPLEMENTATION
[System.Runtime.CompilerServices.ForceDictionaryLookups]
@@ -196,356 +195,5 @@ private void EnsureCapacity(int min)
Capacity = newCapacity;
}
}
-
-#if !TYPE_LOADER_IMPLEMENTATION
- // Adds the elements of the given collection to the end of this list. If
- // required, the capacity of the list is increased to twice the previous
- // capacity or the new size, whichever is larger.
- //
- public void AddRange(IEnumerable collection)
- {
-
- InsertRange(_size, collection);
- }
-
- // Clears the contents of List.
- public void Clear()
- {
- if (_size > 0)
- {
- Array.Clear(_items, 0, _size); // Don't need to doc this but we clear the elements so that the gc can reclaim the references.
- _size = 0;
- }
- _version++;
- }
-
- // Contains returns true if the specified element is in the List.
- // It does a linear, O(n) search. Equality is determined by calling
- // item.Equals().
- //
- public bool Contains(T item)
- {
- if ((object?)item == null)
- {
- for (int i = 0; i < _size; i++)
- if ((object?)_items[i] == null)
- return true;
- return false;
- }
- else
- {
- int index = IndexOf(item);
- if (index >= 0)
- return true;
- return false;
- }
- }
-
-
- // Copies a section of this list to the given array at the given index.
- //
- // The method uses the Array.Copy method to copy the elements.
- //
- public void CopyTo(int index, T[] array, int arrayIndex, int count)
- {
- if (_size - index < count)
- {
- throw new ArgumentException();
- }
-
- // Delegate rest of error checking to Array.Copy.
- Array.Copy(_items, index, array, arrayIndex, count);
- }
-
- public void CopyTo(T[] array, int arrayIndex)
- {
- // Delegate rest of error checking to Array.Copy.
- Array.Copy(_items, 0, array, arrayIndex, _size);
- }
-
- // Returns the index of the first occurrence of a given value in a range of
- // this list. The list is searched forwards from beginning to end.
- // The elements of the list are compared to the given value using the
- // Object.Equals method.
- //
- // This method uses the Array.IndexOf method to perform the
- // search.
- //
- public int IndexOf(T item)
- {
- return Array.IndexOf(_items, item, 0, _size);
- }
-
-
- // Returns the index of the first occurrence of a given value in a range of
- // this list. The list is searched forwards, starting at index
- // index and ending at count number of elements. The
- // elements of the list are compared to the given value using the
- // Object.Equals method.
- //
- // This method uses the Array.IndexOf method to perform the
- // search.
- //
- public int IndexOf(T item, int index)
- {
- ArgumentOutOfRangeException.ThrowIfGreaterThan(index, _size);
- return Array.IndexOf(_items, item, index, _size - index);
- }
-
- // Inserts an element into this list at a given index. The size of the list
- // is increased by one. If required, the capacity of the list is doubled
- // before inserting the new element.
- //
- public void Insert(int index, T item)
- {
- // Note that insertions at the end are legal.
- ArgumentOutOfRangeException.ThrowIfGreaterThan((uint)index, (uint)_size, nameof(index));
-
- if (_size == _items.Length) EnsureCapacity(_size + 1);
- if (index < _size)
- {
- Array.Copy(_items, index, _items, index + 1, _size - index);
- }
- _items[index] = item;
- _size++;
- _version++;
- }
-
- // Inserts the elements of the given collection at a given index. If
- // required, the capacity of the list is increased to twice the previous
- // capacity or the new size, whichever is larger. Ranges may be added
- // to the end of the list by setting index to the List's size.
- //
- public void InsertRange(int index, IEnumerable collection)
- {
- ArgumentNullException.ThrowIfNull(collection);
- ArgumentOutOfRangeException.ThrowIfGreaterThan((uint)index, (uint)_size, nameof(index));
-
- ICollection? c = collection as ICollection;
- if (c != null)
- { // if collection is ICollection
- int count = c.Count;
- if (count > 0)
- {
- EnsureCapacity(_size + count);
- if (index < _size)
- {
- Array.Copy(_items, index, _items, index + count, _size - index);
- }
-
- // If we're inserting a List into itself, we want to be able to deal with that.
- if (this == c)
- {
- // Copy first part of _items to insert location
- Array.Copy(_items, 0, _items, index, index);
- // Copy last part of _items back to inserted location
- Array.Copy(_items, index + count, _items, index * 2, _size - index);
- }
- else
- {
- T[] itemsToInsert = new T[count];
- c.CopyTo(itemsToInsert, 0);
- Array.Copy(itemsToInsert, 0, _items, index, count);
- }
- _size += count;
- }
- }
- else
- {
- using (IEnumerator en = collection.GetEnumerator())
- {
- while (en.MoveNext())
- {
- Insert(index++, en.Current);
- }
- }
- }
- _version++;
- }
-
- // Removes the element at the given index. The size of the list is
- // decreased by one.
- //
- public bool Remove(T item)
- {
- int index = IndexOf(item);
- if (index >= 0)
- {
- RemoveAt(index);
- return true;
- }
-
- return false;
- }
-
- // This method removes all items which matches the predicate.
- // The complexity is O(n).
- public int RemoveAll(Predicate match)
- {
- ArgumentNullException.ThrowIfNull(match);
-
- int freeIndex = 0; // the first free slot in items array
-
- // Find the first item which needs to be removed.
- while (freeIndex < _size && !match(_items[freeIndex]!)) freeIndex++;
- if (freeIndex >= _size) return 0;
-
- int current = freeIndex + 1;
- while (current < _size)
- {
- // Find the first item which needs to be kept.
- while (current < _size && match(_items[current]!)) current++;
-
- if (current < _size)
- {
- // copy item to the free slot.
- _items[freeIndex++] = _items[current++];
- }
- }
-
- Array.Clear(_items, freeIndex, _size - freeIndex);
- int result = _size - freeIndex;
- _size = freeIndex;
- _version++;
- return result;
- }
-
- // Removes the element at the given index. The size of the list is
- // decreased by one.
- //
- public void RemoveAt(int index)
- {
- ArgumentOutOfRangeException.ThrowIfGreaterThanOrEqual((uint)index, (uint)_size, nameof(index));
- _size--;
- if (index < _size)
- {
- Array.Copy(_items, index + 1, _items, index, _size - index);
- }
- _items[_size] = default!;
- _version++;
- }
-
- // ToArray returns a new Object array containing the contents of the List.
- // This requires copying the List, which is an O(n) operation.
- public T[] ToArray()
- {
- T[] array = new T[_size];
- Array.Copy(_items, 0, array, 0, _size);
- return array;
- }
-#endif
- }
-
-#if !TYPE_LOADER_IMPLEMENTATION
- // LowLevelList with full IList implementation
- internal sealed class LowLevelListWithIList : LowLevelList, IList
- {
- public LowLevelListWithIList()
- {
- }
-
- public LowLevelListWithIList(int capacity)
- : base(capacity)
- {
- }
-
- public LowLevelListWithIList(IEnumerable collection)
- : base(collection)
- {
- }
-
- // Is this List read-only?
- bool ICollection.IsReadOnly
- {
- get { return false; }
- }
-
- ///
- IEnumerator IEnumerable.GetEnumerator()
- {
- return new Enumerator(this);
- }
-
- System.Collections.IEnumerator System.Collections.IEnumerable.GetEnumerator()
- {
- return new Enumerator(this);
- }
-
- private struct Enumerator : IEnumerator, System.Collections.IEnumerator
- {
- private LowLevelListWithIList _list;
- private int _index;
- private int _version;
- private T? _current;
-
- internal Enumerator(LowLevelListWithIList list)
- {
- _list = list;
- _index = 0;
- _version = list._version;
- _current = default(T);
- }
-
- public void Dispose()
- {
- }
-
- public bool MoveNext()
- {
- LowLevelListWithIList localList = _list;
-
- if (_version == localList._version && ((uint)_index < (uint)localList._size))
- {
- _current = localList._items[_index];
- _index++;
- return true;
- }
- return MoveNextRare();
- }
-
- private bool MoveNextRare()
- {
- if (_version != _list._version)
- {
- throw new InvalidOperationException();
- }
-
- _index = _list._size + 1;
- _current = default(T);
- return false;
- }
-
- public T Current
- {
- get
- {
- return _current!;
- }
- }
-
- object? System.Collections.IEnumerator.Current
- {
- get
- {
- if (_index == 0 || _index == _list._size + 1)
- {
- throw new InvalidOperationException();
- }
- return Current;
- }
- }
-
- void System.Collections.IEnumerator.Reset()
- {
- if (_version != _list._version)
- {
- throw new InvalidOperationException();
- }
-
- _index = 0;
- _current = default(T);
- }
- }
}
-#endif // !TYPE_LOADER_IMPLEMENTATION
}
diff --git a/src/coreclr/nativeaot/Runtime.Base/src/System/Diagnostics/Debug.cs b/src/coreclr/nativeaot/Runtime.Base/src/System/Diagnostics/Debug.cs
index 0221b8e7c98951..ea85cc5389bf68 100644
--- a/src/coreclr/nativeaot/Runtime.Base/src/System/Diagnostics/Debug.cs
+++ b/src/coreclr/nativeaot/Runtime.Base/src/System/Diagnostics/Debug.cs
@@ -27,5 +27,12 @@ internal static void Assert(bool condition)
EH.FallbackFailFast(RhFailFastReason.InternalError, null);
}
}
+
+ [System.Diagnostics.Conditional("DEBUG")]
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ internal static void Fail(string message)
+ {
+ Assert(false, message);
+ }
}
}
diff --git a/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/ExceptionHandling.cs b/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/ExceptionHandling.cs
index f74e394df855da..3443e1bcb711a9 100644
--- a/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/ExceptionHandling.cs
+++ b/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/ExceptionHandling.cs
@@ -200,7 +200,7 @@ private static void OnUnhandledExceptionViaClassLib(object exception)
// disallow all exceptions leaking out of callbacks
}
#else
- Debug.Assert(false, "Unhandled exceptions should be processed by the native runtime only");
+ Debug.Fail("Unhandled exceptions should be processed by the native runtime only");
#endif
}
@@ -429,7 +429,7 @@ public static Exception GetRuntimeException(ExceptionIDs id)
return new InvalidCastException();
default:
- Debug.Assert(false, "unexpected ExceptionID");
+ Debug.Fail("unexpected ExceptionID");
FallbackFailFast(RhFailFastReason.InternalError, null);
return null;
}
@@ -696,7 +696,7 @@ public static void RhUnwindAndIntercept(ref ExInfo exInfo, UIntPtr interceptStac
InternalCalls.ResumeAtInterceptionLocation(exInfo._frameIter.RegisterSet);
}
- Debug.Assert(false, "unreachable");
+ Debug.Fail("unreachable");
FallbackFailFast(RhFailFastReason.InternalError, null);
}
#endif // !NATIVEAOT
@@ -903,7 +903,7 @@ private static void DispatchEx(scoped ref StackFrameIterator frameIter, ref ExIn
pReversePInvokePropagationContext, pReversePInvokePropagationCallback, frameIter.RegisterSet, ref exInfo, frameIter.PreviousTransitionFrame);
// the helper should jump to propagation handler and not return
#endif
- Debug.Assert(false, "unreachable");
+ Debug.Fail("unreachable");
FallbackFailFast(RhFailFastReason.InternalError, null);
}
#endif // FEATURE_OBJCMARSHAL
@@ -926,7 +926,7 @@ private static void DispatchEx(scoped ref StackFrameIterator frameIter, ref ExIn
}
#endif // NATIVEAOT
// currently, RhpCallCatchFunclet will resume after the catch
- Debug.Assert(false, "unreachable");
+ Debug.Fail("unreachable");
FallbackFailFast(RhFailFastReason.InternalError, null);
}
diff --git a/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/InternalCalls.cs b/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/InternalCalls.cs
index 7ea73ba7c2c387..2237b50350835f 100644
--- a/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/InternalCalls.cs
+++ b/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/InternalCalls.cs
@@ -276,7 +276,7 @@ internal static extern unsafe IntPtr RhpCallPropagateExceptionCallback(
// Indicate that the current round of finalizations is complete.
[DllImport(Redhawk.BaseName)]
- internal static extern void RhpSignalFinalizationComplete(uint fCount);
+ internal static extern void RhpSignalFinalizationComplete(uint fCount, int observedFullGcCount);
[DllImport(Redhawk.BaseName)]
internal static extern ulong RhpGetTickCount64();
diff --git a/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/RuntimeExports.cs b/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/RuntimeExports.cs
index 8a552a86cd48dd..a1e7ab60f30da6 100644
--- a/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/RuntimeExports.cs
+++ b/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/RuntimeExports.cs
@@ -388,7 +388,7 @@ internal static unsafe IntPtr RhGetRuntimeHelperForType(MethodTable* pEEType, Ru
return (IntPtr)(delegate*)&InternalCalls.RhpNewArray;
default:
- Debug.Assert(false, "Unknown RuntimeHelperKind");
+ Debug.Fail("Unknown RuntimeHelperKind");
return IntPtr.Zero;
}
}
diff --git a/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/TypeCast.cs b/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/TypeCast.cs
index 0161e8c47c1508..c3f92d067dd243 100644
--- a/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/TypeCast.cs
+++ b/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/TypeCast.cs
@@ -414,10 +414,10 @@ public static unsafe object CheckCastClass(MethodTable* pTargetType, object obj)
[RuntimeExport("RhTypeCast_CheckCastClassSpecial")]
private static unsafe object CheckCastClassSpecial(MethodTable* pTargetType, object obj)
{
- Debug.Assert(!pTargetType->IsParameterizedType, "CheckCastClass called with parameterized MethodTable");
- Debug.Assert(!pTargetType->IsFunctionPointer, "CheckCastClass called with function pointer MethodTable");
- Debug.Assert(!pTargetType->IsInterface, "CheckCastClass called with interface MethodTable");
- Debug.Assert(!pTargetType->HasGenericVariance, "CheckCastClass with variant MethodTable");
+ Debug.Assert(!pTargetType->IsParameterizedType, "CheckCastClassSpecial called with parameterized MethodTable");
+ Debug.Assert(!pTargetType->IsFunctionPointer, "CheckCastClassSpecial called with function pointer MethodTable");
+ Debug.Assert(!pTargetType->IsInterface, "CheckCastClassSpecial called with interface MethodTable");
+ Debug.Assert(!pTargetType->HasGenericVariance, "CheckCastClassSpecial with variant MethodTable");
MethodTable* mt = obj.GetMethodTable();
Debug.Assert(mt != pTargetType, "The check for the trivial cases should be inlined by the JIT");
@@ -711,7 +711,7 @@ internal static unsafe bool TypeParametersAreCompatible(int arity,
break;
default:
- Debug.Assert(false, "unknown generic variance type");
+ Debug.Fail("unknown generic variance type");
break;
}
}
diff --git a/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/__Finalizer.cs b/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/__Finalizer.cs
index 4e695601f19450..80576c921f8a20 100644
--- a/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/__Finalizer.cs
+++ b/src/coreclr/nativeaot/Runtime.Base/src/System/Runtime/__Finalizer.cs
@@ -29,11 +29,14 @@ public static void ProcessFinalizers()
// otherwise memory is low and we should initiate a collection.
if (InternalCalls.RhpWaitForFinalizerRequest() != 0)
{
+ int observedFullGcCount = RuntimeImports.RhGetGcCollectionCount(RuntimeImports.RhGetMaxGcGeneration(), false);
uint finalizerCount = DrainQueue();
- // Tell anybody that's interested that the finalization pass is complete (there is a race condition here
- // where we might immediately signal a new request as complete, but this is acceptable).
- InternalCalls.RhpSignalFinalizationComplete(finalizerCount);
+ // Anyone waiting to drain the Q can now wake up. Note that there is a
+ // race in that another thread starting a drain, as we leave a drain, may
+ // consider itself satisfied by the drain that just completed.
+ // Thus we include the Full GC count that we have certainly observed.
+ InternalCalls.RhpSignalFinalizationComplete(finalizerCount, observedFullGcCount);
}
else
{
diff --git a/src/coreclr/nativeaot/Runtime/FinalizerHelpers.cpp b/src/coreclr/nativeaot/Runtime/FinalizerHelpers.cpp
index 8fa60538189697..b0f9eb0db5aa99 100644
--- a/src/coreclr/nativeaot/Runtime/FinalizerHelpers.cpp
+++ b/src/coreclr/nativeaot/Runtime/FinalizerHelpers.cpp
@@ -94,6 +94,22 @@ EXTERN_C void QCALLTYPE RhInitializeFinalizerThread()
g_FinalizerEvent.Set();
}
+static int32_t g_fullGcCountSeenByFinalization;
+
+// Indicate that the current round of finalizations is complete.
+EXTERN_C void QCALLTYPE RhpSignalFinalizationComplete(uint32_t fcount, int32_t observedFullGcCount)
+{
+ FireEtwGCFinalizersEnd_V1(fcount, GetClrInstanceId());
+
+ g_fullGcCountSeenByFinalization = observedFullGcCount;
+ g_FinalizerDoneEvent.Set();
+
+ if (YieldProcessorNormalization::IsMeasurementScheduled())
+ {
+ YieldProcessorNormalization::PerformMeasurement();
+ }
+}
+
EXTERN_C void QCALLTYPE RhWaitForPendingFinalizers(UInt32_BOOL allowReentrantWait)
{
// This must be called via p/invoke rather than RuntimeImport since it blocks and could starve the GC if
@@ -103,6 +119,14 @@ EXTERN_C void QCALLTYPE RhWaitForPendingFinalizers(UInt32_BOOL allowReentrantWai
// Can't call this from the finalizer thread itself.
if (ThreadStore::GetCurrentThread() != g_pFinalizerThread)
{
+ // We may see a completion of finalization cycle that might not see objects that became
+ // F-reachable in recent GCs. In such case we want to wait for a completion of another cycle.
+ // However, since an object cannot be prevented from promoting, one can only rely on Full GCs
+ // to collect unreferenced objects deterministically. Thus we only care about Full GCs here.
+ int desiredFullGcCount =
+ GCHeapUtilities::GetGCHeap()->CollectionCount(GCHeapUtilities::GetGCHeap()->GetMaxGeneration());
+
+ tryAgain:
// Clear any current indication that a finalization pass is finished and wake the finalizer thread up
// (if there's no work to do it'll set the done event immediately).
g_FinalizerDoneEvent.Reset();
@@ -110,6 +134,17 @@ EXTERN_C void QCALLTYPE RhWaitForPendingFinalizers(UInt32_BOOL allowReentrantWai
// Wait for the finalizer thread to get back to us.
g_FinalizerDoneEvent.Wait(INFINITE, false, allowReentrantWait);
+
+ // We use unsigned math here because the collection counts, which are size_t internally,
+ // can in theory overflow an int and wrap around;
+ // unsigned math has more defined/portable behavior in such a case.
+ if ((int)((unsigned int)desiredFullGcCount - (unsigned int)g_fullGcCountSeenByFinalization) > 0)
+ {
+ // There were some Full GCs happening before we started waiting and possibly not seen by the
+ // last finalization cycle. This is rare, but we need to be sure we have seen those,
+ // so we try one more time.
+ goto tryAgain;
+ }
}
}
@@ -176,18 +211,6 @@ EXTERN_C UInt32_BOOL QCALLTYPE RhpWaitForFinalizerRequest()
} while (true);
}
-// Indicate that the current round of finalizations is complete.
-EXTERN_C void QCALLTYPE RhpSignalFinalizationComplete(uint32_t fcount)
-{
- FireEtwGCFinalizersEnd_V1(fcount, GetClrInstanceId());
- g_FinalizerDoneEvent.Set();
-
- if (YieldProcessorNormalization::IsMeasurementScheduled())
- {
- YieldProcessorNormalization::PerformMeasurement();
- }
-}
-
//
// The following helpers are special in that they interact with internal GC state or directly manipulate
// managed references so they're called with a special co-operative p/invoke.
diff --git a/src/coreclr/nativeaot/Runtime/inc/stressLog.h b/src/coreclr/nativeaot/Runtime/inc/stressLog.h
index 9c78a120bfac6b..fc031897f492d4 100644
--- a/src/coreclr/nativeaot/Runtime/inc/stressLog.h
+++ b/src/coreclr/nativeaot/Runtime/inc/stressLog.h
@@ -109,103 +109,36 @@ enum LogFacilitiesEnum: unsigned int {
StressLog::LogMsgOL(_Args msg); \
} WHILE_0
-#define STRESS_LOG0(facility, level, msg) do { \
- if (StressLog::StressLogOn(facility, level)) \
- StressLog::LogMsg(facility, 0, msg); \
- } WHILE_0 \
-
-#define STRESS_LOG1(facility, level, msg, data1) do { \
- if (StressLog::StressLogOn(facility, level)) \
- StressLog::LogMsg(facility, 1, msg, (void*)(size_t)(data1)); \
- } WHILE_0
-
-#define STRESS_LOG2(facility, level, msg, data1, data2) do { \
- if (StressLog::StressLogOn(facility, level)) \
- StressLog::LogMsg(facility, 2, msg, \
- (void*)(size_t)(data1), (void*)(size_t)(data2)); \
- } WHILE_0
-
-#define STRESS_LOG3(facility, level, msg, data1, data2, data3) do { \
- if (StressLog::StressLogOn(facility, level)) \
- StressLog::LogMsg(facility, 3, msg, \
- (void*)(size_t)(data1),(void*)(size_t)(data2),(void*)(size_t)(data3)); \
- } WHILE_0
-
-#define STRESS_LOG4(facility, level, msg, data1, data2, data3, data4) do { \
- if (StressLog::StressLogOn(facility, level)) \
- StressLog::LogMsg(facility, 4, msg, (void*)(size_t)(data1), \
- (void*)(size_t)(data2),(void*)(size_t)(data3),(void*)(size_t)(data4)); \
- } WHILE_0
-
-#define STRESS_LOG5(facility, level, msg, data1, data2, data3, data4, data5) do { \
- if (StressLog::StressLogOn(facility, level)) \
- StressLog::LogMsg(facility, 5, msg, (void*)(size_t)(data1), \
- (void*)(size_t)(data2),(void*)(size_t)(data3),(void*)(size_t)(data4), \
- (void*)(size_t)(data5)); \
- } WHILE_0
-
-#define STRESS_LOG6(facility, level, msg, data1, data2, data3, data4, data5, data6) do { \
+#define STRESS_LOG_WRITE(facility, level, msg, ...) do { \
if (StressLog::StressLogOn(facility, level)) \
- StressLog::LogMsg(facility, 6, msg, (void*)(size_t)(data1), \
- (void*)(size_t)(data2),(void*)(size_t)(data3),(void*)(size_t)(data4), \
- (void*)(size_t)(data5), (void*)(size_t)(data6)); \
+ StressLog::LogMsgOL(facility, msg, __VA_ARGS__); \
} WHILE_0
-#define STRESS_LOG7(facility, level, msg, data1, data2, data3, data4, data5, data6, data7) do { \
- if (StressLog::StressLogOn(facility, level)) \
- StressLog::LogMsg(facility, 7, msg, (void*)(size_t)(data1), \
- (void*)(size_t)(data2),(void*)(size_t)(data3),(void*)(size_t)(data4), \
- (void*)(size_t)(data5), (void*)(size_t)(data6), (void*)(size_t)(data7)); \
- } WHILE_0
-
-#define STRESS_LOG_COND0(facility, level, msg) do { \
- if (StressLog::StressLogOn(facility, level) && (cond)) \
+#define STRESS_LOG0(facility, level, msg) do { \
+ if (StressLog::StressLogOn(facility, level)) \
StressLog::LogMsg(facility, 0, msg); \
- } WHILE_0
+ } WHILE_0 \
-#define STRESS_LOG_COND1(facility, level, cond, msg, data1) do { \
- if (StressLog::StressLogOn(facility, level) && (cond)) \
- StressLog::LogMsg(facility, 1, msg, (void*)(size_t)(data1)); \
- } WHILE_0
+#define STRESS_LOG1(facility, level, msg, data1) \
+ STRESS_LOG_WRITE(facility, level, msg, data1)
-#define STRESS_LOG_COND2(facility, level, cond, msg, data1, data2) do { \
- if (StressLog::StressLogOn(facility, level) && (cond)) \
- StressLog::LogMsg(facility, 2, msg, \
- (void*)(size_t)(data1), (void*)(size_t)(data2)); \
- } WHILE_0
+#define STRESS_LOG2(facility, level, msg, data1, data2) \
+ STRESS_LOG_WRITE(facility, level, msg, data1, data2)
-#define STRESS_LOG_COND3(facility, level, cond, msg, data1, data2, data3) do { \
- if (StressLog::StressLogOn(facility, level) && (cond)) \
- StressLog::LogMsg(facility, 3, msg, \
- (void*)(size_t)(data1),(void*)(size_t)(data2),(void*)(size_t)(data3)); \
- } WHILE_0
+#define STRESS_LOG3(facility, level, msg, data1, data2, data3) \
+ STRESS_LOG_WRITE(facility, level, msg, data1, data2, data3)
-#define STRESS_LOG_COND4(facility, level, cond, msg, data1, data2, data3, data4) do { \
- if (StressLog::StressLogOn(facility, level) && (cond)) \
- StressLog::LogMsg(facility, 4, msg, (void*)(size_t)(data1), \
- (void*)(size_t)(data2),(void*)(size_t)(data3),(void*)(size_t)(data4)); \
- } WHILE_0
+#define STRESS_LOG4(facility, level, msg, data1, data2, data3, data4) \
+ STRESS_LOG_WRITE(facility, level, msg, data1, data2, data3, data4)
-#define STRESS_LOG_COND5(facility, level, cond, msg, data1, data2, data3, data4, data5) do { \
- if (StressLog::StressLogOn(facility, level) && (cond)) \
- StressLog::LogMsg(facility, 5, msg, (void*)(size_t)(data1), \
- (void*)(size_t)(data2),(void*)(size_t)(data3),(void*)(size_t)(data4), \
- (void*)(size_t)(data5)); \
- } WHILE_0
+#define STRESS_LOG5(facility, level, msg, data1, data2, data3, data4, data5) \
+ STRESS_LOG_WRITE(facility, level, msg, data1, data2, data3, data4, data5)
-#define STRESS_LOG_COND6(facility, level, cond, msg, data1, data2, data3, data4, data5, data6) do { \
- if (StressLog::StressLogOn(facility, level) && (cond)) \
- StressLog::LogMsg(facility, 6, msg, (void*)(size_t)(data1), \
- (void*)(size_t)(data2),(void*)(size_t)(data3),(void*)(size_t)(data4), \
- (void*)(size_t)(data5), (void*)(size_t)(data6)); \
- } WHILE_0
+#define STRESS_LOG6(facility, level, msg, data1, data2, data3, data4, data5, data6) \
+ STRESS_LOG_WRITE(facility, level, msg, data1, data2, data3, data4, data5, data6)
-#define STRESS_LOG_COND7(facility, level, cond, msg, data1, data2, data3, data4, data5, data6, data7) do { \
- if (StressLog::StressLogOn(facility, level) && (cond)) \
- StressLog::LogMsg(facility, 7, msg, (void*)(size_t)(data1), \
- (void*)(size_t)(data2),(void*)(size_t)(data3),(void*)(size_t)(data4), \
- (void*)(size_t)(data5), (void*)(size_t)(data6), (void*)(size_t)(data7)); \
- } WHILE_0
+#define STRESS_LOG7(facility, level, msg, data1, data2, data3, data4, data5, data6, data7) \
+ STRESS_LOG_WRITE(facility, level, msg, data1, data2, data3, data4, data5, data6, data7)
#define STRESS_LOG_RESERVE_MEM(numChunks) do { \
if (StressLog::StressLogOn(LF_ALL, LL_ALWAYS)) \
@@ -375,105 +308,24 @@ class StressLog {
#pragma warning( push )
#pragma warning( disable : 4312 )
#endif
- static void LogMsgOL(const char* format)
- { LogMsg(LF_GC, 0, format); }
-
- template < typename T1 >
- static void LogMsgOL(const char* format, T1 data1)
- {
- C_ASSERT(sizeof(T1) <= sizeof(void*));
- LogMsg(LF_GC, 1, format, (void*)(size_t)data1);
- }
-
- template < typename T1, typename T2 >
- static void LogMsgOL(const char* format, T1 data1, T2 data2)
- {
- C_ASSERT(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*));
- LogMsg(LF_GC, 2, format, (void*)(size_t)data1, (void*)(size_t)data2);
- }
-
- template < typename T1, typename T2, typename T3 >
- static void LogMsgOL(const char* format, T1 data1, T2 data2, T3 data3)
- {
- C_ASSERT(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*));
- LogMsg(LF_GC, 3, format, (void*)(size_t)data1, (void*)(size_t)data2, (void*)(size_t)data3);
- }
-
- template < typename T1, typename T2, typename T3, typename T4 >
- static void LogMsgOL(const char* format, T1 data1, T2 data2, T3 data3, T4 data4)
- {
- C_ASSERT(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*));
- LogMsg(LF_GC, 4, format, (void*)(size_t)data1, (void*)(size_t)data2, (void*)(size_t)data3, (void*)(size_t)data4);
- }
-
- template < typename T1, typename T2, typename T3, typename T4, typename T5 >
- static void LogMsgOL(const char* format, T1 data1, T2 data2, T3 data3, T4 data4, T5 data5)
- {
- C_ASSERT(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*) && sizeof(T5) <= sizeof(void*));
- LogMsg(LF_GC, 5, format, (void*)(size_t)data1, (void*)(size_t)data2, (void*)(size_t)data3, (void*)(size_t)data4, (void*)(size_t)data5);
- }
- template < typename T1, typename T2, typename T3, typename T4, typename T5, typename T6 >
- static void LogMsgOL(const char* format, T1 data1, T2 data2, T3 data3, T4 data4, T5 data5, T6 data6)
+ template<typename T>
+ static void* ConvertArgument(T arg)
{
- C_ASSERT(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*) && sizeof(T5) <= sizeof(void*) && sizeof(T6) <= sizeof(void*));
- LogMsg(LF_GC, 6, format, (void*)(size_t)data1, (void*)(size_t)data2, (void*)(size_t)data3, (void*)(size_t)data4, (void*)(size_t)data5, (void*)(size_t)data6);
+ C_ASSERT(sizeof(T) <= sizeof(void*));
+ return (void*)(size_t)arg;
}
- template < typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7 >
- static void LogMsgOL(const char* format, T1 data1, T2 data2, T3 data3, T4 data4, T5 data5, T6 data6, T7 data7)
+ template<typename... Ts>
+ static void LogMsgOL(const char* format, Ts... args)
{
- C_ASSERT(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*) && sizeof(T5) <= sizeof(void*) && sizeof(T6) <= sizeof(void*) && sizeof(T7) <= sizeof(void*));
- LogMsg(LF_GC, 7, format, (void*)(size_t)data1, (void*)(size_t)data2, (void*)(size_t)data3, (void*)(size_t)data4, (void*)(size_t)data5, (void*)(size_t)data6, (void*)(size_t)data7);
+ LogMsg(LF_GC, sizeof...(args), format, ConvertArgument(args)...);
}
- template < typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8 >
- static void LogMsgOL(const char* format, T1 data1, T2 data2, T3 data3, T4 data4, T5 data5, T6 data6, T7 data7, T8 data8)
+ template<typename... Ts>
+ static void LogMsgOL(unsigned facility, const char* format, Ts... args)
{
- C_ASSERT(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*) && sizeof(T5) <= sizeof(void*) && sizeof(T6) <= sizeof(void*) && sizeof(T7) <= sizeof(void*) && sizeof(T8) <= sizeof(void*));
- LogMsg(LF_GC, 8, format, (void*)(size_t)data1, (void*)(size_t)data2, (void*)(size_t)data3, (void*)(size_t)data4, (void*)(size_t)data5, (void*)(size_t)data6, (void*)(size_t)data7, (void*)(size_t)data8);
- }
-
- template < typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9 >
- static void LogMsgOL(const char* format, T1 data1, T2 data2, T3 data3, T4 data4, T5 data5, T6 data6, T7 data7, T8 data8, T9 data9)
- {
- C_ASSERT(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*) && sizeof(T5) <= sizeof(void*) && sizeof(T6) <= sizeof(void*) && sizeof(T7) <= sizeof(void*) && sizeof(T8) <= sizeof(void*) && sizeof(T9) <= sizeof(void*));
- LogMsg(LF_GC, 9, format, (void*)(size_t)data1, (void*)(size_t)data2, (void*)(size_t)data3, (void*)(size_t)data4, (void*)(size_t)data5, (void*)(size_t)data6, (void*)(size_t)data7, (void*)(size_t)data8, (void*)(size_t)data9);
- }
-
- template < typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10 >
- static void LogMsgOL(const char* format, T1 data1, T2 data2, T3 data3, T4 data4, T5 data5, T6 data6, T7 data7, T8 data8, T9 data9, T10 data10)
- {
- C_ASSERT(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*) && sizeof(T5) <= sizeof(void*) && sizeof(T6) <= sizeof(void*) && sizeof(T7) <= sizeof(void*) && sizeof(T8) <= sizeof(void*) && sizeof(T9) <= sizeof(void*) && sizeof(T10) <= sizeof(void*));
- LogMsg(LF_GC, 10, format, (void*)(size_t)data1, (void*)(size_t)data2, (void*)(size_t)data3, (void*)(size_t)data4, (void*)(size_t)data5, (void*)(size_t)data6, (void*)(size_t)data7, (void*)(size_t)data8, (void*)(size_t)data9, (void*)(size_t)data10);
- }
-
- template < typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10, typename T11 >
- static void LogMsgOL(const char* format, T1 data1, T2 data2, T3 data3, T4 data4, T5 data5, T6 data6, T7 data7, T8 data8, T9 data9, T10 data10, T11 data11)
- {
- C_ASSERT(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*) && sizeof(T5) <= sizeof(void*) && sizeof(T6) <= sizeof(void*) && sizeof(T7) <= sizeof(void*) && sizeof(T8) <= sizeof(void*) && sizeof(T9) <= sizeof(void*) && sizeof(T10) <= sizeof(void*) && sizeof(T11) <= sizeof(void*));
- LogMsg(LF_GC, 11, format, (void*)(size_t)data1, (void*)(size_t)data2, (void*)(size_t)data3, (void*)(size_t)data4, (void*)(size_t)data5, (void*)(size_t)data6, (void*)(size_t)data7, (void*)(size_t)data8, (void*)(size_t)data9, (void*)(size_t)data10, (void*)(size_t)data11);
- }
-
- template < typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10, typename T11, typename T12 >
- static void LogMsgOL(const char* format, T1 data1, T2 data2, T3 data3, T4 data4, T5 data5, T6 data6, T7 data7, T8 data8, T9 data9, T10 data10, T11 data11, T12 data12)
- {
- C_ASSERT(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*) && sizeof(T5) <= sizeof(void*) && sizeof(T6) <= sizeof(void*) && sizeof(T7) <= sizeof(void*) && sizeof(T8) <= sizeof(void*) && sizeof(T9) <= sizeof(void*) && sizeof(T10) <= sizeof(void*) && sizeof(T11) <= sizeof(void*) && sizeof(T12) <= sizeof(void*));
- LogMsg(LF_GC, 12, format, (void*)(size_t)data1, (void*)(size_t)data2, (void*)(size_t)data3, (void*)(size_t)data4, (void*)(size_t)data5, (void*)(size_t)data6, (void*)(size_t)data7, (void*)(size_t)data8, (void*)(size_t)data9, (void*)(size_t)data10, (void*)(size_t)data11, (void*)(size_t)data12);
- }
-
- template < typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10, typename T11, typename T12, typename T13 >
- static void LogMsgOL(const char* format, T1 data1, T2 data2, T3 data3, T4 data4, T5 data5, T6 data6, T7 data7, T8 data8, T9 data9, T10 data10, T11 data11, T12 data12, T13 data13)
- {
- C_ASSERT(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*) && sizeof(T5) <= sizeof(void*) && sizeof(T6) <= sizeof(void*) && sizeof(T7) <= sizeof(void*) && sizeof(T8) <= sizeof(void*) && sizeof(T9) <= sizeof(void*) && sizeof(T10) <= sizeof(void*) && sizeof(T11) <= sizeof(void*) && sizeof(T12) <= sizeof(void*) && sizeof(T13) <= sizeof(void*));
- LogMsg(LF_GC, 13, format, (void*)(size_t)data1, (void*)(size_t)data2, (void*)(size_t)data3, (void*)(size_t)data4, (void*)(size_t)data5, (void*)(size_t)data6, (void*)(size_t)data7, (void*)(size_t)data8, (void*)(size_t)data9, (void*)(size_t)data10, (void*)(size_t)data11, (void*)(size_t)data12, (void*)(size_t)data13);
- }
-
- template < typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10, typename T11, typename T12, typename T13, typename T14 >
- static void LogMsgOL(const char* format, T1 data1, T2 data2, T3 data3, T4 data4, T5 data5, T6 data6, T7 data7, T8 data8, T9 data9, T10 data10, T11 data11, T12 data12, T13 data13, T14 data14)
- {
- C_ASSERT(sizeof(T1) <= sizeof(void*) && sizeof(T2) <= sizeof(void*) && sizeof(T3) <= sizeof(void*) && sizeof(T4) <= sizeof(void*) && sizeof(T5) <= sizeof(void*) && sizeof(T6) <= sizeof(void*) && sizeof(T7) <= sizeof(void*) && sizeof(T8) <= sizeof(void*) && sizeof(T9) <= sizeof(void*) && sizeof(T10) <= sizeof(void*) && sizeof(T11) <= sizeof(void*) && sizeof(T12) <= sizeof(void*) && sizeof(T13) <= sizeof(void*) && sizeof(T14) <= sizeof(void*));
- LogMsg(LF_GC, 14, format, (void*)(size_t)data1, (void*)(size_t)data2, (void*)(size_t)data3, (void*)(size_t)data4, (void*)(size_t)data5, (void*)(size_t)data6, (void*)(size_t)data7, (void*)(size_t)data8, (void*)(size_t)data9, (void*)(size_t)data10, (void*)(size_t)data11, (void*)(size_t)data12, (void*)(size_t)data13, (void*)(size_t)data14);
+ LogMsg(facility, sizeof...(args), format, ConvertArgument(args)...);
}
#ifdef _MSC_VER
@@ -492,6 +344,33 @@ class StressLog {
};
+template<>
+void* StressLog::ConvertArgument(float arg) = delete;
+
+#if TARGET_64BIT
+template<>
+inline void* StressLog::ConvertArgument(double arg)
+{
+ return (void*)(size_t)(*((uint64_t*)&arg));
+}
+#else
+template<>
+void* StressLog::ConvertArgument(double arg) = delete;
+
+// COMPAT: Truncate 64-bit integer arguments to 32-bit
+template<>
+inline void* StressLog::ConvertArgument(uint64_t arg)
+{
+ return (void*)(size_t)arg;
+}
+
+template<>
+inline void* StressLog::ConvertArgument(int64_t arg)
+{
+ return (void*)(size_t)arg;
+}
+#endif
+
//==========================================================================================
// Private classes
//
diff --git a/src/coreclr/nativeaot/Runtime/thread.cpp b/src/coreclr/nativeaot/Runtime/thread.cpp
index c2e94a1dc8f383..b796b052182260 100644
--- a/src/coreclr/nativeaot/Runtime/thread.cpp
+++ b/src/coreclr/nativeaot/Runtime/thread.cpp
@@ -675,16 +675,10 @@ void Thread::HijackCallback(NATIVE_CONTEXT* pThreadContext, void* pThreadToHijac
if (runtime->IsConservativeStackReportingEnabled() ||
codeManager->IsSafePoint(pvAddress))
{
- // IsUnwindable is precise on arm64, but can give false negatives on other architectures.
- // (when IP is on the first instruction of an epilog, we still can unwind,
- // but we can tell if the instruction is the first only if we can navigate instructions backwards and check)
- // The preciseness of IsUnwindable is tracked in https://github.com/dotnet/runtime/issues/101932
-#if defined(TARGET_ARM64)
// we may not be able to unwind in some locations, such as epilogs.
// such locations should not contain safe points.
// when scanning conservatively we do not need to unwind
ASSERT(codeManager->IsUnwindable(pvAddress) || runtime->IsConservativeStackReportingEnabled());
-#endif
// if we are not given a thread to hijack
// perform in-line wait on the current thread
diff --git a/src/coreclr/nativeaot/Runtime/unix/UnixNativeCodeManager.cpp b/src/coreclr/nativeaot/Runtime/unix/UnixNativeCodeManager.cpp
index 7cf5bfc792edb7..b12d63bf726129 100644
--- a/src/coreclr/nativeaot/Runtime/unix/UnixNativeCodeManager.cpp
+++ b/src/coreclr/nativeaot/Runtime/unix/UnixNativeCodeManager.cpp
@@ -213,18 +213,44 @@ void UnixNativeCodeManager::EnumGcRefs(MethodInfo * pMethodInfo,
#ifdef TARGET_ARM
// Ensure that code offset doesn't have the Thumb bit set. We need
- // it to be aligned to instruction start
+ // it to be aligned to instruction start to make the !isActiveStackFrame
+ // branch below work.
ASSERT(((uintptr_t)codeOffset & 1) == 0);
#endif
+ bool executionAborted = ((UnixNativeMethodInfo*)pMethodInfo)->executionAborted;
+
+ if (!isActiveStackFrame && !executionAborted)
+ {
+ // the reasons for this adjustment are explained in EECodeManager::EnumGcRefs
+ codeOffset--;
+ }
+
GcInfoDecoder decoder(
GCInfoToken(gcInfo),
GcInfoDecoderFlags(DECODE_GC_LIFETIMES | DECODE_SECURITY_OBJECT | DECODE_VARARG),
codeOffset
);
+ if (isActiveStackFrame)
+ {
+ // CONSIDER: We can optimize this by remembering the need to adjust in IsSafePoint and propagating into here.
+ // Or, better yet, maybe we should change the decoder to not require this adjustment.
+ // The scenario that adjustment tries to handle (fallthrough into BB with random liveness)
+ // does not seem possible.
+ if (!decoder.HasInterruptibleRanges())
+ {
+ decoder = GcInfoDecoder(
+ GCInfoToken(gcInfo),
+ GcInfoDecoderFlags(DECODE_GC_LIFETIMES | DECODE_SECURITY_OBJECT | DECODE_VARARG),
+ codeOffset - 1
+ );
+
+ assert(decoder.IsInterruptibleSafePoint());
+ }
+ }
+
ICodeManagerFlags flags = (ICodeManagerFlags)0;
- bool executionAborted = ((UnixNativeMethodInfo*)pMethodInfo)->executionAborted;
if (executionAborted)
flags = ICodeManagerFlags::ExecutionAborted;
diff --git a/src/coreclr/nativeaot/Runtime/windows/CoffNativeCodeManager.cpp b/src/coreclr/nativeaot/Runtime/windows/CoffNativeCodeManager.cpp
index 051db8ddd00b77..b8c2310d644004 100644
--- a/src/coreclr/nativeaot/Runtime/windows/CoffNativeCodeManager.cpp
+++ b/src/coreclr/nativeaot/Runtime/windows/CoffNativeCodeManager.cpp
@@ -440,8 +440,9 @@ void CoffNativeCodeManager::EnumGcRefs(MethodInfo * pMethodInfo,
PTR_uint8_t gcInfo;
uint32_t codeOffset = GetCodeOffset(pMethodInfo, safePointAddress, &gcInfo);
- ICodeManagerFlags flags = (ICodeManagerFlags)0;
bool executionAborted = ((CoffNativeMethodInfo *)pMethodInfo)->executionAborted;
+
+ ICodeManagerFlags flags = (ICodeManagerFlags)0;
if (executionAborted)
flags = ICodeManagerFlags::ExecutionAborted;
@@ -452,6 +453,11 @@ void CoffNativeCodeManager::EnumGcRefs(MethodInfo * pMethodInfo,
flags = (ICodeManagerFlags)(flags | ICodeManagerFlags::ActiveStackFrame);
#ifdef USE_GC_INFO_DECODER
+ if (!isActiveStackFrame && !executionAborted)
+ {
+ // the reasons for this adjustment are explained in EECodeManager::EnumGcRefs
+ codeOffset--;
+ }
GcInfoDecoder decoder(
GCInfoToken(gcInfo),
@@ -459,6 +465,24 @@ void CoffNativeCodeManager::EnumGcRefs(MethodInfo * pMethodInfo,
codeOffset
);
+ if (isActiveStackFrame)
+ {
+ // CONSIDER: We can optimize this by remembering the need to adjust in IsSafePoint and propagating into here.
+ // Or, better yet, maybe we should change the decoder to not require this adjustment.
+ // The scenario that adjustment tries to handle (fallthrough into BB with random liveness)
+ // does not seem possible.
+ if (!decoder.HasInterruptibleRanges())
+ {
+ decoder = GcInfoDecoder(
+ GCInfoToken(gcInfo),
+ GcInfoDecoderFlags(DECODE_GC_LIFETIMES | DECODE_SECURITY_OBJECT | DECODE_VARARG),
+ codeOffset - 1
+ );
+
+ assert(decoder.IsInterruptibleSafePoint());
+ }
+ }
+
if (!decoder.EnumerateLiveSlots(
pRegisterSet,
isActiveStackFrame /* reportScratchSlots */,
diff --git a/src/coreclr/nativeaot/Runtime/windows/PalRedhawkMinWin.cpp b/src/coreclr/nativeaot/Runtime/windows/PalRedhawkMinWin.cpp
index 216b017054836d..86013f7a964d28 100644
--- a/src/coreclr/nativeaot/Runtime/windows/PalRedhawkMinWin.cpp
+++ b/src/coreclr/nativeaot/Runtime/windows/PalRedhawkMinWin.cpp
@@ -463,6 +463,32 @@ EXTERN_C VOID __cdecl RtlRestoreContextFallback(PCONTEXT ContextRecord, struct _
typedef BOOL(WINAPI* PINITIALIZECONTEXT2)(PVOID Buffer, DWORD ContextFlags, PCONTEXT* Context, PDWORD ContextLength, ULONG64 XStateCompactionMask);
PINITIALIZECONTEXT2 pfnInitializeContext2 = NULL;
+#ifdef TARGET_ARM64
+// Mirror the XSTATE_ARM64_SVE flags from winnt.h
+
+#ifndef XSTATE_ARM64_SVE
+#define XSTATE_ARM64_SVE (2)
+#endif // XSTATE_ARM64_SVE
+
+#ifndef XSTATE_MASK_ARM64_SVE
+#define XSTATE_MASK_ARM64_SVE (1ui64 << (XSTATE_ARM64_SVE))
+#endif // XSTATE_MASK_ARM64_SVE
+
+#ifndef CONTEXT_ARM64_XSTATE
+#define CONTEXT_ARM64_XSTATE (CONTEXT_ARM64 | 0x20L)
+#endif // CONTEXT_ARM64_XSTATE
+
+#ifndef CONTEXT_XSTATE
+#define CONTEXT_XSTATE CONTEXT_ARM64_XSTATE
+#endif // CONTEXT_XSTATE
+
+typedef DWORD64(WINAPI* PGETENABLEDXSTATEFEATURES)();
+PGETENABLEDXSTATEFEATURES pfnGetEnabledXStateFeatures = NULL;
+
+typedef BOOL(WINAPI* PSETXSTATEFEATURESMASK)(PCONTEXT Context, DWORD64 FeatureMask);
+PSETXSTATEFEATURESMASK pfnSetXStateFeaturesMask = NULL;
+#endif // TARGET_ARM64
+
#ifdef TARGET_X86
EXTERN_C VOID __cdecl RtlRestoreContextFallback(PCONTEXT ContextRecord, struct _EXCEPTION_RECORD* ExceptionRecord);
typedef VOID(__cdecl* PRTLRESTORECONTEXT)(PCONTEXT ContextRecord, struct _EXCEPTION_RECORD* ExceptionRecord);
@@ -478,7 +504,7 @@ REDHAWK_PALEXPORT CONTEXT* PalAllocateCompleteOSContext(_Out_ uint8_t** contextB
{
CONTEXT* pOSContext = NULL;
-#if (defined(TARGET_X86) || defined(TARGET_AMD64))
+#if defined(TARGET_X86) || defined(TARGET_AMD64) || defined(TARGET_ARM64)
DWORD context = CONTEXT_COMPLETE;
if (pfnInitializeContext2 == NULL)
@@ -490,6 +516,17 @@ REDHAWK_PALEXPORT CONTEXT* PalAllocateCompleteOSContext(_Out_ uint8_t** contextB
}
}
+#if defined(TARGET_ARM64)
+ if (pfnGetEnabledXStateFeatures == NULL)
+ {
+ HMODULE hm = GetModuleHandleW(_T("kernel32.dll"));
+ if (hm != NULL)
+ {
+ pfnGetEnabledXStateFeatures = (PGETENABLEDXSTATEFEATURES)GetProcAddress(hm, "GetEnabledXStateFeatures");
+ }
+ }
+#endif // TARGET_ARM64
+
#ifdef TARGET_X86
if (pfnRtlRestoreContext == NULL)
{
@@ -503,10 +540,27 @@ REDHAWK_PALEXPORT CONTEXT* PalAllocateCompleteOSContext(_Out_ uint8_t** contextB
}
#endif //TARGET_X86
- // Determine if the processor supports AVX or AVX512 so we could
- // retrieve extended registers
- DWORD64 FeatureMask = GetEnabledXStateFeatures();
- if ((FeatureMask & (XSTATE_MASK_AVX | XSTATE_MASK_AVX512)) != 0)
+#if defined(TARGET_X86) || defined(TARGET_AMD64)
+ const DWORD64 xStateFeatureMask = XSTATE_MASK_AVX | XSTATE_MASK_AVX512;
+ const ULONG64 xStateCompactionMask = XSTATE_MASK_LEGACY | XSTATE_MASK_MPX | xStateFeatureMask;
+#elif defined(TARGET_ARM64)
+ const DWORD64 xStateFeatureMask = XSTATE_MASK_ARM64_SVE;
+ const ULONG64 xStateCompactionMask = XSTATE_MASK_LEGACY | xStateFeatureMask;
+#endif
+
+ // Determine if the processor supports extended features so we could retrieve those registers
+ DWORD64 FeatureMask = 0;
+
+#if defined(TARGET_X86) || defined(TARGET_AMD64)
+ FeatureMask = GetEnabledXStateFeatures();
+#elif defined(TARGET_ARM64)
+ if (pfnGetEnabledXStateFeatures != NULL)
+ {
+ FeatureMask = pfnGetEnabledXStateFeatures();
+ }
+#endif
+
+ if ((FeatureMask & xStateFeatureMask) != 0)
{
context = context | CONTEXT_XSTATE;
}
@@ -517,7 +571,6 @@ REDHAWK_PALEXPORT CONTEXT* PalAllocateCompleteOSContext(_Out_ uint8_t** contextB
// Retrieve contextSize by passing NULL for Buffer
DWORD contextSize = 0;
- ULONG64 xStateCompactionMask = XSTATE_MASK_LEGACY | XSTATE_MASK_AVX | XSTATE_MASK_MPX | XSTATE_MASK_AVX512;
// The initialize call should fail but return contextSize
BOOL success = pfnInitializeContext2 ?
pfnInitializeContext2(NULL, context, NULL, &contextSize, xStateCompactionMask) :
@@ -565,15 +618,32 @@ REDHAWK_PALEXPORT _Success_(return) bool REDHAWK_PALAPI PalGetCompleteThreadCont
{
_ASSERTE((pCtx->ContextFlags & CONTEXT_COMPLETE) == CONTEXT_COMPLETE);
-#if defined(TARGET_X86) || defined(TARGET_AMD64)
- // Make sure that AVX feature mask is set, if supported. This should not normally fail.
+#if defined(TARGET_ARM64)
+ if (pfnSetXStateFeaturesMask == NULL)
+ {
+ HMODULE hm = GetModuleHandleW(_T("kernel32.dll"));
+ if (hm != NULL)
+ {
+ pfnSetXStateFeaturesMask = (PSETXSTATEFEATURESMASK)GetProcAddress(hm, "SetXStateFeaturesMask");
+ }
+ }
+#endif // TARGET_ARM64
+
+ // This should not normally fail.
// The system silently ignores any feature specified in the FeatureMask which is not enabled on the processor.
+#if defined(TARGET_X86) || defined(TARGET_AMD64)
if (!SetXStateFeaturesMask(pCtx, XSTATE_MASK_AVX | XSTATE_MASK_AVX512))
{
_ASSERTE(!"Could not apply XSTATE_MASK_AVX | XSTATE_MASK_AVX512");
return FALSE;
}
-#endif //defined(TARGET_X86) || defined(TARGET_AMD64)
+#elif defined(TARGET_ARM64)
+ if ((pfnSetXStateFeaturesMask != NULL) && !pfnSetXStateFeaturesMask(pCtx, XSTATE_MASK_ARM64_SVE))
+ {
+ _ASSERTE(!"Could not apply XSTATE_MASK_ARM64_SVE");
+ return FALSE;
+ }
+#endif
return GetThreadContext(hThread, pCtx);
}
@@ -902,7 +972,7 @@ REDHAWK_PALEXPORT HANDLE PalLoadLibrary(const char* moduleName)
return 0;
}
moduleNameWide[len] = '\0';
-
+
HANDLE result = LoadLibraryExW(moduleNameWide, NULL, LOAD_WITH_ALTERED_SEARCH_PATH);
delete[] moduleNameWide;
return result;
diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Reflection/Attribute.NativeAot.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Reflection/Attribute.NativeAot.cs
index a619095037b5c1..b68c2234d88fac 100644
--- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Reflection/Attribute.NativeAot.cs
+++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Reflection/Attribute.NativeAot.cs
@@ -135,18 +135,23 @@ private static Attribute OneOrNull(IEnumerable results)
Justification = "Arrays of reference types are safe to create.")]
private static Attribute[] Instantiate(IEnumerable<CustomAttributeData> cads, Type actualElementType)
{
- LowLevelList<Attribute> attributes = new LowLevelList<Attribute>();
+ ArrayBuilder<Attribute> attributes = default;
foreach (CustomAttributeData cad in cads)
{
Attribute instantiatedAttribute = cad.Instantiate();
attributes.Add(instantiatedAttribute);
}
- int count = attributes.Count;
- Attribute[] result = actualElementType.ContainsGenericParameters
- ? new Attribute[count]
- : (Attribute[])Array.CreateInstance(actualElementType, count);
- attributes.CopyTo(result, 0);
- return result;
+
+ if (actualElementType.ContainsGenericParameters)
+ {
+ return attributes.ToArray();
+ }
+ else
+ {
+ Attribute[] result = (Attribute[])Array.CreateInstance(actualElementType, attributes.Count);
+ attributes.CopyTo(result);
+ return result;
+ }
}
}
}
diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Reflection/Runtime/CustomAttributes/NativeFormat/NativeFormatCustomAttributeData.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Reflection/Runtime/CustomAttributes/NativeFormat/NativeFormatCustomAttributeData.cs
index 10c49ffe177bbf..e7a7202b0400c0 100644
--- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Reflection/Runtime/CustomAttributes/NativeFormat/NativeFormatCustomAttributeData.cs
+++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Reflection/Runtime/CustomAttributes/NativeFormat/NativeFormatCustomAttributeData.cs
@@ -116,7 +116,7 @@ internal sealed override IList<CustomAttributeTypedArgument> GetConstructorArgum
}
Handle[] ctorTypeHandles = parameterTypeSignatureHandles.ToArray();
- LowLevelListWithIList<CustomAttributeTypedArgument> customAttributeTypedArguments = new LowLevelListWithIList<CustomAttributeTypedArgument>();
+ ArrayBuilder<CustomAttributeTypedArgument> customAttributeTypedArguments = new ArrayBuilder<CustomAttributeTypedArgument>(_customAttribute.FixedArguments.Count);
foreach (Handle fixedArgumentHandle in _customAttribute.FixedArguments)
{
Handle typeHandle = ctorTypeHandles[index];
@@ -147,7 +147,7 @@ internal sealed override IList<CustomAttributeTypedArgument> GetConstructorArgum
index++;
}
- return customAttributeTypedArguments;
+ return customAttributeTypedArguments.ToArray();
}
//
@@ -155,7 +155,7 @@ internal sealed override IList<CustomAttributeTypedArgument> GetConstructorArgum
//
internal sealed override IList<CustomAttributeNamedArgument> GetNamedArguments(bool throwIfMissingMetadata)
{
- LowLevelListWithIList<CustomAttributeNamedArgument> customAttributeNamedArguments = new LowLevelListWithIList<CustomAttributeNamedArgument>();
+ ArrayBuilder<CustomAttributeNamedArgument> customAttributeNamedArguments = new ArrayBuilder<CustomAttributeNamedArgument>(_customAttribute.NamedArguments.Count);
foreach (NamedArgumentHandle namedArgumentHandle in _customAttribute.NamedArguments)
{
NamedArgument namedArgument = namedArgumentHandle.GetNamedArgument(_reader);
@@ -185,7 +185,7 @@ internal sealed override IList<CustomAttributeNamedArgument> GetNamedArguments(b
customAttributeNamedArguments.Add(CreateCustomAttributeNamedArgument(this.AttributeType, memberName, isField, typedValue));
}
- return customAttributeNamedArguments;
+ return customAttributeNamedArguments.ToArray();
}
[UnconditionalSuppressMessage("ReflectionAnalysis", "IL2070:UnrecognizedReflectionPattern",
diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Reflection/Runtime/CustomAttributes/RuntimeCustomAttributeData.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Reflection/Runtime/CustomAttributes/RuntimeCustomAttributeData.cs
index 12b0cde3b166f1..12b6000851694c 100644
--- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Reflection/Runtime/CustomAttributes/RuntimeCustomAttributeData.cs
+++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Reflection/Runtime/CustomAttributes/RuntimeCustomAttributeData.cs
@@ -171,13 +171,13 @@ protected static CustomAttributeTypedArgument WrapInCustomAttributeTypedArgument
if (!argumentType.IsArray)
throw new BadImageFormatException();
Type reportedElementType = argumentType.GetElementType()!;
- LowLevelListWithIList<CustomAttributeTypedArgument> elementTypedArguments = new LowLevelListWithIList<CustomAttributeTypedArgument>();
+ ArrayBuilder<CustomAttributeTypedArgument> elementTypedArguments = default;
foreach (object elementValue in enumerableValue)
{
CustomAttributeTypedArgument elementTypedArgument = WrapInCustomAttributeTypedArgument(elementValue, reportedElementType);
elementTypedArguments.Add(elementTypedArgument);
}
- return new CustomAttributeTypedArgument(argumentType, new ReadOnlyCollection<CustomAttributeTypedArgument>(elementTypedArguments));
+ return new CustomAttributeTypedArgument(argumentType, new ReadOnlyCollection<CustomAttributeTypedArgument>(elementTypedArguments.ToArray()));
}
else
{
diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Reflection/Runtime/General/Helpers.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Reflection/Runtime/General/Helpers.cs
index abe62427fb1dd4..b6d019046c3884 100644
--- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Reflection/Runtime/General/Helpers.cs
+++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Reflection/Runtime/General/Helpers.cs
@@ -151,22 +151,26 @@ public static BinderBundle ToBinderBundle(this Binder binder, BindingFlags invok
Justification = "Array.CreateInstance is only used with reference types here and is therefore safe.")]
public static object[] InstantiateAsArray(this IEnumerable<CustomAttributeData> cads, Type actualElementType)
{
- LowLevelList