@@ -592,6 +592,27 @@ static inline uint64_t decodeAdvSIMDModImmType9(uint8_t Imm) {
// aaaaaaaa bbbbbbbb cccccccc dddddddd eeeeeeee ffffffff gggggggg hhhhhhhh
// cmode: 1110, op: 1
static inline bool isAdvSIMDModImmType10(uint64_t Imm) {
+#if defined(_MSC_VER) && _MSC_VER == 1937 && !defined(__clang__) && \
+    defined(_M_ARM64)
+  // The MSVC compiler 19.37 for ARM64 has an optimization bug that
+  // causes incorrect behavior with the original version. Work around
+  // the bug by using a slightly different variation.
+  // https://developercommunity.visualstudio.com/t/C-ARM64-compiler-optimization-bug/10481261
+  constexpr uint64_t Mask = 0xFFULL;
+  uint64_t ByteA = (Imm >> 56) & Mask;
+  uint64_t ByteB = (Imm >> 48) & Mask;
+  uint64_t ByteC = (Imm >> 40) & Mask;
+  uint64_t ByteD = (Imm >> 32) & Mask;
+  uint64_t ByteE = (Imm >> 24) & Mask;
+  uint64_t ByteF = (Imm >> 16) & Mask;
+  uint64_t ByteG = (Imm >> 8) & Mask;
+  uint64_t ByteH = Imm & Mask;
+
+  return (ByteA == 0ULL || ByteA == Mask) && (ByteB == 0ULL || ByteB == Mask) &&
+         (ByteC == 0ULL || ByteC == Mask) && (ByteD == 0ULL || ByteD == Mask) &&
+         (ByteE == 0ULL || ByteE == Mask) && (ByteF == 0ULL || ByteF == Mask) &&
+         (ByteG == 0ULL || ByteG == Mask) && (ByteH == 0ULL || ByteH == Mask);
+#else
  uint64_t ByteA = Imm & 0xff00000000000000ULL;
  uint64_t ByteB = Imm & 0x00ff000000000000ULL;
  uint64_t ByteC = Imm & 0x0000ff0000000000ULL;
@@ -609,6 +630,7 @@ static inline bool isAdvSIMDModImmType10(uint64_t Imm) {
         (ByteF == 0ULL || ByteF == 0x0000000000ff0000ULL) &&
         (ByteG == 0ULL || ByteG == 0x000000000000ff00ULL) &&
         (ByteH == 0ULL || ByteH == 0x00000000000000ffULL);
+#endif
}

static inline uint8_t encodeAdvSIMDModImmType10(uint64_t Imm) {
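For context: both branches of the patched function decide the same property, namely that every one of the eight bytes of Imm is either 0x00 or 0xFF (the pattern a type-10 modified immediate expands to); the workaround merely shifts each byte down before comparing instead of masking it in place. Below is a minimal standalone sketch, not part of the patch, that cross-checks the two formulations on a few sample values. The helper names isType10Masked and isType10Shifted are hypothetical.

// Standalone sketch: verifies that the masked (original) and shifted
// (workaround) formulations of the type-10 predicate agree.
#include <cassert>
#include <cstdint>
#include <cstdio>

// Original formulation: mask each byte in place and compare against
// the full-byte pattern at that position.
static bool isType10Masked(uint64_t Imm) {
  for (int I = 0; I < 8; ++I) {
    uint64_t Byte = Imm & (0xffULL << (8 * I));
    if (Byte != 0ULL && Byte != (0xffULL << (8 * I)))
      return false;
  }
  return true;
}

// Workaround formulation: shift each byte down to bits [7:0] first,
// then compare against 0x00 and 0xFF directly.
static bool isType10Shifted(uint64_t Imm) {
  for (int I = 0; I < 8; ++I) {
    uint64_t Byte = (Imm >> (8 * I)) & 0xffULL;
    if (Byte != 0ULL && Byte != 0xffULL)
      return false;
  }
  return true;
}

int main() {
  const uint64_t Samples[] = {
      0x0000000000000000ULL, // all bytes 0x00: valid
      0xffffffffffffffffULL, // all bytes 0xFF: valid
      0xff00ff00ff00ff00ULL, // alternating 0xFF/0x00 bytes: valid
      0xff00ff00ff00ff01ULL, // one byte is 0x01: invalid
      0x123456789abcdef0ULL, // arbitrary bytes: invalid
  };
  for (uint64_t Imm : Samples) {
    assert(isType10Masked(Imm) == isType10Shifted(Imm));
    printf("0x%016llx -> %d\n", (unsigned long long)Imm,
           (int)isType10Shifted(Imm));
  }
  return 0;
}

The loop-based form is only for compactness here; the patch deliberately keeps the logic fully unrolled, matching the surrounding LLVM code.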