From 720015df397c3c94a99b2b847b031f6fd7814253 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Doeraene?= Date: Mon, 23 Jun 2025 10:00:51 +0200 Subject: [PATCH 1/3] Branchless algorithm for RuntimeLong.toDouble. It turns out the computation we did in the non-negative case also works for the negative case. The proof relies on elementary properties of the two's complement representation. I don't know how I never saw this before. To make things worse, it seems that Kotlin and J2CL knew all along, and I never realized when skimming through their implementations either. --- .../scalajs/linker/runtime/RuntimeLong.scala | 46 +++++++++++++------ .../org/scalajs/linker/LibrarySizeTest.scala | 6 +-- project/Build.scala | 10 ++-- 3 files changed, 40 insertions(+), 22 deletions(-) diff --git a/linker-private-library/src/main/scala/org/scalajs/linker/runtime/RuntimeLong.scala b/linker-private-library/src/main/scala/org/scalajs/linker/runtime/RuntimeLong.scala index 71066e0f2f..25633ebf6b 100644 --- a/linker-private-library/src/main/scala/org/scalajs/linker/runtime/RuntimeLong.scala +++ b/linker-private-library/src/main/scala/org/scalajs/linker/runtime/RuntimeLong.scala @@ -695,17 +695,8 @@ object RuntimeLong { a.lo @inline - def toDouble(a: RuntimeLong): Double = { - val lo = a.lo - val hi = a.hi - if (hi < 0) { - // We need unsignedToDoubleApprox specifically for MinValue - val neg = inline_negate(lo, hi) - -unsignedToDoubleApprox(neg.lo, neg.hi) - } else { - nonNegativeToDoubleApprox(lo, hi) - } - } + def toDouble(a: RuntimeLong): Double = + signedToDoubleApprox(a.lo, a.hi) @inline def toFloat(a: RuntimeLong): Float = @@ -1203,7 +1194,7 @@ object RuntimeLong { /** Converts an unsigned safe double into its Double representation. */ @inline def asUnsignedSafeDouble(lo: Int, hi: Int): Double = - nonNegativeToDoubleApprox(lo, hi) + signedToDoubleApprox(lo, hi) // can use either signed or unsigned here; signed folds better /** Converts an unsigned safe double into its RuntimeLong representation. */ @inline def fromUnsignedSafeDouble(x: Double): RuntimeLong = @@ -1221,14 +1212,41 @@ object RuntimeLong { @inline def unsignedToDoubleApprox(lo: Int, hi: Int): Double = uintToDouble(hi) * TwoPow32 + uintToDouble(lo) - /** Approximates a non-negative (lo, hi) with a Double. + /** Approximates a signed (lo, hi) with a Double. * * If `hi` is known to be non-negative, this method is equivalent to * `unsignedToDoubleApprox`, but it can fold away part of the computation if * `hi` is in fact constant. */ - @inline def nonNegativeToDoubleApprox(lo: Int, hi: Int): Double = + @inline def signedToDoubleApprox(lo: Int, hi: Int): Double = { + /* We note a_u the mathematical value of a when interpreted as an unsigned + * quantity, and a_s when interpreted as a signed quantity. + * + * For x = (lo, hi), the result must be the correctly rounded value of x_s. + * + * If x_s >= 0, then hi_s >= 0. The obvious mathematical value of x_s is + * x_s = hi_s * 2^32 + lo_u + * + * If x_s < 0, then hi_s < 0. The fundamental definition of two's + * completement means that + * x_s = -2^64 + hi_u * 2^32 + lo_u + * Likewise, + * hi_s = -2^32 + hi_u + * + * Now take the computation for the x_s >= 0 case, but substituting values + * for the negative case: + * hi_s * 2^32 + lo_u + * = (-2^32 + hi_u) * 2^32 + lo_u + * = (-2^64 + hi_u * 2^32) + lo_u + * which is the correct mathematical result for x_s in the negative case. 
+ * + * Therefore, we can always compute + * x_s = hi_s * 2^32 + lo_u + * When computed with `Double` values, only the last `+` can be inexact, + * hence the result is correctly round. + */ hi.toDouble * TwoPow32 + uintToDouble(lo) + } /** Interprets an `Int` as an unsigned integer and returns its value as a * `Double`. diff --git a/linker/shared/src/test/scala/org/scalajs/linker/LibrarySizeTest.scala b/linker/shared/src/test/scala/org/scalajs/linker/LibrarySizeTest.scala index e6d062aab1..7f2b385abf 100644 --- a/linker/shared/src/test/scala/org/scalajs/linker/LibrarySizeTest.scala +++ b/linker/shared/src/test/scala/org/scalajs/linker/LibrarySizeTest.scala @@ -70,9 +70,9 @@ class LibrarySizeTest { ) testLinkedSizes( - expectedFastLinkSize = 148481, - expectedFullLinkSizeWithoutClosure = 87816, - expectedFullLinkSizeWithClosure = 20704, + expectedFastLinkSize = 147932, + expectedFullLinkSizeWithoutClosure = 87506, + expectedFullLinkSizeWithClosure = 20696, classDefs, moduleInitializers = MainTestModuleInitializers ) diff --git a/project/Build.scala b/project/Build.scala index 071d5016db..49e549a45c 100644 --- a/project/Build.scala +++ b/project/Build.scala @@ -2053,15 +2053,15 @@ object Build { case `default212Version` => if (!useMinifySizes) { Some(ExpectedSizes( - fastLink = 625000 to 626000, + fastLink = 624000 to 625000, fullLink = 94000 to 95000, fastLinkGz = 75000 to 79000, fullLinkGz = 24000 to 25000, )) } else { Some(ExpectedSizes( - fastLink = 426000 to 427000, - fullLink = 283000 to 284000, + fastLink = 425000 to 426000, + fullLink = 282000 to 283000, fastLinkGz = 61000 to 62000, fullLinkGz = 43000 to 44000, )) @@ -2070,7 +2070,7 @@ object Build { case `default213Version` => if (!useMinifySizes) { Some(ExpectedSizes( - fastLink = 443000 to 444000, + fastLink = 442000 to 443000, fullLink = 90000 to 91000, fastLinkGz = 57000 to 58000, fullLinkGz = 24000 to 25000, @@ -2078,7 +2078,7 @@ object Build { } else { Some(ExpectedSizes( fastLink = 301000 to 302000, - fullLink = 259000 to 260000, + fullLink = 258000 to 259000, fastLinkGz = 47000 to 48000, fullLinkGz = 42000 to 43000, )) From 331baa8b0326d05b144d41493a72a0a1343b47f9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Doeraene?= Date: Mon, 23 Jun 2025 14:57:54 +0200 Subject: [PATCH 2/3] Introduce code paths for Longs that are *signed* safe doubles. Since conversions of signed longs to doubles is in fact no more expensive than the unsigned longs, we can take shorter paths for values that fit in the *signed* safe range. This applies to the conversions to string (including in the javalib) and to float. It could also apply to signed division and remainder. However, benchmarks suggest that doing so makes it slower. The trouble is that we then need a signed double-to-long conversion for the result, and that appears to be slower than performing the 3 sign adjustments. 
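A minimal JVM-side sketch that spot-checks the identity proved above for
signedToDoubleApprox (the object name, the main harness and the uintToDouble
model are illustrative; only the formula itself comes from the patch):

    object SignedToDoubleApproxCheck {
      private final val TwoPow32 = 4294967296.0

      // Models RuntimeLong.uintToDouble: reinterpret an Int as an unsigned quantity.
      private def uintToDouble(x: Int): Double = (x.toLong & 0xffffffffL).toDouble

      // The branchless formula: hi is used as signed, lo as unsigned.
      def signedToDoubleApprox(lo: Int, hi: Int): Double =
        hi.toDouble * TwoPow32 + uintToDouble(lo)

      def main(args: Array[String]): Unit = {
        val samples = Seq(0L, 1L, -1L, 123456789L, -987654321098L, Long.MinValue, Long.MaxValue)
        for (x <- samples)
          assert(signedToDoubleApprox(x.toInt, (x >>> 32).toInt) == x.toDouble)
      }
    }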
--- javalib/src/main/scala/java/lang/Long.scala | 34 ++- .../scalajs/linker/runtime/RuntimeLong.scala | 287 +++++++++--------- .../org/scalajs/linker/LibrarySizeTest.scala | 6 +- project/Build.scala | 2 +- 4 files changed, 172 insertions(+), 157 deletions(-) diff --git a/javalib/src/main/scala/java/lang/Long.scala b/javalib/src/main/scala/java/lang/Long.scala index 5c347eecd4..bf27b15544 100644 --- a/javalib/src/main/scala/java/lang/Long.scala +++ b/javalib/src/main/scala/java/lang/Long.scala @@ -120,7 +120,7 @@ object Long { if (radix == 10 || radix < Character.MIN_RADIX || radix > Character.MAX_RADIX) toString(i) else - toStringImpl(i, radix) + toStringImpl(i.toInt, (i >>> 32).toInt, radix) } @inline // because radix is almost certainly constant at call site @@ -133,7 +133,7 @@ object Long { val radix1 = if (radix < Character.MIN_RADIX || radix > Character.MAX_RADIX) 10 else radix - toUnsignedStringImpl(i, radix1) + toUnsignedStringImpl(i.toInt, (i >>> 32).toInt, radix1) } } @@ -141,40 +141,42 @@ object Long { @inline def toString(i: scala.Long): String = "" + i @inline def toUnsignedString(i: scala.Long): String = - toUnsignedStringImpl(i, 10) + toUnsignedStringImpl(i.toInt, (i >>> 32).toInt, 10) // Must be called only with valid radix - private def toStringImpl(i: scala.Long, radix: Int): String = { - val lo = i.toInt - val hi = (i >>> 32).toInt + private def toStringImpl(lo: Int, hi: Int, radix: Int): String = { + import js.JSNumberOps.enableJSNumberOps if (lo >> 31 == hi) { // It's a signed int32 - import js.JSNumberOps.enableJSNumberOps lo.toString(radix) - } else if (hi < 0) { - val neg = -i - "-" + toUnsignedStringInternalLarge(neg.toInt, (neg >>> 32).toInt, radix) + } else if (((hi ^ (hi >> 10)) & 0xffe00000) == 0) { // see RuntimeLong.isSignedSafeDouble + // (lo, hi) is small enough to be a Double, so toDouble is exact + makeLongFromLoHi(lo, hi).toDouble.toString(radix) } else { - toUnsignedStringInternalLarge(lo, hi, radix) + val abs = Math.abs(makeLongFromLoHi(lo, hi)) + val s = toUnsignedStringInternalLarge(abs.toInt, (abs >>> 32).toInt, radix) + if (hi < 0) "-" + s else s } } // Must be called only with valid radix - private def toUnsignedStringImpl(i: scala.Long, radix: Int): String = { - val lo = i.toInt - val hi = (i >>> 32).toInt + private def toUnsignedStringImpl(lo: Int, hi: Int, radix: Int): String = { + import js.JSNumberOps.enableJSNumberOps if (hi == 0) { // It's an unsigned int32 - import js.JSNumberOps.enableJSNumberOps Integer.toUnsignedDouble(lo).toString(radix) + } else if ((hi & 0xffe00000) == 0) { // see RuntimeLong.isUnsignedSafeDouble + // (lo, hi) is small enough to be a Double, so toDouble is exact + makeLongFromLoHi(lo, hi).toDouble.toString(radix) } else { toUnsignedStringInternalLarge(lo, hi, radix) } } - // Must be called only with valid radix and with (lo, hi) >= 2^30 + // Must be called only with valid radix and with (lo, hi) >= 2^53 + @inline // inlined twice: once in toStringImpl and once in toUnsignedStringImpl private def toUnsignedStringInternalLarge(lo: Int, hi: Int, radix: Int): String = { import js.JSNumberOps.enableJSNumberOps import js.JSStringOps.enableJSStringOps diff --git a/linker-private-library/src/main/scala/org/scalajs/linker/runtime/RuntimeLong.scala b/linker-private-library/src/main/scala/org/scalajs/linker/runtime/RuntimeLong.scala index 25633ebf6b..a91d1aff27 100644 --- a/linker-private-library/src/main/scala/org/scalajs/linker/runtime/RuntimeLong.scala +++ 
b/linker-private-library/src/main/scala/org/scalajs/linker/runtime/RuntimeLong.scala @@ -90,7 +90,7 @@ object RuntimeLong { * double. * @see isUnsignedSafeDouble */ - private final val UnsignedSafeDoubleHiMask = 0xffe00000 + private final val SafeDoubleHiMask = 0xffe00000 /** The hi part of a (lo, hi) return value. */ private[this] var hiReturn: Int = _ @@ -591,103 +591,98 @@ object RuntimeLong { private def toString(lo: Int, hi: Int): String = { if (isInt32(lo, hi)) { lo.toString() - } else if (hi < 0) { - val neg = inline_negate(lo, hi) - "-" + toUnsignedString(neg.lo, neg.hi) + } else if (isSignedSafeDouble(hi)) { + asSafeDouble(lo, hi).toString() } else { - toUnsignedString(lo, hi) + val abs = inline_abs(lo, hi) + val s = toUnsignedStringLarge(abs.lo, abs.hi) + if (hi < 0) "-" + s else s } } - private def toUnsignedString(lo: Int, hi: Int): String = { - // This is called only if (lo, hi) is not an Int32 - - if (isUnsignedSafeDouble(hi)) { - // (lo, hi) is small enough to be a Double, use that directly - asUnsignedSafeDouble(lo, hi).toString - } else { - /* At this point, (lo, hi) >= 2^53. - * - * The idea is to divide (lo, hi) once by 10^9 and keep the remainder. - * - * The remainder must then be < 10^9, and is therefore an int32. - * - * The quotient must be <= ULong.MaxValue / 10^9, which is < 2^53, and - * is therefore a valid double. It must also be non-zero, since - * (lo, hi) >= 2^53 > 10^9. - * - * We should do that single division as a Long division. However, that is - * slow. We can cheat with a Double division instead. - * - * We convert the unsigned value num = (lo, hi) to a Double value - * approxNum. This is an approximation. It can lose as many as - * 64 - 53 = 11 low-order bits. Hence |approxNum - num| <= 2^12. - * - * We then compute an approximated quotient - * approxQuot = floor(approxNum / 10^9) - * instead of the theoretical value - * quot = floor(num / 10^9) - * - * Since 10^9 > 2^29 > 2^12, we have |approxNum - num| < 10^9. - * Therefore, |approxQuot - quot| <= 1. - * - * We also have 0 <= approxQuot < 2^53, which means that approxQuot is an - * "unsigned safe double" and that `approxQuot.toLong` is lossless. - * - * At this point, we compute the approximated remainder - * approxRem = num - 10^9 * approxQuot.toLong - * as if with Long arithmetics. - * - * Since the theoretical remainder rem = num - 10^9 * quot is such that - * 0 <= rem < 10^9, and since |approxQuot - quot| <= 1, we have that - * -10^9 <= approxRem < 2 * 10^9 - * - * Interestingly, that range entirely fits within a signed int32. - * That means approxRem = approxRem.toInt, and therefore - * - * approxRem - * = (num - 10^9 * approxQuot.toLong).toInt - * = num.toInt - 10^9 * approxQuot.toLong.toInt (thanks to modular arithmetics) - * = lo - 10^9 * unsignedSafeDoubleLo(approxQuot) - * - * That allows to compute approxRem with Int arithmetics without loss of - * precision. - * - * We can use approxRem to detect and correct the error on approxQuot. - * If approxRem < 0, correct approxQuot by -1 and approxRem by +10^9. - * If approxRem >= 10^9, correct them by +1 and -10^9, respectively. - * - * After the correction, we know that approxQuot and approxRem are equal - * to their theoretical counterparts quot and rem. We have successfully - * computed the correct quotient and remainder without using any Long - * division. - * - * We can finally convert both to strings using the native string - * conversions, and concatenate the results to produce our final result. 
- */ - - // constants - val divisor = 1000000000 // 10^9 - val divisorInv = 1.0 / divisor.toDouble - - // initial approximation of the quotient and remainder - val approxNum = unsignedToDoubleApprox(lo, hi) - var approxQuot = scala.scalajs.js.Math.floor(approxNum * divisorInv) - var approxRem = lo - divisor * unsignedSafeDoubleLo(approxQuot) - - // correct the approximations - if (approxRem < 0) { - approxQuot -= 1.0 - approxRem += divisor - } else if (approxRem >= divisor) { - approxQuot += 1.0 - approxRem -= divisor - } + @inline + private def toUnsignedStringLarge(lo: Int, hi: Int): String = { + /* This is called only if (lo, hi) is >= 2^53. + * + * The idea is to divide (lo, hi) once by 10^9 and keep the remainder. + * + * The remainder must then be < 10^9, and is therefore an int32. + * + * The quotient must be <= ULong.MaxValue / 10^9, which is < 2^53, and + * is therefore a valid double. It must also be non-zero, since + * (lo, hi) >= 2^53 > 10^9. + * + * We should do that single division as a Long division. However, that is + * slow. We can cheat with a Double division instead. + * + * We convert the unsigned value num = (lo, hi) to a Double value + * approxNum. This is an approximation. It can lose as many as + * 64 - 53 = 11 low-order bits. Hence |approxNum - num| <= 2^12. + * + * We then compute an approximated quotient + * approxQuot = floor(approxNum / 10^9) + * instead of the theoretical value + * quot = floor(num / 10^9) + * + * Since 10^9 > 2^29 > 2^12, we have |approxNum - num| < 10^9. + * Therefore, |approxQuot - quot| <= 1. + * + * We also have 0 <= approxQuot < 2^53, which means that approxQuot is an + * "unsigned safe double" and that `approxQuot.toLong` is lossless. + * + * At this point, we compute the approximated remainder + * approxRem = num - 10^9 * approxQuot.toLong + * as if with Long arithmetics. + * + * Since the theoretical remainder rem = num - 10^9 * quot is such that + * 0 <= rem < 10^9, and since |approxQuot - quot| <= 1, we have that + * -10^9 <= approxRem < 2 * 10^9 + * + * Interestingly, that range entirely fits within a signed int32. + * That means approxRem = approxRem.toInt, and therefore + * + * approxRem + * = (num - 10^9 * approxQuot.toLong).toInt + * = num.toInt - 10^9 * approxQuot.toLong.toInt (thanks to modular arithmetics) + * = lo - 10^9 * unsignedSafeDoubleLo(approxQuot) + * + * That allows to compute approxRem with Int arithmetics without loss of + * precision. + * + * We can use approxRem to detect and correct the error on approxQuot. + * If approxRem < 0, correct approxQuot by -1 and approxRem by +10^9. + * If approxRem >= 10^9, correct them by +1 and -10^9, respectively. + * + * After the correction, we know that approxQuot and approxRem are equal + * to their theoretical counterparts quot and rem. We have successfully + * computed the correct quotient and remainder without using any Long + * division. + * + * We can finally convert both to strings using the native string + * conversions, and concatenate the results to produce our final result. 
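+ *
+ * As a concrete illustration (not part of the original comment): for
+ * num = 2^53, i.e. (lo, hi) = (0, 0x200000), approxNum is exactly
+ * 9007199254740992.0, so approxQuot = floor(approxNum / 10^9) = 9007199.0.
+ * With Int arithmetic, divisor * unsignedSafeDoubleLo(approxQuot)
+ * = 1000000000 * 9007199 wraps to -254740992 (mod 2^32), hence
+ * approxRem = 0 - (-254740992) = 254740992, which already lies in
+ * [0, 10^9), so no correction is needed. The remainder has 9 digits, so
+ * the zero-padding is empty and the final concatenation yields
+ * "9007199" + "254740992" = "9007199254740992".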
+ */ - // build the result string - val remStr = approxRem.toString() - approxQuot.toString() + substring("000000000", remStr.length()) + remStr + // constants + val divisor = 1000000000 // 10^9 + val divisorInv = 1.0 / divisor.toDouble + + // initial approximation of the quotient and remainder + val approxNum = unsignedToDoubleApprox(lo, hi) + var approxQuot = scala.scalajs.js.Math.floor(approxNum * divisorInv) + var approxRem = lo - divisor * unsignedSafeDoubleLo(approxQuot) + + // correct the approximations + if (approxRem < 0) { + approxQuot -= 1.0 + approxRem += divisor + } else if (approxRem >= divisor) { + approxQuot += 1.0 + approxRem -= divisor } + + // build the result string + val remStr = approxRem.toString() + approxQuot.toString() + substring("000000000", remStr.length()) + remStr } @inline @@ -699,10 +694,7 @@ object RuntimeLong { signedToDoubleApprox(a.lo, a.hi) @inline - def toFloat(a: RuntimeLong): Float = - toFloat(a.lo, a.hi) - - private def toFloat(lo: Int, hi: Int): Float = { + def toFloat(a: RuntimeLong): Float = { /* This implementation is based on the property that, *if* the conversion * `x.toDouble` is lossless, then the result of `x.toFloat` is equivalent * to `x.toDouble.toFloat`. @@ -721,39 +713,48 @@ object RuntimeLong { * * The algorithm works as follows: * - * First, we take the absolute value of the input. We will negate the - * result at the end if the input was negative. - * - * Second, if the abs input is an unsigned safe Double, then the conversion - * to double is lossless, so we don't have to do anything special - * (`y == x` in terms of the above explanation). - * - * Otherwise, we know that the input's highest 1 bit is in the 11 - * highest-order bits. That means that rounding to float, which only has 24 - * bits in the significand, can only take into account the - * `11 + 23 + 1 = 35` highest-order bits (the `+ 1` is for the rounding - * bit). The remaining bits can only affect the result by two states: - * either they are all 0's, or there is at least one 1. We use that - * property to "compress" the 16 low-order bits into a single 0 or 1 bit - * representing those two states. The compressed Long value - * `y = (compressedAbsLo, abs.hi)` has at most `32 + 17 = 49` significant + * Second, if the input is a signed safe Double, then the conversion to + * double is lossless, so we don't have to do anything special (`y == x` in + * terms of the above explanation). + * + * Otherwise, let us first assume that `x >= 0`. In that case, we know that + * the input's highest 1 bit is in the 11 highest-order bits. That means + * that rounding to float, which only has 24 bits in the significand, can + * only take into account the `11 + 23 + 1 = 35` highest-order bits (the + * `+ 1` is for the rounding bit). The remaining bits can only affect the + * result by two states: either they are all 0's, or there is at least one + * 1. We use that property to "compress" the 16 low-order bits into a + * single 0 or 1 bit representing those two states. The compressed Long + * value `y = (compressedLo, hi)` has at most `32 + 17 = 49` significant * bits. Therefore its conversion to Double is lossless. * * Now that we always have a lossless compression to Double, we can perform * it, followed by a conversion from Double to Float, which will apply the * appropriate rounding. * - * (A similar strategy is used in `parseFloat` for the hexadecimal format.) + * (A similar strategy is used in `parseFloat` for the hexadecimal format, + * where we only have the non-negative case.) 
+ * + * For the case `x < 0`, logically we should negate it, perform the above + * transformation and convert to Double, then negate the result. It turns + * out we do not need a separate code path. Indeed, if x is a safe double, + * then -x also converts losslessly (-x may not be safe double by our + * definition, because it could be exactly 2^53, but the conversion is + * still exact). Otherwise, we should apply a compression if + * `(-x & 0xffffL) != 0L`. Because of how two's complement negation work, + * that is equivalent to `(x & 0xffffL) != 0L`, and therefore also + * equivalent to `(lo & 0xffff) != 0`. When we do need a compression, we + * can do it on the signed representation just as well as the unsigned + * representation, because it only affects `lo`, and `lo` is interpreted as + * unsigned regardless, when converting to a double. */ - val abs = inline_abs(lo, hi) - val compressedAbsLo = - if (isUnsignedSafeDouble(abs.hi) || (abs.lo & 0xffff) == 0) abs.lo - else (abs.lo & ~0xffff) | 0x8000 - - val absRes = unsignedToDoubleApprox(compressedAbsLo, abs.hi) - - (if (hi < 0) -absRes else absRes).toFloat + val lo = a.lo + val hi = a.hi + val compressedLo = + if (isSignedSafeDouble(hi) || (lo & 0xffff) == 0) lo + else (lo & ~0xffff) | 0x8000 + signedToDoubleApprox(compressedLo, hi).toFloat } @inline @@ -970,8 +971,8 @@ object RuntimeLong { // This method is not called if isInt32(alo, ahi) nor if isZero(blo, bhi) if (isUnsignedSafeDouble(ahi)) { if (isUnsignedSafeDouble(bhi)) { - val aDouble = asUnsignedSafeDouble(alo, ahi) - val bDouble = asUnsignedSafeDouble(blo, bhi) + val aDouble = asSafeDouble(alo, ahi) + val bDouble = asSafeDouble(blo, bhi) val rDouble = aDouble / bDouble hiReturn = unsignedSafeDoubleHi(rDouble) unsignedSafeDoubleLo(rDouble) @@ -1064,8 +1065,8 @@ object RuntimeLong { // This method is not called if isInt32(alo, ahi) nor if isZero(blo, bhi) if (isUnsignedSafeDouble(ahi)) { if (isUnsignedSafeDouble(bhi)) { - val aDouble = asUnsignedSafeDouble(alo, ahi) - val bDouble = asUnsignedSafeDouble(blo, bhi) + val aDouble = asSafeDouble(alo, ahi) + val bDouble = asSafeDouble(blo, bhi) val rDouble = aDouble % bDouble hiReturn = unsignedSafeDoubleHi(rDouble) unsignedSafeDoubleLo(rDouble) @@ -1119,7 +1120,7 @@ object RuntimeLong { * val, which will explose the while condition as a while(true) + if + * break, and we don't want that. */ - while (shift >= 0 && (remHi & UnsignedSafeDoubleHiMask) != 0) { + while (shift >= 0 && (remHi & SafeDoubleHiMask) != 0) { if (inlineUnsigned_>=(remLo, remHi, bShiftLo, bShiftHi)) { val newRem = new RuntimeLong(remLo, remHi) - new RuntimeLong(bShiftLo, bShiftHi) @@ -1138,8 +1139,8 @@ object RuntimeLong { // Now rem < 2^53, we can finish with a double division if (inlineUnsigned_>=(remLo, remHi, blo, bhi)) { - val remDouble = asUnsignedSafeDouble(remLo, remHi) - val bDouble = asUnsignedSafeDouble(blo, bhi) + val remDouble = asSafeDouble(remLo, remHi) + val bDouble = asSafeDouble(blo, bhi) if (askQuotient) { val rem_div_bDouble = fromUnsignedSafeDouble(remDouble / bDouble) @@ -1190,11 +1191,27 @@ object RuntimeLong { * stay on the fast side. */ @inline def isUnsignedSafeDouble(hi: Int): Boolean = - (hi & UnsignedSafeDoubleHiMask) == 0 + (hi & SafeDoubleHiMask) == 0 - /** Converts an unsigned safe double into its Double representation. 
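A JVM-side sketch of the sticky-bit compression described in toFloat above
(the helper names and the harness are illustrative; the logic mirrors the
patch):

    object ToFloatCompressionCheck {
      private final val TwoPow32 = 4294967296.0

      private def isSignedSafeDouble(hi: Int): Boolean =
        ((hi ^ (hi >> 10)) & 0xffe00000) == 0

      def toFloatViaDouble(x: Long): Float = {
        val lo = x.toInt
        val hi = (x >>> 32).toInt
        // Compress the low 16 bits into a single "sticky" bit when they matter.
        val compressedLo =
          if (isSignedSafeDouble(hi) || (lo & 0xffff) == 0) lo
          else (lo & ~0xffff) | 0x8000
        (hi.toDouble * TwoPow32 + (compressedLo.toLong & 0xffffffffL).toDouble).toFloat
      }

      def main(args: Array[String]): Unit = {
        val samples = Seq(0L, 1L, -1L, 1L << 53, -(1L << 53) - 1, 0x123456789abcdef0L,
            -0x123456789abcdef0L, Long.MaxValue, Long.MinValue, Long.MinValue + 1)
        for (x <- samples)
          assert(toFloatViaDouble(x) == x.toFloat)
      }
    }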
*/ - @inline def asUnsignedSafeDouble(lo: Int, hi: Int): Double = - signedToDoubleApprox(lo, hi) // can use either signed or unsigned here; signed folds better + /** Tests whether a signed long (lo, hi) is a safe Double. + * + * This test is in fact slightly stricter than necessary, as it tests + * whether `-2^53 <= x < 2^53`, although x == 2^53 would be a perfectly safe + * Double. We do it this way because it corresponds to testing whether the + * value can be represented as a signed 54-bit integer. That is true if and + * only if the (64 - 54) = 10 most significant bits are all equal to bit 53, + * or equivalently, whether the 11 most significant bits all equal. + * + * Since there is virtually no gain to treating 2^53 itself as a safe + * Double, compared to all numbers smaller than it, we don't bother, and + * stay on the fast side. + */ + @inline def isSignedSafeDouble(hi: Int): Boolean = + ((hi ^ (hi >> 10)) & SafeDoubleHiMask) == 0 + + /** Converts a safe double (signed or unsigned) into its exact Double representation. */ + @inline def asSafeDouble(lo: Int, hi: Int): Double = + signedToDoubleApprox(lo, hi) /** Converts an unsigned safe double into its RuntimeLong representation. */ @inline def fromUnsignedSafeDouble(x: Double): RuntimeLong = @@ -1301,13 +1318,9 @@ object RuntimeLong { def inlineUnsignedInt_>=(a: Int, b: Int): Boolean = (a ^ 0x80000000) >= (b ^ 0x80000000) - @inline - def inline_negate(lo: Int, hi: Int): RuntimeLong = - sub(new RuntimeLong(0, 0), new RuntimeLong(lo, hi)) - @inline def inline_negate_hiReturn(lo: Int, hi: Int): Int = { - val n = inline_negate(lo, hi) + val n = sub(new RuntimeLong(0, 0), new RuntimeLong(lo, hi)) hiReturn = n.hi n.lo } diff --git a/linker/shared/src/test/scala/org/scalajs/linker/LibrarySizeTest.scala b/linker/shared/src/test/scala/org/scalajs/linker/LibrarySizeTest.scala index 7f2b385abf..39925a5cb2 100644 --- a/linker/shared/src/test/scala/org/scalajs/linker/LibrarySizeTest.scala +++ b/linker/shared/src/test/scala/org/scalajs/linker/LibrarySizeTest.scala @@ -70,9 +70,9 @@ class LibrarySizeTest { ) testLinkedSizes( - expectedFastLinkSize = 147932, - expectedFullLinkSizeWithoutClosure = 87506, - expectedFullLinkSizeWithClosure = 20696, + expectedFastLinkSize = 147548, + expectedFullLinkSizeWithoutClosure = 87296, + expectedFullLinkSizeWithClosure = 20680, classDefs, moduleInitializers = MainTestModuleInitializers ) diff --git a/project/Build.scala b/project/Build.scala index 49e549a45c..0883e88c59 100644 --- a/project/Build.scala +++ b/project/Build.scala @@ -2062,7 +2062,7 @@ object Build { Some(ExpectedSizes( fastLink = 425000 to 426000, fullLink = 282000 to 283000, - fastLinkGz = 61000 to 62000, + fastLinkGz = 60000 to 61000, fullLinkGz = 43000 to 44000, )) } From 3e51123db2b293575d18e1cb7804f8419da8955d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Doeraene?= Date: Wed, 25 Jun 2025 17:59:49 +0200 Subject: [PATCH 3/3] WiP Redesign the encoding of Longs in JavaScript. As a nice bonus, the IR checker now passes with `RuntimeLong`. 
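A small JVM-side sketch that spot-checks the isSignedSafeDouble bit test
introduced in the previous commit and reused below (the reference predicate
and the harness are illustrative):

    object SignedSafeDoubleTestCheck {
      // The bit test from the patch: the 11 most significant bits of hi must all be equal.
      def isSignedSafeDouble(hi: Int): Boolean =
        ((hi ^ (hi >> 10)) & 0xffe00000) == 0

      def main(args: Array[String]): Unit = {
        // Reference: -2^53 <= x < 2^53, the (slightly strict) range documented above.
        def ref(x: Long): Boolean = x >= -(1L << 53) && x < (1L << 53)
        val samples = Seq(0L, 1L, -1L, (1L << 53) - 1, 1L << 53, -(1L << 53),
            -(1L << 53) - 1, Long.MaxValue, Long.MinValue)
        for (x <- samples)
          assert(isSignedSafeDouble((x >>> 32).toInt) == ref(x))
      }
    }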
--- .../scalajs/linker/runtime/RuntimeLong.scala | 574 +++------ .../linker/backend/emitter/ClassEmitter.scala | 13 +- .../linker/backend/emitter/CoreJSLib.scala | 251 +++- .../linker/backend/emitter/Emitter.scala | 7 +- .../backend/emitter/FunctionEmitter.scala | 1041 ++++++++++++----- .../linker/backend/emitter/LongImpl.scala | 72 +- .../linker/backend/emitter/SJSGen.scala | 49 +- .../linker/backend/emitter/Transients.scala | 41 + .../linker/backend/emitter/TreeDSL.scala | 4 + .../linker/backend/emitter/VarField.scala | 27 +- .../scalajs/linker/checker/FeatureSet.scala | 4 +- .../scalajs/linker/checker/IRChecker.scala | 5 + .../scalajs/linker/frontend/Desugarer.scala | 20 +- .../org/scalajs/linker/frontend/Refiner.scala | 12 +- .../frontend/optimizer/OptimizerCore.scala | 484 ++++---- .../org/scalajs/linker/EmitterTest.scala | 10 +- .../org/scalajs/linker/LibrarySizeTest.scala | 6 +- project/Build.scala | 20 +- .../scalajs/testsuite/compiler/LongTest.scala | 45 + 19 files changed, 1560 insertions(+), 1125 deletions(-) diff --git a/linker-private-library/src/main/scala/org/scalajs/linker/runtime/RuntimeLong.scala b/linker-private-library/src/main/scala/org/scalajs/linker/runtime/RuntimeLong.scala index a91d1aff27..b75318a9f1 100644 --- a/linker-private-library/src/main/scala/org/scalajs/linker/runtime/RuntimeLong.scala +++ b/linker-private-library/src/main/scala/org/scalajs/linker/runtime/RuntimeLong.scala @@ -12,8 +12,6 @@ package org.scalajs.linker.runtime -import scala.annotation.tailrec - /* IMPORTANT NOTICE about this file * * The code of RuntimeLong is code-size- and performance critical. The methods @@ -22,66 +20,9 @@ import scala.annotation.tailrec * * This means that this implementation is oriented for performance over * readability and idiomatic code. - * - * DRY is applied as much as possible but is bounded by the performance and - * code size requirements. We use a lot of inline_xyz helpers meant to be used - * when we already have the parameters on stack, but they are sometimes - * duplicated in entry points to avoid the explicit extraction of heap fields - * into temporary variables when they are used only once. - * - * Otherwise, we typically extract the lo and hi fields from the heap into - * local variables once, whether explicitly in vals or implicitly when passed - * as arguments to inlineable methods. This reduces heap/record accesses, and - * allows both our optimizer and the JIT to know that we indeed always have the - * same value (the JIT does not even know that fields are immutable, but even - * our optimizer does not make use of that information). */ -/** Emulates a Long on the JavaScript platform. 
*/ -@inline -final class RuntimeLong(val lo: Int, val hi: Int) { - import RuntimeLong._ - - // java.lang.Object - - @inline - override def equals(that: Any): Boolean = that match { - case that: RuntimeLong => RuntimeLong.equals(this, that) - case _ => false - } - - @inline override def hashCode(): Int = lo ^ hi - - @inline override def toString(): String = - RuntimeLong.toString(this) - - // java.lang.Number - - @inline def byteValue(): Byte = lo.toByte - @inline def shortValue(): Short = lo.toShort - @inline def intValue(): Int = lo - @inline def longValue(): Long = this.asInstanceOf[Long] - @inline def floatValue(): Float = RuntimeLong.toFloat(this) - @inline def doubleValue(): Double = RuntimeLong.toDouble(this) - - // java.lang.Comparable, including bridges - - @inline - def compareTo(that: Object): Int = - RuntimeLong.compare(this, that.asInstanceOf[RuntimeLong]) - - @inline - def compareTo(that: java.lang.Long): Int = - RuntimeLong.compare(this, that.asInstanceOf[RuntimeLong]) - - // A few operator-friendly methods used by the division algorithms - - @inline private def <<(b: Int): RuntimeLong = RuntimeLong.shl(this, b) - @inline private def >>>(b: Int): RuntimeLong = RuntimeLong.shr(this, b) - @inline private def +(b: RuntimeLong): RuntimeLong = RuntimeLong.add(this, b) - @inline private def -(b: RuntimeLong): RuntimeLong = RuntimeLong.sub(this, b) -} - +/** Implementation for the Long operations on the JavaScript platform. */ object RuntimeLong { private final val TwoPow32 = 4294967296.0 private final val TwoPow63 = 9223372036854775808.0 @@ -92,153 +33,100 @@ object RuntimeLong { */ private final val SafeDoubleHiMask = 0xffe00000 - /** The hi part of a (lo, hi) return value. */ - private[this] var hiReturn: Int = _ + // public to be reliably identifiable by Desugar + @inline + def pack(lo: Int, hi: Int): Long = + 0L // replaced by a magic Transient(PackLong(lo, hi)) by Desugar // Comparisons @inline - def compare(a: RuntimeLong, b: RuntimeLong): Int = - RuntimeLong.compare(a.lo, a.hi, b.lo, b.hi) + def equals(alo: Int, ahi: Int, blo: Int, bhi: Int): Boolean = + alo == blo && ahi == bhi @inline - def equals(a: RuntimeLong, b: RuntimeLong): Boolean = - a.lo == b.lo && a.hi == b.hi + def notEquals(alo: Int, ahi: Int, blo: Int, bhi: Int): Boolean = + !equals(alo, ahi, blo, bhi) @inline - def notEquals(a: RuntimeLong, b: RuntimeLong): Boolean = - !equals(a, b) - - @inline - def lt(a: RuntimeLong, b: RuntimeLong): Boolean = { - /* We should use `inlineUnsignedInt_<(a.lo, b.lo)`, but that first extracts - * a.lo and b.lo into local variables, which cause the if/else not to be - * a valid JavaScript expression anymore. This causes useless explosion of - * JavaScript code at call site, when inlined. So we manually inline - * `inlineUnsignedInt_<(a.lo, b.lo)` to avoid that problem. - */ - val ahi = a.hi - val bhi = b.hi - if (ahi == bhi) (a.lo ^ 0x80000000) < (b.lo ^ 0x80000000) + def lt(alo: Int, ahi: Int, blo: Int, bhi: Int): Boolean = + if (ahi == bhi) inlineUnsignedInt_<(alo, blo) else ahi < bhi - } @inline - def le(a: RuntimeLong, b: RuntimeLong): Boolean = { - /* Manually inline `inlineUnsignedInt_<=(a.lo, b.lo)`. - * See the comment in `<` for the rationale. 
- */ - val ahi = a.hi - val bhi = b.hi - if (ahi == bhi) (a.lo ^ 0x80000000) <= (b.lo ^ 0x80000000) + def le(alo: Int, ahi: Int, blo: Int, bhi: Int): Boolean = + if (ahi == bhi) inlineUnsignedInt_<=(alo, blo) else ahi < bhi - } @inline - def gt(a: RuntimeLong, b: RuntimeLong): Boolean = { - /* Manually inline `inlineUnsignedInt_>a.lo, b.lo)`. - * See the comment in `<` for the rationale. - */ - val ahi = a.hi - val bhi = b.hi - if (ahi == bhi) (a.lo ^ 0x80000000) > (b.lo ^ 0x80000000) + def gt(alo: Int, ahi: Int, blo: Int, bhi: Int): Boolean = + if (ahi == bhi) inlineUnsignedInt_>(alo, blo) else ahi > bhi - } @inline - def ge(a: RuntimeLong, b: RuntimeLong): Boolean = { - /* Manually inline `inlineUnsignedInt_>=(a.lo, b.lo)`. - * See the comment in `<` for the rationale. - */ - val ahi = a.hi - val bhi = b.hi - if (ahi == bhi) (a.lo ^ 0x80000000) >= (b.lo ^ 0x80000000) + def ge(alo: Int, ahi: Int, blo: Int, bhi: Int): Boolean = + if (ahi == bhi) inlineUnsignedInt_>=(alo, blo) else ahi > bhi - } @inline - def ltu(a: RuntimeLong, b: RuntimeLong): Boolean = { - /* Manually inline `inlineUnsignedInt_<(a.lo, b.lo)`. - * See the comment in `<` for the rationale. - */ - val ahi = a.hi - val bhi = b.hi - if (ahi == bhi) (a.lo ^ 0x80000000) < (b.lo ^ 0x80000000) + def ltu(alo: Int, ahi: Int, blo: Int, bhi: Int): Boolean = + if (ahi == bhi) inlineUnsignedInt_<(alo, blo) else inlineUnsignedInt_<(ahi, bhi) - } @inline - def leu(a: RuntimeLong, b: RuntimeLong): Boolean = { - /* Manually inline `inlineUnsignedInt_<=(a.lo, b.lo)`. - * See the comment in `<` for the rationale. - */ - val ahi = a.hi - val bhi = b.hi - if (ahi == bhi) (a.lo ^ 0x80000000) <= (b.lo ^ 0x80000000) - else inlineUnsignedInt_<=(ahi, bhi) - } + def leu(alo: Int, ahi: Int, blo: Int, bhi: Int): Boolean = + if (ahi == bhi) inlineUnsignedInt_<=(alo, blo) + else inlineUnsignedInt_<(ahi, bhi) @inline - def gtu(a: RuntimeLong, b: RuntimeLong): Boolean = { - /* Manually inline `inlineUnsignedInt_>(a.lo, b.lo)`. - * See the comment in `<` for the rationale. - */ - val ahi = a.hi - val bhi = b.hi - if (ahi == bhi) (a.lo ^ 0x80000000) > (b.lo ^ 0x80000000) + def gtu(alo: Int, ahi: Int, blo: Int, bhi: Int): Boolean = + if (ahi == bhi) inlineUnsignedInt_>(alo, blo) else inlineUnsignedInt_>(ahi, bhi) - } @inline - def geu(a: RuntimeLong, b: RuntimeLong): Boolean = { - /* Manually inline `inlineUnsignedInt_>=(a.lo, b.lo)`. - * See the comment in `<` for the rationale. 
- */ - val ahi = a.hi - val bhi = b.hi - if (ahi == bhi) (a.lo ^ 0x80000000) >= (b.lo ^ 0x80000000) - else inlineUnsignedInt_>=(ahi, bhi) - } + def geu(alo: Int, ahi: Int, blo: Int, bhi: Int): Boolean = + if (ahi == bhi) inlineUnsignedInt_>=(alo, blo) + else inlineUnsignedInt_>(ahi, bhi) // Bitwise operations @inline - def or(a: RuntimeLong, b: RuntimeLong): RuntimeLong = - new RuntimeLong(a.lo | b.lo, a.hi | b.hi) + def or(alo: Int, ahi: Int, blo: Int, bhi: Int): Long = + pack(alo | blo, ahi | bhi) @inline - def and(a: RuntimeLong, b: RuntimeLong): RuntimeLong = - new RuntimeLong(a.lo & b.lo, a.hi & b.hi) + def and(alo: Int, ahi: Int, blo: Int, bhi: Int): Long = + pack(alo & blo, ahi & bhi) @inline - def xor(a: RuntimeLong, b: RuntimeLong): RuntimeLong = - new RuntimeLong(a.lo ^ b.lo, a.hi ^ b.hi) + def xor(alo: Int, ahi: Int, blo: Int, bhi: Int): Long = + pack(alo ^ blo, ahi ^ bhi) // Shifts /** Shift left */ @inline - def shl(a: RuntimeLong, n: Int): RuntimeLong = { + def shl(lo: Int, hi: Int, n: Int): Long = { /* This should *reasonably* be: * val n1 = n & 63 * if (n1 < 32) - * new RuntimeLong(lo << n1, if (n1 == 0) hi else (lo >>> 32-n1) | (hi << n1)) + * RTLong(lo << n1, if (n1 == 0) hi else (lo >>> 32-n1) | (hi << n1)) * else - * new RuntimeLong(0, lo << n1) + * RTLong(0, lo << n1) * * Replacing n1 by its definition, we have: * if (n & 63 < 32) - * new RuntimeLong(lo << (n & 63), + * RTLong(lo << (n & 63), * if ((n & 63) == 0) hi else (lo >>> 32-(n & 63)) | (hi << (n & 63))) * else - * new RuntimeLong(0, lo << (n & 63)) + * RTLong(0, lo << (n & 63)) * * Since the values on the rhs of shifts are always in arithmetic mod 32, * we can get: * if (n & 63 < 32) - * new RuntimeLong(lo << n, if ((n & 63) == 0) hi else (lo >>> -n) | (hi << n)) + * RTLong(lo << n, if ((n & 63) == 0) hi else (lo >>> -n) | (hi << n)) * else - * new RuntimeLong(0, lo << n) + * RTLong(0, lo << n) * * The condition `n & 63 < 32` is equivalent to * (n & 63) & 32 == 0 @@ -246,7 +134,7 @@ object RuntimeLong { * n & 32 == 0 * * In the then part, we have `n & 32 == 0` hence `n & 63 == n & 31`: - * new RuntimeLong(lo << n, if ((n & 31) == 0) hi else (lo >>> -n) | (hi << n)) + * RTLong(lo << n, if ((n & 31) == 0) hi else (lo >>> -n) | (hi << n)) * * Consider the following portion: * if ((n & 31) == 0) hi else (lo >>> -n) | (hi << n) @@ -272,58 +160,53 @@ object RuntimeLong { * * Summarizing, so far we have * if (n & 32 == 0) - * new RuntimeLong(lo << n, (lo >>> 1 >>> (31-n)) | (hi << n)) + * RTLong(lo << n, (lo >>> 1 >>> (31-n)) | (hi << n)) * else - * new RuntimeLong(0, lo << n) + * RTLong(0, lo << n) * * If we distribute the condition in the lo and hi arguments of the - * constructors, we get a version with only one RuntimeLong output, which - * avoids reification as records by the optimizer, yielding shorter code. + * constructors, we get a version with only one pack output, which avoids + * reification as records by the optimizer, yielding shorter code. * It is potentially slightly less efficient, except when `n` is constant, * which is often the case anyway. 
* * Finally we have: */ - val lo = a.lo - new RuntimeLong( + pack( if ((n & 32) == 0) lo << n else 0, - if ((n & 32) == 0) (lo >>> 1 >>> (31-n)) | (a.hi << n) else lo << n) + if ((n & 32) == 0) (lo >>> 1 >>> (31-n)) | (hi << n) else lo << n) } /** Logical shift right */ @inline - def shr(a: RuntimeLong, n: Int): RuntimeLong = { + def shr(lo: Int, hi: Int, n: Int): Long = { // This derives in a similar way as in << - val hi = a.hi - new RuntimeLong( - if ((n & 32) == 0) (a.lo >>> n) | (hi << 1 << (31-n)) else hi >>> n, + pack( + if ((n & 32) == 0) (lo >>> n) | (hi << 1 << (31-n)) else hi >>> n, if ((n & 32) == 0) hi >>> n else 0) } /** Arithmetic shift right */ @inline - def sar(a: RuntimeLong, n: Int): RuntimeLong = { + def sar(lo: Int, hi: Int, n: Int): Long = { // This derives in a similar way as in << - val hi = a.hi - new RuntimeLong( - if ((n & 32) == 0) (a.lo >>> n) | (hi << 1 << (31-n)) else hi >> n, + pack( + if ((n & 32) == 0) (lo >>> n) | (hi << 1 << (31-n)) else hi >> n, if ((n & 32) == 0) hi >> n else hi >> 31) } // Arithmetic operations @inline - def add(a: RuntimeLong, b: RuntimeLong): RuntimeLong = { + def add(alo: Int, ahi: Int, blo: Int, bhi: Int): Long = { // Hacker's Delight, Section 2-16 - val alo = a.lo - val blo = b.lo val lo = alo + blo - new RuntimeLong(lo, - a.hi + b.hi + (((alo & blo) | ((alo | blo) & ~lo)) >>> 31)) + pack(lo, + ahi + bhi + (((alo & blo) | ((alo | blo) & ~lo)) >>> 31)) } @inline - def sub(a: RuntimeLong, b: RuntimeLong): RuntimeLong = { + def sub(alo: Int, ahi: Int, blo: Int, bhi: Int): Long = { /* Hacker's Delight, Section 2-16 * * We deviate a bit from the original algorithm. Hacker's Delight uses @@ -332,19 +215,13 @@ object RuntimeLong { * better when `a.hi` and `b.hi` are both known to be 0. This happens in * practice when `a` and `b` are 0-extended from `Int` values. */ - val alo = a.lo - val blo = b.lo val lo = alo - blo - new RuntimeLong(lo, - a.hi - b.hi + (((~alo & blo) | (~(alo ^ blo) & lo)) >> 31)) + pack(lo, + ahi - bhi + (((~alo & blo) | (~(alo ^ blo) & lo)) >> 31)) } @inline - def abs(a: RuntimeLong): RuntimeLong = - inline_abs(a.lo, a.hi) - - @inline - def mul(a: RuntimeLong, b: RuntimeLong): RuntimeLong = { + def mul(alo: Int, ahi: Int, blo: Int, bhi: Int): Long = { /* The following algorithm is based on the decomposition in 32-bit and then * 16-bit subproducts of the unsigned interpretation of operands. * @@ -533,9 +410,6 @@ object RuntimeLong { * a1b1 +[32] (c1part >>>[32] 16) +[32] ((a1b0 +[32] (c1part &[32] 0xffff)) >>>[32] 16) */ - val alo = a.lo - val blo = b.lo - /* Note that the optimizer normalizes constants in * to be on the * left-hand-side (when it cannot do constant-folding to begin with). * Therefore, `b` is never constant in practice. @@ -563,11 +437,11 @@ object RuntimeLong { // hi = a.lo*b.hi + a.hi*b.lo + carry_from_lo_* val c1part = (a0b0 >>> 16) + a0b1 val hi = { - alo*b.hi + a.hi*blo + a1 * b1 + (c1part >>> 16) + + alo*bhi + ahi*blo + a1 * b1 + (c1part >>> 16) + (((c1part & 0xffff) + a1b0) >>> 16) // collapses to 0 when a1b0 = 0 } - new RuntimeLong(lo, hi) + pack(lo, hi) } /** Computes `longBitsToDouble(a)`. @@ -576,26 +450,27 @@ object RuntimeLong { * underlying buffer is at least 8 bytes long. 
*/ @inline - def bitsToDouble(a: RuntimeLong, + def bitsToDouble(lo: Int, hi: Int, fpBitsDataView: scala.scalajs.js.typedarray.DataView): Double = { - fpBitsDataView.setInt32(0, a.lo, littleEndian = true) - fpBitsDataView.setInt32(4, a.hi, littleEndian = true) + fpBitsDataView.setInt32(0, lo, littleEndian = true) + fpBitsDataView.setInt32(4, hi, littleEndian = true) fpBitsDataView.getFloat64(0, littleEndian = true) } @inline - def toString(a: RuntimeLong): String = - toString(a.lo, a.hi) + def toString(lo: Int, hi: Int): String = + toStringImpl(lo, hi) - private def toString(lo: Int, hi: Int): String = { + private def toStringImpl(lo: Int, hi: Int): String = { if (isInt32(lo, hi)) { lo.toString() } else if (isSignedSafeDouble(hi)) { asSafeDouble(lo, hi).toString() } else { - val abs = inline_abs(lo, hi) - val s = toUnsignedStringLarge(abs.lo, abs.hi) + val aAbs = abs(lo, hi) + // Calls back into toInt() and shr() + val s = toUnsignedStringLarge(aAbs.toInt, (aAbs >>> 32).toInt) if (hi < 0) "-" + s else s } } @@ -686,15 +561,15 @@ object RuntimeLong { } @inline - def toInt(a: RuntimeLong): Int = - a.lo + def toInt(lo: Int, hi: Int): Int = + lo @inline - def toDouble(a: RuntimeLong): Double = - signedToDoubleApprox(a.lo, a.hi) + def toDouble(lo: Int, hi: Int): Double = + signedToDoubleApprox(lo, hi) @inline - def toFloat(a: RuntimeLong): Float = { + def toFloat(lo: Int, hi: Int): Float = { /* This implementation is based on the property that, *if* the conversion * `x.toDouble` is lossless, then the result of `x.toFloat` is equivalent * to `x.toDouble.toFloat`. @@ -749,8 +624,6 @@ object RuntimeLong { * unsigned regardless, when converting to a double. */ - val lo = a.lo - val hi = a.hi val compressedLo = if (isSignedSafeDouble(hi) || (lo & 0xffff) == 0) lo else (lo & ~0xffff) | 0x8000 @@ -758,37 +631,32 @@ object RuntimeLong { } @inline - def clz(a: RuntimeLong): Int = { - val hi = a.hi + def clz(lo: Int, hi: Int): Int = { if (hi != 0) Integer.numberOfLeadingZeros(hi) - else 32 + Integer.numberOfLeadingZeros(a.lo) + else 32 + Integer.numberOfLeadingZeros(lo) } @inline - def fromInt(value: Int): RuntimeLong = - new RuntimeLong(value, value >> 31) + def fromInt(value: Int): Long = + pack(value, value >> 31) @inline - def fromUnsignedInt(value: Int): RuntimeLong = - new RuntimeLong(value, 0) + def fromUnsignedInt(value: Int): Long = + pack(value, 0) @inline - def fromDouble(value: Double): RuntimeLong = { - val lo = fromDoubleImpl(value) - new RuntimeLong(lo, hiReturn) - } + def fromDouble(value: Double): Long = + fromDoubleImpl(value) - private def fromDoubleImpl(value: Double): Int = { + private def fromDoubleImpl(value: Double): Long = { /* When value is NaN, the conditions of the 3 `if`s are false, and we end * up returning (NaN | 0, (NaN / TwoPow32) | 0), which is correctly (0, 0). 
*/ if (value < -TwoPow63) { - hiReturn = 0x80000000 - 0 + Long.MinValue } else if (value >= TwoPow63) { - hiReturn = 0x7fffffff - 0xffffffff + Long.MaxValue } else { val rawLo = rawToInt(value) val rawHi = rawToInt(value / TwoPow32) @@ -830,8 +698,7 @@ object RuntimeLong { * * Combining the negative and positive cases, we get: */ - hiReturn = if (value < 0 && rawLo != 0) rawHi - 1 else rawHi - rawLo + pack(rawLo, if (value < 0 && rawLo != 0) rawHi - 1 else rawHi) } } @@ -842,16 +709,17 @@ object RuntimeLong { */ @inline def fromDoubleBits(value: Double, - fpBitsDataView: scala.scalajs.js.typedarray.DataView): RuntimeLong = { + fpBitsDataView: scala.scalajs.js.typedarray.DataView): Long = { fpBitsDataView.setFloat64(0, value, littleEndian = true) - new RuntimeLong( + pack( fpBitsDataView.getInt32(0, littleEndian = true), fpBitsDataView.getInt32(4, littleEndian = true) ) } - private def compare(alo: Int, ahi: Int, blo: Int, bhi: Int): Int = { + @inline + def compare(alo: Int, ahi: Int, blo: Int, bhi: Int): Int = { if (ahi == bhi) { if (alo == blo) 0 else if (inlineUnsignedInt_<(alo, blo)) -1 @@ -868,14 +736,14 @@ object RuntimeLong { * intrinsic avoids 2 int multiplications. */ @inline - def multiplyFull(a: Int, b: Int): RuntimeLong = { + def multiplyFull(a: Int, b: Int): Long = { /* We use Hacker's Delight, Section 8-2, Figure 8-2, to compute the hi * word of the result. We reuse intermediate products to compute the lo - * word, like we do in `RuntimeLong.*`. + * word, like we do in `RuntimeLong.mul`. * * We swap the role of a1b0 and a0b1 compared to Hacker's Delight, to * optimize for the case where a1b0 collapses to 0, like we do in - * `RuntimeLong.*`. The optimizer normalizes constants in multiplyFull to + * `RuntimeLong.mul`. The optimizer normalizes constants in multiplyFull to * be on the left-hand-side (when it cannot do constant-folding to begin * with). Therefore, `b` is never constant in practice. 
*/ @@ -900,96 +768,74 @@ object RuntimeLong { (((t & 0xffff) + a1b0) >> 16) // collapses to 0 when a1b0 = 0 } - new RuntimeLong(lo, hi) + pack(lo, hi) } @inline - def divide(a: RuntimeLong, b: RuntimeLong): RuntimeLong = { - val lo = divideImpl(a.lo, a.hi, b.lo, b.hi) - new RuntimeLong(lo, hiReturn) - } + def divide(alo: Int, ahi: Int, blo: Int, bhi: Int): Long = + divideImpl(alo, ahi, blo, bhi) - def divideImpl(alo: Int, ahi: Int, blo: Int, bhi: Int): Int = { + def divideImpl(alo: Int, ahi: Int, blo: Int, bhi: Int): Long = { if (isZero(blo, bhi)) throw new ArithmeticException("/ by zero") if (isInt32(alo, ahi)) { if (isInt32(blo, bhi)) { - if (alo == Int.MinValue && blo == -1) { - hiReturn = 0 - Int.MinValue - } else { - val lo = alo / blo - hiReturn = lo >> 31 - lo - } + if (alo == Int.MinValue && blo == -1) + 0x80000000L + else + fromInt(alo / blo) } else { // Either a == Int.MinValue && b == (Int.MaxValue + 1), or (abs(b) > abs(a)) - if (alo == Int.MinValue && (blo == 0x80000000 && bhi == 0)) { - hiReturn = -1 - -1 - } else { - // 0L, because abs(b) > abs(a) - hiReturn = 0 - 0 - } + if (alo == Int.MinValue && (blo == 0x80000000 && bhi == 0)) + -1L + else + 0L // because abs(b) > abs(a) } } else { - val aAbs = inline_abs(alo, ahi) - val bAbs = inline_abs(blo, bhi) - val absRLo = unsigned_/(aAbs.lo, aAbs.hi, bAbs.lo, bAbs.hi) - if ((ahi ^ bhi) >= 0) absRLo // a and b have the same sign bit - else inline_negate_hiReturn(absRLo, hiReturn) + val aAbs = abs(alo, ahi) + val bAbs = abs(blo, bhi) + // Calls back into toInt() and shr() + val absR = unsigned_/(aAbs.toInt, (aAbs >>> 32).toInt, bAbs.toInt, (bAbs >>> 32).toInt) + if ((ahi ^ bhi) >= 0) + absR // a and b have the same sign bit + else + -absR // calls back into sub() } } @inline - def divideUnsigned(a: RuntimeLong, b: RuntimeLong): RuntimeLong = { - val lo = divideUnsignedImpl(a.lo, a.hi, b.lo, b.hi) - new RuntimeLong(lo, hiReturn) - } + def divideUnsigned(alo: Int, ahi: Int, blo: Int, bhi: Int): Long = + divideUnsignedImpl(alo, ahi, blo, bhi) - def divideUnsignedImpl(alo: Int, ahi: Int, blo: Int, bhi: Int): Int = { + def divideUnsignedImpl(alo: Int, ahi: Int, blo: Int, bhi: Int): Long = { if (isZero(blo, bhi)) throw new ArithmeticException("/ by zero") if (isUInt32(ahi)) { - if (isUInt32(bhi)) { - hiReturn = 0 - Integer.divideUnsigned(alo, blo) - } else { - // a < b - hiReturn = 0 - 0 - } + if (isUInt32(bhi)) + pack(Integer.divideUnsigned(alo, blo), 0) + else + 0L // a < b } else { unsigned_/(alo, ahi, blo, bhi) } } - private def unsigned_/(alo: Int, ahi: Int, blo: Int, bhi: Int): Int = { + private def unsigned_/(alo: Int, ahi: Int, blo: Int, bhi: Int): Long = { // This method is not called if isInt32(alo, ahi) nor if isZero(blo, bhi) if (isUnsignedSafeDouble(ahi)) { - if (isUnsignedSafeDouble(bhi)) { - val aDouble = asSafeDouble(alo, ahi) - val bDouble = asSafeDouble(blo, bhi) - val rDouble = aDouble / bDouble - hiReturn = unsignedSafeDoubleHi(rDouble) - unsignedSafeDoubleLo(rDouble) - } else { - // 0L, because b > a - hiReturn = 0 - 0 - } + if (isUnsignedSafeDouble(bhi)) + fromUnsignedSafeDouble(asSafeDouble(alo, ahi) / asSafeDouble(blo, bhi)) + else + 0L // 0L, because b > a } else { if (bhi == 0 && isPowerOfTwo_IKnowItsNot0(blo)) { val pow = log2OfPowerOfTwo(blo) - hiReturn = ahi >>> pow - (alo >>> pow) | (ahi << 1 << (31-pow)) + pack((alo >>> pow) | (ahi << 1 << (31-pow)), ahi >>> pow) } else if (blo == 0 && isPowerOfTwo_IKnowItsNot0(bhi)) { val pow = log2OfPowerOfTwo(bhi) - hiReturn = 0 - ahi >>> pow + pack(ahi >>> pow, 0) } 
else { unsignedDivModHelper(alo, ahi, blo, bhi, askQuotient = true) } @@ -997,94 +843,70 @@ object RuntimeLong { } @inline - def remainder(a: RuntimeLong, b: RuntimeLong): RuntimeLong = { - val lo = remainderImpl(a.lo, a.hi, b.lo, b.hi) - new RuntimeLong(lo, hiReturn) - } + def remainder(alo: Int, ahi: Int, blo: Int, bhi: Int): Long = + remainderImpl(alo, ahi, blo, bhi) - def remainderImpl(alo: Int, ahi: Int, blo: Int, bhi: Int): Int = { + def remainderImpl(alo: Int, ahi: Int, blo: Int, bhi: Int): Long = { if (isZero(blo, bhi)) throw new ArithmeticException("/ by zero") if (isInt32(alo, ahi)) { if (isInt32(blo, bhi)) { - if (blo != -1) { - val lo = alo % blo - hiReturn = lo >> 31 - lo - } else { - // Work around https://github.com/ariya/phantomjs/issues/12198 - hiReturn = 0 - 0 - } + if (blo != -1) + fromInt(alo % blo) + else + 0L // Work around https://github.com/ariya/phantomjs/issues/12198 } else { // Either a == Int.MinValue && b == (Int.MaxValue + 1), or (abs(b) > abs(a)) - if (alo == Int.MinValue && (blo == 0x80000000 && bhi == 0)) { - hiReturn = 0 - 0 - } else { - // a, because abs(b) > abs(a) - hiReturn = ahi - alo - } + if (alo == Int.MinValue && (blo == 0x80000000 && bhi == 0)) + 0L + else + pack(alo, ahi) // a, because abs(b) > abs(a) } } else { - val aAbs = inline_abs(alo, ahi) - val bAbs = inline_abs(blo, bhi) - val absRLo = unsigned_%(aAbs.lo, aAbs.hi, bAbs.lo, bAbs.hi) - if (ahi < 0) inline_negate_hiReturn(absRLo, hiReturn) - else absRLo + val aAbs = abs(alo, ahi) + val bAbs = abs(blo, bhi) + // Calls back into toInt() and shr() + val absR = unsigned_%(aAbs.toInt, (aAbs >>> 32).toInt, bAbs.toInt, (bAbs >>> 32).toInt) + if (ahi < 0) + -absR // calls back into sub() + else + absR } } @inline - def remainderUnsigned(a: RuntimeLong, b: RuntimeLong): RuntimeLong = { - val lo = remainderUnsignedImpl(a.lo, a.hi, b.lo, b.hi) - new RuntimeLong(lo, hiReturn) - } + def remainderUnsigned(alo: Int, ahi: Int, blo: Int, bhi: Int): Long = + remainderUnsignedImpl(alo, ahi, blo, bhi) - def remainderUnsignedImpl(alo: Int, ahi: Int, blo: Int, bhi: Int): Int = { + def remainderUnsignedImpl(alo: Int, ahi: Int, blo: Int, bhi: Int): Long = { if (isZero(blo, bhi)) throw new ArithmeticException("/ by zero") if (isUInt32(ahi)) { - if (isUInt32(bhi)) { - hiReturn = 0 - Integer.remainderUnsigned(alo, blo) - } else { - // a < b - hiReturn = ahi - alo - } + if (isUInt32(bhi)) + pack(Integer.remainderUnsigned(alo, blo), 0) + else + pack(alo, ahi) // a < b } else { unsigned_%(alo, ahi, blo, bhi) } } - private def unsigned_%(alo: Int, ahi: Int, blo: Int, bhi: Int): Int = { + private def unsigned_%(alo: Int, ahi: Int, blo: Int, bhi: Int): Long = { // This method is not called if isInt32(alo, ahi) nor if isZero(blo, bhi) if (isUnsignedSafeDouble(ahi)) { - if (isUnsignedSafeDouble(bhi)) { - val aDouble = asSafeDouble(alo, ahi) - val bDouble = asSafeDouble(blo, bhi) - val rDouble = aDouble % bDouble - hiReturn = unsignedSafeDoubleHi(rDouble) - unsignedSafeDoubleLo(rDouble) - } else { - // a, because b > a - hiReturn = ahi - alo - } + if (isUnsignedSafeDouble(bhi)) + fromUnsignedSafeDouble(asSafeDouble(alo, ahi) % asSafeDouble(blo, bhi)) + else + pack(alo, ahi) // a, because b > a } else { - if (bhi == 0 && isPowerOfTwo_IKnowItsNot0(blo)) { - hiReturn = 0 - alo & (blo - 1) - } else if (blo == 0 && isPowerOfTwo_IKnowItsNot0(bhi)) { - hiReturn = ahi & (bhi - 1) - alo - } else { + if (bhi == 0 && isPowerOfTwo_IKnowItsNot0(blo)) + pack(alo & (blo - 1), 0) + else if (blo == 0 && isPowerOfTwo_IKnowItsNot0(bhi)) + 
pack(alo, ahi & (bhi - 1)) + else unsignedDivModHelper(alo, ahi, blo, bhi, askQuotient = false) - } } } @@ -1095,13 +917,15 @@ object RuntimeLong { * the lo word. */ private def unsignedDivModHelper(alo: Int, ahi: Int, blo: Int, bhi: Int, - askQuotient: Boolean): Int = { + askQuotient: Boolean): Long = { var shift = inlineNumberOfLeadingZeros(blo, bhi) - inlineNumberOfLeadingZeros(alo, ahi) - val initialBShift = new RuntimeLong(blo, bhi) << shift - var bShiftLo = initialBShift.lo - var bShiftHi = initialBShift.hi + + // var bShift = shl(blo, bhi, shift) + var bShiftLo = if ((shift & 32) == 0) blo << shift else 0 + var bShiftHi = if ((shift & 32) == 0) (blo >>> 1 >>> (31-shift)) | (bhi << shift) else blo << shift + var remLo = alo var remHi = ahi var quotLo = 0 @@ -1122,19 +946,25 @@ object RuntimeLong { */ while (shift >= 0 && (remHi & SafeDoubleHiMask) != 0) { if (inlineUnsigned_>=(remLo, remHi, bShiftLo, bShiftHi)) { - val newRem = - new RuntimeLong(remLo, remHi) - new RuntimeLong(bShiftLo, bShiftHi) - remLo = newRem.lo - remHi = newRem.hi + // val newRem = rem - bShift + val newRemLo = remLo - bShiftLo + val newRemHi = remHi - bShiftHi + (((~remLo & bShiftLo) | (~(remLo ^ bShiftLo) & newRemLo)) >> 31) + + remLo = newRemLo + remHi = newRemHi if (shift < 32) quotLo |= (1 << shift) else quotHi |= (1 << shift) // == (1 << (shift - 32)) } shift -= 1 - val newBShift = new RuntimeLong(bShiftLo, bShiftHi) >>> 1 - bShiftLo = newBShift.lo - bShiftHi = newBShift.hi + + // val newBShift = bShift >>> 1 + val newBShiftLo = (bShiftLo >>> 1) | (bShiftHi << 31) + val newBShiftHi = bShiftHi >>> 1 + + bShiftLo = newBShiftLo + bShiftHi = newBShiftHi } // Now rem < 2^53, we can finish with a double division @@ -1144,22 +974,15 @@ object RuntimeLong { if (askQuotient) { val rem_div_bDouble = fromUnsignedSafeDouble(remDouble / bDouble) - val newQuot = new RuntimeLong(quotLo, quotHi) + rem_div_bDouble - hiReturn = newQuot.hi - newQuot.lo + pack(quotLo, quotHi) + rem_div_bDouble // calls back into add() } else { - val rem_mod_bDouble = remDouble % bDouble - hiReturn = unsignedSafeDoubleHi(rem_mod_bDouble) - unsignedSafeDoubleLo(rem_mod_bDouble) + fromUnsignedSafeDouble(remDouble % bDouble) } } else { - if (askQuotient) { - hiReturn = quotHi - quotLo - } else { - hiReturn = remHi - remLo - } + if (askQuotient) + pack(quotLo, quotHi) + else + pack(remLo, remHi) } } @@ -1213,9 +1036,9 @@ object RuntimeLong { @inline def asSafeDouble(lo: Int, hi: Int): Double = signedToDoubleApprox(lo, hi) - /** Converts an unsigned safe double into its RuntimeLong representation. */ - @inline def fromUnsignedSafeDouble(x: Double): RuntimeLong = - new RuntimeLong(unsignedSafeDoubleLo(x), unsignedSafeDoubleHi(x)) + /** Converts an unsigned safe double into its Long representation. */ + @inline def fromUnsignedSafeDouble(x: Double): Long = + pack(unsignedSafeDoubleLo(x), unsignedSafeDoubleHi(x)) /** Computes the lo part of a long from an unsigned safe double. */ @inline def unsignedSafeDoubleLo(x: Double): Int = @@ -1319,14 +1142,7 @@ object RuntimeLong { (a ^ 0x80000000) >= (b ^ 0x80000000) @inline - def inline_negate_hiReturn(lo: Int, hi: Int): Int = { - val n = sub(new RuntimeLong(0, 0), new RuntimeLong(lo, hi)) - hiReturn = n.hi - n.lo - } - - @inline - def inline_abs(lo: Int, hi: Int): RuntimeLong = { + def abs(lo: Int, hi: Int): Long = { /* The algorithm here is inspired by Hacker's Delight formula for `abs`. * However, a naive application of that formula does not give good code for * our RuntimeLong implementation. 
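 *
 * As a concrete illustration (not from the original comment, and assuming
 * sign = hi >> 31 as in the usual formulation): for (lo, hi) =
 * (0xfffffffb, 0xffffffff), i.e. -5L, we get sign = -1, xlo = lo ^ sign = 4,
 * rlo = xlo - sign = 5, and rhi = (hi ^ sign) + ((xlo & ~rlo) >>> 31) = 0,
 * i.e. the expected 5L.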
@@ -1414,7 +1230,7 @@ object RuntimeLong { val xlo = lo ^ sign val rlo = xlo - sign val rhi = (hi ^ sign) + ((xlo & ~rlo) >>> 31) - new RuntimeLong(rlo, rhi) + pack(rlo, rhi) } } diff --git a/linker/shared/src/main/scala/org/scalajs/linker/backend/emitter/ClassEmitter.scala b/linker/shared/src/main/scala/org/scalajs/linker/backend/emitter/ClassEmitter.scala index 9e6b4fd44d..92830a8fc8 100644 --- a/linker/shared/src/main/scala/org/scalajs/linker/backend/emitter/ClassEmitter.scala +++ b/linker/shared/src/main/scala/org/scalajs/linker/backend/emitter/ClassEmitter.scala @@ -371,8 +371,17 @@ private[emitter] final class ClassEmitter(sjsGen: SJSGen) { } yield { val field = anyField.asInstanceOf[FieldDef] implicit val pos = field.pos - js.Assign(genSelectForDef(js.This(), field.name, field.originalName), - genZeroOf(field.ftpe)) + field.ftpe match { + case LongType if !useBigIntForLongs => + val (lo, hi) = genSelectLongForDef(js.This(), field.name, field.originalName) + js.Block( + js.Assign(lo, js.IntLiteral(0)), + js.Assign(hi, js.IntLiteral(0)) + ) + case _ => + js.Assign(genSelectForDef(js.This(), field.name, field.originalName), + genZeroOf(field.ftpe)) + } } } diff --git a/linker/shared/src/main/scala/org/scalajs/linker/backend/emitter/CoreJSLib.scala b/linker/shared/src/main/scala/org/scalajs/linker/backend/emitter/CoreJSLib.scala index bc8610d0c0..e469626e8d 100644 --- a/linker/shared/src/main/scala/org/scalajs/linker/backend/emitter/CoreJSLib.scala +++ b/linker/shared/src/main/scala/org/scalajs/linker/backend/emitter/CoreJSLib.scala @@ -133,8 +133,9 @@ private[emitter] object CoreJSLib { private def buildPreObjectDefinitions(): List[Tree] = { defineFileLevelThis() ::: defineJSBuiltinsSnapshotsAndPolyfills() ::: - declareCachedL0() ::: + defineResHi() ::: defineCharClass() ::: + defineLongClass() ::: defineRuntimeFunctions() ::: defineObjectGetClassFunctions() ::: defineDispatchFunctions() ::: @@ -155,7 +156,7 @@ private[emitter] object CoreJSLib { } private def buildInitializations(): List[Tree] = { - assignCachedL0() + Nil } private def defineFileLevelThis(): List[Tree] = { @@ -517,20 +518,12 @@ private[emitter] object CoreJSLib { } } - private def declareCachedL0(): List[Tree] = { + private def defineResHi(): List[Tree] = { condDefs(!allowBigIntsForLongs)( - extractWithGlobals(globalVarDecl(VarField.L0, CoreVar)) + extractWithGlobals(globallyMutableVarDef(VarField.resHi, VarField.setResHi, CoreVar, int(0))) ) } - private def assignCachedL0(): List[Tree] = { - condDefs(!allowBigIntsForLongs)(List( - globalVar(VarField.L0, CoreVar) := genScalaClassNew( - LongImpl.RuntimeLongClass, LongImpl.initFromParts, 0, 0), - genClassDataOf(LongRef) DOT cpn.zero := globalVar(VarField.L0, CoreVar) - )) - } - private def defineCharClass(): List[Tree] = { val ctor = { val c = varRef("c") @@ -555,6 +548,33 @@ private[emitter] object CoreJSLib { } } + private def defineLongClass(): List[Tree] = { + condDefs(!allowBigIntsForLongs) { + val ctor = { + val lo = varRef("lo") + val hi = varRef("hi") + MethodDef(static = false, Ident("constructor"), paramList(lo, hi), None, Block( + This() DOT cpn.lo := lo, + This() DOT cpn.hi := hi + )) + } + + val toStr = { + MethodDef(static = false, Ident("toString"), Nil, None, { + Return(genLongApplyStatic(LongImpl.toString_, This() DOT cpn.lo, This() DOT cpn.hi)) + }) + } + + if (useClassesForRegularClasses) { + extractWithGlobals(globalClassDef(VarField.Long, CoreVar, None, ctor :: toStr :: Nil)) + } else { + defineFunction(VarField.Long, ctor.args, ctor.body) ::: + 
setPrototypeVar(globalVar(VarField.Long, CoreVar)) ::: + assignES5ClassMembers(globalVar(VarField.Long, CoreVar), List(toStr)) + } + } + } + private def defineRuntimeFunctions(): List[Tree] = ( condDefs(asInstanceOfs != CheckedBehavior.Unchecked || arrayStores != CheckedBehavior.Unchecked)( /* Returns a safe string description of a value. @@ -809,10 +829,15 @@ private[emitter] object CoreJSLib { } def genHijackedMethodApply(className: ClassName): Tree = { - val instanceAsPrimitive = - if (className == BoxedCharacterClass) genCallHelper(VarField.uC, instance) - else instance - Apply(globalVar(VarField.f, (className, methodName)), instanceAsPrimitive :: args) + val instanceAsPrimitive = className match { + case BoxedCharacterClass => + List(instance DOT cpn.c) + case BoxedLongClass if !useBigIntForLongs => + List(instance DOT cpn.lo, instance DOT cpn.hi) + case _ => + List(instance) + } + Apply(globalVar(VarField.f, (className, methodName)), instanceAsPrimitive ::: args) } def genBodyNoSwitch(hijackedClasses: List[ClassName]): Tree = { @@ -930,7 +955,10 @@ private[emitter] object CoreJSLib { }) }, { genArrowFunction(paramList(x), { - Return(Apply(globalVar(VarField.s, (FloatingPointBitsPolyfillsClass, polyfillMethod)), List(x))) + val args = + if (polyfillMethod.paramTypeRefs.head != LongRef || useBigIntForLongs) List(x) + else List(x DOT cpn.lo, x DOT cpn.hi) + Return(Apply(globalVar(VarField.s, (FloatingPointBitsPolyfillsClass, polyfillMethod)), args)) }) }) })) @@ -1070,7 +1098,7 @@ private[emitter] object CoreJSLib { Return(Apply(genIdentBracketSelect(fpBitsDataView, "getFloat64"), List(0, bool(true)))) ) } else { - Return(genLongApplyStatic(LongImpl.bitsToDouble, x, fpBitsDataView)) + Return(genLongApplyStatic(LongImpl.bitsToDouble, x DOT cpn.lo, x DOT cpn.hi, fpBitsDataView)) } } } @@ -1190,7 +1218,15 @@ private[emitter] object CoreJSLib { condDefs(esVersion < ESVersion.ES2015)( defineFunction5(VarField.systemArraycopy) { (src, srcPos, dest, destPos, length) => - genCallHelper(VarField.arraycopyGeneric, src.u, srcPos, dest.u, destPos, length) + if (useBigIntForLongs) { + genCallHelper(VarField.arraycopyGeneric, src.u, srcPos, dest.u, destPos, length) + } else { + If((src DOT classData) === genClassDataOf(ArrayTypeRef(LongRef, 1)), { + genCallHelper(VarField.arraycopyGeneric, src.u, srcPos << 1, dest.u, destPos << 1, length << 1) + }, { + genCallHelper(VarField.arraycopyGeneric, src.u, srcPos, dest.u, destPos, length) + }) + } } ) ::: condDefs(esVersion >= ESVersion.ES2015 && nullPointers != CheckedBehavior.Unchecked)( @@ -1433,7 +1469,15 @@ private[emitter] object CoreJSLib { defineFunction1(VarField.bC) { c => Return(New(globalVar(VarField.Char, CoreVar), c :: Nil)) } ::: - extractWithGlobals(globalVarDef(VarField.bC0, CoreVar, genCallHelper(VarField.bC, 0))) + extractWithGlobals(globalVarDef(VarField.bC0, CoreVar, genCallHelper(VarField.bC, 0))) ::: + + // Boxes for Longs + condDefs(!useBigIntForLongs)( + defineFunction2(VarField.bL) { (lo, hi) => + Return(New(globalVar(VarField.Long, CoreVar), lo :: hi :: Nil)) + } ::: + extractWithGlobals(globalVarDef(VarField.bL0, CoreVar, genCallHelper(VarField.bL, 0, 0))) + ) ) ::: ( if (asInstanceOfs != CheckedBehavior.Unchecked) { // Unboxes for everything @@ -1453,7 +1497,24 @@ private[emitter] object CoreJSLib { defineUnbox(VarField.uB, BoxedByteClass, _ | 0) ::: defineUnbox(VarField.uS, BoxedShortClass, _ | 0) ::: defineUnbox(VarField.uI, BoxedIntegerClass, _ | 0) ::: - defineUnbox(VarField.uJ, BoxedLongClass, v => If(v === Null(), 
genLongZero(), v)) ::: + + defineUnbox(VarField.uJ, BoxedLongClass, { v => + if (useBigIntForLongs) { + If(v === Null(), genLongZero(), v) + } else { + If(v === Null(), { + Block( + globalVar(VarField.resHi, CoreVar) := 0, + 0 + ) + }, { + Block( + globalVar(VarField.resHi, CoreVar) := v DOT cpn.hi, + v DOT cpn.lo + ) + }) + } + }) ::: /* Since the type test ensures that v is either null or a float, we can * use + instead of fround. @@ -1470,7 +1531,21 @@ private[emitter] object CoreJSLib { Return(If(v === Null(), 0, v DOT cpn.c)) } ::: defineFunction1(VarField.uJ) { v => - Return(If(v === Null(), genLongZero(), v)) + if (useBigIntForLongs) { + Return(If(v === Null(), genLongZero(), v)) + } else { + If(v === Null(), { + Block( + globalVar(VarField.resHi, CoreVar) := 0, + Return(0) + ) + }, { + Block( + globalVar(VarField.resHi, CoreVar) := v DOT cpn.hi, + Return(v DOT cpn.lo) + ) + }) + } } ) } @@ -1498,32 +1573,58 @@ private[emitter] object CoreJSLib { }) } + def lengthOf(arrayVal: Tree): Tree = + if (componentTypeRef == LongRef && !useBigIntForLongs) (arrayVal.u.length >>> 1) | 0 + else arrayVal.u.length + val getAndSet = if (arrayIndexOutOfBounds != CheckedBehavior.Unchecked) { val i = varRef("i") val v = varRef("v") + val w = varRef("w") val boundsCheck = { - If((i < 0) || (i >= This().u.length), + If((i < 0) || (i >= lengthOf(This())), genCallHelper(VarField.throwArrayIndexOutOfBoundsException, i)) } val getName = genSyntheticPropertyForDef(SyntheticProperty.get) val setName = genSyntheticPropertyForDef(SyntheticProperty.set) - List( - MethodDef(static = false, getName, paramList(i), None, { - Block( - boundsCheck, - Return(BracketSelect(This().u, i)) - ) - }), - MethodDef(static = false, setName, paramList(i, v), None, { - Block( - boundsCheck, - BracketSelect(This().u, i) := v - ) - }) - ) + if (componentTypeRef == LongRef && !useBigIntForLongs) { + List( + MethodDef(static = false, getName, paramList(i), None, { + Block( + boundsCheck, + i := (i << 1), + globalVar(VarField.resHi, CoreVar) := BracketSelect(This().u, (i + 1) | 0), + Return(BracketSelect(This().u, i)) + ) + }), + MethodDef(static = false, setName, paramList(i, v, w), None, { + Block( + boundsCheck, + i := (i << 1), + BracketSelect(This().u, i) := v, + BracketSelect(This().u, (i + 1) | 0) := w + ) + }) + ) + } else { + List( + MethodDef(static = false, getName, paramList(i), None, { + Block( + boundsCheck, + Return(BracketSelect(This().u, i)) + ) + }), + MethodDef(static = false, setName, paramList(i, v), None, { + Block( + boundsCheck, + BracketSelect(This().u, i) := v + ) + }) + ) + } } else if (isArrayOfObject && arrayStores != CheckedBehavior.Unchecked) { /* We need to define a straightforward "set" method, without any * check necessary, which will be overridden in subclasses. 
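A hypothetical Scala model (illustrative names only) of the packed layout implemented by the getters and setters above: a Long array of logical length n is backed by an Int array of length 2n, with the lo word of element i at index 2*i and its hi word at index 2*i + 1.

    final class PackedLongArray(n: Int) {
      private val u = new Array[Int](n << 1)

      def length: Int = u.length >>> 1           // counterpart of `lengthOf`

      def apply(i: Int): Long = {                // counterpart of the "get" method
        val lo = u(i << 1)
        val hi = u((i << 1) + 1)
        (hi.toLong << 32) | (lo.toLong & 0xffffffffL)
      }

      def update(i: Int, value: Long): Unit = {  // counterpart of the "set" method
        u(i << 1) = value.toInt
        u((i << 1) + 1) = (value >>> 32).toInt
      }
    }

The emitted "get" differs in that it stashes the hi word in the `resHi` global instead of building a Long value, and the emitted "set" receives the two words as separate parameters.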
@@ -1550,24 +1651,29 @@ private[emitter] object CoreJSLib { val copyToName = genSyntheticPropertyForDef(SyntheticProperty.copyTo) + def scale(sizeVal: Tree): Tree = + if (componentTypeRef == LongRef && !useBigIntForLongs) sizeVal << 1 + else sizeVal + val methodDef = MethodDef(static = false, copyToName, paramList(srcPos, dest, destPos, length), None, { if (isTypedArray) { Block( if (semantics.arrayIndexOutOfBounds != CheckedBehavior.Unchecked) { - genCallHelper(VarField.arraycopyCheckBounds, This().u.length, - srcPos, dest.u.length, destPos, length) + genCallHelper(VarField.arraycopyCheckBounds, lengthOf(This()), + srcPos, lengthOf(dest), destPos, length) } else { Skip() }, Apply(genIdentBracketSelect(dest.u, "set"), - Apply(genIdentBracketSelect(This().u, "subarray"), srcPos :: ((srcPos + length) | 0) :: Nil) :: - destPos :: + Apply(genIdentBracketSelect(This().u, "subarray"), + scale(srcPos) :: scale((srcPos + length) | 0) :: Nil) :: + scale(destPos) :: Nil) ) } else { - genCallHelper(VarField.arraycopyGeneric, This().u, srcPos, - dest.u, destPos, length) + genCallHelper(VarField.arraycopyGeneric, This().u, scale(srcPos), + dest.u, scale(destPos), length) } }) methodDef :: Nil @@ -1616,18 +1722,27 @@ private[emitter] object CoreJSLib { If(arg < 0, genCallHelper(VarField.throwNegativeArraySizeException)) } + val scaleArg = + if (componentTypeRef == LongRef && !useBigIntForLongs) Assign(arg, arg << 1) + else Skip() + getArrayUnderlyingTypedArrayClassRef(componentTypeRef) match { case Some(typeArrayClassWithGlobalRefs) => Block( arraySizeCheck, + scaleArg, This().u := New(extractWithGlobals(typeArrayClassWithGlobalRefs), arg :: Nil) ) case None => + val zeroElem = + if (componentTypeRef == LongRef && !useBigIntForLongs) int(0) + else genZeroOf(componentTypeRef) Block( arraySizeCheck, + scaleArg, This().u := New(ArrayRef, arg :: Nil), For(let(i, 0), i < arg, i.++, { - BracketSelect(This().u, i) := genZeroOf(componentTypeRef) + BracketSelect(This().u, i) := zeroElem }) ) } @@ -1702,7 +1817,7 @@ private[emitter] object CoreJSLib { If(arrayClass !== Undefined(), { // it is undefined for void privateFieldSet(cpn._arrayOf, Apply(New(globalVar(VarField.TypeData, CoreVar), Nil) DOT cpn.initSpecializedArray, - List(This(), arrayClass, typedArrayClass))) + List(This(), arrayClass, typedArrayClass, arrayEncodedName === str("J")))) }), Return(This()) ) @@ -1793,13 +1908,14 @@ private[emitter] object CoreJSLib { val componentData = varRef("componentData") val arrayClass = varRef("arrayClass") val typedArrayClass = varRef("typedArrayClass") + val isLongArray = varRef("isLongArray") val isAssignableFromFun = varRef("isAssignableFromFun") val self = varRef("self") val that = varRef("that") val obj = varRef("obj") val array = varRef("array") MethodDef(static = false, Ident(cpn.initSpecializedArray), - paramList(componentData, arrayClass, typedArrayClass, isAssignableFromFun), None, { + paramList(componentData, arrayClass, typedArrayClass, isLongArray, isAssignableFromFun), None, { Block( arrayClass.prototype DOT classData := This(), initArrayCommonBody(arrayClass, componentData, componentData, 1), @@ -1808,15 +1924,41 @@ private[emitter] object CoreJSLib { genArrowFunction(paramList(that), Return(self === that)) }), privateFieldSet(cpn.wrapArray, { - If(typedArrayClass, { - genArrowFunction(paramList(array), { - Return(New(arrayClass, New(typedArrayClass, array :: Nil) :: Nil)) + val whenNotRuntimeLongArray = { + If(typedArrayClass, { + genArrowFunction(paramList(array), { + Return(New(arrayClass, 
New(typedArrayClass, array :: Nil) :: Nil)) + }) + }, { + genArrowFunction(paramList(array), { + Return(New(arrayClass, array :: Nil)) + }) }) - }, { - genArrowFunction(paramList(array), { - Return(New(arrayClass, array :: Nil)) + } + if (useBigIntForLongs) { + whenNotRuntimeLongArray + } else { + If(isLongArray, { + genArrowFunction(paramList(array), { + val len = varRef("len") + val result = varRef("result") + val u = varRef("u") + val i = varRef("i") + Block( + const(len, array.length | 0), + const(result, New(arrayClass, len :: Nil)), + const(u, result.u), + For(let(i, 0), i < len, i := (i + 1) | 0, Block( + BracketSelect(u, i << 1) := BracketSelect(array, i) DOT cpn.lo, + BracketSelect(u, ((i << 1) + 1) | 0) := BracketSelect(array, i) DOT cpn.hi + )), + Return(result) + ) + }) + }, { + whenNotRuntimeLongArray }) - }) + } }), privateFieldSet(cpn.isInstance, genArrowFunction(paramList(obj), Return(obj instanceof arrayClass))), @@ -2187,6 +2329,7 @@ private[emitter] object CoreJSLib { typeDataVar, globalVar(VarField.ac, ObjectClass), Undefined(), // typedArray + bool(false), // isLongArray genArrowFunction(paramList(that), { val thatDepth = varRef("thatDepth") Block( @@ -2214,7 +2357,7 @@ private[emitter] object CoreJSLib { */ val zero = primRef match { case VoidRef => Undefined() - case LongRef if !allowBigIntsForLongs => Null() // set later when $L0 is initialized + case LongRef if !allowBigIntsForLongs => genBoxedZeroOf(LongType) case _ => genZeroOf(primRef) } diff --git a/linker/shared/src/main/scala/org/scalajs/linker/backend/emitter/Emitter.scala b/linker/shared/src/main/scala/org/scalajs/linker/backend/emitter/Emitter.scala index 77e0c1ba39..d95a84a60f 100644 --- a/linker/shared/src/main/scala/org/scalajs/linker/backend/emitter/Emitter.scala +++ b/linker/shared/src/main/scala/org/scalajs/linker/backend/emitter/Emitter.scala @@ -1434,12 +1434,7 @@ object Emitter { callMethod(BoxedStringClass, hashCodeMethodName), cond(!config.coreSpec.esFeatures.allowBigIntsForLongs) { - multiple( - instanceTests(LongImpl.RuntimeLongClass), - instantiateClass(LongImpl.RuntimeLongClass, LongImpl.AllConstructors.toList), - callMethods(LongImpl.RuntimeLongClass, LongImpl.BoxedLongMethods.toList), - callStaticMethods(LongImpl.RuntimeLongClass, LongImpl.OperatorMethods.toList) - ) + callStaticMethods(LongImpl.RuntimeLongClass, LongImpl.OperatorMethods.toList) }, cond(config.coreSpec.esFeatures.esVersion < ESVersion.ES2015) { diff --git a/linker/shared/src/main/scala/org/scalajs/linker/backend/emitter/FunctionEmitter.scala b/linker/shared/src/main/scala/org/scalajs/linker/backend/emitter/FunctionEmitter.scala index e3f696248c..974b2452a2 100644 --- a/linker/shared/src/main/scala/org/scalajs/linker/backend/emitter/FunctionEmitter.scala +++ b/linker/shared/src/main/scala/org/scalajs/linker/backend/emitter/FunctionEmitter.scala @@ -488,11 +488,20 @@ private[emitter] class FunctionEmitter(sjsGen: SJSGen) { implicit pos: Position): WithGlobals[js.Function] = { performOptimisticThenPessimisticRuns { - val thisIdent = fileLevelVarIdent(VarField.thiz, thisOriginalName) + val thisParams = if (env0.enclosingClassName.contains(BoxedLongClass) && !useBigIntForLongs) { + List( + js.ParamDef(fileLevelVarIdent(VarField.thiz, thisOriginalName)), + js.ParamDef(fileLevelVarIdent(VarField.thizhi, thisOriginalName)) + ) + } else { + List( + js.ParamDef(fileLevelVarIdent(VarField.thiz, thisOriginalName)) + ) + } val env = env0.withExplicitThis() val js.Function(jsFlags, jsParams, restParam, jsBody) = 
desugarToFunctionInternal(ClosureFlags.function, params, None, body, isStat, env) - js.Function(jsFlags, js.ParamDef(thisIdent) :: jsParams, restParam, jsBody) + js.Function(jsFlags, thisParams ::: jsParams, restParam, jsBody) } } @@ -536,7 +545,9 @@ private[emitter] class FunctionEmitter(sjsGen: SJSGen) { val jsFlags = if (esFeatures.useECMAScript2015Semantics) flags else flags.withArrow(false) - val jsParams = params.map(transformParamDef(_)) + val jsParams = + if (useBigIntForLongs) params.map(transformParamDef(_)) + else params.flatMap(transformParamDefExpanded(_)) if (es2015) { val jsRestParam = restParam.map(transformParamDef(_)) @@ -614,9 +625,20 @@ private[emitter] class FunctionEmitter(sjsGen: SJSGen) { case Select(qualifier, field) => unnest(checkNotNull(qualifier), rhs) { (newQualifier, newRhs, env0) => implicit val env = env0 - js.Assign( - genSelect(transformExprNoChar(newQualifier), field)(lhs.pos), - transformExpr(newRhs, lhs.tpe)) + if (isRTLongType(lhs.tpe)) { + val tempQual = newSyntheticVar() + val (lhsLo, lhsHi) = genSelectLong(js.VarRef(tempQual), field)(lhs.pos) + val (rhsLo, rhsHi) = transformLongExpr(newRhs) + js.Block( + genConst(tempQual, transformExprNoChar(newQualifier)), + js.Assign(lhsLo, rhsLo), + js.Assign(lhsHi, rhsHi) + ) + } else { + js.Assign( + genSelect(transformExprNoChar(newQualifier), field)(lhs.pos), + transformExpr(newRhs, lhs.tpe)) + } } case ArraySelect(array, index) => @@ -624,7 +646,6 @@ private[emitter] class FunctionEmitter(sjsGen: SJSGen) { implicit val env = env0 val genArray = transformExprNoChar(newArray) val genIndex = transformExprNoChar(newIndex) - val genRhs = transformExpr(newRhs, lhs.tpe) /* We need to use a checked 'set' if at least one of the following applies: * - Array index out of bounds are checked, or @@ -635,14 +656,42 @@ private[emitter] class FunctionEmitter(sjsGen: SJSGen) { ((semantics.arrayStores != CheckedBehavior.Unchecked) && RefArray.is(array.tpe)) } - if (checked) { - genSyntheticPropApply(genArray, SyntheticProperty.set, genIndex, genRhs) + if (isRTLongType(lhs.tpe)) { + val (rhsLo, rhsHi) = transformLongExpr(newRhs) + + if (checked) { + genSyntheticPropApply(genArray, SyntheticProperty.set, genIndex, rhsLo, rhsHi) + } else { + withTempJSVar(genSyntheticPropSelect(genArray, SyntheticProperty.u)(lhs.pos)) { uRef => + genIndex match { + case js.IntLiteral(genIndexValue) => + val scaledIdx = genIndexValue << 1 + js.Block( + js.Assign(js.BracketSelect(uRef, js.IntLiteral(scaledIdx)(lhs.pos))(lhs.pos), rhsLo), + js.Assign(js.BracketSelect(uRef, js.IntLiteral(scaledIdx + 1)(lhs.pos))(lhs.pos), rhsHi) + ) + case _ => + withTempJSVar(genIndex << 1) { scaledIndex => + js.Block( + js.Assign(js.BracketSelect(uRef, scaledIndex)(lhs.pos), rhsLo), + js.Assign(js.BracketSelect(uRef, (scaledIndex + 1) | 0)(lhs.pos), rhsHi) + ) + } + } + } + } } else { - js.Assign( - js.BracketSelect( - genSyntheticPropSelect(genArray, SyntheticProperty.u)(lhs.pos), - genIndex)(lhs.pos), - genRhs) + val genRhs = transformExpr(newRhs, lhs.tpe) + + if (checked) { + genSyntheticPropApply(genArray, SyntheticProperty.set, genIndex, genRhs) + } else { + js.Assign( + js.BracketSelect( + genSyntheticPropSelect(genArray, SyntheticProperty.u)(lhs.pos), + genIndex)(lhs.pos), + genRhs) + } } } @@ -1076,7 +1125,7 @@ private[emitter] class FunctionEmitter(sjsGen: SJSGen) { case New(className, constr, args) if noExtractYet => New(className, constr, recs(args)) - case Select(qualifier, item) if noExtractYet => + case Select(qualifier, item) if noExtractYet && 
(!isRTLongType(arg.tpe) || isDuplicatable(qualifier)) => Select(rec(qualifier), item)(arg.tpe) case Apply(flags, receiver, method, args) if noExtractYet => val newArgs = recs(args) @@ -1091,12 +1140,17 @@ private[emitter] class FunctionEmitter(sjsGen: SJSGen) { case ApplyTypedClosure(flags, fun, args) if noExtractYet => val newArgs = recs(args) ApplyTypedClosure(flags, rec(fun), newArgs) - case ArraySelect(array, index) if noExtractYet => + case arg @ ArraySelect(array, index) if noExtractYet && canArraySelectBeExpression(arg) => val newIndex = rec(index) ArraySelect(rec(array), newIndex)(arg.tpe) case RecordSelect(record, field) if noExtractYet => RecordSelect(rec(record), field)(arg.tpe) + case Transient(PackLong(lo, hi)) => + val newHi = rec(hi) + Transient(PackLong(rec(lo), newHi)) + case Transient(ExtractLongHi(longValue)) => + Transient(ExtractLongHi(rec(longValue))) case Transient(Cast(expr, tpe)) => Transient(Cast(rec(expr), tpe)) case Transient(ZeroOf(runtimeClass)) => @@ -1114,7 +1168,7 @@ private[emitter] class FunctionEmitter(sjsGen: SJSGen) { Transient(TypedArrayToArray(rec(expr), primRef)) case If(cond, thenp, elsep) - if noExtractYet && isExpression(thenp) && isExpression(elsep) => + if noExtractYet && !isRTLongType(arg.tpe) && isExpression(thenp) && isExpression(elsep) => If(rec(cond), thenp, elsep)(arg.tpe) case _ => @@ -1232,15 +1286,17 @@ private[emitter] class FunctionEmitter(sjsGen: SJSGen) { case Throw => false case WrapAsThrowable | UnwrapFromThrowable => - tree.lhs match { - case VarRef(_) | Transient(JSVarRef(_, _)) => true - case _ => false - } + isDuplicatable(tree.lhs) case _ => true } } + private def canArraySelectBeExpression(tree: ArraySelect): Boolean = { + !isRTLongType(tree.tpe) || + (semantics.arrayIndexOutOfBounds != CheckedBehavior.Unchecked) + } + /** Common implementation for the functions below. * A pure expression can be moved around or executed twice, because it * will always produce the same result and never have side-effects. 
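A small runnable illustration (all names made up) of why the conditions above demand a duplicatable qualifier for Long-typed field reads: such a read expands into two selects, one per word, so a non-trivial qualifier would otherwise be evaluated twice.

    object QualifierDuplicationSketch {
      final class Holder(val f_lo: Int, val f_hi: Int)
      var evaluations = 0
      def makeHolder(): Holder = { evaluations += 1; new Holder(1, 2) }

      def main(args: Array[String]): Unit = {
        // Naively duplicating the qualifier runs its side effect twice:
        val lo1 = makeHolder().f_lo
        val hi1 = makeHolder().f_hi
        assert(lo1 == 1 && hi1 == 2 && evaluations == 2)

        // Binding the qualifier to a temp first, which is what the emitter
        // does when it is not a variable reference or a literal:
        evaluations = 0
        val q = makeHolder()
        val lo2 = q.f_lo
        val hi2 = q.f_hi
        assert(lo2 == 1 && hi2 == 2 && evaluations == 1)
      }
    }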
@@ -1275,6 +1331,8 @@ private[emitter] class FunctionEmitter(sjsGen: SJSGen) { allowUnpure || !env.isLocalMutable(name) case Transient(JSVarRef(_, mutable)) => allowUnpure || !mutable + case Transient(JSLongArraySelect(_, _)) => + allowUnpure case tree @ UnaryOp(op, lhs) if canUnaryOpBeExpression(tree) => if (op == UnaryOp.CheckNotNull) @@ -1312,12 +1370,16 @@ private[emitter] class FunctionEmitter(sjsGen: SJSGen) { // Expressions preserving pureness (modulo NPE) case Block(trees) => trees forall test - case If(cond, thenp, elsep) => test(cond) && test(thenp) && test(elsep) + case If(cond, thenp, elsep) => !isRTLongType(tree.tpe) && test(cond) && test(thenp) && test(elsep) case BinaryOp(_, lhs, rhs) => test(lhs) && test(rhs) case RecordSelect(record, _) => test(record) case IsInstanceOf(expr, _) => test(expr) // Transients preserving pureness (modulo NPE) + case Transient(PackLong(lo, hi)) => + test(lo) && test(hi) + case Transient(ExtractLongHi(longValue)) => + test(longValue) case Transient(Cast(expr, _)) => test(expr) case Transient(ZeroOf(runtimeClass)) => @@ -1327,7 +1389,9 @@ private[emitter] class FunctionEmitter(sjsGen: SJSGen) { // Expressions preserving side-effect freedom (modulo NPE) case Select(qualifier, _) => - allowUnpure && testNPE(qualifier) + allowUnpure && testNPE(qualifier) && { + !isRTLongType(tree.tpe) || isDuplicatable(qualifier) + } case SelectStatic(_) => allowUnpure case ArrayValue(tpe, elems) => @@ -1372,8 +1436,9 @@ private[emitter] class FunctionEmitter(sjsGen: SJSGen) { // Array operations with conditional exceptions case NewArray(tpe, length) => allowBehavior(semantics.negativeArraySizes) && allowUnpure && test(length) - case ArraySelect(array, index) => - allowBehavior(semantics.arrayIndexOutOfBounds) && allowUnpure && testNPE(array) && test(index) + case tree @ ArraySelect(array, index) => + allowBehavior(semantics.arrayIndexOutOfBounds) && allowUnpure && + canArraySelectBeExpression(tree) && testNPE(array) && test(index) // Casts case AsInstanceOf(expr, _) => @@ -1429,6 +1494,27 @@ private[emitter] class FunctionEmitter(sjsGen: SJSGen) { test(tree) } + /** Can the given tree be freely duplicated without extra computation? + * + * In practice, this tests whether the tree is a variable reference or a + * literal. + */ + private def isDuplicatable(tree: Tree): Boolean = tree match { + case VarRef(_) | Transient(JSVarRef(_, _)) | _:Literal => true + case _ => false + } + + /** Can the given tree be freely duplicated while also being pure? + * + * This is true if it is an immutable variable reference or a literal. + */ + private def isPureDuplicatable(tree: Tree)(implicit env: Env): Boolean = tree match { + case VarRef(name) => !env.isLocalMutable(name) + case Transient(JSVarRef(_, mutable)) => !mutable + case _: Literal => true + case _ => false + } + /** Test whether the given tree is a standard JS expression. 
*/ def isExpression(tree: Tree)(implicit env: Env): Boolean = @@ -1458,6 +1544,13 @@ private[emitter] class FunctionEmitter(sjsGen: SJSGen) { mutable || fMutable, fRhs) }) + case LongType if !useBigIntForLongs => + val (lo, hi) = transformLongExpr(rhs) + js.Block( + genLet(identLongLo(ident), mutable, lo), + genLet(identLongHi(ident), mutable, hi) + ) + case _ => genLet(ident, mutable, transformExpr(rhs, tpe)) } @@ -1473,6 +1566,12 @@ private[emitter] class FunctionEmitter(sjsGen: SJSGen) { doEmptyVarDef(makeRecordFieldIdent(ident, fName, fOrigName), fTpe) }) + case LongType if !useBigIntForLongs => + js.Block( + genEmptyMutableLet(identLongLo(ident)), + genEmptyMutableLet(identLongHi(ident)) + ) + case _ => genEmptyMutableLet(ident) } @@ -1497,6 +1596,15 @@ private[emitter] class FunctionEmitter(sjsGen: SJSGen) { fRhs) }) + case LongType if !useBigIntForLongs => + // TODO Handle static mirrors + val (lhsLo, lhsHi) = transformLongExpr(lhs) + val (rhsLo, rhsHi) = transformLongExpr(rhs) + js.Block( + js.Assign(lhsLo, rhsLo), + js.Assign(lhsHi, rhsHi) + ) + case _ => val base = js.Assign(transformExpr(lhs, preserveChar = true), transformExpr(rhs, lhs.tpe)) @@ -1629,10 +1737,33 @@ private[emitter] class FunctionEmitter(sjsGen: SJSGen) { case Lhs.Assign(lhs) => doAssign(lhs, rhs) case Lhs.ReturnFromFunction => - if (env.expectedReturnType == VoidType) - js.Block(transformStat(rhs, tailPosLabels = Set.empty), js.Return(js.Undefined())) - else - js.Return(transformExpr(rhs, env.expectedReturnType)) + env.expectedReturnType match { + case VoidType => + js.Block(transformStat(rhs, tailPosLabels = Set.empty), js.Return(js.Undefined())) + case LongType if !useBigIntForLongs => + val (lo, hi) = transformLongExpr(rhs) + val resHi = globalVar(VarField.resHi, CoreVar) + if (hi == resHi) { + /* No need to assign resHi to itself. + * This happens for all tail calls of Long-returning functions. 
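+ * For example, a tail call `return g(x)` in a Long-returning method compiles + * to a plain `return` of the call: the callee has already written its hi word + * to `resHi`, so only the lo word needs to flow through the return.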
+ */ + js.Return(lo) + } else { + val tempLo = newSyntheticVar() + val resHiAssign = if (needToUseGloballyMutableVarSetter(CoreVar)) { + js.Apply(globalVar(VarField.setResHi, CoreVar), hi :: Nil) + } else { + js.Assign(resHi, hi) + } + js.Block( + genConst(tempLo, lo), + resHiAssign, + js.Return(js.VarRef(tempLo)) + ) + } + case _ => + js.Return(transformExpr(rhs, env.expectedReturnType)) + } case Lhs.Return(l) => doReturnToLabel(l) case Lhs.Throw => @@ -1760,8 +1891,15 @@ private[emitter] class FunctionEmitter(sjsGen: SJSGen) { } case Select(qualifier, item) => - unnest(qualifier) { (newQualifier, env) => - redo(Select(newQualifier, item)(rhs.tpe))(env) + unnest(qualifier) { (newQualifier, newEnv) => + implicit val env = newEnv + if (isRTLongType(rhs.tpe) && !isDuplicatable(newQualifier)) { + withTempJSVar(newQualifier) { varRef => + redo(Select(varRef, item)(rhs.tpe)) + } + } else { + redo(Select(newQualifier, item)(rhs.tpe)) + } } case Apply(flags, receiver, method, args) => @@ -1824,9 +1962,29 @@ private[emitter] class FunctionEmitter(sjsGen: SJSGen) { redo(ArrayValue(tpe, newElems))(env) } - case ArraySelect(array, index) => - unnest(checkNotNull(array), index) { (newArray, newIndex, env) => - redo(ArraySelect(newArray, newIndex)(rhs.tpe))(env) + case rhs @ ArraySelect(array, index) => + unnest(checkNotNull(array), index) { (newArray, newIndex, newEnv) => + implicit val env = newEnv + + if (canArraySelectBeExpression(rhs)) { + redo(ArraySelect(newArray, newIndex)(rhs.tpe))(env) + } else { + import TreeDSL._ + + val genArray = transformExprNoChar(newArray) + val genIndex = transformExprNoChar(newIndex) + + withTempJSVar(genSyntheticPropSelect(genArray, SyntheticProperty.u)) { uRef => + genIndex match { + case js.IntLiteral(genIndexValue) => + redo(Transient(JSLongArraySelect(uRef, js.IntLiteral(genIndexValue << 1)))) + case _ => + withTempJSVar(genIndex << 1) { scaledIndex => + redo(Transient(JSLongArraySelect(uRef, scaledIndex))) + } + } + } + } } case RecordSelect(record, field) => @@ -1844,6 +2002,16 @@ private[emitter] class FunctionEmitter(sjsGen: SJSGen) { redo(AsInstanceOf(newExpr, tpe))(env) } + case Transient(PackLong(lo, hi)) => + unnest(lo, hi) { (newLo, newHi, env) => + redo(Transient(PackLong(newLo, newHi)))(env) + } + + case Transient(ExtractLongHi(longValue)) => + unnest(longValue) { (newLongValue, env) => + redo(Transient(ExtractLongHi(newLongValue)))(env) + } + case Transient(Cast(expr, tpe)) => unnest(expr) { (newExpr, env) => redo(Transient(Cast(newExpr, tpe)))(env) @@ -2079,15 +2247,17 @@ private[emitter] class FunctionEmitter(sjsGen: SJSGen) { private def withTempJSVar(value: Tree)(makeBody: Transient => js.Tree)( implicit env: Env, pos: Position): js.Tree = { - withTempJSVar(transformExpr(value, value.tpe), value.tpe)(makeBody) + val varIdent = newSyntheticVar() + val varDef = genLet(varIdent, mutable = false, transformExpr(value, value.tpe)) + val body = makeBody(Transient(JSVarRef(varIdent, mutable = false)(value.tpe))) + js.Block(varDef, body) } - private def withTempJSVar(value: js.Tree, tpe: Type)( - makeBody: Transient => js.Tree)( + private def withTempJSVar(value: js.Tree)(makeBody: js.VarRef => js.Tree)( implicit pos: Position): js.Tree = { val varIdent = newSyntheticVar() val varDef = genLet(varIdent, mutable = false, value) - val body = makeBody(Transient(JSVarRef(varIdent, mutable = false)(tpe))) + val body = makeBody(js.VarRef(varIdent)) js.Block(varDef, body) } @@ -2186,17 +2356,33 @@ private[emitter] class FunctionEmitter(sjsGen: SJSGen) { def 
transformTypedArgs(methodName: MethodName, args: List[Tree])( implicit env: Env): List[js.Tree] = { - if (args.forall(_.tpe != CharType)) { + if (args.forall(a => a.tpe != CharType && !isRTLongType(a.tpe))) { // Fast path args.map(transformExpr(_, preserveChar = true)) } else { - args.zip(methodName.paramTypeRefs).map { - case (arg, CharRef) => transformExpr(arg, preserveChar = true) - case (arg, _) => transformExpr(arg, preserveChar = false) + args.zip(methodName.paramTypeRefs).flatMap { + case (arg, CharRef) => + transformExpr(arg, preserveChar = true) :: Nil + case (arg, LongRef) if !useBigIntForLongs => + val (lo, hi) = transformLongExpr(arg) + List(lo, hi) + case (arg, _) => + transformExpr(arg, preserveChar = false) :: Nil } } } + def transformTypedArgs(paramTypes: List[Type], args: List[Tree])( + implicit env: Env): List[js.Tree] = { + args.zip(paramTypes).flatMap { + case (arg, LongType) if !useBigIntForLongs => + val (lo, hi) = transformLongExpr(arg) + List(lo, hi) + case (arg, paramType) => + transformExpr(arg, paramType) :: Nil + } + } + /** Desugar an expression of the IR into JavaScript. */ def transformExpr(tree: Tree, preserveChar: Boolean)( implicit env: Env): js.Tree = { @@ -2226,6 +2412,11 @@ private[emitter] class FunctionEmitter(sjsGen: SJSGen) { } val baseResult: js.Tree = tree match { + case _ if isRTLongType(tree.tpe) => + // We must be boxing + val (lo, hi) = transformLongExpr(tree) + genCallHelper(VarField.bL, lo, hi) + // Control flow constructs case Block(stats :+ expr) => @@ -2267,127 +2458,22 @@ private[emitter] class FunctionEmitter(sjsGen: SJSGen) { globalKnowledge.getJSNativeLoadSpec(className, member.name) extractWithGlobals(genLoadJSFromSpec(jsNativeLoadSpec)) - case Apply(_, receiver, method, args) => - val methodName = method.name - - def newReceiver(asChar: Boolean): js.Tree = { - if (asChar) { - /* When statically calling a (hijacked) method of j.l.Character, - * the receiver must be passed as a primitive CharType. If it is - * not already a CharType, we must introduce a cast to unbox the - * value. - */ - if (receiver.tpe == CharType) - transformExpr(receiver, preserveChar = true) - else - transformExpr(AsInstanceOf(checkNotNull(receiver), CharType), preserveChar = true) - } else { - /* For other primitive types, unboxes/casts are not necessary, - * because they would only convert `null` to the zero value of - * the type. However, `null` is ruled out by `checkNotNull` (or - * because it is UB). - */ - transformExprNoChar(checkNotNull(receiver)) - } - } - - val newArgs = transformTypedArgs(method.name, args) - - def genNormalApply(): js.Tree = - js.Apply(newReceiver(false) DOT genMethodIdent(method), newArgs) - - def genDispatchApply(): js.Tree = - js.Apply(globalVar(VarField.dp, methodName), newReceiver(false) :: newArgs) - - def genHijackedMethodApply(className: ClassName): js.Tree = - genApplyStaticLike(VarField.f, className, method, newReceiver(className == BoxedCharacterClass) :: newArgs) - - if (isMaybeHijackedClass(receiver.tpe) && - !methodName.isReflectiveProxy) { - receiver.tpe match { - case AnyType | AnyNotNullType => - genDispatchApply() - - case LongType | ClassType(BoxedLongClass, _) if !useBigIntForLongs => - // All methods of java.lang.Long are also in RuntimeLong - genNormalApply() - - case _ if hijackedMethodsInheritedFromObject.contains(methodName) => - /* Methods inherited from j.l.Object do not have a dedicated - * hijacked method that we can call, even when we know the - * precise type of the receiver. 
Therefore, we always have to - * go through the dispatcher in those cases. - * - * Note that when the optimizer is enabled, if the receiver - * had a precise type, the method would have been inlined - * anyway (because all the affected methods are @inline). - * Therefore this roundabout of dealing with this does not - * prevent any optimization. - */ - genDispatchApply() - - case ClassType(className, _) if !HijackedClasses.contains(className) => - /* This is a strict ancestor of a hijacked class. We need to - * use the dispatcher available in the helper method. - */ - genDispatchApply() - - case tpe => - /* This is a concrete hijacked class or its corresponding - * primitive type. Directly call the hijacked method. Note that - * there might not even be a dispatcher for this method, so - * this is important. - */ - genHijackedMethodApply(typeToBoxedHijackedClass(tpe)) - } - } else { - genNormalApply() - } - - case ApplyStatically(flags, receiver, className, method, args) => - val newReceiver = transformExprNoChar(checkNotNull(receiver)) - val newArgs = transformTypedArgs(method.name, args) - val transformedArgs = newReceiver :: newArgs - - if (flags.isConstructor) { - genApplyStaticLike(VarField.ct, className, method, transformedArgs) - } else if (flags.isPrivate) { - genApplyStaticLike(VarField.p, className, method, transformedArgs) - } else if (globalKnowledge.isInterface(className)) { - genApplyStaticLike(VarField.f, className, method, transformedArgs) - } else { - val fun = - globalVar(VarField.c, className).prototype DOT genMethodIdent(method) - js.Apply(fun DOT "call", transformedArgs) - } - - case ApplyStatic(flags, className, method, args) => - genApplyStaticLike( - if (flags.isPrivate) VarField.ps else VarField.s, - className, - method, - transformTypedArgs(method.name, args)) + case _:Apply | _:ApplyStatically | _:ApplyStatic | _:ApplyTypedClosure => + transformTypedApplyCommon(tree) case tree: ApplyDynamicImport => transformApplyDynamicImport(tree) - case ApplyTypedClosure(_, fun, args) => - val newFun = transformExprNoChar(checkNotNull(fun)) - val newArgs = fun.tpe match { - case ClosureType(paramTypes, _, _) => - for ((arg, paramType) <- args.zip(paramTypes)) yield - transformExpr(arg, paramType) - case NothingType | NullType => - args.map(transformExpr(_, preserveChar = true)) - case _ => - throw new AssertionError( - s"Unexpected type for the fun of ApplyTypedClosure: ${fun.tpe}") - } - js.Apply.makeProtected(newFun, newArgs) - case UnaryOp(op, lhs) => import UnaryOp._ - val newLhs = transformExpr(lhs, preserveChar = (op == CharToInt || op == CheckNotNull)) + + def newLhs: js.Tree = transformExpr(lhs, preserveChar = (op == CharToInt || op == CheckNotNull)) + + def rtLongToNonLongOp(rtLongMethodName: MethodName): js.Tree = { + val (lo, hi) = transformLongExpr(lhs) + genLongApplyStatic(rtLongMethodName, lo, hi) + } + (op: @switch) match { case Boolean_! 
=> js.UnaryOp(JSUnaryOp.!, newLhs) @@ -2399,7 +2485,7 @@ private[emitter] class FunctionEmitter(sjsGen: SJSGen) { if (useBigIntForLongs) js.Apply(genGlobalVarRef("BigInt"), List(newLhs)) else - genLongApplyStatic(LongImpl.fromInt, newLhs) + rtLongToNonLongOp(LongImpl.fromInt) // Narrowing conversions case IntToChar => @@ -2413,10 +2499,18 @@ private[emitter] class FunctionEmitter(sjsGen: SJSGen) { js.BinaryOp(JSBinaryOp.<<, newLhs, js.IntLiteral(16)), js.IntLiteral(16)) case LongToInt => - if (useBigIntForLongs) + if (useBigIntForLongs) { js.Apply(genGlobalVarRef("Number"), List(wrapBigInt32(newLhs))) - else - genLongApplyStatic(LongImpl.toInt, newLhs) + } else { + val (lo, hi) = transformLongExpr(lhs) + hi match { + case _:js.VarRef | _:js.IntLiteral => + // we can safely drop the hi word + lo + case _ => + genLongApplyStatic(LongImpl.toInt, lo, hi) + } + } case DoubleToInt => genCallHelper(VarField.doubleToInt, newLhs) case DoubleToFloat => @@ -2427,19 +2521,19 @@ private[emitter] class FunctionEmitter(sjsGen: SJSGen) { if (useBigIntForLongs) js.Apply(genGlobalVarRef("Number"), List(newLhs)) else - genLongApplyStatic(LongImpl.toDouble, newLhs) + rtLongToNonLongOp(LongImpl.toDouble) case DoubleToLong => if (useBigIntForLongs) genCallHelper(VarField.doubleToLong, newLhs) else - genLongApplyStatic(LongImpl.fromDouble, newLhs) + rtLongToNonLongOp(LongImpl.fromDouble) // Long -> Float (neither widening nor narrowing) case LongToFloat => if (useBigIntForLongs) genCallHelper(VarField.longToFloat, newLhs) else - genLongApplyStatic(LongImpl.toFloat, newLhs) + rtLongToNonLongOp(LongImpl.toFloat) // String.length case String_length => @@ -2467,9 +2561,15 @@ private[emitter] class FunctionEmitter(sjsGen: SJSGen) { js.Apply(genGetDataOf(newLhs) DOT cpn.getSuperclass, Nil) case Array_length => - genIdentBracketSelect( + val rawLength = genIdentBracketSelect( genSyntheticPropSelect(newLhs, SyntheticProperty.u), "length") + lhs.tpe match { + case ArrayType(ArrayTypeRef(LongRef, 1), _) if !useBigIntForLongs => + or0(rawLength >>> js.IntLiteral(1)) + case _ => + rawLength + } case GetClass => genCallHelper(VarField.objectGetClass, newLhs) @@ -2513,18 +2613,20 @@ private[emitter] class FunctionEmitter(sjsGen: SJSGen) { genCallHelper(VarField.systemIdentityHashCode, newLhs) case WrapAsThrowable => - assert(newLhs.isInstanceOf[js.VarRef] || newLhs.isInstanceOf[js.This], newLhs) + val newLhs1 = newLhs + assert(isDuplicatable(lhs), newLhs1) js.If( - genIsInstanceOfClass(newLhs, ThrowableClass), - newLhs, - genScalaClassNew(JavaScriptExceptionClass, AnyArgConstructorName, newLhs)) + genIsInstanceOfClass(newLhs1, ThrowableClass), + newLhs1, + genScalaClassNew(JavaScriptExceptionClass, AnyArgConstructorName, newLhs1)) case UnwrapFromThrowable => - assert(newLhs.isInstanceOf[js.VarRef] || newLhs.isInstanceOf[js.This], newLhs) + val newLhs1 = newLhs + assert(isDuplicatable(lhs), newLhs1) js.If( - genIsInstanceOfClass(newLhs, JavaScriptExceptionClass), - genSelect(newLhs, FieldIdent(exceptionFieldName)), - newLhs) + genIsInstanceOfClass(newLhs1, JavaScriptExceptionClass), + genSelect(newLhs1, FieldIdent(exceptionFieldName)), + newLhs1) // Floating point bit manipulation case Float_toBits => @@ -2534,6 +2636,7 @@ private[emitter] class FunctionEmitter(sjsGen: SJSGen) { case Double_toBits => genCallHelper(VarField.doubleToBits, newLhs) case Double_fromBits => + // TODO Don't box RuntimeLong pair genCallHelper(VarField.doubleFromBits, newLhs) // clz @@ -2543,25 +2646,41 @@ private[emitter] class FunctionEmitter(sjsGen: 
SJSGen) { if (useBigIntForLongs) genCallHelper(VarField.longClz, newLhs) else - genLongApplyStatic(LongImpl.clz, newLhs) + rtLongToNonLongOp(LongImpl.clz) case UnsignedIntToLong => - if (useBigIntForLongs) - js.Apply(genGlobalVarRef("BigInt"), List(shr0(newLhs))) - else - genLongApplyStatic(LongImpl.fromUnsignedInt, newLhs) + assert(useBigIntForLongs, "useBigIntForLongs only") + js.Apply(genGlobalVarRef("BigInt"), List(shr0(newLhs))) } case BinaryOp(op, lhs, rhs) => import BinaryOp._ - val newLhs = transformExpr(lhs, preserveChar = (op == String_+)) - val newRhs = transformExpr(rhs, preserveChar = (op == String_+)) + + def newLhs: js.Tree = transformExpr(lhs, preserveChar = (op == String_+)) + def newRhs: js.Tree = transformExpr(rhs, preserveChar = (op == String_+)) def extractClassData(origTree: Tree, jsTree: js.Tree): js.Tree = origTree match { case ClassOf(typeRef) => genClassDataOf(typeRef)(implicitly, implicitly, origTree.pos) case _ => genGetDataOf(jsTree) } + def longComparisonOp(bigIntBinaryOp: JSBinaryOp.Code, rtLongMethodName: MethodName): js.Tree = { + if (useBigIntForLongs) { + js.BinaryOp(bigIntBinaryOp, newLhs, newRhs) + } else { + val (lhsLo, lhsHi) = transformLongExpr(lhs) + val (rhsLo, rhsHi) = transformLongExpr(rhs) + genLongApplyStatic(rtLongMethodName, lhsLo, lhsHi, rhsLo, rhsHi) + } + } + + def unsignedLongComparisonOp(bigIntBinaryOp: JSBinaryOp.Code, rtLongMethodName: MethodName): js.Tree = { + if (useBigIntForLongs) + js.BinaryOp(bigIntBinaryOp, wrapBigIntU64(newLhs), wrapBigIntU64(newRhs)) + else + longComparisonOp(bigIntBinaryOp, rtLongMethodName) + } + (op: @switch) match { case === | !== => /* Semantically, this is an `Object.is` test in JS. However, we @@ -2635,6 +2754,8 @@ private[emitter] class FunctionEmitter(sjsGen: SJSGen) { js.BinaryOp(JSBinaryOp.!==, newLhs, newRhs) case String_+ => + // TODO Optimize longs here + def charToString(t: js.Tree): js.Tree = genCallHelper(VarField.charToString, t) @@ -2684,126 +2805,68 @@ private[emitter] class FunctionEmitter(sjsGen: SJSGen) { case Int_>= => js.BinaryOp(JSBinaryOp.>=, newLhs, newRhs) case Long_+ => - if (useBigIntForLongs) - wrapBigInt64(js.BinaryOp(JSBinaryOp.+, newLhs, newRhs)) - else - genLongApplyStatic(LongImpl.add, newLhs, newRhs) + assert(useBigIntForLongs, "useBigIntForLongs only") + wrapBigInt64(js.BinaryOp(JSBinaryOp.+, newLhs, newRhs)) case Long_- => - if (useBigIntForLongs) { - lhs match { - case LongLiteral(0L) => - wrapBigInt64(js.UnaryOp(JSUnaryOp.-, newRhs)) - case _ => - wrapBigInt64(js.BinaryOp(JSBinaryOp.-, newLhs, newRhs)) - } - } else { - /* RuntimeLong does not have a dedicated method for 0L - b. - * The regular expansion done by the optimizer for the binary - * form is already optimal. - * So we don't special-case it here either. 
- */ - genLongApplyStatic(LongImpl.sub, newLhs, newRhs) + assert(useBigIntForLongs, "useBigIntForLongs only") + lhs match { + case LongLiteral(0L) => + wrapBigInt64(js.UnaryOp(JSUnaryOp.-, newRhs)) + case _ => + wrapBigInt64(js.BinaryOp(JSBinaryOp.-, newLhs, newRhs)) } case Long_* => - if (useBigIntForLongs) - wrapBigInt64(js.BinaryOp(JSBinaryOp.*, newLhs, newRhs)) - else - genLongApplyStatic(LongImpl.mul, newLhs, newRhs) + assert(useBigIntForLongs, "useBigIntForLongs only") + wrapBigInt64(js.BinaryOp(JSBinaryOp.*, newLhs, newRhs)) case Long_/ | Long_% | Long_unsigned_/ | Long_unsigned_% => - if (useBigIntForLongs) { - val newRhs1 = rhs match { - case LongLiteral(r) if r != 0L => newRhs - case _ => genCallHelper(VarField.checkLongDivisor, newRhs) - } - wrapBigInt64((op: @switch) match { - case Long_/ => js.BinaryOp(JSBinaryOp./, newLhs, newRhs1) - case Long_% => js.BinaryOp(JSBinaryOp.%, newLhs, newRhs1) - case Long_unsigned_/ => js.BinaryOp(JSBinaryOp./, wrapBigIntU64(newLhs), wrapBigIntU64(newRhs1)) - case Long_unsigned_% => js.BinaryOp(JSBinaryOp.%, wrapBigIntU64(newLhs), wrapBigIntU64(newRhs1)) - }) - } else { - // The zero divisor check is performed by the implementation methods - val implMethodName = (op: @switch) match { - case Long_/ => LongImpl.divide - case Long_% => LongImpl.remainder - case Long_unsigned_/ => LongImpl.divideUnsigned - case Long_unsigned_% => LongImpl.remainderUnsigned - } - genLongApplyStatic(implMethodName, newLhs, newRhs) + assert(useBigIntForLongs, "useBigIntForLongs only") + val newRhs1 = rhs match { + case LongLiteral(r) if r != 0L => newRhs + case _ => genCallHelper(VarField.checkLongDivisor, newRhs) } + wrapBigInt64((op: @switch) match { + case Long_/ => js.BinaryOp(JSBinaryOp./, newLhs, newRhs1) + case Long_% => js.BinaryOp(JSBinaryOp.%, newLhs, newRhs1) + case Long_unsigned_/ => js.BinaryOp(JSBinaryOp./, wrapBigIntU64(newLhs), wrapBigIntU64(newRhs1)) + case Long_unsigned_% => js.BinaryOp(JSBinaryOp.%, wrapBigIntU64(newLhs), wrapBigIntU64(newRhs1)) + }) case Long_| => - if (useBigIntForLongs) - wrapBigInt64(js.BinaryOp(JSBinaryOp.|, newLhs, newRhs)) - else - genLongApplyStatic(LongImpl.or, newLhs, newRhs) + assert(useBigIntForLongs, "useBigIntForLongs only") + wrapBigInt64(js.BinaryOp(JSBinaryOp.|, newLhs, newRhs)) case Long_& => - if (useBigIntForLongs) - wrapBigInt64(js.BinaryOp(JSBinaryOp.&, newLhs, newRhs)) - else - genLongApplyStatic(LongImpl.and, newLhs, newRhs) + assert(useBigIntForLongs, "useBigIntForLongs only") + wrapBigInt64(js.BinaryOp(JSBinaryOp.&, newLhs, newRhs)) case Long_^ => - if (useBigIntForLongs) { - lhs match { - case LongLiteral(-1L) => - wrapBigInt64(js.UnaryOp(JSUnaryOp.~, newRhs)) - case _ => - wrapBigInt64(js.BinaryOp(JSBinaryOp.^, newLhs, newRhs)) - } - } else { - /* RuntimeLong does not have a dedicated method for -1L ^ b. - * The regular expansion done by the optimizer for the binary - * form is already optimal. - * So we don't special-case it here either. 
- */ - genLongApplyStatic(LongImpl.xor, newLhs, newRhs) + assert(useBigIntForLongs, "useBigIntForLongs only") + lhs match { + case LongLiteral(-1L) => + wrapBigInt64(js.UnaryOp(JSUnaryOp.~, newRhs)) + case _ => + wrapBigInt64(js.BinaryOp(JSBinaryOp.^, newLhs, newRhs)) } case Long_<< => - if (useBigIntForLongs) - wrapBigInt64(js.BinaryOp(JSBinaryOp.<<, newLhs, bigIntShiftRhs(newRhs))) - else - genLongApplyStatic(LongImpl.shl, newLhs, newRhs) + assert(useBigIntForLongs, "useBigIntForLongs only") + wrapBigInt64(js.BinaryOp(JSBinaryOp.<<, newLhs, bigIntShiftRhs(newRhs))) case Long_>>> => - if (useBigIntForLongs) - wrapBigInt64(js.BinaryOp(JSBinaryOp.>>, wrapBigIntU64(newLhs), bigIntShiftRhs(newRhs))) - else - genLongApplyStatic(LongImpl.shr, newLhs, newRhs) + assert(useBigIntForLongs, "useBigIntForLongs only") + wrapBigInt64(js.BinaryOp(JSBinaryOp.>>, wrapBigIntU64(newLhs), bigIntShiftRhs(newRhs))) case Long_>> => - if (useBigIntForLongs) - wrapBigInt64(js.BinaryOp(JSBinaryOp.>>, newLhs, bigIntShiftRhs(newRhs))) - else - genLongApplyStatic(LongImpl.sar, newLhs, newRhs) + assert(useBigIntForLongs, "useBigIntForLongs only") + wrapBigInt64(js.BinaryOp(JSBinaryOp.>>, newLhs, bigIntShiftRhs(newRhs))) case Long_== => - if (useBigIntForLongs) - js.BinaryOp(JSBinaryOp.===, newLhs, newRhs) - else - genLongApplyStatic(LongImpl.equals_, newLhs, newRhs) + longComparisonOp(JSBinaryOp.===, LongImpl.equals_) case Long_!= => - if (useBigIntForLongs) - js.BinaryOp(JSBinaryOp.!==, newLhs, newRhs) - else - genLongApplyStatic(LongImpl.notEquals, newLhs, newRhs) + longComparisonOp(JSBinaryOp.!==, LongImpl.notEquals) case Long_< => - if (useBigIntForLongs) - js.BinaryOp(JSBinaryOp.<, newLhs, newRhs) - else - genLongApplyStatic(LongImpl.lt, newLhs, newRhs) + longComparisonOp(JSBinaryOp.<, LongImpl.lt) case Long_<= => - if (useBigIntForLongs) - js.BinaryOp(JSBinaryOp.<=, newLhs, newRhs) - else - genLongApplyStatic(LongImpl.le, newLhs, newRhs) + longComparisonOp(JSBinaryOp.<=, LongImpl.le) case Long_> => - if (useBigIntForLongs) - js.BinaryOp(JSBinaryOp.>, newLhs, newRhs) - else - genLongApplyStatic(LongImpl.gt, newLhs, newRhs) + longComparisonOp(JSBinaryOp.>, LongImpl.gt) case Long_>= => - if (useBigIntForLongs) - js.BinaryOp(JSBinaryOp.>=, newLhs, newRhs) - else - genLongApplyStatic(LongImpl.ge, newLhs, newRhs) + longComparisonOp(JSBinaryOp.>=, LongImpl.ge) case Float_+ => genFround(js.BinaryOp(JSBinaryOp.+, newLhs, newRhs)) case Float_- => genFround(js.BinaryOp(JSBinaryOp.-, newLhs, newRhs)) @@ -2860,37 +2923,31 @@ private[emitter] class FunctionEmitter(sjsGen: SJSGen) { case Int_unsigned_>= => js.BinaryOp(JSBinaryOp.>=, shr0(newLhs), shr0(newRhs)) case Long_unsigned_< => - if (useBigIntForLongs) - js.BinaryOp(JSBinaryOp.<, wrapBigIntU64(newLhs), wrapBigIntU64(newRhs)) - else - genLongApplyStatic(LongImpl.ltu, newLhs, newRhs) + unsignedLongComparisonOp(JSBinaryOp.<, LongImpl.ltu) case Long_unsigned_<= => - if (useBigIntForLongs) - js.BinaryOp(JSBinaryOp.<=, wrapBigIntU64(newLhs), wrapBigIntU64(newRhs)) - else - genLongApplyStatic(LongImpl.leu, newLhs, newRhs) + unsignedLongComparisonOp(JSBinaryOp.<=, LongImpl.leu) case Long_unsigned_> => - if (useBigIntForLongs) - js.BinaryOp(JSBinaryOp.>, wrapBigIntU64(newLhs), wrapBigIntU64(newRhs)) - else - genLongApplyStatic(LongImpl.gtu, newLhs, newRhs) + unsignedLongComparisonOp(JSBinaryOp.>, LongImpl.gtu) case Long_unsigned_>= => - if (useBigIntForLongs) - js.BinaryOp(JSBinaryOp.>=, wrapBigIntU64(newLhs), wrapBigIntU64(newRhs)) - else - genLongApplyStatic(LongImpl.geu, 
newLhs, newRhs) + unsignedLongComparisonOp(JSBinaryOp.>=, LongImpl.geu) } case NewArray(typeRef, length) => js.New(genArrayConstrOf(typeRef), transformExprNoChar(length) :: Nil) case ArrayValue(typeRef, elems) => - val preserveChar = typeRef match { - case ArrayTypeRef(CharRef, 1) => true - case _ => false + val newElems = typeRef match { + case ArrayTypeRef(CharRef, 1) => + elems.map(transformExpr(_, preserveChar = true)) + case ArrayTypeRef(LongRef, 1) if !useBigIntForLongs => + elems.flatMap { elem => + val (elemLo, elemHi) = transformLongExpr(elem) + List(elemLo, elemHi) + } + case _ => + elems.map(transformExprNoChar(_)) } - extractWithGlobals( - genArrayValue(typeRef, elems.map(transformExpr(_, preserveChar)))) + extractWithGlobals(genArrayValue(typeRef, newElems)) case ArraySelect(array, index) => val newArray = transformExprNoChar(checkNotNull(array)) @@ -2913,6 +2970,17 @@ private[emitter] class FunctionEmitter(sjsGen: SJSGen) { // Transients + case Transient(ExtractLongHi(longValue)) => + assert(!useBigIntForLongs, "RuntimeLong only") + val (lo, hi) = transformLongExpr(longValue) + lo match { + case _:js.VarRef | _:js.IntLiteral => + // we can safely drop the lo word + hi + case _ => + js.Block(lo, hi) + } + case Transient(Cast(expr, tpe)) => val newExpr = transformExpr(expr, preserveChar = true) if (tpe == CharType && expr.tpe != CharType) @@ -2928,7 +2996,7 @@ private[emitter] class FunctionEmitter(sjsGen: SJSGen) { case Transient(NativeArrayWrapper(elemClass, nativeArray)) => val newNativeArray = transformExprNoChar(nativeArray) elemClass match { - case ClassOf(elemTypeRef) => + case ClassOf(elemTypeRef) if (elemTypeRef != LongRef) || useBigIntForLongs => val arrayTypeRef = ArrayTypeRef.of(elemTypeRef) extractWithGlobals( genNativeArrayWrapper(arrayTypeRef, newNativeArray)) @@ -3086,17 +3154,9 @@ private[emitter] class FunctionEmitter(sjsGen: SJSGen) { case DoubleLiteral(value) => js.DoubleLiteral(value) case StringLiteral(value) => js.StringLiteral(value) - case LongLiteral(0L) => - genLongZero() case LongLiteral(value) => - if (useBigIntForLongs) { - js.BigIntLiteral(value) - } else { - val (lo, hi) = LongImpl.extractParts(value) - genScalaClassNew( - LongImpl.RuntimeLongClass, LongImpl.initFromParts, - js.IntLiteral(lo), js.IntLiteral(hi)) - } + assert(useBigIntForLongs, "useBigIntForLongs only") + js.BigIntLiteral(value) case ClassOf(typeRef) => genClassOf(typeRef) @@ -3150,6 +3210,311 @@ private[emitter] class FunctionEmitter(sjsGen: SJSGen) { genCallHelper(VarField.bC, baseResult) } + /** Desugar an Long expression of the IR into a pair `(lo, hi)` of JavaScript expressions. 
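+ * + * Sub-expressions whose value is produced by a call (RuntimeLong operator + * methods, regular method applications, typed closures) follow the `resHi` + * convention: the call returns the lo word and leaves the hi word in the + * `resHi` core global, which `withResHi` below pairs with the call.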
*/ + def transformLongExpr(tree: Tree)(implicit env: Env): (js.Tree, js.Tree) = { + import TreeDSL._ + + implicit val pos = tree.pos + + assert(!useBigIntForLongs, + s"transformLongExpr must not be called with bigIntForLongs, at $pos with tree\n$tree") + + def or0(tree: js.Tree): js.Tree = + js.BinaryOp(JSBinaryOp.|, tree, js.IntLiteral(0)) + + def shr0(tree: js.Tree): js.Tree = tree match { + case js.IntLiteral(value) => + js.UintLiteral(value) + case _ => + js.BinaryOp(JSBinaryOp.>>>, tree, js.IntLiteral(0)) + } + + def withResHi(jsTree: js.Tree): (js.Tree, js.Tree) = + (jsTree, globalVar(VarField.resHi, CoreVar)) + + tree match { + // Control flow constructs + + case Block(stats :+ expr) => + val (newStats, newEnv) = transformBlockStats(stats) + val (lo, hi) = transformLongExpr(expr)(newEnv) + (js.Block(newStats :+ lo), hi) + + // Scala expressions + + case Select(qualifier, field) => + // Assume trivial qualifier + genSelectLong(transformExprNoChar(checkNotNull(qualifier)), field) + + case SelectStatic(item) => + (globalVar(VarField.t, item.name), globalVar(VarField.thi, item.name)) + + case _:Apply | _:ApplyStatically | _:ApplyStatic | _:ApplyTypedClosure => + withResHi(transformTypedApplyCommon(tree)) + + case UnaryOp(op, lhs) => + import UnaryOp._ + def newNonLongLhs = transformExprNoChar(lhs) + (op: @switch) match { + case IntToLong => + withResHi(genLongApplyStatic(LongImpl.fromInt, newNonLongLhs)) + + case DoubleToLong => + withResHi(genLongApplyStatic(LongImpl.fromDouble, newNonLongLhs)) + + case CheckNotNull => + /* A CheckNotNull is of type LongType iff its argument is of type + * LongType, and in that case it always succeeds. + */ + transformLongExpr(lhs) + + case Double_toBits => + withResHi(genCallHelper(VarField.doubleToBits, newNonLongLhs)) + + case UnsignedIntToLong => + (newNonLongLhs, js.IntLiteral(0)) + } + + case BinaryOp(op @ (BinaryOp.Long_<< | BinaryOp.Long_>>> | BinaryOp.Long_>>), lhs, rhs) => + import BinaryOp._ + + val (newLhsLo, newLhsHi) = transformLongExpr(lhs) + val newRhs = transformExprNoChar(rhs) + + val implMethodName = (op: @switch) match { + case Long_<< => LongImpl.shl + case Long_>>> => LongImpl.shr + case Long_>> => LongImpl.sar + } + + withResHi(genLongApplyStatic(implMethodName, newLhsLo, newLhsHi, newRhs)) + + case BinaryOp(op, lhs, rhs) => + import BinaryOp._ + + val (newLhsLo, newLhsHi) = transformLongExpr(lhs) + val (newRhsLo, newRhsHi) = transformLongExpr(rhs) + + val implMethodName = (op: @switch) match { + case Long_+ => LongImpl.add + case Long_- => LongImpl.sub + case Long_* => LongImpl.mul + case Long_/ => LongImpl.divide + case Long_% => LongImpl.remainder + case Long_unsigned_/ => LongImpl.divideUnsigned + case Long_unsigned_% => LongImpl.remainderUnsigned + case Long_| => LongImpl.or + case Long_& => LongImpl.and + case Long_^ => LongImpl.xor + } + + withResHi(genLongApplyStatic(implMethodName, newLhsLo, newLhsHi, newRhsLo, newRhsHi)) + + case ArraySelect(array, index) => + assert(semantics.arrayIndexOutOfBounds != CheckedBehavior.Unchecked) + val newArray = transformExprNoChar(array) + val newIndex = transformExprNoChar(index) + withResHi(genSyntheticPropApply(newArray, SyntheticProperty.get, newIndex)) + + case Transient(JSLongArraySelect(jsArray, scaledIndex)) => + assert(semantics.arrayIndexOutOfBounds == CheckedBehavior.Unchecked) + val scaledIndexPlusOne = scaledIndex match { + case js.IntLiteral(scaledIndexValue) => js.IntLiteral(scaledIndexValue + 1) + case _ => (scaledIndex + 1) | 0 + } + val newLo = 
js.BracketSelect(jsArray, scaledIndex) + val newHi = js.BracketSelect(jsArray, scaledIndexPlusOne) + (newLo, newHi) + + case tree: RecordSelect => + val jsIdent = makeRecordFieldIdentForVarRef(tree) + (js.VarRef(identLongLo(jsIdent)), js.VarRef(identLongHi(jsIdent))) + + case AsInstanceOf(expr, tpe) => + withResHi(extractWithGlobals(genAsInstanceOf(transformExprNoChar(expr), tpe))) + + // Transients + + case Transient(PackLong(lo, hi)) => + (transformExprNoChar(lo), transformExprNoChar(hi)) + + case Transient(Cast(expr, tpe)) => + if (expr.tpe == LongType) { + transformLongExpr(expr) + } else { + // TODO Do this locally + withResHi(extractWithGlobals(genAsInstanceOf(transformExprNoChar(expr), tpe))) + } + + // Literals + + case LongLiteral(value) => + val (lo, hi) = LongImpl.extractParts(value) + (js.IntLiteral(lo), js.IntLiteral(hi)) + + // Atomic expressions + + case tree @ VarRef(name) => + env.varKind(name) match { + case VarKind.Mutable | VarKind.Immutable => + val jsIdent = transformLocalVarRefIdent(tree) + (js.VarRef(identLongLo(jsIdent)), js.VarRef(identLongHi(jsIdent))) + + case VarKind.ThisAlias => + throw new AssertionError("ThisAlias cannot be a `long`") + + case VarKind.ExplicitThisAlias => + (fileLevelVar(VarField.thiz), fileLevelVar(VarField.thizhi)) + + case VarKind.ClassCapture => + val newName = genName(name) + (fileLevelVar(VarField.cc, newName), fileLevelVar(VarField.cchi, newName)) + } + + case Transient(JSVarRef(name, _)) => + (js.VarRef(identLongLo(name)), js.VarRef(identLongHi(name))) + + // Invalid trees + + case _ => + throw new IllegalArgumentException( + "Invalid tree in FunctionEmitter.transformLongExpr() of class " + + tree.getClass) + } + } + + private def transformTypedApplyCommon(tree: Tree)(implicit env: Env): js.Tree = { + import TreeDSL._ + + implicit val pos = tree.pos + + tree match { + case Apply(_, receiver, method, args) => + // TODO Dedupe with transformExpr + + val methodName = method.name + + def newReceiver: js.Tree = { + /* For other primitive types, unboxes/casts are not necessary, + * because they would only convert `null` to the zero value of + * the type. However, `null` is ruled out by `checkNotNull` (or + * because it is UB). + */ + transformExprNoChar(checkNotNull(receiver)) + } + + val newArgs = transformTypedArgs(method.name, args) + + def genNormalApply(): js.Tree = + js.Apply(newReceiver DOT genMethodIdent(method), newArgs) + + def genDispatchApply(): js.Tree = + js.Apply(globalVar(VarField.dp, methodName), newReceiver :: newArgs) + + def genHijackedMethodApply(className: ClassName): js.Tree = { + val newRec = className match { + case BoxedLongClass if !useBigIntForLongs => + val (lo, hi) = + if (receiver.tpe == LongType) transformLongExpr(receiver) + else transformLongExpr(AsInstanceOf(checkNotNull(receiver), LongType)) + List(lo, hi) + case BoxedCharacterClass => + /* When statically calling a (hijacked) method of j.l.Character, + * the receiver must be passed as a primitive CharType. If it is + * not already a CharType, we must introduce a cast to unbox the + * value. 
+ */ + if (receiver.tpe == CharType) + List(transformExpr(receiver, preserveChar = true)) + else + List(transformExpr(AsInstanceOf(checkNotNull(receiver), CharType), preserveChar = true)) + case _ => + List(newReceiver) + } + genApplyStaticLike(VarField.f, className, method, newRec ::: newArgs) + } + + if (isMaybeHijackedClass(receiver.tpe) && + !methodName.isReflectiveProxy) { + receiver.tpe match { + case AnyType | AnyNotNullType => + genDispatchApply() + + case _ if hijackedMethodsInheritedFromObject.contains(methodName) => + /* Methods inherited from j.l.Object do not have a dedicated + * hijacked method that we can call, even when we know the + * precise type of the receiver. Therefore, we always have to + * go through the dispatcher in those cases. + * + * Note that when the optimizer is enabled, if the receiver + * had a precise type, the method would have been inlined + * anyway (because all the affected methods are @inline). + * Therefore this roundabout of dealing with this does not + * prevent any optimization. + */ + genDispatchApply() + + case ClassType(className, _) if !HijackedClasses.contains(className) => + /* This is a strict ancestor of a hijacked class. We need to + * use the dispatcher available in the helper method. + */ + genDispatchApply() + + case tpe => + /* This is a concrete hijacked class or its corresponding + * primitive type. Directly call the hijacked method. Note that + * there might not even be a dispatcher for this method, so + * this is important. + */ + genHijackedMethodApply(typeToBoxedHijackedClass(tpe)) + } + } else { + genNormalApply() + } + + case ApplyStatically(flags, receiver, className, method, args) => + val newReceiver = transformExprNoChar(checkNotNull(receiver)) + val newArgs = transformTypedArgs(method.name, args) + val transformedArgs = newReceiver :: newArgs + + if (flags.isConstructor) { + genApplyStaticLike(VarField.ct, className, method, transformedArgs) + } else if (flags.isPrivate) { + genApplyStaticLike(VarField.p, className, method, transformedArgs) + } else if (globalKnowledge.isInterface(className)) { + genApplyStaticLike(VarField.f, className, method, transformedArgs) + } else { + val fun = + globalVar(VarField.c, className).prototype DOT genMethodIdent(method) + js.Apply(fun DOT "call", transformedArgs) + } + + case ApplyStatic(flags, className, method, args) => + genApplyStaticLike( + if (flags.isPrivate) VarField.ps else VarField.s, + className, + method, + transformTypedArgs(method.name, args)) + + case ApplyTypedClosure(_, fun, args) => + val newFun = transformExprNoChar(checkNotNull(fun)) + val newArgs = fun.tpe match { + case ClosureType(paramTypes, _, _) => + transformTypedArgs(paramTypes, args) + case NothingType | NullType => + args.map(transformExpr(_, preserveChar = true)) + case _ => + throw new AssertionError( + s"Unexpected type for the fun of ApplyTypedClosure: ${fun.tpe}") + } + js.Apply.makeProtected(newFun, newArgs) + + case _ => + throw new AssertionError( + s"Invalid tree in transformTypedApplyCommon of class ${tree.getClass()}") + } + } + private def transformApplyDynamicImport(tree: ApplyDynamicImport)( implicit env: Env): js.Tree = { implicit val pos = tree.pos @@ -3203,7 +3568,14 @@ private[emitter] class FunctionEmitter(sjsGen: SJSGen) { val captureName = param.name.name val varKind = prepareCapture(value, Some(captureName), flags.arrow) { () => - capturesBuilder += transformParamDef(param) -> transformExpr(value, param.ptpe) + if (!isRTLongType(param.ptpe)) { + capturesBuilder += 
transformParamDef(param) -> transformExpr(value, param.ptpe) + } else { + val List(loParam, hiParam) = transformParamDefExpanded(param) + val (loValue, hiValue) = transformLongExpr(value) + capturesBuilder += loParam -> loValue + capturesBuilder += hiParam -> hiValue + } } captureName -> varKind @@ -3281,6 +3653,9 @@ private[emitter] class FunctionEmitter(sjsGen: SJSGen) { } } + def isRTLongType(tpe: Type): Boolean = + tpe == LongType && !useBigIntForLongs + def isMaybeHijackedClass(tpe: Type): Boolean = tpe match { case ClassType(className, _) => HijackedClasses.contains(className) || @@ -3330,6 +3705,22 @@ private[emitter] class FunctionEmitter(sjsGen: SJSGen) { private def transformParamDef(paramDef: ParamDef): js.ParamDef = js.ParamDef(transformLocalVarIdent(paramDef.name, paramDef.originalName))(paramDef.pos) + private def transformParamDefExpanded(paramDef: ParamDef): List[js.ParamDef] = { + assert(!useBigIntForLongs, + s"transformParamDefExpanded must not be called with bigIntForLongs at ${paramDef.pos}") + val ident = transformLocalVarIdent(paramDef.name, paramDef.originalName) + if (paramDef.ptpe == LongType) + List(js.ParamDef(identLongLo(ident))(paramDef.pos), js.ParamDef(identLongHi(ident))(paramDef.pos)) + else + js.ParamDef(ident)(paramDef.pos) :: Nil + } + + private def identLongLo(ident: js.Ident): js.Ident = + js.Ident(ident.name + "_$_lo")(ident.pos) + + private def identLongHi(ident: js.Ident): js.Ident = + js.Ident(ident.name + "_$_hi")(ident.pos) + private def transformLabelIdent(label: LabelName)(implicit pos: Position): js.Ident = js.Ident(genName(label)) @@ -3443,6 +3834,30 @@ private object FunctionEmitter { out.print(ident.name) } + /** A selection from a Long array where the underlying JS array and scaled + * index have already been extracted in immutable JS vars. + * + * The scaled index may be a `js.IntLiteral` as well. 
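+   *
+   * For example (sketch), `transformLongExpr` above reads such a selection
+   * as the pair `(jsArray[scaledIndex], jsArray[scaledIndex + 1])`: the lo
+   * word at the scaled index and the hi word in the following slot of the
+   * underlying `Int32Array`.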
+ */ + private final case class JSLongArraySelect(jsArray: js.VarRef, scaledIndex: js.Tree) + extends Transient.Value { + + val tpe = LongType + + def traverse(traverser: Traverser): Unit = () + + def transform(transformer: Transformer)(implicit pos: Position): Tree = + Transient(this) + + def printIR(out: org.scalajs.ir.Printers.IRTreePrinter): Unit = { + out.print("(") + out.print(jsArray.show) + out.print(", ") + out.print(scaledIndex.show) + out.print(")") + } + } + private final case class JSNewVararg(ctor: Tree, argArray: Tree) extends Transient.Value { val tpe: Type = AnyType diff --git a/linker/shared/src/main/scala/org/scalajs/linker/backend/emitter/LongImpl.scala b/linker/shared/src/main/scala/org/scalajs/linker/backend/emitter/LongImpl.scala index 86f9b419af..3224b0af5c 100644 --- a/linker/shared/src/main/scala/org/scalajs/linker/backend/emitter/LongImpl.scala +++ b/linker/shared/src/main/scala/org/scalajs/linker/backend/emitter/LongImpl.scala @@ -18,6 +18,7 @@ import org.scalajs.ir.WellKnownNames._ private[linker] object LongImpl { final val RuntimeLongClass = ClassName("org.scalajs.linker.runtime.RuntimeLong") + final val RuntimeLongModClass = ClassName("org.scalajs.linker.runtime.RuntimeLong$") final val lo = MethodName("lo", Nil, IntRef) final val hi = MethodName("hi", Nil, IntRef) @@ -26,35 +27,23 @@ private[linker] object LongImpl { private final val OneRTLongRef = RTLongRef :: Nil private final val TwoRTLongRefs = RTLongRef :: OneRTLongRef + private final val TwoIntRefs = IntRef :: IntRef :: Nil + private final val ThreeIntRefs = IntRef :: TwoIntRefs + private final val FourIntRefs = IntRef :: ThreeIntRefs + + final val pack = MethodName("pack", TwoIntRefs, LongRef) + def unaryOp(name: String): MethodName = - MethodName(name, OneRTLongRef, RTLongRef) + MethodName(name, TwoIntRefs, LongRef) def binaryOp(name: String): MethodName = - MethodName(name, TwoRTLongRefs, RTLongRef) + MethodName(name, FourIntRefs, LongRef) def shiftOp(name: String): MethodName = - MethodName(name, List(RTLongRef, IntRef), RTLongRef) + MethodName(name, ThreeIntRefs, LongRef) def compareOp(name: String): MethodName = - MethodName(name, TwoRTLongRefs, BooleanRef) - - // Instance methods that we need to reach as part of the jl.Long boxing - - private final val byteValue = MethodName("byteValue", Nil, ByteRef) - private final val shortValue = MethodName("shortValue", Nil, ShortRef) - private final val intValue = MethodName("intValue", Nil, IntRef) - private final val longValue = MethodName("longValue", Nil, LongRef) - private final val floatValue = MethodName("floatValue", Nil, FloatRef) - private final val doubleValue = MethodName("doubleValue", Nil, DoubleRef) - - private final val equalsO = MethodName("equals", List(ClassRef(ObjectClass)), BooleanRef) - private final val hashCode_ = MethodName("hashCode", Nil, IntRef) - private final val compareTo = MethodName("compareTo", List(ClassRef(BoxedLongClass)), IntRef) - private final val compareToO = MethodName("compareTo", List(ClassRef(ObjectClass)), IntRef) - - val BoxedLongMethods = Set( - byteValue, shortValue, intValue, longValue, floatValue, doubleValue, - equalsO, hashCode_, compareTo, compareToO) + MethodName(name, FourIntRefs, BooleanRef) // Operator methods @@ -86,16 +75,18 @@ private[linker] object LongImpl { final val gtu = compareOp("gtu") final val geu = compareOp("geu") - final val toInt = MethodName("toInt", OneRTLongRef, IntRef) - final val toFloat = MethodName("toFloat", OneRTLongRef, FloatRef) - final val toDouble = 
MethodName("toDouble", OneRTLongRef, DoubleRef) - final val bitsToDouble = MethodName("bitsToDouble", List(RTLongRef, ObjectRef), DoubleRef) - final val clz = MethodName("clz", OneRTLongRef, IntRef) + final val toInt = MethodName("toInt", TwoIntRefs, IntRef) + final val toFloat = MethodName("toFloat", TwoIntRefs, FloatRef) + final val toDouble = MethodName("toDouble", TwoIntRefs, DoubleRef) + final val bitsToDouble = MethodName("bitsToDouble", List(IntRef, IntRef, ObjectRef), DoubleRef) + final val clz = MethodName("clz", TwoIntRefs, IntRef) - final val fromInt = MethodName("fromInt", List(IntRef), RTLongRef) - final val fromUnsignedInt = MethodName("fromUnsignedInt", List(IntRef), RTLongRef) - final val fromDouble = MethodName("fromDouble", List(DoubleRef), RTLongRef) - final val fromDoubleBits = MethodName("fromDoubleBits", List(DoubleRef, ObjectRef), RTLongRef) + final val fromInt = MethodName("fromInt", List(IntRef), LongRef) + final val fromUnsignedInt = MethodName("fromUnsignedInt", List(IntRef), LongRef) + final val fromDouble = MethodName("fromDouble", List(DoubleRef), LongRef) + final val fromDoubleBits = MethodName("fromDoubleBits", List(DoubleRef, ObjectRef), LongRef) + + final val toString_ = MethodName("toString", TwoIntRefs, ClassRef(BoxedStringClass)) val OperatorMethods = Set( add, sub, mul, @@ -103,32 +94,23 @@ private[linker] object LongImpl { or, and, xor, shl, shr, sar, equals_, notEquals, lt, le, gt, ge, ltu, leu, gtu, geu, toInt, toFloat, toDouble, bitsToDouble, clz, - fromInt, fromUnsignedInt, fromDouble, fromDoubleBits + fromInt, fromUnsignedInt, fromDouble, fromDoubleBits, + toString_ ) // Methods used for intrinsics - final val toString_ = MethodName("toString", OneRTLongRef, ClassRef(BoxedStringClass)) - - final val compare = MethodName("compare", TwoRTLongRefs, IntRef) + final val compare = MethodName("compare", FourIntRefs, IntRef) - final val abs = MethodName("abs", OneRTLongRef, RTLongRef) - final val multiplyFull = MethodName("multiplyFull", List(IntRef, IntRef), RTLongRef) + final val abs = MethodName("abs", TwoIntRefs, LongRef) + final val multiplyFull = MethodName("multiplyFull", TwoIntRefs, LongRef) val AllIntrinsicMethods = Set( - toString_, compare, abs, multiplyFull ) - // Constructors - - final val initFromParts = MethodName.constructor(List(IntRef, IntRef)) - - val AllConstructors = Set( - initFromParts) - // Extract the parts to give to the initFromParts constructor def extractParts(value: Long): (Int, Int) = diff --git a/linker/shared/src/main/scala/org/scalajs/linker/backend/emitter/SJSGen.scala b/linker/shared/src/main/scala/org/scalajs/linker/backend/emitter/SJSGen.scala index 09514782bb..18a2df62a4 100644 --- a/linker/shared/src/main/scala/org/scalajs/linker/backend/emitter/SJSGen.scala +++ b/linker/shared/src/main/scala/org/scalajs/linker/backend/emitter/SJSGen.scala @@ -57,6 +57,12 @@ private[emitter] final class SJSGen( /** `Char.c`: the int value of the character. */ val c = "c" + /** `Long.l`: the lo word of the long. */ + val lo = "l" + + /** `Long.h`: the hi word of the long. */ + val hi = "h" + // --- TypeData fields --- /** `TypeData.constr`: the run-time constructor of the class. 
*/ @@ -222,23 +228,18 @@ private[emitter] final class SJSGen( def genLongZero()( implicit moduleContext: ModuleContext, globalKnowledge: GlobalKnowledge, pos: Position): Tree = { - if (useBigIntForLongs) - BigIntLiteral(0L) - else - globalVar(VarField.L0, CoreVar) + assert(useBigIntForLongs, s"cannot generate a zero value for primitive long at $pos") + BigIntLiteral(0L) } def genBoxedZeroOf(tpe: Type)( implicit moduleContext: ModuleContext, globalKnowledge: GlobalKnowledge, pos: Position): Tree = { - if (tpe == CharType) genBoxedCharZero() - else genZeroOf(tpe) - } - - def genBoxedCharZero()( - implicit moduleContext: ModuleContext, globalKnowledge: GlobalKnowledge, - pos: Position): Tree = { - globalVar(VarField.bC0, CoreVar) + tpe match { + case CharType => globalVar(VarField.bC0, CoreVar) + case LongType if !useBigIntForLongs => globalVar(VarField.bL0, CoreVar) + case _ => genZeroOf(tpe) + } } def genLongApplyStatic(methodName: MethodName, args: Tree*)( @@ -278,7 +279,11 @@ private[emitter] final class SJSGen( case FloatRef => some("Float32Array") case DoubleRef => some("Float64Array") - case LongRef if useBigIntForLongs => some("BigInt64Array") + case LongRef => + if (useBigIntForLongs) + some("BigInt64Array") + else + some("Int32Array") // where elements are spread over two slots case _ => None } @@ -289,12 +294,28 @@ private[emitter] final class SJSGen( DotSelect(receiver, genFieldIdent(field.name)(field.pos)) } + def genSelectLong(receiver: Tree, field: irt.FieldIdent)( + implicit pos: Position): (Tree, Tree) = { + // TODO Name compressor + val baseName = genName(field.name) + val loIdent = Ident(baseName + "_$lo")(field.pos) + val hiIdent = Ident(baseName + "_$hi")(field.pos) + (DotSelect(receiver, loIdent), DotSelect(receiver, hiIdent)) + } + def genSelectForDef(receiver: Tree, field: irt.FieldIdent, originalName: OriginalName)( implicit pos: Position): Tree = { DotSelect(receiver, genFieldIdentForDef(field.name, originalName)(field.pos)) } + def genSelectLongForDef(receiver: Tree, field: irt.FieldIdent, + originalName: OriginalName)( + implicit pos: Position): (Tree, Tree) = { + // TODO Name compressor; originalName + genSelectLong(receiver, field) + } + private def genFieldIdent(fieldName: FieldName)( implicit pos: Position): MaybeDelayedIdent = { nameCompressor match { @@ -488,7 +509,7 @@ private[emitter] final class SJSGen( import TreeDSL._ if (useBigIntForLongs) genCallHelper(VarField.isLong, expr) - else expr instanceof globalVar(VarField.c, LongImpl.RuntimeLongClass) + else expr instanceof globalVar(VarField.Long, CoreVar) } def genAsInstanceOf(expr: Tree, tpe: Type)( diff --git a/linker/shared/src/main/scala/org/scalajs/linker/backend/emitter/Transients.scala b/linker/shared/src/main/scala/org/scalajs/linker/backend/emitter/Transients.scala index b1ac1c10a6..ff7fb5ab02 100644 --- a/linker/shared/src/main/scala/org/scalajs/linker/backend/emitter/Transients.scala +++ b/linker/shared/src/main/scala/org/scalajs/linker/backend/emitter/Transients.scala @@ -21,6 +21,47 @@ import org.scalajs.ir.Types._ object Transients { + /** Packs a `long` value from its `lo` and `hi` words. 
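+   *
+   * Informally (sketch), for pure int trees `lo` and `hi`, the following
+   * identities hold:
+   * {{{
+   *   UnaryOp(LongToInt, Transient(PackLong(lo, hi)))         ~~>  lo
+   *   Transient(ExtractLongHi(Transient(PackLong(lo, hi))))   ~~>  hi
+   * }}}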
*/ + final case class PackLong(lo: Tree, hi: Tree) extends Transient.Value { + val tpe = LongType + + def traverse(traverser: Traverser): Unit = { + traverser.traverse(lo) + traverser.traverse(hi) + } + + def transform(transformer: Transformer)(implicit pos: Position): Tree = + Transient(PackLong(transformer.transform(lo), transformer.transform(hi))) + + def printIR(out: IRTreePrinter): Unit = { + out.print("(") + out.print(lo) + out.print(", ") + out.print(hi) + out.print(")") + } + } + + /** Extracts the `hi` word of a `long` value. + * + * To extract the `lo` word, use a `UnaryOp.LongToInt`. + */ + final case class ExtractLongHi(value: Tree) extends Transient.Value { + val tpe = IntType + + def traverse(traverser: Traverser): Unit = + traverser.traverse(value) + + def transform(transformer: Transformer)(implicit pos: Position): Tree = + Transient(ExtractLongHi(transformer.transform(value))) + + def printIR(out: IRTreePrinter): Unit = { + out.print("(") + out.print(value) + out.print(")") + } + } + /** Casts `expr` to the given `tpe`, without any check. * * This operation is only valid if we know that `expr` is indeed a value of diff --git a/linker/shared/src/main/scala/org/scalajs/linker/backend/emitter/TreeDSL.scala b/linker/shared/src/main/scala/org/scalajs/linker/backend/emitter/TreeDSL.scala index 540936dc78..586d2e3577 100644 --- a/linker/shared/src/main/scala/org/scalajs/linker/backend/emitter/TreeDSL.scala +++ b/linker/shared/src/main/scala/org/scalajs/linker/backend/emitter/TreeDSL.scala @@ -57,6 +57,8 @@ private[emitter] object TreeDSL { def +(that: Tree)(implicit pos: Position): Tree = BinaryOp(ir.Trees.JSBinaryOp.+, self, that) + def +(that: Int)(implicit pos: Position): Tree = + BinaryOp(ir.Trees.JSBinaryOp.+, self, IntLiteral(that)) def -(that: Tree)(implicit pos: Position): Tree = BinaryOp(ir.Trees.JSBinaryOp.-, self, that) def *(that: Tree)(implicit pos: Position): Tree = @@ -77,6 +79,8 @@ private[emitter] object TreeDSL { def <<(that: Tree)(implicit pos: Position): Tree = BinaryOp(ir.Trees.JSBinaryOp.<<, self, that) + def <<(that: Int)(implicit pos: Position): Tree = + BinaryOp(ir.Trees.JSBinaryOp.<<, self, IntLiteral(that)) def >>(that: Tree)(implicit pos: Position): Tree = BinaryOp(ir.Trees.JSBinaryOp.>>, self, that) def >>>(that: Tree)(implicit pos: Position): Tree = diff --git a/linker/shared/src/main/scala/org/scalajs/linker/backend/emitter/VarField.scala b/linker/shared/src/main/scala/org/scalajs/linker/backend/emitter/VarField.scala index 9ce22ed2aa..da400ae8e6 100644 --- a/linker/shared/src/main/scala/org/scalajs/linker/backend/emitter/VarField.scala +++ b/linker/shared/src/main/scala/org/scalajs/linker/backend/emitter/VarField.scala @@ -58,6 +58,9 @@ private[emitter] object VarField { /** Static fields. */ final val t = mk("$t") + /** Static fields, hi word of a long. */ + final val thi = mk("$thi") + /** Scala module accessor. */ final val m = mk("$m") @@ -123,12 +126,18 @@ private[emitter] object VarField { /** Local field for class captures. */ final val cc = mk("$cc") + /** Local field for the hi word of a long class capture. */ + final val cchi = mk("$cchi") + /** Local field for super class. */ final val superClass = mk("$superClass") /** Local field for this replacement. */ final val thiz = mk("$thiz") + /** Local field for the hi word of a this replacement. */ + final val thizhi = mk("$thizhi") + /** Local field for dynamic imports. */ final val module = mk("$module") @@ -143,8 +152,11 @@ private[emitter] object VarField { /** The TypeData class. 
*/ final val TypeData = mk("$TypeData") - /** Long zero. */ - final val L0 = mk("$L0") + /** Hi word of a long result. */ + final val resHi = mk("$resHi") + + /** Setter for resHi (for ES Modules). */ + final val setResHi = mk("$setResHi") /** DataView for floating point bit manipulation. */ final val fpBitsDataView = mk("$fpBitsDataView") @@ -167,6 +179,17 @@ private[emitter] object VarField { final val charAt = mk("$charAt") + // Long + + /** The Long class. */ + final val Long = mk("$Long") + + /** Box long. */ + final val bL = mk("$bL") + + /** Boxed Long zero. */ + final val bL0 = mk("$bL0") + // Object helpers final val objectClone = mk("$objectClone") diff --git a/linker/shared/src/main/scala/org/scalajs/linker/checker/FeatureSet.scala b/linker/shared/src/main/scala/org/scalajs/linker/checker/FeatureSet.scala index 94aabffff1..f6114768db 100644 --- a/linker/shared/src/main/scala/org/scalajs/linker/checker/FeatureSet.scala +++ b/linker/shared/src/main/scala/org/scalajs/linker/checker/FeatureSet.scala @@ -60,6 +60,8 @@ private[checker] object FeatureSet { /** Records and record types. */ val Records = new FeatureSet(1 << 7) + val PackLong = new FeatureSet(1 << 8) + /** Relaxed constructor discipline. * * - Optional super/delegate constructor call. @@ -88,7 +90,7 @@ private[checker] object FeatureSet { /** IR that is only the result of desugaring (currently empty). */ private val Desugared = - Empty + PackLong /** IR that is only the result of optimizations. */ private val Optimized = diff --git a/linker/shared/src/main/scala/org/scalajs/linker/checker/IRChecker.scala b/linker/shared/src/main/scala/org/scalajs/linker/checker/IRChecker.scala index c25ae55672..9cb62cc801 100644 --- a/linker/shared/src/main/scala/org/scalajs/linker/checker/IRChecker.scala +++ b/linker/shared/src/main/scala/org/scalajs/linker/checker/IRChecker.scala @@ -27,6 +27,7 @@ import org.scalajs.logging._ import org.scalajs.linker.frontend.{LinkingUnit, LinkTimeEvaluator, LinkTimeProperties} import org.scalajs.linker.standard.LinkedClass import org.scalajs.linker.checker.ErrorReporter._ +import org.scalajs.linker.backend.emitter.Transients /** Checker for the validity of the IR. */ private final class IRChecker(linkTimeProperties: LinkTimeProperties, @@ -777,6 +778,10 @@ private final class IRChecker(linkTimeProperties: LinkTimeProperties, typecheckExpect(value, env, ctpe) } + case Transient(Transients.PackLong(lo, hi)) if featureSet.supports(FeatureSet.PackLong) => + typecheckExpect(lo, env, IntType) + typecheckExpect(hi, env, IntType) + case Transient(transient) if featureSet.supports(FeatureSet.OptimizedTransients) => // No precise rules, but at least check that its children type-check on their own transient.traverse(new Traversers.Traverser { diff --git a/linker/shared/src/main/scala/org/scalajs/linker/frontend/Desugarer.scala b/linker/shared/src/main/scala/org/scalajs/linker/frontend/Desugarer.scala index b97423440d..a635d0bf81 100644 --- a/linker/shared/src/main/scala/org/scalajs/linker/frontend/Desugarer.scala +++ b/linker/shared/src/main/scala/org/scalajs/linker/frontend/Desugarer.scala @@ -18,11 +18,13 @@ import org.scalajs.logging._ import org.scalajs.linker.standard._ import org.scalajs.linker.checker._ +import org.scalajs.linker.backend.emitter.LongImpl import org.scalajs.ir.Names._ import org.scalajs.ir.Transformers._ import org.scalajs.ir.Trees._ import org.scalajs.ir.{Position, Version} +import org.scalajs.linker.backend.emitter.Transients /** Desugars a linking unit. 
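 * In particular, in the `RuntimeLong` module class, the body of
 * `pack(lo, hi)` is replaced by `Transient(PackLong(lo, hi))`; see
 * `desugarRTLongPack` below.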
*/ final class Desugarer(config: CommonPhaseConfig, checkIR: Boolean) { @@ -58,11 +60,15 @@ final class Desugarer(config: CommonPhaseConfig, checkIR: Boolean) { private def desugarClass(linkedClass: LinkedClass): LinkedClass = { import linkedClass._ - if (desugaringRequirements.isEmpty) { + val isRTLongMod = className == LongImpl.RuntimeLongModClass + + if (desugaringRequirements.isEmpty && !isRTLongMod) { linkedClass } else { val newMethods = methods.map { method => - if (!desugaringRequirements.containsMethod(method.flags.namespace, method.methodName)) + if (isRTLongMod && method.methodName == LongImpl.pack) + desugarRTLongPack(method) + else if (!desugaringRequirements.containsMethod(method.flags.namespace, method.methodName)) method else desugarTransformer.transformMethodDef(method) @@ -107,6 +113,16 @@ final class Desugarer(config: CommonPhaseConfig, checkIR: Boolean) { } } + private def desugarRTLongPack(methodDef: MethodDef): MethodDef = { + import methodDef._ + val newBody = { + implicit val pos = body.get.pos + Transient(Transients.PackLong(args(0).ref, args(1).ref)) + } + MethodDef(flags, name, originalName, args, resultType, Some(newBody))( + optimizerHints, version)(pos) + } + private def desugarTopLevelExport(tle: LinkedTopLevelExport): LinkedTopLevelExport = { import tle._ if (!tle.needsDesugaring) { diff --git a/linker/shared/src/main/scala/org/scalajs/linker/frontend/Refiner.scala b/linker/shared/src/main/scala/org/scalajs/linker/frontend/Refiner.scala index 4f778351ba..64f66f46c1 100644 --- a/linker/shared/src/main/scala/org/scalajs/linker/frontend/Refiner.scala +++ b/linker/shared/src/main/scala/org/scalajs/linker/frontend/Refiner.scala @@ -38,16 +38,6 @@ final class Refiner(config: CommonPhaseConfig, checkIR: Boolean) { new Analyzer(config, initial = false, checkIRFor, failOnError = true, irLoader) } - /* TODO: Remove this and replace with `checkIR` once the optimizer generates - * well-typed IR with runtime longs. - */ - private val shouldRunIRChecker = { - val optimizerUsesRuntimeLong = - !config.coreSpec.esFeatures.allowBigIntsForLongs && - !config.coreSpec.targetIsWebAssembly - checkIR && !optimizerUsesRuntimeLong - } - def refine(classDefs: Seq[(ClassDef, Version)], moduleInitializers: List[ModuleInitializer], symbolRequirements: SymbolRequirement, logger: Logger)( @@ -81,7 +71,7 @@ final class Refiner(config: CommonPhaseConfig, checkIR: Boolean) { linkedTopLevelExports.flatten.toList, moduleInitializers, globalInfo) } - if (shouldRunIRChecker) { + if (checkIR) { logger.time("Refiner: Check IR") { val errorCount = IRChecker.check(linkTimeProperties, result, logger, CheckingPhase.Optimizer) diff --git a/linker/shared/src/main/scala/org/scalajs/linker/frontend/optimizer/OptimizerCore.scala b/linker/shared/src/main/scala/org/scalajs/linker/frontend/optimizer/OptimizerCore.scala index c0fd013511..65263d4fd4 100644 --- a/linker/shared/src/main/scala/org/scalajs/linker/frontend/optimizer/OptimizerCore.scala +++ b/linker/shared/src/main/scala/org/scalajs/linker/frontend/optimizer/OptimizerCore.scala @@ -143,18 +143,6 @@ private[optimizer] abstract class OptimizerCore( private val useRuntimeLong = !config.coreSpec.esFeatures.allowBigIntsForLongs && !isWasm - /** The record type for inlined `RuntimeLong`. */ - private lazy val inlinedRTLongStructure = - tryNewInlineableClass(LongImpl.RuntimeLongClass).get - - /** The name of the `lo` field of in the record type of `RuntimeLong`. 
*/ - private lazy val inlinedRTLongLoField = - inlinedRTLongStructure.recordType.fields(0).name - - /** The name of the `lo` field of in the record type of `RuntimeLong`. */ - private lazy val inlinedRTLongHiField = - inlinedRTLongStructure.recordType.fields(1).name - private val intrinsics = Intrinsics.buildIntrinsics(config.coreSpec.esFeatures, isWasm) @@ -267,24 +255,7 @@ private[optimizer] abstract class OptimizerCore( assert(lhs != VoidType) assert(rhs != VoidType) - Types.isSubtype(lhs, rhs)(isSubclassFun) || { - (lhs, rhs) match { - case (LongType, ClassType(LongImpl.RuntimeLongClass, _)) => - true - case (ClassType(LongImpl.RuntimeLongClass, false), LongType) => - true - case (ClassType(BoxedLongClass, lhsNullable), - ClassType(LongImpl.RuntimeLongClass, rhsNullable)) => - rhsNullable || !lhsNullable - - case (ClassType(LongImpl.RuntimeLongClass, lhsNullable), - ClassType(BoxedLongClass, rhsNullable)) => - rhsNullable || !lhsNullable - - case _ => - false - } - } + Types.isSubtype(lhs, rhs)(isSubclassFun) } /** Transforms a statement. @@ -717,6 +688,11 @@ private[optimizer] abstract class OptimizerCore( case LoadJSConstructor(className) => transformJSLoadCommon(ImportTarget.Class(className), tree) + // Transients + + case Transient(PackLong(lo, hi)) => + Transient(PackLong(transformExpr(lo), transformExpr(hi))) + // Trees that need not be transformed case _:Skip | _:Debugger | _:StoreModule | @@ -1082,6 +1058,19 @@ private[optimizer] abstract class OptimizerCore( } } + + case Transient(PackLong(lo, hi)) => + pretransformExprs(lo, hi) { (tlo, thi) => + val loBinding = Binding.temp(LocalName("lo"), tlo) + val hiBinding = Binding.temp(LocalName("hi"), thi) + withNewLocalDefs(List(loBinding, hiBinding)) { (localDefs, cont1) => + val List(loLocalDef, hiLocalDef) = localDefs + val pairLocalDef = LocalDef(RefinedType(LongType), mutable = false, + LongPairReplacement(loLocalDef, hiLocalDef)) + cont1(pairLocalDef.toPreTransform) + } (cont) + } + case _ => cont(transformExpr(tree).toPreTransform) } @@ -1226,17 +1215,6 @@ private[optimizer] abstract class OptimizerCore( assert(!isLhsOfAssign || fieldLocalDef.mutable, s"assign to immutable field at $pos") cont(fieldLocalDef.toPreTransform) - // Select the lo or hi "field" of a Long literal - case PreTransLit(LongLiteral(value)) if useRuntimeLong => - val itemName = field.name - assert(itemName.simpleName == inlinedRTLongLoField || - itemName.simpleName == inlinedRTLongHiField) - assert(expectedType == IntType) - val resultValue = - if (itemName.simpleName == inlinedRTLongLoField) value.toInt - else (value >>> 32).toInt - cont(PreTransLit(IntLiteral(resultValue))) - case _ => def default: TailRec[Tree] = { resolveLocalDef(preTransQual) match { @@ -1317,28 +1295,14 @@ private[optimizer] abstract class OptimizerCore( resolveLocalDef(tlhs) match { case PreTransRecordTree(lhsTree, lhsStructure, lhsCancelFun) => - def buildInner(trhs: PreTransform): TailRec[Tree] = { - resolveLocalDef(trhs) match { - case PreTransRecordTree(rhsTree, rhsStructure, rhsCancelFun) => - if (!lhsStructure.sameClassAs(rhsStructure)) - lhsCancelFun() - assert(rhsTree.tpe == lhsTree.tpe) - contAssign(lhsTree, rhsTree) - case _ => + resolveLocalDef(trhs) match { + case PreTransRecordTree(rhsTree, rhsStructure, rhsCancelFun) => + if (!lhsStructure.sameClassAs(rhsStructure)) lhsCancelFun() - } - } - - if (lhsStructure.className == LongImpl.RuntimeLongClass && trhs.tpe.base == LongType) { - /* The lhs is a stack-allocated RuntimeLong, but the rhs is a - * primitive Long. 
We expand the primitive Long into a new - * stack-allocated RuntimeLong so that we do not need to cancel. - */ - expandLongValue(trhs) { expandedRhs => - buildInner(expandedRhs) - } - } else { - buildInner(trhs) + assert(rhsTree.tpe == lhsTree.tpe) + contAssign(lhsTree, rhsTree) + case _ => + lhsCancelFun() } case PreTransTree(lhsTree, _) => @@ -1476,32 +1440,6 @@ private[optimizer] abstract class OptimizerCore( BinaryOp(op, finishTransformExpr(lhs), finishTransformExpr(rhs)) case PreTransLocalDef(localDef) => localDef.newReplacement - - /* In general, it is not OK to allocate a new instance of an inlined - * class from its record value, because that can break object identity - * (not to mention we have no idea what the primary constructor does). - * However, for RuntimeLong specifically, it is OK. It is useful to do - * so because it allows us not to cancel the original stack allocation - * of the Long value, which means that all previous usages of it can - * stay on stack. - * - * We do something similar in LocalDef.newReplacement. - */ - case PreTransRecordTree(tree, structure, _) - if structure.className == LongImpl.RuntimeLongClass => - tree match { - case RecordValue(_, List(lo, hi)) => - createNewLong(lo, hi) - case recordVarRef: VarRef => - createNewLong(recordVarRef) - case _ => - val varRefIdent = LocalIdent( - freshLocalNameWithoutOriginalName(LocalName("x"), mutable = false)) - val recordVarDef = - VarDef(varRefIdent, NoOriginalName, tree.tpe, mutable = false, tree) - Block(recordVarDef, createNewLong(recordVarDef.ref)) - } - case PreTransRecordTree(_, _, cancelFun) => cancelFun() case PreTransTree(tree, _) => @@ -2195,11 +2133,7 @@ private[optimizer] abstract class OptimizerCore( private def boxedClassForType(tpe: Type): ClassName = (tpe: @unchecked) match { case ClassType(className, _) => - if (className == BoxedLongClass && useRuntimeLong) - LongImpl.RuntimeLongClass - else - className - + className case AnyType | AnyNotNullType | _:ArrayType => ObjectClass @@ -2209,14 +2143,10 @@ private[optimizer] abstract class OptimizerCore( case ByteType => BoxedByteClass case ShortType => BoxedShortClass case IntType => BoxedIntegerClass - - case LongType => - if (useRuntimeLong) LongImpl.RuntimeLongClass - else BoxedLongClass - - case FloatType => BoxedFloatClass - case DoubleType => BoxedDoubleClass - case StringType => BoxedStringClass + case LongType => BoxedLongClass + case FloatType => BoxedFloatClass + case DoubleType => BoxedDoubleClass + case StringType => BoxedStringClass } private def pretransformStaticApply(tree: ApplyStatically, isStat: Boolean, @@ -2584,33 +2514,22 @@ private[optimizer] abstract class OptimizerCore( false } - def isLocalOnlyInlineType(tpe: RefinedType): Boolean = { - /* RuntimeLong is @inline so that *local* box/unbox pairs and instances - * can be eliminated. But we don't want that to force inlining of a - * method only because we pass it an instance of RuntimeLong. 
- */ - tpe.base match { - case ClassType(LongImpl.RuntimeLongClass, _) => true - case _ => false - } - } - def isLikelyOptimizable(arg: PreTransform): Boolean = arg match { case PreTransBlock(_, result) => isLikelyOptimizable(result) case PreTransLocalDef(localDef) => - (localDef.replacement match { + localDef.replacement match { case _: TentativeClosureReplacement => true case _: ReplaceWithRecordVarRef => true case _: InlineClassBeingConstructedReplacement => true case _: InlineClassInstanceReplacement => true case _ => isTypeLikelyOptimizable(localDef.tpe) - }) && !isLocalOnlyInlineType(localDef.tpe) + } case PreTransRecordTree(_, _, _) => - !isLocalOnlyInlineType(arg.tpe) + true case _ => isTypeLikelyOptimizable(arg.tpe) @@ -2949,15 +2868,23 @@ private[optimizer] abstract class OptimizerCore( } case LongToString => - pretransformApplyStatic(ApplyFlags.empty, LongImpl.RuntimeLongClass, - MethodIdent(LongImpl.toString_), targs, StringClassType, - isStat, usePreTransform)( - cont) + val List(targ) = targs + withSplitLong(targ) { (targLo, targHi, cont1) => + pretransformApplyStatic(ApplyFlags.empty, LongImpl.RuntimeLongClass, + MethodIdent(LongImpl.toString_), List(targLo, targHi), StringClassType, + isStat, usePreTransform)( + cont1) + } (cont) case LongCompare => - pretransformApplyStatic(ApplyFlags.empty, LongImpl.RuntimeLongClass, - MethodIdent(LongImpl.compare), targs, IntType, - isStat, usePreTransform)( - cont) + val List(tlhs, trhs) = targs + withSplitLong(tlhs) { (tlhsLo, tlhsHi, cont1) => + withSplitLong(trhs) { (trhsLo, trhsHi, cont2) => + pretransformApplyStatic(ApplyFlags.empty, LongImpl.RuntimeLongClass, + MethodIdent(LongImpl.compare), List(tlhsLo, tlhsHi, trhsLo, trhsHi), IntType, + isStat, usePreTransform)( + cont2) + } (cont1) + } (cont) // java.lang.Character @@ -2998,11 +2925,13 @@ private[optimizer] abstract class OptimizerCore( // java.lang.Math case MathAbsLong => - pretransformApplyStatic(ApplyFlags.empty, LongImpl.RuntimeLongClass, - MethodIdent(LongImpl.abs), targs, - ClassType(LongImpl.RuntimeLongClass, nullable = true), - isStat, usePreTransform)( - cont) + val List(targ) = targs + withSplitLong(targ) { (targLo, targHi, cont1) => + pretransformApplyStatic(ApplyFlags.empty, LongImpl.RuntimeLongClass, + MethodIdent(LongImpl.abs), List(targLo, targHi), LongType, + isStat, usePreTransform)( + cont1) + } (cont) case MathAbsFloat => contTree(wasmUnaryOp(WasmUnaryOp.F32Abs, targs.head)) @@ -3032,7 +2961,7 @@ private[optimizer] abstract class OptimizerCore( LongImpl.RuntimeLongClass, MethodIdent(LongImpl.multiplyFull), targs, - ClassType(LongImpl.RuntimeLongClass, nullable = true), + LongType, isStat, usePreTransform)( cont) } @@ -3524,82 +3453,74 @@ private[optimizer] abstract class OptimizerCore( } } - private def expandLongValue(value: PreTransform)(cont: PreTransCont)( - implicit scope: Scope, pos: Position): TailRec[Tree] = { - - assert(useRuntimeLong) - - /* To force the expansion, we first store the `value` in a temporary - * variable of type `RuntimeLong!` (not `Long`, otherwise we would go into - * infinite recursion), then we create a `new RuntimeLong` with its lo and - * hi part. Basically, we're doing: - * - * val t: RuntimeLong! 
= value - * new RuntimeLong(t.lo__I(), t.hi__I()) - */ - val tName = LocalName("t") - val rtLongClassType = ClassType(LongImpl.RuntimeLongClass, nullable = false) - val rtLongBinding = Binding.temp(tName, rtLongClassType, mutable = false, - value) - withBinding(rtLongBinding) { (scope1, cont1) => - implicit val scope = scope1 - val tRef = VarRef(tName)(rtLongClassType) - - val lo = Apply(ApplyFlags.empty, tRef, MethodIdent(LongImpl.lo), Nil)(IntType) - val hi = Apply(ApplyFlags.empty, tRef, MethodIdent(LongImpl.hi), Nil)(IntType) - - pretransformExprs(lo, hi) { (tlo, thi) => - inlineClassConstructor(AllocationSite.Anonymous, LongImpl.RuntimeLongClass, - inlinedRTLongStructure, MethodIdent(LongImpl.initFromParts), List(tlo, thi), - () => throw new AssertionError(s"rolled-back RuntimeLong inlining at $pos"))(cont1) - } - } (cont) - } - private def expandLongOps(pretrans: PreTransform)(cont: PreTransCont)( implicit scope: Scope): TailRec[Tree] = { implicit val pos = pretrans.pos - def expand(methodName: MethodName, targs: PreTransform*): TailRec[Tree] = { + def expand(methodName: MethodName, targs: PreTransform*)(cont: PreTransCont): TailRec[Tree] = { val impl = staticCall(LongImpl.RuntimeLongClass, MemberNamespace.PublicStatic, methodName) pretransformSingleDispatch(ApplyFlags.empty, impl, None, targs.toList, isStat = false, usePreTransform = true)(cont)( throw new AssertionError(s"failed to inline RuntimeLong method $methodName at $pos")) } + def expandNoSplit(methodName: MethodName, targ: PreTransform): TailRec[Tree] = { + expand(methodName, targ)(cont) + } + + def expandSplit1(methodName: MethodName, targ: PreTransform): TailRec[Tree] = { + withSplitLong(targ) { (tlo, thi, cont1) => + expand(methodName, tlo, thi)(cont1) + } (cont) + } + + def expandSplit2(methodName: MethodName, tlhs: PreTransform, trhs: PreTransform): TailRec[Tree] = { + withSplitLong(tlhs) { (tlhsLo, tlhsHi, cont1) => + withSplitLong(trhs) { (trhsLo, trhsHi, cont2) => + expand(methodName, tlhsLo, tlhsHi, trhsLo, trhsHi)(cont2) + } (cont1) + } (cont) + } + + def expandSplitLeft(methodName: MethodName, tlhs: PreTransform, trhs: PreTransform): TailRec[Tree] = { + withSplitLong(tlhs) { (tlhsLo, tlhsHi, cont1) => + expand(methodName, tlhsLo, tlhsHi, trhs)(cont1) + } (cont) + } + pretrans match { case PreTransUnaryOp(op, arg) if useRuntimeLong => import UnaryOp._ (op: @switch) match { case IntToLong => - expand(LongImpl.fromInt, arg) + expandNoSplit(LongImpl.fromInt, arg) case LongToInt => - expand(LongImpl.toInt, arg) + expandSplit1(LongImpl.toInt, arg) case LongToDouble => - expand(LongImpl.toDouble, arg) + expandSplit1(LongImpl.toDouble, arg) case DoubleToLong => - expand(LongImpl.fromDouble, arg) + expandNoSplit(LongImpl.fromDouble, arg) case LongToFloat => - expand(LongImpl.toFloat, arg) + expandSplit1(LongImpl.toFloat, arg) case Double_toBits if config.coreSpec.esFeatures.esVersion >= ESVersion.ES2015 => expand(LongImpl.fromDoubleBits, - arg, PreTransTree(Transient(GetFPBitsDataView))) + arg, PreTransTree(Transient(GetFPBitsDataView)))(cont) case Double_fromBits if config.coreSpec.esFeatures.esVersion >= ESVersion.ES2015 => - expand(LongImpl.bitsToDouble, + expandSplitLeft(LongImpl.bitsToDouble, arg, PreTransTree(Transient(GetFPBitsDataView))) case Long_clz => - expand(LongImpl.clz, arg) + expandSplit1(LongImpl.clz, arg) case UnsignedIntToLong => - expand(LongImpl.fromUnsignedInt, arg) + expandNoSplit(LongImpl.fromUnsignedInt, arg) case _ => cont(pretrans) @@ -3609,34 +3530,34 @@ private[optimizer] abstract class 
OptimizerCore( import BinaryOp._ (op: @switch) match { - case Long_+ => expand(LongImpl.add, lhs, rhs) - case Long_- => expand(LongImpl.sub, lhs, rhs) - case Long_* => expand(LongImpl.mul, lhs, rhs) - case Long_/ => expand(LongImpl.divide, lhs, rhs) - case Long_% => expand(LongImpl.remainder, lhs, rhs) - - case Long_& => expand(LongImpl.and, lhs, rhs) - case Long_| => expand(LongImpl.or, lhs, rhs) - case Long_^ => expand(LongImpl.xor, lhs, rhs) - - case Long_<< => expand(LongImpl.shl, lhs, rhs) - case Long_>>> => expand(LongImpl.shr, lhs, rhs) - case Long_>> => expand(LongImpl.sar, lhs, rhs) - - case Long_== => expand(LongImpl.equals_, lhs, rhs) - case Long_!= => expand(LongImpl.notEquals, lhs, rhs) - case Long_< => expand(LongImpl.lt, lhs, rhs) - case Long_<= => expand(LongImpl.le, lhs, rhs) - case Long_> => expand(LongImpl.gt, lhs, rhs) - case Long_>= => expand(LongImpl.ge, lhs, rhs) - - case Long_unsigned_/ => expand(LongImpl.divideUnsigned, lhs, rhs) - case Long_unsigned_% => expand(LongImpl.remainderUnsigned, lhs, rhs) - - case Long_unsigned_< => expand(LongImpl.ltu, lhs, rhs) - case Long_unsigned_<= => expand(LongImpl.leu, lhs, rhs) - case Long_unsigned_> => expand(LongImpl.gtu, lhs, rhs) - case Long_unsigned_>= => expand(LongImpl.geu, lhs, rhs) + case Long_+ => expandSplit2(LongImpl.add, lhs, rhs) + case Long_- => expandSplit2(LongImpl.sub, lhs, rhs) + case Long_* => expandSplit2(LongImpl.mul, lhs, rhs) + case Long_/ => expandSplit2(LongImpl.divide, lhs, rhs) + case Long_% => expandSplit2(LongImpl.remainder, lhs, rhs) + + case Long_& => expandSplit2(LongImpl.and, lhs, rhs) + case Long_| => expandSplit2(LongImpl.or, lhs, rhs) + case Long_^ => expandSplit2(LongImpl.xor, lhs, rhs) + + case Long_<< => expandSplitLeft(LongImpl.shl, lhs, rhs) + case Long_>>> => expandSplitLeft(LongImpl.shr, lhs, rhs) + case Long_>> => expandSplitLeft(LongImpl.sar, lhs, rhs) + + case Long_== => expandSplit2(LongImpl.equals_, lhs, rhs) + case Long_!= => expandSplit2(LongImpl.notEquals, lhs, rhs) + case Long_< => expandSplit2(LongImpl.lt, lhs, rhs) + case Long_<= => expandSplit2(LongImpl.le, lhs, rhs) + case Long_> => expandSplit2(LongImpl.gt, lhs, rhs) + case Long_>= => expandSplit2(LongImpl.ge, lhs, rhs) + + case Long_unsigned_/ => expandSplit2(LongImpl.divideUnsigned, lhs, rhs) + case Long_unsigned_% => expandSplit2(LongImpl.remainderUnsigned, lhs, rhs) + + case Long_unsigned_< => expandSplit2(LongImpl.ltu, lhs, rhs) + case Long_unsigned_<= => expandSplit2(LongImpl.leu, lhs, rhs) + case Long_unsigned_> => expandSplit2(LongImpl.gtu, lhs, rhs) + case Long_unsigned_>= => expandSplit2(LongImpl.geu, lhs, rhs) case _ => cont(pretrans) @@ -3920,8 +3841,6 @@ private[optimizer] abstract class OptimizerCore( PreTransTree(Block(finishTransformStat(arg), ClassOf(typeRef))) arg.tpe match { - case RefinedType(ClassType(LongImpl.RuntimeLongClass, false), true) => - constant(ClassRef(BoxedLongClass)) case RefinedType(ClassType(className, false), true) => constant(ClassRef(className)) case RefinedType(ArrayType(arrayTypeRef, false), true) => @@ -4029,9 +3948,9 @@ private[optimizer] abstract class OptimizerCore( * compared with `equals()` instead of `==` so that `NaN === NaN` and * `+0.0 !== -0.0`. * - * Chars and Longs, however, never compare as `===`, since they are boxed - * chars and instances of `RuntimeLong`, respectively---unless we are using - * `BigInt`s for `Long`s, in which case those can be `===`. 
+ * Chars and Longs, however, never compare as `===`, since they are + * boxed---unless we are using `BigInt`s for `Long`s, in which case Longs + * can be `===`. */ private def literal_===(lhs: Literal, rhs: Literal): Boolean = { object AnyNumLiteral { @@ -5376,7 +5295,11 @@ private[optimizer] abstract class OptimizerCore( val castTpe = RefinedType(tpe1, isExact = false, arg.tpe.allocationSite) - val isCastFreeAtRunTime = tpe != CharType + val isCastFreeAtRunTime = tpe match { + case CharType => false + case LongType => !useRuntimeLong + case _ => true + } if (isCastFreeAtRunTime) { // Try to push the cast down to usages of LocalDefs, in order to preserve aliases @@ -5683,6 +5606,46 @@ private[optimizer] abstract class OptimizerCore( } (cont) } + private def withSplitLong(tlongExpr: PreTransform)( + buildInner: (PreTransform, PreTransform, PreTransCont) => TailRec[Tree])( + cont: PreTransCont)( + implicit scope: Scope, pos: Position): TailRec[Tree] = { + + assert(useRuntimeLong) + + tlongExpr match { + case PreTransLit(LongLiteral(longValue)) => + val (loValue, hiValue) = LongImpl.extractParts(longValue) + val tlo = PreTransLit(IntLiteral(loValue)(tlongExpr.pos)) + val thi = PreTransLit(IntLiteral(hiValue)(tlongExpr.pos)) + buildInner(tlo, thi, cont) + + case _ => + // For other pretransforms, we need to evaluate them in a temporary. + withNewTempLocalDef(tlongExpr) { (localDef, cont1) => + val (lo, hi) = localDef.replacement match { + case LongPairReplacement(lo, hi) => + // Already split; directly access the underlying LocalDefs + (lo, hi) + case _ => + val lo = LocalDef(RefinedType(IntType), mutable = false, + LongPartReplacement(localDef, isHiPart = false)) + val hi = LocalDef(RefinedType(IntType), mutable = false, + LongPartReplacement(localDef, isHiPart = true)) + (lo, hi) + } + buildInner(lo.toPreTransform, hi.toPreTransform, cont1) + } (cont) + } + } + + private def withNewTempLocalDef(texpr: PreTransform)( + buildInner: (LocalDef, PreTransCont) => TailRec[Tree])( + cont: PreTransCont)( + implicit scope: Scope): TailRec[Tree] = { + withNewLocalDef(Binding.temp(LocalName("x"), texpr))(buildInner)(cont) + } + private def withNewTempLocalDefs(texprs: List[PreTransform])( buildInner: (List[LocalDef], PreTransCont) => TailRec[Tree])( cont: PreTransCont)( @@ -5726,47 +5689,24 @@ private[optimizer] abstract class OptimizerCore( implicit val pos = value.pos def withDedicatedVar(tpe: RefinedType): TailRec[Tree] = { - val rtLongClassType = ClassType(LongImpl.RuntimeLongClass, nullable = false) - - if (tpe.base == LongType && declaredType.toNonNullable != rtLongClassType && - useRuntimeLong) { - /* If the value's type is a primitive Long, and the declared type is - * not RuntimeLong, we want to force the expansion of the primitive - * Long (which we know is in fact a RuntimeLong) into a local variable, - * and then its two components into a Record. This makes sure that all - * Longs are stack-allocated when they are put in a var/val, even if - * they came from a method call or other opaque sources, and also if a - * var is initialized with a literal long. - * - * We only do all that if the library contains a inlineable version of - * RuntimeLong. 
- */ - expandLongValue(value) { expandedValue => - val expandedBinding = Binding(bindingName, originalName, - rtLongClassType, mutable, expandedValue) - withNewLocalDef(expandedBinding)(buildInner)(cont) - } - } else { - // Otherwise, we effectively declare a new binding - val (newName, newOriginalName) = freshLocalName(bindingName, originalName, mutable) + val (newName, newOriginalName) = freshLocalName(bindingName, originalName, mutable) - val used = newSimpleState[IsUsed](Unused) + val used = newSimpleState[IsUsed](Unused) - val (replacement, refinedType) = resolveRecordStructure(value) match { - case Some((structure, cancelFun)) => - (ReplaceWithRecordVarRef(newName, structure, used, cancelFun), value.tpe) + val (replacement, refinedType) = resolveRecordStructure(value) match { + case Some((structure, cancelFun)) => + (ReplaceWithRecordVarRef(newName, structure, used, cancelFun), value.tpe) - case None => - (ReplaceWithVarRef(newName, used), tpe) - } + case None => + (ReplaceWithVarRef(newName, used), tpe) + } - val localDef = LocalDef(refinedType, mutable, replacement) - val preTransBinding = PreTransBinding(newOriginalName, localDef, value) + val localDef = LocalDef(refinedType, mutable, replacement) + val preTransBinding = PreTransBinding(newOriginalName, localDef, value) - buildInner(localDef, { tinner => - cont(addPreTransBinding(preTransBinding, tinner)) - }) - } + buildInner(localDef, { tinner => + cont(addPreTransBinding(preTransBinding, tinner)) + }) } if (value.tpe.isNothingType) { @@ -6158,15 +6098,6 @@ private[optimizer] object OptimizerCore { used.value = used.value.inc VarRef(name)(tpe.base) - /* Allocate an instance of RuntimeLong on the fly. - * See the comment in finishTransformExpr about why it is desirable and - * safe to do so. - */ - case ReplaceWithRecordVarRef(name, structure, used, _) - if tpe.base == ClassType(LongImpl.RuntimeLongClass, nullable = false) => - used.value = used.value.inc - createNewLong(VarRef(name)(structure.recordType)) - case ReplaceWithRecordVarRef(_, _, _, cancelFun) => cancelFun() @@ -6190,23 +6121,22 @@ private[optimizer] object OptimizerCore { case ReplaceWithConstant(value) => value + case LongPairReplacement(lo, hi) => + Transient(PackLong(lo.newReplacement, hi.newReplacement)) + + case LongPartReplacement(longLocalDef, isHiPart) => + val longTree = longLocalDef.newReplacement + if (isHiPart) + Transient(ExtractLongHi(longTree)) + else + UnaryOp(UnaryOp.LongToInt, longTree) + case TentativeClosureReplacement(_, _, _, _, _, _, _, cancelFun) => cancelFun() case InlineClassBeingConstructedReplacement(_, _, cancelFun) => cancelFun() - /* Allocate an instance of RuntimeLong on the fly. - * See the comment in finishTransformExpr about why it is desirable and - * safe to do so. 
- */ - case InlineClassInstanceReplacement(structure, fieldLocalDefs, _) - if tpe.base == ClassType(LongImpl.RuntimeLongClass, nullable = false) => - val List(loField, hiField) = structure.fieldNames - val lo = fieldLocalDefs(loField).newReplacement - val hi = fieldLocalDefs(hiField).newReplacement - createNewLong(lo, hi) - case InlineClassInstanceReplacement(_, _, cancelFun) => cancelFun() @@ -6218,6 +6148,10 @@ private[optimizer] object OptimizerCore { (this eq that) || (replacement match { case ReplaceWithOtherLocalDef(localDef) => localDef.contains(that) + case LongPairReplacement(lo, hi) => + lo.contains(that) || hi.contains(that) + case LongPartReplacement(longLocalDef, _) => + longLocalDef.contains(that) case TentativeClosureReplacement(_, _, _, _, _, captureLocalDefs, _, _) => captureLocalDefs.exists(_.contains(that)) case InlineClassBeingConstructedReplacement(_, fieldLocalDefs, _) => @@ -6272,6 +6206,14 @@ private[optimizer] object OptimizerCore { private final case class ReplaceWithConstant( value: Tree) extends LocalDefReplacement + private final case class LongPairReplacement( + lo: LocalDef, hi: LocalDef) + extends LocalDefReplacement + + private final case class LongPartReplacement( + longLocalDef: LocalDef, isHiPart: Boolean) + extends LocalDefReplacement + private final case class TentativeClosureReplacement( flags: ClosureFlags, captureParams: List[ParamDef], params: List[ParamDef], resultType: Type, body: Tree, @@ -6738,26 +6680,6 @@ private[optimizer] object OptimizerCore { Transient(Cast(innerExpr, tpe)) } - /** Creates a new instance of `RuntimeLong` from a record of its `lo` and - * `hi` parts. - */ - private def createNewLong(recordVarRef: VarRef)( - implicit pos: Position): Tree = { - - val RecordType(List(loField, hiField)) = recordVarRef.tpe - createNewLong( - RecordSelect(recordVarRef, SimpleFieldIdent(loField.name))(IntType), - RecordSelect(recordVarRef, SimpleFieldIdent(hiField.name))(IntType)) - } - - /** Creates a new instance of `RuntimeLong` from its `lo` and `hi` parts. */ - private def createNewLong(lo: Tree, hi: Tree)( - implicit pos: Position): Tree = { - - New(LongImpl.RuntimeLongClass, MethodIdent(LongImpl.initFromParts), - List(lo, hi)) - } - /** Tests whether `x + y` is valid without falling out of range. 
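   *
   * For instance, `canAddLongs(Long.MaxValue, -1L)` is true, while
   * `canAddLongs(Long.MaxValue, 1L)` is false because the sum wraps around
   * to `Long.MinValue`.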
*/ private def canAddLongs(x: Long, y: Long): Boolean = if (y >= 0) x+y >= x diff --git a/linker/shared/src/test/scala/org/scalajs/linker/EmitterTest.scala b/linker/shared/src/test/scala/org/scalajs/linker/EmitterTest.scala index 2a473807ec..93f8cc115f 100644 --- a/linker/shared/src/test/scala/org/scalajs/linker/EmitterTest.scala +++ b/linker/shared/src/test/scala/org/scalajs/linker/EmitterTest.scala @@ -178,7 +178,7 @@ class EmitterTest { val Seq(classCacheReused2, classCacheInvalidated2) = lines2.assertContainsMatch(EmitterClassTreeCacheStatsMessage).map(_.toInt) - // At the time of writing this test, classCacheInvalidated1 reports 47 + // As of the latest update to this test, classCacheInvalidated1 reports 46 assertTrue( s"Not enough invalidated class caches (got $classCacheInvalidated1); extraction must have gone wrong", classCacheInvalidated1 > 40) @@ -196,10 +196,10 @@ class EmitterTest { val Seq(methodCacheReused2, methodCacheInvalidated2) = lines2.assertContainsMatch(EmitterMethodTreeCacheStatsMessage).map(_.toInt) - // At the time of writing this test, methodCacheInvalidated1 reports 107 + // As of the latest update to this test, methodCacheInvalidated1 reports 100 assertTrue( s"Not enough invalidated method caches (got $methodCacheInvalidated1); extraction must have gone wrong", - methodCacheInvalidated1 > 100) + methodCacheInvalidated1 > 95) assertEquals("First run must not reuse any method cache", 0, methodCacheReused1) @@ -214,10 +214,10 @@ class EmitterTest { val Seq(prePrints2) = lines2.assertContainsMatch(EmitterPrePrintsStatsMessage).map(_.toInt) - // At the time of writing this test, prePrints1 reports 188 + // As of the latest update to this test, prePrints1 reports 176 assertTrue( s"Not enough pre prints (got $prePrints1); extraction must have gone wrong", - prePrints1 > 180) + prePrints1 > 170) assertEquals("Second run may not have pre prints", 0, prePrints2) } diff --git a/linker/shared/src/test/scala/org/scalajs/linker/LibrarySizeTest.scala b/linker/shared/src/test/scala/org/scalajs/linker/LibrarySizeTest.scala index 39925a5cb2..d0fb2f29ed 100644 --- a/linker/shared/src/test/scala/org/scalajs/linker/LibrarySizeTest.scala +++ b/linker/shared/src/test/scala/org/scalajs/linker/LibrarySizeTest.scala @@ -70,9 +70,9 @@ class LibrarySizeTest { ) testLinkedSizes( - expectedFastLinkSize = 147548, - expectedFullLinkSizeWithoutClosure = 87296, - expectedFullLinkSizeWithClosure = 20680, + expectedFastLinkSize = 142551, + expectedFullLinkSizeWithoutClosure = 87383, + expectedFullLinkSizeWithClosure = 19917, classDefs, moduleInitializers = MainTestModuleInitializers ) diff --git a/project/Build.scala b/project/Build.scala index 0883e88c59..c17347c1be 100644 --- a/project/Build.scala +++ b/project/Build.scala @@ -2053,34 +2053,34 @@ object Build { case `default212Version` => if (!useMinifySizes) { Some(ExpectedSizes( - fastLink = 624000 to 625000, + fastLink = 618000 to 619000, fullLink = 94000 to 95000, fastLinkGz = 75000 to 79000, fullLinkGz = 24000 to 25000, )) } else { Some(ExpectedSizes( - fastLink = 425000 to 426000, + fastLink = 424000 to 425000, fullLink = 282000 to 283000, fastLinkGz = 60000 to 61000, - fullLinkGz = 43000 to 44000, + fullLinkGz = 44000 to 45000, )) } case `default213Version` => if (!useMinifySizes) { Some(ExpectedSizes( - fastLink = 442000 to 443000, + fastLink = 436000 to 437000, fullLink = 90000 to 91000, fastLinkGz = 57000 to 58000, fullLinkGz = 24000 to 25000, )) } else { Some(ExpectedSizes( - fastLink = 301000 to 302000, - fullLink = 258000 to 259000, 
+ fastLink = 300000 to 301000, + fullLink = 259000 to 260000, fastLinkGz = 47000 to 48000, - fullLinkGz = 42000 to 43000, + fullLinkGz = 43000 to 44000, )) } @@ -2547,6 +2547,9 @@ object Build { testSuiteExCommonSettings(isJSTest = true), name := "Scala.js test suite ex", publishArtifact in Compile := false, + + // FIXME Closure breaks the new Longs in this project + Test/fullLinkJS/scalaJSLinkerConfig ~= { _.withClosureCompiler(false) }, ).withScalaJSCompiler.withScalaJSJUnitPlugin.dependsOnLibrary.dependsOn( javalibExtDummies, jUnitRuntime, testBridge % "test", testSuite ) @@ -2740,6 +2743,9 @@ object Build { NoIDEExport.noIDEExportSettings, testOptions += Tests.Argument(TestFrameworks.JUnit, "-a", "-s"), + + // FIXME Closure breaks the new Longs in this project + Test/fullLinkJS/scalaJSLinkerConfig ~= { _.withClosureCompiler(false) }, ).zippedSettings(partest)(partest => unmanagedSources in Compile ++= { val scalaV = scalaVersion.value diff --git a/test-suite/shared/src/test/scala/org/scalajs/testsuite/compiler/LongTest.scala b/test-suite/shared/src/test/scala/org/scalajs/testsuite/compiler/LongTest.scala index 108dc817de..19dbf82e98 100644 --- a/test-suite/shared/src/test/scala/org/scalajs/testsuite/compiler/LongTest.scala +++ b/test-suite/shared/src/test/scala/org/scalajs/testsuite/compiler/LongTest.scala @@ -44,6 +44,8 @@ class LongTest { // Common values + private final val SignBit = Long.MinValue + def MaxVal: Long = lg(0xffffffff, 0x7fffffff) def MinVal: Long = lg(0, 0x80000000) def IntMaxVal: Long = lg(Int.MaxValue) @@ -878,6 +880,49 @@ class LongTest { test(lg(1839280888, -168388422), lg(-1645740821, -1967920957), 1) } + @Test def unsignedComparisons(): Unit = { + @inline def testInner(x: Long, y: Long, expected: Int): Unit = { + /* Do not factor out (xy ^ SignBit). Otherwise, the compiler + * transformation won't apply. + */ + assertEquals(expected == 0, x == y) + assertEquals(expected != 0, x != y) + assertEquals(expected < 0, (x ^ SignBit) < (y ^ SignBit)) + assertEquals(expected <= 0, (x ^ SignBit) <= (y ^ SignBit)) + assertEquals(expected > 0, (x ^ SignBit) > (y ^ SignBit)) + assertEquals(expected >= 0, (x ^ SignBit) >= (y ^ SignBit)) + assertEquals(expected, java.lang.Long.compareUnsigned(x, y).signum) + } + + @inline def test(x: Long, y: Long, expected: Int): Unit = { + testInner(x, y, expected) + testInner(hideFromOptimizer(x), y, expected) + testInner(x, hideFromOptimizer(y), expected) + testInner(hideFromOptimizer(x), hideFromOptimizer(y), expected) + } + + test(0L, 0L, 0) + test(0L, 1L, -1) + test(1L, 0L, 1) + test(0L, -1L, -1) + test(MaxVal, MinVal, -1) + test(MinVal + 2L, MaxVal - 5L, 1) + test(MaxVal, MinVal, -1) + test(MinVal, MaxVal, 1) + test(-1L, MaxVal, 1) + test(MinVal, -1L, -1) + + // Numbers requiring lo to be compared via unsigned + test(0x654789ab87654321L, 0x654789ab87654321L, 0) + test(0x654789ab87654321L, 0x654789ab12345678L, 1) + test(0x89abcdef87654321L, 0x89abcdef12345678L, 1) + + // Numbers requiring hi to be compared via unsigned + test(0x87654321abcdef00L, 0x87654321abcdef00L, 0) + test(0x87654321abcdef00L, 0x12345678abcdef00L, 1) + test(0x87654321654789abL, 0x12345678654789abL, 1) + } + @Test def bitwiseNot(): Unit = { @inline def test(expected: Long, x: Long): Unit = { assertEquals(expected, ~x)