diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp
index 790a6869084258..2dbcf1564445fe 100644
--- a/src/coreclr/jit/gentree.cpp
+++ b/src/coreclr/jit/gentree.cpp
@@ -19899,11 +19899,6 @@ bool GenTree::IsArrayAddr(GenTreeArrAddr** pArrAddr)
 //
 bool GenTree::SupportsSettingZeroFlag()
 {
-    if (SupportsSettingResultFlags())
-    {
-        return true;
-    }
-
 #if defined(TARGET_XARCH)
     if (OperIs(GT_LSH, GT_RSH, GT_RSZ, GT_ROL, GT_ROR))
     {
@@ -19922,42 +19917,18 @@
         return true;
     }
 #endif
-#endif
-
-    return false;
-}
-
-//------------------------------------------------------------------------
-// SupportsSettingResultFlags: Returns true if this is an arithmetic operation
-// whose codegen supports setting the carry, overflow, zero and sign flags based
-// on the result of the operation.
-//
-// Return Value:
-//    True if so. A false return does not imply that codegen for the node will
-//    not trash the result flags.
-//
-// Remarks:
-//    For example, for GT (AND x y) 0, arm64 can emit instructions that
-//    directly set the flags after the 'AND' and thus no comparison is needed.
-//
-//    The backend expects any node for which the flags will be consumed to be
-//    marked with GTF_SET_FLAGS.
-//
-bool GenTree::SupportsSettingResultFlags()
-{
-#if defined(TARGET_ARM64)
+#elif defined(TARGET_ARM64)
     if (OperIs(GT_AND, GT_AND_NOT))
     {
         return true;
     }
 
-    // We do not support setting result flags if neg has a contained mul
+    // We do not support setting the zero flag for neg with a contained mul (mneg).
     if (OperIs(GT_NEG) && (!gtGetOp1()->OperIs(GT_MUL) || !gtGetOp1()->isContained()))
     {
         return true;
     }
 
-    // We do not support setting result flags for madd/msub. 
     if (OperIs(GT_ADD, GT_SUB) && (!gtGetOp2()->OperIs(GT_MUL) || !gtGetOp2()->isContained()))
     {
         return true;
@@ -19967,6 +19938,19 @@ bool GenTree::SupportsSettingResultFlags()
     return false;
 }
 
+//------------------------------------------------------------------------
+// SupportsSettingFlagsAsCompareToZero: Returns true if we support setting
+// flags for compare to zero operations.
+//
+bool GenTree::SupportsSettingFlagsAsCompareToZero()
+{
+#if defined(TARGET_ARMARCH)
+    return OperIs(GT_AND, GT_AND_NOT);
+#else
+    return false;
+#endif
+}
+
 //------------------------------------------------------------------------
 // Create: Create or retrieve a field sequence for the given field handle.
 //
diff --git a/src/coreclr/jit/gentree.h b/src/coreclr/jit/gentree.h
index b76b6f1bc5049e..e581e8a1147e25 100644
--- a/src/coreclr/jit/gentree.h
+++ b/src/coreclr/jit/gentree.h
@@ -2070,7 +2070,7 @@ struct GenTree
 
     bool SupportsSettingZeroFlag();
 
-    bool SupportsSettingResultFlags();
+    bool SupportsSettingFlagsAsCompareToZero();
 
     // These are only used for dumping.
     // The GetRegNum() is only valid in LIR, but the dumping methods are not easily
diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp
index 25e5db40c20c6c..53d8938badd34d 100644
--- a/src/coreclr/jit/lower.cpp
+++ b/src/coreclr/jit/lower.cpp
@@ -4435,11 +4435,10 @@ GenTree* Lowering::OptimizeConstCompare(GenTree* cmp)
     }
 
     // Optimize EQ/NE/GT/GE/LT/LE(op_that_sets_zf, 0) into op_that_sets_zf with GTF_SET_FLAGS + SETCC.
-    // For GT/GE/LT/LE don't allow ADD/SUB, runtime has to check for overflow.
     LIR::Use use;
     if (((cmp->OperIs(GT_EQ, GT_NE) && op2->IsIntegralConst(0) && op1->SupportsSettingZeroFlag()) ||
-         (cmp->OperIs(GT_GT, GT_GE, GT_LT, GT_LE) && op2->IsIntegralConst(0) && !op1->OperIs(GT_ADD, GT_SUB) &&
-          op1->SupportsSettingResultFlags())) &&
+         (cmp->OperIs(GT_GT, GT_GE, GT_LT, GT_LE) && op2->IsIntegralConst(0) &&
+          op1->SupportsSettingFlagsAsCompareToZero())) &&
         BlockRange().TryGetUse(cmp, &use) && IsProfitableToSetZeroFlag(op1))
     {
         op1->gtFlags |= GTF_SET_FLAGS;
diff --git a/src/tests/JIT/opt/InstructionCombining/Neg.cs b/src/tests/JIT/opt/InstructionCombining/Neg.cs
index b6a3e8ab8a9f07..3086d3976739db 100644
--- a/src/tests/JIT/opt/InstructionCombining/Neg.cs
+++ b/src/tests/JIT/opt/InstructionCombining/Neg.cs
@@ -115,6 +115,16 @@ public static int CheckNeg()
             fail = true;
         }
 
+        if (NegsGreaterThanIntMinValue())
+        {
+            fail = true;
+        }
+
+        if (NegsGreaterThanLongMinValue())
+        {
+            fail = true;
+        }
+
         if (fail)
         {
             return 101;
@@ -266,33 +276,65 @@ static bool NegsBinOpSingleLine(int a, int b)
         //ARM64-FULL-LINE: negs {{w[0-9]+}}, {{w[0-9]+}}, LSL #1
         return (-(a >> 1) != 0) | (-(b << 1) != 0);
     }
-    
+
     [MethodImpl(MethodImplOptions.NoInlining)]
     static bool NegsGreaterThan(int a)
     {
-        //ARM64-FULL-LINE: negs {{w[0-9]+}}, {{w[0-9]+}}
+        //ARM64-FULL-LINE: neg {{w[0-9]+}}, {{w[0-9]+}}
+        //ARM64-FULL-LINE: cmp {{w[0-9]+}}, #0
         return -a > 0;
     }
 
     [MethodImpl(MethodImplOptions.NoInlining)]
     static bool NegsGreaterThanEq(int a)
     {
-        //ARM64-FULL-LINE: negs {{w[0-9]+}}, {{w[0-9]+}}
+        //ARM64-FULL-LINE: neg {{w[0-9]+}}, {{w[0-9]+}}
+        //ARM64-FULL-LINE: cmp {{w[0-9]+}}, #0
         return -a >= 0;
     }
 
     [MethodImpl(MethodImplOptions.NoInlining)]
     static bool NegsLessThan(int a)
     {
-        //ARM64-FULL-LINE: negs {{w[0-9]+}}, {{w[0-9]+}}
+        //ARM64-FULL-LINE: neg {{w[0-9]+}}, {{w[0-9]+}}
+        //ARM64-FULL-LINE: lsr {{w[0-9]+}}, {{w[0-9]+}}, #31
         return -a < 0;
     }
 
     [MethodImpl(MethodImplOptions.NoInlining)]
     static bool NegsLessThanEq(int a)
     {
-        //ARM64-FULL-LINE: negs {{w[0-9]+}}, {{w[0-9]+}}
+        //ARM64-FULL-LINE: neg {{w[0-9]+}}, {{w[0-9]+}}
+        //ARM64-FULL-LINE: cmp {{w[0-9]+}}, #0
         return -a <= 0;
     }
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static bool NegsGreaterThanIntMinValue()
+    {
+        //ARM64-FULL-LINE: neg {{w[0-9]+}}, {{w[0-9]+}}
+        //ARM64-FULL-LINE: cmp {{w[0-9]+}}, #0
+        return -IntMinValue() > 0;
+    }
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static bool NegsGreaterThanLongMinValue()
+    {
+        //ARM64-FULL-LINE: neg {{x[0-9]+}}, {{x[0-9]+}}
+        //ARM64-FULL-LINE: cmp {{x[0-9]+}}, #0
+        return -LongMinValue() > 0;
+    }
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static int IntMinValue()
+    {
+        return int.MinValue;
+    }
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static long LongMinValue()
+    {
+        return long.MinValue;
+    }
 }
}