diff --git a/llvm/include/llvm/IR/PatternMatch.h b/llvm/include/llvm/IR/PatternMatch.h
index 2eaa7d0faabc1..1f86cdfd94e17 100644
--- a/llvm/include/llvm/IR/PatternMatch.h
+++ b/llvm/include/llvm/IR/PatternMatch.h
@@ -1323,6 +1323,14 @@ m_NSWAdd(const LHS &L, const RHS &R) {
                                                                            R);
 }
 template <typename LHS, typename RHS>
+inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
+                                 OverflowingBinaryOperator::NoSignedWrap, true>
+m_c_NSWAdd(const LHS &L, const RHS &R) {
+  return OverflowingBinaryOp_match<LHS, RHS, Instruction::Add,
+                                   OverflowingBinaryOperator::NoSignedWrap,
+                                   true>(L, R);
+}
+template <typename LHS, typename RHS>
 inline OverflowingBinaryOp_match<LHS, RHS, Instruction::Sub,
                                  OverflowingBinaryOperator::NoSignedWrap>
 m_NSWSub(const LHS &L, const RHS &R) {
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index c1ce364eb1794..dece4b5ed94a5 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -1868,7 +1868,7 @@ Instruction *InstCombinerImpl::visitAdd(BinaryOperator &I) {
                                           {Sub, Builder.getFalse()});
     Value *Ret = Builder.CreateSub(
         ConstantInt::get(A->getType(), A->getType()->getScalarSizeInBits()),
-        Ctlz, "", /*HasNUW*/ true, /*HasNSW*/ true);
+        Ctlz, "", /*HasNUW=*/true, /*HasNSW=*/true);
     return replaceInstUsesWith(I, Builder.CreateZExtOrTrunc(Ret, I.getType()));
   }

@@ -2335,8 +2335,8 @@ Instruction *InstCombinerImpl::visitSub(BinaryOperator &I) {
     OverflowingBinaryOperator *LHSSub = cast<OverflowingBinaryOperator>(Op0);
     bool HasNUW = I.hasNoUnsignedWrap() && LHSSub->hasNoUnsignedWrap();
     bool HasNSW = HasNUW && I.hasNoSignedWrap() && LHSSub->hasNoSignedWrap();
-    Value *Add = Builder.CreateAdd(Y, Op1, "", /* HasNUW */ HasNUW,
-                                   /* HasNSW */ HasNSW);
+    Value *Add = Builder.CreateAdd(Y, Op1, "", /*HasNUW=*/HasNUW,
+                                   /*HasNSW=*/HasNSW);
     BinaryOperator *Sub = BinaryOperator::CreateSub(X, Add);
     Sub->setHasNoUnsignedWrap(HasNUW);
     Sub->setHasNoSignedWrap(HasNSW);
@@ -2807,6 +2807,51 @@ Instruction *InstCombinerImpl::visitSub(BinaryOperator &I) {
   if (Instruction *Res = foldBinOpOfSelectAndCastOfSelectCondition(I))
     return Res;

+  // (sub (sext (add nsw (X, Y)), sext (X))) --> (sext (Y))
+  if (match(Op1, m_SExtLike(m_Value(X))) &&
+      match(Op0, m_SExtLike(m_c_NSWAdd(m_Specific(X), m_Value(Y))))) {
+    Value *SExtY = Builder.CreateSExt(Y, I.getType());
+    return replaceInstUsesWith(I, SExtY);
+  }
+
+  // (sub[ nsw] (sext (add nsw (X, Y)), sext (add nsw (X, Z)))) -->
+  // --> (sub[ nsw] (sext (Y), sext (Z)))
+  {
+    Value *Z, *Add0, *Add1;
+    if (match(Op0, m_SExtLike(m_Value(Add0))) &&
+        match(Op1, m_SExtLike(m_Value(Add1))) &&
+        ((match(Add0, m_NSWAdd(m_Value(X), m_Value(Y))) &&
+          match(Add1, m_c_NSWAdd(m_Specific(X), m_Value(Z)))) ||
+         (match(Add0, m_NSWAdd(m_Value(Y), m_Value(X))) &&
+          match(Add1, m_c_NSWAdd(m_Specific(X), m_Value(Z)))))) {
+      unsigned NumOfNewInstrs = 0;
+      // Non-constant Y, Z require new SExt.
+      NumOfNewInstrs += !isa<Constant>(Y) ? 1 : 0;
+      NumOfNewInstrs += !isa<Constant>(Z) ? 1 : 0;
+      // Check if we can trade some of the old instructions for the new ones.
+      unsigned NumOfDeadInstrs = 0;
+      if (Op0->hasOneUse()) {
+        // If Op0 (sext) has multiple uses, then we keep it
+        // and the add that it uses, otherwise, we can remove
+        // the sext and probably the add (depending on the number of its uses).
+        ++NumOfDeadInstrs;
+        NumOfDeadInstrs += Add0->hasOneUse() ? 1 : 0;
+      }
+      if (Op1->hasOneUse()) {
+        ++NumOfDeadInstrs;
+        NumOfDeadInstrs += Add1->hasOneUse() ? 1 : 0;
+      }
+      if (NumOfDeadInstrs >= NumOfNewInstrs) {
+        Value *SExtY = Builder.CreateSExt(Y, I.getType());
+        Value *SExtZ = Builder.CreateSExt(Z, I.getType());
+        Value *Sub = Builder.CreateSub(SExtY, SExtZ, "",
+                                       /*HasNUW=*/false,
+                                       /*HasNSW=*/I.hasNoSignedWrap());
+        return replaceInstUsesWith(I, Sub);
+      }
+    }
+  }
+
   return TryToNarrowDeduceFlags();
 }
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index 457199a72510e..deb51f1e9efd1 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -290,7 +290,7 @@ Instruction *InstCombinerImpl::visitMul(BinaryOperator &I) {
       auto *Op1C = cast<Constant>(Op1);
       return replaceInstUsesWith(
           I, Builder.CreateMul(NegOp0, ConstantExpr::getNeg(Op1C), "",
-                               /* HasNUW */ false,
+                               /*HasNUW=*/false,
                                HasNSW && Op1C->isNotMinSignedValue()));
     }

@@ -1243,8 +1243,8 @@ static Value *foldIDivShl(BinaryOperator &I, InstCombiner::BuilderTy &Builder) {
       // or divisor has nsw and operator is sdiv.
       Value *Dividend = Builder.CreateShl(
           One, Y, "shl.dividend",
-          /*HasNUW*/ true,
-          /*HasNSW*/
+          /*HasNUW=*/true,
+          /*HasNSW=*/
           IsSigned ? (Shl0->hasNoUnsignedWrap() || Shl1->hasNoUnsignedWrap())
                    : Shl0->hasNoSignedWrap());
       return Builder.CreateLShr(Dividend, Z, "", I.isExact());
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineNegator.cpp b/llvm/lib/Transforms/InstCombine/InstCombineNegator.cpp
index 2210336d92bf4..b0a0bcfbde19e 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineNegator.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineNegator.cpp
@@ -233,7 +233,7 @@ std::array<Value *, 2> Negator::getSortedOperandsOfBinOp(Instruction *I) {
     // However, only do this either if the old `sub` doesn't stick around, or
     // it was subtracting from a constant. Otherwise, this isn't profitable.
     return Builder.CreateSub(I->getOperand(1), I->getOperand(0),
-                             I->getName() + ".neg", /* HasNUW */ false,
+                             I->getName() + ".neg", /*HasNUW=*/false,
                              IsNSW && I->hasNoSignedWrap());
   }

@@ -404,7 +404,7 @@ std::array<Value *, 2> Negator::getSortedOperandsOfBinOp(Instruction *I) {
     IsNSW &= I->hasNoSignedWrap();
    if (Value *NegOp0 = negate(I->getOperand(0), IsNSW, Depth + 1))
       return Builder.CreateShl(NegOp0, I->getOperand(1), I->getName() + ".neg",
-                               /* HasNUW */ false, IsNSW);
+                               /*HasNUW=*/false, IsNSW);
     // Otherwise, `shl %x, C` can be interpreted as `mul %x, 1<<C`.
     Constant *Op1C;
     if (!match(I->getOperand(1), m_ImmConstant(Op1C)) || !IsTrulyNegation)
@@ -412,7 +412,7 @@ std::array<Value *, 2> Negator::getSortedOperandsOfBinOp(Instruction *I) {
     return Builder.CreateMul(
         I->getOperand(0),
         Builder.CreateShl(Constant::getAllOnesValue(Op1C->getType()), Op1C),
-        I->getName() + ".neg", /* HasNUW */ false, IsNSW);
+        I->getName() + ".neg", /*HasNUW=*/false, IsNSW);
   }
   case Instruction::Or: {
     if (!cast<PossiblyDisjointInst>(I)->isDisjoint())
@@ -483,7 +483,7 @@ std::array<Value *, 2> Negator::getSortedOperandsOfBinOp(Instruction *I) {
       // Can't negate either of them.
       return nullptr;
     return Builder.CreateMul(NegatedOp, OtherOp, I->getName() + ".neg",
-                             /* HasNUW */ false, IsNSW && I->hasNoSignedWrap());
+                             /*HasNUW=*/false, IsNSW && I->hasNoSignedWrap());
   }
   default:
     return nullptr; // Don't know, likely not negatible for free.
diff --git a/llvm/test/Transforms/InstCombine/sub-sext-add.ll b/llvm/test/Transforms/InstCombine/sub-sext-add.ll
new file mode 100644
index 0000000000000..71e91f954420f
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/sub-sext-add.ll
@@ -0,0 +1,350 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -passes=instcombine -S | FileCheck %s
+
+define i64 @src_2add_2sext_sub_1(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @src_2add_2sext_sub_1(
+; CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[Y:%.*]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = sext i32 [[Z:%.*]] to i64
+; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i64 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    ret i64 [[SUB]]
+;
+  %add1 = add nsw i32 %x, %y
+  %add2 = add nsw i32 %x, %z
+  %sext1 = sext i32 %add1 to i64
+  %sext2 = sext i32 %add2 to i64
+  %sub = sub i64 %sext1, %sext2
+  ret i64 %sub
+}
+
+define i64 @src_2add_2sext_sub_2(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @src_2add_2sext_sub_2(
+; CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[Y:%.*]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = sext i32 [[Z:%.*]] to i64
+; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i64 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    ret i64 [[SUB]]
+;
+  %add1 = add nsw i32 %x, %y
+  %add2 = add nsw i32 %z, %x
+  %sext1 = sext i32 %add1 to i64
+  %sext2 = sext i32 %add2 to i64
+  %sub = sub i64 %sext1, %sext2
+  ret i64 %sub
+}
+
+define i64 @src_2add_2sext_sub_3(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @src_2add_2sext_sub_3(
+; CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[Y:%.*]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = sext i32 [[Z:%.*]] to i64
+; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i64 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    ret i64 [[SUB]]
+;
+  %add1 = add nsw i32 %y, %x
+  %add2 = add nsw i32 %z, %x
+  %sext1 = sext i32 %add1 to i64
+  %sext2 = sext i32 %add2 to i64
+  %sub = sub i64 %sext1, %sext2
+  ret i64 %sub
+}
+
+define i64 @src_2add_2sext_sub_4(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @src_2add_2sext_sub_4(
+; CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[Y:%.*]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = sext i32 [[Z:%.*]] to i64
+; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i64 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    ret i64 [[SUB]]
+;
+  %add1 = add nsw i32 %y, %x
+  %add2 = add nsw i32 %x, %z
+  %sext1 = sext i32 %add1 to i64
+  %sext2 = sext i32 %add2 to i64
+  %sub = sub i64 %sext1, %sext2
+  ret i64 %sub
+}
+
+define i64 @src_2add_2sextlike_sub(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @src_2add_2sextlike_sub(
+; CHECK-NEXT:    [[SEXT1:%.*]] = sext i32 [[Y:%.*]] to i64
+; CHECK-NEXT:    [[SEXT2:%.*]] = sext i32 [[Z:%.*]] to i64
+; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i64 [[SEXT1]], [[SEXT2]]
+; CHECK-NEXT:    ret i64 [[SUB]]
+;
+  %add1 = add nsw i32 %x, %y
+  %add2 = add nsw i32 %x, %z
+  %sext1 = zext nneg i32 %add1 to i64
+  %sext2 = zext nneg i32 %add2 to i64
+  %sub = sub i64 %sext1, %sext2
+  ret i64 %sub
+}
+
+define i64 @src_2add_2sext_sub_nsw(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @src_2add_2sext_sub_nsw(
+; CHECK-NEXT:    [[SEXT1:%.*]] = sext i32 [[Y:%.*]] to i64
+; CHECK-NEXT:    [[SEXT2:%.*]] = sext i32 [[Z:%.*]] to i64
+; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i64 [[SEXT1]], [[SEXT2]]
+; CHECK-NEXT:    ret i64 [[SUB]]
+;
+  %add1 = add nsw i32 %x, %y
+  %add2 = add nsw i32 %x, %z
+  %sext1 = sext i32 %add1 to i64
+  %sext2 = sext i32 %add2 to i64
+  %sub = sub nsw i64 %sext1, %sext2
+  ret i64 %sub
+}
+
+define i64 @src_2add_2sext_sub_nuw(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @src_2add_2sext_sub_nuw(
+; CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[Y:%.*]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = sext i32 [[Z:%.*]] to i64
+; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i64 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    ret i64 [[SUB]]
+;
+  %add1 = add nsw i32 %x, %y
+  %add2 = add nsw i32 %x, %z
+  %sext1 = sext i32 %add1 to i64
+  %sext2 = sext i32 %add2 to i64
+  %sub = sub nuw i64 %sext1, %sext2
+  ret i64 %sub
+}
+
+declare void @use_i32(i32, i32)
+declare void @use_i64(i64, i64)
+
+define i64 @src_2add_2sext_sub_multiple_uses_1(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @src_2add_2sext_sub_multiple_uses_1(
+; CHECK-NEXT:    [[ADD1:%.*]] = add nsw i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[SEXT1:%.*]] = sext i32 [[ADD1]] to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[Y]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = sext i32 [[Z:%.*]] to i64
+; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i64 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    call void @use_i64(i64 [[SEXT1]], i64 [[SEXT1]])
+; CHECK-NEXT:    ret i64 [[SUB]]
+;
+  %add1 = add nsw i32 %x, %y
+  %add2 = add nsw i32 %x, %z
+  %sext1 = sext i32 %add1 to i64
+  %sext2 = sext i32 %add2 to i64
+  %sub = sub i64 %sext1, %sext2
+  call void @use_i64(i64 %sext1, i64 %sext1)
+  ret i64 %sub
+}
+
+define i64 @src_2add_2sext_sub_multiple_uses_2(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @src_2add_2sext_sub_multiple_uses_2(
+; CHECK-NEXT:    [[ADD2:%.*]] = add nsw i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    [[SEXT2:%.*]] = sext i32 [[ADD2]] to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[Y:%.*]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = sext i32 [[Z]] to i64
+; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i64 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    call void @use_i64(i64 [[SEXT2]], i64 [[SEXT2]])
+; CHECK-NEXT:    ret i64 [[SUB]]
+;
+  %add1 = add nsw i32 %x, %y
+  %add2 = add nsw i32 %x, %z
+  %sext1 = sext i32 %add1 to i64
+  %sext2 = sext i32 %add2 to i64
+  %sub = sub i64 %sext1, %sext2
+  call void @use_i64(i64 %sext2, i64 %sext2)
+  ret i64 %sub
+}
+
+define i64 @src_2add_2sext_sub_multiple_uses_3(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @src_2add_2sext_sub_multiple_uses_3(
+; CHECK-NEXT:    [[ADD1:%.*]] = add nsw i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[ADD2:%.*]] = add nsw i32 [[X]], [[Z:%.*]]
+; CHECK-NEXT:    [[SEXT1:%.*]] = sext i32 [[ADD1]] to i64
+; CHECK-NEXT:    [[SEXT2:%.*]] = sext i32 [[ADD2]] to i64
+; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i64 [[SEXT1]], [[SEXT2]]
+; CHECK-NEXT:    call void @use_i64(i64 [[SEXT1]], i64 [[SEXT2]])
+; CHECK-NEXT:    ret i64 [[SUB]]
+;
+  %add1 = add nsw i32 %x, %y
+  %add2 = add nsw i32 %x, %z
+  %sext1 = sext i32 %add1 to i64
+  %sext2 = sext i32 %add2 to i64
+  %sub = sub i64 %sext1, %sext2
+  call void @use_i64(i64 %sext1, i64 %sext2)
+  ret i64 %sub
+}
+
+define i64 @src_2add_2sext_sub_multiple_uses_4(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @src_2add_2sext_sub_multiple_uses_4(
+; CHECK-NEXT:    [[ADD1:%.*]] = add nsw i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[ADD2:%.*]] = add nsw i32 [[X]], [[Z:%.*]]
+; CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[Y]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = sext i32 [[Z]] to i64
+; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i64 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    call void @use_i32(i32 [[ADD1]], i32 [[ADD2]])
+; CHECK-NEXT:    ret i64 [[SUB]]
+;
+  %add1 = add nsw i32 %x, %y
+  %add2 = add nsw i32 %x, %z
+  %sext1 = sext i32 %add1 to i64
+  %sext2 = sext i32 %add2 to i64
+  %sub = sub i64 %sext1, %sext2
+  call void @use_i32(i32 %add1, i32 %add2)
+  ret i64 %sub
+}
+
+define i64 @src_2add_2sext_sub_multiple_uses_5(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @src_2add_2sext_sub_multiple_uses_5(
+; CHECK-NEXT:    [[ADD1:%.*]] = add nsw i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[SEXT1:%.*]] = sext i32 [[ADD1]] to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[Y]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = sext i32 [[Z:%.*]] to i64
+; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i64 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    call void @use_i32(i32 [[ADD1]], i32 [[ADD1]])
+; CHECK-NEXT:    call void @use_i64(i64 [[SEXT1]], i64 [[SEXT1]])
+; CHECK-NEXT:    ret i64 [[SUB]]
+;
+  %add1 = add nsw i32 %x, %y
+  %add2 = add nsw i32 %x, %z
+  %sext1 = sext i32 %add1 to i64
+  %sext2 = sext i32 %add2 to i64
+  %sub = sub i64 %sext1, %sext2
+  call void @use_i32(i32 %add1, i32 %add1)
+  call void @use_i64(i64 %sext1, i64 %sext1)
+  ret i64 %sub
+}
+
+define i64 @src_2add_2sext_sub_multiple_uses_6(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @src_2add_2sext_sub_multiple_uses_6(
+; CHECK-NEXT:    [[ADD2:%.*]] = add nsw i32 [[X:%.*]], [[Z:%.*]]
+; CHECK-NEXT:    [[SEXT2:%.*]] = sext i32 [[ADD2]] to i64
+; CHECK-NEXT:    [[TMP1:%.*]] = sext i32 [[Y:%.*]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = sext i32 [[Z]] to i64
+; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i64 [[TMP1]], [[TMP2]]
+; CHECK-NEXT:    call void @use_i32(i32 [[ADD2]], i32 [[ADD2]])
+; CHECK-NEXT:    call void @use_i64(i64 [[SEXT2]], i64 [[SEXT2]])
+; CHECK-NEXT:    ret i64 [[SUB]]
+;
+  %add1 = add nsw i32 %x, %y
+  %add2 = add nsw i32 %x, %z
+  %sext1 = sext i32 %add1 to i64
+  %sext2 = sext i32 %add2 to i64
+  %sub = sub i64 %sext1, %sext2
+  call void @use_i32(i32 %add2, i32 %add2)
+  call void @use_i64(i64 %sext2, i64 %sext2)
+  ret i64 %sub
+}
+
+define i64 @src_2add_2sext_sub_multiple_uses_7(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: @src_2add_2sext_sub_multiple_uses_7(
+; CHECK-NEXT:    [[ADD1:%.*]] = add nsw i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[ADD2:%.*]] = add nsw i32 [[X]], [[Z:%.*]]
+; CHECK-NEXT:    [[SEXT1:%.*]] = sext i32 [[ADD1]] to i64
+; CHECK-NEXT:    [[SEXT2:%.*]] = sext i32 [[ADD2]] to i64
+; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i64 [[SEXT1]], [[SEXT2]]
+; CHECK-NEXT:    call void @use_i32(i32 [[ADD1]], i32 [[ADD2]])
+; CHECK-NEXT:    call void @use_i64(i64 [[SEXT1]], i64 [[SEXT2]])
+; CHECK-NEXT:    ret i64 [[SUB]]
+;
+  %add1 = add nsw i32 %x, %y
+  %add2 = add nsw i32 %x, %z
+  %sext1 = sext i32 %add1 to i64
+  %sext2 = sext i32 %add2 to i64
+  %sub = sub i64 %sext1, %sext2
+  call void @use_i32(i32 %add1, i32 %add2)
+  call void @use_i64(i64 %sext1, i64 %sext2)
+  ret i64 %sub
+}
+
+define i64 @src_2add_2sext_sub_multiple_uses_8(i32 %x, i32 %y) {
+; CHECK-LABEL: @src_2add_2sext_sub_multiple_uses_8(
+; CHECK-NEXT:    [[ADD1:%.*]] = add nsw i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT:    [[ADD2:%.*]] = add nsw i32 [[X]], 1
+; CHECK-NEXT:    [[SEXT1:%.*]] = sext i32 [[ADD1]] to i64
+; CHECK-NEXT:    [[SEXT2:%.*]] = sext i32 [[ADD2]] to i64
+; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i64 [[SEXT1]], [[SEXT2]]
+; CHECK-NEXT:    call void @use_i32(i32 [[ADD1]], i32 [[ADD2]])
+; CHECK-NEXT:    call void @use_i64(i64 [[SEXT1]], i64 [[SEXT2]])
+; CHECK-NEXT:    ret i64 [[SUB]]
+;
+  %add1 = add nsw i32 %x, %y
+  %add2 = add nsw i32 %x, 1
+  %sext1 = sext i32 %add1 to i64
+  %sext2 = sext i32 %add2 to i64
+  %sub = sub i64 %sext1, %sext2
+  call void @use_i32(i32 %add1, i32 %add2)
+  call void @use_i64(i64 %sext1, i64 %sext2)
+  ret i64 %sub
+}
+
+define i64 @src_2add_2sext_sub_multiple_uses_9(i32 %x) {
+; CHECK-LABEL: @src_2add_2sext_sub_multiple_uses_9(
+; CHECK-NEXT:    [[ADD1:%.*]] = add nsw i32 [[X:%.*]], 2
+; CHECK-NEXT:    [[ADD2:%.*]] = add nsw i32 [[X]], 1
+; CHECK-NEXT:    [[SEXT1:%.*]] = sext i32 [[ADD1]] to i64
+; CHECK-NEXT:    [[SEXT2:%.*]] = sext i32 [[ADD2]] to i64
+; CHECK-NEXT:    call void @use_i32(i32 [[ADD1]], i32 [[ADD2]])
+; CHECK-NEXT:    call void @use_i64(i64 [[SEXT1]], i64 [[SEXT2]])
+; CHECK-NEXT:    ret i64 1
+;
+  %add1 = add nsw i32 %x, 2
+  %add2 = add nsw i32 %x, 1
+  %sext1 = sext i32 %add1 to i64
+  %sext2 = sext i32 %add2 to i64
+  %sub = sub i64 %sext1, %sext2
+  call void @use_i32(i32 %add1, i32 %add2)
+  call void @use_i64(i64 %sext1, i64 %sext2)
+  ret i64 %sub
+}
+
+define i64 @src_x_add_2sext_sub_1(i32 %x, i32 %y) {
+; CHECK-LABEL: @src_x_add_2sext_sub_1(
+; CHECK-NEXT:    [[SUB:%.*]] = sext i32 [[Y:%.*]] to i64
+; CHECK-NEXT:    ret i64 [[SUB]]
+;
+  %add1 = add nsw i32 %x, %y
+  %sext1 = sext i32 %add1 to i64
+  %sext2 = sext i32 %x to i64
+  %sub = sub i64 %sext1, %sext2
+  ret i64 %sub
+}
+
+define i64 @src_x_add_2sext_sub_2(i32 %x, i32 %y) {
+; CHECK-LABEL: @src_x_add_2sext_sub_2(
+; CHECK-NEXT:    [[SUB:%.*]] = sext i32 [[Y:%.*]] to i64
+; CHECK-NEXT:    ret i64 [[SUB]]
+;
+  %add1 = add nsw i32 %y, %x
+  %sext1 = sext i32 %add1 to i64
+  %sext2 = sext i32 %x to i64
+  %sub = sub i64 %sext1, %sext2
+  ret i64 %sub
+}
+
+define i64 @src_x_add_2sextlike_sub(i32 %x, i32 %y) {
+; CHECK-LABEL: @src_x_add_2sextlike_sub(
+; CHECK-NEXT:    [[SUB:%.*]] = sext i32 [[Y:%.*]] to i64
+; CHECK-NEXT:    ret i64 [[SUB]]
+;
+  %add1 = add nsw i32 %x, %y
+  %sext1 = zext nneg i32 %add1 to i64
+  %sext2 = zext nneg i32 %x to i64
+  %sub = sub i64 %sext1, %sext2
+  ret i64 %sub
+}
+
+define i64 @src_x_add_2sext_sub_nsw(i32 %x, i32 %y) {
+; CHECK-LABEL: @src_x_add_2sext_sub_nsw(
+; CHECK-NEXT:    [[SUB:%.*]] = sext i32 [[Y:%.*]] to i64
+; CHECK-NEXT:    ret i64 [[SUB]]
+;
+  %add1 = add nsw i32 %x, %y
+  %sext1 = sext i32 %add1 to i64
+  %sext2 = sext i32 %x to i64
+  %sub = sub nsw i64 %sext1, %sext2
+  ret i64 %sub
+}
+
+define i64 @src_x_add_2sext_sub_nuw(i32 %x, i32 %y) {
+; CHECK-LABEL: @src_x_add_2sext_sub_nuw(
+; CHECK-NEXT:    [[SUB:%.*]] = sext i32 [[Y:%.*]] to i64
+; CHECK-NEXT:    ret i64 [[SUB]]
+;
+  %add1 = add nsw i32 %x, %y
+  %sext1 = sext i32 %add1 to i64
+  %sext2 = sext i32 %x to i64
+  %sub = sub nuw i64 %sext1, %sext2
+  ret i64 %sub
+}
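
Reviewer note, illustration only (not part of the patch): the sketch below shows how the commutative matcher added to PatternMatch.h is intended to be read alongside the first new fold in visitSub. The helper name matchSExtOfNSWAddWith is hypothetical; the snippet assumes the m_c_NSWAdd definition introduced by this change, and uses plain m_SExt whereas the fold itself uses m_SExtLike (which additionally accepts `zext nneg`).

  #include "llvm/IR/PatternMatch.h"

  using namespace llvm;
  using namespace llvm::PatternMatch;

  // Hypothetical helper: returns Y if V is `sext (add nsw X, Y)` or
  // `sext (add nsw Y, X)` for the given X -- the shape the first new fold
  // matches on Op0. m_c_NSWAdd tries both operand orders of the `add nsw`,
  // which is why the four operand-order tests above all canonicalize the
  // same way.
  static Value *matchSExtOfNSWAddWith(Value *V, Value *X) {
    Value *Y;
    if (match(V, m_SExt(m_c_NSWAdd(m_Specific(X), m_Value(Y)))))
      return Y;
    return nullptr;
  }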