[SCEVPatternMatch] Introduce m_scev_AffineAddRec by artagnon · Pull Request #140377 · llvm/llvm-project · GitHub
[go: up one dir, main page]

Skip to content

[SCEVPatternMatch] Introduce m_scev_AffineAddRec #140377

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 2 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 10 additions & 1 deletion llvm/include/llvm/Analysis/ScalarEvolutionPatternMatch.h
Original file line number Diff line number Diff line change
Expand Up @@ -61,6 +61,9 @@ template <typename Class> struct class_match {
};

inline class_match<const SCEV> m_SCEV() { return class_match<const SCEV>(); }
inline class_match<const SCEVConstant> m_SCEVConstant() {
return class_match<const SCEVConstant>();
}

template <typename Class> struct bind_ty {
Class *&VR;
Expand Down Expand Up @@ -95,7 +98,7 @@ struct specificscev_ty {
};

/// Match if we have a specific specified SCEV.
inline specificscev_ty m_Specific(const SCEV *S) { return S; }
inline specificscev_ty m_scev_Specific(const SCEV *S) { return S; }

struct is_specific_cst {
uint64_t CV;
Expand Down Expand Up @@ -192,6 +195,12 @@ inline SCEVBinaryExpr_match<SCEVUDivExpr, Op0_t, Op1_t>
m_scev_UDiv(const Op0_t &Op0, const Op1_t &Op1) {
return m_scev_Binary<SCEVUDivExpr>(Op0, Op1);
}

template <typename Op0_t, typename Op1_t>
inline SCEVBinaryExpr_match<SCEVAddRecExpr, Op0_t, Op1_t>
m_scev_AffineAddRec(const Op0_t &Op0, const Op1_t &Op1) {
return m_scev_Binary<SCEVAddRecExpr>(Op0, Op1);
}
} // namespace SCEVPatternMatch
} // namespace llvm

Expand Down
21 changes: 8 additions & 13 deletions llvm/lib/Analysis/ScalarEvolution.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -12480,26 +12480,21 @@ static bool IsKnownPredicateViaAddRecStart(ScalarEvolution &SE,
if (!ICmpInst::isRelational(Pred))
return false;

const SCEVAddRecExpr *LAR = dyn_cast<SCEVAddRecExpr>(LHS);
if (!LAR)
return false;
const SCEVAddRecExpr *RAR = dyn_cast<SCEVAddRecExpr>(RHS);
if (!RAR)
const SCEV *LStart, *RStart, *Step;
if (!match(LHS, m_scev_AffineAddRec(m_SCEV(LStart), m_SCEV(Step))) ||
!match(RHS, m_scev_AffineAddRec(m_SCEV(RStart), m_scev_Specific(Step))))
return false;
const SCEVAddRecExpr *LAR = cast<SCEVAddRecExpr>(LHS);
const SCEVAddRecExpr *RAR = cast<SCEVAddRecExpr>(RHS);
if (LAR->getLoop() != RAR->getLoop())
return false;
if (!LAR->isAffine() || !RAR->isAffine())
return false;

if (LAR->getStepRecurrence(SE) != RAR->getStepRecurrence(SE))
return false;

SCEV::NoWrapFlags NW = ICmpInst::isSigned(Pred) ?
SCEV::FlagNSW : SCEV::FlagNUW;
if (!LAR->getNoWrapFlags(NW) || !RAR->getNoWrapFlags(NW))
return false;

return SE.isKnownPredicate(Pred, LAR->getStart(), RAR->getStart());
return SE.isKnownPredicate(Pred, LStart, RStart);
}

/// Is LHS `Pred` RHS true on the virtue of LHS or RHS being a Min or Max
Expand Down Expand Up @@ -12716,15 +12711,15 @@ static bool isKnownPredicateExtendIdiom(CmpPredicate Pred, const SCEV *LHS,
case ICmpInst::ICMP_SLE: {
// If operand >=s 0 then ZExt == SExt. If operand <s 0 then SExt <s ZExt.
return match(LHS, m_scev_SExt(m_SCEV(Op))) &&
match(RHS, m_scev_ZExt(m_Specific(Op)));
match(RHS, m_scev_ZExt(m_scev_Specific(Op)));
}
case ICmpInst::ICMP_UGE:
std::swap(LHS, RHS);
[[fallthrough]];
case ICmpInst::ICMP_ULE: {
// If operand >=u 0 then ZExt == SExt. If operand <u 0 then ZExt <u SExt.
return match(LHS, m_scev_ZExt(m_SCEV(Op))) &&
match(RHS, m_scev_SExt(m_Specific(Op)));
match(RHS, m_scev_SExt(m_scev_Specific(Op)));
}
default:
return false;
Expand Down
120 changes: 54 additions & 66 deletions llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -77,11 +77,11 @@
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ScalarEvolutionNormalization.h"
#include "llvm/Analysis/ScalarEvolutionPatternMatch.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
Expand Down Expand Up @@ -128,6 +128,7 @@
#include <utility>

using namespace llvm;
using namespace SCEVPatternMatch;

#define DEBUG_TYPE "loop-reduce"

Expand Down Expand Up @@ -556,16 +557,17 @@ static void DoInitialMatch(const SCEV *S, Loop *L,
}

// Look at addrec operands.
if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
if (!AR->getStart()->isZero() && AR->isAffine()) {
DoInitialMatch(AR->getStart(), L, Good, Bad, SE);
DoInitialMatch(SE.getAddRecExpr(SE.getConstant(AR->getType(), 0),
AR->getStepRecurrence(SE),
// FIXME: AR->getNoWrapFlags()
AR->getLoop(), SCEV::FlagAnyWrap),
L, Good, Bad, SE);
return;
}
const SCEV *Start, *Step;
if (match(S, m_scev_AffineAddRec(m_SCEV(Start), m_SCEV(Step))) &&
!Start->isZero()) {
DoInitialMatch(Start, L, Good, Bad, SE);
DoInitialMatch(SE.getAddRecExpr(SE.getConstant(S->getType(), 0), Step,
// FIXME: AR->getNoWrapFlags()
cast<SCEVAddRecExpr>(S)->getLoop(),
SCEV::FlagAnyWrap),
L, Good, Bad, SE);
return;
}

// Handle a multiplication by -1 (negation) if it didn't fold.
if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S))
Expand Down Expand Up @@ -1436,22 +1438,16 @@ void Cost::RateRegister(const Formula &F, const SCEV *Reg,
unsigned LoopCost = 1;
if (TTI->isIndexedLoadLegal(TTI->MIM_PostInc, AR->getType()) ||
TTI->isIndexedStoreLegal(TTI->MIM_PostInc, AR->getType())) {

// If the step size matches the base offset, we could use pre-indexed
// addressing.
if (AMK == TTI::AMK_PreIndexed && F.BaseOffset.isFixed()) {
if (auto *Step = dyn_cast<SCEVConstant>(AR->getStepRecurrence(*SE)))
if (Step->getAPInt() == F.BaseOffset.getFixedValue())
LoopCost = 0;
} else if (AMK == TTI::AMK_PostIndexed) {
const SCEV *LoopStep = AR->getStepRecurrence(*SE);
if (isa<SCEVConstant>(LoopStep)) {
const SCEV *LoopStart = AR->getStart();
if (!isa<SCEVConstant>(LoopStart) &&
SE->isLoopInvariant(LoopStart, L))
LoopCost = 0;
}
}
const SCEV *Start;
const SCEVConstant *Step;
if (match(AR, m_scev_AffineAddRec(m_SCEV(Start), m_SCEVConstant(Step))))
// If the step size matches the base offset, we could use pre-indexed
// addressing.
if ((AMK == TTI::AMK_PreIndexed && F.BaseOffset.isFixed() &&
Step->getAPInt() == F.BaseOffset.getFixedValue()) ||
(AMK == TTI::AMK_PostIndexed && !isa<SCEVConstant>(Start) &&
SE->isLoopInvariant(Start, L)))
LoopCost = 0;
}
C.AddRecCost += LoopCost;

Expand Down Expand Up @@ -2544,13 +2540,11 @@ ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) {
// Check the relevant induction variable for conformance to
// the pattern.
const SCEV *IV = SE.getSCEV(Cond->getOperand(0));
const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
if (!AR || !AR->isAffine() ||
AR->getStart() != One ||
AR->getStepRecurrence(SE) != One)
if (!match(IV,
m_scev_AffineAddRec(m_scev_SpecificInt(1), m_scev_SpecificInt(1))))
return Cond;

assert(AR->getLoop() == L &&
assert(cast<SCEVAddRecExpr>(IV)->getLoop() == L &&
"Loop condition operand is an addrec in a different loop!");

// Check the right operand of the select, and remember it, as it will
Expand Down Expand Up @@ -3345,7 +3339,7 @@ void LSRInstance::CollectChains() {
void LSRInstance::FinalizeChain(IVChain &Chain) {
assert(!Chain.Incs.empty() && "empty IV chains are not allowed");
LLVM_DEBUG(dbgs() << "Final Chain: " << *Chain.Incs[0].UserInst << "\n");

for (const IVInc &Inc : Chain) {
LLVM_DEBUG(dbgs() << " Inc: " << *Inc.UserInst << "\n");
auto UseI = find(Inc.UserInst->operands(), Inc.IVOperand);
Expand Down Expand Up @@ -3848,26 +3842,27 @@ static const SCEV *CollectSubexprs(const SCEV *S, const SCEVConstant *C,
Ops.push_back(C ? SE.getMulExpr(C, Remainder) : Remainder);
}
return nullptr;
} else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
}
const SCEV *Start, *Step;
if (match(S, m_scev_AffineAddRec(m_SCEV(Start), m_SCEV(Step)))) {
// Split a non-zero base out of an addrec.
if (AR->getStart()->isZero() || !AR->isAffine())
if (Start->isZero())
return S;

const SCEV *Remainder = CollectSubexprs(AR->getStart(),
C, Ops, L, SE, Depth+1);
const SCEV *Remainder = CollectSubexprs(Start, C, Ops, L, SE, Depth + 1);
// Split the non-zero AddRec unless it is part of a nested recurrence that
// does not pertain to this loop.
if (Remainder && (AR->getLoop() == L || !isa<SCEVAddRecExpr>(Remainder))) {
if (Remainder && (cast<SCEVAddRecExpr>(S)->getLoop() == L ||
!isa<SCEVAddRecExpr>(Remainder))) {
Ops.push_back(C ? SE.getMulExpr(C, Remainder) : Remainder);
Remainder = nullptr;
}
if (Remainder != AR->getStart()) {
if (Remainder != Start) {
if (!Remainder)
Remainder = SE.getConstant(AR->getType(), 0);
return SE.getAddRecExpr(Remainder,
AR->getStepRecurrence(SE),
AR->getLoop(),
//FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
Remainder = SE.getConstant(S->getType(), 0);
return SE.getAddRecExpr(Remainder, Step,
cast<SCEVAddRecExpr>(S)->getLoop(),
// FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
SCEV::FlagAnyWrap);
}
} else if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
Expand Down Expand Up @@ -3895,17 +3890,13 @@ static bool mayUsePostIncMode(const TargetTransformInfo &TTI,
if (LU.Kind != LSRUse::Address ||
!LU.AccessTy.getType()->isIntOrIntVectorTy())
return false;
const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S);
if (!AR)
return false;
const SCEV *LoopStep = AR->getStepRecurrence(SE);
if (!isa<SCEVConstant>(LoopStep))
const SCEV *Start;
if (!match(S, m_scev_AffineAddRec(m_SCEV(Start), m_SCEVConstant())))
return false;
// Check if a post-indexed load/store can be used.
if (TTI.isIndexedLoadLegal(TTI.MIM_PostInc, AR->getType()) ||
TTI.isIndexedStoreLegal(TTI.MIM_PostInc, AR->getType())) {
const SCEV *LoopStart = AR->getStart();
if (!isa<SCEVConstant>(LoopStart) && SE.isLoopInvariant(LoopStart, L))
if (TTI.isIndexedLoadLegal(TTI.MIM_PostInc, S->getType()) ||
TTI.isIndexedStoreLegal(TTI.MIM_PostInc, S->getType())) {
if (!isa<SCEVConstant>(Start) && SE.isLoopInvariant(Start, L))
return true;
}
return false;
Expand Down Expand Up @@ -4164,18 +4155,15 @@ void LSRInstance::GenerateConstantOffsetsImpl(
// base pointer for each iteration of the loop, resulting in no extra add/sub
// instructions for pointer updating.
if (AMK == TTI::AMK_PreIndexed && LU.Kind == LSRUse::Address) {
if (auto *GAR = dyn_cast<SCEVAddRecExpr>(G)) {
if (auto *StepRec =
dyn_cast<SCEVConstant>(GAR->getStepRecurrence(SE))) {
const APInt &StepInt = StepRec->getAPInt();
int64_t Step = StepInt.isNegative() ?
StepInt.getSExtValue() : StepInt.getZExtValue();

for (Immediate Offset : Worklist) {
if (Offset.isFixed()) {
Offset = Immediate::getFixed(Offset.getFixedValue() - Step);
GenerateOffset(G, Offset);
}
const APInt *StepInt;
if (match(G, m_scev_AffineAddRec(m_SCEV(), m_scev_APInt(StepInt)))) {
int64_t Step = StepInt->isNegative() ? StepInt->getSExtValue()
: StepInt->getZExtValue();

for (Immediate Offset : Worklist) {
if (Offset.isFixed()) {
Offset = Immediate::getFixed(Offset.getFixedValue() - Step);
GenerateOffset(G, Offset);
}
}
}
Expand Down Expand Up @@ -6647,7 +6635,7 @@ struct SCEVDbgValueBuilder {
if (Op.getOp() != dwarf::DW_OP_LLVM_arg) {
Op.appendToVector(DestExpr);
continue;
}
}

DestExpr.push_back(dwarf::DW_OP_LLVM_arg);
// `DW_OP_LLVM_arg n` represents the nth LocationOp in this SCEV,
Expand Down