2017-05-26 15:46:46 -07:00
|
|
|
//===--- Passes/BinaryFunctionCallGraph.cpp -------------------------------===//
|
|
|
|
|
//
|
2021-03-15 18:04:18 -07:00
|
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
|
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
|
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
2017-05-26 15:46:46 -07:00
|
|
|
//
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
//
|
|
|
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
|
|
|
|
|
|
#include "BinaryFunctionCallGraph.h"
|
|
|
|
|
#include "BinaryFunction.h"
|
|
|
|
|
#include "BinaryContext.h"
|
2020-12-01 16:29:39 -08:00
|
|
|
#include "llvm/Support/CommandLine.h"
|
2017-06-02 16:57:22 -07:00
|
|
|
#include "llvm/Support/Timer.h"
|
2020-12-01 16:29:39 -08:00
|
|
|
#include <stack>
|
2017-05-26 15:46:46 -07:00
|
|
|
|
|
|
|
|
#define DEBUG_TYPE "callgraph"
|
|
|
|
|
|
2017-06-08 13:46:17 -07:00
|
|
|
namespace opts {
|
|
|
|
|
extern llvm::cl::opt<bool> TimeOpts;
|
2017-06-06 17:43:45 -07:00
|
|
|
extern llvm::cl::opt<unsigned> Verbosity;
|
2017-06-08 13:46:17 -07:00
|
|
|
}
|
|
|
|
|
|
2017-05-26 15:46:46 -07:00
|
|
|
namespace llvm {
|
|
|
|
|
namespace bolt {
|
|
|
|
|
|
|
|
|
|
CallGraph::NodeId BinaryFunctionCallGraph::addNode(BinaryFunction *BF,
|
|
|
|
|
uint32_t Size,
|
|
|
|
|
uint64_t Samples) {
|
|
|
|
|
auto Id = CallGraph::addNode(Size, Samples);
|
|
|
|
|
assert(size_t(Id) == Funcs.size());
|
|
|
|
|
Funcs.push_back(BF);
|
|
|
|
|
FuncToNodeId[BF] = Id;
|
|
|
|
|
assert(Funcs[Id] == BF);
|
|
|
|
|
return Id;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
std::deque<BinaryFunction *> BinaryFunctionCallGraph::buildTraversalOrder() {
|
[BOLT rebase] Rebase fixes on top of LLVM Feb2018
Summary:
This commit includes all code necessary to make BOLT working again
after the rebase. This includes a redesign of the EHFrame work,
cherry-pick of the 3dnow disassembly work, compilation error fixes,
and port of the debug_info work. The macroop fusion feature is not
ported yet.
The rebased version has minor changes to the "executed instructions"
dynostats counter because REP prefixes are considered a part of the
instruction it applies to. Also, some X86 instructions had the "mayLoad"
tablegen property removed, which BOLT uses to identify and account
for loads, thus reducing the total number of loads reported by
dynostats. This was observed in X86::MOVDQUmr. TRAP instructions are
not terminators anymore, changing our CFG. This commit adds compensation
to preserve this old behavior and minimize tests changes. debug_info
sections are now slightly larger. The discriminator field in the line
table is slightly different due to a change upstream. New profiles
generated with the other bolt are incompatible with this version
because of different hash values calculated for functions, so they will
be considered 100% stale. This commit changes the corresponding test
to XFAIL so it can be updated. The hash function changes because it
relies on raw opcode values, which change according to the opcodes
described in the X86 tablegen files. When processing HHVM, bolt was
observed to be using about 800MB more memory in the rebased version
and being about 5% slower.
(cherry picked from FBD7078072)
2018-02-06 15:00:23 -08:00
|
|
|
NamedRegionTimer T1("buildcgorder", "Build cg traversal order",
|
|
|
|
|
"CG breakdown", "CG breakdown", opts::TimeOpts);
|
2017-05-26 15:46:46 -07:00
|
|
|
std::deque<BinaryFunction *> TopologicalOrder;
|
|
|
|
|
enum NodeStatus { NEW, VISITING, VISITED };
|
|
|
|
|
std::vector<NodeStatus> NodeStatus(Funcs.size());
|
|
|
|
|
std::stack<NodeId> Worklist;
|
|
|
|
|
|
|
|
|
|
for (auto *Func : Funcs) {
|
|
|
|
|
const auto Id = FuncToNodeId.at(Func);
|
|
|
|
|
Worklist.push(Id);
|
|
|
|
|
NodeStatus[Id] = NEW;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
while (!Worklist.empty()) {
|
|
|
|
|
const auto FuncId = Worklist.top();
|
|
|
|
|
Worklist.pop();
|
|
|
|
|
|
|
|
|
|
if (NodeStatus[FuncId] == VISITED)
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
if (NodeStatus[FuncId] == VISITING) {
|
|
|
|
|
TopologicalOrder.push_back(Funcs[FuncId]);
|
|
|
|
|
NodeStatus[FuncId] = VISITED;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
assert(NodeStatus[FuncId] == NEW);
|
|
|
|
|
NodeStatus[FuncId] = VISITING;
|
|
|
|
|
Worklist.push(FuncId);
|
|
|
|
|
for (const auto Callee : successors(FuncId)) {
|
|
|
|
|
if (NodeStatus[Callee] == VISITING || NodeStatus[Callee] == VISITED)
|
|
|
|
|
continue;
|
|
|
|
|
Worklist.push(Callee);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return TopologicalOrder;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
BinaryFunctionCallGraph buildCallGraph(BinaryContext &BC,
|
|
|
|
|
CgFilterFunction Filter,
|
2017-06-09 13:17:36 -07:00
|
|
|
bool CgFromPerfData,
|
2017-05-26 15:46:46 -07:00
|
|
|
bool IncludeColdCalls,
|
|
|
|
|
bool UseFunctionHotSize,
|
2017-06-09 13:17:36 -07:00
|
|
|
bool UseSplitHotSize,
|
|
|
|
|
bool UseEdgeCounts,
|
|
|
|
|
bool IgnoreRecursiveCalls) {
|
[BOLT rebase] Rebase fixes on top of LLVM Feb2018
Summary:
This commit includes all code necessary to make BOLT working again
after the rebase. This includes a redesign of the EHFrame work,
cherry-pick of the 3dnow disassembly work, compilation error fixes,
and port of the debug_info work. The macroop fusion feature is not
ported yet.
The rebased version has minor changes to the "executed instructions"
dynostats counter because REP prefixes are considered a part of the
instruction it applies to. Also, some X86 instructions had the "mayLoad"
tablegen property removed, which BOLT uses to identify and account
for loads, thus reducing the total number of loads reported by
dynostats. This was observed in X86::MOVDQUmr. TRAP instructions are
not terminators anymore, changing our CFG. This commit adds compensation
to preserve this old behavior and minimize tests changes. debug_info
sections are now slightly larger. The discriminator field in the line
table is slightly different due to a change upstream. New profiles
generated with the other bolt are incompatible with this version
because of different hash values calculated for functions, so they will
be considered 100% stale. This commit changes the corresponding test
to XFAIL so it can be updated. The hash function changes because it
relies on raw opcode values, which change according to the opcodes
described in the X86 tablegen files. When processing HHVM, bolt was
observed to be using about 800MB more memory in the rebased version
and being about 5% slower.
(cherry picked from FBD7078072)
2018-02-06 15:00:23 -08:00
|
|
|
NamedRegionTimer T1("buildcg", "Callgraph construction", "CG breakdown",
|
|
|
|
|
"CG breakdown", opts::TimeOpts);
|
2017-05-26 15:46:46 -07:00
|
|
|
BinaryFunctionCallGraph Cg;
|
2017-06-09 13:17:36 -07:00
|
|
|
static constexpr auto COUNT_NO_PROFILE = BinaryBasicBlock::COUNT_NO_PROFILE;
|
2017-05-26 15:46:46 -07:00
|
|
|
|
2017-10-18 15:18:52 -07:00
|
|
|
// Compute function size
|
|
|
|
|
auto functionSize = [&](const BinaryFunction *Function) {
|
|
|
|
|
return UseFunctionHotSize && Function->isSplit()
|
|
|
|
|
? Function->estimateHotSize(UseSplitHotSize)
|
|
|
|
|
: Function->estimateSize();
|
|
|
|
|
};
|
|
|
|
|
|
2017-05-26 15:46:46 -07:00
|
|
|
// Add call graph nodes.
|
|
|
|
|
auto lookupNode = [&](BinaryFunction *Function) {
|
|
|
|
|
const auto Id = Cg.maybeGetNodeId(Function);
|
|
|
|
|
if (Id == CallGraph::InvalidId) {
|
|
|
|
|
// It's ok to use the hot size here when the function is split. This is
|
|
|
|
|
// because emitFunctions will emit the hot part first in the order that is
|
|
|
|
|
// computed by ReorderFunctions. The cold part will be emitted with the
|
|
|
|
|
// rest of the cold functions and code.
|
2017-10-18 15:18:52 -07:00
|
|
|
const auto Size = functionSize(Function);
|
2017-05-26 15:46:46 -07:00
|
|
|
// NOTE: for functions without a profile, we set the number of samples
|
|
|
|
|
// to zero. This will keep these functions from appearing in the hot
|
|
|
|
|
// section. This is a little weird because we wouldn't be trying to
|
|
|
|
|
// create a node for a function unless it was the target of a call from
|
|
|
|
|
// a hot block. The alternative would be to set the count to one or
|
|
|
|
|
// accumulate the number of calls from the callsite into the function
|
|
|
|
|
// samples. Results from perfomance testing seem to favor the zero
|
|
|
|
|
// count though, so I'm leaving it this way for now.
|
2019-01-31 11:23:02 -08:00
|
|
|
return Cg.addNode(Function, Size, Function->getKnownExecutionCount());
|
2017-05-26 15:46:46 -07:00
|
|
|
} else {
|
|
|
|
|
return Id;
|
|
|
|
|
}
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
// Add call graph edges.
|
|
|
|
|
uint64_t NotProcessed = 0;
|
2017-06-09 13:17:36 -07:00
|
|
|
uint64_t TotalCallsites = 0;
|
|
|
|
|
uint64_t NoProfileCallsites = 0;
|
|
|
|
|
uint64_t NumFallbacks = 0;
|
|
|
|
|
uint64_t RecursiveCallsites = 0;
|
2019-04-03 15:52:01 -07:00
|
|
|
for (auto &It : BC.getBinaryFunctions()) {
|
2017-05-26 15:46:46 -07:00
|
|
|
auto *Function = &It.second;
|
|
|
|
|
|
2017-10-18 15:18:52 -07:00
|
|
|
if (Filter(*Function)) {
|
2017-05-26 15:46:46 -07:00
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
const auto SrcId = lookupNode(Function);
|
2017-10-18 15:18:52 -07:00
|
|
|
// Offset of the current basic block from the beginning of the function
|
|
|
|
|
uint64_t Offset = 0;
|
2017-05-26 15:46:46 -07:00
|
|
|
|
|
|
|
|
auto recordCall = [&](const MCSymbol *DestSymbol, const uint64_t Count) {
|
2017-06-09 13:17:36 -07:00
|
|
|
if (auto *DstFunc =
|
|
|
|
|
DestSymbol ? BC.getFunctionForSymbol(DestSymbol) : nullptr) {
|
|
|
|
|
if (DstFunc == Function) {
|
2020-12-01 16:29:39 -08:00
|
|
|
LLVM_DEBUG(dbgs() << "BOLT-INFO: recursive call detected in "
|
|
|
|
|
<< *DstFunc << "\n");
|
2017-06-09 13:17:36 -07:00
|
|
|
++RecursiveCallsites;
|
|
|
|
|
if (IgnoreRecursiveCalls)
|
|
|
|
|
return false;
|
|
|
|
|
}
|
2018-03-22 09:48:59 -07:00
|
|
|
if (Filter(*DstFunc)) {
|
|
|
|
|
return false;
|
|
|
|
|
}
|
2017-05-26 15:46:46 -07:00
|
|
|
const auto DstId = lookupNode(DstFunc);
|
2017-06-09 13:17:36 -07:00
|
|
|
const bool IsValidCount = Count != COUNT_NO_PROFILE;
|
|
|
|
|
const auto AdjCount = UseEdgeCounts && IsValidCount ? Count : 1;
|
2017-10-18 15:18:52 -07:00
|
|
|
if (!IsValidCount)
|
|
|
|
|
++NoProfileCallsites;
|
|
|
|
|
Cg.incArcWeight(SrcId, DstId, AdjCount, Offset);
|
2020-12-01 16:29:39 -08:00
|
|
|
LLVM_DEBUG(
|
2017-06-06 17:43:45 -07:00
|
|
|
if (opts::Verbosity > 1) {
|
|
|
|
|
dbgs() << "BOLT-DEBUG: buildCallGraph: call " << *Function
|
|
|
|
|
<< " -> " << *DstFunc << " @ " << Offset << "\n";
|
|
|
|
|
});
|
2017-05-26 15:46:46 -07:00
|
|
|
return true;
|
|
|
|
|
}
|
[BOLT rebase] Rebase fixes on top of LLVM Feb2018
Summary:
This commit includes all code necessary to make BOLT working again
after the rebase. This includes a redesign of the EHFrame work,
cherry-pick of the 3dnow disassembly work, compilation error fixes,
and port of the debug_info work. The macroop fusion feature is not
ported yet.
The rebased version has minor changes to the "executed instructions"
dynostats counter because REP prefixes are considered a part of the
instruction it applies to. Also, some X86 instructions had the "mayLoad"
tablegen property removed, which BOLT uses to identify and account
for loads, thus reducing the total number of loads reported by
dynostats. This was observed in X86::MOVDQUmr. TRAP instructions are
not terminators anymore, changing our CFG. This commit adds compensation
to preserve this old behavior and minimize tests changes. debug_info
sections are now slightly larger. The discriminator field in the line
table is slightly different due to a change upstream. New profiles
generated with the other bolt are incompatible with this version
because of different hash values calculated for functions, so they will
be considered 100% stale. This commit changes the corresponding test
to XFAIL so it can be updated. The hash function changes because it
relies on raw opcode values, which change according to the opcodes
described in the X86 tablegen files. When processing HHVM, bolt was
observed to be using about 800MB more memory in the rebased version
and being about 5% slower.
(cherry picked from FBD7078072)
2018-02-06 15:00:23 -08:00
|
|
|
|
2017-05-26 15:46:46 -07:00
|
|
|
return false;
|
|
|
|
|
};
|
|
|
|
|
|
2017-06-09 13:17:36 -07:00
|
|
|
// Get pairs of (symbol, count) for each target at this callsite.
|
|
|
|
|
// If the call is to an unknown function the symbol will be nullptr.
|
|
|
|
|
// If there is no profiling data the count will be COUNT_NO_PROFILE.
|
|
|
|
|
auto getCallInfo = [&](const BinaryBasicBlock *BB, const MCInst &Inst) {
|
|
|
|
|
std::vector<std::pair<const MCSymbol *, uint64_t>> Counts;
|
2018-03-09 09:45:13 -08:00
|
|
|
const auto *DstSym = BC.MIB->getTargetSymbol(Inst);
|
2017-05-26 15:46:46 -07:00
|
|
|
|
2017-06-09 13:17:36 -07:00
|
|
|
// If this is an indirect call use perf data directly.
|
2018-03-09 09:45:13 -08:00
|
|
|
if (!DstSym && BC.MIB->hasAnnotation(Inst, "CallProfile")) {
|
2017-12-13 23:12:01 -08:00
|
|
|
const auto &ICSP =
|
2018-03-09 09:45:13 -08:00
|
|
|
BC.MIB->getAnnotationAs<IndirectCallSiteProfile>(Inst, "CallProfile");
|
2017-12-13 23:12:01 -08:00
|
|
|
for (const auto &CSI : ICSP) {
|
2020-05-14 17:34:20 -07:00
|
|
|
if (CSI.Symbol)
|
|
|
|
|
Counts.push_back(std::make_pair(CSI.Symbol, CSI.Count));
|
2017-06-09 13:17:36 -07:00
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
const auto Count = BB->getExecutionCount();
|
|
|
|
|
Counts.push_back(std::make_pair(DstSym, Count));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return Counts;
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
// If the function has an invalid profile, try to use the perf data
|
|
|
|
|
// directly (if requested). If there is no perf data for this function,
|
|
|
|
|
// fall back to the CFG walker which attempts to handle missing data.
|
2017-12-13 23:12:01 -08:00
|
|
|
if (!Function->hasValidProfile() && CgFromPerfData &&
|
|
|
|
|
!Function->getAllCallSites().empty()) {
|
2020-12-01 16:29:39 -08:00
|
|
|
LLVM_DEBUG(
|
|
|
|
|
dbgs() << "BOLT-DEBUG: buildCallGraph: Falling back to perf data"
|
|
|
|
|
<< " for " << *Function << "\n");
|
2017-06-09 13:17:36 -07:00
|
|
|
++NumFallbacks;
|
2017-10-18 15:18:52 -07:00
|
|
|
const auto Size = functionSize(Function);
|
2017-12-13 23:12:01 -08:00
|
|
|
for (const auto &CSI : Function->getAllCallSites()) {
|
|
|
|
|
++TotalCallsites;
|
|
|
|
|
|
2020-05-14 17:34:20 -07:00
|
|
|
if (!CSI.Symbol)
|
2017-12-13 23:12:01 -08:00
|
|
|
continue;
|
|
|
|
|
|
2017-10-18 15:18:52 -07:00
|
|
|
// The computed offset may exceed the hot part of the function; hence,
|
2017-12-13 23:12:01 -08:00
|
|
|
// bound it by the size.
|
|
|
|
|
Offset = CSI.Offset;
|
2017-10-18 15:18:52 -07:00
|
|
|
if (Offset > Size)
|
|
|
|
|
Offset = Size;
|
|
|
|
|
|
2020-05-14 17:34:20 -07:00
|
|
|
if (!recordCall(CSI.Symbol, CSI.Count)) {
|
2017-06-09 13:17:36 -07:00
|
|
|
++NotProcessed;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
for (auto *BB : Function->layout()) {
|
|
|
|
|
// Don't count calls from cold blocks unless requested.
|
|
|
|
|
if (BB->isCold() && !IncludeColdCalls)
|
2017-05-26 15:46:46 -07:00
|
|
|
continue;
|
|
|
|
|
|
2017-10-18 15:18:52 -07:00
|
|
|
// Determine whether the block is included in Function's (hot) size
|
|
|
|
|
// See BinaryFunction::estimateHotSize
|
|
|
|
|
bool BBIncludedInFunctionSize = false;
|
|
|
|
|
if (UseFunctionHotSize && Function->isSplit()) {
|
|
|
|
|
if (UseSplitHotSize)
|
|
|
|
|
BBIncludedInFunctionSize = !BB->isCold();
|
|
|
|
|
else
|
|
|
|
|
BBIncludedInFunctionSize = BB->getKnownExecutionCount() != 0;
|
|
|
|
|
} else {
|
|
|
|
|
BBIncludedInFunctionSize = true;
|
|
|
|
|
}
|
2017-05-26 15:46:46 -07:00
|
|
|
|
2017-10-18 15:18:52 -07:00
|
|
|
for (auto &Inst : *BB) {
|
2017-06-09 13:17:36 -07:00
|
|
|
// Find call instructions and extract target symbols from each one.
|
2018-03-09 09:45:13 -08:00
|
|
|
if (BC.MIB->isCall(Inst)) {
|
2017-10-18 15:18:52 -07:00
|
|
|
const auto CallInfo = getCallInfo(BB, Inst);
|
2017-05-26 15:46:46 -07:00
|
|
|
|
2017-10-18 15:18:52 -07:00
|
|
|
if (!CallInfo.empty()) {
|
|
|
|
|
for (const auto &CI : CallInfo) {
|
|
|
|
|
++TotalCallsites;
|
|
|
|
|
if (!recordCall(CI.first, CI.second))
|
|
|
|
|
++NotProcessed;
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
++TotalCallsites;
|
2017-05-26 15:46:46 -07:00
|
|
|
++NotProcessed;
|
2017-06-09 13:17:36 -07:00
|
|
|
}
|
2017-05-26 15:46:46 -07:00
|
|
|
}
|
2017-10-18 15:18:52 -07:00
|
|
|
// Increase Offset if needed
|
|
|
|
|
if (BBIncludedInFunctionSize) {
|
|
|
|
|
Offset += BC.computeCodeSize(&Inst, &Inst + 1);
|
|
|
|
|
}
|
2017-05-26 15:46:46 -07:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2017-06-06 17:43:45 -07:00
|
|
|
#ifndef NDEBUG
|
|
|
|
|
bool PrintInfo = DebugFlag && isCurrentDebugType("callgraph");
|
|
|
|
|
#else
|
|
|
|
|
bool PrintInfo = false;
|
|
|
|
|
#endif
|
|
|
|
|
if (PrintInfo || opts::Verbosity > 0) {
|
2017-06-09 13:17:36 -07:00
|
|
|
outs() << format("BOLT-INFO: buildCallGraph: %u nodes, %u callsites "
|
|
|
|
|
"(%u recursive), density = %.6lf, %u callsites not "
|
|
|
|
|
"processed, %u callsites with invalid profile, "
|
|
|
|
|
"used perf data for %u stale functions.\n",
|
|
|
|
|
Cg.numNodes(), TotalCallsites, RecursiveCallsites,
|
|
|
|
|
Cg.density(), NotProcessed, NoProfileCallsites,
|
|
|
|
|
NumFallbacks);
|
2017-06-06 17:43:45 -07:00
|
|
|
}
|
2017-05-26 15:46:46 -07:00
|
|
|
|
|
|
|
|
return Cg;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
}
|
|
|
|
|
}
|