//===--- Passes/BinaryFunctionCallGraph.cpp -------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "BinaryFunctionCallGraph.h"
#include "BinaryFunction.h"
#include "BinaryContext.h"
#include "llvm/Support/Options.h"
#include "llvm/Support/Timer.h"
#define DEBUG_TYPE "callgraph"
namespace opts {

extern llvm::cl::opt<bool> TimeOpts;
extern llvm::cl::opt<unsigned> Verbosity;

} // namespace opts

namespace llvm {
namespace bolt {
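
// Register a function as a call graph node, keeping the Funcs and
// FuncToNodeId mappings in sync with the node IDs assigned by the base
// CallGraph.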
CallGraph::NodeId BinaryFunctionCallGraph::addNode(BinaryFunction *BF,
                                                   uint32_t Size,
                                                   uint64_t Samples) {
  auto Id = CallGraph::addNode(Size, Samples);
  assert(size_t(Id) == Funcs.size());
  Funcs.push_back(BF);
  FuncToNodeId[BF] = Id;
  assert(Funcs[Id] == BF);
  return Id;
}

std::deque<BinaryFunction *> BinaryFunctionCallGraph::buildTraversalOrder() {
NamedRegionTimer T1("buildcgorder", "Build cg traversal order",
"CG breakdown", "CG breakdown", opts::TimeOpts);
std::deque<BinaryFunction *> TopologicalOrder;
enum NodeStatus { NEW, VISITING, VISITED };
std::vector<NodeStatus> NodeStatus(Funcs.size());
std::stack<NodeId> Worklist;
for (auto *Func : Funcs) {
const auto Id = FuncToNodeId.at(Func);
Worklist.push(Id);
NodeStatus[Id] = NEW;
}
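
  // Iterative post-order DFS: the first time a node is popped it is marked
  // VISITING and re-pushed beneath its unvisited successors; the second pop
  // appends it to the order. Callees therefore appear before their callers.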
  while (!Worklist.empty()) {
    const auto FuncId = Worklist.top();
    Worklist.pop();

    if (NodeStatus[FuncId] == VISITED)
      continue;

    if (NodeStatus[FuncId] == VISITING) {
      TopologicalOrder.push_back(Funcs[FuncId]);
      NodeStatus[FuncId] = VISITED;
      continue;
    }

    assert(NodeStatus[FuncId] == NEW);
    NodeStatus[FuncId] = VISITING;
    Worklist.push(FuncId);
    for (const auto Callee : successors(FuncId)) {
      if (NodeStatus[Callee] == VISITING || NodeStatus[Callee] == VISITED)
        continue;
      Worklist.push(Callee);
    }
  }

  return TopologicalOrder;
}

BinaryFunctionCallGraph buildCallGraph(BinaryContext &BC,
                                       std::map<uint64_t, BinaryFunction> &BFs,
                                       CgFilterFunction Filter,
                                       bool CgFromPerfData,
                                       bool IncludeColdCalls,
                                       bool UseFunctionHotSize,
                                       bool UseSplitHotSize,
                                       bool UseEdgeCounts,
                                       bool IgnoreRecursiveCalls) {
NamedRegionTimer T1("buildcg", "Callgraph construction", "CG breakdown",
"CG breakdown", opts::TimeOpts);
BinaryFunctionCallGraph Cg;
static constexpr auto COUNT_NO_PROFILE = BinaryBasicBlock::COUNT_NO_PROFILE;
// Compute function size
auto functionSize = [&](const BinaryFunction *Function) {
return UseFunctionHotSize && Function->isSplit()
? Function->estimateHotSize(UseSplitHotSize)
: Function->estimateSize();
};

  // Add call graph nodes.
  auto lookupNode = [&](BinaryFunction *Function) {
    const auto Id = Cg.maybeGetNodeId(Function);
    if (Id == CallGraph::InvalidId) {
      // It's ok to use the hot size here when the function is split. This is
      // because emitFunctions will emit the hot part first in the order that
      // is computed by ReorderFunctions. The cold part will be emitted with
      // the rest of the cold functions and code.
      const auto Size = functionSize(Function);
      // NOTE: for functions without a profile, we set the number of samples
      // to zero. This will keep these functions from appearing in the hot
      // section. This is a little weird because we wouldn't be trying to
      // create a node for a function unless it was the target of a call from
      // a hot block. The alternative would be to set the count to one or
      // accumulate the number of calls from the callsite into the function
      // samples. Results from performance testing seem to favor the zero
      // count though, so I'm leaving it this way for now.
      const auto Samples =
          Function->hasProfile() ? Function->getExecutionCount() : 0;
      return Cg.addNode(Function, Size, Samples);
    } else {
      return Id;
    }
  };

  // Add call graph edges.
  uint64_t NotProcessed = 0;
  uint64_t TotalCallsites = 0;
  uint64_t NoProfileCallsites = 0;
  uint64_t NumFallbacks = 0;
  uint64_t RecursiveCallsites = 0;
  for (auto &It : BFs) {
    auto *Function = &It.second;
    if (Filter(*Function)) {
      continue;
    }

    const auto SrcId = lookupNode(Function);
    // Offset of the current callsite (instruction) from the beginning of the
    // function; advanced as we walk the instructions below.
    uint64_t Offset = 0;
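
    // Record a call edge from the current function to DestSymbol's function
    // with the given count. Returns false when no edge is added: unknown or
    // filtered-out callee, or a recursive call with IgnoreRecursiveCalls set.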
    auto recordCall = [&](const MCSymbol *DestSymbol, const uint64_t Count) {
      if (auto *DstFunc =
              DestSymbol ? BC.getFunctionForSymbol(DestSymbol) : nullptr) {
        if (DstFunc == Function) {
          DEBUG(dbgs() << "BOLT-INFO: recursive call detected in " << *DstFunc
                       << "\n");
          ++RecursiveCallsites;
          if (IgnoreRecursiveCalls)
            return false;
        }
        if (Filter(*DstFunc)) {
          return false;
        }

        const auto DstId = lookupNode(DstFunc);
        const bool IsValidCount = Count != COUNT_NO_PROFILE;
        const auto AdjCount = UseEdgeCounts && IsValidCount ? Count : 1;
        if (!IsValidCount)
          ++NoProfileCallsites;
        Cg.incArcWeight(SrcId, DstId, AdjCount, Offset);
        DEBUG(if (opts::Verbosity > 1) {
          dbgs() << "BOLT-DEBUG: buildCallGraph: call " << *Function << " -> "
                 << *DstFunc << " @ " << Offset << "\n";
        });
        return true;
      }

      return false;
    };

    // Get pairs of (symbol, count) for each target at this callsite.
    // If the call is to an unknown function, the symbol will be nullptr.
    // If there is no profiling data, the count will be COUNT_NO_PROFILE.
    auto getCallInfo = [&](const BinaryBasicBlock *BB, const MCInst &Inst) {
      std::vector<std::pair<const MCSymbol *, uint64_t>> Counts;
      const auto *DstSym = BC.MIB->getTargetSymbol(Inst);

      // If this is an indirect call, use perf data directly.
      if (!DstSym && BC.MIB->hasAnnotation(Inst, "CallProfile")) {
        const auto &ICSP = BC.MIB->getAnnotationAs<IndirectCallSiteProfile>(
            Inst, "CallProfile");
        for (const auto &CSI : ICSP) {
          if (!CSI.IsFunction)
            continue;
          if (auto *DstBD = BC.getBinaryDataByName(CSI.Name)) {
            Counts.push_back(std::make_pair(DstBD->getSymbol(), CSI.Count));
          }
        }
      } else {
        const auto Count = BB->getExecutionCount();
        Counts.push_back(std::make_pair(DstSym, Count));
      }

      return Counts;
    };

    // If the function has an invalid profile, try to use the perf data
    // directly (if requested). If there is no perf data for this function,
    // fall back to the CFG walker, which attempts to handle missing data.
    if (!Function->hasValidProfile() && CgFromPerfData &&
        !Function->getAllCallSites().empty()) {
      DEBUG(dbgs() << "BOLT-DEBUG: buildCallGraph: Falling back to perf data"
                   << " for " << *Function << "\n");
      ++NumFallbacks;
      const auto Size = functionSize(Function);
      for (const auto &CSI : Function->getAllCallSites()) {
        ++TotalCallsites;
        if (!CSI.IsFunction)
          continue;

        auto *DstBD = BC.getBinaryDataByName(CSI.Name);
        if (!DstBD)
          continue;

        // The computed offset may exceed the hot part of the function; hence,
        // bound it by the size.
        Offset = CSI.Offset;
        if (Offset > Size)
          Offset = Size;

        if (!recordCall(DstBD->getSymbol(), CSI.Count)) {
          ++NotProcessed;
        }
      }
    } else {
      for (auto *BB : Function->layout()) {
        // Don't count calls from cold blocks unless requested.
        if (BB->isCold() && !IncludeColdCalls)
          continue;

        // Determine whether the block is included in Function's (hot) size.
        // See BinaryFunction::estimateHotSize.
        bool BBIncludedInFunctionSize = false;
        if (UseFunctionHotSize && Function->isSplit()) {
          if (UseSplitHotSize)
            BBIncludedInFunctionSize = !BB->isCold();
          else
            BBIncludedInFunctionSize = BB->getKnownExecutionCount() != 0;
        } else {
          BBIncludedInFunctionSize = true;
        }

        for (auto &Inst : *BB) {
          // Find call instructions and extract target symbols from each one.
          if (BC.MIB->isCall(Inst)) {
            const auto CallInfo = getCallInfo(BB, Inst);

            if (!CallInfo.empty()) {
              for (const auto &CI : CallInfo) {
                ++TotalCallsites;
                if (!recordCall(CI.first, CI.second))
                  ++NotProcessed;
              }
            } else {
              ++TotalCallsites;
              ++NotProcessed;
            }
          }

          // Advance Offset, counting only instructions that contribute to
          // the function size estimate computed above.
          if (BBIncludedInFunctionSize) {
            Offset += BC.computeCodeSize(&Inst, &Inst + 1);
          }
        }
      }
    }
  }

#ifndef NDEBUG
  bool PrintInfo = DebugFlag && isCurrentDebugType(DEBUG_TYPE);
#else
  bool PrintInfo = false;
#endif
  if (PrintInfo || opts::Verbosity > 0) {
    outs() << format("BOLT-INFO: buildCallGraph: %zu nodes, %" PRIu64
                     " callsites (%" PRIu64 " recursive), density = %.6lf, "
                     "%" PRIu64 " callsites not processed, %" PRIu64
                     " callsites with invalid profile, used perf data for "
                     "%" PRIu64 " stale functions.\n",
                     Cg.numNodes(), TotalCallsites, RecursiveCallsites,
                     Cg.density(), NotProcessed, NoProfileCallsites,
                     NumFallbacks);
  }

  return Cg;
}

} // namespace bolt
} // namespace llvm
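
// Example usage (an illustrative sketch only; the flag values are assumptions
// for demonstration, not recommended defaults, and runOnFunction is a
// hypothetical visitor):
//
//   BinaryFunctionCallGraph Cg = buildCallGraph(
//       BC, BFs,
//       [](const BinaryFunction &BF) { return !BF.isSimple(); }, // Filter
//       /*CgFromPerfData=*/false, /*IncludeColdCalls=*/false,
//       /*UseFunctionHotSize=*/true, /*UseSplitHotSize=*/false,
//       /*UseEdgeCounts=*/true, /*IgnoreRecursiveCalls=*/false);
//   for (BinaryFunction *BF : Cg.buildTraversalOrder())
//     runOnFunction(*BF); // callees are visited before their callers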