author     Dimitry Andric <dim@FreeBSD.org>  2023-02-11 12:38:04 +0000
committer  Dimitry Andric <dim@FreeBSD.org>  2023-02-11 12:38:11 +0000
commit     e3b557809604d036af6e00c60f012c2025b59a5e (patch)
tree       8a11ba2269a3b669601e2fd41145b174008f4da8  /llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
parent     08e8dd7b9db7bb4a9de26d44c1cbfd24e869c014 (diff)
Diffstat (limited to 'llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp')
-rw-r--r--  llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp  105
1 file changed, 44 insertions(+), 61 deletions(-)
diff --git a/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp b/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
index 79ea160afc22..a91d2ffe6042 100644
--- a/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
+++ b/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
@@ -10,8 +10,9 @@
// loading of a model from a command line option.
//
//===----------------------------------------------------------------------===//
+#include "llvm/Analysis/TensorSpec.h"
#include "llvm/Config/config.h"
-#if defined(LLVM_HAVE_TF_API)
+#if defined(LLVM_HAVE_TFLITE)
#include "llvm/ADT/BitVector.h"
#include "llvm/Analysis/CallGraph.h"
@@ -20,11 +21,13 @@
#include "llvm/Analysis/ModelUnderTrainingRunner.h"
#include "llvm/Analysis/NoInferenceModelRunner.h"
#include "llvm/Analysis/Utils/TFUtils.h"
+#include "llvm/Analysis/Utils/TrainingLogger.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ManagedStatic.h"
#include <vector>
+#include <optional>
using namespace llvm;
@@ -90,13 +93,7 @@ struct InlineEvent {
int64_t Reward = 0;
};
-/// Collect data we may use for training a model, and write it as a textual
-/// Tensorflow SequenceExample
-/// (https://www.tensorflow.org/api_docs/python/tf/train/SequenceExample)
-/// protobuf (https://developers.google.com/protocol-buffers).
-/// Because this is a protobuf, we cannot just stream the events as they come.
-/// Internally, TrainingLogger stores data in column-major format, because that
-/// lines up with how TF SequenceExample represents it.
+/// Collect data we may use for training a model.
class TrainingLogger final {
public:
TrainingLogger(StringRef LogFileName, const ModelUnderTrainingRunner *MUTR);
@@ -105,17 +102,11 @@ public:
void logInlineEvent(const InlineEvent &Event,
const MLModelRunner &ModelRunner);
- /// Print the stored tensors.
- void print();
-
private:
StringRef LogFileName;
const ModelUnderTrainingRunner *const MUTR;
std::unique_ptr<Logger> L;
BitVector Effects;
- /// There's at least one output. We'll set this to a different value if MUTR
- /// is avaliable.
- size_t OutputCount = 1;
/// Set these 2 clearly OOB, to make sure we set them later.
size_t DefaultDecisionPos = std::numeric_limits<size_t>::max();
size_t DecisionPos = std::numeric_limits<size_t>::max();
@@ -156,7 +147,6 @@ public:
size_t getTotalSizeEstimate();
- virtual ~DevelopmentModeMLInlineAdvisor();
void updateNativeSizeEstimate(int64_t Change) {
*CurrentNativeSize += Change;
}
@@ -169,7 +159,7 @@ public:
std::unique_ptr<MLInlineAdvice>
getAdviceFromModel(CallBase &CB, OptimizationRemarkEmitter &ORE) override;
- Optional<size_t> getNativeSizeEstimate(const Function &F) const;
+ std::optional<size_t> getNativeSizeEstimate(const Function &F) const;
private:
bool isLogging() const { return !!Logger; }
@@ -179,8 +169,8 @@ private:
const bool IsDoingInference;
std::unique_ptr<TrainingLogger> Logger;
- const Optional<int32_t> InitialNativeSize;
- Optional<int32_t> CurrentNativeSize;
+ const std::optional<int32_t> InitialNativeSize;
+ std::optional<int32_t> CurrentNativeSize;
};
/// A variant of MLInlineAdvice that tracks all non-trivial inlining
@@ -190,8 +180,8 @@ public:
LoggingMLInlineAdvice(DevelopmentModeMLInlineAdvisor *Advisor, CallBase &CB,
OptimizationRemarkEmitter &ORE, bool Recommendation,
TrainingLogger &Logger,
- Optional<size_t> CallerSizeEstimateBefore,
- Optional<size_t> CalleeSizeEstimateBefore,
+ std::optional<size_t> CallerSizeEstimateBefore,
+ std::optional<size_t> CalleeSizeEstimateBefore,
bool DefaultDecision, bool Mandatory = false)
: MLInlineAdvice(Advisor, CB, ORE, Recommendation), Logger(Logger),
CallerSizeEstimateBefore(CallerSizeEstimateBefore),
@@ -257,8 +247,8 @@ private:
static const int64_t NoReward = 0;
TrainingLogger &Logger;
- const Optional<size_t> CallerSizeEstimateBefore;
- const Optional<size_t> CalleeSizeEstimateBefore;
+ const std::optional<size_t> CallerSizeEstimateBefore;
+ const std::optional<size_t> CalleeSizeEstimateBefore;
const int64_t DefaultDecision;
const int64_t Mandatory;
};
@@ -284,60 +274,58 @@ TrainingLogger::TrainingLogger(StringRef LogFileName,
const ModelUnderTrainingRunner *MUTR)
: LogFileName(LogFileName), MUTR(MUTR) {
// The first output is the inlining decision.
- if (MUTR)
- OutputCount = MUTR->outputLoggedFeatureSpecs().size();
- std::vector<LoggedFeatureSpec> FT;
+ std::vector<TensorSpec> FT(FeatureMap.begin(), FeatureMap.end());
- for (size_t I = 0; I < NumberOfFeatures; ++I)
- FT.push_back({FeatureMap.at(I), None});
- if (MUTR && MUTR->outputLoggedFeatureSpecs().size() > 1)
- append_range(FT, drop_begin(MUTR->outputLoggedFeatureSpecs()));
+ if (MUTR)
+ append_range(FT, MUTR->extraOutputsForLoggingSpecs());
DefaultDecisionPos = FT.size();
- FT.push_back(
- {TensorSpec::createSpec<int64_t>(DefaultDecisionName, {1}), None});
+ FT.push_back(TensorSpec::createSpec<int64_t>(DefaultDecisionName, {1}));
DecisionPos = FT.size();
- FT.push_back({TensorSpec::createSpec<int64_t>(DecisionName, {1}), None});
+ FT.push_back(TensorSpec::createSpec<int64_t>(DecisionName, {1}));
+ std::error_code EC;
+ auto OS = std::make_unique<raw_fd_ostream>(TrainingLog, EC);
+ if (EC)
+ dbgs() << (EC.message() + ":" + TrainingLog);
L = std::make_unique<Logger>(
- FT, TensorSpec::createSpec<int64_t>(RewardName, {1}),
+ std::move(OS), FT, TensorSpec::createSpec<int64_t>(RewardName, {1}),
InlineSizeEstimatorAnalysis::isEvaluatorRequested());
+ L->switchContext("");
}
/// Log one inlining event.
void TrainingLogger::logInlineEvent(const InlineEvent &Event,
const MLModelRunner &ModelRunner) {
+ L->startObservation();
size_t CurrentFeature = 0;
- for (; CurrentFeature < NumberOfFeatures; ++CurrentFeature) {
- int64_t F = *ModelRunner.getTensor<int64_t>(CurrentFeature);
- L->logInt64Value(CurrentFeature, &F);
- }
+ for (; CurrentFeature < NumberOfFeatures; ++CurrentFeature)
+ L->logTensorValue(CurrentFeature,
+ reinterpret_cast<const char *>(
+ ModelRunner.getTensorUntyped(CurrentFeature)));
- for (size_t I = 1; I < OutputCount; ++I) {
- const auto &Result = *MUTR->lastEvaluationResult();
- const char *RawData =
- reinterpret_cast<const char *>(Result.getUntypedTensorValue(I));
- L->logSpecifiedTensorValue(CurrentFeature, RawData);
- ++CurrentFeature;
- }
+ if (MUTR)
+ for (size_t I = 0; I < MUTR->extraOutputsForLoggingSpecs().size(); ++I) {
+ const char *RawData =
+ reinterpret_cast<const char *>(MUTR->getUntypedExtraOutputValue(I));
+ L->logTensorValue(CurrentFeature, RawData);
+ ++CurrentFeature;
+ }
assert(CurrentFeature == DefaultDecisionPos);
- L->logInt64Value(DefaultDecisionPos, &Event.DefaultDecision);
- L->logInt64Value(DecisionPos, &Event.AdvisedDecision);
+ L->logTensorValue(DefaultDecisionPos,
+ reinterpret_cast<const char *>(&Event.DefaultDecision));
+ L->logTensorValue(DecisionPos,
+ reinterpret_cast<const char *>(&Event.AdvisedDecision));
+ L->endObservation();
if (InlineSizeEstimatorAnalysis::isEvaluatorRequested())
- L->logInt64Reward(Event.Reward);
+ L->logReward(Event.Reward);
// For debugging / later use
Effects.push_back(Event.Effect);
}
-void TrainingLogger::print() {
- std::error_code EC;
- raw_fd_ostream OutFile(LogFileName, EC);
- L->flush(OutFile);
-}
-
DevelopmentModeMLInlineAdvisor::DevelopmentModeMLInlineAdvisor(
Module &M, ModuleAnalysisManager &MAM,
std::unique_ptr<MLModelRunner> ModelRunner,
@@ -353,15 +341,10 @@ DevelopmentModeMLInlineAdvisor::DevelopmentModeMLInlineAdvisor(
assert(IsDoingInference || isLogging());
}
-DevelopmentModeMLInlineAdvisor::~DevelopmentModeMLInlineAdvisor() {
- if (isLogging())
- Logger->print();
-}
-
-Optional<size_t>
+std::optional<size_t>
DevelopmentModeMLInlineAdvisor::getNativeSizeEstimate(const Function &F) const {
if (!InlineSizeEstimatorAnalysis::isEvaluatorRequested())
- return None;
+ return std::nullopt;
auto &R =
FAM.getResult<InlineSizeEstimatorAnalysis>(const_cast<Function &>(F));
if (!R) {
@@ -437,4 +420,4 @@ std::unique_ptr<InlineAdvisor> llvm::getDevelopmentModeAdvisor(
return std::make_unique<DevelopmentModeMLInlineAdvisor>(
M, MAM, std::move(Runner), GetDefaultAdvice, std::move(Logger));
}
-#endif // defined(LLVM_HAVE_TF_API)
+#endif // defined(LLVM_HAVE_TFLITE)
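
For orientation, below is a minimal sketch of the observation-based logging flow that this patch moves TrainingLogger onto. The Logger constructor arguments and the switchContext/startObservation/logTensorValue/endObservation/logReward calls mirror the hunks above; the wrapper function, output file handling, feature spec, and values are illustrative assumptions and are not part of the commit.

// Sketch only: the logging sequence adopted by this patch, not the actual
// TrainingLogger implementation.
#include "llvm/Analysis/TensorSpec.h"
#include "llvm/Analysis/Utils/TrainingLogger.h"
#include "llvm/Support/raw_ostream.h"
#include <cstdint>
#include <memory>
#include <system_error>
#include <vector>

using namespace llvm;

// Hypothetical helper: write one observation (a single int64 feature) plus a
// reward, following the same call order as TrainingLogger::TrainingLogger and
// TrainingLogger::logInlineEvent in the diff above.
static void writeOneObservation(StringRef OutFile) {
  // Feature specs, analogous to FT built from FeatureMap in the constructor.
  std::vector<TensorSpec> FT;
  FT.push_back(TensorSpec::createSpec<int64_t>("example_feature", {1}));

  // Open the output stream up front; the patch merely dbgs()-reports failure.
  std::error_code EC;
  auto OS = std::make_unique<raw_fd_ostream>(OutFile, EC);
  if (EC)
    return;

  // Constructor shape taken from the hunk above: stream, feature specs,
  // reward spec, and whether rewards will be logged.
  Logger L(std::move(OS), FT, TensorSpec::createSpec<int64_t>("reward", {1}),
           /*IncludeReward=*/true);
  L.switchContext("");

  // One observation: start, log each feature as raw bytes by index, end.
  int64_t FeatureValue = 42; // illustrative value
  L.startObservation();
  L.logTensorValue(0, reinterpret_cast<const char *>(&FeatureValue));
  L.endObservation();

  // Reward is logged outside the observation, as in logInlineEvent.
  int64_t Reward = 0; // illustrative value
  L.logReward(Reward);
}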