author     Dimitry Andric <dim@FreeBSD.org>    2023-02-11 12:38:04 +0000
committer  Dimitry Andric <dim@FreeBSD.org>    2023-02-11 12:38:11 +0000
commit     e3b557809604d036af6e00c60f012c2025b59a5e (patch)
tree       8a11ba2269a3b669601e2fd41145b174008f4da8 /llvm/lib/Analysis/TensorSpec.cpp
parent     08e8dd7b9db7bb4a9de26d44c1cbfd24e869c014 (diff)
Diffstat (limited to 'llvm/lib/Analysis/TensorSpec.cpp')
-rw-r--r--  llvm/lib/Analysis/TensorSpec.cpp | 99
1 file changed, 30 insertions(+), 69 deletions(-)
diff --git a/llvm/lib/Analysis/TensorSpec.cpp b/llvm/lib/Analysis/TensorSpec.cpp
index f6a5882371a7..4f7428ded85e 100644
--- a/llvm/lib/Analysis/TensorSpec.cpp
+++ b/llvm/lib/Analysis/TensorSpec.cpp
@@ -18,9 +18,8 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/JSON.h"
#include "llvm/Support/ManagedStatic.h"
-#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
+#include <array>
#include <cassert>
#include <numeric>
@@ -35,6 +34,29 @@ SUPPORTED_TENSOR_TYPES(TFUTILS_GETDATATYPE_IMPL)
#undef TFUTILS_GETDATATYPE_IMPL
+static std::array<std::string, static_cast<size_t>(TensorType::Total)>
+ TensorTypeNames{"INVALID",
+#define TFUTILS_GETNAME_IMPL(T, _) #T,
+ SUPPORTED_TENSOR_TYPES(TFUTILS_GETNAME_IMPL)
+#undef TFUTILS_GETNAME_IMPL
+ };
+
+StringRef toString(TensorType TT) {
+ return TensorTypeNames[static_cast<size_t>(TT)];
+}
+
+void TensorSpec::toJSON(json::OStream &OS) const {
+ OS.object([&]() {
+ OS.attribute("name", name());
+ OS.attribute("type", toString(type()));
+ OS.attribute("port", port());
+ OS.attributeArray("shape", [&]() {
+ for (size_t D : shape())
+ OS.value(static_cast<int64_t>(D));
+ });
+ });
+}
+
TensorSpec::TensorSpec(const std::string &Name, int Port, TensorType Type,
size_t ElementSize, const std::vector<int64_t> &Shape)
: Name(Name), Port(Port), Type(Type), Shape(Shape),
@@ -42,14 +64,15 @@ TensorSpec::TensorSpec(const std::string &Name, int Port, TensorType Type,
std::multiplies<int64_t>())),
ElementSize(ElementSize) {}
-Optional<TensorSpec> getTensorSpecFromJSON(LLVMContext &Ctx,
- const json::Value &Value) {
- auto EmitError = [&](const llvm::Twine &Message) -> Optional<TensorSpec> {
+std::optional<TensorSpec> getTensorSpecFromJSON(LLVMContext &Ctx,
+ const json::Value &Value) {
+ auto EmitError =
+ [&](const llvm::Twine &Message) -> std::optional<TensorSpec> {
std::string S;
llvm::raw_string_ostream OS(S);
OS << Value;
Ctx.emitError("Unable to parse JSON Value as spec (" + Message + "): " + S);
- return None;
+ return std::nullopt;
};
// FIXME: accept a Path as a parameter, and use it for error reporting.
json::Path::Root Root("tensor_spec");
@@ -76,69 +99,7 @@ Optional<TensorSpec> getTensorSpecFromJSON(LLVMContext &Ctx,
return TensorSpec::createSpec<T>(TensorName, TensorShape, TensorPort);
SUPPORTED_TENSOR_TYPES(PARSE_TYPE)
#undef PARSE_TYPE
- return None;
+ return std::nullopt;
}
-Optional<std::vector<LoggedFeatureSpec>>
-loadOutputSpecs(LLVMContext &Ctx, StringRef ExpectedDecisionName,
- StringRef ModelPath, StringRef SpecFileOverride) {
- SmallVector<char, 128> OutputSpecsPath;
- StringRef FileName = SpecFileOverride;
- if (FileName.empty()) {
- llvm::sys::path::append(OutputSpecsPath, ModelPath, "output_spec.json");
- FileName = {OutputSpecsPath.data(), OutputSpecsPath.size()};
- }
-
- auto BufferOrError = MemoryBuffer::getFileOrSTDIN(FileName);
- if (!BufferOrError) {
- Ctx.emitError("Error opening output specs file: " + FileName + " : " +
- BufferOrError.getError().message());
- return None;
- }
- auto ParsedJSONValues = json::parse(BufferOrError.get()->getBuffer());
- if (!ParsedJSONValues) {
- Ctx.emitError("Could not parse specs file: " + FileName);
- return None;
- }
- auto ValuesArray = ParsedJSONValues->getAsArray();
- if (!ValuesArray) {
- Ctx.emitError("Expected an array of {tensor_spec:<TensorSpec>, "
- "logging_name:<name>} dictionaries");
- return None;
- }
- std::vector<LoggedFeatureSpec> Ret;
- for (const auto &Value : *ValuesArray)
- if (const auto *Obj = Value.getAsObject())
- if (const auto *SpecPart = Obj->get("tensor_spec"))
- if (auto TensorSpec = getTensorSpecFromJSON(Ctx, *SpecPart))
- if (auto LoggingName = Obj->getString("logging_name")) {
- if (!TensorSpec->isElementType<int64_t>() &&
- !TensorSpec->isElementType<int32_t>() &&
- !TensorSpec->isElementType<float>()) {
- Ctx.emitError(
- "Only int64, int32, and float tensors are supported. "
- "Found unsupported type for tensor named " +
- TensorSpec->name());
- return None;
- }
- Ret.push_back({*TensorSpec, LoggingName->str()});
- }
-
- if (ValuesArray->size() != Ret.size()) {
- Ctx.emitError(
- "Unable to parse output spec. It should be a json file containing an "
- "array of dictionaries. Each dictionary must have a 'tensor_spec' key, "
- "with a json object describing a TensorSpec; and a 'logging_name' key, "
- "which is a string to use as name when logging this tensor in the "
- "training log.");
- return None;
- }
- if (Ret.empty() || *Ret[0].LoggingName != ExpectedDecisionName) {
- Ctx.emitError("The first output spec must describe the decision tensor, "
- "and must have the logging_name " +
- StringRef(ExpectedDecisionName));
- return None;
- }
- return Ret;
-}
} // namespace llvm
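
Note on the name-table hunk above: TensorTypeNames is built with the same SUPPORTED_TENSOR_TYPES X-macro that already generates the TensorType enum members and the getDataType specializations, so the printable names cannot drift out of sync with the enum. Below is a self-contained sketch of that technique; the MY_SUPPORTED_TYPES list, the MyType enum, and the toName helper are invented for illustration and do not exist in the tree.

    #include <array>
    #include <cstddef>
    #include <string>

    // Hypothetical X-macro list standing in for SUPPORTED_TENSOR_TYPES.
    #define MY_SUPPORTED_TYPES(M) M(Int64) M(Float)

    // The list expands once into the enum...
    enum class MyType : size_t {
      Invalid,
    #define ENUM_ENTRY(T) T,
      MY_SUPPORTED_TYPES(ENUM_ENTRY)
    #undef ENUM_ENTRY
      Total
    };

    // ...and again into a parallel name table. Index 0 is the sentinel
    // "INVALID" entry, mirroring the patch's TensorTypeNames layout.
    static const std::array<std::string, static_cast<size_t>(MyType::Total)>
        MyTypeNames{"INVALID",
    #define NAME_ENTRY(T) #T,
                    MY_SUPPORTED_TYPES(NAME_ENTRY)
    #undef NAME_ENTRY
        };

    const std::string &toName(MyType T) {
      return MyTypeNames[static_cast<size_t>(T)];
    }

Expanding one list through two different per-entry macros is what lets the patch stamp out both the enum members and the "INVALID"-prefixed name array from a single source of truth.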
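A minimal usage sketch of the API after this patch: getTensorSpecFromJSON now reports failure with std::nullopt instead of llvm::None, and the newly added TensorSpec::toJSON serializes a spec back out (using toString(TensorType) for the "type" attribute). The main driver and the JSON literal are assumptions for the example, not code from the tree; it would build against an LLVM 16-era checkout.

    #include "llvm/Analysis/TensorSpec.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/Support/Error.h"
    #include "llvm/Support/JSON.h"
    #include "llvm/Support/raw_ostream.h"
    #include <optional>

    using namespace llvm;

    int main() {
      LLVMContext Ctx;
      // json::parse returns Expected<json::Value>; consume the error on failure.
      Expected<json::Value> Parsed = json::parse(
          R"({"name":"prefetch","port":0,"type":"float","shape":[2,3]})");
      if (!Parsed) {
        consumeError(Parsed.takeError());
        return 1;
      }
      // Failure is now signaled with std::nullopt rather than llvm::None.
      std::optional<TensorSpec> Spec = getTensorSpecFromJSON(Ctx, *Parsed);
      if (!Spec)
        return 1;
      // Round-trip the spec through the new serializer.
      json::OStream OS(outs());
      Spec->toJSON(OS);
      outs() << "\n";
      return 0;
    }

For existing callers, the only source-level change is the Optional to std::optional return type; the parse and serialize semantics are otherwise those visible in the diff.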