Diffstat (limited to 'llvm/lib/Analysis/TFUtils.cpp')
-rw-r--r-- llvm/lib/Analysis/TFUtils.cpp | 163
1 file changed, 44 insertions(+), 119 deletions(-)
diff --git a/llvm/lib/Analysis/TFUtils.cpp b/llvm/lib/Analysis/TFUtils.cpp
index 26bc63983b4e..203858c1cf06 100644
--- a/llvm/lib/Analysis/TFUtils.cpp
+++ b/llvm/lib/Analysis/TFUtils.cpp
@@ -82,6 +82,33 @@ void serialize(const Message &SE, std::string *OutStr) {
*OutStr = SE.SerializeAsString();
}
}
+
+int getTFTypeIndex(TensorType TType) {
+ switch (TType) {
+ case TensorType::Double:
+ return TF_DOUBLE;
+ case TensorType::Float:
+ return TF_FLOAT;
+ case TensorType::Int8:
+ return TF_INT8;
+ case TensorType::UInt8:
+ return TF_UINT8;
+ case TensorType::Int16:
+ return TF_INT16;
+ case TensorType::UInt16:
+ return TF_UINT16;
+ case TensorType::Int32:
+ return TF_INT32;
+ case TensorType::UInt32:
+ return TF_UINT32;
+ case TensorType::Int64:
+ return TF_INT64;
+ case TensorType::UInt64:
+ return TF_UINT64;
+ case TensorType::Invalid:
+ llvm_unreachable("Unknown tensor type");
+ }
+}
} // namespace
namespace llvm {
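The new getTFTypeIndex helper keeps the TF C API dependency local to this file: TensorSpec now carries a TF-agnostic TensorType tag, and only this translation unit turns it into a TF_DataType. A minimal caller-side sketch, assuming a TensorSpec variable named Spec (hypothetical):

  // Translate the TF-agnostic tag to the TF C API enum only at the boundary.
  TF_DataType DType = static_cast<TF_DataType>(getTFTypeIndex(Spec.type()));
  size_t ElemSize = TF_DataTypeSize(DType); // per-element byte size, TF C API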
@@ -105,116 +132,6 @@ private:
std::vector<TF_Tensor *> Output;
};
-size_t TensorSpec::getElementByteSize() const {
- return TF_DataTypeSize(static_cast<TF_DataType>(TypeIndex));
-}
-
-TensorSpec::TensorSpec(const std::string &Name, int Port, int TypeIndex,
- const std::vector<int64_t> &Shape)
- : Name(Name), Port(Port), TypeIndex(TypeIndex), Shape(Shape),
- ElementCount(std::accumulate(Shape.begin(), Shape.end(), 1,
- std::multiplies<int64_t>())) {}
-
-Optional<TensorSpec> getTensorSpecFromJSON(LLVMContext &Ctx,
- const json::Value &Value) {
- auto EmitError = [&](const llvm::Twine &Message) -> Optional<TensorSpec> {
- std::string S;
- llvm::raw_string_ostream OS(S);
- OS << Value;
- Ctx.emitError("Unable to parse JSON Value as spec (" + Message + "): " + S);
- return None;
- };
- // FIXME: accept a Path as a parameter, and use it for error reporting.
- json::Path::Root Root("tensor_spec");
- json::ObjectMapper Mapper(Value, Root);
- if (!Mapper)
- return EmitError("Value is not a dict");
-
- std::string TensorName;
- int TensorPort = -1;
- std::string TensorType;
- std::vector<int64_t> TensorShape;
-
- if (!Mapper.map<std::string>("name", TensorName))
- return EmitError("'name' property not present or not a string");
- if (!Mapper.map<std::string>("type", TensorType))
- return EmitError("'type' property not present or not a string");
- if (!Mapper.map<int>("port", TensorPort))
- return EmitError("'port' property not present or not an int");
- if (!Mapper.map<std::vector<int64_t>>("shape", TensorShape))
- return EmitError("'shape' property not present or not an int array");
-
-#define PARSE_TYPE(T, E) \
- if (TensorType == #T) \
- return TensorSpec::createSpec<T>(TensorName, TensorShape, TensorPort);
- TFUTILS_SUPPORTED_TYPES(PARSE_TYPE)
-#undef PARSE_TYPE
- return None;
-}
-
-Optional<std::vector<LoggedFeatureSpec>>
-loadOutputSpecs(LLVMContext &Ctx, StringRef ExpectedDecisionName,
- StringRef ModelPath, StringRef SpecFileOverride) {
- SmallVector<char, 128> OutputSpecsPath;
- StringRef FileName = SpecFileOverride;
- if (FileName.empty()) {
- llvm::sys::path::append(OutputSpecsPath, ModelPath, "output_spec.json");
- FileName = {OutputSpecsPath.data(), OutputSpecsPath.size()};
- }
-
- auto BufferOrError = MemoryBuffer::getFileOrSTDIN(FileName);
- if (!BufferOrError) {
- Ctx.emitError("Error opening output specs file: " + FileName + " : " +
- BufferOrError.getError().message());
- return None;
- }
- auto ParsedJSONValues = json::parse(BufferOrError.get()->getBuffer());
- if (!ParsedJSONValues) {
- Ctx.emitError("Could not parse specs file: " + FileName);
- return None;
- }
- auto ValuesArray = ParsedJSONValues->getAsArray();
- if (!ValuesArray) {
- Ctx.emitError("Expected an array of {tensor_spec:<TensorSpec>, "
- "logging_name:<name>} dictionaries");
- return None;
- }
- std::vector<LoggedFeatureSpec> Ret;
- for (const auto &Value : *ValuesArray)
- if (const auto *Obj = Value.getAsObject())
- if (const auto *SpecPart = Obj->get("tensor_spec"))
- if (auto TensorSpec = getTensorSpecFromJSON(Ctx, *SpecPart))
- if (auto LoggingName = Obj->getString("logging_name")) {
- if (!TensorSpec->isElementType<int64_t>() &&
- !TensorSpec->isElementType<int32_t>() &&
- !TensorSpec->isElementType<float>()) {
- Ctx.emitError(
- "Only int64, int32, and float tensors are supported. "
- "Found unsupported type for tensor named " +
- TensorSpec->name());
- return None;
- }
- Ret.push_back({*TensorSpec, LoggingName->str()});
- }
-
- if (ValuesArray->size() != Ret.size()) {
- Ctx.emitError(
- "Unable to parse output spec. It should be a json file containing an "
- "array of dictionaries. Each dictionary must have a 'tensor_spec' key, "
- "with a json object describing a TensorSpec; and a 'logging_name' key, "
- "which is a string to use as name when logging this tensor in the "
- "training log.");
- return None;
- }
- if (Ret.empty() || *Ret[0].LoggingName != ExpectedDecisionName) {
- Ctx.emitError("The first output spec must describe the decision tensor, "
- "and must have the logging_name " +
- StringRef(ExpectedDecisionName));
- return None;
- }
- return Ret;
-}
-
class TFModelEvaluatorImpl {
public:
TFModelEvaluatorImpl(StringRef SavedModelPath,
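The block removed above takes the TF-agnostic pieces (TensorSpec construction, JSON parsing, and output-spec loading) out of this TF-dependent file; their new home is not visible here because the diffstat is limited to TFUtils.cpp. For reference, the deleted parser accepted JSON of the following shape (keys taken from the Mapper.map calls above; the tensor name "f0" is made up):

  // Hedged sketch: round-tripping a spec through the removed JSON parser.
  llvm::LLVMContext Ctx;
  auto Spec = getTensorSpecFromJSON(
      Ctx, *llvm::json::parse(
               R"({"name": "f0", "port": 0, "type": "int64_t", "shape": [2, 3]})"));
  // On success this is equivalent to:
  //   TensorSpec::createSpec<int64_t>("f0", {2, 3}, 0);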
@@ -383,16 +300,29 @@ TFModelEvaluatorImpl::TFModelEvaluatorImpl(
errs() << TF_Message(Status.get());
invalidate();
}
+ size_t NrSupported = 0;
for (size_t I = 0; I < InputSpecs.size(); ++I) {
auto &InputSpec = InputSpecs[I];
InputFeed[I] = {
TF_GraphOperationByName(Graph.get(), (InputSpec.name()).c_str()),
InputSpec.port()};
+ if (!InputFeed[I].oper) {
+ continue;
+ }
+ if (NrSupported++ != I) {
+ errs()
+ << "Unsupported features must be placed at the end of the InputSpecs";
+ invalidate();
+ return;
+ }
if (!checkReportAndInvalidate(InputFeed[I], InputSpec))
return;
- initInput(I, static_cast<TF_DataType>(InputSpec.typeIndex()),
+ initInput(I, static_cast<TF_DataType>(getTFTypeIndex(InputSpec.type())),
InputSpec.shape());
}
+ InputFeed.resize(NrSupported);
+ Input.resize(NrSupported);
+
for (size_t I = 0; I < OutputSpecsSize; ++I) {
auto OutputSpec = GetOutputSpecs(I);
OutputFeed[I] = {
@@ -470,7 +400,9 @@ void TFModelEvaluatorImpl::initInput(size_t Index, TF_DataType Type,
}
void *TFModelEvaluator::getUntypedInput(size_t Index) {
- return TF_TensorData(Impl->getInput()[Index]);
+ if (Index < Impl->getInput().size())
+ return TF_TensorData(Impl->getInput()[Index]);
+ return nullptr;
}
TFModelEvaluator::EvaluationResult::EvaluationResult(
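Because Input was resized to the supported prefix, getUntypedInput now returns nullptr for the indices of unsupported features instead of indexing past the vector. A hedged caller-side sketch (the evaluator and buffer names are made up):

  // Treat a null buffer as "this model does not consume feature I",
  // not as an error.
  if (void *Buf = Evaluator->getUntypedInput(I))
    std::memcpy(Buf, FeatureData, FeatureSizeInBytes);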
@@ -495,13 +427,6 @@ TFModelEvaluator::EvaluationResult::getUntypedTensorValue(size_t Index) const {
return TF_TensorData(Impl->getOutput()[Index]);
}
-#define TFUTILS_GETDATATYPE_IMPL(T, E) \
- template <> int TensorSpec::getDataType<T>() { return E; }
-
-TFUTILS_SUPPORTED_TYPES(TFUTILS_GETDATATYPE_IMPL)
-
-#undef TFUTILS_GETDATATYPE_IMPL
-
TFModelEvaluator::EvaluationResult::~EvaluationResult() {}
TFModelEvaluator::~TFModelEvaluator() {}
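The deleted TFUTILS_GETDATATYPE_IMPL specializations were the old way of mapping a C++ element type to a TF enum value, and they forced TensorSpec to know about TF. With this change the spec carries a TensorType tag and the translation happens only in getTFTypeIndex. Roughly (the old call shape is reconstructed from the removed macro):

  // Before: the TF enum came from a per-type template specialization.
  //   int T = TensorSpec::getDataType<float>();   // returned TF_FLOAT
  // After: the TF-agnostic tag is translated only inside TFUtils.cpp.
  int T = getTFTypeIndex(TensorType::Float);       // returns TF_FLOAT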