[mlgo][nfc] Remove / fix vestigial references to Tensorflow

Some references in comments are unnecessarily specific, for historical reasons.
commit 28bb2193f6
parent b15b84610f
Author: Mircea Trofin
Date:   2023-10-11 08:02:15 -07:00
9 changed files with 25 additions and 19 deletions

View File

@@ -36,9 +36,8 @@ struct ReplayInlinerSettings;
 ///
 /// - Development mode, for training new models.
 /// In this mode, we trade off runtime performance for flexibility. This mode
-/// requires the full C Tensorflow API library, and evaluates models
-/// dynamically. This mode also permits generating training logs, for offline
-/// training.
+/// requires the TFLite library, and evaluates models dynamically. This mode
+/// also permits generating training logs, for offline training.
 ///
 /// - Dynamically load an advisor via a plugin (PluginInlineAdvisorAnalysis)
 enum class InliningAdvisorMode : int { Default, Release, Development };
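For readers new to MLGO, the three modes above are normally selected via a command-line option. The sketch below shows how such an enum is typically exposed through llvm::cl; the option name example-ml-inliner-mode and the surrounding variable names are hypothetical stand-ins for illustration, not the in-tree option.

```cpp
// Minimal sketch, assuming only the enum from the hunk above; the option name
// "example-ml-inliner-mode" and variable names are hypothetical.
#include "llvm/Support/CommandLine.h"

enum class InliningAdvisorMode : int { Default, Release, Development };

static llvm::cl::opt<InliningAdvisorMode> ExampleAdvisorMode(
    "example-ml-inliner-mode",
    llvm::cl::init(InliningAdvisorMode::Default),
    llvm::cl::desc("Which inlining advisor to use (illustrative)"),
    llvm::cl::values(
        clEnumValN(InliningAdvisorMode::Default, "default",
                   "heuristic cost model, no ML"),
        clEnumValN(InliningAdvisorMode::Release, "release",
                   "precompiled model shipped with the compiler"),
        clEnumValN(InliningAdvisorMode::Development, "development",
                   "TFLite-evaluated model; can emit training logs")));
```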

View File

@@ -92,8 +92,8 @@ constexpr bool isHeuristicInlineCostFeature(InlineCostFeatureIndex Feature) {
 // List of features. Each feature is defined through a triple:
 // - the name of an enum member, which will be the feature index
-// - a textual name, used for Tensorflow model binding (so it needs to match the
-// names used by the Tensorflow model)
+// - a textual name, used for ML model binding (so it needs to match the
+// names used by the ML model).
 // - a documentation description. Currently, that is not used anywhere
 // programmatically, and serves as workaround to inability of inserting comments
 // in macros.
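To make the triple structure concrete, here is a self-contained sketch of the X-macro pattern the comment describes; the two features and their strings are illustrative placeholders, not the actual list in InlineModelFeatureMaps.h.

```cpp
// Simplified sketch of the triple-based X-macro pattern described above.
// The feature entries here are placeholders, not the real feature list.
#include <cstddef>

#define EXAMPLE_FEATURES(M)                                                    \
  M(CalleeBasicBlockCount, "callee_basic_block_count",                        \
    "number of basic blocks in the callee")                                    \
  M(CallSiteHeight, "callsite_height",                                        \
    "position of the call site in the original call graph")

// First expansion: the enum, whose members double as feature indices.
enum class ExampleFeatureIndex : std::size_t {
#define POPULATE_INDICES(ENUM_NAME, NAME, DOC) ENUM_NAME,
  EXAMPLE_FEATURES(POPULATE_INDICES)
#undef POPULATE_INDICES
  NumberOfFeatures
};

// Second expansion: the textual names the ML model binding matches against.
constexpr const char *ExampleFeatureNames[] = {
#define POPULATE_NAMES(ENUM_NAME, NAME, DOC) NAME,
    EXAMPLE_FEATURES(POPULATE_NAMES)
#undef POPULATE_NAMES
};
```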

View File

@@ -17,7 +17,9 @@ namespace llvm {
 class LLVMContext;
 /// MLModelRunner interface: abstraction of a mechanism for evaluating a
-/// tensorflow "saved model".
+/// ML model. More abstractly, evaluating a function that takes tensors as
+/// arguments, described via TensorSpecs, and returns a tensor. Currently, the
+/// latter is assumed to be a scalar, in the absence of more elaborate scenarios.
 /// NOTE: feature indices are expected to be consistent all accross
 /// MLModelRunners (pertaining to the same model), and also Loggers (see
 /// TFUtils.h)
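As a rough picture of the abstraction the new comment describes (typed tensor inputs, a scalar result), here is a minimal standalone sketch; it only mirrors the general shape and is not the actual MLModelRunner declaration.

```cpp
// Standalone sketch of the interface shape described above, not the real
// MLModelRunner: callers fill typed input tensors, then read a scalar result.
#include <cstddef>

class ExampleModelRunner {
public:
  virtual ~ExampleModelRunner() = default;

  // Typed view over the buffer backing the Index-th input tensor.
  template <typename T> T *getTensor(std::size_t Index) {
    return static_cast<T *>(getTensorUntyped(Index));
  }

  // Evaluate the model; the result is assumed to be a single scalar.
  template <typename T> T evaluate() {
    return *static_cast<T *>(evaluateUntyped());
  }

protected:
  virtual void *getTensorUntyped(std::size_t Index) = 0;
  virtual void *evaluateUntyped() = 0;
};
```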

View File

@@ -23,9 +23,10 @@
 namespace llvm {
-/// ModelUnderTrainingRunner - training mode implementation. It uses TF C APIs
+/// ModelUnderTrainingRunner - training mode implementation. It uses TFLite
 /// to dynamically load and evaluate a TF SavedModel
-/// (https://www.tensorflow.org/guide/saved_model). Runtime performance is
+/// (https://www.tensorflow.org/guide/saved_model) converted to TFLite. See
+/// lib/Analysis/models/saved-model-to-tflite.py. Runtime performance is
 /// sacrificed for ease of use while training.
 class ModelUnderTrainingRunner final : public MLModelRunner {
 public:

View File

@@ -26,11 +26,15 @@ namespace llvm {
 /// Machine Learning on Heterogeneous Distributed Systems", section 4.2, para 2:
 /// https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/45166.pdf)
 ///
-/// Known tensor types. The left part is the C type, the right is a name we
-/// can use to identify the type (to implement TensorSpec equality checks), and
-/// to use, if needed, when mapping to an underlying evaluator's type system.
-/// The main requirement is that the C type we use has the same size and
-/// encoding (e.g. endian-ness) as the one used by the evaluator.
+/// Note that the design is motivated by Tensorflow, but it is not intended to
+/// be Tensorflow-specific.
+///
+/// Known tensor types. The left part is the C type, the
+/// right is a name we can use to identify the type (to implement TensorSpec
+/// equality checks), and to use, if needed, when mapping to an underlying
+/// evaluator's type system. The main requirement is that the C type we use has
+/// the same size and encoding (e.g. endian-ness) as the one used by the
+/// evaluator.
 #define SUPPORTED_TENSOR_TYPES(M) \
 M(float, Float) \
 M(double, Double) \
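The requirements stated in the comment (a name that identifies the element type for equality checks, and a C type whose size and encoding match the evaluator's) can be pictured with a small TensorSpec-like struct. This is a simplified sketch under those assumptions, not the real llvm::TensorSpec.

```cpp
// Simplified sketch, not the real llvm::TensorSpec: an element type identified
// by name and size, plus a shape; equality compares all of it.
#include <cstddef>
#include <cstdint>
#include <functional>
#include <numeric>
#include <string>
#include <vector>

struct ExampleTensorSpec {
  std::string ElementTypeName; // e.g. "Float", "Int64": the "right part" above
  std::size_t ElementSize;     // sizeof the C type; must match the evaluator
  std::vector<int64_t> Shape;

  bool operator==(const ExampleTensorSpec &Other) const {
    return ElementTypeName == Other.ElementTypeName &&
           ElementSize == Other.ElementSize && Shape == Other.Shape;
  }

  // Total buffer size in bytes implied by the spec.
  std::size_t getTotalBytes() const {
    return ElementSize * static_cast<std::size_t>(std::accumulate(
                             Shape.begin(), Shape.end(), int64_t(1),
                             std::multiplies<int64_t>()));
  }
};
```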

View File

@@ -1,4 +1,4 @@
-//===- TFUtils.h - utilities for tensorflow C API ---------------*- C++ -*-===//
+//===- TFUtils.h - utilities for TFLite -------------------------*- C++ -*-===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
@@ -93,7 +93,7 @@ public:
 return static_cast<T *>(getUntypedInput(Index));
 }
-/// Returns true if the tensorflow model was loaded successfully, false
+/// Returns true if the model was loaded successfully, false
 /// otherwise.
 bool isValid() const { return !!Impl; }

View File

@@ -98,7 +98,7 @@
 /* Define if zstd compression is available */
 #cmakedefine01 LLVM_ENABLE_ZSTD
-/* Define if LLVM is using tflite instead of libtensorflow */
+/* Define if LLVM is using tflite */
 #cmakedefine LLVM_HAVE_TFLITE
 /* Define to 1 if you have the <sysexits.h> header file. */
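Since the cmakedefine above either defines LLVM_HAVE_TFLITE or leaves it out of the generated config.h, dependent code is guarded with a plain #ifdef. A minimal sketch, with a placeholder function rather than real LLVM code:

```cpp
// Minimal sketch of consuming the cmakedefine above; the function and its body
// are placeholders, not actual LLVM code.
#include "llvm/Config/config.h"

bool hasTFLiteSupport() {
#ifdef LLVM_HAVE_TFLITE
  return true;  // built with TFLite: development-mode evaluation is available
#else
  return false; // no TFLite in this build
#endif
}
```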

View File

@@ -6,7 +6,7 @@
 //
 //===----------------------------------------------------------------------===//
 //
-// This file implements a model runner using Tensorflow C APIs, allowing the
+// This file implements a model runner using TFLite, allowing the
 // loading of a model from a command line option.
 //
 //===----------------------------------------------------------------------===//

View File

@@ -1,4 +1,4 @@
-//===- TFUtils.cpp - tensorflow evaluation utilities ----------------------===//
+//===- TFUtils.cpp - TFLite-based evaluation utilities --------------------===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
@@ -6,7 +6,7 @@
 //
 //===----------------------------------------------------------------------===//
 //
-// This file implements utilities for interfacing with tensorflow C APIs.
+// This file implements utilities for interfacing with TFLite.
 //
 //===----------------------------------------------------------------------===//
 #include "llvm/Config/config.h"