Overview
CustomOpDispatcher is the replacement API for defining custom CPU ops and custom op resolvers in LiteRT. It provides a cleaner interface for plugging custom operations into LiteRT models.
Why Use CustomOpDispatcher?
Legacy Approach (Deprecated)
Previously, developers had to:
- Manually create TfLiteRegistration structures
- Implement TFLite-specific callback functions (init, prepare, invoke, free)
- Work directly with low-level TFLite structures (TfLiteContext, TfLiteNode, TfLiteTensor)
- Call MutableOpResolver::AddCustom() directly (see the sketch after this list)
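For contrast, here is a minimal sketch of that legacy pattern, assuming upstream TensorFlow Lite header paths and a hypothetical op; adjust names and paths for your tree:

#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/mutable_op_resolver.h"

// Legacy-style callbacks wired directly into a TfLiteRegistration.
TfLiteStatus MyOpPrepare(TfLiteContext* context, TfLiteNode* node) {
  return kTfLiteOk;  // output shape inference would go here
}

TfLiteStatus MyOpInvoke(TfLiteContext* context, TfLiteNode* node) {
  return kTfLiteOk;  // the actual computation would go here
}

TfLiteRegistration* Register_MY_CUSTOM_OP() {
  // Field order: init, free, prepare, invoke.
  static TfLiteRegistration r = {nullptr, nullptr, MyOpPrepare, MyOpInvoke};
  return &r;
}

void RegisterLegacy(tflite::MutableOpResolver& resolver) {
  resolver.AddCustom("MyCustomOp", Register_MY_CUSTOM_OP());
}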
New CustomOpDispatcher Approach
CustomOpDispatcher provides:
- A clean abstraction layer over TFLite internals
- Integration with LiteRT's compiled model API
Flow
┌─────────────────┐
│ User Custom Op │ (Your implementation)
└────────┬────────┘
│
▼
┌─────────────────────────┐
│ LiteRtCustomOpKernel │ (C API interface)
└────────┬────────────────┘
│
▼
┌─────────────────────────┐
│ CustomOpDispatcher │ (Bridge layer)
└────────┬────────────────┘
│
▼
┌─────────────────────────┐
│ TfLiteRegistration │ (TFLite runtime)
└─────────────────────────┘
Key Components and Files
Core Implementation
- runtime/custom_op_dispatcher.h: Main dispatcher class header
- runtime/custom_op_dispatcher.cc: Implementation bridging LiteRT to TFLite
API Headers
- c/litert_custom_op_kernel.h: C API for the custom op kernel interface
- cc/litert_custom_op_kernel.h: C++ wrapper with an object-oriented interface
Options System
- core/options.h: Core options struct, including CustomOpOption
- c/litert_options.h: C API for managing compilation options
- cc/litert_options.h: C++ wrapper for options management
Test Examples
- cc/litert_custom_op_test.cc: Example usage of the C++ API
- c/litert_custom_op_test.cc: Example usage of the C API
API Reference
Core Kernel Interface (C API)
typedef struct {
  // Initializes per-op state from the op's init data.
  LiteRtStatus (*Init)(void* user_data, const void* init_data,
                       size_t init_data_size);
  // Derives output layouts (shapes) from the input layouts.
  LiteRtStatus (*GetOutputLayouts)(void* user_data, size_t num_inputs,
                                   const LiteRtLayout* input_layouts,
                                   size_t num_outputs,
                                   LiteRtLayout* output_layouts);
  // Executes the op over the given input/output tensor buffers.
  LiteRtStatus (*Run)(void* user_data, size_t num_inputs,
                      const LiteRtTensorBuffer* inputs, size_t num_outputs,
                      LiteRtTensorBuffer* outputs);
  // Releases any resources held in user_data.
  LiteRtStatus (*Destroy)(void* user_data);
} LiteRtCustomOpKernel;
Abstract C++ Base Class
class CustomOpKernel {
public:
virtual const std::string& OpName() const = 0;
virtual int OpVersion() const = 0;
virtual Expected<void> Init(const void* init_data, size_t init_data_size) = 0;
virtual Expected<void> GetOutputLayouts(
const std::vector<Layout>& input_layouts,
std::vector<Layout>& output_layouts) = 0;
virtual Expected<void> Run(const std::vector<TensorBuffer>& inputs,
std::vector<TensorBuffer>& outputs) = 0;
virtual Expected<void> Destroy() = 0;
};
Implementation Guide
Step 1: Define the Custom Op
C++ Implementation
#include "litert/cc/litert_custom_op_kernel.h"
class MyCustomOpKernel : public litert::CustomOpKernel {
public:
const std::string& OpName() const override {
return op_name_;
}
int OpVersion() const override {
return 1;
}
Expected<void> Init(const void* init_data, size_t init_data_size) override {
// Initialize any persistent state
return {};
}
Expected<void> GetOutputLayouts(
const std::vector<Layout>& input_layouts,
std::vector<Layout>& output_layouts) override {
// Define output tensor shapes based on inputs
output_layouts[0] = input_layouts[0];
return {};
}
Expected<void> Run(const std::vector<TensorBuffer>& inputs,
std::vector<TensorBuffer>& outputs) override {
// Lock input buffers for reading
LITERT_ASSIGN_OR_RETURN(auto input_lock,
TensorBufferScopedLock::Create<float>(
inputs[0], TensorBuffer::LockMode::kRead));
// Lock output buffer for writing
LITERT_ASSIGN_OR_RETURN(auto output_lock,
TensorBufferScopedLock::Create<float>(
outputs[0], TensorBuffer::LockMode::kWrite));
const float* input_data = input_lock.second;
float* output_data = output_lock.second;
// Perform computation
// ... your custom operation logic ...
return {};
}
Expected<void> Destroy() override {
// Clean up resources
return {};
}
private:
const std::string op_name_ = "MyCustomOp";
};
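To make the elided computation concrete, the body of Run could be a simple element-wise transform over the locked pointers. A minimal sketch, assuming the output has the same element count as the input; num_elements is hypothetical plumbing, and in a real kernel you would derive it from the tensor layout:

// Hypothetical element-wise body: doubles every input element.
void DoubleElements(const float* input_data, float* output_data,
                    size_t num_elements) {
  for (size_t i = 0; i < num_elements; ++i) {
    output_data[i] = 2.0f * input_data[i];
  }
}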
C Implementation
#include "litert/c/litert_custom_op_kernel.h"
LiteRtStatus MyOp_Init(void* user_data, const void* init_data,
size_t init_data_size) {
// Initialize state
return kLiteRtStatusOk;
}
LiteRtStatus MyOp_GetOutputLayouts(void* user_data, size_t num_inputs,
const LiteRtLayout* input_layouts,
size_t num_outputs,
LiteRtLayout* output_layouts) {
// Set output shape to match first input
output_layouts[0] = input_layouts[0];
return kLiteRtStatusOk;
}
LiteRtStatus MyOp_Run(void* user_data, size_t num_inputs,
const LiteRtTensorBuffer* inputs, size_t num_outputs,
LiteRtTensorBuffer* outputs) {
// Lock buffers
void* input_addr;
LITERT_RETURN_IF_ERROR(LiteRtLockTensorBuffer(
inputs[0], &input_addr, kLiteRtTensorBufferLockModeRead));
void* output_addr;
LITERT_RETURN_IF_ERROR(LiteRtLockTensorBuffer(
outputs[0], &output_addr, kLiteRtTensorBufferLockModeWrite));
// Perform computation
const float* in = (const float*)input_addr;  // locked for reading
float* out = (float*)output_addr;            // locked for writing
// ... your custom operation logic ...
// Unlock buffers
LITERT_RETURN_IF_ERROR(LiteRtUnlockTensorBuffer(inputs[0]));
LITERT_RETURN_IF_ERROR(LiteRtUnlockTensorBuffer(outputs[0]));
return kLiteRtStatusOk;
}
LiteRtStatus MyOp_Destroy(void* user_data) {
// Clean up
return kLiteRtStatusOk;
}
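The user_data parameter is how per-instance state flows through these callbacks. A hedged sketch follows, assuming the pointer is supplied at registration time and handed back to every callback; MyOpState and the handling of init_data are hypothetical:

// Hypothetical state struct threaded through user_data.
typedef struct {
  float scale;
} MyOpState;

LiteRtStatus MyOp_InitWithState(void* user_data, const void* init_data,
                                size_t init_data_size) {
  MyOpState* state = (MyOpState*)user_data;
  state->scale = 2.0f;  // e.g., parse init_data into the state here
  return kLiteRtStatusOk;
}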
Step 2: Register the Custom Op
C++ Registration
#include "litert/cc/litert_environment.h"
#include "litert/cc/litert_compiled_model.h"
#include "litert/cc/litert_options.h"
// Create environment
LITERT_ASSERT_OK_AND_ASSIGN(Environment env, Environment::Create({}));
// Load model
Model model = /* load your model */;
// Create options and register custom op
LITERT_ASSERT_OK_AND_ASSIGN(Options options, Options::Create());
options.SetHardwareAccelerators(kLiteRtHwAcceleratorCpu);
// Register custom op kernel
MyCustomOpKernel my_custom_op;
ASSERT_TRUE(options.AddCustomOpKernel(my_custom_op));
// Create compiled model with custom op
LITERT_ASSERT_OK_AND_ASSIGN(CompiledModel compiled_model,
CompiledModel::Create(env, model, options));
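For completeness, the model-loading step elided above typically reads a flatbuffer from disk. A hedged one-liner, assuming the cc API's Model::CreateFromFile factory and a hypothetical file name; check your version for the exact factory:

LITERT_ASSERT_OK_AND_ASSIGN(Model model,
                            Model::CreateFromFile("my_model.tflite"));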
C Registration
#include "litert/c/litert_environment.h"
#include "litert/c/litert_compiled_model.h"
#include "litert/c/litert_options.h"
// Create options
LiteRtOptions options;
LiteRtCreateOptions(&options);
LiteRtSetOptionsHardwareAccelerators(options, kLiteRtHwAcceleratorCpu);
// Define kernel
LiteRtCustomOpKernel kernel = {
.Init = MyOp_Init,
.GetOutputLayouts = MyOp_GetOutputLayouts,
.Run = MyOp_Run,
.Destroy = MyOp_Destroy,
};
// Register custom op
LiteRtAddCustomOpKernelOption(options, "MyCustomOp", 1, &kernel, NULL);
// Create environment
LiteRtEnvironment env;
LiteRtCreateEnvironment(0, NULL, &env);
// Create compiled model
LiteRtCompiledModel compiled_model;
LiteRtCreateCompiledModel(env, model, options, &compiled_model);
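The snippet above drops return values for brevity. By convention these C API calls return a LiteRtStatus (the LITERT_RETURN_IF_ERROR usage earlier relies on this), so real code should check each one; a minimal sketch:

LiteRtOptions checked_options = NULL;
if (LiteRtCreateOptions(&checked_options) != kLiteRtStatusOk) {
  // handle the error: log, clean up, and bail out
}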
Step 3: Run the Model
C++ Execution
// Create buffers
LITERT_ASSERT_OK_AND_ASSIGN(auto input_buffers,
compiled_model.CreateInputBuffers());
LITERT_ASSERT_OK_AND_ASSIGN(auto output_buffers,
compiled_model.CreateOutputBuffers());
// Fill input data
input_buffers[0].Write<float>(your_input_data);
// Run inference
compiled_model.Run(input_buffers, output_buffers);
// Read output
LITERT_ASSERT_OK_AND_ASSIGN(auto lock,
TensorBufferScopedLock::Create<const float>(
output_buffers[0], TensorBuffer::LockMode::kRead));
const float* results = lock.second;
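A short follow-up for consuming the locked results; num_output_elements is hypothetical and would be derived from the model's output tensor shape:

// Print each output element (assumes num_output_elements is known).
for (size_t i = 0; i < num_output_elements; ++i) {
  std::printf("output[%zu] = %f\n", i, results[i]);
}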
C Execution
// Create buffers (see test files for complete buffer creation)
LiteRtTensorBuffer input_buffers[num_inputs];
LiteRtTensorBuffer output_buffers[num_outputs];
// ... buffer creation code ...
// Write input data
void* input_addr;
LiteRtLockTensorBuffer(input_buffers[0], &input_addr,
kLiteRtTensorBufferLockModeWrite);
memcpy(input_addr, your_data, data_size);
LiteRtUnlockTensorBuffer(input_buffers[0]);
// Run inference
LiteRtRunCompiledModel(compiled_model, /*signature_index=*/0, num_inputs,
                       input_buffers, num_outputs, output_buffers);
// Read output
void* output_addr;
LiteRtLockTensorBuffer(output_buffers[0], &output_addr,
kLiteRtTensorBufferLockModeRead);
// Process output_addr
LiteRtUnlockTensorBuffer(output_buffers[0]);
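Finally, the C API has no RAII, so each created object needs explicit teardown. A hedged sketch, assuming the usual LiteRtDestroy* counterparts to the Create calls above:

// Destroy buffers first, then the model, options, and environment.
for (size_t i = 0; i < num_inputs; ++i) {
  LiteRtDestroyTensorBuffer(input_buffers[i]);
}
for (size_t i = 0; i < num_outputs; ++i) {
  LiteRtDestroyTensorBuffer(output_buffers[i]);
}
LiteRtDestroyCompiledModel(compiled_model);
LiteRtDestroyOptions(options);
LiteRtDestroyEnvironment(env);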