From 09c74276470b22b76563399f228740871b4848b5 Mon Sep 17 00:00:00 2001 From: Max Ren Date: Tue, 11 Mar 2025 12:54:33 -0700 Subject: [PATCH] [XNNPACK][Weights Cache] Enable in XNNPACK We enable the XNNPACK Weights cache in XNNPACK. The weights cache is initialized for the runtime with the named data map and a memory allocator (for now the memory allocator is not used, but I hope in the future this can be used to manage the memory for packed weights). Before creating the runtime, we first initialize the weights cache; this sets the finalization state to false. As we add weight/bias tensors to the graph, we load them through the named data map in the weights cache, and keep a map of the pointer to the name. When XNNPACK creates the runtime and packs the weights, it uses the weights_cache method look_up_or_insert. We use the pointers provided in the cache key to look up their names and append them together like ("weightsbias"). We then insert the packed weights with that key. In future look-ups, we just use the pointer cached at the named pack tensor key, saving us from packing in the future. After creating the runtime and packing the weights, we finalize the cache. This sets is_finalized to true. We also free all unpacked buffers loaded from the named data map as they are no longer needed. We also keep reference counts for all the packed weights, incrementing the packed weights which were used by this runtime. We return a vector of all the packed weight names to the xnn_executor runner. When the XNNExecutor is destroyed, we decrement the counts of the packed buffers and destroy them if necessary. Note that this feature is gated behind the ENABLE_XNNPACK_WEIGHTS_CACHE flag. Since the weights_cache is a global member of the singleton xnnpack backend class, and it is also read/write, we add a mutex to ensure that access to the weights_cache is thread safe. 
We added a new mutex, so the mutex hiearchy is: workspace_mutex_ -> weights_cache_mutex_ Differential Revision: [D70885926](https://our.internmc.facebook.com/intern/diff/D70885926/) [ghstack-poisoned] --- backends/xnnpack/runtime/XNNCompiler.cpp | 50 +++++++++++++-------- backends/xnnpack/runtime/XNNCompiler.h | 4 +- backends/xnnpack/runtime/XNNExecutor.cpp | 4 +- backends/xnnpack/runtime/XNNExecutor.h | 8 +++- backends/xnnpack/runtime/XNNPACKBackend.cpp | 41 +++++++++++++++-- backends/xnnpack/targets.bzl | 10 +++-- 6 files changed, 87 insertions(+), 30 deletions(-) diff --git a/backends/xnnpack/runtime/XNNCompiler.cpp b/backends/xnnpack/runtime/XNNCompiler.cpp index af959735cd4..45a3d2c8cb3 100644 --- a/backends/xnnpack/runtime/XNNCompiler.cpp +++ b/backends/xnnpack/runtime/XNNCompiler.cpp @@ -166,8 +166,7 @@ const uint8_t* getConstantDataPtr( const fb_xnnpack::XNNTensorValue* tensor_value, GraphPtr flatbuffer_graph, const uint8_t* constant_data_ptr, - const NamedDataMap* named_data_map, - std::vector& loaded_buffers_from_map) { + XNNWeightsCache* weights_cache) { auto buffer_idx = tensor_value->constant_buffer_idx(); if (buffer_idx) { if (!constant_data_ptr) { @@ -184,14 +183,12 @@ const uint8_t* getConstantDataPtr( if (data_name.length() == 0) { return constant_data_ptr + offset; } else { - Result buffer = named_data_map->get_data(data_name.c_str()); - if (!buffer.ok()) { - ET_LOG(Error, "Failed to get constant data for key %s", data_name.c_str()); + Result data_ptr = weights_cache->load_unpacked_data(data_name); + if (!data_ptr.ok()){ + ET_LOG(Error, "Failed to load weights from cache"); return nullptr; } - const uint8_t* data_ptr = static_cast(buffer.get().data()); - loaded_buffers_from_map.push_back(std::move(buffer.get())); - return data_ptr; + return data_ptr.get(); } } } @@ -213,8 +210,7 @@ Error defineTensor( std::vector& input_ids, std::vector& output_ids, CompileAllocator& allocator, - const NamedDataMap* named_data_map, - std::vector& 
loaded_buffers_from_map) { + XNNWeightsCache* weights_cache) { const fb_xnnpack::XNNTensorValue* tensor_value = nullptr; const fb_xnnpack::XNNQuantizedTensorValue* qtensor_value = nullptr; @@ -255,8 +251,7 @@ Error defineTensor( tensor_value, flatbuffer_graph, constant_data_ptr, - named_data_map, - loaded_buffers_from_map + weights_cache ); xnn_status status; @@ -1992,8 +1987,7 @@ ET_NODISCARD Error XNNCompiler::compileModel( const void* buffer_pointer, size_t num_bytes, XNNExecutor* executor, - MemoryAllocator* runtime_allocator, - const NamedDataMap* named_data_map, + XNNWeightsCache* weights_cache, xnn_workspace_t workspace) { Result header = XNNHeader::Parse(buffer_pointer, num_bytes); const uint8_t* flatbuffer_data = nullptr; @@ -2073,8 +2067,7 @@ ET_NODISCARD Error XNNCompiler::compileModel( input_ids, output_ids, compile_allocator, - named_data_map, - loaded_buffers_from_map); + weights_cache); if (err != Error::Ok) { return err; @@ -2096,12 +2089,22 @@ ET_NODISCARD Error XNNCompiler::compileModel( xnn_runtime_t runtime_ptr = nullptr; + // XNNWeightsCache if weights cache is not enabled, then XNNWeightsCache + // just manages the unpacked weights until the runtime is created. +#ifdef ENABLE_XNNPACK_WEIGHTS_CACHE + xnn_weights_cache_t weights_cache_ptr = + weights_cache->get_num_unpacked_data() > 0 ? 
weights_cache->get() : nullptr; +#else + xnn_weights_cache_t weights_cache_ptr = nullptr; +#endif + + #ifdef ENABLE_XNNPACK_SHARED_WORKSPACE ET_CHECK_OR_RETURN_ERROR( workspace != nullptr, Internal, "Failed to initialize XNNPACK workspace"); status = xnn_create_runtime_v4( subgraph.get(), - /*weight_cache=*/nullptr, // TODO - support weight cache + weights_cache_ptr, workspace, ::executorch::extension::threadpool::get_pthreadpool(), runtime_flags, @@ -2109,7 +2112,7 @@ ET_NODISCARD Error XNNCompiler::compileModel( #else status = xnn_create_runtime_v3( subgraph.get(), - /*weight_cache=*/nullptr, // TODO - support weight cache + weights_cache_ptr, ::executorch::extension::threadpool::get_pthreadpool(), runtime_flags, &runtime_ptr); @@ -2121,10 +2124,19 @@ ET_NODISCARD Error XNNCompiler::compileModel( "XNN Runtime creation failed with code: %s", xnn_status_to_string(status)); + auto packed_weights_names = weights_cache->finalize_for_runtime(); + ET_CHECK_OR_RETURN_ERROR( + packed_weights_names.ok(), + Internal, + "Failed to finalize weights cache after creating the xnn runtime" + ) + + err = executor->initialize( // NOLINT: runtime_ptr is non-null runtime_ptr, std::move(input_ids), - std::move(output_ids)); + std::move(output_ids), + std::move(packed_weights_names.get())); return err; }; diff --git a/backends/xnnpack/runtime/XNNCompiler.h b/backends/xnnpack/runtime/XNNCompiler.h index 3ea621a4d59..f3c3ea4739f 100644 --- a/backends/xnnpack/runtime/XNNCompiler.h +++ b/backends/xnnpack/runtime/XNNCompiler.h @@ -9,6 +9,7 @@ #pragma once #include +#include #include #include @@ -29,8 +30,7 @@ class XNNCompiler { const void* buffer_pointer, size_t num_bytes, XNNExecutor* executor, - executorch::runtime::MemoryAllocator* runtime_allocator, - const executorch::runtime::NamedDataMap* named_data_map, + XNNWeightsCache* weights_cache, xnn_workspace_t workspace); }; diff --git a/backends/xnnpack/runtime/XNNExecutor.cpp b/backends/xnnpack/runtime/XNNExecutor.cpp index 
1ba549bb8d7..ae7c0d66ecb 100644 --- a/backends/xnnpack/runtime/XNNExecutor.cpp +++ b/backends/xnnpack/runtime/XNNExecutor.cpp @@ -30,7 +30,8 @@ using executorch::runtime::kTensorDimensionLimit; ET_NODISCARD Error XNNExecutor::initialize( xnn_runtime_t runtime, std::vector&& input_ids, - std::vector&& output_ids) { + std::vector&& output_ids, + std::vector&& packed_data_names) { runtime_ = std::unique_ptr( runtime, xnn_delete_runtime); @@ -51,6 +52,7 @@ ET_NODISCARD Error XNNExecutor::initialize( std::sort(output_ids_.begin(), output_ids_.end()); externals_.resize(input_ids_.size() + output_ids_.size()); + packed_data_names_ = std::move(packed_data_names); return Error::Ok; } diff --git a/backends/xnnpack/runtime/XNNExecutor.h b/backends/xnnpack/runtime/XNNExecutor.h index 68ee18609e3..808d2524686 100644 --- a/backends/xnnpack/runtime/XNNExecutor.h +++ b/backends/xnnpack/runtime/XNNExecutor.h @@ -34,6 +34,7 @@ class XNNExecutor { std::vector input_ids_; std::vector output_ids_; std::vector externals_; + std::vector packed_data_names_; public: XNNExecutor() = default; @@ -46,6 +47,10 @@ class XNNExecutor { return output_ids_.size(); } + inline std::vector get_packed_data_names(){ + return packed_data_names_; + } + /** * Initialize the XNNExecutor with a given runtime and input/output ids. * The input/output ids are expected to be sorted in order of their @@ -54,7 +59,8 @@ class XNNExecutor { ET_NODISCARD executorch::runtime::Error initialize( xnn_runtime_t runtime, std::vector&& input_ids, - std::vector&& output_ids); + std::vector&& output_ids, + std::vector&& packed_data_names); /** * Prepares the arguments for runtime graph execution. 
diff --git a/backends/xnnpack/runtime/XNNPACKBackend.cpp b/backends/xnnpack/runtime/XNNPACKBackend.cpp index f453453cf76..78a8072fca4 100644 --- a/backends/xnnpack/runtime/XNNPACKBackend.cpp +++ b/backends/xnnpack/runtime/XNNPACKBackend.cpp @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -31,6 +32,7 @@ using executorch::runtime::EValue; using executorch::runtime::FreeableBuffer; using executorch::runtime::Result; using executorch::runtime::NamedDataMap; +using executorch::backends::xnnpack::delegate::XNNWeightsCache; class XnnpackBackend final : public ::executorch::runtime::BackendInterface { public: @@ -81,13 +83,23 @@ class XnnpackBackend final : public ::executorch::runtime::BackendInterface { } const NamedDataMap* named_data_map = context.get_named_data_map(); + weights_cache_->initialize_for_runtime( + context.get_runtime_allocator(), + named_data_map + ); -#ifdef ENABLE_XNNPACK_SHARED_WORKSPACE // This is needed to serialize access to xnn_create_runtime which is not // thread safe. This can heppen when multiple threads call init() on // the same backend instance. +#ifdef ENABLE_XNNPACK_SHARED_WORKSPACE const std::lock_guard lock(workspace_mutex_); #endif + +#ifdef ENABLE_XNNPACK_WEIGHTS_CACHE + const std::lock_guard lock(weights_cache_mutex_); +#endif + + // Executor has been allocated but not constructed, ensure that runtime_ is // nullptr by constructing it in place here. NOTE: Since we use placement // new and since this type is not trivially destructible, we must call the @@ -97,8 +109,7 @@ class XnnpackBackend final : public ::executorch::runtime::BackendInterface { processed->data(), processed->size(), executor, - context.get_runtime_allocator(), - named_data_map, + weights_cache_.get(), workspace_.get()); // This backend does not need its processed data after compiling the model. 
processed->Free(); @@ -125,6 +136,10 @@ class XnnpackBackend final : public ::executorch::runtime::BackendInterface { const std::lock_guard lock(workspace_mutex_); #endif +#ifdef ENABLE_XNNPACK_WEIGHTS_CACHE + const std::lock_guard lock(weights_cache_mutex_); +#endif + // Prepare Inputs/Outputs and Propagate Input Shapes Error err = executor->prepare_args(args); if (err != Error::Ok) { @@ -145,16 +160,24 @@ class XnnpackBackend final : public ::executorch::runtime::BackendInterface { void destroy(DelegateHandle* handle) const override { if (handle != nullptr) { -#ifdef ENABLE_XNNPACK_SHARED_WORKSPACE // This is needed to serialize access to xnn_delete_runtime which is not // thread safe. This can heppen when multiple threads call destroy() on // the same backend instance. +#ifdef ENABLE_XNNPACK_SHARED_WORKSPACE const std::lock_guard lock(workspace_mutex_); #endif + + auto executor = static_cast(handle); + #ifdef ENABLE_XNNPACK_PROFILING executor->print_avg_op_timings(); #endif + +#ifdef ENABLE_XNNPACK_WEIGHTS_CACHE + const std::lock_guard lock(weights_cache_mutex_); + weights_cache_->delete_packed_data(executor->get_packed_data_names()); +#endif // XNNExecutor is not trivially destructible. Since this was constructed // manually in init(), we must destroy it manually here. executor->~XNNExecutor(); @@ -167,6 +190,16 @@ class XnnpackBackend final : public ::executorch::runtime::BackendInterface { std::unique_ptr workspace_{ nullptr, &xnn_release_workspace}; + + // Weights cache is global to all delegate instances. 
+ mutable std::mutex weights_cache_mutex_; + std::unique_ptr weights_cache_ = + std::make_unique(); + + + // Lock Hiearchy for Mutexes: + // workspace_mutex_ + // weights_cache_mutex_ }; namespace { diff --git a/backends/xnnpack/targets.bzl b/backends/xnnpack/targets.bzl index bb7f1979d3a..5b2059c25cf 100644 --- a/backends/xnnpack/targets.bzl +++ b/backends/xnnpack/targets.bzl @@ -6,11 +6,15 @@ def _get_preprocessor_flags(): Disable if someone explictly specified a config option, else Enable otherwise """ - if native.read_config("executorch", "xnnpack_workspace_sharing", "0") == "0": - return [] + preprocessor_flags = [] + if native.read_config("executorch", "xnnpack_workspace_sharing", "0") != "0": + preprocessor_flags.append("-DENABLE_XNNPACK_SHARED_WORKSPACE") + + if native.read_config("executorch", "xnnpack_weights_cache", "0") != "0": + preprocessor_flags.append("-DENABLE_XNNPACK_WEIGHTS_CACHE") # Enable if not disabled through config - return ["-DENABLE_XNNPACK_SHARED_WORKSPACE"] + return preprocessor_flags def define_common_targets(): runtime.cxx_library(