Commit 79d08f5 (parent 6880faf)

[Executorch][LLM] Use caching allocator for runner

We observed that on iOS this improves performance by 6%, because the SDPA op makes temporary allocations. No significant difference on Android, though.

Differential Revision: [D86120038](https://our.internmc.facebook.com/intern/diff/D86120038/)
ghstack-source-id: 321483010
Pull Request resolved: #15656
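
For context on why a caching allocator helps here: SDPA asks for scratch buffers on every decode step, and reusing previously freed blocks avoids paying for malloc/free each time. The snippet below is a minimal sketch of that general idea only; it is not the ExecuTorch CPUCachingAllocator that this commit wires in (see extension/memory_allocator/cpu_caching_malloc_allocator.h in the diff), and the class name CachingByteAllocator and its allocate/release interface are invented purely for illustration.

// Minimal sketch (not the ExecuTorch CPUCachingAllocator): freed blocks are
// kept in per-size free lists and handed back on the next request of the same
// size, so repeated temp allocations of identical sizes skip malloc/free.
#include <cstddef>
#include <cstdlib>
#include <unordered_map>
#include <vector>

class CachingByteAllocator { // hypothetical name, for illustration only
 public:
  explicit CachingByteAllocator(size_t max_cached_bytes)
      : max_cached_bytes_(max_cached_bytes) {}

  ~CachingByteAllocator() {
    for (auto& entry : free_lists_) {
      for (void* ptr : entry.second) {
        std::free(ptr);
      }
    }
  }

  void* allocate(size_t size) {
    auto it = free_lists_.find(size);
    if (it != free_lists_.end() && !it->second.empty()) {
      void* ptr = it->second.back(); // cache hit: reuse a previously freed block
      it->second.pop_back();
      cached_bytes_ -= size;
      return ptr;
    }
    return std::malloc(size); // cache miss: fall back to the system allocator
  }

  void release(void* ptr, size_t size) {
    if (cached_bytes_ + size <= max_cached_bytes_) {
      free_lists_[size].push_back(ptr); // keep the block for the next request
      cached_bytes_ += size;
    } else {
      std::free(ptr); // cache is full; return the block to the system
    }
  }

 private:
  size_t max_cached_bytes_;
  size_t cached_bytes_ = 0;
  std::unordered_map<size_t, std::vector<void*>> free_lists_;
};

Because temp allocations of a given size recur across decode steps, a small cap such as the 10 MB chosen in this commit is intended to let repeated requests be served from the cache rather than from the system allocator.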

4 files changed (+22, -3 lines)

CMakeLists.txt
Lines changed: 2 additions & 0 deletions

@@ -922,6 +922,8 @@ if(EXECUTORCH_BUILD_EXTENSION_TRAINING)
 endif()
 
 if(EXECUTORCH_BUILD_EXTENSION_LLM_RUNNER)
+  add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/extension/memory_allocator/runner)
+  list(APPEND _executorch_extensions extension_memory_allocator)
   add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/extension/llm/runner)
   list(APPEND _executorch_extensions extension_llm_runner)
 endif()

extension/llm/runner/CMakeLists.txt
Lines changed: 1 addition & 1 deletion

@@ -34,7 +34,7 @@ list(TRANSFORM _extension_llm_runner__srcs PREPEND "${EXECUTORCH_ROOT}/")
 add_library(extension_llm_runner STATIC ${_extension_llm_runner__srcs})
 
 set(runner_deps executorch_core extension_module extension_tensor
-    tokenizers::tokenizers
+    extension_memory_allocator tokenizers::tokenizers
 )
 
 # depend on arange_utils

extension/llm/runner/llm_runner_helper.cpp
Lines changed: 18 additions & 2 deletions

@@ -17,6 +17,7 @@
 #include <executorch/extension/llm/runner/text_llm_runner.h>
 #include <executorch/extension/llm/runner/text_prefiller.h>
 #include <executorch/extension/llm/runner/text_token_generator.h>
+#include <executorch/extension/memory_allocator/cpu_caching_malloc_allocator.h>
 #include <executorch/runtime/core/result.h>
 #include <executorch/runtime/platform/runtime.h>
 #include <pytorch/tokenizers/hf_tokenizer.h>
@@ -209,11 +210,26 @@ std::unique_ptr<TextLLMRunner> create_text_llm_runner(
 
   // Create the Module
   std::unique_ptr<Module> module;
+  uint32_t max_cached_memory_size_bytes_ = 1024 * 1024 * 10; // 10MB
   if (data_files.size() > 0) {
     module = std::make_unique<Module>(
-        model_path, data_files, Module::LoadMode::File);
+        model_path,
+        data_files,
+        Module::LoadMode::File,
+        nullptr,
+        std::make_unique<
+            executorch::extension::CPUCachingAllocator>( // temp memory
+                                                         // allocator
+            max_cached_memory_size_bytes_));
   } else {
-    module = std::make_unique<Module>(model_path, Module::LoadMode::File);
+    module = std::make_unique<Module>(
+        model_path,
+        Module::LoadMode::File,
+        nullptr,
+        std::make_unique<
+            executorch::extension::CPUCachingAllocator>( // temp memory
+                                                         // allocator
+            max_cached_memory_size_bytes_));
   }
 
   // Get metadata from Module
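
To make the intent of wiring a temp allocator into the Module concrete, here is a toy decode-style driver built on the hypothetical CachingByteAllocator sketched above, not on ExecuTorch's API: after the first step, each iteration's scratch request is served from the cache rather than from malloc. Compiled together with the earlier sketch as one translation unit, it runs as-is.

// Toy decode loop using the CachingByteAllocator sketch from above:
// step 0 allocates from the system; later steps reuse the cached block.
#include <cstddef>
#include <cstdio>

int main() {
  CachingByteAllocator temp_alloc(10 * 1024 * 1024); // 10 MB cap, matching the diff
  const size_t scratch_bytes = 64 * 1024;            // stand-in for SDPA scratch space

  for (int step = 0; step < 128; ++step) {
    void* scratch = temp_alloc.allocate(scratch_bytes); // cache hit after step 0
    // ... one decode step would write attention scratch into `scratch` here ...
    temp_alloc.release(scratch, scratch_bytes);
  }
  std::printf("done after 128 steps\n");
  return 0;
}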

extension/llm/runner/targets.bzl
Lines changed: 1 addition & 0 deletions

@@ -148,6 +148,7 @@ def define_common_targets():
             ":text_prefiller" + aten_suffix,
             ":text_token_generator" + aten_suffix,
             "//executorch/extension/llm/runner/io_manager:io_manager" + aten_suffix,
+            "//executorch/extension/memory_allocator:cpu_caching_allocator",
             "//pytorch/tokenizers:hf_tokenizer",
             "//pytorch/tokenizers:llama2c_tokenizer",
             "//pytorch/tokenizers:sentencepiece",
