# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

#
# Simple CMake build system for selective build demo.
#
# ### Editing this file ###
#
# This file should be formatted with
# ~~~
# cmake-format --first-comment-is-literal=True CMakeLists.txt
# ~~~
# It should also be cmake-lint clean.
#
cmake_minimum_required(VERSION 3.19)
project(QuantizedLinearOp)

# Default to C++17, but let the user override via the cache / command line.
if(NOT CMAKE_CXX_STANDARD)
  set(CMAKE_CXX_STANDARD 17)
endif()
# Enforce the chosen standard instead of silently decaying to an older one.
set(CMAKE_CXX_STANDARD_REQUIRED ON)

# Root of the ExecuTorch checkout, relative to this file's directory.
set(EXECUTORCH_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/../../../..)
set(TORCH_ROOT ${EXECUTORCH_ROOT}/third-party/pytorch)

# Flags shared by every target defined in this file.
set(_common_compile_options -Wno-deprecated-declarations -fPIC)

# Let files say "include <executorch/path/to/header.h>".
set(_common_include_directories ${EXECUTORCH_ROOT}/..)

find_package(Llama REQUIRED)
find_package(ExecuTorch REQUIRED)
find_package(Torch CONFIG REQUIRED)

# NOTE(review): this mutates the `executorch` target from outside its defining
# package so consumers of it can `#include <executorch/...>`. Confirm the
# ExecuTorch package does not already export this include path.
target_include_directories(executorch INTERFACE ${_common_include_directories})

#
# custom_kernels: C++ kernel implementations of custom ops
#
set(kernel_sources
    ${EXECUTORCH_ROOT}/examples/models/llama2/playground/op_linear.cpp
)
add_library(custom_kernels SHARED ${kernel_sources})
target_link_libraries(custom_kernels PRIVATE executorch ${LLAMA_LIBRARY})
# PUBLIC so that consumers of this library inherit the common options.
target_compile_options(custom_kernels PUBLIC ${_common_compile_options})
target_include_directories(
  custom_kernels PRIVATE ${EXECUTORCH_ROOT}/examples/third-party
                         ${TORCH_INCLUDE_DIRS}
)

if(EXECUTORCH_BUILD_GTEST)
  find_package(
    gflags REQUIRED PATHS ${CMAKE_CURRENT_BINARY_DIR}/../../../../third-party
  )
  #
  # llama_cpp_test: test binary to run llama.cpp kernel ggml_mul_mat
  #
  # BUGFIX: add_executable() takes no PRIVATE keyword — CMake would have
  # treated "PRIVATE" as a (missing) source file and failed at configure time.
  # Also restore the "models/" path segment so the path matches the
  # kernel_sources path used above.
  add_executable(
    llama_cpp_test
    ${EXECUTORCH_ROOT}/examples/models/llama2/playground/test_op_linear.cpp
  )

  # Explicit PRIVATE: the keyword-less target_link_libraries form has legacy
  # semantics and should not be used.
  target_link_libraries(llama_cpp_test PRIVATE executorch gflags custom_kernels)
  target_compile_options(llama_cpp_test PUBLIC ${_common_compile_options})
endif()

# Install libraries
# NOTE(review): INCLUDES DESTINATION normally takes an install-tree-relative
# path, but ${_common_include_directories} points into the source tree —
# confirm this is intentional before packaging.
install(
  TARGETS custom_kernels
  DESTINATION lib
  INCLUDES
  DESTINATION ${_common_include_directories}
)