Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Appearance settings

Commit d5d5099

Browse files
committed
fix(ci): Update CMakeLists.txt for macos
1 parent d8cc231 commit d5d5099
Copy full SHA for d5d5099

File tree

Expand file tree · Collapse file tree

1 file changed

+29
-14
lines changed
Filter options
Expand file tree · Collapse file tree

1 file changed

+29
-14
lines changed

‎CMakeLists.txt

Copy file name to clipboard · Expand all lines: CMakeLists.txt
+29 −14 lines changed: 29 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -62,20 +62,35 @@ if (LLAMA_BUILD)
6262
# Enable building of the common library
6363
set(LLAMA_BUILD_COMMON ON CACHE BOOL "Build llama.cpp common library" FORCE)
6464

65-
# Building llama
66-
if (APPLE AND NOT CMAKE_SYSTEM_PROCESSOR MATCHES "arm64")
67-
# Need to disable these llama.cpp flags on Apple x86_64,
68-
# otherwise users may encounter invalid instruction errors
69-
set(GGML_AVX "Off" CACHE BOOL "ggml: enable AVX" FORCE)
70-
set(GGML_AVX2 "Off" CACHE BOOL "ggml: enable AVX2" FORCE)
71-
set(GGML_FMA "Off" CACHE BOOL "gml: enable FMA" FORCE)
72-
set(GGML_F16C "Off" CACHE BOOL "gml: enable F16C" FORCE)
73-
74-
set(CMAKE_OSX_ARCHITECTURES "arm64" CACHE STRING "Build architecture for OS X" FORCE)
75-
endif()
76-
65+
# Architecture detection and settings for Apple platforms
7766
if (APPLE)
78-
set(GGML_METAL_EMBED_LIBRARY "On" CACHE BOOL "llama: embed metal library" FORCE)
67+
# Get the target architecture
68+
execute_process(
69+
COMMAND uname -m
70+
OUTPUT_VARIABLE HOST_ARCH
71+
OUTPUT_STRIP_TRAILING_WHITESPACE
72+
)
73+
74+
# If CMAKE_OSX_ARCHITECTURES is not set, use the host architecture
75+
if(NOT CMAKE_OSX_ARCHITECTURES)
76+
set(CMAKE_OSX_ARCHITECTURES ${HOST_ARCH} CACHE STRING "Build architecture for macOS" FORCE)
77+
endif()
78+
79+
message(STATUS "Host architecture: ${HOST_ARCH}")
80+
message(STATUS "Target architecture: ${CMAKE_OSX_ARCHITECTURES}")
81+
82+
# Configure based on target architecture
83+
if(CMAKE_OSX_ARCHITECTURES STREQUAL "x86_64")
84+
# Intel Mac settings
85+
set(GGML_AVX "OFF" CACHE BOOL "ggml: enable AVX" FORCE)
86+
set(GGML_AVX2 "OFF" CACHE BOOL "ggml: enable AVX2" FORCE)
87+
set(GGML_FMA "OFF" CACHE BOOL "ggml: enable FMA" FORCE)
88+
set(GGML_F16C "OFF" CACHE BOOL "ggml: enable F16C" FORCE)
89+
endif()
90+
91+
# Metal settings (enable for both architectures)
92+
set(GGML_METAL "ON" CACHE BOOL "ggml: enable Metal" FORCE)
93+
set(GGML_METAL_EMBED_LIBRARY "ON" CACHE BOOL "ggml: embed metal library" FORCE)
7994
endif()
8095

8196
add_subdirectory(vendor/llama.cpp)
@@ -130,7 +145,7 @@ if (LLAMA_BUILD)
130145
# Building llava
131146
add_subdirectory(vendor/llama.cpp/examples/llava)
132147
set_target_properties(llava_shared PROPERTIES OUTPUT_NAME "llava")
133-
# Set CUDA_ARCHITECTURES to OFF on Windows
148+
134149
if (WIN32)
135150
set_target_properties(llava_shared PROPERTIES CUDA_ARCHITECTURES OFF)
136151
endif()

0 commit comments

Comments
0 (0)
Morty Proxy This is a proxified and sanitized view of the page, visit original site.