Commit ec10a80

Merge pull request #2 from tc-wolf/standalone_server_mac
Add deploy target for mac server bundle
2 parents 75466a3 + 9e19903 commit ec10a80

File tree

Makefile
pyproject.toml

2 files changed: +24 additions, -4 deletions

Makefile

23 additions & 4 deletions
@@ -54,16 +54,16 @@ deploy.pypi:
 deploy.gh-docs:
 	mkdocs build
 	mkdocs gh-deploy
-
+
 COMMIT := $(shell git rev-parse --short HEAD)

 deploy.docker:
 	# Make image with commit in name
 	docker build -t openblas_server_$(COMMIT) .
-
+
 	# Run image and immediately exit (just want to create the container)
 	docker run openblas_server_$(COMMIT) bash
-
+
 	# Get container ID, copy server tarball + libllama.so tarball, and delete
 	# temp container
 	CONTAINER_ID=$$(docker ps -lq --filter ancestor=openblas_server_$(COMMIT)) ; \
@@ -73,7 +73,25 @@ deploy.docker:

 	# More cleanup
 	yes | docker image prune
-
+
+# Build standalone server, may want to do in fresh venv to avoid bloat
+deploy.pyinstaller.mac:
+	# CPU must be aarch64 and OS is MacOS
+	@if [ `uname -m` != "arm64" ]; then echo "Must be on aarch64"; exit 1; fi
+	@if [ `uname` != "Darwin" ]; then echo "Must be on MacOS"; exit 1; fi
+	@echo "Building and installing with proper env vars for aarch64-specific ops"
+	CMAKE_ARGS="-DGGML_METAL=off -DGGML_LLAMAFILE=OFF -DGGML_BLAS=OFF -DCMAKE_BUILD_TYPE=Release" python3 -m pip install -v -e .[server,dev]
+	@server_path=$$(python -c 'import llama_cpp.server; print(llama_cpp.server.__file__)' | sed s/init/main/) ; \
+	echo "Server path: $$server_path" ; \
+	libllama_path=$$(python -c 'import llama_cpp.llama_cpp; print(llama_cpp.llama_cpp._load_shared_library("llama")._name)') ; \
+	libggml_path=$$(python -c 'import llama_cpp.llama_cpp; print(llama_cpp.llama_cpp._load_shared_library("ggml")._name)') ; \
+	echo "libllama path: $$libllama_path" ; \
+	echo "libggml path: $$libggml_path" ; \
+	pyinstaller -DF $$server_path \
+		--add-data $$libllama_path:llama_cpp/lib \
+		--add-data $$libggml_path:llama_cpp/lib \
+		-n llama-cpp-py-server
+
 test:
 	python3 -m pytest

@@ -104,5 +122,6 @@ clean:
 	deploy.pypi \
 	deploy.gh-docs \
 	deploy.docker \
+	deploy.pyinstaller.mac \
 	docker \
 	clean
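With the new target merged, the whole macOS build is driven through make. A minimal usage sketch, assuming PyInstaller's default dist/ output directory and a hypothetical local GGUF model path; the server flags shown are the standard llama_cpp.server CLI options:

    # Build the standalone server (the target aborts early on non-arm64 or non-Darwin hosts)
    make deploy.pyinstaller.mac

    # PyInstaller writes the bundle under dist/ using the name given by -n;
    # with onefile mode in effect this is a single self-contained executable
    ./dist/llama-cpp-py-server --model ./models/example.gguf --host 127.0.0.1 --port 8000

Note that -DF passes both -D (onedir) and -F (onefile) to PyInstaller; which layout wins depends on how PyInstaller resolves the conflicting flags, so check whether dist/ holds a single executable or a llama-cpp-py-server/ directory before shipping.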

pyproject.toml

1 addition & 0 deletions
@@ -55,6 +55,7 @@ dev = [
     "httpx>=0.24.1",
     "pandas>=2.2.1",
     "tqdm>=4.66.2",
+    "pyinstaller>=6.11.1",
 ]
 all = ["llama_cpp_python[server,test,dev]"]
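The dev extra now pins pyinstaller>=6.11.1, so the editable install performed by the Makefile target pulls in the bundler itself. A quick sanity-check sketch, assuming the install succeeded; _load_shared_library is a private llama-cpp-python helper (it returns a ctypes CDLL whose _name attribute is the resolved library path), so it may change between releases:

    # Confirm the bundler is present at the pinned minimum version
    python3 -m pip show pyinstaller

    # Reproduce the Makefile's library discovery by hand; these are the paths
    # that get bundled into the executable via --add-data
    python3 -c 'import llama_cpp.llama_cpp as m; print(m._load_shared_library("llama")._name)'
    python3 -c 'import llama_cpp.llama_cpp as m; print(m._load_shared_library("ggml")._name)'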
