Commit 2ec043a

Clean up stdout / stderr suppression
1 parent 4ea7027 commit 2ec043a

File tree

2 files changed: +14 -26 lines changed
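This commit applies one refactor at every call site: rather than branching on verbose around each native call, the branch moves into suppress_stdout_stderr itself via a new disable flag. A minimal sketch of the pattern, where do_work() is a hypothetical placeholder for any of the llama.cpp calls below, not a name from the commit:

# Before: every call site duplicated the verbose branch.
if verbose:
    do_work()
else:
    with suppress_stdout_stderr():
        do_work()

# After: the branch lives inside the context manager.
with suppress_stdout_stderr(disable=verbose):
    do_work()

Repeating this at each call site accounts for the +14/-26 totals above.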

llama_cpp/_utils.py (9 additions & 0 deletions)

@@ -9,8 +9,14 @@ class suppress_stdout_stderr(object):
     sys = sys
     os = os
 
+    def __init__(self, disable: bool = True):
+        self.disable = disable
+
     # Oddly enough this works better than the contextlib version
     def __enter__(self):
+        if self.disable:
+            return self
+
         self.outnull_file = self.open(self.os.devnull, "w")
         self.errnull_file = self.open(self.os.devnull, "w")
 
@@ -31,6 +37,9 @@ def __enter__(self):
         return self
 
     def __exit__(self, *_):
+        if self.disable:
+            return
+
         self.sys.stdout = self.old_stdout
         self.sys.stderr = self.old_stderr
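The hunks above touch only part of suppress_stdout_stderr. For context, here is a minimal, self-contained sketch of a context manager with the same shape; only the lines visible in the diff (the class-level module aliases, the disable handling, the devnull handles, and the stdout/stderr restore) come from the commit, and the rest is assumed. The elided middle of __enter__ (old lines 15-30) does more than this, so treat it as an illustration of the disable mechanics, not the real implementation:

import os
import sys

class suppress_stdout_stderr:
    # Module references are kept on the class, presumably so they stay
    # reachable if __exit__ runs during interpreter shutdown
    # (e.g. when triggered from Llama.__del__).
    sys = sys
    os = os
    open = open  # assumed: the visible diff calls self.open(...)

    def __init__(self, disable: bool = True):
        self.disable = disable

    # Oddly enough this works better than the contextlib version
    def __enter__(self):
        if self.disable:
            return self
        self.outnull_file = self.open(self.os.devnull, "w")
        self.errnull_file = self.open(self.os.devnull, "w")
        # Save the current streams, then point them at /dev/null.
        self.old_stdout = self.sys.stdout
        self.old_stderr = self.sys.stderr
        self.sys.stdout = self.outnull_file
        self.sys.stderr = self.errnull_file
        return self

    def __exit__(self, *_):
        if self.disable:
            return
        # Restore the original streams and release the null handles.
        self.sys.stdout = self.old_stdout
        self.sys.stderr = self.old_stderr
        self.outnull_file.close()
        self.errnull_file.close()

With disable=True the manager is a complete no-op, which is what lets the call sites below pass disable=self.verbose unconditionally instead of branching.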

llama_cpp/llama.py (5 additions & 26 deletions)

@@ -296,11 +296,8 @@ def __init__(
 
         self.numa = numa
         if not Llama.__backend_initialized:
-            if self.verbose:
+            with suppress_stdout_stderr(disable=self.verbose):
                 llama_cpp.llama_backend_init(self.numa)
-            else:
-                with suppress_stdout_stderr():
-                    llama_cpp.llama_backend_init(self.numa)
             Llama.__backend_initialized = True
 
         self.model_path = model_path

@@ -379,38 +376,23 @@ def __init__(
         if not os.path.exists(model_path):
             raise ValueError(f"Model path does not exist: {model_path}")
 
-        if verbose:
+        with suppress_stdout_stderr(disable=self.verbose):
             self.model = llama_cpp.llama_load_model_from_file(
                 self.model_path.encode("utf-8"), self.model_params
             )
-        else:
-            with suppress_stdout_stderr():
-                self.model = llama_cpp.llama_load_model_from_file(
-                    self.model_path.encode("utf-8"), self.model_params
-                )
         assert self.model is not None
 
-        if verbose:
+        with suppress_stdout_stderr(disable=self.verbose):
             self.ctx = llama_cpp.llama_new_context_with_model(
                 self.model, self.context_params
             )
-        else:
-            with suppress_stdout_stderr():
-                self.ctx = llama_cpp.llama_new_context_with_model(
-                    self.model, self.context_params
-                )
 
         assert self.ctx is not None
 
-        if verbose:
+        with suppress_stdout_stderr(disable=self.verbose):
             self.batch = llama_cpp.llama_batch_init(
                 self.n_batch, 0, 1
             )
-        else:
-            with suppress_stdout_stderr():
-                self.batch = llama_cpp.llama_batch_init(
-                    self.n_batch, 0, 1
-                )
 
         if self.lora_path:
             if llama_cpp.llama_model_apply_lora_from_file(

@@ -1615,11 +1597,8 @@ def _free_model(self, *, _lbatch_free=llama_cpp._lib.llama_batch_free, _lfree_mo
         self.ctx = None
 
     def __del__(self):
-        if self.verbose:
+        with suppress_stdout_stderr(disable=self.verbose):
             self._free_model()
-        else:
-            with suppress_stdout_stderr():
-                self._free_model()
 
     def __getstate__(self):
         return dict(
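The call sites read inversely to the flag name: verbose=True passes disable=True, making the context manager a no-op so llama.cpp's native logging is shown, while verbose=False leaves suppression active. A hypothetical demo against the sketch class above, not part of the commit:

with suppress_stdout_stderr(disable=True):
    print("visible: suppression disabled, as when verbose=True")

with suppress_stdout_stderr(disable=False):
    print("hidden: swallowed by the devnull redirect")

print("streams are restored once the block exits")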

0 commit comments