Commit 6519423
Commit message: ckpt
Parent: 3395aea
5 files changed: 7 additions, 6 deletions

.gitignore (1 addition, 0 deletions)

@@ -181,3 +181,4 @@ compile_commands.json
 /bin
 cmake_install.cmake
 CMakeCache.txt
+/.fwd_cache

llama_cpp/managers/__init__.py (1 addition, 1 deletion)

@@ -1,2 +1,2 @@
 from .state import LlamaPersistantState, STATE_PROMPTS
-from .cache import _LlamaCacheManager, CACHE_TYPES
+from .cache import LlamaCacheManager, CACHE_TYPES

llama_cpp/managers/cache.py (1 addition, 1 deletion)

@@ -22,7 +22,7 @@ class CACHE_TYPES(BaseModel):
     cache_type: Literal["disk", "ram"]
 
 
-class _LlamaCacheManager(Llama):
+class LlamaCacheManager(Llama):
     """A class for an LLM to always use a specific state with a prompt.
     This should be inherited by a strategy class and not used directly."""
 
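
For context, this hunk shows that CACHE_TYPES is a pydantic model whose cache_type field is constrained to "disk" or "ram". A minimal sketch of that validation, assuming only what is visible in the diff (the real class may define more fields):

from typing import Literal

from pydantic import BaseModel

class CACHE_TYPES(BaseModel):
    # Only this field appears in the hunk above; anything else is unknown.
    cache_type: Literal["disk", "ram"]

cfg = CACHE_TYPES(cache_type="ram")  # accepted
# CACHE_TYPES(cache_type="gpu") would raise pydantic.ValidationError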

llama_cpp/server/model.py (2 additions, 2 deletions)

@@ -9,7 +9,7 @@
 import llama_cpp.llama_tokenizer as llama_tokenizer
 
 from llama_cpp.server.settings import ModelSettings
-
+from llama_cpp.managers.cache import LlamaCacheManager
 
 class LlamaProxy:
     def __init__(self, models: List[ModelSettings]) -> None:

@@ -132,7 +132,7 @@ def load_llama_from_model_settings(settings: ModelSettings) -> llama_cpp.Llama:
                filename=settings.model,
            )
     else:
-        create_fn = llama_cpp.Llama
+        create_fn = LlamaCacheManager
         kwargs["model_path"] = settings.model
 
     _model = create_fn(
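
With this change the else branch always instantiates the server model through LlamaCacheManager rather than plain llama_cpp.Llama. Because the manager subclasses Llama (see cache.py above), it can be swapped in as the same create_fn callable without touching the call site. A minimal sketch of that dispatch pattern, with the settings handling simplified and the helper name hypothetical:

from typing import Any, Callable, Dict

import llama_cpp
from llama_cpp.managers.cache import LlamaCacheManager

def build_model(model_path: str, use_cache_manager: bool = True) -> llama_cpp.Llama:
    # Hypothetical helper mirroring load_llama_from_model_settings: both
    # callables accept Llama's constructor kwargs, so only the class
    # being instantiated changes.
    create_fn: Callable[..., llama_cpp.Llama] = (
        LlamaCacheManager if use_cache_manager else llama_cpp.Llama
    )
    kwargs: Dict[str, Any] = {"model_path": model_path}
    return create_fn(**kwargs)

This assumes LlamaCacheManager keeps Llama's constructor signature, which the unchanged create_fn(**kwargs) call in the hunk implies.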

llama_cpp/strategies/chat_history.py (2 additions, 2 deletions)

@@ -1,7 +1,7 @@
-from ..managers.cache import _LlamaCacheManager, CACHE_TYPES
+from ..managers.cache import CACHE_TYPES, LlamaCacheManager
 
 
-class ChatHistoryStrategy(_LlamaCacheManager):
+class ChatHistoryStrategy(LlamaCacheManager):
     """A class for an LLM to always use a specific state with a prompt."""
 
     def __init__(
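
ChatHistoryStrategy is exactly the kind of strategy subclass the cache.py docstring calls for. A sketch of that inheritance pattern, assuming the subclass stores its cache configuration and defers construction to Llama; the __init__ parameters are hypothetical, since the diff shows only that the method exists:

from typing import Optional

from llama_cpp.managers.cache import CACHE_TYPES, LlamaCacheManager

class MyStrategy(LlamaCacheManager):
    """Always use a specific state with a prompt (per the docstring above)."""

    def __init__(self, *args, cache: Optional[CACHE_TYPES] = None, **kwargs):
        # Hypothetical parameters: the hunk shows only "def __init__(".
        self.cache = cache
        super().__init__(*args, **kwargs)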
