Commit 6c246a5

Function: Add redis support for saving chat log

1 parent 8e78c8f
2 files changed: +25 −3 lines
llama_cpp/server/app.py (24 additions, 2 deletions)
@@ -1,5 +1,6 @@
 import json
 import multiprocessing
+import redis
 from threading import Lock
 from functools import partial
 from typing import Iterator, List, Optional, Union, Dict
@@ -85,13 +86,22 @@ class Settings(BaseSettings):
     port: int = Field(
         default=8000, description="Listen port"
     )
+    redishost: str = Field(
+        default="None", description="Redis server address"
+    )
+    redisport: int = Field(
+        default=6379, description="Redis server port"
+    )
+    redisdb: int = Field(
+        default=0, description="Redis server db"
+    )


 router = APIRouter()

 settings: Optional[Settings] = None
 llama: Optional[llama_cpp.Llama] = None
-
+rediscon: Optional[redis.StrictRedis] = None

 def create_app(settings: Optional[Settings] = None):
     if settings is None:
@@ -108,6 +118,14 @@ def create_app(settings: Optional[Settings] = None):
         allow_headers=["*"],
     )
     app.include_router(router)
+
+    if settings.redishost != 'None':
+        global rediscon
+        try:
+            rediscon = redis.StrictRedis(host=settings.redishost, port=settings.redisport, db=settings.redisdb)
+        except Exception as e:
+            print(e)
+
     global llama
     llama = llama_cpp.Llama(
         model_path=settings.model,
@@ -506,7 +524,11 @@ async def event_publisher(inner_send_chan: MemoryObjectSendStream):
                         raise anyio.get_cancelled_exc_class()()
                 await inner_send_chan.send(dict(data="[DONE]"))
                 log['messages'].append({'role':streamRole, 'content':streamContent})
-                print(json.dumps(log,indent=4))
+
+                #print(json.dumps(log,indent=4))
+                if rediscon is not None:
+                    logstr = json.dumps(log)
+                    rediscon.rpush('llama.cpp', logstr)

             except anyio.get_cancelled_exc_class() as e:
                 print("disconnected")

setup.py (1 addition, 1 deletion)
@@ -18,7 +18,7 @@
     packages=["llama_cpp", "llama_cpp.server"],
     install_requires=["typing-extensions>=4.5.0", "numpy>=1.20.0", "diskcache>=5.6.1"],
     extras_require={
-        "server": ["uvicorn>=0.21.1", "fastapi>=0.95.0", "sse-starlette>=1.3.3"],
+        "server": ["uvicorn>=0.21.1", "fastapi>=0.95.0", "sse-starlette>=1.3.3", "redis[hiredis]>=4.1.0"],
     },
     python_requires=">=3.7",
     classifiers=[
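With the dependency declared, installing the package with the server extra (pip install .[server]) pulls in redis alongside uvicorn, fastapi, and sse-starlette. A hypothetical launch script, building only on the Settings fields and create_app signature shown in the diff above; the model path and Redis address are placeholders, and any redishost other than the literal string "None" turns logging on:

import uvicorn

from llama_cpp.server.app import Settings, create_app

settings = Settings(
    model="./models/ggml-model-q4_0.bin",  # placeholder model path
    redishost="127.0.0.1",  # anything but the string "None" enables Redis logging
    redisport=6379,
    redisdb=0,
)
app = create_app(settings=settings)
uvicorn.run(app, host="0.0.0.0", port=settings.port)

Since Settings is a pydantic BaseSettings, the same fields can typically also be supplied as environment variables (e.g. REDISHOST), though that depends on how the settings class is configured.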
