Commit 33c022f

Author: juanroesel
ZenHubHQ/devops#2233 - Modified tests to accommodate 'ai_service' param integration

1 parent 50d5f2b · commit 33c022f
1 file changed (+12, -9 lines)
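Context for the diff below: the tests now pass an ai_service keyword to Llama.create_completion. The signature change itself is not part of this commit, so the following is only a minimal sketch, assuming create_completion simply accepts and records the tag; the LlamaLike class and last_ai_service attribute are hypothetical names for illustration.

from typing import List, Optional

class LlamaLike:
    """Hypothetical stand-in for llama_cpp.Llama, for illustration only."""

    def create_completion(
        self,
        prompt: str,
        max_tokens: int = 16,
        stream: bool = False,
        stop: Optional[List[str]] = None,
        ai_service: Optional[str] = None,  # assumed new param: tags the calling service
    ) -> dict:
        # A real implementation would generate tokens here; this sketch only
        # shows where the assumed ai_service tag would be received and kept.
        self.last_ai_service = ai_service
        return {"choices": [{"text": "", "finish_reason": "stop"}]}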

tests/test_llama.py (12 additions, 9 deletions)
@@ -153,7 +153,9 @@ def mock_kv_cache_seq_add(

 def test_llama_patch(mock_llama):
     n_ctx = 128
+    ai_service = "testing"
     llama = llama_cpp.Llama(model_path=MODEL, vocab_only=True, n_ctx=n_ctx)
+
     n_vocab = llama_cpp.llama_n_vocab(llama._model.model)
     assert n_vocab == 32000

@@ -163,32 +165,32 @@ def test_llama_patch(mock_llama):

     ## Test basic completion from bos until eos
     mock_llama(llama, all_text)
-    completion = llama.create_completion("", max_tokens=36)
+    completion = llama.create_completion("", max_tokens=36, ai_service=ai_service)
     assert completion["choices"][0]["text"] == all_text
     assert completion["choices"][0]["finish_reason"] == "stop"

     ## Test basic completion until eos
     mock_llama(llama, all_text)
-    completion = llama.create_completion(text, max_tokens=20)
+    completion = llama.create_completion(text, max_tokens=20, ai_service=ai_service)
     assert completion["choices"][0]["text"] == output_text
     assert completion["choices"][0]["finish_reason"] == "stop"

     ## Test streaming completion until eos
     mock_llama(llama, all_text)
-    chunks = list(llama.create_completion(text, max_tokens=20, stream=True))
+    chunks = list(llama.create_completion(text, max_tokens=20, stream=True, ai_service=ai_service))
     assert "".join(chunk["choices"][0]["text"] for chunk in chunks) == output_text
     assert chunks[-1]["choices"][0]["finish_reason"] == "stop"

     ## Test basic completion until stop sequence
     mock_llama(llama, all_text)
-    completion = llama.create_completion(text, max_tokens=20, stop=["lazy"])
+    completion = llama.create_completion(text, max_tokens=20, stop=["lazy"], ai_service=ai_service)
     assert completion["choices"][0]["text"] == " jumps over the "
     assert completion["choices"][0]["finish_reason"] == "stop"

     ## Test streaming completion until stop sequence
     mock_llama(llama, all_text)
     chunks = list(
-        llama.create_completion(text, max_tokens=20, stream=True, stop=["lazy"])
+        llama.create_completion(text, max_tokens=20, stream=True, stop=["lazy"], ai_service=ai_service)
     )
     assert (
         "".join(chunk["choices"][0]["text"] for chunk in chunks) == " jumps over the "
@@ -197,13 +199,13 @@ def test_llama_patch(mock_llama):

     ## Test basic completion until length
     mock_llama(llama, all_text)
-    completion = llama.create_completion(text, max_tokens=2)
+    completion = llama.create_completion(text, max_tokens=2, ai_service=ai_service)
     assert completion["choices"][0]["text"] == " jumps"
     assert completion["choices"][0]["finish_reason"] == "length"

     ## Test streaming completion until length
     mock_llama(llama, all_text)
-    chunks = list(llama.create_completion(text, max_tokens=2, stream=True))
+    chunks = list(llama.create_completion(text, max_tokens=2, stream=True, ai_service=ai_service))
     assert "".join(chunk["choices"][0]["text"] for chunk in chunks) == " jumps"
     assert chunks[-1]["choices"][0]["finish_reason"] == "length"

@@ -230,15 +232,16 @@ def test_utf8(mock_llama):
     llama = llama_cpp.Llama(model_path=MODEL, vocab_only=True, logits_all=True)

     output_text = "😀"
+    ai_service = "testing"

     ## Test basic completion with utf8 multibyte
     mock_llama(llama, output_text)
-    completion = llama.create_completion("", max_tokens=4)
+    completion = llama.create_completion("", max_tokens=4, ai_service=ai_service)
     assert completion["choices"][0]["text"] == output_text

     ## Test basic completion with incomplete utf8 multibyte
     mock_llama(llama, output_text)
-    completion = llama.create_completion("", max_tokens=1)
+    completion = llama.create_completion("", max_tokens=1, ai_service=ai_service)
     assert completion["choices"][0]["text"] == ""

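A hypothetical, self-contained check (not part of this commit) mirroring the pattern in the diff: it reuses the LlamaLike sketch above to confirm that an ai_service keyword is accepted and recorded, with "testing" matching the value the modified tests assign.

def test_ai_service_is_forwarded():
    # Uses the LlamaLike sketch defined earlier; names are hypothetical.
    llama = LlamaLike()
    completion = llama.create_completion("", max_tokens=4, ai_service="testing")
    assert completion["choices"][0]["finish_reason"] == "stop"
    assert llama.last_ai_service == "testing"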
0 comments