2 files changed: +4 -4 lines changed
First changed file (the server's tokenize/detokenize route handlers):

@@ -493,7 +493,7 @@ async def tokenize(
 ) -> TokenizeInputResponse:
     tokens = llama_proxy(body.model).tokenize(body.input.encode("utf-8"), special=True)

-    return {"tokens": tokens}
+    return TokenizeInputResponse(tokens=tokens)


 @router.post(
@@ -508,7 +508,7 @@ async def count_query_tokens(
 ) -> TokenizeInputCountResponse:
     tokens = llama_proxy(body.model).tokenize(body.input.encode("utf-8"), special=True)

-    return {"count": len(tokens)}
+    return TokenizeInputCountResponse(count=len(tokens))


 @router.post(
@@ -523,4 +523,4 @@ async def detokenize(
 ) -> DetokenizeInputResponse:
     text = llama_proxy(body.model).detokenize(body.tokens).decode("utf-8")

-    return {"text": text}
+    return DetokenizeInputResponse(text=text)
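
These hunks replace bare dict returns with instances of the endpoints' declared response models, so type errors surface at the return site itself rather than relying on FastAPI's response-model coercion of a bare dict. As a rough sketch (not taken from this diff), the response models referenced above are presumably plain Pydantic models along these lines; the field names match the diff, the field types and everything else are illustrative assumptions:

# Hypothetical sketch of the response models referenced in the hunks above;
# the real definitions live in the project's types module and may differ.
from typing import List

from pydantic import BaseModel


class TokenizeInputResponse(BaseModel):
    tokens: List[int]  # token ids produced by the tokenizer


class TokenizeInputCountResponse(BaseModel):
    count: int  # number of tokens in the input


class DetokenizeInputResponse(BaseModel):
    text: str  # text reconstructed from the token ids


# Constructing TokenizeInputResponse(tokens=tokens) instead of returning
# {"tokens": tokens} fails fast on a mistyped or missing field rather than
# silently serializing whatever the dict happens to contain.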
Second changed file (the request/response type definitions):

@@ -268,7 +268,7 @@ class ModelList(TypedDict):

 class TokenizeInputRequest(BaseModel):
     model: Optional[str] = model_field
-    input: Optional[str] = Field(description="The input to tokenize.")
+    input: str = Field(description="The input to tokenize.")

     model_config = {
         "json_schema_extra": {"examples": [{"input": "How many tokens in this query?"}]}