File tree — 6 files changed: +41 −4 lines changed
Filter options
Original file line number Diff line number Diff line change @@ -7,6 +7,15 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
7
7
8
8
## [ Unreleased]
9
9
10
+ ## [ 0.2.88]
11
+
12
+ - feat: Update llama.cpp to ggerganov/llama.cpp@fc4ca27b25464a11b3b86c9dbb5b6ed6065965c2
13
+ - fix: only print 'cache saved' in verbose mode by @lsorber in #1668
14
+ - fix: Added back from_file method to LlamaGrammar by @ExtReMLapin in #1673
15
+ - fix: grammar prints on each call by @abetlen in 0998ea0deea076a547d54bd598d6b413b588ee2b
16
+ - feat: Enable recursive search of HFFS.ls when using from_pretrained by @benHeidabetlen in #1656
17
+ - feat: Add more detailed log for prefix-match by @xu-song in #1659
18
+
10
19
## [ 0.2.87]
11
20
12
21
- feat: Update llama.cpp to ggerganov/llama.cpp@be55695eff44784a141a863f273661a6bce63dfc
# Package entry point: re-export the low-level ctypes bindings (llama_cpp)
# and the high-level Python API (llama) at the top level of the package.
from .llama_cpp import *
from .llama import *

# Bumped for this release; the trailing space inside the scraped literal
# ("0.2.88 ") was an extraction artifact, not part of the real version string.
__version__ = "0.2.88"
Original file line number Diff line number Diff line change @@ -1528,7 +1528,8 @@ def logit_bias_processor(
1528
1528
if self .verbose :
1529
1529
print ("Llama._create_completion: cache save" , file = sys .stderr )
1530
1530
self .cache [prompt_tokens + completion_tokens ] = self .save_state ()
1531
- print ("Llama._create_completion: cache saved" , file = sys .stderr )
1531
+ if self .verbose :
1532
+ print ("Llama._create_completion: cache saved" , file = sys .stderr )
1532
1533
return
1533
1534
1534
1535
if self .cache :
Original file line number Diff line number Diff line change @@ -1505,6 +1505,14 @@ def llama_model_has_encoder(model: llama_model_p, /) -> bool:
1505
1505
...
1506
1506
1507
1507
1508
# // Returns true if the model contains a decoder that requires llama_decode() call
# LLAMA_API bool llama_model_has_decoder(const struct llama_model * model);
@ctypes_function("llama_model_has_decoder", [llama_model_p_ctypes], ctypes.c_bool)
def llama_model_has_decoder(model: llama_model_p, /) -> bool:
    """Returns true if the model contains a decoder that requires llama_decode() call"""
    ...
1508
1516
# // For encoder-decoder models, this function returns id of the token that must be provided
1509
1517
# // to the decoder to start generating output sequence. For other models, it returns -1.
1510
1518
# LLAMA_API llama_token llama_model_decoder_start_token(const struct llama_model * model);
Original file line number Diff line number Diff line change 1
1
"""Python implementation of llama grammar parser directly translated from C++ source file in vendor/llama.cpp/common/grammar-parser.cpp."""
2
2
3
3
# flake8: noqa
4
+ from pathlib import Path
4
5
import sys
5
6
import ctypes
6
7
import enum
@@ -890,8 +891,26 @@ def reset(self):
890
891
@classmethod
891
892
def from_string (cls , grammar : str , verbose : bool = True ) -> "LlamaGrammar" :
892
893
parsed_grammar = parse (grammar )
893
- print_grammar (file = sys .stdout , state = parsed_grammar )
894
+ if verbose :
895
+ print_grammar (file = sys .stdout , state = parsed_grammar )
894
896
return cls (parsed_grammar )
897
+
898
+ @classmethod
899
+ def from_file (cls , file : Union [str , Path ], verbose : bool = True ) -> "LlamaGrammar" :
900
+ try :
901
+ with open (file ) as f :
902
+ grammar = f .read ()
903
+ except Exception as err :
904
+ raise Exception (
905
+ f"{ cls .from_file .__name__ } : error reading grammar file: { err } "
906
+ )
907
+
908
+ if grammar :
909
+ return cls .from_string (grammar , verbose = verbose )
910
+
911
+ raise ValueError (
912
+ f"{ cls .from_file .__name__ } : error parsing grammar file: params_grammer is empty"
913
+ )
895
914
896
915
@classmethod
897
916
def from_json_schema (cls , json_schema : str , verbose : bool = True ) -> "LlamaGrammar" :
You can’t perform that action at this time.
0 commit comments