1 parent 04d9218 commit b4a3db3
llama_cpp/llama.py
@@ -325,7 +325,7 @@ def __init__(
         self._token_eos = Llama.token_eos()
 
         self._input_ids = np.array([], dtype=np.intc)
-        self._scores = np.ndarray((0, self._n_vocab), dtype=np.single)
+        self._scores: npt.NDArray[np.single] = np.ndarray((0, self._n_vocab), dtype=np.single)
 
     def tokenize(self, text: bytes, add_bos: bool = True) -> List[int]:
         """Tokenize a string.
@@ -405,7 +405,7 @@ def eval(self, tokens: Sequence[int]):
         """
         assert self.ctx is not None
         n_ctx = self._n_ctx
-        scores = []
+        scores: List[npt.NDArray[np.single]] = []
        for i in range(0, len(tokens), self.n_batch):
            batch = tokens[i : min(len(tokens), i + self.n_batch)]
            n_past = min(n_ctx - len(batch), len(self._input_ids))
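Both hunks tighten the typing of the logits buffers: the instance attribute self._scores and the per-eval accumulator scores are annotated with npt.NDArray[np.single] and List[npt.NDArray[np.single]]. This presumes numpy.typing is available under the alias npt (typically via import numpy.typing as npt) elsewhere in llama.py, which the hunks shown here do not include. The sketch below is a minimal standalone reduction of the pattern; the class and method names are illustrative stand-ins, not the actual Llama API.

# Minimal sketch of the annotation pattern introduced in the diff. Only the
# npt.NDArray[np.single] / List[...] annotations mirror the commit; the class
# and method names are hypothetical.
from typing import List

import numpy as np
import numpy.typing as npt  # assumed to be imported as `npt` in llama.py


class ScoresBuffer:
    def __init__(self, n_vocab: int) -> None:
        # Empty (0, n_vocab) float32 array; the annotation tells type checkers
        # the element dtype without changing runtime behaviour.
        self._scores: npt.NDArray[np.single] = np.ndarray((0, n_vocab), dtype=np.single)

    def append_batches(self, batch_logits: List[npt.NDArray[np.single]]) -> None:
        # Per-batch logits are collected in a typed list and stacked onto the buffer.
        scores: List[npt.NDArray[np.single]] = list(batch_logits)
        if scores:
            self._scores = np.concatenate([self._scores, *scores], axis=0)

Annotating the empty containers up front is what lets static checkers (e.g. mypy or pyright) infer a concrete dtype for later reads of self._scores instead of falling back to Any.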