Error Message:
Loading model file vicuna-hf/model-00001-of-00004.safetensors
Loading model file vicuna-hf/model-00001-of-00004.safetensors
Loading model file vicuna-hf/model-00002-of-00004.safetensors
Loading model file vicuna-hf/model-00003-of-00004.safetensors
Loading model file vicuna-hf/model-00004-of-00004.safetensors
params = Params(n_vocab=128256, n_embd=4096, n_layer=32, n_ctx=8192, n_ff=14336, n_head=32, n_head_kv=8, n_experts=None, n_experts_used=None, f_norm_eps=1e-05, rope_scaling_type=None, f_rope_freq_base=500000.0, f_rope_scale=None, n_orig_ctx=None, rope_finetuned=None, ftype=<GGMLFileType.MostlyF16: 1>, path_model=PosixPath('vicuna-hf'))
Traceback (most recent call last):
  File "/content/llama.cpp/convert.py", line 1548, in <module>
    main()
  File "/content/llama.cpp/convert.py", line 1515, in main
    vocab, special_vocab = vocab_factory.load_vocab(vocab_types, model_parent_path)
  File "/content/llama.cpp/convert.py", line 1417, in load_vocab
    vocab = self._create_vocab_by_path(vocab_types)
  File "/content/llama.cpp/convert.py", line 1407, in _create_vocab_by_path
    raise FileNotFoundError(f"Could not find a tokenizer matching any of {vocab_types}")
FileNotFoundError: Could not find a tokenizer matching any of ['spm', 'hfft']
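A plausible cause, inferred from the log rather than stated in the report: the params line shows n_vocab=128256 and f_rope_freq_base=500000.0, the geometry of a Llama 3 base model, which ships a BPE tokenizer.json rather than the SentencePiece tokenizer.model that the default vocab types spm and hfft look for. Assuming this version of convert.py still accepts the --vocab-type flag that load_vocab consumes, retrying with the BPE tokenizer selected explicitly may get past this error:

    python convert.py vicuna-hf --vocab-type bpe

If that still fails, check that vicuna-hf actually contains tokenizer.json (or tokenizer.model); the exception is raised only after each requested vocab type fails to match a tokenizer file in the model directory.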