6 files changed: +410 −1

Makefile:

  # Define the default target now so that it is always the first target
  BUILD_TARGETS = \
-     main quantize quantize-stats perplexity embedding vdot q8dot train-text-from-scratch convert-llama2c-to-ggml \
+     main quantize quantize-stats perplexity imatrix embedding vdot q8dot train-text-from-scratch convert-llama2c-to-ggml \
      simple batched batched-bench save-load-state server gguf llama-bench libllava.a llava-cli baby-llama beam-search \
      speculative infill tokenize benchmark-matmult parallel finetune export-lora lookahead lookup passkey tests/test-c.o

@@ -614,6 +614,9 @@ quantize-stats: examples/quantize-stats/quantize-stats.cpp build-info.o ggml.
  perplexity: examples/perplexity/perplexity.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS)
      $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

+ imatrix: examples/imatrix/imatrix.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS)
+     $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
+
  embedding: examples/embedding/embedding.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS)
      $(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

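The new imatrix rule follows the same pattern as the surrounding example rules: compile the example's single source file against ggml.o, llama.o, and the common objects, and list the target in BUILD_TARGETS so a bare `make` builds it by default. A minimal usage sketch follows; the invocation flags and file names are assumptions based on how the other llama.cpp examples are typically run, not something this diff confirms.

```sh
# Build only the new tool (a plain `make` builds everything in BUILD_TARGETS).
make imatrix

# Hypothetical invocation: -m (model) and -f (input text) follow the common
# llama.cpp example conventions; see the example's source for the real options.
./imatrix -m models/7B/ggml-model-f16.gguf -f calibration-data.txt
```
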
examples/CMakeLists.txt:

@@ -36,6 +36,7 @@
      add_subdirectory(lookahead)
      add_subdirectory(lookup)
      add_subdirectory(train-text-from-scratch)
+     add_subdirectory(imatrix)
      if (LLAMA_METAL)
          add_subdirectory(metal)
      endif()

examples/imatrix/CMakeLists.txt (new file):

+ set(TARGET imatrix)
+ add_executable(${TARGET} imatrix.cpp)
+ install(TARGETS ${TARGET} RUNTIME)
+ target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
+ target_compile_features(${TARGET} PRIVATE cxx_std_11)
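
On the CMake side, the new directory is pulled in by the add_subdirectory(imatrix) line added to examples/CMakeLists.txt above, so a normal out-of-tree build produces the same binary. A sketch using standard CMake commands; the build directory name and the output location are illustrative, not dictated by this diff.

```sh
# Configure once, then build just the imatrix target defined above.
cmake -B build
cmake --build build --config Release --target imatrix
# In a typical llama.cpp build the binary ends up under build/bin/.
```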