#2324 introduced distributed inferencing, thanks to @rgerganov's implementation in ggml-org/llama.cpp#6829 upstream: it is now possible to distribute the workload to remote llama.cpp gRPC servers.
This changeset uses mudler/edgevpn to establish a secure, distributed network between the nodes using a shared token. The token is generated automatically when the server is started with the `--p2p` flag, and workers can join by running `local-ai worker p2p-llama-cpp-rpc`, passing the token either via an environment variable (`TOKEN`) or as an argument (`--token`).
As per how mudler/edgevpn works, a network is established between the server and the workers using the DHT and mDNS discovery protocols; the llama.cpp RPC server is started automatically on each worker and exposed to the underlying P2P network, so the API server can connect to it. When the HTTP server starts, it discovers the workers on the network and automatically creates local port-forwards to their services; llama.cpp is then configured to use those services (a minimal sketch of such a forward follows).
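A minimal sketch of the per-worker port-forward described above, using only the standard library. This is an illustration, not the PR's actual code: the real implementation opens streams over the mudler/edgevpn P2P network, for which a plain `net.Dial` stands in here, and the addresses are hypothetical.

```go
package main

import (
	"io"
	"log"
	"net"
)

// forward listens on localAddr and pipes every connection to workerAddr.
func forward(localAddr, workerAddr string) error {
	l, err := net.Listen("tcp", localAddr)
	if err != nil {
		return err
	}
	for {
		conn, err := l.Accept()
		if err != nil {
			return err
		}
		go func(c net.Conn) {
			defer c.Close()
			// The real code opens a stream over the edgevpn P2P network;
			// a plain TCP dial stands in for it in this sketch.
			remote, err := net.Dial("tcp", workerAddr)
			if err != nil {
				log.Println("dial worker:", err)
				return
			}
			defer remote.Close()
			go io.Copy(remote, c)
			io.Copy(c, remote)
		}(conn)
	}
}

func main() {
	// llama.cpp is then pointed at 127.0.0.1:9090 as one of its RPC servers.
	log.Fatal(forward("127.0.0.1:9090", "192.168.1.50:50052"))
}
```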
This feature is gated behind the "p2p" GO_FLAGS.
Signed-off-by: Ettore Di Giacinto <[email protected]>
In the top-level CLI command tree, the previous `LLAMACPPWorker` subcommand is replaced by the new `worker.Worker` command group:

```diff
-	Run            RunCMD            `cmd:"" help:"Run LocalAI, this the default command if no other command is specified. Run 'local-ai run --help' for more information" default:"withargs"`
-	Models         ModelsCMD         `cmd:"" help:"Manage LocalAI models and definitions"`
-	TTS            TTSCMD            `cmd:"" help:"Convert text to speech"`
-	Transcript     TranscriptCMD     `cmd:"" help:"Convert audio to text"`
-	LLAMACPPWorker LLAMACPPWorkerCMD `cmd:"" help:"Run workers to distribute workload (llama.cpp-only)"`
+	Run        RunCMD        `cmd:"" help:"Run LocalAI, this the default command if no other command is specified. Run 'local-ai run --help' for more information" default:"withargs"`
+	Models     ModelsCMD     `cmd:"" help:"Manage LocalAI models and definitions"`
+	TTS        TTSCMD        `cmd:"" help:"Convert text to speech"`
+	Transcript TranscriptCMD `cmd:"" help:"Convert audio to text"`
+	Worker     worker.Worker `cmd:"" help:"Run workers to distribute workload (llama.cpp-only)"`
```
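These are alecthomas/kong struct tags, so the `cmd:""`/`name:""` annotations define the command tree directly. A minimal, self-contained sketch of how they map to the `local-ai worker p2p-llama-cpp-rpc` invocation; the reduced flag set and the `Run` body are hypothetical, for illustration only:

```go
package main

import "github.com/alecthomas/kong"

// P2P is a reduced, hypothetical version of the real subcommand's flag set.
type P2P struct {
	Token string `env:"TOKEN" help:"Shared network token"`
}

// Run is invoked by kong when this subcommand is selected.
func (p *P2P) Run() error {
	// start the worker and join the P2P network with p.Token ...
	return nil
}

type Worker struct {
	P2P P2P `cmd:"" name:"p2p-llama-cpp-rpc" help:"Starts a LocalAI llama.cpp worker in P2P mode (requires a token)"`
}

type CLI struct {
	Worker Worker `cmd:"" help:"Run workers to distribute workload (llama.cpp-only)"`
}

func main() {
	// "local-ai worker p2p-llama-cpp-rpc --token=<token>" (or TOKEN=<token>)
	// resolves to CLI.Worker.P2P and calls its Run method.
	ctx := kong.Parse(&CLI{})
	ctx.FatalIfErrorf(ctx.Run())
}
```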
`RunCMD` gains an optional `Peer2PeerToken` flag (settable as `--p2ptoken`, or via the `LOCALAI_P2P_TOKEN`/`P2P_TOKEN` environment variables):

```diff
 	Threads     int `env:"LOCALAI_THREADS,THREADS" short:"t" default:"4" help:"Number of threads used for parallel computation. Usage of the number of physical cores in the system is suggested" group:"performance"`
 	ContextSize int `env:"LOCALAI_CONTEXT_SIZE,CONTEXT_SIZE" default:"512" help:"Default context size for models" group:"performance"`

 	Address     string   `env:"LOCALAI_ADDRESS,ADDRESS" default:":8080" help:"Bind address for the API server" group:"api"`
 	UploadLimit int      `env:"LOCALAI_UPLOAD_LIMIT,UPLOAD_LIMIT" default:"15" help:"Default upload-limit in MB" group:"api"`
 	APIKeys     []string `env:"LOCALAI_API_KEY,API_KEY" help:"List of API Keys to enable API authentication. When this is set, all the requests must be authenticated with one of these API keys" group:"api"`
+	Peer2PeerToken string `env:"LOCALAI_P2P_TOKEN,P2P_TOKEN" name:"p2ptoken" help:"Token for P2P mode (optional)" group:"p2p"`

 	ParallelRequests    bool `env:"LOCALAI_PARALLEL_REQUESTS,PARALLEL_REQUESTS" help:"Enable backends to handle multiple requests in parallel if they support it (e.g.: llama.cpp or vllm)" group:"backends"`
 	SingleActiveBackend bool `env:"LOCALAI_SINGLE_ACTIVE_BACKEND,SINGLE_ACTIVE_BACKEND" help:"Allow only one backend to be run at a time" group:"backends"`
 	PreloadBackendOnly  bool `env:"LOCALAI_PRELOAD_BACKEND_ONLY,PRELOAD_BACKEND_ONLY" default:"false" help:"Do not launch the API services, only the preloaded models / backends are started (useful for multi-node setups)" group:"backends"`
@@ -54,7 +58,7 @@ type RunCMD struct {
 	WatchdogBusyTimeout string `env:"LOCALAI_WATCHDOG_BUSY_TIMEOUT,WATCHDOG_BUSY_TIMEOUT" default:"5m" help:"Threshold beyond which a busy backend should be stopped" group:"backends"`
 	BackendAssetsPath   string `env:"LOCALAI_BACKEND_ASSETS_PATH,BACKEND_ASSETS_PATH" type:"path" default:"/tmp/localai/backend_data" help:"Path used to extract libraries that are required by some of the backends in runtime" group:"storage"`
```
The worker subcommands live in a new `worker` package:

```go
package worker

// Flags shared by the worker subcommands.
type WorkerFlags struct {
	BackendAssetsPath string `env:"LOCALAI_BACKEND_ASSETS_PATH,BACKEND_ASSETS_PATH" type:"path" default:"/tmp/localai/backend_data" help:"Path used to extract libraries that are required by some of the backends in runtime" group:"storage"`
}

// Worker is the "local-ai worker" command group.
type Worker struct {
	P2P      P2P      `cmd:"" name:"p2p-llama-cpp-rpc" help:"Starts a LocalAI llama.cpp worker in P2P mode (requires a token)"`
	LLamaCPP LLamaCPP `cmd:"" name:"llama-cpp-rpc" help:"Starts a llama.cpp worker in standalone mode"`
}

// LLamaCPP runs a llama.cpp RPC worker in standalone mode.
type LLamaCPP struct {
	Args []string `arg:"" optional:"" name:"models" help:"Model configuration URLs to load"`
}
```
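For illustration, a hedged sketch of what the standalone `llama-cpp-rpc` subcommand plausibly does: spawn the llama.cpp RPC server (added upstream in ggml-org/llama.cpp#6829) extracted under `BackendAssetsPath`. The helper name, binary name, and path layout here are assumptions, not the PR's actual code:

```go
package worker

import (
	"os"
	"os/exec"
	"path/filepath"
)

// runStandalone execs the llama.cpp RPC server binary, passing through the
// subcommand's args. Binary name and location are illustrative guesses.
func runStandalone(flags WorkerFlags, args []string) error {
	bin := filepath.Join(flags.BackendAssetsPath, "util", "llama-cpp-rpc-server")
	cmd := exec.Command(bin, args...)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	return cmd.Run()
}
```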