
Commit aa71356

train : remove LLAMA_SUPPORTS_GPU_OFFLOAD
1 parent 8bfb0b6 commit aa71356

File tree

1 file changed: +6 -6 lines changed


common/train.cpp

Lines changed: 6 additions & 6 deletions
@@ -1363,12 +1363,12 @@ bool consume_common_train_arg(
             *invalid_param = true;
             return true;
         }
-#ifdef LLAMA_SUPPORTS_GPU_OFFLOAD
-        params->n_gpu_layers = std::stoi(argv[i]);
-#else
-        fprintf(stderr, "warning: not compiled with GPU offload support, --n-gpu-layers option will be ignored\n");
-        fprintf(stderr, "warning: see main README.md for information on enabling GPU BLAS support\n");
-#endif
+        if (llama_supports_gpu_offload()) {
+            params->n_gpu_layers = std::stoi(argv[i]);
+        } else {
+            fprintf(stderr, "warning: not compiled with GPU offload support, --n-gpu-layers option will be ignored\n");
+            fprintf(stderr, "warning: see main README.md for information on enabling GPU BLAS support\n");
+        }
     } else if (arg == "-h" || arg == "--help") {
         params->print_usage = true;
         return true;
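
For context: the change replaces the compile-time LLAMA_SUPPORTS_GPU_OFFLOAD guard with the runtime query llama_supports_gpu_offload() from llama.h, so the same binary decides at run time whether --n-gpu-layers can be honored. Below is a minimal standalone sketch of that pattern; the helper name parse_n_gpu_layers is hypothetical and not part of the commit.

    // Sketch only: mirrors the runtime check introduced by this commit.
    // parse_n_gpu_layers is a hypothetical helper, not code from common/train.cpp.
    #include <cstdio>
    #include <string>

    #include "llama.h" // declares: bool llama_supports_gpu_offload(void);

    static int parse_n_gpu_layers(const char * value) {
        if (llama_supports_gpu_offload()) {
            // GPU offload is available in this build: honor the requested layer count.
            return std::stoi(value);
        }
        // CPU-only build: warn and fall back to zero offloaded layers.
        fprintf(stderr, "warning: not compiled with GPU offload support, --n-gpu-layers option will be ignored\n");
        fprintf(stderr, "warning: see main README.md for information on enabling GPU BLAS support\n");
        return 0;
    }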

0 commit comments
