@@ -23,6 +23,7 @@ import (
 
 	"github.com/sourcegraph/src-cli/internal/api"
 	"github.com/sourcegraph/src-cli/internal/batches"
+	"github.com/sourcegraph/src-cli/internal/batches/docker"
 	"github.com/sourcegraph/src-cli/internal/batches/executor"
 	"github.com/sourcegraph/src-cli/internal/batches/graphql"
 	"github.com/sourcegraph/src-cli/internal/batches/service"
@@ -121,8 +122,8 @@ func newBatchExecuteFlags(flagSet *flag.FlagSet, workspaceExecution bool, cacheD
 	)
 
 	flagSet.IntVar(
-		&caf.parallelism, "j", runtime.GOMAXPROCS(0),
-		"The maximum number of parallel jobs. Default is GOMAXPROCS.",
+		&caf.parallelism, "j", 0,
+		"The maximum number of parallel jobs. Default (or 0) is the number of CPU cores available to Docker, or GOMAXPROCS if Docker cannot report its number of cores.",
 	)
 	flagSet.DurationVar(
 		&caf.timeout, "timeout", 60*time.Minute,
@@ -275,7 +276,11 @@ func executeBatchSpec(ctx context.Context, ui ui.ExecUI, opts executeBatchSpecOp
 		return err
 	}
 
-	if err := checkExecutable("docker", "version"); err != nil {
+	// In the past, we checked `docker version`, but now we retrieve the number
+	// of CPUs, since we need that anyway and it performs the same check (is
+	// Docker working _at all_?).
+	parallelism, err := getBatchParallelism(ctx, opts.flags.parallelism)
+	if err != nil {
 		return err
 	}
 
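(Annotation: `checkExecutable` itself is not shown in this diff. As a rough idea of what the removed check did, here is a hypothetical sketch, assuming the helper simply runs the command once and wraps any failure in a friendly error; only the name and the "docker"/"version" arguments come from the diff, the body is a guess.)

package main

import (
	"fmt"
	"os/exec"
)

// checkExecutable is a hypothetical reconstruction of the helper removed
// above: run the command once and report a friendly error if it fails.
func checkExecutable(cmd string, args ...string) error {
	if err := exec.Command(cmd, args...).Run(); err != nil {
		return fmt.Errorf("failed to run %q %v: %w (is it installed and in your $PATH?)", cmd, args, err)
	}
	return nil
}

func main() {
	fmt.Println(checkExecutable("docker", "version"))
}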
@@ -307,7 +312,7 @@ func executeBatchSpec(ctx context.Context, ui ui.ExecUI, opts executeBatchSpecOp
 	if len(batchSpec.Steps) > 0 {
 		ui.PreparingContainerImages()
 		images, err := svc.EnsureDockerImages(
-			ctx, batchSpec.Steps, opts.flags.parallelism,
+			ctx, batchSpec.Steps, parallelism,
 			ui.PreparingContainerImagesProgress,
 		)
 		if err != nil {
@@ -354,7 +359,7 @@ func executeBatchSpec(ctx context.Context, ui ui.ExecUI, opts executeBatchSpecOp
 		Cache:         executor.NewDiskCache(opts.flags.cacheDir),
 		SkipErrors:    opts.flags.skipErrors,
 		CleanArchives: opts.flags.cleanArchives,
-		Parallelism:   opts.flags.parallelism,
+		Parallelism:   parallelism,
 		Timeout:       opts.flags.timeout,
 		KeepLogs:      opts.flags.keepLogs,
 		TempDir:       opts.flags.tempDir,
@@ -386,7 +391,7 @@ func executeBatchSpec(ctx context.Context, ui ui.ExecUI, opts executeBatchSpecOp
 	}
 	ui.CheckingCacheSuccess(len(specs), len(uncachedTasks))
 
-	taskExecUI := ui.ExecutingTasks(*verbose, opts.flags.parallelism)
+	taskExecUI := ui.ExecutingTasks(*verbose, parallelism)
 	freshSpecs, logFiles, execErr := coord.ExecuteAndBuildSpecs(ctx, batchSpec, uncachedTasks, taskExecUI)
 	// Add external changeset specs.
 	importedSpecs, importErr := svc.CreateImportChangesetSpecs(ctx, batchSpec)
@@ -533,3 +538,25 @@ func contextCancelOnInterrupt(parent context.Context) (context.Context, func())
 		ctxCancel()
 	}
 }
+
+func getBatchParallelism(ctx context.Context, flag int) (int, error) {
+	if flag > 0 {
+		return flag, nil
+	}
+
+	ncpu, err := docker.NCPU(ctx)
+	var terr docker.TimeoutError
+	if errors.As(err, &terr) {
+		return 0, err
+	} else if err != nil {
+		// In the case of errors from Docker itself, we want to fall back to
+		// GOMAXPROCS, since it's possible Docker just doesn't have access to
+		// the CPU core count (either due to permissions, or being too old).
+		//
+		// It would obviously be better if we had a global logger available to
+		// log this.
+		return runtime.GOMAXPROCS(0), nil
+	}
+
+	return ncpu, nil
+}
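(Annotation: the `docker.NCPU` helper and `docker.TimeoutError` type live in the `internal/batches/docker` package imported above and are not part of this diff. A minimal, hypothetical sketch of how such a helper could work, assuming it shells out to `docker info --format '{{.NCPU}}'` and reports a context deadline as `TimeoutError`; the actual package may differ in both shape and behavior.)

package docker

import (
	"context"
	"errors"
	"fmt"
	"os/exec"
	"strconv"
	"strings"
)

// TimeoutError signals that Docker did not answer before the deadline,
// which usually means the daemon is not running at all.
type TimeoutError struct{ Args []string }

func (e TimeoutError) Error() string {
	return fmt.Sprintf("docker %s timed out; is the Docker daemon running?", strings.Join(e.Args, " "))
}

// NCPU asks Docker how many CPU cores it has access to. Because it talks to
// the daemon, it doubles as the "is Docker working at all?" check that
// `docker version` used to provide.
func NCPU(ctx context.Context) (int, error) {
	args := []string{"info", "--format", "{{.NCPU}}"}
	out, err := exec.CommandContext(ctx, "docker", args...).Output()
	if errors.Is(ctx.Err(), context.DeadlineExceeded) {
		return 0, TimeoutError{Args: args}
	}
	if err != nil {
		return 0, err
	}
	return strconv.Atoi(strings.TrimSpace(string(out)))
}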