Skip to content

Commit 7ffa15f

Browse files
committed
Some cleanup. FCS checking is failing for an unknown reason.
1 parent 9fa6952 commit 7ffa15f

16 files changed

+72
-562
lines changed

.fantomasignore

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@ artifacts/
1212

1313
# Explicitly formatted Tests/ subdirectories (with exceptions)
1414
!tests/ParallelTypeCheckingTests/
15-
*/.checkouts/
15+
*/.fcs_test/
1616

1717
# Explicitly unformatted implementation files
1818

src/Compiler/Driver/CompilerConfig.fsi

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -205,8 +205,11 @@ type ParallelReferenceResolution =
205205

206206
[<RequireQualifiedAccess>]
207207
type TypeCheckingMode =
208+
/// Default mode where all source files are processed sequentially in compilation order.
208209
| Sequential
210+
/// Signature files and implementation files without backing files are processed sequentially, then backed implementation files are processed in parallel.
209211
| ParallelCheckingOfBackedImplFiles
212+
/// Parallel type-checking that uses automated file-to-file dependency detection to construct a highly-parallelisable file graph.
210213
| Graph
211214

212215
[<RequireQualifiedAccess>]

src/Compiler/Driver/CompilerOptions.fs

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1392,6 +1392,11 @@ let testFlag tcConfigB =
13921392
{ tcConfigB.typeCheckingConfig with
13931393
Mode = TypeCheckingMode.ParallelCheckingOfBackedImplFiles
13941394
}
1395+
| "GraphBasedChecking" ->
1396+
tcConfigB.typeCheckingConfig <-
1397+
{ tcConfigB.typeCheckingConfig with
1398+
Mode = TypeCheckingMode.Graph
1399+
}
13951400
#if DEBUG
13961401
| "ShowParserStackOnParseError" -> showParserStackOnParseError <- true
13971402
#endif

src/Compiler/Driver/ParseAndCheckInputs.fs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1729,7 +1729,7 @@ let mutable typeCheckingMode: TypeCheckingMode = TypeCheckingMode.Sequential
17291729
let CheckClosedInputSet (ctok, checkForErrors, tcConfig: TcConfig, tcImports, tcGlobals, prefixPathOpt, tcState, eagerFormat, inputs) =
17301730
// tcEnvAtEndOfLastFile is the environment required by fsi.exe when incrementally adding definitions
17311731
let results, tcState =
1732-
match typeCheckingMode with
1732+
match tcConfig.typeCheckingConfig.Mode with
17331733
| TypeCheckingMode.Sequential ->
17341734
CheckMultipleInputsSequential(ctok, checkForErrors, tcConfig, tcImports, tcGlobals, prefixPathOpt, tcState, inputs)
17351735
| TypeCheckingMode.ParallelCheckingOfBackedImplFiles ->

tests/ParallelTypeCheckingTests/Code/DependencyResolution.fs

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -260,31 +260,31 @@ module internal DependencyResolution =
260260
}
261261

262262
/// <summary>
263-
/// Calculate and print some stats about the expected parallelism factor of a dependency graph
263+
/// Calculate and print some statistics about the expected parallelism factor of a dependency graph
264264
/// </summary>
265265
let analyseEfficiency (result: DepsResult) : unit =
266266
let graph = result.Graph
267-
let totalSize1 = graph |> Seq.sumBy (fun (KeyValue (_k, v)) -> v.Length)
267+
let edgeCount = graph |> Seq.sumBy (fun (KeyValue (_k, v)) -> v.Length)
268268
let t = graph |> Graph.transitive
269-
let totalSize2 = t |> Seq.sumBy (fun (KeyValue (_k, v)) -> v.Length)
269+
let edgeCountTransitive = t |> Seq.sumBy (fun (KeyValue (_k, v)) -> v.Length)
270270

271-
printfn $"Non-transitive size: {totalSize1}, transitive size: {totalSize2}"
271+
log $"Non-transitive edge count: {edgeCount}, transitive edge count: {edgeCountTransitive}"
272272

273-
let totalFileSize = result.Files |> Array.sumBy (fun file -> int64 (file.CodeSize))
273+
let fileCount = result.Files.Length
274274

275-
// Use depth-first search to calculate 'depth' of each file
275+
// Use depth-first search to calculate 'depth' of a file
276276
let rec depthDfs =
277277
Utils.memoize (fun (file: File) ->
278278
let deepestChild =
279279
match result.Graph[file] with
280-
| [||] -> 0L
280+
| [||] -> 0
281281
| d -> d |> Array.map depthDfs |> Array.max
282282

283-
let depth = int64 (file.CodeSize) + deepestChild
283+
let depth = 1 + deepestChild
284284
depth)
285285

286286
// Run DFS for every file node, collect the maximum depth found
287287
let maxDepth = result.Files |> Array.map (fun f -> depthDfs f.File) |> Array.max
288288

289289
log
290-
$"Total file size: {totalFileSize}. Max depth: {maxDepth}. Max Depth/Size = %.1f{100.0 * double (maxDepth) / double (totalFileSize)}%%"
290+
$"File count: {fileCount}. Longest path: {maxDepth}. Longest path/File count (a weak proxy for level of parallelism) = %.1f{100.0 * double maxDepth / double fileCount}%%"

tests/ParallelTypeCheckingTests/Code/FileInfoGathering.fs

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,6 @@ let internal gatherBackingInfo (files: SourceFiles) : Files =
2323

2424
{
2525
Idx = FileIdx.make i
26-
Code = "no code here" // TODO
2726
AST = ASTOrFsix.AST f.AST
2827
FsiBacked = fsiBacked
2928
})

tests/ParallelTypeCheckingTests/Code/Parallel.fs

Lines changed: 12 additions & 99 deletions
Original file line numberDiff line numberDiff line change
@@ -4,82 +4,8 @@
44

55
open System
66
open System.Collections.Concurrent
7-
open System.Collections.Generic
87
open System.Threading
98

10-
/// The agent handles two kind of messages - the 'Start' message is sent
11-
/// when the caller wants to start a new work item. The 'Finished' message
12-
/// is sent (by the agent itself) when one work item is completed.
13-
type LimitAgentMessage =
14-
| Start of Async<unit>
15-
| Finished
16-
17-
/// A function that takes the limit - the maximal number of operations it
18-
/// will run in parallel - and returns an agent that accepts new
19-
/// tasks via the 'Start' message
20-
let threadingLimitAgent limit (ct: CancellationToken) =
21-
let act (inbox: MailboxProcessor<LimitAgentMessage>) =
22-
async {
23-
// Keep number of items running & queue of items to run later
24-
// NOTE: We keep an explicit queue, so that we can e.g. start dropping
25-
// items if there are too many requests (or do something else)
26-
// NOTE: The loop is only accessed from one thread at each time
27-
// so we can just use non-thread-safe queue & mutation
28-
let queue = Queue<_>()
29-
let mutable count = 0
30-
31-
while true do
32-
let! msg = inbox.Receive()
33-
// When we receive Start, add the work to the queue
34-
// When we receive Finished, do count--
35-
match msg with
36-
| Start work -> queue.Enqueue(work)
37-
| Finished -> count <- count + 1
38-
// After something happened, we check if we can
39-
// start a next task from the queue
40-
if count < limit && queue.Count > 0 then
41-
count <- count + 1
42-
let work = queue.Dequeue()
43-
// Start it in a thread pool (on background)
44-
Async.Start(
45-
async {
46-
do! work
47-
inbox.Post(Finished)
48-
}
49-
)
50-
}
51-
52-
MailboxProcessor.Start(act, ct)
53-
54-
// TODO Test this version
55-
/// Untested version that uses MailboxProcessor.
56-
/// See http://www.fssnip.net/nX/title/Limit-degree-of-parallelism-using-an-agent for implementation
57-
let processInParallelUsingMailbox
58-
(firstItems: 'Item[])
59-
(work: 'Item -> Async<'Item[]>)
60-
(parallelism: int)
61-
(notify: int -> unit)
62-
(ct: CancellationToken)
63-
: unit =
64-
let processedCountLock = Object()
65-
let mutable processedCount = 0
66-
let agent = threadingLimitAgent parallelism ct
67-
68-
let rec processItem item =
69-
async {
70-
let! toSchedule = work item
71-
72-
let pc =
73-
lock processedCountLock (fun () ->
74-
processedCount <- processedCount + 1
75-
processedCount)
76-
77-
notify pc
78-
toSchedule |> Array.iter (fun x -> agent.Post(Start(processItem x)))
79-
}
80-
81-
firstItems |> Array.iter (fun x -> agent.Post(Start(processItem x)))
82-
839
// TODO Could replace with MailboxProcessor+Tasks/Asyncs instead of BlockingCollection + Threads
8410
// See http://www.fssnip.net/nX/title/Limit-degree-of-parallelism-using-an-agent
8511
/// Process items in parallel, allow more work to be scheduled as a result of finished work,
@@ -88,51 +14,38 @@ let processInParallel
8814
(firstItems: 'Item[])
8915
(work: 'Item -> 'Item[])
9016
(parallelism: int)
91-
(stop: int -> bool)
17+
(shouldStop: int -> bool)
9218
(ct: CancellationToken)
93-
(_itemToString)
19+
(_itemToString : 'Item -> string)
9420
: unit =
9521
let bc = new BlockingCollection<'Item>()
9622
firstItems |> Array.iter bc.Add
9723
let processedCountLock = Object()
9824
let mutable processedCount = 0
9925

10026
let processItem item =
101-
// printfn $"Processing {itemToString item}"
27+
printfn $"Processing {_itemToString item}"
10228
let toSchedule = work item
10329

10430
let processedCount =
10531
lock processedCountLock (fun () ->
10632
processedCount <- processedCount + 1
10733
processedCount)
108-
// printfn $"ToSchedule {toSchedule.Length}"
109-
toSchedule |> Array.iter (fun next -> bc.Add(next))
34+
let toScheduleString =
35+
toSchedule
36+
|> Array.map _itemToString
37+
|> fun names -> String.Join(", ", names)
38+
printfn $"Scheduling {toSchedule.Length} items: {toScheduleString}"
39+
toSchedule |> Array.iter bc.Add
11040
processedCount
11141

11242
// TODO Could avoid workers with some semaphores
11343
let workerWork () : unit =
11444
for node in bc.GetConsumingEnumerable(ct) do
11545
if not ct.IsCancellationRequested then // improve
11646
let processedCount = processItem node
117-
118-
if stop processedCount then
47+
if shouldStop processedCount then
11948
bc.CompleteAdding()
12049

121-
Array.Parallel.map workerWork (Array.init parallelism (fun _ -> ())) |> ignore // use cancellation
122-
()
123-
124-
let test () =
125-
// Create an agent that can run at most 2 tasks in parallel
126-
// and send 10 work items that take 1 second to the queue
127-
use cts = new CancellationTokenSource()
128-
let agent = threadingLimitAgent 2 cts.Token
129-
130-
for i in 0..10 do
131-
agent.Post(
132-
Start(
133-
async {
134-
do! Async.Sleep(1000)
135-
printfn $"Finished: %d{i}"
136-
}
137-
)
138-
)
50+
// TODO Do we need to handle cancellation given that workers do it already?
51+
Array.Parallel.map workerWork (Array.init parallelism (fun _ -> ())) |> ignore

0 commit comments

Comments (0)