Commit b6f4d07

gopls/internal/lsp: fix initial diagnostics pass
CL 477979 changed the package diagnostics pass to record all diagnostics resulting from a call to PackageDiagnostics, rather than just those for CompiledGoFiles. However, as a result we stopped storing empty diagnostics sets for files without any package diagnostics. This caused our early diagnostics pass for changed files not to clear diagnostics, and so diagnostics were not cleared until after the diagnosticsDelay expired (at which point the set of reports for a given snapshot is assumed to be complete, and so no report is equivalent to an empty report).

Fix this by explicitly storing empty diagnostics for all package files. Also fix a related bug, where the published hash was not being updated for reports that were skipped.

Unfortunately, this fix is very hard to test with the current setup, because the client has no insight into whether diagnostics are published in the initial pass or second pass. I added a much needed TODO to simplify the entire pass, a goal of which should be to improve testability.

Also optimize diagnostic hashing for the empty set, and remove some unhelpful Trace level logging.

Fixes golang/go#59587

Change-Id: I1cf4c61cadad7933f534199b1ee06ea30a046b1d
Reviewed-on: https://go-review.googlesource.com/c/tools/+/484195
Reviewed-by: Alan Donovan <[email protected]>
TryBot-Result: Gopher Robot <[email protected]>
Run-TryBot: Robert Findley <[email protected]>
gopls-CI: kokoro <[email protected]>
1 parent d03c59d commit b6f4d07
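As a rough illustration of the hashing change described in the commit message, here is a minimal, self-contained Go sketch. It is not gopls code: the diagnostic type, field names, and sort order are simplified placeholders. It shows the two ideas the commit relies on: pre-computing the hash of the empty diagnostics set, and treating an elided report as equivalent to an explicitly empty one.

// Minimal sketch (not gopls code): hashing diagnostics with a pre-computed
// hash for the empty set. Types and names are simplified placeholders.
package main

import (
	"crypto/sha256"
	"fmt"
	"sort"
)

// diagnostic stands in for gopls's source.Diagnostic.
type diagnostic struct {
	Message string
	Source  string
}

// emptyHash caches the hash of zero diagnostics. Publishing no report for a
// source is treated as equivalent to publishing this empty set.
var emptyHash = computeHash()

// hashDiags returns a stable identifier for a set of diagnostics,
// short-circuiting the common empty case.
func hashDiags(diags ...diagnostic) string {
	if len(diags) == 0 {
		return emptyHash
	}
	return computeHash(diags...)
}

// computeHash sorts the diagnostics and hashes labeled fields with
// separators, so distinct field values cannot collide when concatenated.
func computeHash(diags ...diagnostic) string {
	sort.Slice(diags, func(i, j int) bool { return diags[i].Message < diags[j].Message })
	h := sha256.New()
	for _, d := range diags {
		fmt.Fprintf(h, "message: %s\n", d.Message)
		fmt.Fprintf(h, "source: %s\n", d.Source)
	}
	return fmt.Sprintf("%x", h.Sum(nil))
}

func main() {
	// An unstored (elided) report hashes the same as an explicitly empty one,
	// which is why storing empty sets lets the early pass clear stale diagnostics.
	fmt.Println(hashDiags() == emptyHash) // true
	fmt.Println(hashDiags(diagnostic{Message: "undeclared name: x", Source: "compiler"}))
}

The labeled fields and newline separators in the sketch mirror the commit's switch from concatenated "%s%s%s%s" formatting to per-field output in the diff below.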

2 files changed: +74 -25 lines changed


gopls/internal/lsp/cache/analysis.go

+5 -6
@@ -262,8 +262,10 @@ type analyzeSummary struct {
 // TODO(adonovan): generalize and move to a library when we can use generics.
 type actionsMap map[string]*actionSummary
 
-var _ gob.GobEncoder = (actionsMap)(nil)
-var _ gob.GobDecoder = (*actionsMap)(nil)
+var (
+	_ gob.GobEncoder = (actionsMap)(nil)
+	_ gob.GobDecoder = (*actionsMap)(nil)
+)
 
 type actionsMapEntry struct {
 	K string
@@ -443,10 +445,8 @@ func analyzeImpl(ctx context.Context, snapshot *snapshot, analyzers []*analysis.
 	if data, err := filecache.Get(cacheKind, key); err == nil {
 		// cache hit
 		mustDecode(data, &summary)
-
 	} else if err != filecache.ErrNotFound {
 		return nil, bug.Errorf("internal error reading shared cache: %v", err)
-
 	} else {
 		// Cache miss: do the work.
 		var err error
@@ -566,7 +566,6 @@ func analysisCacheKey(analyzers []*analysis.Analyzer, m *source.Metadata, compil
 // actuallyAnalyze implements the cache-miss case.
 // This function does not access the snapshot.
 func actuallyAnalyze(ctx context.Context, analyzers []*analysis.Analyzer, m *source.Metadata, vdeps map[PackageID]*analyzeSummary, compiledGoFiles []source.FileHandle) (*analyzeSummary, error) {
-
 	// Create a local FileSet for processing this package only.
 	fset := token.NewFileSet()
 
@@ -952,7 +951,7 @@ func (act *action) exec() (interface{}, *actionSummary, error) {
 	}
 
 	// Gather analysis Result values from horizontal dependencies.
-	var inputs = make(map[*analysis.Analyzer]interface{})
+	inputs := make(map[*analysis.Analyzer]interface{})
 	for _, dep := range act.hdeps {
 		inputs[dep.a] = dep.result
 	}

gopls/internal/lsp/diagnostics.go

+69 -19
@@ -15,7 +15,6 @@ import (
 	"sync"
 	"time"
 
-	"golang.org/x/tools/gopls/internal/lsp/debug/log"
 	"golang.org/x/tools/gopls/internal/lsp/mod"
 	"golang.org/x/tools/gopls/internal/lsp/protocol"
 	"golang.org/x/tools/gopls/internal/lsp/source"
@@ -28,6 +27,10 @@ import (
 	"golang.org/x/tools/internal/xcontext"
 )
 
+// TODO(rfindley): simplify this very complicated logic for publishing
+// diagnostics. While doing so, ensure that we can test subtle logic such as
+// for multi-pass diagnostics.
+
 // diagnosticSource differentiates different sources of diagnostics.
 //
 // Diagnostics from the same source overwrite each other, whereas diagnostics
@@ -85,7 +88,7 @@ type fileReports struct {
 	mustPublish bool
 
 	// The last stored diagnostics for each diagnostic source.
-	reports map[diagnosticSource]diagnosticReport
+	reports map[diagnosticSource]*diagnosticReport
 }
 
 func (d diagnosticSource) String() string {
@@ -114,17 +117,35 @@ func (d diagnosticSource) String() string {
 }
 
 // hashDiagnostics computes a hash to identify diags.
+//
+// hashDiagnostics mutates its argument (via sorting).
 func hashDiagnostics(diags ...*source.Diagnostic) string {
+	if len(diags) == 0 {
+		return emptyDiagnosticsHash
+	}
+	return computeDiagnosticHash(diags...)
+}
+
+// opt: pre-computed hash for empty diagnostics
+var emptyDiagnosticsHash = computeDiagnosticHash()
+
+// computeDiagnosticHash should only be called from hashDiagnostics.
+//
+// TODO(rfindley): this should use source.Hash.
+func computeDiagnosticHash(diags ...*source.Diagnostic) string {
 	source.SortDiagnostics(diags)
 	h := sha256.New()
 	for _, d := range diags {
 		for _, t := range d.Tags {
-			fmt.Fprintf(h, "%s", t)
+			fmt.Fprintf(h, "tag: %s\n", t)
 		}
 		for _, r := range d.Related {
-			fmt.Fprintf(h, "%s%s%s", r.Location.URI.SpanURI(), r.Message, r.Location.Range)
+			fmt.Fprintf(h, "related: %s %s %s\n", r.Location.URI.SpanURI(), r.Message, r.Location.Range)
 		}
-		fmt.Fprintf(h, "%s%s%s%s", d.Message, d.Range, d.Severity, d.Source)
+		fmt.Fprintf(h, "message: %s\n", d.Message)
+		fmt.Fprintf(h, "range: %s\n", d.Range)
+		fmt.Fprintf(h, "severity: %s\n", d.Severity)
+		fmt.Fprintf(h, "source: %s\n", d.Source)
 	}
 	return fmt.Sprintf("%x", h.Sum(nil))
 }
@@ -171,6 +192,9 @@ func (s *Server) diagnoseSnapshot(snapshot source.Snapshot, changedURIs []span.U
 	//
 	// TODO(rfindley): it would be cleaner to simply put the diagnostic
 	// debouncer on the view, and remove the "key" argument to debouncing.
+	//
+	// TODO(rfindley): debounce should accept a context, so that we don't hold onto
+	// the snapshot when the BackgroundContext is cancelled.
 	if ok := <-s.diagDebouncer.debounce(snapshot.View().Name(), snapshot.SequenceID(), time.After(delay)); ok {
 		s.diagnose(ctx, snapshot, analyzeOpenPackages)
 		s.publishDiagnostics(ctx, true, snapshot)
@@ -280,31 +304,27 @@ func (s *Server) diagnose(ctx context.Context, snapshot source.Snapshot, analyze
 	// Diagnose go.work file.
 	workReports, workErr := work.Diagnostics(ctx, snapshot)
 	if ctx.Err() != nil {
-		log.Trace.Log(ctx, "diagnose cancelled")
 		return
 	}
 	store(workSource, "diagnosing go.work file", workReports, workErr, true)
 
 	// Diagnose go.mod file.
 	modReports, modErr := mod.Diagnostics(ctx, snapshot)
 	if ctx.Err() != nil {
-		log.Trace.Log(ctx, "diagnose cancelled")
 		return
 	}
 	store(modParseSource, "diagnosing go.mod file", modReports, modErr, true)
 
 	// Diagnose go.mod upgrades.
 	upgradeReports, upgradeErr := mod.UpgradeDiagnostics(ctx, snapshot)
 	if ctx.Err() != nil {
-		log.Trace.Log(ctx, "diagnose cancelled")
 		return
 	}
 	store(modCheckUpgradesSource, "diagnosing go.mod upgrades", upgradeReports, upgradeErr, true)
 
 	// Diagnose vulnerabilities.
 	vulnReports, vulnErr := mod.VulnerabilityDiagnostics(ctx, snapshot)
 	if ctx.Err() != nil {
-		log.Trace.Log(ctx, "diagnose cancelled")
 		return
 	}
 	store(modVulncheckSource, "diagnosing vulnerabilities", vulnReports, vulnErr, false)
@@ -479,8 +499,26 @@ func (s *Server) diagnosePkgs(ctx context.Context, snapshot source.Snapshot, toD
 		s.storeDiagnostics(snapshot, uri, analysisSource, adiags2, true)
 	}
 
+	// golang/go#59587: guarantee that we store type-checking diagnostics for every compiled
+	// package file.
+	//
+	// Without explicitly storing empty diagnostics, the eager diagnostics
+	// publication for changed files will not publish anything for files with
+	// empty diagnostics.
+	storedPkgDiags := make(map[span.URI]bool)
+	for _, m := range toDiagnose {
+		for _, uri := range m.CompiledGoFiles {
+			s.storeDiagnostics(snapshot, uri, typeCheckSource, pkgDiags[uri], true)
+			storedPkgDiags[uri] = true
+		}
+	}
 	// Store the package diagnostics.
 	for uri, diags := range pkgDiags {
+		if storedPkgDiags[uri] {
+			continue
+		}
+		// builtin.go exists only for documentation purposes, and is not valid Go code.
+		// Don't report distracting errors
 		if snapshot.IsBuiltin(ctx, uri) {
 			bug.Reportf("type checking reported diagnostics for the builtin file: %v", diags)
 			continue
@@ -549,7 +587,7 @@ func (s *Server) mustPublishDiagnostics(uri span.URI) {
 	if s.diagnostics[uri] == nil {
 		s.diagnostics[uri] = &fileReports{
 			publishedHash: hashDiagnostics(), // Hash for 0 diagnostics.
-			reports: map[diagnosticSource]diagnosticReport{},
+			reports: map[diagnosticSource]*diagnosticReport{},
 		}
 	}
 	s.diagnostics[uri].mustPublish = true
@@ -558,6 +596,8 @@ func (s *Server) mustPublishDiagnostics(uri span.URI) {
 // storeDiagnostics stores results from a single diagnostic source. If merge is
 // true, it merges results into any existing results for this snapshot.
 //
+// Mutates (sorts) diags.
+//
 // TODO(hyangah): investigate whether we can unconditionally overwrite previous report.diags
 // with the new diags and eliminate the need for the `merge` flag.
 func (s *Server) storeDiagnostics(snapshot source.Snapshot, uri span.URI, dsource diagnosticSource, diags []*source.Diagnostic, merge bool) {
@@ -573,10 +613,14 @@ func (s *Server) storeDiagnostics(snapshot source.Snapshot, uri span.URI, dsourc
 	if s.diagnostics[uri] == nil {
 		s.diagnostics[uri] = &fileReports{
 			publishedHash: hashDiagnostics(), // Hash for 0 diagnostics.
-			reports: map[diagnosticSource]diagnosticReport{},
+			reports: map[diagnosticSource]*diagnosticReport{},
 		}
 	}
 	report := s.diagnostics[uri].reports[dsource]
+	if report == nil {
+		report = new(diagnosticReport)
+		s.diagnostics[uri].reports[dsource] = report
+	}
 	// Don't set obsolete diagnostics.
 	if report.snapshotID > snapshot.GlobalID() {
 		return
@@ -588,7 +632,6 @@ func (s *Server) storeDiagnostics(snapshot source.Snapshot, uri span.URI, dsourc
 	for _, d := range diags {
 		report.diags[hashDiagnostics(d)] = d
 	}
-	s.diagnostics[uri].reports[dsource] = report
 }
 
 // clearDiagnosticSource clears all diagnostics for a given source type. It is
@@ -735,6 +778,7 @@ func (s *Server) publishDiagnostics(ctx context.Context, final bool, snapshot so
 			diags = append(diags, d)
 			reportDiags = append(reportDiags, d)
 		}
+
 		hash := hashDiagnostics(reportDiags...)
 		if hash != report.publishedHash {
 			anyReportsChanged = true
@@ -748,7 +792,6 @@ func (s *Server) publishDiagnostics(ctx context.Context, final bool, snapshot so
 			continue
 		}
 
-		source.SortDiagnostics(diags)
 		hash := hashDiagnostics(diags...)
 		if hash == r.publishedHash && !r.mustPublish {
 			// Update snapshotID to be the latest snapshot for which this diagnostic
@@ -768,15 +811,22 @@ func (s *Server) publishDiagnostics(ctx context.Context, final bool, snapshot so
 			r.publishedHash = hash
 			r.mustPublish = false // diagnostics have been successfully published
 			r.publishedSnapshotID = snapshot.GlobalID()
-			for dsource, hash := range reportHashes {
-				report := r.reports[dsource]
-				report.publishedHash = hash
-				r.reports[dsource] = report
+			// When we publish diagnostics for a file, we must update the
+			// publishedHash for every report, not just the reports that were
+			// published. Eliding a report is equivalent to publishing empty
+			// diagnostics.
+			for dsource, report := range r.reports {
+				if hash, ok := reportHashes[dsource]; ok {
+					report.publishedHash = hash
+				} else {
+					// The report was not (yet) stored for this snapshot. Record that we
+					// published no diagnostics from this source.
+					report.publishedHash = hashDiagnostics()
+				}
 			}
 		} else {
 			if ctx.Err() != nil {
 				// Publish may have failed due to a cancelled context.
-				log.Trace.Log(ctx, "publish cancelled")
 				return
 			}
 			event.Error(ctx, "publishReports: failed to deliver diagnostic", err, tag.URI.Of(uri))
@@ -852,7 +902,7 @@ func (s *Server) Diagnostics() map[string][]string {
 	return ans
 }
 
-func auxStr(v *source.Diagnostic, d diagnosticReport, typ diagnosticSource) string {
+func auxStr(v *source.Diagnostic, d *diagnosticReport, typ diagnosticSource) string {
 	// Tags? RelatedInformation?
 	msg := fmt.Sprintf("(%s)%q(source:%q,code:%q,severity:%s,snapshot:%d,type:%s)",
 		v.Range, v.Message, v.Source, v.Code, v.Severity, d.snapshotID, typ)
