|
5 | 5 | package bench
|
6 | 6 |
|
7 | 7 | import (
|
8 |
| - "flag" |
9 | 8 | "fmt"
|
10 |
| - "os" |
11 |
| - "runtime" |
12 |
| - "runtime/pprof" |
13 | 9 | "testing"
|
14 | 10 |
|
15 | 11 | "golang.org/x/tools/gopls/internal/hooks"
|
16 | 12 | "golang.org/x/tools/internal/lsp/bug"
|
17 |
| - "golang.org/x/tools/internal/lsp/fake" |
18 |
| - . "golang.org/x/tools/internal/lsp/regtest" |
19 | 13 |
|
20 |
| - "golang.org/x/tools/internal/lsp/protocol" |
| 14 | + . "golang.org/x/tools/internal/lsp/regtest" |
21 | 15 | )
|
22 | 16 |
|
23 | 17 | func TestMain(m *testing.M) {
|
@@ -46,180 +40,3 @@ func benchmarkOptions(dir string) []RunOption {
|
// printBenchmarkResults reports a benchmark outcome on stdout in the
// BenchmarkStatistics format, including timing and memory statistics.
func printBenchmarkResults(result testing.BenchmarkResult) {
	timing := result.String()
	memory := result.MemString()
	fmt.Printf("BenchmarkStatistics\t%s\t%s\n", timing, memory)
}
|
49 |
| - |
50 |
| -var iwlOptions struct { |
51 |
| - workdir string |
52 |
| -} |
53 |
| - |
54 |
| -func init() { |
55 |
| - flag.StringVar(&iwlOptions.workdir, "iwl_workdir", "", "if set, run IWL benchmark in this directory") |
56 |
| -} |
57 |
| - |
58 |
// TestBenchmarkIWL benchmarks the initial workspace load: each iteration
// starts a fresh session in -iwl_workdir and tears it down again, so the
// measured work is session startup (including IWL) and shutdown.
// It is skipped unless -iwl_workdir is set.
func TestBenchmarkIWL(t *testing.T) {
	if iwlOptions.workdir == "" {
		t.Skip("-iwl_workdir not configured")
	}

	opts := stressTestOptions(iwlOptions.workdir)
	// Don't skip hooks, so that we can wait for IWL.
	opts = append(opts, SkipHooks(false))

	results := testing.Benchmark(func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			// The run func is deliberately empty: the cost being measured
			// is the session lifecycle itself.
			WithOptions(opts...).Run(t, "", func(t *testing.T, env *Env) {})

		}
	})

	printBenchmarkResults(results)
}
76 |
| - |
77 |
// symbolOptions holds the command-line configuration for the workspace
// symbol benchmark (see TestBenchmarkSymbols).
var symbolOptions struct {
	workdir, query, matcher, style string
	printResults                   bool // if set, dump the symbol query results before benchmarking
}
81 |
| - |
82 |
// init registers the flags that configure the symbol benchmark.
func init() {
	flag.StringVar(&symbolOptions.workdir, "symbol_workdir", "", "if set, run symbol benchmark in this directory")
	flag.StringVar(&symbolOptions.query, "symbol_query", "test", "symbol query to use in benchmark")
	flag.StringVar(&symbolOptions.matcher, "symbol_matcher", "", "symbol matcher to use in benchmark")
	flag.StringVar(&symbolOptions.style, "symbol_style", "", "symbol style to use in benchmark")
	flag.BoolVar(&symbolOptions.printResults, "symbol_print_results", false, "whether to print symbol query results")
}
89 |
| - |
90 |
// TestBenchmarkSymbols benchmarks workspace/symbol queries against the
// workspace rooted at -symbol_workdir, using the query/matcher/style
// configured by the -symbol_* flags. It issues one warm-up request to
// populate caches, then benchmarks repeated identical queries.
// It is skipped unless -symbol_workdir is set.
func TestBenchmarkSymbols(t *testing.T) {
	if symbolOptions.workdir == "" {
		t.Skip("-symbol_workdir not configured")
	}

	opts := benchmarkOptions(symbolOptions.workdir)
	// Only override matcher/style settings when the corresponding flags
	// were set, so defaults otherwise apply.
	settings := make(Settings)
	if symbolOptions.matcher != "" {
		settings["symbolMatcher"] = symbolOptions.matcher
	}
	if symbolOptions.style != "" {
		settings["symbolStyle"] = symbolOptions.style
	}
	opts = append(opts, settings)

	WithOptions(opts...).Run(t, "", func(t *testing.T, env *Env) {
		// We can't Await in this test, since we have disabled hooks. Instead, run
		// one symbol request to completion to ensure all necessary cache entries
		// are populated.
		symbols, err := env.Editor.Server.Symbol(env.Ctx, &protocol.WorkspaceSymbolParams{
			Query: symbolOptions.query,
		})
		if err != nil {
			t.Fatal(err)
		}

		if symbolOptions.printResults {
			fmt.Println("Results:")
			for i := 0; i < len(symbols); i++ {
				fmt.Printf("\t%d. %s (%s)\n", i, symbols[i].Name, symbols[i].ContainerName)
			}
		}

		// The timed region: repeat the same query against the warm server.
		results := testing.Benchmark(func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				if _, err := env.Editor.Server.Symbol(env.Ctx, &protocol.WorkspaceSymbolParams{
					Query: symbolOptions.query,
				}); err != nil {
					t.Fatal(err)
				}
			}
		})
		printBenchmarkResults(results)
	})
}
135 |
| - |
136 |
// Flags configuring the didChange benchmark (and TestPrintMemStats,
// which reuses -didchange_dir).
var (
	benchDir     = flag.String("didchange_dir", "", "If set, run benchmarks in this dir. Must also set didchange_file.")
	benchFile    = flag.String("didchange_file", "", "The file to modify")
	benchProfile = flag.String("didchange_cpuprof", "", "file to write cpu profiling data to")
)
141 |
| - |
142 |
// TestBenchmarkDidChange benchmarks modifications of a single file by making
// synthetic modifications in a comment. It controls pacing by waiting for the
// server to actually start processing the didChange notification before
// proceeding. Notably it does not wait for diagnostics to complete.
//
// Run it by passing -didchange_dir and -didchange_file, where -didchange_dir
// is the path to a workspace root, and -didchange_file is the
// workspace-relative path to a file to modify. e.g.:
//
//	go test -run=TestBenchmarkDidChange \
//	 -didchange_dir=path/to/kubernetes \
//	 -didchange_file=pkg/util/hash/hash.go
func TestBenchmarkDidChange(t *testing.T) {
	if *benchDir == "" {
		t.Skip("-didchange_dir is not set")
	}
	if *benchFile == "" {
		t.Fatal("-didchange_file must be set if -didchange_dir is set")
	}

	opts := benchmarkOptions(*benchDir)
	WithOptions(opts...).Run(t, "", func(_ *testing.T, env *Env) {
		env.OpenFile(*benchFile)
		// Wait for the open to be fully processed before editing.
		env.Await(env.DoneWithOpen())
		// Insert the text we'll be modifying at the top of the file.
		env.EditBuffer(*benchFile, fake.Edit{Text: "// __REGTEST_PLACEHOLDER_0__\n"})

		// Run the profiler after the initial load,
		// across all benchmark iterations.
		if *benchProfile != "" {
			profile, err := os.Create(*benchProfile)
			if err != nil {
				t.Fatal(err)
			}
			defer profile.Close()
			if err := pprof.StartCPUProfile(profile); err != nil {
				t.Fatal(err)
			}
			defer pprof.StopCPUProfile()
		}

		result := testing.Benchmark(func(b *testing.B) {
			for i := 0; i < b.N; i++ {
				// Replace the placeholder comment line with a new one,
				// producing a minimal single-line didChange.
				env.EditBuffer(*benchFile, fake.Edit{
					Start: fake.Pos{Line: 0, Column: 0},
					End:   fake.Pos{Line: 1, Column: 0},
					// Increment
					Text: fmt.Sprintf("// __REGTEST_PLACEHOLDER_%d__\n", i+1),
				})
				// Pace iterations: wait until the server has started
				// processing this change (not until diagnostics finish).
				env.Await(StartedChange(uint64(i + 1)))
			}
		})
		printBenchmarkResults(result)
	})
}
197 |
| - |
198 |
// TestPrintMemStats measures the memory usage of loading a project.
// It uses the same -didchange_dir flag as above.
// Always run it in isolation since it measures global heap usage.
//
// Kubernetes example:
//
//	$ go test -v -run=TestPrintMemStats -didchange_dir=$HOME/w/kubernetes
//	TotalAlloc: 5766 MB
//	HeapAlloc: 1984 MB
//
// Both figures exhibit variance of less than 1%.
func TestPrintMemStats(t *testing.T) {
	if *benchDir == "" {
		t.Skip("-didchange_dir is not set")
	}

	// Load the program...
	opts := benchmarkOptions(*benchDir)
	WithOptions(opts...).Run(t, "", func(_ *testing.T, env *Env) {
		// ...and print the memory usage.
		// GC twice so the reported numbers reflect live data rather than
		// garbage from the load.
		runtime.GC()
		runtime.GC()
		var mem runtime.MemStats
		runtime.ReadMemStats(&mem)
		t.Logf("TotalAlloc:\t%d MB", mem.TotalAlloc/1e6)
		t.Logf("HeapAlloc:\t%d MB", mem.HeapAlloc/1e6)
	})
}
0 commit comments