10 changes: 7 additions & 3 deletions lib/benchmark_runner.rb
@@ -79,16 +79,20 @@ def render_graph(json_path)
   end
 
   # Checked system - error or return info if the command fails
-  def check_call(command, env: {}, raise_error: true, quiet: false)
+  def check_call(command, env: {}, raise_error: true, quiet: ENV['BENCHMARK_QUIET'] == '1')
     puts("+ #{command}") unless quiet
 
     result = {}
 
-    result[:success] = system(env, command)
+    if quiet
+      result[:success] = system(env, command, out: File::NULL, err: File::NULL)
+    else
+      result[:success] = system(env, command)
+    end
     result[:status] = $?
 
     unless result[:success]
-      puts "Command #{command.inspect} failed with exit code #{result[:status].exitstatus} in directory #{Dir.pwd}"
+      puts "Command #{command.inspect} failed with exit code #{result[:status].exitstatus} in directory #{Dir.pwd}" unless quiet
       raise RuntimeError.new if raise_error
     end
 
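Taken together, a failing command in quiet mode now produces no console output at all and reports only through its return value. A rough usage sketch, assuming check_call returns the result hash it builds (the surrounding code suggests this, but the truncated hunk does not show the return):

# Illustrative only, not part of the diff: with BENCHMARK_QUIET=1 set,
# check_call defaults to quiet, so the "+ command" echo, the child process
# output, and the failure message are all suppressed.
ENV['BENCHMARK_QUIET'] = '1'
result = check_call("ruby -e 'exit 1'", raise_error: false)
result[:success]            # => false
result[:status].exitstatus  # => 1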
107 changes: 107 additions & 0 deletions lib/benchmark_runner/cli.rb
@@ -0,0 +1,107 @@
+# frozen_string_literal: true
+
+require 'fileutils'
+require_relative '../argument_parser'
+require_relative '../cpu_config'
+require_relative '../benchmark_runner'
+require_relative '../benchmark_suite'
+require_relative '../results_table_builder'
+
+module BenchmarkRunner
+  class CLI
+    attr_reader :args
+
+    def self.run(argv = ARGV)
+      args = ArgumentParser.parse(argv)
+      new(args).run
+    end
+
+    def initialize(args)
+      @args = args
+    end
+
+    def run
+      CPUConfig.configure_for_benchmarking(turbo: args.turbo)
+
+      # Create the output directory
+      FileUtils.mkdir_p(args.out_path)
+
+      ruby_descriptions = {}
+
+      # Benchmark with and without YJIT
+      bench_start_time = Time.now.to_f
+      bench_data = {}
+      bench_failures = {}
+      args.executables.each do |name, executable|
+        ruby_descriptions[name] = `#{executable.shelljoin} -v`.chomp
+
+        suite = BenchmarkSuite.new(
+          ruby: executable,
+          ruby_description: ruby_descriptions[name],
+          categories: args.categories,
+          name_filters: args.name_filters,
+          out_path: args.out_path,
+          harness: args.harness,
+          pre_init: args.with_pre_init,
+          no_pinning: args.no_pinning
+        )
+        bench_data[name], failures = suite.run
+        # Make it easier to query later.
+        bench_failures[name] = failures unless failures.empty?
+      end
+
+      bench_end_time = Time.now.to_f
+      bench_total_time = (bench_end_time - bench_start_time).to_i
+      puts("Total time spent benchmarking: #{bench_total_time}s")
+
+      if !bench_failures.empty?
+        puts("Failed benchmarks: #{bench_failures.map { |k, v| v.size }.sum}")
+      end
+
+      puts
+
+      # Build results table
+      builder = ResultsTableBuilder.new(
+        executable_names: ruby_descriptions.keys,
+        bench_data: bench_data,
+        include_rss: args.rss
+      )
+      table, format = builder.build
+
+      output_path = BenchmarkRunner.output_path(args.out_path, out_override: args.out_override)
+
+      # Save the raw data as JSON
+      out_json_path = BenchmarkRunner.write_json(output_path, ruby_descriptions, bench_data)
+
+      # Save data as CSV so we can produce tables/graphs in a spreadsheet program
+      # NOTE: we don't do any number formatting for the output file because
+      # we don't want to lose any precision
+      BenchmarkRunner.write_csv(output_path, ruby_descriptions, table)
+
+      # Save the output in a text file that we can easily refer to
+      output_str = BenchmarkRunner.build_output_text(ruby_descriptions, table, format, bench_failures)
+      out_txt_path = output_path + ".txt"
+      File.open(out_txt_path, "w") { |f| f.write output_str }
+
+      # Print the table to the console, with numbers truncated
+      puts(output_str)
+
+      # Print JSON and PNG file names
+      puts
+      puts "Output:"
+      puts out_json_path
+
+      if args.graph
+        puts BenchmarkRunner.render_graph(out_json_path)
+      end
+
+      if !bench_failures.empty?
+        puts "\nFailed benchmarks:"
+        bench_failures.each do |name, data|
+          puts " #{name}: #{data.keys.join(", ")}"
+        end
+        exit(1)
+      end
+    end
+  end
+end
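The CLI only depends on args exposing the readers used in run above. A purely hypothetical stand-in (this diff does not show what ArgumentParser.parse actually returns) makes that contract explicit:

# Hypothetical stand-in for ArgumentParser.parse's return value; the field
# names are taken from the reader calls in CLI#run, the values are placeholders.
Args = Struct.new(
  :turbo, :out_path, :executables, :categories, :name_filters,
  :harness, :with_pre_init, :no_pinning, :rss, :out_override, :graph,
  keyword_init: true
)

args = Args.new(
  turbo: false,
  out_path: "data",
  executables: { "ruby" => ["ruby"], "yjit" => ["ruby", "--yjit"] },  # name => argv array (shelljoin is called on it)
  categories: [], name_filters: [],
  harness: "harness", with_pre_init: nil, no_pinning: false,
  rss: true, out_override: nil, graph: false
)
BenchmarkRunner::CLI.new(args).run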
98 changes: 2 additions & 96 deletions run_benchmarks.rb
@@ -1,99 +1,5 @@
 #!/usr/bin/env ruby
 
-require 'pathname'
-require 'fileutils'
-require 'csv'
-require 'json'
-require 'shellwords'
-require 'rbconfig'
-require 'etc'
-require 'yaml'
-require_relative 'lib/cpu_config'
-require_relative 'lib/benchmark_runner'
-require_relative 'lib/benchmark_suite'
-require_relative 'lib/argument_parser'
-require_relative 'lib/results_table_builder'
+require_relative 'lib/benchmark_runner/cli'
 
-args = ArgumentParser.parse(ARGV)
-
-CPUConfig.configure_for_benchmarking(turbo: args.turbo)
-
-# Create the output directory
-FileUtils.mkdir_p(args.out_path)
-
-ruby_descriptions = {}
-
-# Benchmark with and without YJIT
-bench_start_time = Time.now.to_f
-bench_data = {}
-bench_failures = {}
-args.executables.each do |name, executable|
-  ruby_descriptions[name] = `#{executable.shelljoin} -v`.chomp
-
-  suite = BenchmarkSuite.new(
-    ruby: executable,
-    ruby_description: ruby_descriptions[name],
-    categories: args.categories,
-    name_filters: args.name_filters,
-    out_path: args.out_path,
-    harness: args.harness,
-    pre_init: args.with_pre_init,
-    no_pinning: args.no_pinning
-  )
-  bench_data[name], failures = suite.run
-  # Make it easier to query later.
-  bench_failures[name] = failures unless failures.empty?
-end
-
-bench_end_time = Time.now.to_f
-bench_total_time = (bench_end_time - bench_start_time).to_i
-puts("Total time spent benchmarking: #{bench_total_time}s")
-
-if !bench_failures.empty?
-  puts("Failed benchmarks: #{bench_failures.map { |k, v| v.size }.sum}")
-end
-
-puts
-
-# Build results table
-builder = ResultsTableBuilder.new(
-  executable_names: ruby_descriptions.keys,
-  bench_data: bench_data,
-  include_rss: args.rss
-)
-table, format = builder.build
-
-output_path = BenchmarkRunner.output_path(args.out_path, out_override: args.out_override)
-
-# Save the raw data as JSON
-out_json_path = BenchmarkRunner.write_json(output_path, ruby_descriptions, bench_data)
-
-# Save data as CSV so we can produce tables/graphs in a spreadsheet program
-# NOTE: we don't do any number formatting for the output file because
-# we don't want to lose any precision
-BenchmarkRunner.write_csv(output_path, ruby_descriptions, table)
-
-# Save the output in a text file that we can easily refer to
-output_str = BenchmarkRunner.build_output_text(ruby_descriptions, table, format, bench_failures)
-out_txt_path = output_path + ".txt"
-File.open(out_txt_path, "w") { |f| f.write output_str }
-
-# Print the table to the console, with numbers truncated
-puts(output_str)
-
-# Print JSON and PNG file names
-puts
-puts "Output:"
-puts out_json_path
-
-if args.graph
-  puts BenchmarkRunner.render_graph(out_json_path)
-end
-
-if !bench_failures.empty?
-  puts "\nFailed benchmarks:"
-  bench_failures.each do |name, data|
-    puts " #{name}: #{data.keys.join(", ")}"
-  end
-  exit(1)
-end
+BenchmarkRunner::CLI.run(ARGV)
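Since the CLI calls exit(1) when any benchmark fails, a caller that embeds it in a larger Ruby process can trap SystemExit instead of letting the whole process die. A small sketch, not part of this PR:

# Driving the CLI from another Ruby script rather than shelling out to
# run_benchmarks.rb; the rescue only fires when the CLI exits (e.g. on failures).
require_relative 'lib/benchmark_runner/cli'

begin
  BenchmarkRunner::CLI.run(ARGV)  # argv can also be built in code
rescue SystemExit => e
  warn "benchmark run exited with status #{e.status}"
end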