Remove obsolete/unused code #155

Merged: 2 commits, merged on Jan 10, 2023
doc/conf.py (0 additions & 4 deletions)

@@ -12,10 +12,6 @@
 # All configuration values have a default; values that are commented out
 # serve to show the default.
 
-import sys
-import os
-import shlex
-
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.

doc/examples/bench_time_func.py (1 addition & 1 deletion)

@@ -6,7 +6,7 @@ def bench_dict(loops, mydict):
     range_it = range(loops)
     t0 = pyperf.perf_counter()
 
-    for loops in range_it:
+    for _ in range_it:
         mydict['0']
         mydict['100']
         mydict['200']

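Note: the old loop variable rebound the function's own `loops` parameter; `_` avoids the shadowing and signals the value is unused. A minimal illustration of the pitfall (hypothetical snippet, not from this PR):

    def bench(loops):
        for loops in range(loops):  # rebinds the parameter each iteration
            pass
        return loops                # no longer the argument that was passed in

    print(bench(5))  # prints 4, not 5
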
pyperf/__main__.py (2 additions & 3 deletions)

@@ -357,7 +357,7 @@ def group_by_name_ignored(self):
             yield (suite, ignored)
 
 
-def load_benchmarks(args, name=True):
+def load_benchmarks(args):
     data = Benchmarks()
     data.load_benchmark_suites(args.filenames)
     if getattr(args, 'benchmarks', None):
@@ -681,7 +681,6 @@ def cmd_convert(args):
                   file=sys.stderr)
             sys.exit(1)
         except TypeError:
-            raise
             print("ERROR: Metadata %r of benchmark %r is not an integer"
                   % (name, benchmark.get_name()),
                   file=sys.stderr)
@@ -699,7 +698,7 @@ def cmd_convert(args):
 
 
 def cmd_slowest(args):
-    data = load_benchmarks(args, name=False)
+    data = load_benchmarks(args)
     nslowest = args.n
 
     use_title = (data.get_nsuite() > 1)

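Note: the unused `name` parameter disappears from both the definition and the `cmd_slowest` call site, and the bare `raise` at the top of the `except TypeError:` block had made the error message below it unreachable. A minimal sketch of that dead-code pattern (hypothetical, not from this PR):

    def convert(value):
        try:
            return int(value)
        except ValueError:
            raise                      # re-raises immediately...
            print("ERROR: bad value")  # ...so this line can never execute

    try:
        convert("oops")
    except ValueError:
        print("caught; no ERROR line was printed")
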
pyperf/_bench.py (1 addition & 2 deletions)

@@ -521,8 +521,7 @@ def _as_json(self, suite_metadata):
         metadata = self._get_common_metadata()
         common_metadata = dict(metadata, **suite_metadata)
 
-        data = {}
-        data['runs'] = [run._as_json(common_metadata) for run in self._runs]
+        data = {'runs': [run._as_json(common_metadata) for run in self._runs]}
         metadata = _exclude_common_metadata(metadata, suite_metadata)
         if metadata:
             data['metadata'] = metadata

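Note: collapsing the empty-dict-then-assign pair into one literal is purely cosmetic; both spellings build the same mapping. A quick check (hypothetical values, not from this PR):

    runs = [1.0, 2.0, 3.0]
    data1 = {'runs': [r * 2 for r in runs]}
    data2 = {}
    data2['runs'] = [r * 2 for r in runs]
    print(data1 == data2)  # True
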
pyperf/_cli.py (2 additions & 6 deletions)

@@ -87,7 +87,6 @@ def format_run(bench, run_index, run, common_metadata=None, raw=False,
                 loops, value = warmup
                 raw_value = value * (loops * inner_loops)
                 if raw:
-                    text = format_value(raw_value)
                     text = ("%s (loops: %s)"
                             % (format_value(raw_value),
                                format_number(loops)))
@@ -273,8 +272,7 @@ def format_stats(bench, lines):
     lines.append('')
 
     # Minimum
-    table = []
-    table.append(("Minimum", bench.format_value(min(values))))
+    table = [("Minimum", bench.format_value(min(values)))]
 
     # Median +- MAD
     median = bench.median()
@@ -382,8 +380,6 @@ def value_bucket(value):
 
     value_width = max([len(bench.format_value(bucket * value_k))
                        for bucket in range(bucket_min, bucket_max + 1)])
-    width = columns - value_width
-
     line = ': %s #' % count_max
     width = columns - (value_width + len(line))
     if not extend:
@@ -517,7 +513,7 @@ def format_result_value(bench):
     return _format_result_value(bench)
 
 
-def format_result(bench, prefix=True):
+def format_result(bench):
     loops = None
     warmups = None
     for run in bench._runs:

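Note: the removals in the first and third hunks are dead stores: `text` and `width` were assigned and then overwritten before any read. A minimal illustration (hypothetical, not from this PR):

    def describe(value, loops):
        text = str(value)                         # dead store: never read
        text = "%s (loops: %s)" % (value, loops)  # overwrites it immediately
        return text

    print(describe(1.5, 10))  # 1.5 (loops: 10)
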
pyperf/_collect_metadata.py (1 addition & 4 deletions)

@@ -233,7 +233,6 @@ def collect_cpu_freq(metadata, cpus):
         # Example: "processor 0: version = 00, identification = [...]"
         match = re.match(r'^processor ([0-9]+): ', line)
         if match is None:
-            raise Exception
             # unknown /proc/cpuinfo format: silently ignore and exit
             return
 
@@ -410,9 +409,7 @@ def collect_cpu_metadata(metadata):
 
 
 def collect_metadata(process=True):
-    metadata = {}
-    metadata['perf_version'] = pyperf.__version__
-    metadata['date'] = format_datetime(datetime.datetime.now())
+    metadata = {'perf_version': pyperf.__version__, 'date': format_datetime(datetime.datetime.now())}
 
     collect_system_metadata(metadata)
     collect_cpu_metadata(metadata)

pyperf/_compare.py (1 addition & 3 deletions)

@@ -284,9 +284,7 @@ def sort_key(results):
         for item in self.all_results[0]:
             headers.append(item.changed.name)
 
-        all_norm_means = []
-        for column in headers[2:]:
-            all_norm_means.append([])
+        all_norm_means = [[] for _ in range(len(headers[2:]))]
 
         rows = []
         not_significant = []

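Note: the comprehension builds one independent inner list per column. The shorter-looking `[[]] * n` would be wrong here, because it repeats a reference to a single list. A minimal illustration (hypothetical, not from this PR):

    n = 3
    aliased = [[]] * n                    # three references to ONE list
    independent = [[] for _ in range(n)]  # three separate lists

    aliased[0].append('x')
    independent[0].append('x')

    print(aliased)      # [['x'], ['x'], ['x']]
    print(independent)  # [['x'], [], []]
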
pyperf/_process_time.py (1 addition & 3 deletions)

@@ -60,7 +60,7 @@ def bench_process(loops, args, kw, profile_filename=None):
         temp_profile_filename = tempfile.mktemp()
         args = [args[0], "-m", "cProfile", "-o", temp_profile_filename] + args[1:]
 
-    for loop in range_it:
+    for _ in range_it:
         start_rss = get_max_rss()
 
         proc = subprocess.Popen(args, **kw)
@@ -75,8 +75,6 @@ def bench_process(loops, args, kw, profile_filename=None):
             os.unlink(temp_profile_filename)
             sys.exit(exitcode)
 
-        proc = None
-
         rss = get_max_rss() - start_rss
         max_rss = max(max_rss, rss)

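Note: dropping `proc = None` is safe because the name is rebound by `subprocess.Popen(...)` on the next iteration anyway; under CPython's reference counting, rebinding a name releases the old object just as an explicit `= None` does. A sketch of that behaviour (hypothetical, not from this PR):

    import weakref

    class Obj:
        pass

    o = Obj()
    r = weakref.ref(o)
    o = Obj()           # rebinding drops the last reference to the first Obj
    print(r() is None)  # True under CPython: it was collected immediately
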
pyperf/_runner.py (4 additions & 4 deletions)

@@ -71,7 +71,7 @@ class Runner:
 
     # Default parameters are chosen to have approximatively a run of 0.5 second
     # and so a total duration of 5 seconds by default
-    def __init__(self, values=None, warmups=None, processes=None,
+    def __init__(self, values=None, processes=None,
                  loops=0, min_time=0.1, metadata=None,
                  show_name=True,
                  program_args=None, add_cmdline_args=None,
@@ -485,7 +485,7 @@ def bench_time_func(self, name, time_func, *args, **kwargs):
         if self.args.profile:
             profiler, time_func = profiling_wrapper(time_func)
 
-        def task_func(task, loops):
+        def task_func(_, loops):
             return time_func(loops, *args)
 
         task = WorkerProcessTask(self, name, task_func, metadata)
@@ -514,7 +514,7 @@ def bench_func(self, name, func, *args, **kwargs):
         if self.args.profile:
             profiler, func = profiling_wrapper(func)
 
-        def task_func(task, loops):
+        def task_func(_, loops):
             # use fast local variables
             local_timer = time.perf_counter
             local_func = func
@@ -557,7 +557,7 @@ def bench_async_func(self, name, func, *args, **kwargs):
         if self.args.profile:
             profiler, func = profiling_wrapper(func)
 
-        def task_func(task, loops):
+        def task_func(_, loops):
             if loops != 1:
                 async def main():
                     # use fast local variables

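Note: the nested `task_func` callbacks never read their first argument (the task object), so `_` documents that the slot is required by the caller but the value is ignored. A minimal illustration (hypothetical names, not pyperf's actual API):

    def run_callback(task_func, loops):
        # the caller always passes a task object first, used or not
        return task_func(object(), loops)

    def task_func(_, loops):  # accepts the slot, ignores the value
        return sum(range(loops))

    print(run_callback(task_func, 10))  # 45
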
pyperf/_system.py (1 addition & 1 deletion)

@@ -214,7 +214,7 @@ def write_msr(self, cpu, reg_num, value):
         fd = os.open(path, os.O_WRONLY)
         try:
             if hasattr(os, 'pwrite'):
-                data = os.pwrite(fd, data, reg_num)
+                os.pwrite(fd, data, reg_num)
             else:
                 os.lseek(fd, reg_num, os.SEEK_SET)
                 os.write(fd, data)

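Note: `os.pwrite()` returns the number of bytes written, so the old assignment replaced the `data` buffer (bytes) with an int. Discarding the return value keeps `data` meaning what its name says. A small demonstration (hypothetical file; assumes a POSIX system where `os.pwrite` exists):

    import os
    import tempfile

    fd, path = tempfile.mkstemp()
    data = b"\x01\x02\x03"
    written = os.pwrite(fd, data, 0)  # write 3 bytes at offset 0
    print(written)                    # 3: an int, not the buffer
    os.close(fd)
    os.unlink(path)
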
pyperf/_timeit.py (1 addition & 1 deletion)

@@ -108,7 +108,7 @@ def make_inner(self):
         exec(code, global_ns, local_ns)
         return local_ns["inner"]
 
-    def update_linecache(self, file=None):
+    def update_linecache(self):
         import linecache
 
         linecache.cache[self.filename] = (len(self.src),

pyperf/tests/test_bench.py (1 addition & 1 deletion)

@@ -375,7 +375,7 @@ def test_stats(self):
         self.assertEqual(bench.median_abs_dev(), 24.0)
 
     def test_stats_same(self):
-        values = [5.0 for i in range(10)]
+        values = [5.0 for _ in range(10)]
         run = create_run(values)
         bench = pyperf.Benchmark([run])
         self.assertEqual(bench.mean(), 5.0)

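Note: `_` is the conventional name for an unused comprehension variable. As an aside, for an immutable constant like a float, `[5.0] * 10` is equivalent and shorter; repetition only bites with mutable elements, as in the `_compare.py` example above (hypothetical snippet, not from this PR):

    a = [5.0 for _ in range(10)]
    b = [5.0] * 10
    print(a == b)  # True
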
pyperf/tests/test_examples.py (1 addition & 1 deletion)

@@ -23,7 +23,7 @@ def tearDownClass(cls):
         if not_tested:
             raise Exception("not tested scripts: %s" % sorted(not_tested))
 
-    def check_command(self, script, args, nproc=3):
+    def check_command(self, script, args):
         self.TESTED.add(script)
         script = os.path.join(EXAMPLES_DIR, script)