This repository was archived by the owner on Dec 6, 2023. It is now read-only.

Merged
2 changes: 1 addition & 1 deletion .travis.yml
@@ -17,7 +17,7 @@ install:
 # Useful for debugging any issues with conda
 - conda info -a

-- conda create -q -n test-environment python=$PYTHON_VERSION numpy scipy pytest cython scikit-learn six joblib
+- conda create -q -n test-environment python=$PYTHON_VERSION numpy scipy pytest cython scikit-learn joblib
 - source activate test-environment
 - make all
10 changes: 5 additions & 5 deletions README.rst
@@ -17,9 +17,9 @@ ranking in Python.

 Highlights:

-- follows the `scikit-learn <http://scikit-learn.org>`_ API conventions
+- follows the `scikit-learn <https://scikit-learn.org>`_ API conventions
 - supports natively both dense and sparse data representations
-- computationally demanding parts implemented in `Cython <http://cython.org>`_
+- computationally demanding parts implemented in `Cython <https://cython.org>`_

 Solvers supported:
@@ -66,8 +66,8 @@ penalty on the News20 dataset (c.f., `Blondel et al. 2013
 Dependencies
 ------------

-lightning requires Python >= 2.7, setuptools, Numpy >= 1.3, SciPy >= 0.7 and
-scikit-learn >= 0.15. Building from source also requires Cython and a working C/C++ compiler. To run the tests you will also need pytest.
+lightning requires Python >= 3.6, setuptools, Numpy >= 1.12, SciPy >= 0.19 and
+scikit-learn >= 0.19. Building from source also requires Cython and a working C/C++ compiler. To run the tests you will also need pytest.
Comment on lines +69 to +70
Contributor (author): These are the first versions for which Python 3.6 wheels were uploaded to PyPI.
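(As a hedged aside: minimum versions like these are usually also declared in setup.py so pip can enforce them. The PR's setup.py is not shown in this excerpt, so the snippet below is only an illustrative sketch of the convention, not the actual change.)

```python
# Illustrative sketch only -- this PR's setup.py changes are not shown here.
from setuptools import setup

setup(
    name="sklearn-contrib-lightning",
    python_requires=">=3.6",
    install_requires=[
        "numpy>=1.12",
        "scipy>=0.19",
        "scikit-learn>=0.19",
        "joblib",
    ],
)
```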


 Installation
 ------------
@@ -93,7 +93,7 @@ Documentation

 http://contrib.scikit-learn.org/lightning/

-On Github
+On GitHub
 ---------

 https://github.com/scikit-learn-contrib/lightning
3 changes: 1 addition & 2 deletions appveyor.yml
@@ -1,6 +1,5 @@
 # AppVeyor.com is a Continuous Integration service to build and run tests under
 # Windows
-# https://ci.appveyor.com/project/fabianp/lightning-bpc6r

 image: Visual Studio 2019

@@ -52,7 +51,7 @@ install:
- "python -c \"import struct; print(struct.calcsize('P') * 8)\""
- "python -m pip --version"

- "python -m pip install --timeout=60 numpy scipy cython pytest scikit-learn wheel six joblib"
- "python -m pip install --timeout=60 numpy scipy cython pytest scikit-learn wheel joblib"
- "python setup.py bdist_wheel bdist_wininst"

- ps: "ls dist"
2 changes: 1 addition & 1 deletion build_tools/move-conda-package.py
@@ -5,7 +5,7 @@
 import shutil
 from conda_build.config import config

-with open(os.path.join(sys.argv[1], 'meta.yaml')) as f:
+with open(os.path.join(sys.argv[1], 'meta.yaml'), encoding='utf-8') as f:
     name = yaml.load(f)['package']['name']

 binary_package_glob = os.path.join(
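(Hedged side note: `yaml.load` without an explicit `Loader`, as in the context line above, is deprecated in PyYAML >= 5.1. A sketch of the safer form, which is our assumption and not part of this PR:)

```python
# Sketch (not in this PR): safe_load avoids PyYAML's deprecation warning
# and refuses to construct arbitrary Python objects from the YAML file.
import os
import sys

import yaml

with open(os.path.join(sys.argv[1], 'meta.yaml'), encoding='utf-8') as f:
    name = yaml.safe_load(f)['package']['name']
```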
75 changes: 22 additions & 53 deletions doc/sphinxext/gen_rst.py
@@ -7,7 +7,6 @@
 Files that generate images should start with 'plot'

 """
-from __future__ import division, print_function
 from time import time
 import ast
 import os
@@ -20,37 +19,19 @@
 import posixpath
 import subprocess
 import warnings
-import six

+from io import StringIO
+import pickle
+import urllib.request
+import urllib.error
+import urllib.parse
+from urllib.error import HTTPError, URLError

-# Try Python 2 first, otherwise load from Python 3
-try:
-    from StringIO import StringIO
-    import cPickle as pickle
-    import urllib2 as urllib
-    from urllib2 import HTTPError, URLError
-except ImportError:
-    from io import StringIO
-    import pickle
-    import urllib.request
-    import urllib.error
-    import urllib.parse
-    from urllib.error import HTTPError, URLError


-try:
-    # Python 2 built-in
-    execfile
-except NameError:
-    def execfile(filename, global_vars=None, local_vars=None):
-        with open(filename, encoding='utf-8') as f:
-            code = compile(f.read(), filename, 'exec')
-            exec(code, global_vars, local_vars)

-try:
-    basestring
-except NameError:
-    basestring = str
+def execfile(filename, global_vars=None, local_vars=None):
+    with open(filename, encoding='utf-8') as f:
+        code = compile(f.read(), filename, 'exec')
+        exec(code, global_vars, local_vars)

 import token
 import tokenize
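(For context, the `execfile` shim retained above mimics the Python 2 built-in: it compiles a file and executes it in a caller-supplied namespace. A hedged usage sketch, with a hypothetical file name and namespace:)

```python
# Hedged sketch; 'examples/plot_demo.py' and the namespace are hypothetical.
ns = {'__name__': '__main__'}
execfile('examples/plot_demo.py', ns)  # runs the script with globals=ns
print(sorted(ns))                      # names the script defined
```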
@@ -93,13 +74,8 @@ def flush(self):
 def _get_data(url):
     """Helper function to get data over http or from a local file"""
     if url.startswith('http://'):
-        # Try Python 2, use Python 3 on exception
-        try:
-            resp = urllib.urlopen(url)
-            encoding = resp.headers.dict.get('content-encoding', 'plain')
-        except AttributeError:
-            resp = urllib.request.urlopen(url)
-            encoding = resp.headers.get('content-encoding', 'plain')
+        resp = urllib.request.urlopen(url)
+        encoding = resp.headers.get('content-encoding', 'plain')
         data = resp.read()
         if encoding == 'plain':
             pass
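(The branch elided below this hunk presumably handles compressed responses; the diff does not show it, so the gzip handling in this sketch is an assumption, not the repository's actual code:)

```python
# Hedged sketch of the usual content-encoding pattern; the actual elided
# branch in gen_rst.py is not shown in this diff.
import gzip
import urllib.request
from io import BytesIO

def get_data(url):
    resp = urllib.request.urlopen(url)
    encoding = resp.headers.get('content-encoding', 'plain')
    data = resp.read()
    if encoding == 'gzip':  # assumption: decompress gzipped payloads
        data = gzip.GzipFile(fileobj=BytesIO(data)).read()
    return data
```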
@@ -427,10 +403,8 @@ def resolve(self, cobj, this_url):
 def extract_docstring(filename, ignore_heading=False):
     """ Extract a module-level docstring, if any
     """
-    if six.PY2:
-        lines = open(filename).readlines()
-    else:
-        lines = open(filename, encoding='utf-8').readlines()
+    with open(filename, encoding='utf-8') as f:
+        lines = f.readlines()
     start_row = 0
     if lines[0].startswith('#!'):
         lines.pop(0)
@@ -526,10 +500,8 @@ def generate_example_rst(app):
 def extract_line_count(filename, target_dir):
     # Extract the line count of a file
     example_file = os.path.join(target_dir, filename)
-    if six.PY2:
-        lines = open(example_file).readlines()
-    else:
-        lines = open(example_file, encoding='utf-8').readlines()
+    with open(example_file, encoding='utf-8') as f:
+        lines = f.readlines()
     start_row = 0
     if lines and lines[0].startswith('#!'):
         lines.pop(0)
@@ -620,7 +592,7 @@ def generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, se
 %s


-""" % open(os.path.join(src_dir, 'README.txt')).read())
+""" % open(os.path.join(src_dir, 'README.txt'), encoding='utf-8').read())
     if not os.path.exists(target_dir):
         os.makedirs(target_dir)
     sorted_listdir = line_count_sort(os.listdir(src_dir),
@@ -676,8 +648,8 @@ def make_thumbnail(in_fname, out_fname, width, height):
     import Image
     img = Image.open(in_fname)
     width_in, height_in = img.size
-    scale_w = width / float(width_in)
-    scale_h = height / float(height_in)
+    scale_w = width / width_in
+    scale_h = height / height_in

     if height_in * scale_w <= height:
         scale = scale_w
@@ -727,7 +699,7 @@ class NameFinder(ast.NodeVisitor):
"""

def __init__(self):
super(NameFinder, self).__init__()
super().__init__()
self.imported_names = {}
self.accessed_names = set()

@@ -964,11 +936,8 @@ def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
         f.flush()

     # save variables so we can later add links to the documentation
-    if six.PY2:
-        example_code_obj = identify_names(open(example_file).read())
-    else:
-        example_code_obj = \
-            identify_names(open(example_file, encoding='utf-8').read())
+    with open(example_file, encoding='utf-8') as f:
+        example_code_obj = identify_names(f.read())
     if example_code_obj:
         codeobj_fname = example_file[:-3] + '_codeobj.pickle'
         with open(codeobj_fname, 'wb') as fid:
8 changes: 2 additions & 6 deletions doc/sphinxext/numpy_ext/docscrape.py
@@ -7,11 +7,7 @@
 import re
 import pydoc
 from warnings import warn
-# Try Python 2 first, otherwise load from Python 3
-try:
-    from StringIO import StringIO
-except:
-    from io import StringIO
+from io import StringIO


 class Reader(object):
@@ -466,7 +462,7 @@ def __str__(self):
         out += '.. %s:: %s\n \n\n' % (roles.get(self._role, ''),
                                       func_name)

-        out += super(FunctionDoc, self).__str__(func_role=self._role)
+        out += super().__str__(func_role=self._role)
         return out


2 changes: 1 addition & 1 deletion doc/sphinxext/numpy_ext/docscrape_sphinx.py
@@ -117,7 +117,7 @@ def _str_section(self, name):
     def _str_see_also(self, func_role):
         out = []
         if self['See Also']:
-            see_also = super(SphinxDocString, self)._str_see_also(func_role)
+            see_also = super()._str_see_also(func_role)
             out = ['.. seealso::', '']
             out += self._str_indent(see_also[2:])
         return out
18 changes: 4 additions & 14 deletions doc/sphinxext/numpy_ext/numpydoc.py
@@ -17,9 +17,6 @@

"""

from __future__ import unicode_literals

import sys # Only needed to check Python version
import os
import re
import pydoc
@@ -41,10 +38,7 @@ def mangle_docstrings(app, what, name, obj, options, lines,
         lines[:] = title_re.sub('', "\n".join(lines)).split("\n")
     else:
         doc = get_doc_object(obj, what, "\n".join(lines), config=cfg)
-        if sys.version_info[0] < 3:
-            lines[:] = unicode(doc).splitlines()
-        else:
-            lines[:] = str(doc).splitlines()
+        lines[:] = str(doc).splitlines()

     if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \
             obj.__name__:
@@ -104,12 +98,8 @@ def setup(app, get_doc_object_=get_doc_object):
     global get_doc_object
     get_doc_object = get_doc_object_

-    if sys.version_info[0] < 3:
-        app.connect(b'autodoc-process-docstring', mangle_docstrings)
-        app.connect(b'autodoc-process-signature', mangle_signature)
-    else:
-        app.connect('autodoc-process-docstring', mangle_docstrings)
-        app.connect('autodoc-process-signature', mangle_signature)
+    app.connect('autodoc-process-docstring', mangle_docstrings)
+    app.connect('autodoc-process-signature', mangle_signature)
     app.add_config_value('numpydoc_edit_link', None, False)
     app.add_config_value('numpydoc_use_plots', None, False)
     app.add_config_value('numpydoc_show_class_members', True, True)
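(For readers unfamiliar with Sphinx extensions: `setup(app)` above is the hook Sphinx calls when the extension is listed in `conf.py`, and `add_config_value` registers the options a project can set there. A hedged sketch of how a vendored extension like this is typically enabled; the paths and names are illustrative, not taken from this repository's conf.py:)

```python
# Hypothetical conf.py excerpt -- illustrative only.
import os
import sys

# Make the vendored extension importable.
sys.path.insert(0, os.path.abspath('sphinxext'))

extensions = ['numpy_ext.numpydoc']

# Config value registered by setup() above.
numpydoc_show_class_members = True
```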
@@ -135,7 +125,7 @@ class ManglingDomainBase(object):
     directive_mangling_map = {}

     def __init__(self, *a, **kw):
-        super(ManglingDomainBase, self).__init__(*a, **kw)
+        super().__init__(*a, **kw)
         self.wrap_mangling_directives()

     def wrap_mangling_directives(self):
12 changes: 6 additions & 6 deletions examples/plot_sparse_non_linear.py
@@ -33,20 +33,20 @@ class SparseNonlinearClassifier(CDClassifier):

     def __init__(self, gamma=1e-2, C=1, alpha=1):
         self.gamma = gamma
-        super(SparseNonlinearClassifier, self).__init__(C=C,
-                                                        alpha=alpha,
-                                                        loss="squared_hinge",
-                                                        penalty="l1")
+        super().__init__(C=C,
+                         alpha=alpha,
+                         loss="squared_hinge",
+                         penalty="l1")

     def fit(self, X, y):
         K = rbf_kernel(X, gamma=self.gamma)
         self.X_train_ = X
-        super(SparseNonlinearClassifier, self).fit(K, y)
+        super().fit(K, y)
         return self

     def decision_function(self, X):
         K = rbf_kernel(X, self.X_train_, gamma=self.gamma)
-        return super(SparseNonlinearClassifier, self).decision_function(K)
+        return super().decision_function(K)


 def gen_non_lin_separable_data():
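(A hedged usage sketch for the example class above; the toy data here is synthetic and not part of the PR:)

```python
# Hedged sketch: exercise SparseNonlinearClassifier on synthetic data.
import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(200, 2)
y = np.sign(X[:, 0] * X[:, 1])  # XOR-like labels, not linearly separable

clf = SparseNonlinearClassifier(gamma=0.5, C=1, alpha=0.1)
clf.fit(X, y)
print(clf.decision_function(X[:3]))
```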
3 changes: 1 addition & 2 deletions lightning/impl/adagrad.py
@@ -4,7 +4,6 @@
 import numpy as np

 from sklearn.utils import check_random_state
-from six.moves import xrange

 from .base import BaseClassifier, BaseRegressor
 from .dataset_fast import get_dataset
@@ -38,7 +37,7 @@ def _fit(self, X, Y):
         loss = self._get_loss()
         n_calls = n_samples if self.n_calls is None else self.n_calls

-        for i in xrange(n_vectors):
+        for i in range(n_vectors):
             _adagrad_fit(self, ds, Y[:, i], self.coef_[i], self.g_sum_[i],
                          self.g_norms_[i], loss, self.eta, delta, alpha1,
                          alpha2, self.n_iter, self.shuffle, self.callback,
14 changes: 7 additions & 7 deletions lightning/impl/adagrad_fast.pyx
@@ -26,7 +26,7 @@ cdef double _pred(double* data,
     cdef int j, jj
     cdef double dot = 0

-    for jj in xrange(n_nz):
+    for jj in range(n_nz):
         j = indices[jj]
         dot += w[j] * data[jj]

@@ -64,7 +64,7 @@ cpdef _proj_elastic_all(double eta,
                         np.ndarray[double, ndim=1] w):
     cdef int n_features = w.shape[0]
     cdef int j
-    for j in xrange(n_features):
+    for j in range(n_features):
         if g_norms[j] != 0:
             w[j] = _proj_elastic(eta, t, g_sum[j], g_norms[j], alpha1, alpha2,
                                  delta)
@@ -107,21 +107,21 @@ def _adagrad_fit(self,
     cdef double* w = <double*>coef.data

     t = 1
-    for it in xrange(n_iter):
+    for it in range(n_iter):

         # Shuffle sample indices.
         if shuffle:
             rng.shuffle(sindices)

-        for ii in xrange(n_samples):
+        for ii in range(n_samples):
             i = sindices[ii]

             # Retrieve sample i.
             X.get_row_ptr(i, &indices, &data, &n_nz)

             # Update w lazily.
             if t > 1:
-                for jj in xrange(n_nz):
+                for jj in range(n_nz):
                     j = indices[jj]
                     if g_norms[j] != 0:
                         w[j] = _proj_elastic(eta, t - 1, g_sum[j], g_norms[j],
@@ -135,14 +135,14 @@

             # Update g_sum and g_norms.
             if scale != 0:
-                for jj in xrange(n_nz):
+                for jj in range(n_nz):
                     j = indices[jj]
                     tmp = scale * data[jj]
                     g_sum[j] += tmp
                     g_norms[j] += tmp * tmp

             # Update w by naive implementation: very slow.
-            # for j in xrange(n_features):
+            # for j in range(n_features):
             #     w[j] = _proj_elastic(eta, t, g_sum[j], g_norms[j], alpha1,
             #                          alpha2, delta)
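(The hunks above preserve the kernel's lazy-update scheme: on each step only the coordinates that occur in the current sparse sample are brought up to date via `_proj_elastic`, instead of projecting all `n_features` weights as the commented-out naive loop would. A hedged pure-Python sketch of the idea; `_proj_elastic`'s closed form is not shown in this diff, so the plain AdaGrad step below is an illustrative stand-in:)

```python
# Hedged sketch of lazy AdaGrad updates over sparse rows. proj() is a
# simplified stand-in; the real _proj_elastic applies an elastic-net
# proximal step whose closed form is not shown in this diff.
import numpy as np

def proj(eta, t, g_sum_j, g_norm_j, delta=1e-3):
    return -eta * g_sum_j / (delta + np.sqrt(g_norm_j))

def adagrad_epoch(rows, w, g_sum, g_norms, eta=1.0, t=1):
    # rows yields (indices, data, grad_scale) for one sample per step.
    for indices, data, grad_scale in rows:
        # Lazily refresh only the coordinates this sample touches.
        for j in indices:
            if g_norms[j] != 0:
                w[j] = proj(eta, t - 1, g_sum[j], g_norms[j])
        # Accumulate per-coordinate gradient statistics.
        if grad_scale != 0:
            for jj, j in enumerate(indices):
                tmp = grad_scale * data[jj]
                g_sum[j] += tmp
                g_norms[j] += tmp * tmp
        t += 1
    return t
```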

4 changes: 2 additions & 2 deletions lightning/impl/base.py
@@ -30,9 +30,9 @@ def n_nonzero(self, percentage=False):
         if percentage:
             if hasattr(self, "support_vectors_") and \
                     self.support_vectors_ is not None:
-                n_nz /= float(self.n_samples_)
+                n_nz /= self.n_samples_
             else:
-                n_nz /= float(coef.shape[1])
+                n_nz /= coef.shape[1]

         return n_nz
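(Dropping the `float()` casts is safe because, with Python 2 support gone, `/` between integers is always true division:)

```python
# In Python 3, / is true division even for ints; // is floor division.
assert 3 / 4 == 0.75
assert 3 // 4 == 0
```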
