BUG: GH3611 fix again, float na_values were not stringified correctly #3841

Merged
merged 2 commits into from Jun 11, 2013
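
For context, a rough sketch of the user-facing behaviour this change targets (the frame contents and file name below are illustrative, not taken from the PR): every spelling of a numeric NA sentinel, string or number, integer or float, should read back as NaN, mirroring the tests added at the bottom of the diff.

import pandas as pd
import pandas.util.testing as tm

# Illustrative data; -999 is the missing-value sentinel.
df = pd.DataFrame({'A': [-999, 2, 3], 'B': [1.25, -999.0, 4.5]})
df.to_csv('sentinel.csv', sep=' ', index=False)

# All four spellings of the sentinel should yield the same parsed frame,
# with the sentinel cells read back as NaN.
frames = [pd.read_csv('sentinel.csv', sep=' ', na_values=v)
          for v in (['-999'], ['-999.0'], [-999], [-999.0])]
for other in frames[1:]:
    tm.assert_frame_equal(frames[0], other)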
94 changes: 64 additions & 30 deletions pandas/io/parsers.py
@@ -297,6 +297,7 @@ def parser_f(filepath_or_buffer,
skipfooter=None,
skip_footer=0,
na_values=None,
+na_fvalues=None,
true_values=None,
false_values=None,
delimiter=None,
@@ -359,6 +360,7 @@ def parser_f(filepath_or_buffer,
prefix=prefix,
skiprows=skiprows,
na_values=na_values,
+na_fvalues=na_fvalues,
true_values=true_values,
false_values=false_values,
keep_default_na=keep_default_na,
@@ -554,7 +556,7 @@ def _clean_options(self, options, engine):
converters = {}

# Converting values to NA
-na_values = _clean_na_values(na_values, keep_default_na)
+na_values, na_fvalues = _clean_na_values(na_values, keep_default_na)

if com.is_integer(skiprows):
skiprows = range(skiprows)
@@ -565,6 +567,7 @@ def _clean_options(self, options, engine):
result['names'] = names
result['converters'] = converters
result['na_values'] = na_values
+result['na_fvalues'] = na_fvalues
result['skiprows'] = skiprows

return result, engine
@@ -644,6 +647,7 @@ def __init__(self, kwds):
self.keep_date_col = kwds.pop('keep_date_col', False)

self.na_values = kwds.get('na_values')
+self.na_fvalues = kwds.get('na_fvalues')
self.true_values = kwds.get('true_values')
self.false_values = kwds.get('false_values')
self.tupleize_cols = kwds.get('tupleize_cols',True)
@@ -837,31 +841,34 @@ def _agg_index(self, index, try_parse_dates=True):
arr = self._date_conv(arr)

col_na_values = self.na_values
+col_na_fvalues = self.na_fvalues

if isinstance(self.na_values, dict):
col_name = self.index_names[i]
if col_name is not None:
-                        col_na_values = _get_na_values(col_name,
-                                                       self.na_values)

-            arr, _ = self._convert_types(arr, col_na_values)
+                        col_na_values, col_na_fvalues = _get_na_values(col_name,
+                                                                       self.na_values,
+                                                                       self.na_fvalues)

+            arr, _ = self._convert_types(arr, col_na_values | col_na_fvalues)
arrays.append(arr)

index = MultiIndex.from_arrays(arrays, names=self.index_names)

return index

-def _convert_to_ndarrays(self, dct, na_values, verbose=False,
+def _convert_to_ndarrays(self, dct, na_values, na_fvalues, verbose=False,
converters=None):
result = {}
for c, values in dct.iteritems():
conv_f = None if converters is None else converters.get(c, None)
-col_na_values = _get_na_values(c, na_values)
+col_na_values, col_na_fvalues = _get_na_values(c, na_values, na_fvalues)
coerce_type = True
if conv_f is not None:
values = lib.map_infer(values, conv_f)
coerce_type = False
-cvals, na_count = self._convert_types(values, col_na_values,
+cvals, na_count = self._convert_types(values,
+set(col_na_values) | col_na_fvalues,
coerce_type)
result[c] = cvals
if verbose and na_count:
@@ -1370,7 +1377,7 @@ def _convert_data(self, data):
col = self.orig_names[col]
clean_conv[col] = f

-return self._convert_to_ndarrays(data, self.na_values, self.verbose,
+return self._convert_to_ndarrays(data, self.na_values, self.na_fvalues, self.verbose,
clean_conv)

def _infer_columns(self):
@@ -1754,37 +1761,26 @@ def _try_convert_dates(parser, colspec, data_dict, columns):


def _clean_na_values(na_values, keep_default_na=True):

    if na_values is None and keep_default_na:
        na_values = _NA_VALUES
+        na_fvalues = set()
    elif isinstance(na_values, dict):
        if keep_default_na:
            for k, v in na_values.iteritems():
                v = set(list(v)) | _NA_VALUES
                na_values[k] = v
+        na_fvalues = dict([ (k, _floatify_na_values(v)) for k, v in na_values.items() ])
    else:
        if not com.is_list_like(na_values):
            na_values = [na_values]
-        na_values = set(_stringify_na_values(na_values))
+        na_values = _stringify_na_values(na_values)
        if keep_default_na:
            na_values = na_values | _NA_VALUES

-    return na_values
+        na_fvalues = _floatify_na_values(na_values)

-def _stringify_na_values(na_values):
-    """ return a stringified and numeric for these values """
-    result = []
-    for x in na_values:
-        result.append(str(x))
-        result.append(x)
-        try:
-            result.append(float(x))
-        except:
-            pass
-        try:
-            result.append(int(x))
-        except:
-            pass
-    return result
+    return na_values, na_fvalues

def _clean_index_names(columns, index_col):
if not _is_index_col(index_col):
@@ -1832,14 +1828,52 @@ def _get_empty_meta(columns, index_col, index_names):
return index, columns, {}


-def _get_na_values(col, na_values):
+def _floatify_na_values(na_values):
+    # create float versions of the na_values
+    result = set()
+    for v in na_values:
+        try:
+            v = float(v)
+            if not np.isnan(v):
+                result.add(v)
+        except:
+            pass
+    return result

+def _stringify_na_values(na_values):
+    """ return a stringified and numeric for these values """
+    result = []
+    for x in na_values:
+        result.append(str(x))
+        result.append(x)
+        try:
+            v = float(x)

+            # we are like 999 here
+            if v == int(v):
+                v = int(v)
+                result.append("%s.0" % v)
+                result.append(str(v))

+            result.append(v)
+        except:
+            pass
+        try:
+            result.append(int(x))
+        except:
+            pass
+    return set(result)

+def _get_na_values(col, na_values, na_fvalues):
    if isinstance(na_values, dict):
        if col in na_values:
-            return set(_stringify_na_values(list(na_values[col])))
+            values = na_values[col]
+            fvalues = na_fvalues[col]
+            return na_values[col], na_fvalues[col]
        else:
-            return _NA_VALUES
+            return _NA_VALUES, set()
    else:
-        return na_values
+        return na_values, na_fvalues


def _get_col_names(colspec, columns):
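To make the new helpers easier to follow, here is a rough trace of what they return for a single numeric sentinel. _stringify_na_values and _floatify_na_values are private module-level functions in pandas.io.parsers, so importing them directly is shown only for illustration, not as a supported API.

from pandas.io.parsers import _floatify_na_values, _stringify_na_values

strs = _stringify_na_values([-999])
# Per the code above: {'-999', '-999.0', -999}
# (-999 == -999.0 in Python, so one numeric entry covers both forms)

floats = _floatify_na_values(strs)
# {-999.0}: the float form that gets unioned into the set handed to
# _convert_types, so a parsed numeric value can match the sentinel even
# when its textual form in the file is different.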
30 changes: 30 additions & 0 deletions pandas/io/tests/test_parsers.py
@@ -540,6 +540,36 @@ def test_non_string_na_values(self):
tm.assert_frame_equal(result1,result2)
tm.assert_frame_equal(result2,result3)

result4 = read_csv(path, sep= ' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep= ' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep= ' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep= ' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4,result3)
tm.assert_frame_equal(result5,result3)
tm.assert_frame_equal(result6,result3)
tm.assert_frame_equal(result7,result3)

good_compare = result3

# with an odd float format, so we can't match the string 999.0 exactly,
# but need float matching
df.to_csv(path, sep=' ', index=False, float_format = '%.3f')
result1 = read_csv(path, sep= ' ', header=0, na_values=['-999.0','-999'])
result2 = read_csv(path, sep= ' ', header=0, na_values=[-999,-999.0])
result3 = read_csv(path, sep= ' ', header=0, na_values=[-999.0,-999])
tm.assert_frame_equal(result1,good_compare)
tm.assert_frame_equal(result2,good_compare)
tm.assert_frame_equal(result3,good_compare)

result4 = read_csv(path, sep= ' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep= ' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep= ' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep= ' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4,good_compare)
tm.assert_frame_equal(result5,good_compare)
tm.assert_frame_equal(result6,good_compare)
tm.assert_frame_equal(result7,good_compare)

def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
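The second half of the new test exercises the case the string forms alone cannot cover: with float_format='%.3f' the sentinel is written as '-999.000', which matches neither '-999' nor '-999.0', so the float form of the sentinel has to do the matching. A minimal sketch of that scenario (the file name is illustrative):

import pandas as pd

df = pd.DataFrame({'A': [-999.0, 1.5, 2.5]})
df.to_csv('odd_format.csv', sep=' ', index=False, float_format='%.3f')

# The file now spells the sentinel as '-999.000'; string matching fails,
# but the parsed float -999.0 is still recognised as NA.
out = pd.read_csv('odd_format.csv', sep=' ', na_values=[-999])
assert out['A'].isnull()[0]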