7
7
import requests
8
8
import pandas as pd
9
9
from json import JSONDecodeError
10
- import warnings
11
- from pvlib . _deprecation import pvlibDeprecationWarning
10
+ from pvlib . _deprecation import deprecated
11
+ from pvlib import tools
12
12
13
13
# Root URL of the NREL developer API.
NSRDB_API_BASE = "https://developer.nrel.gov"

# PSM3 v2.2 endpoint; returns NSRDB solar resource data as a SAM-format CSV.
PSM_URL = NSRDB_API_BASE + "/api/nsrdb/v2/solar/psm3-2-2-download.csv"
@@ -127,7 +127,7 @@ def get_psm3(latitude, longitude, api_key, email, names='tmy', interval=60,
127
127
timeseries data from NREL PSM3
128
128
metadata : dict
129
129
metadata from NREL PSM3 about the record, see
130
- :func:`pvlib.iotools.parse_psm3 ` for fields
130
+ :func:`pvlib.iotools.read_psm3 ` for fields
131
131
132
132
Raises
133
133
------
@@ -152,7 +152,7 @@ def get_psm3(latitude, longitude, api_key, email, names='tmy', interval=60,
152
152
153
153
See Also
154
154
--------
155
- pvlib.iotools.read_psm3, pvlib.iotools.parse_psm3
155
+ pvlib.iotools.read_psm3
156
156
157
157
References
158
158
----------
@@ -216,12 +216,12 @@ def get_psm3(latitude, longitude, api_key, email, names='tmy', interval=60,
216
216
# the CSV is in the response content as a UTF-8 bytestring
217
217
# to use pandas we need to create a file buffer from the response
218
218
fbuf = io .StringIO (response .content .decode ('utf-8' ))
219
- return parse_psm3 (fbuf , map_variables )
219
+ return read_psm3 (fbuf , map_variables )
220
220
221
221
222
- def parse_psm3 ( fbuf , map_variables = True ):
222
+ def read_psm3 ( filename , map_variables = True ):
223
223
"""
224
- Parse an NSRDB PSM3 weather file (formatted as SAM CSV). The NSRDB
224
+ Read an NSRDB PSM3 weather file (formatted as SAM CSV). The NSRDB
225
225
is described in [1]_ and the SAM CSV format is described in [2]_.
226
226
227
227
.. versionchanged:: 0.9.0
@@ -231,8 +231,8 @@ def parse_psm3(fbuf, map_variables=True):
231
231
232
232
Parameters
233
233
----------
234
- fbuf: file -like object
235
- File-like object containing data to read.
234
+ filename: str, path -like, or buffer
235
+ Filename or in-memory buffer of a file containing data to read.
236
236
map_variables: bool, default True
237
237
When true, renames columns of the Dataframe to pvlib variable names
238
238
where applicable. See variable :const:`VARIABLE_MAP`.
@@ -302,12 +302,15 @@ def parse_psm3(fbuf, map_variables=True):
302
302
Examples
303
303
--------
304
304
>>> # Read a local PSM3 file:
305
+ >>> df, metadata = iotools.read_psm3("data.csv") # doctest: +SKIP
306
+
307
+ >>> # Read a file object or an in-memory buffer:
305
308
>>> with open(filename, 'r') as f: # doctest: +SKIP
306
- ... df, metadata = iotools.parse_psm3 (f) # doctest: +SKIP
309
+ ... df, metadata = iotools.read_psm3 (f) # doctest: +SKIP
307
310
308
311
See Also
309
312
--------
310
- pvlib.iotools.read_psm3, pvlib.iotools. get_psm3
313
+ pvlib.iotools.get_psm3
311
314
312
315
References
313
316
----------
@@ -316,34 +319,35 @@ def parse_psm3(fbuf, map_variables=True):
316
319
.. [2] `Standard Time Series Data File Format
317
320
<https://web.archive.org/web/20170207203107/https://sam.nrel.gov/sites/default/files/content/documents/pdf/wfcsv.pdf>`_
318
321
"""
319
- # The first 2 lines of the response are headers with metadata
320
- metadata_fields = fbuf .readline ().split (',' )
321
- metadata_fields [- 1 ] = metadata_fields [- 1 ].strip () # strip trailing newline
322
- metadata_values = fbuf .readline ().split (',' )
323
- metadata_values [- 1 ] = metadata_values [- 1 ].strip () # strip trailing newline
322
+ with tools ._file_context_manager (filename ) as fbuf :
323
+ # The first 2 lines of the response are headers with metadata
324
+ metadata_fields = fbuf .readline ().split (',' )
325
+ metadata_values = fbuf .readline ().split (',' )
326
+ # get the column names so we can set the dtypes
327
+ columns = fbuf .readline ().split (',' )
328
+ columns [- 1 ] = columns [- 1 ].strip () # strip trailing newline
329
+ # Since the header has so many columns, excel saves blank cols in the
330
+ # data below the header lines.
331
+ columns = [col for col in columns if col != '' ]
332
+ dtypes = dict .fromkeys (columns , float ) # all floats except datevec
333
+ dtypes .update ({'Year' : int , 'Month' : int , 'Day' : int , 'Hour' : int ,
334
+ 'Minute' : int , 'Cloud Type' : int , 'Fill Flag' : int })
335
+ data = pd .read_csv (
336
+ fbuf , header = None , names = columns , usecols = columns , dtype = dtypes ,
337
+ delimiter = ',' , lineterminator = '\n ' ) # skip carriage returns \r
338
+
339
+ metadata_fields [- 1 ] = metadata_fields [- 1 ].strip () # trailing newline
340
+ metadata_values [- 1 ] = metadata_values [- 1 ].strip () # trailing newline
324
341
metadata = dict (zip (metadata_fields , metadata_values ))
325
342
# the response is all strings, so set some metadata types to numbers
326
343
metadata ['Local Time Zone' ] = int (metadata ['Local Time Zone' ])
327
344
metadata ['Time Zone' ] = int (metadata ['Time Zone' ])
328
345
metadata ['Latitude' ] = float (metadata ['Latitude' ])
329
346
metadata ['Longitude' ] = float (metadata ['Longitude' ])
330
347
metadata ['Elevation' ] = int (metadata ['Elevation' ])
331
- # get the column names so we can set the dtypes
332
- columns = fbuf .readline ().split (',' )
333
- columns [- 1 ] = columns [- 1 ].strip () # strip trailing newline
334
- # Since the header has so many columns, excel saves blank cols in the
335
- # data below the header lines.
336
- columns = [col for col in columns if col != '' ]
337
- dtypes = dict .fromkeys (columns , float ) # all floats except datevec
338
- dtypes .update (Year = int , Month = int , Day = int , Hour = int , Minute = int )
339
- dtypes ['Cloud Type' ] = int
340
- dtypes ['Fill Flag' ] = int
341
- data = pd .read_csv (
342
- fbuf , header = None , names = columns , usecols = columns , dtype = dtypes ,
343
- delimiter = ',' , lineterminator = '\n ' ) # skip carriage returns \r
348
+
344
349
# the response 1st 5 columns are a date vector, convert to datetime
345
- dtidx = pd .to_datetime (
346
- data [['Year' , 'Month' , 'Day' , 'Hour' , 'Minute' ]])
350
+ dtidx = pd .to_datetime (data [['Year' , 'Month' , 'Day' , 'Hour' , 'Minute' ]])
347
351
# in USA all timezones are integers
348
352
tz = 'Etc/GMT%+d' % - metadata ['Time Zone' ]
349
353
data .index = pd .DatetimeIndex (dtidx ).tz_localize (tz )
@@ -357,43 +361,5 @@ def parse_psm3(fbuf, map_variables=True):
357
361
return data , metadata
358
362
359
363
360
- def read_psm3 (filename , map_variables = True ):
361
- """
362
- Read an NSRDB PSM3 weather file (formatted as SAM CSV). The NSRDB
363
- is described in [1]_ and the SAM CSV format is described in [2]_.
364
-
365
- .. versionchanged:: 0.9.0
366
- The function now returns a tuple where the first element is a dataframe
367
- and the second element is a dictionary containing metadata. Previous
368
- versions of this function had the return values switched.
369
-
370
- Parameters
371
- ----------
372
- filename: str
373
- Filename of a file containing data to read.
374
- map_variables: bool, default True
375
- When true, renames columns of the Dataframe to pvlib variable names
376
- where applicable. See variable :const:`VARIABLE_MAP`.
377
-
378
- Returns
379
- -------
380
- data : pandas.DataFrame
381
- timeseries data from NREL PSM3
382
- metadata : dict
383
- metadata from NREL PSM3 about the record, see
384
- :func:`pvlib.iotools.parse_psm3` for fields
385
-
386
- See Also
387
- --------
388
- pvlib.iotools.parse_psm3, pvlib.iotools.get_psm3
389
-
390
- References
391
- ----------
392
- .. [1] `NREL National Solar Radiation Database (NSRDB)
393
- <https://nsrdb.nrel.gov/>`_
394
- .. [2] `Standard Time Series Data File Format
395
- <https://web.archive.org/web/20170207203107/https://sam.nrel.gov/sites/default/files/content/documents/pdf/wfcsv.pdf>`_
396
- """
397
- with open (str (filename ), 'r' ) as fbuf :
398
- content = parse_psm3 (fbuf , map_variables )
399
- return content
364
# Backwards-compatible alias: calling ``parse_psm3`` emits a deprecation
# warning and delegates to :func:`read_psm3`.
parse_psm3 = deprecated(
    since="0.12.1",
    name="parse_psm3",
    alternative="read_psm3",
)(read_psm3)
0 commit comments