diff --git a/.travis.yml b/.travis.yml index 851567b4..670c8bb4 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,4 +1,7 @@ language: python +# Testing Python 3.7 requires using the `xenial` dist which uses +# virtual machines rather than LXC and tends to take longer, so +# use it only where necessary matrix: include: - python: "2.7" @@ -9,12 +12,15 @@ matrix: env: TOXENV=py35 - python: "3.6" env: TOXENV=py36 + - python: "3.7" + env: TOXENV=py37 + dist: xenial - python: "3.6" env: TOXENV=codestyle before_install: true before_install: - - wget "https://dist.ipfs.io/go-ipfs/v0.4.14/go-ipfs_v0.4.14_linux-amd64.tar.gz" -O /tmp/ipfs.tar.gz + - wget "https://dist.ipfs.io/go-ipfs/v0.4.18/go-ipfs_v0.4.18_linux-amd64.tar.gz" -O /tmp/ipfs.tar.gz - mkdir -p $HOME/bin - pushd . && cd $HOME/bin && tar -xzvf /tmp/ipfs.tar.gz && popd - export PATH="$HOME/bin/go-ipfs:$PATH" @@ -22,4 +28,4 @@ before_install: install: - pip install tox -script: tox +script: tox \ No newline at end of file diff --git a/ipfshttpclient/client.py b/ipfshttpclient/client.py deleted file mode 100644 index 3f8ed965..00000000 --- a/ipfshttpclient/client.py +++ /dev/null @@ -1,2409 +0,0 @@ -# -*- coding: utf-8 -*- -"""IPFS API Bindings for Python. - -Classes: - - * Client – a TCP client for interacting with an IPFS daemon -""" -from __future__ import absolute_import - -import os -import warnings - -from . import http, multipart, utils, exceptions, encoding - -DEFAULT_HOST = str(os.environ.get("PY_IPFS_HTTP_CLIENT_DEFAULT_HOST", 'localhost')) -DEFAULT_PORT = int(os.environ.get("PY_IPFS_HTTP_CLIENT_DEFAULT_PORT", 5001)) -DEFAULT_BASE = str(os.environ.get("PY_IPFS_HTTP_CLIENT_DEFAULT_BASE", 'api/v0')) - -VERSION_MINIMUM = "0.4.3" -VERSION_MAXIMUM = "0.5.0" - - -def assert_version(version, minimum=VERSION_MINIMUM, maximum=VERSION_MAXIMUM): - """Make sure that the given daemon version is supported by this client - version. - - Raises - ------ - ~ipfshttpclient.exceptions.VersionMismatch - - Parameters - ---------- - version : str - The version of an IPFS daemon. - minimum : str - The minimal IPFS version to allow. - maximum : str - The maximum IPFS version to allow. - """ - # Convert version strings to integer tuples - version = list(map(int, version.split('-', 1)[0].split('.'))) - minimum = list(map(int, minimum.split('-', 1)[0].split('.'))) - maximum = list(map(int, maximum.split('-', 1)[0].split('.'))) - - if minimum > version or version >= maximum: - raise exceptions.VersionMismatch(version, minimum, maximum) - - -def connect(host=DEFAULT_HOST, port=DEFAULT_PORT, base=DEFAULT_BASE, - chunk_size=multipart.default_chunk_size, **defaults): - """Create a new :class:`~ipfshttpclient.Client` instance and connect to the - daemon to validate that its version is supported. - - Raises - ------ - ~ipfshttpclient.exceptions.VersionMismatch - ~ipfshttpclient.exceptions.ErrorResponse - ~ipfshttpclient.exceptions.ConnectionError - ~ipfshttpclient.exceptions.ProtocolError - ~ipfshttpclient.exceptions.StatusError - ~ipfshttpclient.exceptions.TimeoutError - - - All parameters are identical to those passed to the constructor of the - :class:`~ipfshttpclient.Client` class. - - Returns - ------- - ~ipfshttpclient.Client - """ - # Create client instance - client = Client(host, port, base, chunk_size, **defaults) - - # Query version number from daemon and validate it - assert_version(client.version()['Version']) - - return client - - -class SubChannel: - """ - Wrapper for a pubsub subscription object that allows for easy - closing of subscriptions. 
- """ - - def __init__(self, sub): - self.__sub = sub - - def read_message(self): - return next(self.__sub) - - def __iter__(self): - return self.__sub - - def close(self): - self.__sub.close() - - def __enter__(self): - return self - - def __exit__(self, *a): - self.close() - - -class Client(object): - """A TCP client for interacting with an IPFS daemon. - - A :class:`~ipfshttpclient.Client` instance will not actually establish a - connection to the daemon until at least one of it's methods is called. - - Parameters - ---------- - host : str - Hostname or IP address of the computer running the ``ipfs daemon`` - node (defaults to the local system) - port : int - The API port of the IPFS deamon (usually 5001) - base : str - Path of the deamon's API (currently always ``api/v0``) - chunk_size : int - The size of the chunks to break uploaded files and text content into - """ - - _clientfactory = http.HTTPClient - - def __init__(self, host=DEFAULT_HOST, port=DEFAULT_PORT, - base=DEFAULT_BASE, chunk_size=multipart.default_chunk_size, - **defaults): - """Connects to the API port of an IPFS node.""" - - self.chunk_size = chunk_size - - self._client = self._clientfactory(host, port, base, **defaults) - - def add(self, files, recursive=False, pattern='**', *args, **kwargs): - """Add a file, or directory of files to IPFS. - - .. code-block:: python - - >>> with io.open('nurseryrhyme.txt', 'w', encoding='utf-8') as f: - ... numbytes = f.write('Mary had a little lamb') - >>> c.add('nurseryrhyme.txt') - {'Hash': 'QmZfF6C9j4VtoCsTp4KSrhYH47QMd3DNXVZBKaxJdhaPab', - 'Name': 'nurseryrhyme.txt'} - - Parameters - ---------- - files : str - A filepath to either a file or directory - recursive : bool - Controls if files in subdirectories are added or not - pattern : str | list - Single `*glob* `_ - pattern or list of *glob* patterns and compiled regular expressions - to match the names of the filepaths to keep - trickle : bool - Use trickle-dag format (optimized for streaming) when generating - the dag; see `the FAQ ` for - more information (Default: ``False``) - only_hash : bool - Only chunk and hash, but do not write to disk (Default: ``False``) - wrap_with_directory : bool - Wrap files with a directory object to preserve their filename - (Default: ``False``) - chunker : str - The chunking algorithm to use - pin : bool - Pin this object when adding (Default: ``True``) - - Returns - ------- - dict: File name and hash of the added file node - """ - #PY2: No support for kw-only parameters after glob parameters - opts = { - "trickle": kwargs.pop("trickle", False), - "only-hash": kwargs.pop("only_hash", False), - "wrap-with-directory": kwargs.pop("wrap_with_directory", False), - "pin": kwargs.pop("pin", True) - } - if "chunker" in kwargs: - opts["chunker"] = kwargs.pop("chunker") - kwargs.setdefault("opts", opts) - - body, headers = multipart.stream_filesystem_node( - files, recursive, pattern, self.chunk_size - ) - return self._client.request('/add', decoder='json', - data=body, headers=headers, **kwargs) - - def get(self, multihash, **kwargs): - """Downloads a file, or directory of files from IPFS. - - Files are placed in the current working directory. - - Parameters - ---------- - multihash : str - The path to the IPFS object(s) to be outputted - """ - args = (multihash,) - return self._client.download('/get', args, **kwargs) - - def cat(self, multihash, offset=0, length=-1, **kwargs): - r"""Retrieves the contents of a file identified by hash. - - .. 
code-block:: python - - >>> c.cat('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D') - Traceback (most recent call last): - ... - ipfshttpclient.exceptions.Error: this dag node is a directory - >>> c.cat('QmeKozNssnkJ4NcyRidYgDY2jfRZqVEoRGfipkgath71bX') - b'\n\n\n\nipfs example viewer</…' - - Parameters - ---------- - multihash : str - The path to the IPFS object(s) to be retrieved - offset : int - Byte offset to begin reading from - length : int - Maximum number of bytes to read(-1 for all) - - Returns - ------- - str : File contents - """ - opts = {} - if offset != 0: - opts['offset'] = offset - if length != -1: - opts['length'] = length - args = (multihash,) - return self._client.request('/cat', args, opts=opts, **kwargs) - - def ls(self, multihash, **kwargs): - """Returns a list of objects linked to by the given hash. - - .. code-block:: python - - >>> c.ls('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D') - {'Objects': [ - {'Hash': 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D', - 'Links': [ - {'Hash': 'Qmd2xkBfEwEs9oMTk77A6jrsgurpF3ugXSg7dtPNFkcNMV', - 'Name': 'Makefile', 'Size': 174, 'Type': 2}, - … - {'Hash': 'QmSY8RfVntt3VdxWppv9w5hWgNrE31uctgTiYwKir8eXJY', - 'Name': 'published-version', 'Size': 55, 'Type': 2} - ]} - ]} - - Parameters - ---------- - multihash : str - The path to the IPFS object(s) to list links from - - Returns - ------- - dict : Directory information and contents - """ - args = (multihash,) - return self._client.request('/ls', args, decoder='json', **kwargs) - - def refs(self, multihash, **kwargs): - """Returns a list of hashes of objects referenced by the given hash. - - .. code-block:: python - - >>> c.refs('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D') - [{'Ref': 'Qmd2xkBfEwEs9oMTk77A6jrsgurpF3ugXSg7 … cNMV', 'Err': ''}, - … - {'Ref': 'QmSY8RfVntt3VdxWppv9w5hWgNrE31uctgTi … eXJY', 'Err': ''}] - - Parameters - ---------- - multihash : str - Path to the object(s) to list refs from - - Returns - ------- - list - """ - args = (multihash,) - return self._client.request('/refs', args, decoder='json', **kwargs) - - def refs_local(self, **kwargs): - """Displays the hashes of all local objects. - - .. code-block:: python - - >>> c.refs_local() - [{'Ref': 'Qmd2xkBfEwEs9oMTk77A6jrsgurpF3ugXSg7 … cNMV', 'Err': ''}, - … - {'Ref': 'QmSY8RfVntt3VdxWppv9w5hWgNrE31uctgTi … eXJY', 'Err': ''}] - - Returns - ------- - list - """ - return self._client.request('/refs/local', decoder='json', **kwargs) - - def block_stat(self, multihash, **kwargs): - """Returns a dict with the size of the block with the given hash. - - .. code-block:: python - - >>> c.block_stat('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D') - {'Key': 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D', - 'Size': 258} - - Parameters - ---------- - multihash : str - The base58 multihash of an existing block to stat - - Returns - ------- - dict : Information about the requested block - """ - args = (multihash,) - return self._client.request('/block/stat', args, - decoder='json', **kwargs) - - def block_get(self, multihash, **kwargs): - r"""Returns the raw contents of a block. - - .. 
code-block:: python - - >>> c.block_get('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D') - b'\x121\n"\x12 \xdaW>\x14\xe5\xc1\xf6\xe4\x92\xd1 … \n\x02\x08\x01' - - Parameters - ---------- - multihash : str - The base58 multihash of an existing block to get - - Returns - ------- - str : Value of the requested block - """ - args = (multihash,) - return self._client.request('/block/get', args, **kwargs) - - def block_put(self, file, **kwargs): - """Stores the contents of the given file object as an IPFS block. - - .. code-block:: python - - >>> c.block_put(io.BytesIO(b'Mary had a little lamb')) - {'Key': 'QmeV6C6XVt1wf7V7as7Yak3mxPma8jzpqyhtRtCvpKcfBb', - 'Size': 22} - - Parameters - ---------- - file : io.RawIOBase - The data to be stored as an IPFS block - - Returns - ------- - dict : Information about the new block - - See :meth:`~ipfshttpclient.Client.block_stat` - """ - body, headers = multipart.stream_files(file, self.chunk_size) - return self._client.request('/block/put', decoder='json', - data=body, headers=headers, **kwargs) - - def bitswap_wantlist(self, peer=None, **kwargs): - """Returns blocks currently on the bitswap wantlist. - - .. code-block:: python - - >>> c.bitswap_wantlist() - {'Keys': [ - 'QmeV6C6XVt1wf7V7as7Yak3mxPma8jzpqyhtRtCvpKcfBb', - 'QmdCWFLDXqgdWQY9kVubbEHBbkieKd3uo7MtCm7nTZZE9K', - 'QmVQ1XvYGF19X4eJqz1s7FJYJqAxFC4oqh3vWJJEXn66cp' - ]} - - Parameters - ---------- - peer : str - Peer to show wantlist for. - - Returns - ------- - dict : List of wanted blocks - """ - args = (peer,) - return self._client.request('/bitswap/wantlist', args, - decoder='json', **kwargs) - - def bitswap_stat(self, **kwargs): - """Returns some diagnostic information from the bitswap agent. - - .. code-block:: python - - >>> c.bitswap_stat() - {'BlocksReceived': 96, - 'DupBlksReceived': 73, - 'DupDataReceived': 2560601, - 'ProviderBufLen': 0, - 'Peers': [ - 'QmNZFQRxt9RMNm2VVtuV2Qx7q69bcMWRVXmr5CEkJEgJJP', - 'QmNfCubGpwYZAQxX8LQDsYgB48C4GbfZHuYdexpX9mbNyT', - 'QmNfnZ8SCs3jAtNPc8kf3WJqJqSoX7wsX7VqkLdEYMao4u', - … - ], - 'Wantlist': [ - 'QmeV6C6XVt1wf7V7as7Yak3mxPma8jzpqyhtRtCvpKcfBb', - 'QmdCWFLDXqgdWQY9kVubbEHBbkieKd3uo7MtCm7nTZZE9K', - 'QmVQ1XvYGF19X4eJqz1s7FJYJqAxFC4oqh3vWJJEXn66cp' - ] - } - - Returns - ------- - dict : Statistics, peers and wanted blocks - """ - return self._client.request('/bitswap/stat', decoder='json', **kwargs) - - def bitswap_unwant(self, key, **kwargs): - """ - Remove a given block from wantlist. - - Parameters - ---------- - key : str - Key to remove from wantlist. - """ - args = (key,) - return self._client.request('/bitswap/unwant', args, **kwargs) - - def object_data(self, multihash, **kwargs): - r"""Returns the raw bytes in an IPFS object. - - .. code-block:: python - - >>> c.object_data('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D') - b'\x08\x01' - - Parameters - ---------- - multihash : str - Key of the object to retrieve, in base58-encoded multihash format - - Returns - ------- - str : Raw object data - """ - args = (multihash,) - return self._client.request('/object/data', args, **kwargs) - - def object_new(self, template=None, **kwargs): - """Creates a new object from an IPFS template. - - By default this creates and returns a new empty merkledag node, but you - may pass an optional template argument to create a preformatted node. - - .. code-block:: python - - >>> c.object_new() - {'Hash': 'QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n'} - - Parameters - ---------- - template : str - Blueprints from which to construct the new object. 
Possible values: - - * ``"unixfs-dir"`` - * ``None`` - - Returns - ------- - dict : Object hash - """ - args = (template,) if template is not None else () - return self._client.request('/object/new', args, - decoder='json', **kwargs) - - def object_links(self, multihash, **kwargs): - """Returns the links pointed to by the specified object. - - .. code-block:: python - - >>> c.object_links('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDx … ca7D') - {'Hash': 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D', - 'Links': [ - {'Hash': 'Qmd2xkBfEwEs9oMTk77A6jrsgurpF3ugXSg7dtPNFkcNMV', - 'Name': 'Makefile', 'Size': 174}, - {'Hash': 'QmeKozNssnkJ4NcyRidYgDY2jfRZqVEoRGfipkgath71bX', - 'Name': 'example', 'Size': 1474}, - {'Hash': 'QmZAL3oHMQYqsV61tGvoAVtQLs1WzRe1zkkamv9qxqnDuK', - 'Name': 'home', 'Size': 3947}, - {'Hash': 'QmZNPyKVriMsZwJSNXeQtVQSNU4v4KEKGUQaMT61LPahso', - 'Name': 'lib', 'Size': 268261}, - {'Hash': 'QmSY8RfVntt3VdxWppv9w5hWgNrE31uctgTiYwKir8eXJY', - 'Name': 'published-version', 'Size': 55}]} - - Parameters - ---------- - multihash : str - Key of the object to retrieve, in base58-encoded multihash format - - Returns - ------- - dict : Object hash and merkedag links - """ - args = (multihash,) - return self._client.request('/object/links', args, - decoder='json', **kwargs) - - def object_get(self, multihash, **kwargs): - """Get and serialize the DAG node named by multihash. - - .. code-block:: python - - >>> c.object_get('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D') - {'Data': '\x08\x01', - 'Links': [ - {'Hash': 'Qmd2xkBfEwEs9oMTk77A6jrsgurpF3ugXSg7dtPNFkcNMV', - 'Name': 'Makefile', 'Size': 174}, - {'Hash': 'QmeKozNssnkJ4NcyRidYgDY2jfRZqVEoRGfipkgath71bX', - 'Name': 'example', 'Size': 1474}, - {'Hash': 'QmZAL3oHMQYqsV61tGvoAVtQLs1WzRe1zkkamv9qxqnDuK', - 'Name': 'home', 'Size': 3947}, - {'Hash': 'QmZNPyKVriMsZwJSNXeQtVQSNU4v4KEKGUQaMT61LPahso', - 'Name': 'lib', 'Size': 268261}, - {'Hash': 'QmSY8RfVntt3VdxWppv9w5hWgNrE31uctgTiYwKir8eXJY', - 'Name': 'published-version', 'Size': 55}]} - - Parameters - ---------- - multihash : str - Key of the object to retrieve, in base58-encoded multihash format - - Returns - ------- - dict : Object data and links - """ - args = (multihash,) - return self._client.request('/object/get', args, - decoder='json', **kwargs) - - def object_put(self, file, **kwargs): - """Stores input as a DAG object and returns its key. - - .. code-block:: python - - >>> c.object_put(io.BytesIO(b''' - ... { - ... "Data": "another", - ... "Links": [ { - ... "Name": "some link", - ... "Hash": "QmXg9Pp2ytZ14xgmQjYEiHjVjMFXzCV … R39V", - ... "Size": 8 - ... } ] - ... }''')) - {'Hash': 'QmZZmY4KCu9r3e7M2Pcn46Fc5qbn6NpzaAGaYb22kbfTqm', - 'Links': [ - {'Hash': 'QmXg9Pp2ytZ14xgmQjYEiHjVjMFXzCVVEcRTWJBmLgR39V', - 'Size': 8, 'Name': 'some link'} - ] - } - - Parameters - ---------- - file : io.RawIOBase - (JSON) object from which the DAG object will be created - - Returns - ------- - dict : Hash and links of the created DAG object - - See :meth:`~ipfshttpclient.Object.object_links` - """ - body, headers = multipart.stream_files(file, self.chunk_size) - return self._client.request('/object/put', decoder='json', - data=body, headers=headers, **kwargs) - - def object_stat(self, multihash, **kwargs): - """Get stats for the DAG node named by multihash. - - .. 
code-block:: python - - >>> c.object_stat('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D') - {'LinksSize': 256, 'NumLinks': 5, - 'Hash': 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D', - 'BlockSize': 258, 'CumulativeSize': 274169, 'DataSize': 2} - - Parameters - ---------- - multihash : str - Key of the object to retrieve, in base58-encoded multihash format - - Returns - ------- - dict - """ - args = (multihash,) - return self._client.request('/object/stat', args, - decoder='json', **kwargs) - - def object_patch_append_data(self, multihash, new_data, **kwargs): - """Creates a new merkledag object based on an existing one. - - The new object will have the provided data appended to it, - and will thus have a new Hash. - - .. code-block:: python - - >>> c.object_patch_append_data("QmZZmY … fTqm", io.BytesIO(b"bla")) - {'Hash': 'QmR79zQQj2aDfnrNgczUhvf2qWapEfQ82YQRt3QjrbhSb2'} - - Parameters - ---------- - multihash : str - The hash of an ipfs object to modify - new_data : io.RawIOBase - The data to append to the object's data section - - Returns - ------- - dict : Hash of new object - """ - args = (multihash,) - body, headers = multipart.stream_files(new_data, self.chunk_size) - return self._client.request('/object/patch/append-data', args, - decoder='json', - data=body, headers=headers, **kwargs) - - def object_patch_add_link(self, root, name, ref, create=False, **kwargs): - """Creates a new merkledag object based on an existing one. - - The new object will have a link to the provided object. - - .. code-block:: python - - >>> c.object_patch_add_link( - ... 'QmR79zQQj2aDfnrNgczUhvf2qWapEfQ82YQRt3QjrbhSb2', - ... 'Johnny', - ... 'QmR79zQQj2aDfnrNgczUhvf2qWapEfQ82YQRt3QjrbhSb2' - ... ) - {'Hash': 'QmNtXbF3AjAk59gQKRgEdVabHcSsiPUnJwHnZKyj2x8Z3k'} - - Parameters - ---------- - root : str - IPFS hash for the object being modified - name : str - name for the new link - ref : str - IPFS hash for the object being linked to - create : bool - Create intermediary nodes - - Returns - ------- - dict : Hash of new object - """ - kwargs.setdefault("opts", {"create": create}) - - args = ((root, name, ref),) - return self._client.request('/object/patch/add-link', args, - decoder='json', **kwargs) - - def object_patch_rm_link(self, root, link, **kwargs): - """Creates a new merkledag object based on an existing one. - - The new object will lack a link to the specified object. - - .. code-block:: python - - >>> c.object_patch_rm_link( - ... 'QmNtXbF3AjAk59gQKRgEdVabHcSsiPUnJwHnZKyj2x8Z3k', - ... 'Johnny' - ... ) - {'Hash': 'QmR79zQQj2aDfnrNgczUhvf2qWapEfQ82YQRt3QjrbhSb2'} - - Parameters - ---------- - root : str - IPFS hash of the object to modify - link : str - name of the link to remove - - Returns - ------- - dict : Hash of new object - """ - args = ((root, link),) - return self._client.request('/object/patch/rm-link', args, - decoder='json', **kwargs) - - def object_patch_set_data(self, root, data, **kwargs): - """Creates a new merkledag object based on an existing one. - - The new object will have the same links as the old object but - with the provided data instead of the old object's data contents. - - .. code-block:: python - - >>> c.object_patch_set_data( - ... 'QmNtXbF3AjAk59gQKRgEdVabHcSsiPUnJwHnZKyj2x8Z3k', - ... io.BytesIO(b'bla') - ... 
) - {'Hash': 'QmSw3k2qkv4ZPsbu9DVEJaTMszAQWNgM1FTFYpfZeNQWrd'} - - Parameters - ---------- - root : str - IPFS hash of the object to modify - data : io.RawIOBase - The new data to store in root - - Returns - ------- - dict : Hash of new object - """ - args = (root,) - body, headers = multipart.stream_files(data, self.chunk_size) - return self._client.request('/object/patch/set-data', args, - decoder='json', - data=body, headers=headers, **kwargs) - - def file_ls(self, multihash, **kwargs): - """Lists directory contents for Unix filesystem objects. - - The result contains size information. For files, the child size is the - total size of the file contents. For directories, the child size is the - IPFS link size. - - The path can be a prefixless reference; in this case, it is assumed - that it is an ``/ipfs/`` reference and not ``/ipns/``. - - .. code-block:: python - - >>> c.file_ls('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D') - {'Arguments': {'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D': - 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D'}, - 'Objects': { - 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D': { - 'Hash': 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D', - 'Size': 0, 'Type': 'Directory', - 'Links': [ - {'Hash': 'Qmd2xkBfEwEs9oMTk77A6jrsgurpF3ugXSg7dtPNFkcNMV', - 'Name': 'Makefile', 'Size': 163, 'Type': 'File'}, - {'Hash': 'QmeKozNssnkJ4NcyRidYgDY2jfRZqVEoRGfipkgath71bX', - 'Name': 'example', 'Size': 1463, 'Type': 'File'}, - {'Hash': 'QmZAL3oHMQYqsV61tGvoAVtQLs1WzRe1zkkamv9qxqnDuK', - 'Name': 'home', 'Size': 3947, 'Type': 'Directory'}, - {'Hash': 'QmZNPyKVriMsZwJSNXeQtVQSNU4v4KEKGUQaMT61LPahso', - 'Name': 'lib', 'Size': 268261, 'Type': 'Directory'}, - {'Hash': 'QmSY8RfVntt3VdxWppv9w5hWgNrE31uctgTiYwKir8eXJY', - 'Name': 'published-version', - 'Size': 47, 'Type': 'File'} - ] - } - }} - - Parameters - ---------- - multihash : str - The path to the object(s) to list links from - - Returns - ------- - dict - """ - args = (multihash,) - return self._client.request('/file/ls', args, decoder='json', **kwargs) - - def resolve(self, name, recursive=False, **kwargs): - """Accepts an identifier and resolves it to the referenced item. - - There are a number of mutable name protocols that can link among - themselves and into IPNS. For example IPNS references can (currently) - point at an IPFS object, and DNS links can point at other DNS links, - IPNS entries, or IPFS objects. This command accepts any of these - identifiers. - - .. code-block:: python - - >>> c.resolve("/ipfs/QmTkzDwWqPbnAh5YiV5VwcTLnGdw … ca7D/Makefile") - {'Path': '/ipfs/Qmd2xkBfEwEs9oMTk77A6jrsgurpF3ugXSg7dtPNFkcNMV'} - >>> c.resolve("/ipns/ipfs.io") - {'Path': '/ipfs/QmTzQ1JRkWErjk39mryYw2WVaphAZNAREyMchXzYQ7c15n'} - - Parameters - ---------- - name : str - The name to resolve - recursive : bool - Resolve until the result is an IPFS name - - Returns - ------- - dict : IPFS path of resource - """ - kwargs.setdefault("opts", {"recursive": recursive}) - - args = (name,) - return self._client.request('/resolve', args, decoder='json', **kwargs) - - def key_list(self, **kwargs): - """Returns a list of generated public keys that can be used with name_publish - - .. code-block:: python - - >>> c.key_list() - [{'Name': 'self', - 'Id': 'QmQf22bZar3WKmojipms22PkXH1MZGmvsqzQtuSvQE3uhm'}, - {'Name': 'example_key_name', - 'Id': 'QmQLaT5ZrCfSkXTH6rUKtVidcxj8jrW3X2h75Lug1AV7g8'} - ] - - Returns - ------- - list : List of dictionaries with Names and Ids of public keys. 
- """ - return self._client.request('/key/list', decoder='json', **kwargs) - - def key_gen(self, key_name, type, size=2048, **kwargs): - """Adds a new public key that can be used for name_publish. - - .. code-block:: python - - >>> c.key_gen('example_key_name') - {'Name': 'example_key_name', - 'Id': 'QmQLaT5ZrCfSkXTH6rUKtVidcxj8jrW3X2h75Lug1AV7g8'} - - Parameters - ---------- - key_name : str - Name of the new Key to be generated. Used to reference the Keys. - type : str - Type of key to generate. The current possible keys types are: - - * ``"rsa"`` - * ``"ed25519"`` - size : int - Bitsize of key to generate - - Returns - ------- - dict : Key name and Key Id - """ - - opts = {"type": type, "size": size} - kwargs.setdefault("opts", opts) - args = (key_name,) - - return self._client.request('/key/gen', args, - decoder='json', **kwargs) - - def key_rm(self, key_name, *key_names, **kwargs): - """Remove a keypair - - .. code-block:: python - - >>> c.key_rm("bla") - {"Keys": [ - {"Name": "bla", - "Id": "QmfJpR6paB6h891y7SYXGe6gapyNgepBeAYMbyejWA4FWA"} - ]} - - Parameters - ---------- - key_name : str - Name of the key(s) to remove. - - Returns - ------- - dict : List of key names and IDs that have been removed - """ - args = (key_name,) + key_names - return self._client.request('/key/rm', args, decoder='json', **kwargs) - - def key_rename(self, key_name, new_key_name, **kwargs): - """Rename a keypair - - .. code-block:: python - - >>> c.key_rename("bla", "personal") - {"Was": "bla", - "Now": "personal", - "Id": "QmeyrRNxXaasZaoDXcCZgryoBCga9shaHQ4suHAYXbNZF3", - "Overwrite": False} - - Parameters - ---------- - key_name : str - Current name of the key to rename - new_key_name : str - New name of the key - - Returns - ------- - dict : List of key names and IDs that have been removed - """ - args = (key_name, new_key_name) - return self._client.request('/key/rename', args, decoder='json', - **kwargs) - - def name_publish(self, ipfs_path, resolve=True, lifetime="24h", ttl=None, - key=None, **kwargs): - """Publishes an object to IPNS. - - IPNS is a PKI namespace, where names are the hashes of public keys, and - the private key enables publishing new (signed) values. In publish, the - default value of *name* is your own identity public key. - - .. code-block:: python - - >>> c.name_publish('/ipfs/QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZK … GZ5d') - {'Value': '/ipfs/QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d', - 'Name': 'QmVgNoP89mzpgEAAqK8owYoDEyB97MkcGvoWZir8otE9Uc'} - - Parameters - ---------- - ipfs_path : str - IPFS path of the object to be published - resolve : bool - Resolve given path before publishing - lifetime : str - Time duration that the record will be valid for - - Accepts durations such as ``"300s"``, ``"1.5h"`` or ``"2h45m"``. - Valid units are: - - * ``"ns"`` - * ``"us"`` (or ``"µs"``) - * ``"ms"`` - * ``"s"`` - * ``"m"`` - * ``"h"`` - ttl : int - Time duration this record should be cached for - key : string - Name of the key to be used, as listed by 'ipfs key list'. - - Returns - ------- - dict : IPNS hash and the IPFS path it points at - """ - opts = {"lifetime": lifetime, "resolve": resolve} - if ttl: - opts["ttl"] = ttl - if key: - opts["key"] = key - kwargs.setdefault("opts", opts) - - args = (ipfs_path,) - return self._client.request('/name/publish', args, - decoder='json', **kwargs) - - def name_resolve(self, name=None, recursive=False, - nocache=False, **kwargs): - """Gets the value currently published at an IPNS name. 
- - IPNS is a PKI namespace, where names are the hashes of public keys, and - the private key enables publishing new (signed) values. In resolve, the - default value of ``name`` is your own identity public key. - - .. code-block:: python - - >>> c.name_resolve() - {'Path': '/ipfs/QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d'} - - Parameters - ---------- - name : str - The IPNS name to resolve (defaults to the connected node) - recursive : bool - Resolve until the result is not an IPFS name (default: false) - nocache : bool - Do not use cached entries (default: false) - - Returns - ------- - dict : The IPFS path the IPNS hash points at - """ - kwargs.setdefault("opts", {"recursive": recursive, - "nocache": nocache}) - args = (name,) if name is not None else () - return self._client.request('/name/resolve', args, - decoder='json', **kwargs) - - def dns(self, domain_name, recursive=False, **kwargs): - """Resolves DNS links to the referenced object. - - Multihashes are hard to remember, but domain names are usually easy to - remember. To create memorable aliases for multihashes, DNS TXT records - can point to other DNS links, IPFS objects, IPNS keys, etc. - This command resolves those links to the referenced object. - - For example, with this DNS TXT record:: - - >>> import dns.resolver - >>> a = dns.resolver.query("ipfs.io", "TXT") - >>> a.response.answer[0].items[0].to_text() - '"dnslink=/ipfs/QmTzQ1JRkWErjk39mryYw2WVaphAZNAREyMchXzYQ7c15n"' - - The resolver will give:: - - >>> c.dns("ipfs.io") - {'Path': '/ipfs/QmTzQ1JRkWErjk39mryYw2WVaphAZNAREyMchXzYQ7c15n'} - - Parameters - ---------- - domain_name : str - The domain name to resolve - recursive : bool - Resolve until the name is not a DNS link - - Returns - ------- - dict : Resource where a DNS entry points to - """ - kwargs.setdefault("opts", {"recursive": recursive}) - - args = (domain_name,) - return self._client.request('/dns', args, decoder='json', **kwargs) - - def pin_add(self, path, *paths, **kwargs): - """Pins objects to local storage. - - Stores one or more IPFS objects from a given path locally to disk. - - .. code-block:: python - - >>> c.pin_add("QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d") - {'Pins': ['QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d']} - - Parameters - ---------- - path : str - Path to object(s) to be pinned - recursive : bool - Recursively pin the object linked to by the specified object(s) - - Returns - ------- - dict : List of IPFS objects that have been pinned - """ - #PY2: No support for kw-only parameters after glob parameters - if "recursive" in kwargs: - kwargs.setdefault("opts", {"recursive": kwargs.pop("recursive")}) - - args = (path,) + paths - return self._client.request('/pin/add', args, decoder='json', **kwargs) - - def pin_rm(self, path, *paths, **kwargs): - """Removes a pinned object from local storage. - - Removes the pin from the given object allowing it to be garbage - collected if needed. - - ..
code-block:: python - - >>> c.pin_rm('QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d') - {'Pins': ['QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d']} - - Parameters - ---------- - path : str - Path to object(s) to be unpinned - recursive : bool - Recursively unpin the object linked to by the specified object(s) - - Returns - ------- - dict : List of IPFS objects that have been unpinned - """ - #PY2: No support for kw-only parameters after glob parameters - if "recursive" in kwargs: - kwargs.setdefault("opts", {"recursive": kwargs["recursive"]}) - del kwargs["recursive"] - - args = (path,) + paths - return self._client.request('/pin/rm', args, decoder='json', **kwargs) - - def pin_ls(self, type="all", **kwargs): - """Lists objects pinned to local storage. - - By default, all pinned objects are returned, but the ``type`` flag or - arguments can restrict that to a specific pin type or to some specific - objects respectively. - - .. code-block:: python - - >>> c.pin_ls() - {'Keys': { - 'QmNNPMA1eGUbKxeph6yqV8ZmRkdVat … YMuz': {'Type': 'recursive'}, - 'QmNPZUCeSN5458Uwny8mXSWubjjr6J … kP5e': {'Type': 'recursive'}, - 'QmNg5zWpRMxzRAVg7FTQ3tUxVbKj8E … gHPz': {'Type': 'indirect'}, - … - 'QmNiuVapnYCrLjxyweHeuk6Xdqfvts … wCCe': {'Type': 'indirect'}}} - - Parameters - ---------- - type : str - The type of pinned keys to list. Can be: - - * ``"direct"`` - * ``"indirect"`` - * ``"recursive"`` - * ``"all"`` - - Returns - ------- - dict : Hashes of pinned IPFS objects and why they are pinned - """ - kwargs.setdefault("opts", {"type": type}) - - return self._client.request('/pin/ls', decoder='json', **kwargs) - - def pin_update(self, from_path, to_path, **kwargs): - """Replaces one pin with another. - - Updates one pin to another, making sure that all objects in the new pin - are local. Then removes the old pin. This is an optimized version of - first using :meth:`~ipfshttpclient.Client.pin_add` to add a new pin - for an object and then using :meth:`~ipfshttpclient.Client.pin_rm` to remove - the pin for the old object. - - .. code-block:: python - - >>> c.pin_update("QmXMqez83NU77ifmcPs5CkNRTMQksBLkyfBf4H5g1NZ52P", - ... "QmUykHAi1aSjMzHw3KmBoJjqRUQYNkFXm8K1y7ZsJxpfPH") - {"Pins": ["/ipfs/QmXMqez83NU77ifmcPs5CkNRTMQksBLkyfBf4H5g1NZ52P", - "/ipfs/QmUykHAi1aSjMzHw3KmBoJjqRUQYNkFXm8K1y7ZsJxpfPH"]} - - Parameters - ---------- - from_path : str - Path to the old object - to_path : str - Path to the new object to be pinned - unpin : bool - Should the pin of the old object be removed? (Default: ``True``) - - Returns - ------- - dict : List of IPFS objects affected by the pinning operation - """ - #PY2: No support for kw-only parameters after glob parameters - if "unpin" in kwargs: - kwargs.setdefault("opts", {"unpin": kwargs["unpin"]}) - del kwargs["unpin"] - - args = (from_path, to_path) - return self._client.request('/pin/update', args, decoder='json', - **kwargs) - - def pin_verify(self, path, *paths, **kwargs): - """Verify that recursive pins are complete. - - Scan the repo for pinned object graphs and check their integrity. - Issues will be reported back with a helpful human-readable error - message to aid in error recovery. This is useful to help recover - from datastore corruptions (such as when accidentally deleting - files added using the filestore backend). - - This function returns an iterator that needs to be closed using a context - manager (``with``-statement) or using the ``.close()`` method. - - ..
code-block:: python - - >>> with c.pin_verify("QmN…TTZ", verbose=True) as pin_verify_iter: - ... for item in pin_verify_iter: - ... print(item) - ... - {"Cid":"QmVkNdzCBukBRdpyFiKPyL2R15qPExMr9rV9RFV2kf9eeV","Ok":True} - {"Cid":"QmbPzQruAEFjUU3gQfupns6b8USr8VrD9H71GrqGDXQSxm","Ok":True} - {"Cid":"Qmcns1nUvbeWiecdGDPw8JxWeUfxCV8JKhTfgzs3F8JM4P","Ok":True} - … - - Parameters - ---------- - path : str - Path to object(s) to be checked - verbose : bool - Also report status of items that were OK? (Default: ``False``) - - Returns - ------- - iterable - """ - #PY2: No support for kw-only parameters after glob parameters - if "verbose" in kwargs: - kwargs.setdefault("opts", {"verbose": kwargs["verbose"]}) - del kwargs["verbose"] - - args = (path,) + paths - return self._client.request('/pin/verify', args, decoder='json', - stream=True, **kwargs) - - def repo_gc(self, **kwargs): - """Removes stored objects that are not pinned from the repo. - - .. code-block:: python - - >>> c.repo_gc() - [{'Key': 'QmNPXDC6wTXVmZ9Uoc8X1oqxRRJr4f1sDuyQuwaHG2mpW2'}, - {'Key': 'QmNtXbF3AjAk59gQKRgEdVabHcSsiPUnJwHnZKyj2x8Z3k'}, - {'Key': 'QmRVBnxUCsD57ic5FksKYadtyUbMsyo9KYQKKELajqAp4q'}, - … - {'Key': 'QmYp4TeCurXrhsxnzt5wqLqqUz8ZRg5zsc7GuUrUSDtwzP'}] - - Performs a garbage collection sweep of the local set of - stored objects and remove ones that are not pinned in order - to reclaim hard disk space. Returns the hashes of all collected - objects. - - Returns - ------- - dict : List of IPFS objects that have been removed - """ - return self._client.request('/repo/gc', decoder='json', **kwargs) - - def repo_stat(self, **kwargs): - """Displays the repo's status. - - Returns the number of objects in the repo and the repo's size, - version, and path. - - .. code-block:: python - - >>> c.repo_stat() - {'NumObjects': 354, - 'RepoPath': '…/.local/share/ipfs', - 'Version': 'fs-repo@4', - 'RepoSize': 13789310} - - Returns - ------- - dict : General information about the IPFS file repository - - +------------+-------------------------------------------------+ - | NumObjects | Number of objects in the local repo. | - +------------+-------------------------------------------------+ - | RepoPath | The path to the repo being currently used. | - +------------+-------------------------------------------------+ - | RepoSize | Size in bytes that the repo is currently using. | - +------------+-------------------------------------------------+ - | Version | The repo version. | - +------------+-------------------------------------------------+ - """ - return self._client.request('/repo/stat', decoder='json', **kwargs) - - def id(self, peer=None, **kwargs): - """Shows IPFS Node ID info. - - Returns the PublicKey, ProtocolVersion, ID, AgentVersion and - Addresses of the connected daemon or some other node. - - .. 
code-block:: python - - >>> c.id() - {'ID': 'QmVgNoP89mzpgEAAqK8owYoDEyB97MkcGvoWZir8otE9Uc', - 'PublicKey': 'CAASpgIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggE … BAAE=', - 'AgentVersion': 'go-libp2p/3.3.4', - 'ProtocolVersion': 'ipfs/0.1.0', - 'Addresses': [ - '/ip4/127.0.0.1/tcp/4001/ipfs/QmVgNoP89mzpgEAAqK8owYo … E9Uc', - '/ip4/10.1.0.172/tcp/4001/ipfs/QmVgNoP89mzpgEAAqK8owY … E9Uc', - '/ip4/172.18.0.1/tcp/4001/ipfs/QmVgNoP89mzpgEAAqK8owY … E9Uc', - '/ip6/::1/tcp/4001/ipfs/QmVgNoP89mzpgEAAqK8owYoDEyB97 … E9Uc', - '/ip6/fccc:7904:b05b:a579:957b:deef:f066:cad9/tcp/400 … E9Uc', - '/ip6/fd56:1966:efd8::212/tcp/4001/ipfs/QmVgNoP89mzpg … E9Uc', - '/ip6/fd56:1966:efd8:0:def1:34d0:773:48f/tcp/4001/ipf … E9Uc', - '/ip6/2001:db8:1::1/tcp/4001/ipfs/QmVgNoP89mzpgEAAqK8 … E9Uc', - '/ip4/77.116.233.54/tcp/4001/ipfs/QmVgNoP89mzpgEAAqK8 … E9Uc', - '/ip4/77.116.233.54/tcp/10842/ipfs/QmVgNoP89mzpgEAAqK … E9Uc']} - - Parameters - ---------- - peer : str - Peer.ID of the node to look up (local node if ``None``) - - Returns - ------- - dict : Information about the IPFS node - """ - args = (peer,) if peer is not None else () - return self._client.request('/id', args, decoder='json', **kwargs) - - def bootstrap(self, **kwargs): - """Compatibility alias for :meth:`~ipfshttpclient.Client.bootstrap_list`.""" - return self.bootstrap_list(**kwargs) - - def bootstrap_list(self, **kwargs): - """Returns the addresses of peers used during initial discovery of the - IPFS network. - - Peers are output in the format ``<multiaddr>/<peerID>``. - - .. code-block:: python - - >>> c.bootstrap_list() - {'Peers': [ - '/ip4/104.131.131.82/tcp/4001/ipfs/QmaCpDMGvV2BGHeYER … uvuJ', - '/ip4/104.236.176.52/tcp/4001/ipfs/QmSoLnSGccFuZQJzRa … ca9z', - '/ip4/104.236.179.241/tcp/4001/ipfs/QmSoLPppuBtQSGwKD … KrGM', - … - '/ip4/178.62.61.185/tcp/4001/ipfs/QmSoLMeWqB7YGVLJN3p … QBU3']} - - Returns - ------- - dict : List of known bootstrap peers - """ - return self._client.request('/bootstrap', decoder='json', **kwargs) - - def bootstrap_add(self, peer, *peers, **kwargs): - """Adds peers to the bootstrap list. - - Parameters - ---------- - peer : str - IPFS MultiAddr of a peer to add to the list - - Returns - ------- - dict - """ - args = (peer,) + peers - return self._client.request('/bootstrap/add', args, - decoder='json', **kwargs) - - def bootstrap_rm(self, peer, *peers, **kwargs): - """Removes peers from the bootstrap list. - - Parameters - ---------- - peer : str - IPFS MultiAddr of a peer to remove from the list - - Returns - ------- - dict - """ - args = (peer,) + peers - return self._client.request('/bootstrap/rm', args, - decoder='json', **kwargs) - - def swarm_peers(self, **kwargs): - """Returns the addresses & IDs of currently connected peers. - - .. code-block:: python - - >>> c.swarm_peers() - {'Strings': [ - '/ip4/101.201.40.124/tcp/40001/ipfs/QmZDYAhmMDtnoC6XZ … kPZc', - '/ip4/104.131.131.82/tcp/4001/ipfs/QmaCpDMGvV2BGHeYER … uvuJ', - '/ip4/104.223.59.174/tcp/4001/ipfs/QmeWdgoZezpdHz1PX8 … 1jB6', - … - '/ip6/fce3: … :f140/tcp/43901/ipfs/QmSoLnSGccFuZQJzRa … ca9z']} - - Returns - ------- - dict : List of multiaddrs of currently connected peers - """ - return self._client.request('/swarm/peers', decoder='json', **kwargs) - - def swarm_addrs(self, **kwargs): - """Returns the addresses of currently connected peers by peer id. - - ..
code-block:: python - - >>> pprint(c.swarm_addrs()) - {'Addrs': { - 'QmNMVHJTSZHTWMWBbmBrQgkA1hZPWYuVJx2DpSGESWW6Kn': [ - '/ip4/10.1.0.1/tcp/4001', - '/ip4/127.0.0.1/tcp/4001', - '/ip4/51.254.25.16/tcp/4001', - '/ip6/2001:41d0:b:587:3cae:6eff:fe40:94d8/tcp/4001', - '/ip6/2001:470:7812:1045::1/tcp/4001', - '/ip6/::1/tcp/4001', - '/ip6/fc02:2735:e595:bb70:8ffc:5293:8af8:c4b7/tcp/4001', - '/ip6/fd00:7374:6172:100::1/tcp/4001', - '/ip6/fd20:f8be:a41:0:c495:aff:fe7e:44ee/tcp/4001', - '/ip6/fd20:f8be:a41::953/tcp/4001'], - 'QmNQsK1Tnhe2Uh2t9s49MJjrz7wgPHj4VyrZzjRe8dj7KQ': [ - '/ip4/10.16.0.5/tcp/4001', - '/ip4/127.0.0.1/tcp/4001', - '/ip4/172.17.0.1/tcp/4001', - '/ip4/178.62.107.36/tcp/4001', - '/ip6/::1/tcp/4001'], - … - }} - - Returns - ------- - dict : Multiaddrs of peers by peer id - """ - return self._client.request('/swarm/addrs', decoder='json', **kwargs) - - def swarm_connect(self, address, *addresses, **kwargs): - """Opens a connection to a given address. - - This will open a new direct connection to a peer address. The address - format is an IPFS multiaddr:: - - /ip4/104.131.131.82/tcp/4001/ipfs/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ - - .. code-block:: python - - >>> c.swarm_connect("/ip4/104.131.131.82/tcp/4001/ipfs/Qma … uvuJ") - {'Strings': ['connect QmaCpDMGvV2BGHeYERUEnRQAwe3 … uvuJ success']} - - Parameters - ---------- - address : str - Address of peer to connect to - - Returns - ------- - dict : Textual connection status report - """ - args = (address,) + addresses - return self._client.request('/swarm/connect', args, - decoder='json', **kwargs) - - def swarm_disconnect(self, address, *addresses, **kwargs): - """Closes the connection to a given address. - - This will close a connection to a peer address. The address format is - an IPFS multiaddr:: - - /ip4/104.131.131.82/tcp/4001/ipfs/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ - - The disconnect is not permanent; if IPFS needs to talk to that address - later, it will reconnect. - - .. code-block:: python - - >>> c.swarm_disconnect("/ip4/104.131.131.82/tcp/4001/ipfs/Qm … uJ") - {'Strings': ['disconnect QmaCpDMGvV2BGHeYERUEnRQA … uvuJ success']} - - Parameters - ---------- - address : str - Address of peer to disconnect from - - Returns - ------- - dict : Textual connection status report - """ - args = (address,) + addresses - return self._client.request('/swarm/disconnect', args, - decoder='json', **kwargs) - - def swarm_filters_add(self, address, *addresses, **kwargs): - """Adds a given multiaddr filter to the filter list. - - This will add an address filter to the daemons swarm. Filters applied - this way will not persist daemon reboots, to achieve that, add your - filters to the configuration file. - - .. code-block:: python - - >>> c.swarm_filters_add("/ip4/192.168.0.0/ipcidr/16") - {'Strings': ['/ip4/192.168.0.0/ipcidr/16']} - - Parameters - ---------- - address : str - Multiaddr to filter - - Returns - ------- - dict : List of swarm filters added - """ - args = (address,) + addresses - return self._client.request('/swarm/filters/add', args, - decoder='json', **kwargs) - - def swarm_filters_rm(self, address, *addresses, **kwargs): - """Removes a given multiaddr filter from the filter list. - - This will remove an address filter from the daemons swarm. Filters - removed this way will not persist daemon reboots, to achieve that, - remove your filters from the configuration file. - - .. 
code-block:: python - - >>> c.swarm_filters_rm("/ip4/192.168.0.0/ipcidr/16") - {'Strings': ['/ip4/192.168.0.0/ipcidr/16']} - - Parameters - ---------- - address : str - Multiaddr filter to remove - - Returns - ------- - dict : List of swarm filters removed - """ - args = (address,) + addresses - return self._client.request('/swarm/filters/rm', args, - decoder='json', **kwargs) - - def dht_query(self, peer_id, *peer_ids, **kwargs): - """Finds the closest Peer IDs to a given Peer ID by querying the DHT. - - .. code-block:: python - - >>> c.dht_query("/ip4/104.131.131.82/tcp/4001/ipfs/QmaCpDM … uvuJ") - [{'ID': 'QmPkFbxAQ7DeKD5VGSh9HQrdS574pyNzDmxJeGrRJxoucF', - 'Extra': '', 'Type': 2, 'Responses': None}, - {'ID': 'QmR1MhHVLJSLt9ZthsNNhudb1ny1WdhY4FPW21ZYFWec4f', - 'Extra': '', 'Type': 2, 'Responses': None}, - {'ID': 'Qmcwx1K5aVme45ab6NYWb52K2TFBeABgCLccC7ntUeDsAs', - 'Extra': '', 'Type': 2, 'Responses': None}, - … - {'ID': 'QmYYy8L3YD1nsF4xtt4xmsc14yqvAAnKksjo3F3iZs5jPv', - 'Extra': '', 'Type': 1, 'Responses': []}] - - Parameters - ---------- - peer_id : str - The peerID to run the query against - - Returns - ------- - dict : List of peers IDs - """ - args = (peer_id,) + peer_ids - return self._client.request('/dht/query', args, - decoder='json', **kwargs) - - def dht_findprovs(self, multihash, *multihashes, **kwargs): - """Finds peers in the DHT that can provide a specific value. - - .. code-block:: python - - >>> c.dht_findprovs("QmNPXDC6wTXVmZ9Uoc8X1oqxRRJr4f1sDuyQu … mpW2") - [{'ID': 'QmaxqKpiYNr62uSFBhxJAMmEMkT6dvc3oHkrZNpH2VMTLZ', - 'Extra': '', 'Type': 6, 'Responses': None}, - {'ID': 'QmaK6Aj5WXkfnWGoWq7V8pGUYzcHPZp4jKQ5JtmRvSzQGk', - 'Extra': '', 'Type': 6, 'Responses': None}, - {'ID': 'QmdUdLu8dNvr4MVW1iWXxKoQrbG6y1vAVWPdkeGK4xppds', - 'Extra': '', 'Type': 6, 'Responses': None}, - … - {'ID': '', 'Extra': '', 'Type': 4, 'Responses': [ - {'ID': 'QmVgNoP89mzpgEAAqK8owYoDEyB97Mk … E9Uc', 'Addrs': None} - ]}, - {'ID': 'QmaxqKpiYNr62uSFBhxJAMmEMkT6dvc3oHkrZNpH2VMTLZ', - 'Extra': '', 'Type': 1, 'Responses': [ - {'ID': 'QmSHXfsmN3ZduwFDjeqBn1C8b1tcLkxK6yd … waXw', 'Addrs': [ - '/ip4/127.0.0.1/tcp/4001', - '/ip4/172.17.0.8/tcp/4001', - '/ip6/::1/tcp/4001', - '/ip4/52.32.109.74/tcp/1028' - ]} - ]}] - - Parameters - ---------- - multihash : str - The DHT key to find providers for - - Returns - ------- - dict : List of provider Peer IDs - """ - args = (multihash,) + multihashes - return self._client.request('/dht/findprovs', args, - decoder='json', **kwargs) - - def dht_findpeer(self, peer_id, *peer_ids, **kwargs): - """Queries the DHT for all of the associated multiaddresses. - - .. 
code-block:: python - - >>> c.dht_findpeer("QmaxqKpiYNr62uSFBhxJAMmEMkT6dvc3oHkrZN … MTLZ") - [{'ID': 'QmfVGMFrwW6AV6fTWmD6eocaTybffqAvkVLXQEFrYdk6yc', - 'Extra': '', 'Type': 6, 'Responses': None}, - {'ID': 'QmTKiUdjbRjeN9yPhNhG1X38YNuBdjeiV9JXYWzCAJ4mj5', - 'Extra': '', 'Type': 6, 'Responses': None}, - {'ID': 'QmTGkgHSsULk8p3AKTAqKixxidZQXFyF7mCURcutPqrwjQ', - 'Extra': '', 'Type': 6, 'Responses': None}, - … - {'ID': '', 'Extra': '', 'Type': 2, - 'Responses': [ - {'ID': 'QmaxqKpiYNr62uSFBhxJAMmEMkT6dvc3oHkrZNpH2VMTLZ', - 'Addrs': [ - '/ip4/10.9.8.1/tcp/4001', - '/ip6/::1/tcp/4001', - '/ip4/164.132.197.107/tcp/4001', - '/ip4/127.0.0.1/tcp/4001']} - ]}] - - Parameters - ---------- - peer_id : str - The ID of the peer to search for - - Returns - ------- - dict : List of multiaddrs - """ - args = (peer_id,) + peer_ids - return self._client.request('/dht/findpeer', args, - decoder='json', **kwargs) - - def dht_get(self, key, *keys, **kwargs): - """Queries the DHT for its best value related to a given key. - - There may be several different values for a given key stored in the - DHT; in this context *best* means the record that is most desirable. - There is no one metric for *best*: it depends entirely on the key type. - For IPNS, *best* is the record that is both valid and has the highest - sequence number (freshest). Different key types may specify other rules - for what they consider to be the *best*. - - Parameters - ---------- - key : str - One or more keys whose values should be looked up - - Returns - ------- - str - """ - args = (key,) + keys - res = self._client.request('/dht/get', args, decoder='json', **kwargs) - - if isinstance(res, dict) and "Extra" in res: - return res["Extra"] - else: - for r in res: - if "Extra" in r and len(r["Extra"]) > 0: - return r["Extra"] - raise exceptions.Error("empty response from DHT") - - def dht_put(self, key, value, **kwargs): - """Writes a key/value pair to the DHT. - - Given a key of the form ``/foo/bar`` and a value of any form, this will - write that value to the DHT with that key. - - Keys have two parts: a keytype (foo) and the key name (bar). IPNS uses - the ``/ipns/`` keytype, and expects the key name to be a Peer ID. IPNS - entries are formatted with a special structure. - - You may only use keytypes that are supported in your ``ipfs`` binary: - ``go-ipfs`` currently only supports the ``/ipns/`` keytype. Unless you - have a relatively deep understanding of the key's internal structure, - you likely want to be using :meth:`~ipfshttpclient.Client.name_publish` - instead. - - Value is arbitrary text. - - .. code-block:: python - - >>> c.dht_put("QmVgNoP89mzpgEAAqK8owYoDEyB97Mkc … E9Uc", "test123") - [{'ID': 'QmfLy2aqbhU1RqZnGQyqHSovV8tDufLUaPfN1LNtg5CvDZ', - 'Extra': '', 'Type': 5, 'Responses': None}, - {'ID': 'QmZ5qTkNvvZ5eFq9T4dcCEK7kX8L7iysYEpvQmij9vokGE', - 'Extra': '', 'Type': 5, 'Responses': None}, - {'ID': 'QmYqa6QHCbe6eKiiW6YoThU5yBy8c3eQzpiuW22SgVWSB8', - 'Extra': '', 'Type': 6, 'Responses': None}, - … - {'ID': 'QmP6TAKVDCziLmx9NV8QGekwtf7ZMuJnmbeHMjcfoZbRMd', - 'Extra': '', 'Type': 1, 'Responses': []}] - - Parameters - ---------- - key : str - A unique identifier - value : str - Arbitrary text to associate with the input (2048 bytes or less) - - Returns - ------- - list - """ - args = (key, value) - return self._client.request('/dht/put', args, decoder='json', **kwargs) - - def ping(self, peer, *peers, **kwargs): - """Provides round-trip latency information for the routing system.
- - Finds nodes via the routing system, sends pings, waits for pongs, - and prints out round-trip latency information. - - .. code-block:: python - - >>> c.ping("QmTzQ1JRkWErjk39mryYw2WVaphAZNAREyMchXzYQ7c15n") - [{'Success': True, 'Time': 0, - 'Text': 'Looking up peer QmTzQ1JRkWErjk39mryYw2WVaphAZN … c15n'}, - {'Success': False, 'Time': 0, - 'Text': 'Peer lookup error: routing: not found'}] - - Parameters - ---------- - peer : str - ID of peer to be pinged - count : int - Number of ping messages to send (Default: ``10``) - - Returns - ------- - list : Progress reports from the ping - """ - #PY2: No support for kw-only parameters after glob parameters - if "count" in kwargs: - kwargs.setdefault("opts", {"count": kwargs["count"]}) - del kwargs["count"] - - args = (peer,) + peers - return self._client.request('/ping', args, decoder='json', **kwargs) - - def config(self, key, value=None, **kwargs): - """Controls configuration variables. - - .. code-block:: python - - >>> c.config("Addresses.Gateway") - {'Key': 'Addresses.Gateway', 'Value': '/ip4/127.0.0.1/tcp/8080'} - >>> c.config("Addresses.Gateway", "/ip4/127.0.0.1/tcp/8081") - {'Key': 'Addresses.Gateway', 'Value': '/ip4/127.0.0.1/tcp/8081'} - - Parameters - ---------- - key : str - The key of the configuration entry (e.g. "Addresses.API") - value : dict - The value to set the configuration entry to - - Returns - ------- - dict : Requested/updated key and its (new) value - """ - args = (key, value) - return self._client.request('/config', args, decoder='json', **kwargs) - - def config_show(self, **kwargs): - """Returns a dict containing the server's configuration. - - .. warning:: - - The configuration file contains private key data that must be - handled with care. - - .. code-block:: python - - >>> config = c.config_show() - >>> config['Addresses'] - {'API': '/ip4/127.0.0.1/tcp/5001', - 'Gateway': '/ip4/127.0.0.1/tcp/8080', - 'Swarm': ['/ip4/0.0.0.0/tcp/4001', '/ip6/::/tcp/4001']}, - >>> config['Discovery'] - {'MDNS': {'Enabled': True, 'Interval': 10}} - - Returns - ------- - dict : The entire IPFS daemon configuration - """ - return self._client.request('/config/show', decoder='json', **kwargs) - - def config_replace(self, *args, **kwargs): - """Replaces the existing config with a user-defined config. - - Make sure to back up the config file first if neccessary, as this - operation can't be undone. - """ - return self._client.request('/config/replace', args, - decoder='json', **kwargs) - - def log_level(self, subsystem, level, **kwargs): - r"""Changes the logging output of a running daemon. - - .. code-block:: python - - >>> c.log_level("path", "info") - {'Message': "Changed log level of 'path' to 'info'\n"} - - Parameters - ---------- - subsystem : str - The subsystem logging identifier (Use ``"all"`` for all subsystems) - level : str - The desired logging level. Must be one of: - - * ``"debug"`` - * ``"info"`` - * ``"warning"`` - * ``"error"`` - * ``"fatal"`` - * ``"panic"`` - - Returns - ------- - dict : Status message - """ - args = (subsystem, level) - return self._client.request('/log/level', args, - decoder='json', **kwargs) - - def log_ls(self, **kwargs): - """Lists the logging subsystems of a running daemon. - - .. 
code-block:: python - - >>> c.log_ls() - {'Strings': [ - 'github.com/ipfs/go-libp2p/p2p/host', 'net/identify', - 'merkledag', 'providers', 'routing/record', 'chunk', 'mfs', - 'ipns-repub', 'flatfs', 'ping', 'mockrouter', 'dagio', - 'cmds/files', 'blockset', 'engine', 'mocknet', 'config', - 'commands/http', 'cmd/ipfs', 'command', 'conn', 'gc', - 'peerstore', 'core', 'coreunix', 'fsrepo', 'core/server', - 'boguskey', 'github.com/ipfs/go-libp2p/p2p/host/routed', - 'diagnostics', 'namesys', 'fuse/ipfs', 'node', 'secio', - 'core/commands', 'supernode', 'mdns', 'path', 'table', - 'swarm2', 'peerqueue', 'mount', 'fuse/ipns', 'blockstore', - 'github.com/ipfs/go-libp2p/p2p/host/basic', 'lock', 'nat', - 'importer', 'corerepo', 'dht.pb', 'pin', 'bitswap_network', - 'github.com/ipfs/go-libp2p/p2p/protocol/relay', 'peer', - 'transport', 'dht', 'offlinerouting', 'tarfmt', 'eventlog', - 'ipfsaddr', 'github.com/ipfs/go-libp2p/p2p/net/swarm/addr', - 'bitswap', 'reprovider', 'supernode/proxy', 'crypto', 'tour', - 'commands/cli', 'blockservice']} - - Returns - ------- - dict : List of daemon logging subsystems - """ - return self._client.request('/log/ls', decoder='json', **kwargs) - - def log_tail(self, **kwargs): - r"""Reads log outputs as they are written. - - This function returns an iterator needs to be closed using a context - manager (``with``-statement) or using the ``.close()`` method. - - .. code-block:: python - - >>> with c.log_tail() as log_tail_iter: - ... for item in log_tail_iter: - ... print(item) - ... - {"event":"updatePeer","system":"dht", - "peerID":"QmepsDPxWtLDuKvEoafkpJxGij4kMax11uTH7WnKqD25Dq", - "session":"7770b5e0-25ec-47cd-aa64-f42e65a10023", - "time":"2016-08-22T13:25:27.43353297Z"} - {"event":"handleAddProviderBegin","system":"dht", - "peer":"QmepsDPxWtLDuKvEoafkpJxGij4kMax11uTH7WnKqD25Dq", - "session":"7770b5e0-25ec-47cd-aa64-f42e65a10023", - "time":"2016-08-22T13:25:27.433642581Z"} - {"event":"handleAddProvider","system":"dht","duration":91704, - "key":"QmNT9Tejg6t57Vs8XM2TVJXCwevWiGsZh3kB4HQXUZRK1o", - "peer":"QmepsDPxWtLDuKvEoafkpJxGij4kMax11uTH7WnKqD25Dq", - "session":"7770b5e0-25ec-47cd-aa64-f42e65a10023", - "time":"2016-08-22T13:25:27.433747513Z"} - {"event":"updatePeer","system":"dht", - "peerID":"QmepsDPxWtLDuKvEoafkpJxGij4kMax11uTH7WnKqD25Dq", - "session":"7770b5e0-25ec-47cd-aa64-f42e65a10023", - "time":"2016-08-22T13:25:27.435843012Z"} - … - - Returns - ------- - iterable - """ - return self._client.request('/log/tail', decoder='json', - stream=True, **kwargs) - - def version(self, **kwargs): - """Returns the software version of the currently connected node. - - .. code-block:: python - - >>> c.version() - {'Version': '0.4.3-rc2', 'Repo': '4', 'Commit': '', - 'System': 'amd64/linux', 'Golang': 'go1.6.2'} - - Returns - ------- - dict : Daemon and system version information - """ - return self._client.request('/version', decoder='json', **kwargs) - - def files_cp(self, source, dest, **kwargs): - """Copies files within the MFS. - - Due to the nature of IPFS this will not actually involve any of the - file's content being copied. - - .. 
code-block:: python - - >>> c.files_ls("/") - {'Entries': [ - {'Size': 0, 'Hash': '', 'Name': 'Software', 'Type': 0}, - {'Size': 0, 'Hash': '', 'Name': 'test', 'Type': 0} - ]} - >>> c.files_cp("/test", "/bla") - '' - >>> c.files_ls("/") - {'Entries': [ - {'Size': 0, 'Hash': '', 'Name': 'Software', 'Type': 0}, - {'Size': 0, 'Hash': '', 'Name': 'bla', 'Type': 0}, - {'Size': 0, 'Hash': '', 'Name': 'test', 'Type': 0} - ]} - - Parameters - ---------- - source : str - Filepath within the MFS to copy from - dest : str - Destination filepath with the MFS to which the file will be - copied to - """ - args = (source, dest) - return self._client.request('/files/cp', args, **kwargs) - - def files_ls(self, path, **kwargs): - """Lists contents of a directory in the MFS. - - .. code-block:: python - - >>> c.files_ls("/") - {'Entries': [ - {'Size': 0, 'Hash': '', 'Name': 'Software', 'Type': 0} - ]} - - Parameters - ---------- - path : str - Filepath within the MFS - - Returns - ------- - dict : Directory entries - """ - args = (path,) - return self._client.request('/files/ls', args, - decoder='json', **kwargs) - - def files_mkdir(self, path, parents=False, **kwargs): - """Creates a directory within the MFS. - - .. code-block:: python - - >>> c.files_mkdir("/test") - b'' - - Parameters - ---------- - path : str - Filepath within the MFS - parents : bool - Create parent directories as needed and do not raise an exception - if the requested directory already exists - """ - kwargs.setdefault("opts", {"parents": parents}) - - args = (path,) - return self._client.request('/files/mkdir', args, **kwargs) - - def files_stat(self, path, **kwargs): - """Returns basic ``stat`` information for an MFS file - (including its hash). - - .. code-block:: python - - >>> c.files_stat("/test") - {'Hash': 'QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn', - 'Size': 0, 'CumulativeSize': 4, 'Type': 'directory', 'Blocks': 0} - - Parameters - ---------- - path : str - Filepath within the MFS - - Returns - ------- - dict : MFS file information - """ - args = (path,) - return self._client.request('/files/stat', args, - decoder='json', **kwargs) - - def files_rm(self, path, recursive=False, **kwargs): - """Removes a file from the MFS. - - .. code-block:: python - - >>> c.files_rm("/bla/file") - b'' - - Parameters - ---------- - path : str - Filepath within the MFS - recursive : bool - Recursively remove directories? - """ - kwargs.setdefault("opts", {"recursive": recursive}) - - args = (path,) - return self._client.request('/files/rm', args, **kwargs) - - def files_read(self, path, offset=0, count=None, **kwargs): - """Reads a file stored in the MFS. - - .. code-block:: python - - >>> c.files_read("/bla/file") - b'hi' - - Parameters - ---------- - path : str - Filepath within the MFS - offset : int - Byte offset at which to begin reading at - count : int - Maximum number of bytes to read - - Returns - ------- - str : MFS file contents - """ - opts = {"offset": offset} - if count is not None: - opts["count"] = count - kwargs.setdefault("opts", opts) - - args = (path,) - return self._client.request('/files/read', args, **kwargs) - - def files_write(self, path, file, offset=0, create=False, truncate=False, - count=None, **kwargs): - """Writes to a mutable file in the MFS. - - .. 
code-block:: python - - >>> c.files_write("/test/file", io.BytesIO(b"hi"), create=True) - b'' - - Parameters - ---------- - path : str - Filepath within the MFS - file : io.RawIOBase - IO stream object with data that should be written - offset : int - Byte offset at which to begin writing at - create : bool - Create the file if it does not exist - truncate : bool - Truncate the file to size zero before writing - count : int - Maximum number of bytes to read from the source ``file`` - """ - opts = {"offset": offset, "create": create, "truncate": truncate} - if count is not None: - opts["count"] = count - kwargs.setdefault("opts", opts) - - args = (path,) - body, headers = multipart.stream_files(file, self.chunk_size) - return self._client.request('/files/write', args, - data=body, headers=headers, **kwargs) - - def files_mv(self, source, dest, **kwargs): - """Moves files and directories within the MFS. - - .. code-block:: python - - >>> c.files_mv("/test/file", "/bla/file") - b'' - - Parameters - ---------- - source : str - Existing filepath within the MFS - dest : str - Destination to which the file will be moved in the MFS - """ - args = (source, dest) - return self._client.request('/files/mv', args, **kwargs) - - def shutdown(self): - """Stop the connected IPFS daemon instance. - - Sending any further requests after this will fail with - ``ipfshttpclient.exceptions.ConnectionError``, until you start another IPFS - daemon instance. - """ - try: - return self._client.request('/shutdown') - except exceptions.ConnectionError: - # Sometimes the daemon kills the connection before sending a - # response causing an incorrect `ConnectionError` to bubble - pass - - ########### - # HELPERS # - ########### - - @utils.return_field('Hash') - def add_bytes(self, data, **kwargs): - """Adds a set of bytes as a file to IPFS. - - .. code-block:: python - - >>> c.add_bytes(b"Mary had a little lamb") - 'QmZfF6C9j4VtoCsTp4KSrhYH47QMd3DNXVZBKaxJdhaPab' - - Also accepts and will stream generator objects. - - Parameters - ---------- - data : bytes - Content to be added as a file - - Returns - ------- - str : Hash of the added IPFS object - """ - body, headers = multipart.stream_bytes(data, self.chunk_size) - return self._client.request('/add', decoder='json', - data=body, headers=headers, **kwargs) - - @utils.return_field('Hash') - def add_str(self, string, **kwargs): - """Adds a Python string as a file to IPFS. - - .. code-block:: python - - >>> c.add_str(u"Mary had a little lamb") - 'QmZfF6C9j4VtoCsTp4KSrhYH47QMd3DNXVZBKaxJdhaPab' - - Also accepts and will stream generator objects. - - Parameters - ---------- - string : str - Content to be added as a file - - Returns - ------- - str : Hash of the added IPFS object - """ - body, headers = multipart.stream_text(string, self.chunk_size) - return self._client.request('/add', decoder='json', - data=body, headers=headers, **kwargs) - - def add_json(self, json_obj, **kwargs): - """Adds a json-serializable Python dict as a json file to IPFS. - - .. code-block:: python - - >>> c.add_json({'one': 1, 'two': 2, 'three': 3}) - 'QmVz9g7m5u3oHiNKHj2CJX1dbG1gtismRS3g9NaPBBLbob' - - Parameters - ---------- - json_obj : dict - A json-serializable Python dictionary - - Returns - ------- - str : Hash of the added IPFS object - """ - return self.add_bytes(encoding.Json().encode(json_obj), **kwargs) - - def get_json(self, multihash, **kwargs): - """Loads a json object from IPFS. - - .. 
code-block:: python - - >>> c.get_json('QmVz9g7m5u3oHiNKHj2CJX1dbG1gtismRS3g9NaPBBLbob') - {'one': 1, 'two': 2, 'three': 3} - - Parameters - ---------- - multihash : str - Multihash of the IPFS object to load - - Returns - ------- - object : Deserialized IPFS JSON object value - """ - return self.cat(multihash, decoder='json', **kwargs) - - def add_pyobj(self, py_obj, **kwargs): - """Adds a picklable Python object as a file to IPFS. - - .. deprecated:: 0.4.2 - The ``*_pyobj`` APIs allow for arbitrary code execution if abused. - Either switch to :meth:`~ipfshttpclient.Client.add_json` or use - ``client.add_bytes(pickle.dumps(py_obj))`` instead. - - Please see :meth:`~ipfshttpclient.Client.get_pyobj` for the - **security risks** of using these methods! - - .. code-block:: python - - >>> c.add_pyobj([0, 1.0, 2j, '3', 4e5]) - 'QmWgXZSUTNNDD8LdkdJ8UXSn55KfFnNvTP1r7SyaQd74Ji' - - Parameters - ---------- - py_obj : object - A picklable Python object - - Returns - ------- - str : Hash of the added IPFS object - """ - warnings.warn("Using `*_pyobj` on untrusted data is a security risk", - DeprecationWarning) - return self.add_bytes(encoding.Pickle().encode(py_obj), **kwargs) - - def get_pyobj(self, multihash, **kwargs): - """Loads a pickled Python object from IPFS. - - .. deprecated:: 0.4.2 - The ``*_pyobj`` APIs allow for arbitrary code execution if abused. - Either switch to :meth:`~ipfshttpclient.Client.get_json` or use - ``pickle.loads(client.cat(multihash))`` instead. - - .. caution:: - - The pickle module is not intended to be secure against erroneous or - maliciously constructed data. Never unpickle data received from an - untrusted or unauthenticated source. - - Please **read** - `this article <https://www.cs.uic.edu/%7Es/musings/pickle/>`_ to - understand the security risks of using this method! - - .. code-block:: python - - >>> c.get_pyobj('QmWgXZSUTNNDD8LdkdJ8UXSn55KfFnNvTP1r7SyaQd74Ji') - [0, 1.0, 2j, '3', 400000.0] - - Parameters - ---------- - multihash : str - Multihash of the IPFS object to load - - Returns - ------- - object : Deserialized IPFS Python object - """ - warnings.warn("Using `*_pyobj` on untrusted data is a security risk", - DeprecationWarning) - return self.cat(multihash, decoder='pickle', **kwargs) - - def pubsub_ls(self, **kwargs): - """Lists subscribed topics by name - - This method returns data that contains a list of - all topics the user is subscribed to. In order - to subscribe to a topic pubsub_sub must be called. - - .. code-block:: python - - # subscribe to a channel - >>> with c.pubsub_sub("hello") as sub: - ... c.pubsub_ls() - { - 'Strings' : ["hello"] - } - - Returns - ------- - dict : Dictionary with the key "Strings" who's value is an array of - topics we are subscribed to - """ - return self._client.request('/pubsub/ls', decoder='json', **kwargs) - - def pubsub_peers(self, topic=None, **kwargs): - """List the peers we are pubsubbing with. - - Lists the id's of other IPFS users who we - are connected to via some topic. Without specifying - a topic, IPFS peers from all subscribed topics - will be returned in the data. If a topic is specified - only the IPFS id's of the peers from the specified - topic will be returned in the data. - - .. code-block:: python - - >>> c.pubsub_peers() - {'Strings': - [ - 'QmPbZ3SDgmTNEB1gNSE9DEf4xT8eag3AFn5uo7X39TbZM8', - 'QmQKiXYzoFpiGZ93DaFBFDMDWDJCRjXDARu4wne2PRtSgA', - ... 
- 'QmepgFW7BHEtU4pZJdxaNiv75mKLLRQnPi1KaaXmQN4V1a' - ] - } - - ## with a topic - - # subscribe to a channel - >>> with c.pubsub_sub('hello') as sub: - ... c.pubsub_peers(topic='hello') - {'String': - [ - 'QmPbZ3SDgmTNEB1gNSE9DEf4xT8eag3AFn5uo7X39TbZM8', - ... - # other peers connected to the same channel - ] - } - - Parameters - ---------- - topic : str - The topic to list connected peers of - (defaults to None which lists peers for all topics) - - Returns - ------- - dict : Dictionary with the ke "Strings" who's value is id of IPFS - peers we're pubsubbing with - """ - args = (topic,) if topic is not None else () - return self._client.request('/pubsub/peers', args, - decoder='json', **kwargs) - - def pubsub_pub(self, topic, payload, **kwargs): - """Publish a message to a given pubsub topic - - Publishing will publish the given payload (string) to - everyone currently subscribed to the given topic. - - All data (including the id of the publisher) is automatically - base64 encoded when published. - - .. code-block:: python - - # publishes the message 'message' to the topic 'hello' - >>> c.pubsub_pub('hello', 'message') - [] - - Parameters - ---------- - topic : str - Topic to publish to - payload : Data to be published to the given topic - - Returns - ------- - list : empty list - """ - args = (topic, payload) - return self._client.request('/pubsub/pub', args, - decoder='json', **kwargs) - - def pubsub_sub(self, topic, discover=False, **kwargs): - """Subscribe to mesages on a given topic - - Subscribing to a topic in IPFS means anytime - a message is published to a topic, the subscribers - will be notified of the publication. - - The connection with the pubsub topic is opened and read. - The Subscription returned should be used inside a context - manager to ensure that it is closed properly and not left - hanging. - - .. code-block:: python - - >>> sub = c.pubsub_sub('testing') - >>> with c.pubsub_sub('testing') as sub: - # publish a message 'hello' to the topic 'testing' - ... c.pubsub_pub('testing', 'hello') - ... for message in sub: - ... print(message) - ... # Stop reading the subscription after - ... # we receive one publication - ... break - {'from': '<base64encoded IPFS id>', - 'data': 'aGVsbG8=', - 'topicIDs': ['testing']} - - # NOTE: in order to receive published data - # you must already be subscribed to the topic at publication - # time. - - Parameters - ---------- - topic : str - Name of a topic to subscribe to - - discover : bool - Try to discover other peers subscibed to the same topic - (defaults to False) - - Returns - ------- - Generator wrapped in a context - manager that maintains a connection - stream to the given topic. - """ - args = (topic, discover) - return SubChannel(self._client.request('/pubsub/sub', args, - stream=True, decoder='json')) diff --git a/ipfshttpclient/client/__init__.py b/ipfshttpclient/client/__init__.py new file mode 100644 index 00000000..f6b8dc62 --- /dev/null +++ b/ipfshttpclient/client/__init__.py @@ -0,0 +1,202 @@ +# -*- coding: utf-8 -*- +"""IPFS API Bindings for Python. + +Classes: + + * Client – a TCP client for interacting with an IPFS daemon +""" +from __future__ import absolute_import + +import os +import warnings + +DEFAULT_HOST = str(os.environ.get("PY_IPFS_HTTP_CLIENT_DEFAULT_HOST", 'localhost')) +DEFAULT_PORT = int(os.environ.get("PY_IPFS_HTTP_CLIENT_DEFAULT_PORT", 5001)) +DEFAULT_BASE = str(os.environ.get("PY_IPFS_HTTP_CLIENT_DEFAULT_BASE", 'api/v0')) + +VERSION_MINIMUM = "0.4.3" +VERSION_MAXIMUM = "0.5.0" + +from . 
import base  # required: `Client` below references `base.SectionProperty`
+from . 
import bitswap +from . import block +from . import bootstrap +from . import config +#TODO: `from . import dag` +from . import dht +from . import files +from . import key +from . import miscellaneous +from . import name +from . import object +from . import pin +from . import pubsub +from . import repo +#TODO: `from . import stats` +from . import swarm +from . import unstable + +from .. import encoding, exceptions, multipart, utils + + +def assert_version(version, minimum=VERSION_MINIMUM, maximum=VERSION_MAXIMUM): + """Make sure that the given daemon version is supported by this client + version. + + Raises + ------ + ~ipfshttpclient.exceptions.VersionMismatch + + Parameters + ---------- + version : str + The version of an IPFS daemon. + minimum : str + The minimal IPFS version to allow. + maximum : str + The maximum IPFS version to allow. + """ + # Convert version strings to integer tuples + version = list(map(int, version.split('-', 1)[0].split('.'))) + minimum = list(map(int, minimum.split('-', 1)[0].split('.'))) + maximum = list(map(int, maximum.split('-', 1)[0].split('.'))) + + if minimum > version or version >= maximum: + raise exceptions.VersionMismatch(version, minimum, maximum) + + +def connect(host=DEFAULT_HOST, port=DEFAULT_PORT, base=DEFAULT_BASE, + chunk_size=multipart.default_chunk_size, **defaults): + """Create a new :class:`~ipfshttpclient.Client` instance and connect to the + daemon to validate that its version is supported. + + Raises + ------ + ~ipfshttpclient.exceptions.VersionMismatch + ~ipfshttpclient.exceptions.ErrorResponse + ~ipfshttpclient.exceptions.ConnectionError + ~ipfshttpclient.exceptions.ProtocolError + ~ipfshttpclient.exceptions.StatusError + ~ipfshttpclient.exceptions.TimeoutError + + + All parameters are identical to those passed to the constructor of the + :class:`~ipfshttpclient.Client` class. + + Returns + ------- + ~ipfshttpclient.Client + """ + # Create client instance + client = Client(host, port, base, chunk_size, **defaults) + + # Query version number from daemon and validate it + assert_version(client.version()['Version']) + + return client + + +class Client(files.Base, miscellaneous.Base): + bitswap = base.SectionProperty(bitswap.Section) + block = base.SectionProperty(block.Section) + bootstrap = base.SectionProperty(bootstrap.Section) + config = base.SectionProperty(config.Section) + dht = base.SectionProperty(dht.Section) + key = base.SectionProperty(key.Section) + name = base.SectionProperty(name.Section) + object = base.SectionProperty(object.Section) + pin = base.SectionProperty(pin.Section) + pubsub = base.SectionProperty(pubsub.Section) + repo = base.SectionProperty(repo.Section) + swarm = base.SectionProperty(swarm.Section) + unstable = base.SectionProperty(unstable.Section) + + + ########### + # HELPERS # + ########### + + @utils.return_field('Hash') + def add_bytes(self, data, **kwargs): + """Adds a set of bytes as a file to IPFS. + + .. code-block:: python + + >>> client.add_bytes(b"Mary had a little lamb") + 'QmZfF6C9j4VtoCsTp4KSrhYH47QMd3DNXVZBKaxJdhaPab' + + Also accepts and will stream generator objects. + + Parameters + ---------- + data : bytes + Content to be added as a file + + Returns + ------- + str : Hash of the added IPFS object + """ + body, headers = multipart.stream_bytes(data, self.chunk_size) + return self._client.request('/add', decoder='json', + data=body, headers=headers, **kwargs) + + @utils.return_field('Hash') + def add_str(self, string, **kwargs): + """Adds a Python string as a file to IPFS. + + .. 
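code-block:: python

+			# Editor's sketch (not part of the original docs): the string is
+			# encoded as UTF-8 before upload, so this call should be
+			# equivalent to the `add_bytes` form; values are illustrative.
+			>>> client.add_str(u"¡Hola!") == client.add_bytes(u"¡Hola!".encode("utf-8"))  # doctest: +SKIP
+			True

+		.. 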
code-block:: python + + >>> client.add_str(u"Mary had a little lamb") + 'QmZfF6C9j4VtoCsTp4KSrhYH47QMd3DNXVZBKaxJdhaPab' + + Also accepts and will stream generator objects. + + Parameters + ---------- + string : str + Content to be added as a file + + Returns + ------- + str : Hash of the added IPFS object + """ + body, headers = multipart.stream_text(string, self.chunk_size) + return self._client.request('/add', decoder='json', + data=body, headers=headers, **kwargs) + + def add_json(self, json_obj, **kwargs): + """Adds a json-serializable Python dict as a json file to IPFS. + + .. code-block:: python + + >>> client.add_json({'one': 1, 'two': 2, 'three': 3}) + 'QmVz9g7m5u3oHiNKHj2CJX1dbG1gtismRS3g9NaPBBLbob' + + Parameters + ---------- + json_obj : dict + A json-serializable Python dictionary + + Returns + ------- + str : Hash of the added IPFS object + """ + return self.add_bytes(encoding.Json().encode(json_obj), **kwargs) + + def get_json(self, multihash, **kwargs): + """Loads a json object from IPFS. + + .. code-block:: python + + >>> client.get_json('QmVz9g7m5u3oHiNKHj2CJX1dbG1gtismRS3g9NaPBBLbob') + {'one': 1, 'two': 2, 'three': 3} + + Parameters + ---------- + multihash : str + Multihash of the IPFS object to load + + Returns + ------- + object : Deserialized IPFS JSON object value + """ + return self.cat(multihash, decoder='json', **kwargs) \ No newline at end of file diff --git a/ipfshttpclient/client/base.py b/ipfshttpclient/client/base.py new file mode 100644 index 00000000..bccac4a8 --- /dev/null +++ b/ipfshttpclient/client/base.py @@ -0,0 +1,73 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import + +from . import DEFAULT_HOST, DEFAULT_PORT, DEFAULT_BASE + +from .. import multipart, http + + +class SectionProperty(object): + def __init__(self, cls): + self.cls = cls + + def __get__(self, client_object, type=None): + try: + return client_object.__prop_objs__[self] + except AttributeError: + client_object.__prop_objs__ = { + self: self.cls(client_object) + } + return client_object.__prop_objs__[self] + except KeyError: + client_object.__prop_objs__[self] = self.cls(client_object) + return client_object.__prop_objs__[self] + + +class SectionBase(object): + # Accept parent object from property descriptor + def __init__(self, parent): + self.__parent = parent + + # Proxy the parent's properties + @property + def _client(self): + return self.__parent._client + + @property + def chunk_size(self): + return self.__parent.chunk_size + + @chunk_size.setter + def chunk_size(self, value): + self.__parent.chunk_size = value + + +class ClientBase(object): + """A TCP client for interacting with an IPFS daemon. + + A :class:`~ipfshttpclient.Client` instance will not actually establish a + connection to the daemon until at least one of it's methods is called. 
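+
+	A short usage sketch (an editor's addition: it assumes a daemon is
+	reachable on the default address and elides error handling; the output
+	shown is illustrative):
+
+	.. code-block:: python
+
+		>>> import ipfshttpclient
+		>>> client = ipfshttpclient.connect()  # doctest: +SKIP
+		>>> client.files.ls("/")               # doctest: +SKIP
+		{'Entries': [...]}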
+ + Parameters + ---------- + host : str + Hostname or IP address of the computer running the ``ipfs daemon`` + node (defaults to the local system) + port : int + The API port of the IPFS deamon (usually 5001) + base : str + Path of the deamon's API (currently always ``api/v0``) + chunk_size : int + The size of the chunks to break uploaded files and text content into + """ + + _clientfactory = http.HTTPClient + + def __init__(self, host=DEFAULT_HOST, port=DEFAULT_PORT, + base=DEFAULT_BASE, chunk_size=multipart.default_chunk_size, + **defaults): + """Connects to the API port of an IPFS node.""" + + self.chunk_size = chunk_size + + self._client = self._clientfactory(host, port, base, **defaults) \ No newline at end of file diff --git a/ipfshttpclient/client/bitswap.py b/ipfshttpclient/client/bitswap.py new file mode 100644 index 00000000..7f410a4f --- /dev/null +++ b/ipfshttpclient/client/bitswap.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import + +from . import base + + +class Section(base.SectionBase): + def wantlist(self, peer=None, **kwargs): + """Returns blocks currently on the bitswap wantlist. + + .. code-block:: python + + >>> client.bitswap.wantlist() + {'Keys': [ + 'QmeV6C6XVt1wf7V7as7Yak3mxPma8jzpqyhtRtCvpKcfBb', + 'QmdCWFLDXqgdWQY9kVubbEHBbkieKd3uo7MtCm7nTZZE9K', + 'QmVQ1XvYGF19X4eJqz1s7FJYJqAxFC4oqh3vWJJEXn66cp' + ]} + + Parameters + ---------- + peer : str + Peer to show wantlist for. + + Returns + ------- + dict : List of wanted blocks + """ + args = (peer,) + return self._client.request('/bitswap/wantlist', args, decoder='json', **kwargs) + + + def stat(self, **kwargs): + """Returns some diagnostic information from the bitswap agent. + + .. code-block:: python + + >>> client.bitswap.stat() + {'BlocksReceived': 96, + 'DupBlksReceived': 73, + 'DupDataReceived': 2560601, + 'ProviderBufLen': 0, + 'Peers': [ + 'QmNZFQRxt9RMNm2VVtuV2Qx7q69bcMWRVXmr5CEkJEgJJP', + 'QmNfCubGpwYZAQxX8LQDsYgB48C4GbfZHuYdexpX9mbNyT', + 'QmNfnZ8SCs3jAtNPc8kf3WJqJqSoX7wsX7VqkLdEYMao4u', + … + ], + 'Wantlist': [ + 'QmeV6C6XVt1wf7V7as7Yak3mxPma8jzpqyhtRtCvpKcfBb', + 'QmdCWFLDXqgdWQY9kVubbEHBbkieKd3uo7MtCm7nTZZE9K', + 'QmVQ1XvYGF19X4eJqz1s7FJYJqAxFC4oqh3vWJJEXn66cp' + ] + } + + Returns + ------- + dict : Statistics, peers and wanted blocks + """ + return self._client.request('/bitswap/stat', decoder='json', **kwargs) \ No newline at end of file diff --git a/ipfshttpclient/client/block.py b/ipfshttpclient/client/block.py new file mode 100644 index 00000000..072c0c42 --- /dev/null +++ b/ipfshttpclient/client/block.py @@ -0,0 +1,79 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import + +from . import base + +from .. import multipart + + +class Section(base.SectionBase): + """ + Functions for interacting with raw IPFS blocks. + """ + + def get(self, multihash, **kwargs): + r"""Returns the raw contents of a block. + + .. code-block:: python + + >>> client.block.get('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D') + b'\x121\n"\x12 \xdaW>\x14\xe5\xc1\xf6\xe4\x92\xd1 … \n\x02\x08\x01' + + Parameters + ---------- + multihash : str + The base58 multihash of an existing block to get + + Returns + ------- + str : Value of the requested block + """ + args = (multihash,) + return self._client.request('/block/get', args, **kwargs) + + + def put(self, file, **kwargs): + """Stores the contents of the given file object as an IPFS block. + + .. 
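code-block:: python

+			# Editor's sketch (hypothetical, not from the original docs):
+			# a put/stat round-trip; the reported size is the block length.
+			>>> blk = client.block.put(io.BytesIO(b'Mary had a little lamb'))  # doctest: +SKIP
+			>>> client.block.stat(blk['Key'])['Size']  # doctest: +SKIP
+			22

+		.. 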
code-block:: python + + >>> client.block.put(io.BytesIO(b'Mary had a little lamb')) + {'Key': 'QmeV6C6XVt1wf7V7as7Yak3mxPma8jzpqyhtRtCvpKcfBb', + 'Size': 22} + + Parameters + ---------- + file : io.RawIOBase + The data to be stored as an IPFS block + + Returns + ------- + dict : Information about the new block + + See :meth:`~ipfshttpclient.Client.block.stat` + """ + body, headers = multipart.stream_files(file, self.chunk_size) + return self._client.request('/block/put', decoder='json', data=body, + headers=headers, **kwargs) + + + def stat(self, multihash, **kwargs): + """Returns a dict with the size of the block with the given hash. + + .. code-block:: python + + >>> client.block.stat('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D') + {'Key': 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D', + 'Size': 258} + + Parameters + ---------- + multihash : str + The base58 multihash of an existing block to stat + + Returns + ------- + dict : Information about the requested block + """ + args = (multihash,) + return self._client.request('/block/stat', args, decoder='json', **kwargs) \ No newline at end of file diff --git a/ipfshttpclient/client/bootstrap.py b/ipfshttpclient/client/bootstrap.py new file mode 100644 index 00000000..70bfe385 --- /dev/null +++ b/ipfshttpclient/client/bootstrap.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import + +from . import base + + +class Section(base.SectionBase): + def add(self, peer, *peers, **kwargs): + """Adds peers to the bootstrap list. + + Parameters + ---------- + peer : str + IPFS MultiAddr of a peer to add to the list + + Returns + ------- + dict + """ + args = (peer,) + peers + return self._client.request('/bootstrap/add', args, decoder='json', **kwargs) + + + def list(self, **kwargs): + """Returns the addresses of peers used during initial discovery of the + IPFS network. + + Peers are output in the format ``<multiaddr>/<peerID>``. + + .. code-block:: python + + >>> client.bootstrap.list() + {'Peers': [ + '/ip4/104.131.131.82/tcp/4001/ipfs/QmaCpDMGvV2BGHeYER … uvuJ', + '/ip4/104.236.176.52/tcp/4001/ipfs/QmSoLnSGccFuZQJzRa … ca9z', + '/ip4/104.236.179.241/tcp/4001/ipfs/QmSoLPppuBtQSGwKD … KrGM', + … + '/ip4/178.62.61.185/tcp/4001/ipfs/QmSoLMeWqB7YGVLJN3p … QBU3']} + + Returns + ------- + dict : List of known bootstrap peers + """ + return self._client.request('/bootstrap', decoder='json', **kwargs) + + + def rm(self, peer, *peers, **kwargs): + """Removes peers from the bootstrap list. + + Parameters + ---------- + peer : str + IPFS MultiAddr of a peer to remove from the list + + Returns + ------- + dict + """ + args = (peer,) + peers + return self._client.request('/bootstrap/rm', args, decoder='json', **kwargs) \ No newline at end of file diff --git a/ipfshttpclient/client/config.py b/ipfshttpclient/client/config.py new file mode 100644 index 00000000..fe93fa16 --- /dev/null +++ b/ipfshttpclient/client/config.py @@ -0,0 +1,65 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import + +from . import base + + +class Section(base.SectionBase): + def get(self, **kwargs): + #TODO: Support the optional `key` parameter + """Returns the current used server configuration. + + .. warning:: + + The configuration file contains private key data that must be + handled with care. + + .. 
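code-block:: python

+			# Editor's sketch (hypothetical): the returned mapping nests like
+			# the on-disk JSON config; handle it with care (private keys!).
+			>>> client.config.get()['Identity']['PeerID']  # doctest: +SKIP
+			'QmVgNoP89mzpgEAAqK8owYoDEyB97MkcGvoWZir8otE9Uc'

+		.. 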
code-block:: python

+		>>> config = client.config.get()
+		>>> config['Addresses']
+		{'API': '/ip4/127.0.0.1/tcp/5001',
+		 'Gateway': '/ip4/127.0.0.1/tcp/8080',
+		 'Swarm': ['/ip4/0.0.0.0/tcp/4001', '/ip6/::/tcp/4001']}
+		>>> config['Discovery']
+		{'MDNS': {'Enabled': True, 'Interval': 10}}
+
+		Returns
+		-------
+		dict : The entire IPFS daemon configuration
+		"""
+		return self._client.request('/config/show', decoder='json', **kwargs)
+
+
+	def replace(self, config, **kwargs):
+		"""Replaces the existing configuration with a user-defined one.
+
+		Make sure to back up the config file first if necessary, as this
+		operation can't be undone.
+		"""
+		return self._client.request('/config/replace', (config,), decoder='json', **kwargs)
+
+
+	def set(self, key, value=None, **kwargs):
+		"""Add or replace a configuration value.
+
+		.. code-block:: python
+
+			>>> client.config.set("Addresses.Gateway")
+			{'Key': 'Addresses.Gateway', 'Value': '/ip4/127.0.0.1/tcp/8080'}
+			>>> client.config.set("Addresses.Gateway", "/ip4/127.0.0.1/tcp/8081")
+			{'Key': 'Addresses.Gateway', 'Value': '/ip4/127.0.0.1/tcp/8081'}
+
+		Parameters
+		----------
+		key : str
+			The key of the configuration entry (e.g. "Addresses.API")
+		value : dict
+			The value to set the configuration entry to
+
+		Returns
+		-------
+		dict : Requested/updated key and its (new) value
+		"""
+		args = (key, value)
+		return self._client.request('/config', args, decoder='json', **kwargs)
\ No newline at end of file
diff --git a/ipfshttpclient/client/dht.py b/ipfshttpclient/client/dht.py
new file mode 100644
index 00000000..05f58deb
--- /dev/null
+++ b/ipfshttpclient/client/dht.py
@@ -0,0 +1,191 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+from . import base
+
+from .. import exceptions
+
+
+class Section(base.SectionBase):
+	def findpeer(self, peer_id, *peer_ids, **kwargs):
+		"""Queries the DHT for all of the associated multiaddresses.
+
+		.. code-block:: python
+
+			>>> client.dht.findpeer("QmaxqKpiYNr62uSFBhxJAMmEMkT6dvc3oHkrZN … MTLZ")
+			[{'ID': 'QmfVGMFrwW6AV6fTWmD6eocaTybffqAvkVLXQEFrYdk6yc',
+			  'Extra': '', 'Type': 6, 'Responses': None},
+			 {'ID': 'QmTKiUdjbRjeN9yPhNhG1X38YNuBdjeiV9JXYWzCAJ4mj5',
+			  'Extra': '', 'Type': 6, 'Responses': None},
+			 {'ID': 'QmTGkgHSsULk8p3AKTAqKixxidZQXFyF7mCURcutPqrwjQ',
+			  'Extra': '', 'Type': 6, 'Responses': None},
+			 …
+			 {'ID': '', 'Extra': '', 'Type': 2,
+			  'Responses': [
+				{'ID': 'QmaxqKpiYNr62uSFBhxJAMmEMkT6dvc3oHkrZNpH2VMTLZ',
+				 'Addrs': [
+					'/ip4/10.9.8.1/tcp/4001',
+					'/ip6/::1/tcp/4001',
+					'/ip4/164.132.197.107/tcp/4001',
+					'/ip4/127.0.0.1/tcp/4001']}
+			  ]}]
+
+		Parameters
+		----------
+		peer_id : str
+			The ID of the peer to search for
+
+		Returns
+		-------
+		dict : List of multiaddrs
+		"""
+		args = (peer_id,) + peer_ids
+		return self._client.request('/dht/findpeer', args, decoder='json', **kwargs)
+
+
+	def findprovs(self, multihash, *multihashes, **kwargs):
+		"""Finds peers in the DHT that can provide a specific value.
+
+		.. 
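code-block:: python

+			# Editor's sketch (hypothetical): keep only the provider records
+			# (``Type == 4`` in the raw response shown below).
+			>>> resp = client.dht.findprovs("QmNPXDC6wTXVmZ9Uoc8X1oqxRRJr4f1sDuyQu … mpW2")  # doctest: +SKIP
+			>>> [r for r in resp if r['Type'] == 4]  # doctest: +SKIP
+			[{'ID': '', 'Extra': '', 'Type': 4, 'Responses': [...]}]

+		.. 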
code-block:: python

+			>>> client.dht.findprovs("QmNPXDC6wTXVmZ9Uoc8X1oqxRRJr4f1sDuyQu … mpW2")
+			[{'ID': 'QmaxqKpiYNr62uSFBhxJAMmEMkT6dvc3oHkrZNpH2VMTLZ',
+			  'Extra': '', 'Type': 6, 'Responses': None},
+			 {'ID': 'QmaK6Aj5WXkfnWGoWq7V8pGUYzcHPZp4jKQ5JtmRvSzQGk',
+			  'Extra': '', 'Type': 6, 'Responses': None},
+			 {'ID': 'QmdUdLu8dNvr4MVW1iWXxKoQrbG6y1vAVWPdkeGK4xppds',
+			  'Extra': '', 'Type': 6, 'Responses': None},
+			 …
+			 {'ID': '', 'Extra': '', 'Type': 4, 'Responses': [
+				{'ID': 'QmVgNoP89mzpgEAAqK8owYoDEyB97Mk … E9Uc', 'Addrs': None}
+			 ]},
+			 {'ID': 'QmaxqKpiYNr62uSFBhxJAMmEMkT6dvc3oHkrZNpH2VMTLZ',
+			  'Extra': '', 'Type': 1, 'Responses': [
+				{'ID': 'QmSHXfsmN3ZduwFDjeqBn1C8b1tcLkxK6yd … waXw', 'Addrs': [
+					'/ip4/127.0.0.1/tcp/4001',
+					'/ip4/172.17.0.8/tcp/4001',
+					'/ip6/::1/tcp/4001',
+					'/ip4/52.32.109.74/tcp/1028'
+				]}
+			 ]}]
+
+		Parameters
+		----------
+		multihash : str
+			The DHT key to find providers for
+
+		Returns
+		-------
+		dict : List of provider Peer IDs
+		"""
+		args = (multihash,) + multihashes
+		return self._client.request('/dht/findprovs', args, decoder='json', **kwargs)
+
+
+	def get(self, key, *keys, **kwargs):
+		"""Queries the DHT for its best value related to a given key.
+
+		There may be several different values for a given key stored in the
+		DHT; in this context *best* means the record that is most desirable.
+		There is no one metric for *best*: it depends entirely on the key type.
+		For IPNS, *best* is the record that is both valid and has the highest
+		sequence number (freshest). Different key types may specify other rules
+		for what they consider to be the *best*.
+
+		Parameters
+		----------
+		key : str
+			One or more keys whose values should be looked up
+
+		Returns
+		-------
+		str
+		"""
+		args = (key,) + keys
+		res = self._client.request('/dht/get', args, decoder='json', **kwargs)
+
+		if isinstance(res, dict) and "Extra" in res:
+			return res["Extra"]
+		else:
+			for r in res:
+				if "Extra" in r and len(r["Extra"]) > 0:
+					return r["Extra"]
+			raise exceptions.Error("empty response from DHT")
+
+
+	#TODO: Implement `provide(cid)`
+
+
+	def put(self, key, value, **kwargs):
+		"""Writes a key/value pair to the DHT.
+
+		Given a key of the form ``/foo/bar`` and a value of any form, this will
+		write that value to the DHT with that key.
+
+		Keys have two parts: a keytype (foo) and the key name (bar). IPNS uses
+		the ``/ipns/`` keytype, and expects the key name to be a Peer ID. IPNS
+		entries are formatted with a special structure.
+
+		You may only use keytypes that are supported in your ``ipfs`` binary:
+		``go-ipfs`` currently only supports the ``/ipns/`` keytype. Unless you
+		have a relatively deep understanding of the key's internal structure,
+		you likely want to use :meth:`~ipfshttpclient.Client.name.publish`
+		instead.
+
+		Value is arbitrary text.
+
+		.. code-block:: python
+
+			>>> client.dht.put("QmVgNoP89mzpgEAAqK8owYoDEyB97Mkc … E9Uc", "test123")
+			[{'ID': 'QmfLy2aqbhU1RqZnGQyqHSovV8tDufLUaPfN1LNtg5CvDZ',
+			  'Extra': '', 'Type': 5, 'Responses': None},
+			 {'ID': 'QmZ5qTkNvvZ5eFq9T4dcCEK7kX8L7iysYEpvQmij9vokGE',
+			  'Extra': '', 'Type': 5, 'Responses': None},
+			 {'ID': 'QmYqa6QHCbe6eKiiW6YoThU5yBy8c3eQzpiuW22SgVWSB8',
+			  'Extra': '', 'Type': 6, 'Responses': None},
+			 …
+			 {'ID': 'QmP6TAKVDCziLmx9NV8QGekwtf7ZMuJnmbeHMjcfoZbRMd',
+			  'Extra': '', 'Type': 1, 'Responses': []}]
+
+		Parameters
+		----------
+		key : str
+			A unique identifier
+		value : str
+			Arbitrary text to associate with the input (2048 bytes or less)
+
+		Returns
+		-------
+		list
+		"""
+		args = (key, value)
+		return self._client.request('/dht/put', args, decoder='json', **kwargs)
+
+
+	def query(self, peer_id, *peer_ids, **kwargs):
+		"""Finds the closest Peer IDs to a given Peer ID by querying the DHT.
+
+		.. code-block:: python
+
+			>>> client.dht.query("/ip4/104.131.131.82/tcp/4001/ipfs/QmaCpDM … uvuJ")
+			[{'ID': 'QmPkFbxAQ7DeKD5VGSh9HQrdS574pyNzDmxJeGrRJxoucF',
+			  'Extra': '', 'Type': 2, 'Responses': None},
+			 {'ID': 'QmR1MhHVLJSLt9ZthsNNhudb1ny1WdhY4FPW21ZYFWec4f',
+			  'Extra': '', 'Type': 2, 'Responses': None},
+			 {'ID': 'Qmcwx1K5aVme45ab6NYWb52K2TFBeABgCLccC7ntUeDsAs',
+			  'Extra': '', 'Type': 2, 'Responses': None},
+			 …
+			 {'ID': 'QmYYy8L3YD1nsF4xtt4xmsc14yqvAAnKksjo3F3iZs5jPv',
+			  'Extra': '', 'Type': 1, 'Responses': []}]
+
+		Parameters
+		----------
+		peer_id : str
+			The peerID to run the query against
+
+		Returns
+		-------
+		dict : List of peer IDs
+		"""
+		args = (peer_id,) + peer_ids
+		return self._client.request('/dht/query', args, decoder='json', **kwargs)
\ No newline at end of file
diff --git a/ipfshttpclient/client/files.py b/ipfshttpclient/client/files.py
new file mode 100644
index 00000000..2dcbd8e7
--- /dev/null
+++ b/ipfshttpclient/client/files.py
@@ -0,0 +1,409 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+from . import base
+
+from .. import multipart
+
+
+class Section(base.SectionBase):
+	"""
+	Functions used to manage files in IPFS's virtual “Mutable File System” (MFS)
+	file storage space.
+	"""
+
+	def cp(self, source, dest, **kwargs):
+		"""Copies files within the MFS.
+
+		Due to the nature of IPFS this will not actually involve any of the
+		file's content being copied.
+
+		.. code-block:: python
+
+			>>> client.files.ls("/")
+			{'Entries': [
+				{'Size': 0, 'Hash': '', 'Name': 'Software', 'Type': 0},
+				{'Size': 0, 'Hash': '', 'Name': 'test', 'Type': 0}
+			]}
+			>>> client.files.cp("/test", "/bla")
+			''
+			>>> client.files.ls("/")
+			{'Entries': [
+				{'Size': 0, 'Hash': '', 'Name': 'Software', 'Type': 0},
+				{'Size': 0, 'Hash': '', 'Name': 'bla', 'Type': 0},
+				{'Size': 0, 'Hash': '', 'Name': 'test', 'Type': 0}
+			]}
+
+		Parameters
+		----------
+		source : str
+			Filepath within the MFS to copy from
+		dest : str
+			Destination filepath within the MFS to which the file will be
+			copied
+		"""
+		args = (source, dest)
+		return self._client.request('/files/cp', args, **kwargs)
+
+
+	#TODO: Add `flush(path="/")`
+
+
+	def ls(self, path, **kwargs):
+		"""Lists contents of a directory in the MFS.
+
+		.. 
code-block:: python + + >>> client.files.ls("/") + {'Entries': [ + {'Size': 0, 'Hash': '', 'Name': 'Software', 'Type': 0} + ]} + + Parameters + ---------- + path : str + Filepath within the MFS + + Returns + ------- + dict : Directory entries + """ + args = (path,) + return self._client.request('/files/ls', args, decoder='json', **kwargs) + + + def mkdir(self, path, parents=False, **kwargs): + """Creates a directory within the MFS. + + .. code-block:: python + + >>> client.files.mkdir("/test") + b'' + + Parameters + ---------- + path : str + Filepath within the MFS + parents : bool + Create parent directories as needed and do not raise an exception + if the requested directory already exists + """ + kwargs.setdefault("opts", {"parents": parents}) + + args = (path,) + return self._client.request('/files/mkdir', args, **kwargs) + + + def mv(self, source, dest, **kwargs): + """Moves files and directories within the MFS. + + .. code-block:: python + + >>> client.files.mv("/test/file", "/bla/file") + b'' + + Parameters + ---------- + source : str + Existing filepath within the MFS + dest : str + Destination to which the file will be moved in the MFS + """ + args = (source, dest) + return self._client.request('/files/mv', args, **kwargs) + + + def read(self, path, offset=0, count=None, **kwargs): + """Reads a file stored in the MFS. + + .. code-block:: python + + >>> client.files.read("/bla/file") + b'hi' + + Parameters + ---------- + path : str + Filepath within the MFS + offset : int + Byte offset at which to begin reading at + count : int + Maximum number of bytes to read + + Returns + ------- + str : MFS file contents + """ + opts = {"offset": offset} + if count is not None: + opts["count"] = count + kwargs.setdefault("opts", opts) + + args = (path,) + return self._client.request('/files/read', args, **kwargs) + + + def rm(self, path, recursive=False, **kwargs): + """Removes a file from the MFS. + + .. code-block:: python + + >>> client.files.rm("/bla/file") + b'' + + Parameters + ---------- + path : str + Filepath within the MFS + recursive : bool + Recursively remove directories? + """ + kwargs.setdefault("opts", {"recursive": recursive}) + + args = (path,) + return self._client.request('/files/rm', args, **kwargs) + + + def stat(self, path, **kwargs): + """Returns basic ``stat`` information for an MFS file + (including its hash). + + .. code-block:: python + + >>> client.files.stat("/test") + {'Hash': 'QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn', + 'Size': 0, 'CumulativeSize': 4, 'Type': 'directory', 'Blocks': 0} + + Parameters + ---------- + path : str + Filepath within the MFS + + Returns + ------- + dict : MFS file information + """ + args = (path,) + return self._client.request('/files/stat', args, decoder='json', **kwargs) + + + def write(self, path, file, offset=0, create=False, truncate=False, count=None, **kwargs): + """Writes to a mutable file in the MFS. + + .. 
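code-block:: python

+			# Editor's sketch (assumes the path does not exist yet): write a
+			# file, creating it on first use, then read it back.
+			>>> client.files.write("/test/file", io.BytesIO(b"hi"), create=True)  # doctest: +SKIP
+			b''
+			>>> client.files.read("/test/file")  # doctest: +SKIP
+			b'hi'

+		.. 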
code-block:: python + + >>> client.files.write("/test/file", io.BytesIO(b"hi"), create=True) + b'' + + Parameters + ---------- + path : str + Filepath within the MFS + file : io.RawIOBase + IO stream object with data that should be written + offset : int + Byte offset at which to begin writing at + create : bool + Create the file if it does not exist + truncate : bool + Truncate the file to size zero before writing + count : int + Maximum number of bytes to read from the source ``file`` + """ + opts = {"offset": offset, "create": create, "truncate": truncate} + if count is not None: + opts["count"] = count + kwargs.setdefault("opts", opts) + + args = (path,) + body, headers = multipart.stream_files(file, self.chunk_size) + return self._client.request('/files/write', args, data=body, headers=headers, **kwargs) + + +class Base(base.ClientBase): + files = base.SectionProperty(Section) + + + def add(self, files, recursive=False, pattern='**', *args, **kwargs): + """Add a file, or directory of files to IPFS. + + .. code-block:: python + + >>> with io.open('nurseryrhyme.txt', 'w', encoding='utf-8') as f: + ... numbytes = f.write('Mary had a little lamb') + >>> client.add('nurseryrhyme.txt') + {'Hash': 'QmZfF6C9j4VtoCsTp4KSrhYH47QMd3DNXVZBKaxJdhaPab', + 'Name': 'nurseryrhyme.txt'} + + Parameters + ---------- + files : str + A filepath to either a file or directory + recursive : bool + Controls if files in subdirectories are added or not + pattern : str | list + Single `*glob* <https://docs.python.org/3/library/glob.html>`_ + pattern or list of *glob* patterns and compiled regular expressions + to match the names of the filepaths to keep + trickle : bool + Use trickle-dag format (optimized for streaming) when generating + the dag; see `the FAQ <https://github.com/ipfs/faq/issues/218>` for + more information (Default: ``False``) + only_hash : bool + Only chunk and hash, but do not write to disk (Default: ``False``) + wrap_with_directory : bool + Wrap files with a directory object to preserve their filename + (Default: ``False``) + chunker : str + The chunking algorithm to use + pin : bool + Pin this object when adding (Default: ``True``) + + Returns + ------- + dict: File name and hash of the added file node + """ + #PY2: No support for kw-only parameters after glob parameters + opts = { + "trickle": kwargs.pop("trickle", False), + "only-hash": kwargs.pop("only_hash", False), + "wrap-with-directory": kwargs.pop("wrap_with_directory", False), + "pin": kwargs.pop("pin", True) + } + if "chunker" in kwargs: + opts["chunker"] = kwargs.pop("chunker") + kwargs.setdefault("opts", opts) + + body, headers = multipart.stream_filesystem_node( + files, recursive, pattern, self.chunk_size + ) + return self._client.request('/add', decoder='json', data=body, headers=headers, **kwargs) + + + def file_ls(self, multihash, **kwargs): + """Lists directory contents for Unix filesystem objects. + + The result contains size information. For files, the child size is the + total size of the file contents. For directories, the child size is the + IPFS link size. + + The path can be a prefixless reference; in this case, it is assumed + that it is an ``/ipfs/`` reference and not ``/ipns/``. + + .. 
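code-block:: python

+			# Editor's sketch (hypothetical): a bare hash and its explicit
+			# ``/ipfs/`` form are equivalent, per the prefixless rule above.
+			>>> bare = client.file_ls('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D')        # doctest: +SKIP
+			>>> full = client.file_ls('/ipfs/QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D')  # doctest: +SKIP
+			>>> bare == full  # doctest: +SKIP
+			True

+		.. 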
code-block:: python + + >>> client.file_ls('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D') + { + 'Arguments': {'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D': + 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D'}, + 'Objects': { + 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D': { + 'Hash': 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D', + 'Size': 0, 'Type': 'Directory', + 'Links': [ + {'Hash': 'Qmd2xkBfEwEs9oMTk77A6jrsgurpF3ugXSg7dtPNFkcNMV', + 'Name': 'Makefile', 'Size': 163, 'Type': 'File'}, + {'Hash': 'QmeKozNssnkJ4NcyRidYgDY2jfRZqVEoRGfipkgath71bX', + 'Name': 'example', 'Size': 1463, 'Type': 'File'}, + {'Hash': 'QmZAL3oHMQYqsV61tGvoAVtQLs1WzRe1zkkamv9qxqnDuK', + 'Name': 'home', 'Size': 3947, 'Type': 'Directory'}, + {'Hash': 'QmZNPyKVriMsZwJSNXeQtVQSNU4v4KEKGUQaMT61LPahso', + 'Name': 'lib', 'Size': 268261, 'Type': 'Directory'}, + {'Hash': 'QmSY8RfVntt3VdxWppv9w5hWgNrE31uctgTiYwKir8eXJY', + 'Name': 'published-version', 'Size': 47, 'Type': 'File'} + ] + } + } + } + + Parameters + ---------- + multihash : str + The path to the object(s) to list links from + + Returns + ------- + dict + """ + args = (multihash,) + return self._client.request('/file/ls', args, decoder='json', **kwargs) + + + def get(self, multihash, **kwargs): + """Downloads a file, or directory of files from IPFS. + + Files are placed in the current working directory. + + Parameters + ---------- + multihash : str + The path to the IPFS object(s) to be outputted + """ + args = (multihash,) + return self._client.download('/get', args, **kwargs) + + + def cat(self, multihash, offset=0, length=-1, **kwargs): + r"""Retrieves the contents of a file identified by hash. + + .. code-block:: python + + >>> client.cat('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D') + Traceback (most recent call last): + ... + ipfsapi.exceptions.Error: this dag node is a directory + >>> client.cat('QmeKozNssnkJ4NcyRidYgDY2jfRZqVEoRGfipkgath71bX') + b'<!DOCTYPE html>\n<html>\n\n<head>\n<title>ipfs example viewer</…' + + Parameters + ---------- + multihash : str + The path to the IPFS object(s) to be retrieved + offset : int + Byte offset to begin reading from + length : int + Maximum number of bytes to read(-1 for all) + + Returns + ------- + str : File contents + """ + args = (multihash,) + opts = {} + if offset != 0: + opts['offset'] = offset + if length != -1: + opts['length'] = length + kwargs.setdefault('opts', opts) + return self._client.request('/cat', args, **kwargs) + + + def ls(self, multihash, **kwargs): + """Returns a list of objects linked to by the given hash. + + .. code-block:: python + + >>> client.ls('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D') + {'Objects': [ + {'Hash': 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D', + 'Links': [ + {'Hash': 'Qmd2xkBfEwEs9oMTk77A6jrsgurpF3ugXSg7dtPNFkcNMV', + 'Name': 'Makefile', 'Size': 174, 'Type': 2}, + … + {'Hash': 'QmSY8RfVntt3VdxWppv9w5hWgNrE31uctgTiYwKir8eXJY', + 'Name': 'published-version', 'Size': 55, 'Type': 2} + ] + } + ]} + + Parameters + ---------- + multihash : str + The path to the IPFS object(s) to list links from + + Returns + ------- + dict : Directory information and contents + """ + args = (multihash,) + return self._client.request('/ls', args, decoder='json', **kwargs) \ No newline at end of file diff --git a/ipfshttpclient/client/key.py b/ipfshttpclient/client/key.py new file mode 100644 index 00000000..56ff595e --- /dev/null +++ b/ipfshttpclient/client/key.py @@ -0,0 +1,117 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import + +from . 
import base + + +class Section(base.SectionBase): + #TODO: Add `export(name, password)` + + + def gen(self, key_name, type, size=2048, **kwargs): + """Adds a new public key that can be used for + :meth:`~ipfshttpclient.Client.name.publish`. + + .. code-block:: python + + >>> client.key.gen('example_key_name') + {'Name': 'example_key_name', + 'Id': 'QmQLaT5ZrCfSkXTH6rUKtVidcxj8jrW3X2h75Lug1AV7g8'} + + Parameters + ---------- + key_name : str + Name of the new Key to be generated. Used to reference the Keys. + type : str + Type of key to generate. The current possible keys types are: + + * ``"rsa"`` + * ``"ed25519"`` + size : int + Bitsize of key to generate + + Returns + ------- + dict : Key name and Key Id + """ + + opts = {"type": type, "size": size} + kwargs.setdefault("opts", opts) + args = (key_name,) + + return self._client.request('/key/gen', args, decoder='json', **kwargs) + + + #TODO: Add `import(name, pam, password)` + + + def list(self, **kwargs): + """Returns a list of generated public keys that can be used with + :meth:`~ipfshttpclient.Client.name.publish`. + + .. code-block:: python + + >>> client.key.list() + [{'Name': 'self', + 'Id': 'QmQf22bZar3WKmojipms22PkXH1MZGmvsqzQtuSvQE3uhm'}, + {'Name': 'example_key_name', + 'Id': 'QmQLaT5ZrCfSkXTH6rUKtVidcxj8jrW3X2h75Lug1AV7g8'} + ] + + Returns + ------- + list : List of dictionaries with Names and Ids of public keys. + """ + return self._client.request('/key/list', decoder='json', **kwargs) + + + def rename(self, key_name, new_key_name, **kwargs): + """Rename a keypair + + .. code-block:: python + + >>> client.key.rename("bla", "personal") + {"Was": "bla", + "Now": "personal", + "Id": "QmeyrRNxXaasZaoDXcCZgryoBCga9shaHQ4suHAYXbNZF3", + "Overwrite": False} + + Parameters + ---------- + key_name : str + Current name of the key to rename + new_key_name : str + New name of the key + + Returns + ------- + dict : List of key names and IDs that have been removed + """ + args = (key_name, new_key_name) + return self._client.request( + '/key/rename', args, decoder='json', **kwargs + ) + + + def rm(self, key_name, *key_names, **kwargs): + """Remove a keypair + + .. code-block:: python + + >>> client.key_rm("bla") + {"Keys": [ + {"Name": "bla", + "Id": "QmfJpR6paB6h891y7SYXGe6gapyNgepBeAYMbyejWA4FWA"} + ]} + + Parameters + ---------- + key_name : str + Name of the key(s) to remove. + + Returns + ------- + dict : List of key names and IDs that have been removed + """ + args = (key_name,) + key_names + return self._client.request('/key/rm', args, decoder='json', **kwargs) \ No newline at end of file diff --git a/ipfshttpclient/client/miscellaneous.py b/ipfshttpclient/client/miscellaneous.py new file mode 100644 index 00000000..e9f3de16 --- /dev/null +++ b/ipfshttpclient/client/miscellaneous.py @@ -0,0 +1,182 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import + +from . import base + +from .. import exceptions + + +class Base(base.ClientBase): + def dns(self, domain_name, recursive=False, **kwargs): + """Resolves DNS links to the referenced object. + + Multihashes are hard to remember, but domain names are usually easy to + remember. To create memorable aliases for multihashes, DNS TXT records + can point to other DNS links, IPFS objects, IPNS keys, etc. + This command resolves those links to the referenced object. 
+
+		For example, with this DNS TXT record::
+
+			>>> import dns.resolver
+			>>> a = dns.resolver.query("ipfs.io", "TXT")
+			>>> a.response.answer[0].items[0].to_text()
+			'"dnslink=/ipfs/QmTzQ1JRkWErjk39mryYw2WVaphAZNAREyMchXzYQ7c15n"'
+
+		The resolver will give::
+
+			>>> client.dns("ipfs.io")
+			{'Path': '/ipfs/QmTzQ1JRkWErjk39mryYw2WVaphAZNAREyMchXzYQ7c15n'}
+
+		Parameters
+		----------
+		domain_name : str
+			The domain name to resolve
+		recursive : bool
+			Resolve until the name is not a DNS link
+
+		Returns
+		-------
+		dict : Resource to which the DNS entry points
+		"""
+		kwargs.setdefault("opts", {"recursive": recursive})
+
+		args = (domain_name,)
+		return self._client.request('/dns', args, decoder='json', **kwargs)
+
+
+	def id(self, peer=None, **kwargs):
+		"""Shows IPFS Node ID info.
+
+		Returns the PublicKey, ProtocolVersion, ID, AgentVersion and
+		Addresses of the connected daemon or some other node.
+
+		.. code-block:: python
+
+			>>> client.id()
+			{'ID': 'QmVgNoP89mzpgEAAqK8owYoDEyB97MkcGvoWZir8otE9Uc',
+			'PublicKey': 'CAASpgIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggE … BAAE=',
+			'AgentVersion': 'go-libp2p/3.3.4',
+			'ProtocolVersion': 'ipfs/0.1.0',
+			'Addresses': [
+				'/ip4/127.0.0.1/tcp/4001/ipfs/QmVgNoP89mzpgEAAqK8owYo … E9Uc',
+				'/ip4/10.1.0.172/tcp/4001/ipfs/QmVgNoP89mzpgEAAqK8owY … E9Uc',
+				'/ip4/172.18.0.1/tcp/4001/ipfs/QmVgNoP89mzpgEAAqK8owY … E9Uc',
+				'/ip6/::1/tcp/4001/ipfs/QmVgNoP89mzpgEAAqK8owYoDEyB97 … E9Uc',
+				'/ip6/fccc:7904:b05b:a579:957b:deef:f066:cad9/tcp/400 … E9Uc',
+				'/ip6/fd56:1966:efd8::212/tcp/4001/ipfs/QmVgNoP89mzpg … E9Uc',
+				'/ip6/fd56:1966:efd8:0:def1:34d0:773:48f/tcp/4001/ipf … E9Uc',
+				'/ip6/2001:db8:1::1/tcp/4001/ipfs/QmVgNoP89mzpgEAAqK8 … E9Uc',
+				'/ip4/77.116.233.54/tcp/4001/ipfs/QmVgNoP89mzpgEAAqK8 … E9Uc',
+				'/ip4/77.116.233.54/tcp/10842/ipfs/QmVgNoP89mzpgEAAqK … E9Uc']}
+
+		Parameters
+		----------
+		peer : str
+			Peer.ID of the node to look up (local node if ``None``)
+
+		Returns
+		-------
+		dict : Information about the IPFS node
+		"""
+		args = (peer,) if peer is not None else ()
+		return self._client.request('/id', args, decoder='json', **kwargs)
+
+
+	#TODO: isOnline()
+
+
+	def ping(self, peer, *peers, **kwargs):
+		"""Provides round-trip latency information for the routing system.
+
+		Finds nodes via the routing system, sends pings, waits for pongs,
+		and prints out round-trip latency information.
+
+		.. code-block:: python
+
+			>>> client.ping("QmTzQ1JRkWErjk39mryYw2WVaphAZNAREyMchXzYQ7c15n")
+			[{'Success': True, 'Time': 0,
+			  'Text': 'Looking up peer QmTzQ1JRkWErjk39mryYw2WVaphAZN … c15n'},
+			 {'Success': False, 'Time': 0,
+			  'Text': 'Peer lookup error: routing: not found'}]
+
+		Parameters
+		----------
+		peer : str
+			ID of peer to be pinged
+		count : int
+			Number of ping messages to send (Default: ``10``)
+
+		Returns
+		-------
+		list : Progress reports from the ping
+		"""
+		#PY2: No support for kw-only parameters after glob parameters
+		if "count" in kwargs:
+			kwargs.setdefault("opts", {"count": kwargs["count"]})
+			del kwargs["count"]
+
+		args = (peer,) + peers
+		return self._client.request('/ping', args, decoder='json', **kwargs)
+
+	def resolve(self, name, recursive=False, **kwargs):
+		"""Accepts an identifier and resolves it to the referenced item.
+
+		There are a number of mutable name protocols that can link among
+		themselves and into IPNS. For example IPNS references can (currently)
+		point at an IPFS object, and DNS links can point at other DNS links,
+		IPNS entries, or IPFS objects. This command accepts any of these
+		identifiers.
+
+		.. 
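code-block:: python

+			# Editor's sketch (hypothetical): with ``recursive=True`` the
+			# chain of mutable names is followed down to an immutable
+			# ``/ipfs/`` path in a single call.
+			>>> client.resolve("/ipns/ipfs.io", recursive=True)  # doctest: +SKIP
+			{'Path': '/ipfs/QmTzQ1JRkWErjk39mryYw2WVaphAZNAREyMchXzYQ7c15n'}

+		.. 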
code-block:: python + + >>> client.resolve("/ipfs/QmTkzDwWqPbnAh5YiV5VwcTLnGdw … ca7D/Makefile") + {'Path': '/ipfs/Qmd2xkBfEwEs9oMTk77A6jrsgurpF3ugXSg7dtPNFkcNMV'} + >>> client.resolve("/ipns/ipfs.io") + {'Path': '/ipfs/QmTzQ1JRkWErjk39mryYw2WVaphAZNAREyMchXzYQ7c15n'} + + Parameters + ---------- + name : str + The name to resolve + recursive : bool + Resolve until the result is an IPFS name + + Returns + ------- + dict : IPFS path of resource + """ + kwargs.setdefault("opts", {"recursive": recursive}) + + args = (name,) + return self._client.request('/resolve', args, decoder='json', **kwargs) + + + def stop(self): + """Stop the connected IPFS daemon instance. + + Sending any further requests after this will fail with + ``ipfsapi.exceptions.ConnectionError``, until you start another IPFS + daemon instance. + """ + try: + return self._client.request('/shutdown') + except exceptions.ConnectionError: + # Sometimes the daemon kills the connection before sending a + # response causing an incorrect `ConnectionError` to bubble + pass + + + def version(self, **kwargs): + """Returns the software version of the currently connected node. + + .. code-block:: python + + >>> client.version() + {'Version': '0.4.3-rc2', 'Repo': '4', 'Commit': '', + 'System': 'amd64/linux', 'Golang': 'go1.6.2'} + + Returns + ------- + dict : Daemon and system version information + """ + return self._client.request('/version', decoder='json', **kwargs) \ No newline at end of file diff --git a/ipfshttpclient/client/name.py b/ipfshttpclient/client/name.py new file mode 100644 index 00000000..da180aff --- /dev/null +++ b/ipfshttpclient/client/name.py @@ -0,0 +1,86 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import + +from . import base + + +class Section(base.SectionBase): + def publish(self, ipfs_path, resolve=True, lifetime="24h", ttl=None, key=None, **kwargs): + """Publishes an object to IPNS. + + IPNS is a PKI namespace, where names are the hashes of public keys, and + the private key enables publishing new (signed) values. In publish, the + default value of *name* is your own identity public key. + + .. code-block:: python + + >>> client.name.publish('/ipfs/QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZK … GZ5d') + {'Value': '/ipfs/QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d', + 'Name': 'QmVgNoP89mzpgEAAqK8owYoDEyB97MkcGvoWZir8otE9Uc'} + + Parameters + ---------- + ipfs_path : str + IPFS path of the object to be published + resolve : bool + Resolve given path before publishing + lifetime : str + Time duration that the record will be valid for + + Accepts durations such as ``"300s"``, ``"1.5h"`` or ``"2h45m"``. + Valid units are: + + * ``"ns"`` + * ``"us"`` (or ``"µs"``) + * ``"ms"`` + * ``"s"`` + * ``"m"`` + * ``"h"`` + ttl : int + Time duration this record should be cached for + key : string + Name of the key to be used, as listed by 'ipfs key list'. + + Returns + ------- + dict : IPNS hash and the IPFS path it points at + """ + opts = {"lifetime": lifetime, "resolve": resolve} + if ttl: + opts["ttl"] = ttl + if key: + opts["key"] = key + kwargs.setdefault("opts", opts) + + args = (ipfs_path,) + return self._client.request('/name/publish', args, decoder='json', **kwargs) + + + def resolve(self, name=None, recursive=False, nocache=False, **kwargs): + """Gets the value currently published at an IPNS name. + + IPNS is a PKI namespace, where names are the hashes of public keys, and + the private key enables publishing new (signed) values. In resolve, the + default value of ``name`` is your own identity public key. + + .. 
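code-block:: python

+			# Editor's sketch (hypothetical values): resolve the name that a
+			# previous ``client.name.publish()`` call returned.
+			>>> res = client.name.publish('/ipfs/QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZK … GZ5d')  # doctest: +SKIP
+			>>> client.name.resolve(res['Name'])  # doctest: +SKIP
+			{'Path': '/ipfs/QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d'}

+		.. 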
code-block:: python + + >>> client.name.resolve() + {'Path': '/ipfs/QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d'} + + Parameters + ---------- + name : str + The IPNS name to resolve (defaults to the connected node) + recursive : bool + Resolve until the result is not an IPFS name (default: false) + nocache : bool + Do not use cached entries (default: false) + + Returns + ------- + dict : The IPFS path the IPNS hash points at + """ + kwargs.setdefault("opts", {"recursive": recursive, "nocache": nocache}) + args = (name,) if name is not None else () + return self._client.request('/name/resolve', args, decoder='json', **kwargs) \ No newline at end of file diff --git a/ipfshttpclient/client/object.py b/ipfshttpclient/client/object.py new file mode 100644 index 00000000..898ebf34 --- /dev/null +++ b/ipfshttpclient/client/object.py @@ -0,0 +1,306 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import + +from . import base + +from .. import multipart + + +class PatchSection(base.SectionBase): + def add_link(self, root, name, ref, create=False, **kwargs): + """Creates a new merkledag object based on an existing one. + + The new object will have a link to the provided object. + + .. code-block:: python + + >>> client.object.patch.add_link( + ... 'QmR79zQQj2aDfnrNgczUhvf2qWapEfQ82YQRt3QjrbhSb2', + ... 'Johnny', + ... 'QmR79zQQj2aDfnrNgczUhvf2qWapEfQ82YQRt3QjrbhSb2' + ... ) + {'Hash': 'QmNtXbF3AjAk59gQKRgEdVabHcSsiPUnJwHnZKyj2x8Z3k'} + + Parameters + ---------- + root : str + IPFS hash for the object being modified + name : str + name for the new link + ref : str + IPFS hash for the object being linked to + create : bool + Create intermediary nodes + + Returns + ------- + dict : Hash of new object + """ + kwargs.setdefault("opts", {"create": create}) + + args = ((root, name, ref),) + return self._client.request('/object/patch/add-link', args, decoder='json', **kwargs) + + + def append_data(self, multihash, new_data, **kwargs): + """Creates a new merkledag object based on an existing one. + + The new object will have the provided data appended to it, + and will thus have a new Hash. + + .. code-block:: python + + >>> client.object.patch.append_data("QmZZmY … fTqm", io.BytesIO(b"bla")) + {'Hash': 'QmR79zQQj2aDfnrNgczUhvf2qWapEfQ82YQRt3QjrbhSb2'} + + Parameters + ---------- + multihash : str + The hash of an ipfs object to modify + new_data : io.RawIOBase + The data to append to the object's data section + + Returns + ------- + dict : Hash of new object + """ + args = (multihash,) + body, headers = multipart.stream_files(new_data, self.chunk_size) + return self._client.request('/object/patch/append-data', args, decoder='json', + data=body, headers=headers, **kwargs) + + + def rm_link(self, root, link, **kwargs): + """Creates a new merkledag object based on an existing one. + + The new object will lack a link to the specified object. + + .. code-block:: python + + >>> client.object.patch.rm_link( + ... 'QmNtXbF3AjAk59gQKRgEdVabHcSsiPUnJwHnZKyj2x8Z3k', + ... 'Johnny' + ... ) + {'Hash': 'QmR79zQQj2aDfnrNgczUhvf2qWapEfQ82YQRt3QjrbhSb2'} + + Parameters + ---------- + root : str + IPFS hash of the object to modify + link : str + name of the link to remove + + Returns + ------- + dict : Hash of new object + """ + args = ((root, link),) + return self._client.request('/object/patch/rm-link', args, decoder='json', **kwargs) + + + def set_data(self, root, data, **kwargs): + """Creates a new merkledag object based on an existing one. 
+ + The new object will have the same links as the old object but + with the provided data instead of the old object's data contents. + + .. code-block:: python + + >>> client.object.patch.set_data( + ... 'QmNtXbF3AjAk59gQKRgEdVabHcSsiPUnJwHnZKyj2x8Z3k', + ... io.BytesIO(b'bla') + ... ) + {'Hash': 'QmSw3k2qkv4ZPsbu9DVEJaTMszAQWNgM1FTFYpfZeNQWrd'} + + Parameters + ---------- + root : str + IPFS hash of the object to modify + data : io.RawIOBase + The new data to store in root + + Returns + ------- + dict : Hash of new object + """ + args = (root,) + body, headers = multipart.stream_files(data, self.chunk_size) + return self._client.request('/object/patch/set-data', args, decoder='json', data=body, + headers=headers, **kwargs) + + + +class Section(base.SectionBase): + patch = base.SectionProperty(PatchSection) + + + def data(self, multihash, **kwargs): + r"""Returns the raw bytes in an IPFS object. + + .. code-block:: python + + >>> client.object.data('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D') + b'\x08\x01' + + Parameters + ---------- + multihash : str + Key of the object to retrieve, in base58-encoded multihash format + + Returns + ------- + str : Raw object data + """ + args = (multihash,) + return self._client.request('/object/data', args, **kwargs) + + + def get(self, multihash, **kwargs): + """Get and serialize the DAG node named by multihash. + + .. code-block:: python + + >>> client.object.get('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D') + {'Data': '\x08\x01', + 'Links': [ + {'Hash': 'Qmd2xkBfEwEs9oMTk77A6jrsgurpF3ugXSg7dtPNFkcNMV', + 'Name': 'Makefile', 'Size': 174}, + {'Hash': 'QmeKozNssnkJ4NcyRidYgDY2jfRZqVEoRGfipkgath71bX', + 'Name': 'example', 'Size': 1474}, + {'Hash': 'QmZAL3oHMQYqsV61tGvoAVtQLs1WzRe1zkkamv9qxqnDuK', + 'Name': 'home', 'Size': 3947}, + {'Hash': 'QmZNPyKVriMsZwJSNXeQtVQSNU4v4KEKGUQaMT61LPahso', + 'Name': 'lib', 'Size': 268261}, + {'Hash': 'QmSY8RfVntt3VdxWppv9w5hWgNrE31uctgTiYwKir8eXJY', + 'Name': 'published-version', 'Size': 55}]} + + Parameters + ---------- + multihash : str + Key of the object to retrieve, in base58-encoded multihash format + + Returns + ------- + dict : Object data and links + """ + args = (multihash,) + return self._client.request('/object/get', args, decoder='json', **kwargs) + + + def links(self, multihash, **kwargs): + """Returns the links pointed to by the specified object. + + .. code-block:: python + + >>> client.object.links('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDx … ca7D') + {'Hash': 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D', + 'Links': [ + {'Hash': 'Qmd2xkBfEwEs9oMTk77A6jrsgurpF3ugXSg7dtPNFkcNMV', + 'Name': 'Makefile', 'Size': 174}, + {'Hash': 'QmeKozNssnkJ4NcyRidYgDY2jfRZqVEoRGfipkgath71bX', + 'Name': 'example', 'Size': 1474}, + {'Hash': 'QmZAL3oHMQYqsV61tGvoAVtQLs1WzRe1zkkamv9qxqnDuK', + 'Name': 'home', 'Size': 3947}, + {'Hash': 'QmZNPyKVriMsZwJSNXeQtVQSNU4v4KEKGUQaMT61LPahso', + 'Name': 'lib', 'Size': 268261}, + {'Hash': 'QmSY8RfVntt3VdxWppv9w5hWgNrE31uctgTiYwKir8eXJY', + 'Name': 'published-version', 'Size': 55}]} + + Parameters + ---------- + multihash : str + Key of the object to retrieve, in base58-encoded multihash format + + Returns + ------- + dict : Object hash and merkedag links + """ + args = (multihash,) + return self._client.request('/object/links', args, decoder='json', **kwargs) + + + def new(self, template=None, **kwargs): + """Creates a new object from an IPFS template. 
+
+		Parameters
+		----------
+		template : str
+			Blueprints from which to construct the new object. Possible values:
+
+			* ``"unixfs-dir"``
+			* ``None``
+
+		Returns
+		-------
+		dict : Object hash
+		"""
+		args = (template,) if template is not None else ()
+		return self._client.request('/object/new', args, decoder='json', **kwargs)
+
+
+	def put(self, file, **kwargs):
+		"""Stores input as a DAG object and returns its key.
+
+		.. code-block:: python
+
+			>>> client.object.put(io.BytesIO(b'''
+			...       {
+			...           "Data": "another",
+			...           "Links": [ {
+			...               "Name": "some link",
+			...               "Hash": "QmXg9Pp2ytZ14xgmQjYEiHjVjMFXzCV … R39V",
+			...               "Size": 8
+			...           } ]
+			...       }'''))
+			{'Hash': 'QmZZmY4KCu9r3e7M2Pcn46Fc5qbn6NpzaAGaYb22kbfTqm',
+			 'Links': [
+				{'Hash': 'QmXg9Pp2ytZ14xgmQjYEiHjVjMFXzCVVEcRTWJBmLgR39V',
+				 'Size': 8, 'Name': 'some link'}
+			 ]
+			}
+
+		Parameters
+		----------
+		file : io.RawIOBase
+			(JSON) object from which the DAG object will be created
+
+		Returns
+		-------
+		dict : Hash and links of the created DAG object
+
+			See :meth:`~ipfshttpclient.Client.object.links`
+		"""
+		body, headers = multipart.stream_files(file, self.chunk_size)
+		return self._client.request('/object/put', decoder='json', data=body,
+		                            headers=headers, **kwargs)
+
+
+	def stat(self, multihash, **kwargs):
+		"""Get stats for the DAG node named by multihash.
+
+		.. code-block:: python
+
+			>>> client.object.stat('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D')
+			{'LinksSize': 256, 'NumLinks': 5,
+			 'Hash': 'QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D',
+			 'BlockSize': 258, 'CumulativeSize': 274169, 'DataSize': 2}
+
+		Parameters
+		----------
+		multihash : str
+			Key of the object to retrieve, in base58-encoded multihash format
+
+		Returns
+		-------
+		dict
+		"""
+		args = (multihash,)
+		return self._client.request('/object/stat', args, decoder='json', **kwargs)
\ No newline at end of file
diff --git a/ipfshttpclient/client/pin.py b/ipfshttpclient/client/pin.py
new file mode 100644
index 00000000..3a3b3f96
--- /dev/null
+++ b/ipfshttpclient/client/pin.py
@@ -0,0 +1,182 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+from . import base
+
+
+class Section(base.SectionBase):
+	def add(self, path, *paths, **kwargs):
+		"""Pins objects to local storage.
+
+		Stores the IPFS object(s) referenced by the given path(s) locally
+		to disk.
+
+		.. code-block:: python
+
+			>>> client.pin.add("QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d")
+			{'Pins': ['QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d']}
+
+		Parameters
+		----------
+		path : str
+			Path to object(s) to be pinned
+		recursive : bool
+			Recursively pin the objects linked to by the specified object(s)
+
+		Returns
+		-------
+		dict : List of IPFS objects that have been pinned
+		"""
+		#PY2: No support for kw-only parameters after glob parameters
+		if "recursive" in kwargs:
+			kwargs.setdefault("opts", {"recursive": kwargs.pop("recursive")})
+
+		args = (path,) + paths
+		return self._client.request('/pin/add', args, decoder='json', **kwargs)
+
+
+	def ls(self, type="all", **kwargs):
+		"""Lists objects pinned to local storage.
+
+		By default, all pinned objects are returned, but the ``type`` flag or
+		arguments can restrict that to a specific pin type or to some specific
+		objects respectively.
+
+		.. code-block:: python
+
+			>>> client.pin.ls()
+			{'Keys': {
+				'QmNNPMA1eGUbKxeph6yqV8ZmRkdVat … YMuz': {'Type': 'recursive'},
+				'QmNPZUCeSN5458Uwny8mXSWubjjr6J … kP5e': {'Type': 'recursive'},
+				'QmNg5zWpRMxzRAVg7FTQ3tUxVbKj8E … gHPz': {'Type': 'indirect'},
+				…
+				'QmNiuVapnYCrLjxyweHeuk6Xdqfvts … wCCe': {'Type': 'indirect'}}}
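+
+		A hypothetical filtered query restricts the listing to a single pin
+		type; the output has the same shape as above but contains, for
+		instance, direct pins only:
+
+		.. code-block:: python
+
+			>>> client.pin.ls(type="direct")
+			{'Keys': {…}}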
+
+		Parameters
+		----------
+		type : str
+			The type of pinned keys to list. Can be:
+
+			* ``"direct"``
+			* ``"indirect"``
+			* ``"recursive"``
+			* ``"all"``
+
+		Returns
+		-------
+		dict : Hashes of pinned IPFS objects and why they are pinned
+		"""
+		kwargs.setdefault("opts", {"type": type})
+
+		return self._client.request('/pin/ls', decoder='json', **kwargs)
+
+
+	def rm(self, path, *paths, **kwargs):
+		"""Removes a pinned object from local storage.
+
+		Removes the pin from the given object allowing it to be garbage
+		collected if needed.
+
+		.. code-block:: python
+
+			>>> client.pin.rm('QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d')
+			{'Pins': ['QmfZY61ukoQuCX8e5Pt7v8pRfhkyxwZKZMTodAtmvyGZ5d']}
+
+		Parameters
+		----------
+		path : str
+			Path to object(s) to be unpinned
+		recursive : bool
+			Recursively unpin the objects linked to by the specified object(s)
+
+		Returns
+		-------
+		dict : List of IPFS objects that have been unpinned
+		"""
+		#PY2: No support for kw-only parameters after glob parameters
+		if "recursive" in kwargs:
+			kwargs.setdefault("opts", {"recursive": kwargs["recursive"]})
+			del kwargs["recursive"]
+
+		args = (path,) + paths
+		return self._client.request('/pin/rm', args, decoder='json', **kwargs)
+
+
+	def update(self, from_path, to_path, **kwargs):
+		"""Replaces one pin with another.
+
+		Updates one pin to another, making sure that all objects in the new
+		pin are local. Then removes the old pin. This is an optimized version
+		of first using :meth:`~ipfshttpclient.Client.pin.add` to add a new pin
+		for an object and then using :meth:`~ipfshttpclient.Client.pin.rm` to
+		remove the pin for the old object.
+
+		.. code-block:: python
+
+			>>> client.pin.update("QmXMqez83NU77ifmcPs5CkNRTMQksBLkyfBf4H5g1NZ52P",
+			...                   "QmUykHAi1aSjMzHw3KmBoJjqRUQYNkFXm8K1y7ZsJxpfPH")
+			{"Pins": ["/ipfs/QmXMqez83NU77ifmcPs5CkNRTMQksBLkyfBf4H5g1NZ52P",
+			          "/ipfs/QmUykHAi1aSjMzHw3KmBoJjqRUQYNkFXm8K1y7ZsJxpfPH"]}
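+
+		Passing ``unpin=False`` keeps the pin on the old object as well,
+		leaving both paths recursively pinned afterwards; the test suite in
+		this change uses exactly that to pin a sub-directory from its
+		parent directory.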
+ {"Cid":"QmVkNdzCBukBRdpyFiKPyL2R15qPExMr9rV9RFV2kf9eeV","Ok":True} + {"Cid":"QmbPzQruAEFjUU3gQfupns6b8USr8VrD9H71GrqGDXQSxm","Ok":True} + {"Cid":"Qmcns1nUvbeWiecdGDPw8JxWeUfxCV8JKhTfgzs3F8JM4P","Ok":True} + … + + Parameters + ---------- + path : str + Path to object(s) to be checked + verbose : bool + Also report status of items that were OK? (Default: ``False``) + + Returns + ------- + iterable + """ + #PY2: No support for kw-only parameters after glob parameters + if "verbose" in kwargs: + kwargs.setdefault("opts", {"verbose": kwargs["verbose"]}) + del kwargs["verbose"] + + args = (path,) + paths + return self._client.request('/pin/verify', args, decoder='json', stream=True, **kwargs) \ No newline at end of file diff --git a/ipfshttpclient/client/pubsub.py b/ipfshttpclient/client/pubsub.py new file mode 100644 index 00000000..b3c9deea --- /dev/null +++ b/ipfshttpclient/client/pubsub.py @@ -0,0 +1,182 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import + +from . import base + + +class SubChannel: + """ + Wrapper for a pubsub subscription object that allows for easy + closing of subscriptions. + """ + def __init__(self, sub): + self.__sub = sub + + def read_message(self): + return next(self.__sub) + + def __iter__(self): + return self.__sub + + def close(self): + self.__sub.close() + + def __enter__(self): + return self + + def __exit__(self, *a): + self.close() + + +class Section(base.SectionBase): + def ls(self, **kwargs): + """Lists subscribed topics by name + + This method returns data that contains a list of + all topics the user is subscribed to. In order + to subscribe to a topic ``pubsub.sub`` must be called. + + .. code-block:: python + + # subscribe to a channel + >>> with client.pubsub.sub("hello") as sub: + ... client.pubsub.ls() + { + 'Strings' : ["hello"] + } + + Returns + ------- + dict : Dictionary with the key "Strings" who's value is an array of + topics we are subscribed to + """ + return self._client.request('/pubsub/ls', decoder='json', **kwargs) + + + def peers(self, topic=None, **kwargs): + """List the peers we are pubsubbing with. + + Lists the id's of other IPFS users who we + are connected to via some topic. Without specifying + a topic, IPFS peers from all subscribed topics + will be returned in the data. If a topic is specified + only the IPFS id's of the peers from the specified + topic will be returned in the data. + + .. code-block:: python + + >>> client.pubsub.peers() + {'Strings': + [ + 'QmPbZ3SDgmTNEB1gNSE9DEf4xT8eag3AFn5uo7X39TbZM8', + 'QmQKiXYzoFpiGZ93DaFBFDMDWDJCRjXDARu4wne2PRtSgA', + ... + 'QmepgFW7BHEtU4pZJdxaNiv75mKLLRQnPi1KaaXmQN4V1a' + ] + } + + ## with a topic + + # subscribe to a channel + >>> with client.pubsub.sub('hello') as sub: + ... client.pubsub.peers(topic='hello') + {'String': + [ + 'QmPbZ3SDgmTNEB1gNSE9DEf4xT8eag3AFn5uo7X39TbZM8', + ... + # other peers connected to the same channel + ] + } + + Parameters + ---------- + topic : str + The topic to list connected peers of + (defaults to None which lists peers for all topics) + + Returns + ------- + dict : Dictionary with the ke "Strings" who's value is id of IPFS + peers we're pubsubbing with + """ + args = (topic,) if topic is not None else () + return self._client.request('/pubsub/peers', args, decoder='json', **kwargs) + + + def publish(self, topic, payload, **kwargs): + """Publish a message to a given pubsub topic + + Publishing will publish the given payload (string) to + everyone currently subscribed to the given topic. 
+
+
+class Section(base.SectionBase):
+	def ls(self, **kwargs):
+		"""Lists subscribed topics by name
+
+		This method returns data that contains a list of
+		all topics the user is subscribed to. In order
+		to subscribe to a topic ``pubsub.subscribe`` must be called.
+
+		.. code-block:: python
+
+			# subscribe to a channel
+			>>> with client.pubsub.subscribe("hello") as sub:
+			...     client.pubsub.ls()
+			{
+				'Strings' : ["hello"]
+			}
+
+		Returns
+		-------
+		dict : Dictionary with the key "Strings" whose value is an array of
+			   topics we are subscribed to
+		"""
+		return self._client.request('/pubsub/ls', decoder='json', **kwargs)
+
+
+	def peers(self, topic=None, **kwargs):
+		"""List the peers we are pubsubbing with.
+
+		Lists the IDs of other IPFS users who we
+		are connected to via some topic. Without specifying
+		a topic, IPFS peers from all subscribed topics
+		will be returned in the data. If a topic is specified
+		only the IPFS IDs of the peers from the specified
+		topic will be returned in the data.
+
+		.. code-block:: python
+
+			>>> client.pubsub.peers()
+			{'Strings':
+				[
+					'QmPbZ3SDgmTNEB1gNSE9DEf4xT8eag3AFn5uo7X39TbZM8',
+					'QmQKiXYzoFpiGZ93DaFBFDMDWDJCRjXDARu4wne2PRtSgA',
+					...
+					'QmepgFW7BHEtU4pZJdxaNiv75mKLLRQnPi1KaaXmQN4V1a'
+				]
+			}
+
+			## with a topic
+
+			# subscribe to a channel
+			>>> with client.pubsub.subscribe('hello') as sub:
+			...     client.pubsub.peers(topic='hello')
+			{'Strings':
+				[
+					'QmPbZ3SDgmTNEB1gNSE9DEf4xT8eag3AFn5uo7X39TbZM8',
+					...
+					# other peers connected to the same channel
+				]
+			}
+
+		Parameters
+		----------
+		topic : str
+			The topic to list connected peers of
+			(defaults to None which lists peers for all topics)
+
+		Returns
+		-------
+		dict : Dictionary with the key "Strings" whose value is a list of
+			   IDs of IPFS peers we're pubsubbing with
+		"""
+		args = (topic,) if topic is not None else ()
+		return self._client.request('/pubsub/peers', args, decoder='json', **kwargs)
+
+
+	def publish(self, topic, payload, **kwargs):
+		"""Publish a message to a given pubsub topic
+
+		Publishing will publish the given payload (string) to
+		everyone currently subscribed to the given topic.
+
+		All data (including the id of the publisher) is automatically
+		base64 encoded when published.
+
+		.. code-block:: python
+
+			# publishes the message 'message' to the topic 'hello'
+			>>> client.pubsub.publish('hello', 'message')
+			[]
+
+		Parameters
+		----------
+		topic : str
+			Topic to publish to
+		payload : str
+			Data to be published to the given topic
+
+		Returns
+		-------
+		list : empty list
+		"""
+		args = (topic, payload)
+		return self._client.request('/pubsub/pub', args, decoder='json', **kwargs)
+
+
+	def subscribe(self, topic, discover=False, **kwargs):
+		"""Subscribe to messages on a given topic
+
+		Subscribing to a topic in IPFS means anytime
+		a message is published to a topic, the subscribers
+		will be notified of the publication.
+
+		The connection with the pubsub topic is opened and read.
+		The Subscription returned should be used inside a context
+		manager to ensure that it is closed properly and not left
+		hanging.
+
+		.. code-block:: python
+
+			>>> with client.pubsub.subscribe('testing') as sub:
+			...     # publish a message 'hello' to the topic 'testing'
+			...     client.pubsub.publish('testing', 'hello')
+			...     for message in sub:
+			...         print(message)
+			...         # Stop reading the subscription after
+			...         # we receive one publication
+			...         break
+			{'from': '<base64encoded IPFS id>',
+			 'data': 'aGVsbG8=',
+			 'topicIDs': ['testing']}
+
+			# NOTE: in order to receive published data
+			# you must already be subscribed to the topic at publication
+			# time.
+
+		Parameters
+		----------
+		topic : str
+			Name of a topic to subscribe to
+
+		discover : bool
+			Try to discover other peers subscribed to the same topic
+			(defaults to False)
+
+		Returns
+		-------
+		Generator wrapped in a context
+		manager that maintains a connection
+		stream to the given topic.
+		"""
+		args = (topic, discover)
+		return SubChannel(self._client.request('/pubsub/sub', args, stream=True, decoder='json'))
\ No newline at end of file
diff --git a/ipfshttpclient/client/repo.py b/ipfshttpclient/client/repo.py
new file mode 100644
index 00000000..7a37adc0
--- /dev/null
+++ b/ipfshttpclient/client/repo.py
@@ -0,0 +1,63 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+from . import base
+
+
+class Section(base.SectionBase):
+	def gc(self, **kwargs):
+		"""Removes stored objects that are not pinned from the repo.
+
+		.. code-block:: python
+
+			>>> client.repo.gc()
+			[{'Key': 'QmNPXDC6wTXVmZ9Uoc8X1oqxRRJr4f1sDuyQuwaHG2mpW2'},
+			 {'Key': 'QmNtXbF3AjAk59gQKRgEdVabHcSsiPUnJwHnZKyj2x8Z3k'},
+			 {'Key': 'QmRVBnxUCsD57ic5FksKYadtyUbMsyo9KYQKKELajqAp4q'},
+			 …
+			 {'Key': 'QmYp4TeCurXrhsxnzt5wqLqqUz8ZRg5zsc7GuUrUSDtwzP'}]
+
+		Performs a garbage collection sweep of the local set of
+		stored objects and removes the ones that are not pinned in
+		order to reclaim hard disk space. Returns the hashes of all
+		collected objects.
+
+		Returns
+		-------
+		dict : List of IPFS objects that have been removed
+		"""
+		return self._client.request('/repo/gc', decoder='json', **kwargs)
+
+
+	def stat(self, **kwargs):
+		"""Displays the repo's status.
+
+		Returns the number of objects in the repo and the repo's size,
+		version, and path.
+
+		.. code-block:: python
+
+			>>> client.repo.stat()
+			{'NumObjects': 354,
+			 'RepoPath': '…/.local/share/ipfs',
+			 'Version': 'fs-repo@4',
+			 'RepoSize': 13789310}
+
+		Returns
+		-------
+		dict : General information about the IPFS file repository
+
+		+------------+-------------------------------------------------+
+		| NumObjects | Number of objects in the local repo.
| + +------------+-------------------------------------------------+ + | RepoPath | The path to the repo being currently used. | + +------------+-------------------------------------------------+ + | RepoSize | Size in bytes that the repo is currently using. | + +------------+-------------------------------------------------+ + | Version | The repo version. | + +------------+-------------------------------------------------+ + """ + return self._client.request('/repo/stat', decoder='json', **kwargs) + + + #TODO: `version()` \ No newline at end of file diff --git a/ipfshttpclient/client/swarm.py b/ipfshttpclient/client/swarm.py new file mode 100644 index 00000000..89ccf34d --- /dev/null +++ b/ipfshttpclient/client/swarm.py @@ -0,0 +1,168 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import + +from . import base + + +class FiltersSection(base.SectionBase): + def add(self, address, *addresses, **kwargs): + """Adds a given multiaddr filter to the filter list. + + This will add an address filter to the daemons swarm. Filters applied + this way will not persist daemon reboots, to achieve that, add your + filters to the configuration file. + + .. code-block:: python + + >>> client.swarm.filters.add("/ip4/192.168.0.0/ipcidr/16") + {'Strings': ['/ip4/192.168.0.0/ipcidr/16']} + + Parameters + ---------- + address : str + Multiaddr to filter + + Returns + ------- + dict : List of swarm filters added + """ + args = (address,) + addresses + return self._client.request('/swarm/filters/add', args, decoder='json', **kwargs) + + + def rm(self, address, *addresses, **kwargs): + """Removes a given multiaddr filter from the filter list. + + This will remove an address filter from the daemons swarm. Filters + removed this way will not persist daemon reboots, to achieve that, + remove your filters from the configuration file. + + .. code-block:: python + + >>> client.swarm.filters.rm("/ip4/192.168.0.0/ipcidr/16") + {'Strings': ['/ip4/192.168.0.0/ipcidr/16']} + + Parameters + ---------- + address : str + Multiaddr filter to remove + + Returns + ------- + dict : List of swarm filters removed + """ + args = (address,) + addresses + return self._client.request('/swarm/filters/rm', args, decoder='json', **kwargs) + + +class Section(base.SectionBase): + filters = base.SectionProperty(FiltersSection) + + + def addrs(self, **kwargs): + """Returns the addresses of currently connected peers by peer id. + + .. code-block:: python + + >>> pprint(client.swarm.addrs()) + {'Addrs': { + 'QmNMVHJTSZHTWMWBbmBrQgkA1hZPWYuVJx2DpSGESWW6Kn': [ + '/ip4/10.1.0.1/tcp/4001', + '/ip4/127.0.0.1/tcp/4001', + '/ip4/51.254.25.16/tcp/4001', + '/ip6/2001:41d0:b:587:3cae:6eff:fe40:94d8/tcp/4001', + '/ip6/2001:470:7812:1045::1/tcp/4001', + '/ip6/::1/tcp/4001', + '/ip6/fc02:2735:e595:bb70:8ffc:5293:8af8:c4b7/tcp/4001', + '/ip6/fd00:7374:6172:100::1/tcp/4001', + '/ip6/fd20:f8be:a41:0:c495:aff:fe7e:44ee/tcp/4001', + '/ip6/fd20:f8be:a41::953/tcp/4001'], + 'QmNQsK1Tnhe2Uh2t9s49MJjrz7wgPHj4VyrZzjRe8dj7KQ': [ + '/ip4/10.16.0.5/tcp/4001', + '/ip4/127.0.0.1/tcp/4001', + '/ip4/172.17.0.1/tcp/4001', + '/ip4/178.62.107.36/tcp/4001', + '/ip6/::1/tcp/4001'], + … + }} + + Returns + ------- + dict : Multiaddrs of peers by peer id + """ + return self._client.request('/swarm/addrs', decoder='json', **kwargs) + + + def connect(self, address, *addresses, **kwargs): + """Opens a connection to a given address. + + This will open a new direct connection to a peer address. 
The address + format is an IPFS multiaddr:: + + /ip4/104.131.131.82/tcp/4001/ipfs/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ + + .. code-block:: python + + >>> client.swarm.connect("/ip4/104.131.131.82/tcp/4001/ipfs/Qma … uvuJ") + {'Strings': ['connect QmaCpDMGvV2BGHeYERUEnRQAwe3 … uvuJ success']} + + Parameters + ---------- + address : str + Address of peer to connect to + + Returns + ------- + dict : Textual connection status report + """ + args = (address,) + addresses + return self._client.request('/swarm/connect', args, decoder='json', **kwargs) + + + def disconnect(self, address, *addresses, **kwargs): + """Closes the connection to a given address. + + This will close a connection to a peer address. The address format is + an IPFS multiaddr:: + + /ip4/104.131.131.82/tcp/4001/ipfs/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ + + The disconnect is not permanent; if IPFS needs to talk to that address + later, it will reconnect. + + .. code-block:: python + + >>> client.swarm.disconnect("/ip4/104.131.131.82/tcp/4001/ipfs/Qm … uJ") + {'Strings': ['disconnect QmaCpDMGvV2BGHeYERUEnRQA … uvuJ success']} + + Parameters + ---------- + address : str + Address of peer to disconnect from + + Returns + ------- + dict : Textual connection status report + """ + args = (address,) + addresses + return self._client.request('/swarm/disconnect', args, decoder='json', **kwargs) + + + def peers(self, **kwargs): + """Returns the addresses & IDs of currently connected peers. + + .. code-block:: python + + >>> client.swarm.peers() + {'Strings': [ + '/ip4/101.201.40.124/tcp/40001/ipfs/QmZDYAhmMDtnoC6XZ … kPZc', + '/ip4/104.131.131.82/tcp/4001/ipfs/QmaCpDMGvV2BGHeYER … uvuJ', + '/ip4/104.223.59.174/tcp/4001/ipfs/QmeWdgoZezpdHz1PX8 … 1jB6', + … + '/ip6/fce3: … :f140/tcp/43901/ipfs/QmSoLnSGccFuZQJzRa … ca9z']} + + Returns + ------- + dict : List of multiaddrs of currently connected peers + """ + return self._client.request('/swarm/peers', decoder='json', **kwargs) \ No newline at end of file diff --git a/ipfshttpclient/client/unstable.py b/ipfshttpclient/client/unstable.py new file mode 100644 index 00000000..1eff2ecb --- /dev/null +++ b/ipfshttpclient/client/unstable.py @@ -0,0 +1,167 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import + +from . import base + + +class LogSection(base.SectionBase): + def level(self, subsystem, level, **kwargs): + r"""Changes the logging output of a running daemon. + + **This API is subject to future change or removal!** + + .. code-block:: python + + >>> client.log_level("path", "info") + {'Message': "Changed log level of 'path' to 'info'\n"} + + Parameters + ---------- + subsystem : str + The subsystem logging identifier (Use ``"all"`` for all subsystems) + level : str + The desired logging level. Must be one of: + + * ``"debug"`` + * ``"info"`` + * ``"warning"`` + * ``"error"`` + * ``"fatal"`` + * ``"panic"`` + + Returns + ------- + dict : Status message + """ + args = (subsystem, level) + return self._client.request('/log/level', args, + decoder='json', **kwargs) + + def ls(self, **kwargs): + """Lists the logging subsystems of a running daemon. + + **This API is subject to future change or removal!** + + .. 
code-block:: python + + >>> client.log_ls() + {'Strings': [ + 'github.com/ipfs/go-libp2p/p2p/host', 'net/identify', + 'merkledag', 'providers', 'routing/record', 'chunk', 'mfs', + 'ipns-repub', 'flatfs', 'ping', 'mockrouter', 'dagio', + 'cmds/files', 'blockset', 'engine', 'mocknet', 'config', + 'commands/http', 'cmd/ipfs', 'command', 'conn', 'gc', + 'peerstore', 'core', 'coreunix', 'fsrepo', 'core/server', + 'boguskey', 'github.com/ipfs/go-libp2p/p2p/host/routed', + 'diagnostics', 'namesys', 'fuse/ipfs', 'node', 'secio', + 'core/commands', 'supernode', 'mdns', 'path', 'table', + 'swarm2', 'peerqueue', 'mount', 'fuse/ipns', 'blockstore', + 'github.com/ipfs/go-libp2p/p2p/host/basic', 'lock', 'nat', + 'importer', 'corerepo', 'dht.pb', 'pin', 'bitswap_network', + 'github.com/ipfs/go-libp2p/p2p/protocol/relay', 'peer', + 'transport', 'dht', 'offlinerouting', 'tarfmt', 'eventlog', + 'ipfsaddr', 'github.com/ipfs/go-libp2p/p2p/net/swarm/addr', + 'bitswap', 'reprovider', 'supernode/proxy', 'crypto', 'tour', + 'commands/cli', 'blockservice']} + + Returns + ------- + dict : List of daemon logging subsystems + """ + return self._client.request('/log/ls', decoder='json', **kwargs) + + def tail(self, **kwargs): + r"""Reads log outputs as they are written. + + **This API is subject to future change or removal!** + + This function returns an iterator that needs to be closed using a + context manager (``with``-statement) or using the ``.close()`` method. + + .. code-block:: python + + >>> with client.log_tail() as log_tail_iter: + ... for item in log_tail_iter: + ... print(item) + ... + {"event":"updatePeer","system":"dht", + "peerID":"QmepsDPxWtLDuKvEoafkpJxGij4kMax11uTH7WnKqD25Dq", + "session":"7770b5e0-25ec-47cd-aa64-f42e65a10023", + "time":"2016-08-22T13:25:27.43353297Z"} + {"event":"handleAddProviderBegin","system":"dht", + "peer":"QmepsDPxWtLDuKvEoafkpJxGij4kMax11uTH7WnKqD25Dq", + "session":"7770b5e0-25ec-47cd-aa64-f42e65a10023", + "time":"2016-08-22T13:25:27.433642581Z"} + {"event":"handleAddProvider","system":"dht","duration":91704, + "key":"QmNT9Tejg6t57Vs8XM2TVJXCwevWiGsZh3kB4HQXUZRK1o", + "peer":"QmepsDPxWtLDuKvEoafkpJxGij4kMax11uTH7WnKqD25Dq", + "session":"7770b5e0-25ec-47cd-aa64-f42e65a10023", + "time":"2016-08-22T13:25:27.433747513Z"} + {"event":"updatePeer","system":"dht", + "peerID":"QmepsDPxWtLDuKvEoafkpJxGij4kMax11uTH7WnKqD25Dq", + "session":"7770b5e0-25ec-47cd-aa64-f42e65a10023", + "time":"2016-08-22T13:25:27.435843012Z"} + … + + Returns + ------- + iterable + """ + return self._client.request('/log/tail', decoder='json', + stream=True, **kwargs) + + + +class RefsSection(base.SectionBase): + def __call__(self, multihash, **kwargs): + """Returns a list of hashes of objects referenced by the given hash. + + **This API is subject to future change or removal!** You likely want to + use :meth:`~ipfshttpclient.object.links` instead. + + .. code-block:: python + + >>> client.unstable.refs('QmTkzDwWqPbnAh5YiV5VwcTLnGdwSNsNTn2aDxdXBFca7D') + [{'Ref': 'Qmd2xkBfEwEs9oMTk77A6jrsgurpF3ugXSg7 … cNMV', 'Err': ''}, + … + {'Ref': 'QmSY8RfVntt3VdxWppv9w5hWgNrE31uctgTi … eXJY', 'Err': ''}] + + Parameters + ---------- + multihash : str + Path to the object(s) to list refs from + + Returns + ------- + list + """ + args = (multihash,) + return self._client.request('/refs', args, decoder='json', **kwargs) + + + def local(self, **kwargs): + """Displays the hashes of all local objects. + + **This API is subject to future change or removal!** + + .. 
code-block:: python + + >>> client.unstable.refs.local() + [{'Ref': 'Qmd2xkBfEwEs9oMTk77A6jrsgurpF3ugXSg7 … cNMV', 'Err': ''}, + … + {'Ref': 'QmSY8RfVntt3VdxWppv9w5hWgNrE31uctgTi … eXJY', 'Err': ''}] + + Returns + ------- + list + """ + return self._client.request('/refs/local', decoder='json', **kwargs) + + + +class Section(base.SectionBase): + """ + Features that are subject to change and are only provided for convinience + """ + log = base.SectionProperty(LogSection) + refs = base.SectionProperty(RefsSection) \ No newline at end of file diff --git a/ipfshttpclient/encoding.py b/ipfshttpclient/encoding.py index 2f2f2aa7..8f53ce9b 100644 --- a/ipfshttpclient/encoding.py +++ b/ipfshttpclient/encoding.py @@ -10,9 +10,7 @@ import abc import codecs -import io import json -import pickle import six @@ -287,98 +285,6 @@ def encode(self, obj): raise exceptions.EncodingError('json', error) -class Pickle(Encoding): - """Python object parser/encoder using `pickle`. - """ - name = 'pickle' - - def __init__(self): - self._buffer = io.BytesIO() - - def parse_partial(self, raw): - """Buffers the given data so that the it can be passed to `pickle` in - one go. - - This does not actually process the data in smaller chunks, but merely - buffers it until `parse_finalize` is called! This is mostly because - the standard-library module expects the entire data to be available up - front, which is currently always the case for our code anyways. - - Parameters - ---------- - raw : bytes - Data to be buffered - - Returns - ------- - tuple : An empty tuple - """ - self._buffer.write(raw) - return () - - def parse_finalize(self): - """Parses the buffered data and yields the result. - - Raises - ------ - ~ipfshttpclient.exceptions.DecodingError - - Returns - ------- - generator - """ - try: - self._buffer.seek(0, 0) - yield pickle.load(self._buffer) - except pickle.UnpicklingError as error: - raise exceptions.DecodingError('pickle', error) - - def parse(self, raw): - r"""Returns a Python object decoded from a pickle byte stream. - - .. code-block:: python - - >>> p = Pickle() - >>> p.parse(b'(lp0\nI1\naI2\naI3\naI01\naF4.5\naNaF6000.0\na.') - [1, 2, 3, True, 4.5, None, 6000.0] - - Raises - ------ - ~ipfshttpclient.exceptions.DecodingError - - Parameters - ---------- - raw : bytes - Pickle data bytes - - Returns - ------- - object - """ - return Encoding.parse(self, raw) - - def encode(self, obj): - """Returns ``obj`` serialized as a pickle binary string. 
- - Raises - ------ - ~ipfshttpclient.exceptions.EncodingError - - Parameters - ---------- - obj : object - Serializable Python object - - Returns - ------- - bytes - """ - try: - return pickle.dumps(obj) - except pickle.PicklingError as error: - raise exceptions.EncodingError('pickle', error) - - class Protobuf(Encoding): """Protobuf parser/encoder that handles protobuf.""" name = 'protobuf' @@ -393,7 +299,6 @@ class Xml(Encoding): __encodings = { Dummy.name: Dummy, Json.name: Json, - Pickle.name: Pickle, Protobuf.name: Protobuf, Xml.name: Xml } @@ -414,7 +319,6 @@ def get_encoding(name): * ``"none"`` * ``"json"`` - * ``"pickle"`` * ``"protobuf"`` * ``"xml"`` """ diff --git a/ipfshttpclient/multipart.py b/ipfshttpclient/multipart.py index 09ff5985..f3ec716d 100644 --- a/ipfshttpclient/multipart.py +++ b/ipfshttpclient/multipart.py @@ -380,13 +380,13 @@ def __init__(self, directory, for pattern in patterns: self.patterns.append(glob_compile(pattern) if isinstance(pattern, str) else pattern) - self.directory = directory + self.directory = utils.convert_path(directory) if not isinstance(self.directory, int): - self.directory = os.path.normpath(directory) + self.directory = os.path.normpath(self.directory) self.recursive = recursive self.dirname = dirname - name = os.path.basename(directory) if not isinstance(directory, int) else "" + name = os.path.basename(self.directory) if not isinstance(self.directory, int) else "" super(DirectoryStream, self).__init__(name, chunk_size=chunk_size) def _body_directory(self, short_path, visited_directories): @@ -621,9 +621,10 @@ def auto_close_iter_fd(fd, iter): finally: os.close(fd) - dirname = os.path.basename(os.path.normpath(directory)) + directory_str = utils.convert_path(directory) + dirname = os.path.basename(os.path.normpath(directory_str)) - fd = os.open(directory, os.O_CLOEXEC | os.O_DIRECTORY) + fd = os.open(directory_str, os.O_CLOEXEC | os.O_DIRECTORY) body, headers = stream_directory_impl(fd, dirname) return auto_close_iter_fd(fd, body), headers else: @@ -654,7 +655,7 @@ def stream_filesystem_node(filepaths, """ is_dir = False if isinstance(filepaths, utils.path_types): - is_dir = os.path.isdir(filepaths) + is_dir = os.path.isdir(utils.convert_path(filepaths)) elif isinstance(filepaths, int): import stat is_dir = stat.S_ISDIR(os.fstat(filepaths).st_mode) diff --git a/ipfshttpclient/utils.py b/ipfshttpclient/utils.py index b3a14313..646616b3 100644 --- a/ipfshttpclient/utils.py +++ b/ipfshttpclient/utils.py @@ -18,7 +18,31 @@ class collections: path_types = (six.text_type, six.binary_type) if hasattr(os, "PathLike"): #PY36+ - path_types += (os.PathLike,) + path_types += (os.PathLike,) + + def convert_path(path): + # Not needed since all system APIs also accept an `os.PathLike` + return path +else: + _pathlib_types = () + try: #PY2: doesn't have `pathlib` + import pathlib + _pathlib_types += (pathlib.PurePath,) + except ImportError: + pass + # Independently maintained forward-port of `pathlib` for Py27 and others + try: + import pathlib2 + _pathlib_types += (pathlib2.PurePath,) + except ImportError: + pass + path_types += _pathlib_types + + def convert_path(path): + # `pathlib`'s PathLike objects need to be treated specially and + # converted to strings when interacting with system APIs + return str(path) if isinstance(path, _pathlib_types) else path + def guess_mimetype(filename): @@ -59,7 +83,7 @@ def clean_file(file): if isinstance(file, int): return os.fdopen(file, 'rb', closefd=False), True elif not hasattr(file, 'read'): - return 
open(file, 'rb'), True + return open(convert_path(file), 'rb'), True else: return file, False diff --git a/requirements-codestyle.txt b/requirements-codestyle.txt index 0735869f..b339a523 100644 --- a/requirements-codestyle.txt +++ b/requirements-codestyle.txt @@ -1,3 +1,3 @@ flake8~=3.0 flake8-per-file-ignores~=0.6 -flake8-tabs~=1.0 \ No newline at end of file +flake8-tabs~=1.0.1 \ No newline at end of file diff --git a/test/functional/conftest.py b/test/functional/conftest.py new file mode 100644 index 00000000..5ccda2fd --- /dev/null +++ b/test/functional/conftest.py @@ -0,0 +1,56 @@ +# Note that this file is special in that py.test will automatically import this file and gather +# its list of fixtures even if it is not directly imported into the corresponding test case. +import pathlib + +import pytest + +import ipfshttpclient + + +TEST_DIR = pathlib.Path(__file__).parent + + +__is_available = None +def is_available(): # noqa + """ + Return whether the IPFS daemon is reachable or not + """ + global __is_available + + if not isinstance(__is_available, bool): + try: + ipfshttpclient.connect() + except ipfshttpclient.exceptions.Error as error: + __is_available = False + + # Make sure version incompatiblity is displayed to the user + if isinstance(error, ipfshttpclient.exceptions.VersionMismatch): + raise + else: + __is_available = True + + return __is_available + + +def sort_by_key(items, key="Name"): + return sorted(items, key=lambda x: x[key]) + + + +@pytest.fixture +def client(): + if is_available(): + return ipfshttpclient.Client() + else: + pytest.skip("Running IPFS node required") + + +@pytest.fixture +def cleanup_pins(client): + pinned = set(client.pin.ls(type="recursive")["Keys"]) + + yield + + for multihash in client.pin.ls(type="recursive")["Keys"]: + if multihash not in pinned: + client.pin.rm(multihash) \ No newline at end of file diff --git a/test/functional/test_bitswap.py b/test/functional/test_bitswap.py new file mode 100644 index 00000000..8f739cdf --- /dev/null +++ b/test/functional/test_bitswap.py @@ -0,0 +1,13 @@ +# _*_ coding: utf-8 -*- + + +def test_wantlist(client): + result = client.bitswap.wantlist(peer="QmdkJZUWnVkEc6yfptVu4LWY8nHkEnGwsxqQ233QSGj8UP") + assert type(result) is dict + assert "Keys" in result + + +def test_stat(client): + result = client.bitswap.stat() + assert type(result) is dict + assert "Wantlist" in result \ No newline at end of file diff --git a/test/functional/test_block.py b/test/functional/test_block.py new file mode 100644 index 00000000..5d99c919 --- /dev/null +++ b/test/functional/test_block.py @@ -0,0 +1,31 @@ +# _*_ coding: utf-8 -*- +import conftest + + +TEST_MULTIHASH = "QmYA2fn8cMbVWo4v95RwcwJVyQsNtnEwHerfWR8UNtEwoE" +TEST_CONTENT_SIZE = 248 + +TEST_PUT_FILEPATH = conftest.TEST_DIR / "fake_dir" / "fsdfgh" +TEST_PUT_MULTIHASH = "QmPevo2B1pwvDyuZyJbWVfhwkaGPee3f1kX36wFmqx1yna" + + +# Uncomment this if you don't want to wait for the `test_start` test during development: +#import pytest +#pytest.skip("TEMP!", allow_module_level=True) + + +def test_stat(client): + expected_keys = {"Key", "Size"} + res = client.block.stat(TEST_MULTIHASH) + assert set(res.keys()).issuperset(expected_keys) + + +def test_get(client): + assert len(client.block.get(TEST_MULTIHASH)) == TEST_CONTENT_SIZE + + +def test_put(client): + expected_keys = {"Key", "Size"} + res = client.block.put(TEST_PUT_FILEPATH) + assert set(res.keys()).issuperset(expected_keys) + assert res["Key"] == TEST_PUT_MULTIHASH \ No newline at end of file diff --git 
a/test/functional/test_files.py b/test/functional/test_files.py new file mode 100644 index 00000000..5c64faba --- /dev/null +++ b/test/functional/test_files.py @@ -0,0 +1,270 @@ +# _*_ coding: utf-8 -*- +import os +import shutil + +import pytest + +import ipfshttpclient.exceptions + +import conftest + + + +### test_add_multiple_from_list +FAKE_FILE1_PATH = conftest.TEST_DIR / "fake_dir" / "fsdfgh" +FAKE_FILE2_PATH = conftest.TEST_DIR / "fake_dir" / "popoiopiu" + +FAKE_FILE1_HASH = {"Hash": "QmQcCtMgLVwvMQGu6mvsRYLjwqrZJcYtH4mboM9urWW9vX", + "Name": "fsdfgh", "Size": "16"} +FAKE_FILE1_DIR_HASH = [ + {"Hash": "QmQcCtMgLVwvMQGu6mvsRYLjwqrZJcYtH4mboM9urWW9vX", + "Name": "fsdfgh", "Size": "16"}, + {"Hash": "Qme7vmxd4LAAYL7vpho3suQeT3gvMeLLtPdp7myCb9Db55", + "Name": "", "Size": "68"} +] + +FAKE_FILES_HASH = [ + {"Hash": "QmQcCtMgLVwvMQGu6mvsRYLjwqrZJcYtH4mboM9urWW9vX", + "Name": "fsdfgh", "Size": "16"}, + {"Hash": "QmYAhvKYu46rh5NcHzeu6Bhc7NG9SqkF9wySj2jvB74Rkv", + "Name": "popoiopiu", "Size": "23"} +] + +### test_add_multiple_from_dirname +FAKE_DIR_TEST2_PATH = conftest.TEST_DIR / "fake_dir" / "test2" +FAKE_DIR_TEST2_HASH = [ + {"Hash": "QmStL6TPbJfMHQhHjoVT93kCynVx3GwLf7xwgrtScqABhU", + "Name": "test2", "Size": "297"}, + {"Hash": "QmV3n14G8iQoNG8zpHCUZnmQpcQbhEfhQZ8NHvUEdoiXAN", + "Name": "test2/high", "Size": "114"}, + {"Hash": "QmZazHsY4nbhRTHTEp5SUWd4At6aSXia1kxEuywHTicayE", + "Name": "test2/high/five", "Size": "64"}, + {"Hash": "QmW8tRcpqy5siMNAU9Lx3GADAxQbVUrx8XJGFDjkd6vqLT", + "Name": "test2/high/five/dummy", "Size": "13"}, + {"Hash": "Qmb1NPqPzdHCMvHRfCkk6TWLcnpGJ71KnafacCMm6TKLcD", + "Name": "test2/fssdf", "Size": "22"}, + {"Hash": "QmNuvmuFeeWWpxjCQwLkHshr8iqhGLWXFzSGzafBeawTTZ", + "Name": "test2/llllg", "Size": "17"} +] + +### test_add_filepattern_from_dirname +FAKE_DIR_FNPATTERN1 = "**/fss*" +# The hash of the folder is not same as above because the content of the folder added is not same +FAKE_DIR_FNPATTERN1_HASH = [ + {"Hash": "Qmb1NPqPzdHCMvHRfCkk6TWLcnpGJ71KnafacCMm6TKLcD", + "Name": "fake_dir/test2/fssdf", "Size": "22"}, + {"Hash": "QmT5rV6EsKNSW619SntLrkCxbUXXQh4BrKm3JazF2zEgEe", + "Name": "fake_dir/test2", "Size": "73"}, + {"Hash": "QmbPzQruAEFjUU3gQfupns6b8USr8VrD9H71GrqGDXQSxm", + "Name": "fake_dir", "Size": "124"} +] + +## test_add_filepattern_subdir_wildcard +FAKE_DIR_FNPATTERN2 = "test2/**/high" +FAKE_DIR_FNPATTERN2_HASH = [ + {"Hash": "QmUXuNHpV6cdeTngSkEMbP2nQDPuyE2MFXNYtTXzZvLZHf", + "Name": "fake_dir", "Size": "216"}, + {"Hash": "QmZGuwqaXMmSwJcfTsvseHwy3mvDPD9zrs9WVowAZcQN4W", + "Name": "fake_dir/test2", "Size": "164"}, + {"Hash": "QmV3n14G8iQoNG8zpHCUZnmQpcQbhEfhQZ8NHvUEdoiXAN", + "Name": "fake_dir/test2/high", "Size": "114"}, + {"Hash": "QmZazHsY4nbhRTHTEp5SUWd4At6aSXia1kxEuywHTicayE", + "Name": "fake_dir/test2/high/five", "Size": "64"}, + {"Hash": "QmW8tRcpqy5siMNAU9Lx3GADAxQbVUrx8XJGFDjkd6vqLT", + "Name": "fake_dir/test2/high/five/dummy", "Size": "13"} +] + + +## test_add_recursive +FAKE_DIR_PATH = conftest.TEST_DIR / "fake_dir" +FAKE_DIR_HASH = [ + {"Hash": "QmNx8xVu9mpdz9k6etbh2S8JwZygatsZVCH4XhgtfUYAJi", + "Name": "fake_dir", "Size": "610"}, + {"Hash": "QmQcCtMgLVwvMQGu6mvsRYLjwqrZJcYtH4mboM9urWW9vX", + "Name": "fake_dir/fsdfgh", "Size": "16"}, + {"Hash": "QmYAhvKYu46rh5NcHzeu6Bhc7NG9SqkF9wySj2jvB74Rkv", + "Name": "fake_dir/popoiopiu", "Size": "23"}, + {"Hash": "QmStL6TPbJfMHQhHjoVT93kCynVx3GwLf7xwgrtScqABhU", + "Name": "fake_dir/test2", "Size": "297"}, + {"Hash": "Qmb1NPqPzdHCMvHRfCkk6TWLcnpGJ71KnafacCMm6TKLcD", + "Name": "fake_dir/test2/fssdf", "Size": 
"22"}, + {"Hash": "QmV3n14G8iQoNG8zpHCUZnmQpcQbhEfhQZ8NHvUEdoiXAN", + "Name": "fake_dir/test2/high", "Size": "114"}, + {"Hash": "QmZazHsY4nbhRTHTEp5SUWd4At6aSXia1kxEuywHTicayE", + "Name": "fake_dir/test2/high/five", "Size": "64"}, + {"Hash": "QmW8tRcpqy5siMNAU9Lx3GADAxQbVUrx8XJGFDjkd6vqLT", + "Name": "fake_dir/test2/high/five/dummy", "Size": "13"}, + {"Hash": "QmNuvmuFeeWWpxjCQwLkHshr8iqhGLWXFzSGzafBeawTTZ", + "Name": "fake_dir/test2/llllg", "Size": "17"}, + {"Hash": "QmRphRr6ULDEj7YnXpLdnxhnPiVjv5RDtGX3er94Ec6v4Q", + "Name": "fake_dir/test3", "Size": "76"}, + {"Hash": "QmeMbJSHNCesAh7EeopackUdjutTJznum1Fn7knPm873Fe", + "Name": "fake_dir/test3/ppppoooooooooo", "Size": "16"} +] + + + +def test_add_single_from_str_with_dir(client, cleanup_pins): + res = client.add(FAKE_FILE1_PATH, wrap_with_directory=True) + + assert FAKE_FILE1_DIR_HASH == res + + dir_hash = None + for item in res: + if item["Name"] == "": + dir_hash = item["Hash"] + assert dir_hash in client.pin.ls(type="recursive")["Keys"] + + +def test_only_hash_file(client): + client.repo.gc() + + res = client.add(FAKE_FILE1_PATH, only_hash=True) + + assert FAKE_FILE1_HASH == res + + assert res["Hash"] not in client.pin.ls(type="recursive") + assert res["Hash"] not in list(map(lambda i: i["Ref"], client.unstable.refs.local())) + + +def test_add_multiple_from_list(client, cleanup_pins): + res = client.add([FAKE_FILE1_PATH, FAKE_FILE2_PATH]) + assert FAKE_FILES_HASH == res + + +def test_add_multiple_from_dirname(client, cleanup_pins): + res = client.add(FAKE_DIR_TEST2_PATH) + assert conftest.sort_by_key(FAKE_DIR_TEST2_HASH) == conftest.sort_by_key(res) + + +def test_add_filepattern_from_dirname(client, cleanup_pins): + res = client.add(FAKE_DIR_PATH, pattern=FAKE_DIR_FNPATTERN1) + assert conftest.sort_by_key(FAKE_DIR_FNPATTERN1_HASH) == conftest.sort_by_key(res) + + +def test_add_filepattern_subdir_wildcard(client, cleanup_pins): + res = client.add(FAKE_DIR_PATH, pattern=FAKE_DIR_FNPATTERN2) + assert conftest.sort_by_key(FAKE_DIR_FNPATTERN2_HASH) == conftest.sort_by_key(res) + + +def test_add_recursive(client, cleanup_pins): + res = client.add(FAKE_DIR_PATH, recursive=True) + assert conftest.sort_by_key(FAKE_DIR_HASH) == conftest.sort_by_key(res) + + +def test_get_file(client, cleanup_pins): + client.add(FAKE_FILE1_PATH) + + test_hash = FAKE_DIR_HASH[1]["Hash"] + + try: + client.get(test_hash) + assert test_hash in os.listdir(os.getcwd()) + finally: + os.remove(test_hash) + assert test_hash not in os.listdir(os.getcwd()) + + +def test_get_dir(client, cleanup_pins): + client.add(FAKE_DIR_PATH, recursive=True) + + test_hash = FAKE_DIR_HASH[0]["Hash"] + + try: + client.get(test_hash) + assert test_hash in os.listdir(os.getcwd()) + finally: + shutil.rmtree(test_hash) + assert test_hash not in os.listdir(os.getcwd()) + + +def test_get_path(client, cleanup_pins): + client.add(FAKE_FILE1_PATH) + + test_hash = FAKE_DIR_HASH[0]["Hash"] + "/fsdfgh" + + try: + client.get(test_hash) + assert "fsdfgh" in os.listdir(os.getcwd()) + finally: + os.remove("fsdfgh") + assert "fsdfgh" not in os.listdir(os.getcwd()) + + +def test_cat_single_file_str(client, cleanup_pins): + client.add(FAKE_FILE1_PATH) + + content = client.cat("QmQcCtMgLVwvMQGu6mvsRYLjwqrZJcYtH4mboM9urWW9vX") + assert content == b"dsadsad\n" + + +def test_cat_file_block(client, cleanup_pins): + client.add(FAKE_FILE1_PATH) + + content = b"dsadsad\n" + for offset in range(len(content)): + for length in range(len(content)): + block = client.cat("QmQcCtMgLVwvMQGu6mvsRYLjwqrZJcYtH4mboM9urWW9vX", + 
offset=offset, length=length) + assert block == content[offset:(offset + length)] + + +################################################## +# Mutable File System (MFS) aka `client.files.*` # +################################################## +TEST_MFS_FILES = { + "test_file1": { + "Name": conftest.TEST_DIR / "fake_dir" / "popoiopiu", + "Stat": { + u"Type": "file", + u"Hash": "QmUvobKqcCE56brA8pGTRRRsGy2SsDEKSxFLZkBQFv7Vvv", + u"Blocks": 1, + u"CumulativeSize": 73, + u"Size": 15 + } + } +} + +TEST_MFS_DIRECTORY = "/test_dir" + + +def test_mfs_file_write_stat_read_delete(client): + for filename, desc in TEST_MFS_FILES.items(): + filepath = "/" + filename + + # Create target file + client.files.write(filepath, desc["Name"], create=True) + + # Verify stat information of file + stat = client.files.stat(filepath) + assert sorted(desc["Stat"].items()) == sorted(stat.items()) + + # Read back (and compare file contents) + with open(str(desc["Name"]), "rb") as file: + content = client.files.read(filepath) + assert content == file.read() + + # Remove file + client.files.rm(filepath) + + +def test_mfs_dir_make_fill_list_delete(client): + client.files.mkdir(TEST_MFS_DIRECTORY) + for filename, desc in TEST_MFS_FILES.items(): + # Create target file in directory + client.files.write( + TEST_MFS_DIRECTORY + "/" + filename, + desc["Name"], create=True + ) + + # Verify directory contents + contents = client.files.ls(TEST_MFS_DIRECTORY)[u"Entries"] + filenames1 = list(map(lambda d: d["Name"], contents)) + filenames2 = list(TEST_MFS_FILES.keys()) + assert filenames1 == filenames2 + + # Remove directory + client.files.rm(TEST_MFS_DIRECTORY, recursive=True) + + with pytest.raises(ipfshttpclient.exceptions.Error): + client.files.stat(TEST_MFS_DIRECTORY) \ No newline at end of file diff --git a/test/functional/test_key.py b/test/functional/test_key.py new file mode 100644 index 00000000..baac4aac --- /dev/null +++ b/test/functional/test_key.py @@ -0,0 +1,36 @@ +# _*_ coding: utf-8 -*- + + +def test_add_list_rename_rm(client): + # Remove keys if they already exist + key_list = list(map(lambda k: k["Name"], client.key.list()["Keys"])) + if "ipfshttpclient-test-rsa" in key_list: + client.key.rm("ipfshttpclient-test-rsa") + if "ipfshttpclient-test-ed" in key_list: + client.key.rm("ipfshttpclient-test-ed") + + # Add new RSA and ED25519 key + key1 = client.key.gen("ipfshttpclient-test-rsa", "rsa")["Name"] + key2 = client.key.gen("ipfshttpclient-test-ed", "ed25519")["Name"] + + # Validate the keys exist now + key_list = list(map(lambda k: k["Name"], client.key.list()["Keys"])) + assert key1 in key_list + assert key2 in key_list + + # Rename the EC key + key2_new = client.key.rename(key2, "ipfshttpclient-test-ed2")["Now"] + + # Validate that the key was successfully renamed + key_list = list(map(lambda k: k["Name"], client.key.list()["Keys"])) + assert key1 in key_list + assert key2 not in key_list + assert key2_new in key_list + + # Drop both keys with one request + client.key.rm(key1, key2_new) + + # Validate that the keys are gone again + key_list = list(map(lambda k: k["Name"], client.key.list()["Keys"])) + assert key1 not in key_list + assert key2_new not in key_list \ No newline at end of file diff --git a/test/functional/test_miscellaneous.py b/test/functional/test_miscellaneous.py new file mode 100644 index 00000000..132681b8 --- /dev/null +++ b/test/functional/test_miscellaneous.py @@ -0,0 +1,96 @@ +# _*_ coding: utf-8 -*- +import os +import time + +import pytest + + + +def test_version(client): + expected_keys = 
{"Repo", "Commit", "Version"} + resp_version = client.version() + assert set(resp_version.keys()).issuperset(expected_keys) + + +def test_id(client): + expected_keys = {"PublicKey", "ProtocolVersion", "ID", "AgentVersion", "Addresses"} + resp_id = client.id() + assert set(resp_id.keys()).issuperset(expected_keys) + + +################# +# Shutdown test # +################# + +def check_pid_exists(pid): + """ + Check whether pid exists in the current process table + + Source: https://stackoverflow.com/a/23409343/277882 + """ + if os.name == "posix": + import errno + if pid < 0: + return False + try: + os.kill(pid, 0) + except OSError as e: + return e.errno == errno.EPERM + else: + return True + else: + import ctypes + kernel32 = ctypes.windll.kernel32 + HANDLE = ctypes.c_void_p + DWORD = ctypes.c_ulong + LPDWORD = ctypes.POINTER(DWORD) + + class ExitCodeProcess(ctypes.Structure): + _fields_ = [("hProcess", HANDLE), ("lpExitCode", LPDWORD)] + + SYNCHRONIZE = 0x100000 + process = kernel32.OpenProcess(SYNCHRONIZE, 0, pid) + if not process: + return False + + ec = ExitCodeProcess() + out = kernel32.GetExitCodeProcess(process, ctypes.byref(ec)) + if not out: + if kernel32.GetLastError() == 5: + # Access is denied. + print("Access is denied when getting PID info") + kernel32.CloseHandle(process) + return False + elif bool(ec.lpExitCode): + # There is an exit code, it quit + kernel32.CloseHandle(process) + return False + # No exit code, it"s running. + kernel32.CloseHandle(process) + return True + + +# Run test for `.stop()` only as the last test in CI environments – it would be to annoying +# during normal testing +@pytest.mark.last +def test_daemon_stop(client): + have_ci = os.environ.get("CI", "false") == "true" + have_pid = os.environ.get("PY_IPFS_HTTP_CLIENT_TEST_DAEMON_PID", "").isdigit() + if not have_ci or not have_pid: + pytest.skip("CI-only test") + daemon_pid = int(os.environ["PY_IPFS_HTTP_CLIENT_TEST_DAEMON_PID"]) + + # Daemon should still be running at this point + assert check_pid_exists(daemon_pid) + + # Send stop request + client.stop() + + # Wait for daemon process to disappear + for _ in range(10000): + if not check_pid_exists(daemon_pid): + break + time.sleep(0.001) + + # Daemon should not be running anymore + assert not check_pid_exists(daemon_pid) \ No newline at end of file diff --git a/test/functional/test_object.py b/test/functional/test_object.py new file mode 100644 index 00000000..7db08eec --- /dev/null +++ b/test/functional/test_object.py @@ -0,0 +1,105 @@ +# _*_ coding: utf-8 -*- +import conftest + + +def test_new(client): + expected_keys = {"Hash"} + res = client.object.new() + assert set(res.keys()).issuperset(expected_keys) + + +def test_stat(client): + expected_keys = {"Hash", "CumulativeSize", "DataSize", "NumLinks", "LinksSize", "BlockSize"} + resource = client.add_str("Mary had a little lamb") + resp_stat = client.object.stat(resource) + assert set(resp_stat.keys()).issuperset(expected_keys) + + +def test_put_get(client): + # Set paths to test json files + path_no_links = conftest.TEST_DIR / "fake_json" / "no_links.json" + path_links = conftest.TEST_DIR / "fake_json" / "links.json" + + # Put the json objects on the DAG + no_links = client.object.put(path_no_links) + links = client.object.put(path_links) + + # Verify the correct content was put + assert no_links["Hash"] == "QmXg9Pp2ytZ14xgmQjYEiHjVjMFXzCVVEcRTWJBmLgR39V" + assert links["Hash"] == "QmZZmY4KCu9r3e7M2Pcn46Fc5qbn6NpzaAGaYb22kbfTqm" + + # Get the objects from the DAG + get_no_links = 
client.object.get("QmXg9Pp2ytZ14xgmQjYEiHjVjMFXzCVVEcRTWJBmLgR39V") + get_links = client.object.get("QmZZmY4KCu9r3e7M2Pcn46Fc5qbn6NpzaAGaYb22kbfTqm") + + # Verify the objects we put have been gotten + assert get_no_links["Data"] == "abc" + assert get_links["Data"] == "another" + assert get_links["Links"][0]["Name"] == "some link" + + +def test_links(client): + # Set paths to test json files + path_links = conftest.TEST_DIR / "fake_json" / "links.json" + + # Put json object on the DAG and get its links + client.object.put(path_links) + links = client.object.links("QmZZmY4KCu9r3e7M2Pcn46Fc5qbn6NpzaAGaYb22kbfTqm") + + # Verify the correct link has been gotten + assert links["Links"][0]["Hash"] == "QmXg9Pp2ytZ14xgmQjYEiHjVjMFXzCVVEcRTWJBmLgR39V" + + +def test_data(client): + # Set paths to test json files + path_links = conftest.TEST_DIR / "fake_json" / "links.json" + + # Put json objects on the DAG and get its data + client.object.put(path_links) + data = client.object.data("QmZZmY4KCu9r3e7M2Pcn46Fc5qbn6NpzaAGaYb22kbfTqm") + + # Verify the correct bytes have been gotten + assert data == b"another" + + +def test_patch_append_data(client): + """Warning, this test depends on the contents of + test/functional/fake_dir/fsdfgh + """ + result = client.object.patch.append_data( + "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n", + conftest.TEST_DIR / "fake_dir" / "fsdfgh" + ) + assert result == {"Hash": "QmcUsyoGVxWoQgYKgmLaDBGm8J3eHWfchMh3oDUD5FrrtN"} + + +def test_patch_add_link(client): + """Warning, this test depends on the contents of + test/functional/fake_dir/fsdfgh + """ + result = client.object.patch.add_link( + "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n", "self", + "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n" + ) + assert result == {"Hash": "QmbWSr7YXBLcF23VVb7yPvUuogUPn46GD7gXftXC6mmsNM"} + + +def test_patch_rm_link(client): + """Warning, this test depends on the contents of + test/functional/fake_dir/fsdfgh + """ + result = client.object.patch.rm_link( + "QmbWSr7YXBLcF23VVb7yPvUuogUPn46GD7gXftXC6mmsNM", "self" + ) + assert result == {"Hash": "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n"} + + +def test_patch_set_data(client): + """Warning, this test depends on the contents of + test/functional/fake_dir/popoiopiu + """ + result = client.object.patch.set_data( + "QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n", + conftest.TEST_DIR / "fake_dir" / "popoiopiu" + ) + assert result == {"Hash": "QmV4QR7MCBj5VTi6ddHmXPyjWGzbaKEtX2mx7axA5PA13G"} \ No newline at end of file diff --git a/test/functional/test_other.py b/test/functional/test_other.py new file mode 100644 index 00000000..b9b8956d --- /dev/null +++ b/test/functional/test_other.py @@ -0,0 +1,24 @@ +# _*_ coding: utf-8 -*- +import ipfshttpclient + +import conftest + + +def test_ipfs_node_available(): + """ + Dummy test to ensure that running the tests without a daemon produces a failure, since we + think it's unlikely that people running tests want this + """ + addr = "[{0}]:{1}".format(ipfshttpclient.DEFAULT_HOST, ipfshttpclient.DEFAULT_PORT) + assert conftest.is_available(), "Functional tests require an IPFS node to be available at: " + addr + + +def test_add_json(client, cleanup_pins): + data = {"Action": "Open", "Type": "PR", "Name": "IPFS", "Pubkey": 7} + res = client.add_json(data) + + assert data == client.get_json(res) + + # have to test the string added to IPFS, deserializing JSON will not + # test order of keys + assert '{"Action":"Open","Name":"IPFS","Pubkey":7,"Type":"PR"}' == client.cat(res).decode("utf-8") \ No 
newline at end of file diff --git a/test/functional/test_pin.py b/test/functional/test_pin.py new file mode 100644 index 00000000..7d4f6a9a --- /dev/null +++ b/test/functional/test_pin.py @@ -0,0 +1,121 @@ +# _*_ coding: utf-8 -*- +import pytest + + +class Resources(object): + def __init__(self, client): + self.msg = client.add_str("Mary had a little lamb") + resp_add = client.add("test/functional/fake_dir", recursive=True) + self.fake_dir_hashes = [el["Hash"] for el in resp_add if "Hash" in el] + for resp in resp_add: + if resp["Name"] == "fake_dir": + self.fake_dir_hash = resp["Hash"] + elif resp["Name"] == "fake_dir/test2": + self.fake_dir_test2_hash = resp["Hash"] + +@pytest.fixture # noqa +def resources(client): + return Resources(client) + + + +def test_ls_add_rm_single(client, resources): + # Get pinned objects at start. + pins_begin = client.pin.ls()["Keys"] + + # Unpin the resource if already pinned. + if resources.msg in pins_begin.keys(): + client.pin.rm(resources.msg) + + # No matter what, the resource should not be pinned at this point + assert resources.msg not in client.pin.ls()["Keys"] + + for option in (True, False): + # Pin the resource. + resp_add = client.pin.add(resources.msg, recursive=option) + pins_afer_add = client.pin.ls()["Keys"] + assert resp_add["Pins"] == [resources.msg] + assert resources.msg in pins_afer_add + if option: + assert pins_afer_add[resources.msg]["Type"] == "recursive" + else: + assert pins_afer_add[resources.msg]["Type"] != "recursive" + + # Unpin the resource + resp_rm = client.pin.rm(resources.msg) + pins_afer_rm = client.pin.ls()["Keys"] + assert resp_rm["Pins"] == [resources.msg] + assert resources.msg not in pins_afer_rm + + # Get pinned objects at end + pins_end = client.pin.ls()["Keys"] + + # Compare pinned items from start to finish of test + assert resources.msg not in pins_end.keys() + + +def test_ls_add_rm_directory(client, resources): + # Remove fake_dir if it had previously been pinned + if resources.fake_dir_hash in client.pin.ls(type="recursive")["Keys"].keys(): + client.pin.rm(resources.fake_dir_hash) + + # Make sure I removed it + assert resources.fake_dir_hash not in client.pin.ls()["Keys"].keys() + + # Add "fake_dir" recursively + client.pin.add(resources.fake_dir_hash) + + # Make sure all appear on the list of pinned objects + pins_after_add = client.pin.ls()["Keys"].keys() + assert set(pins_after_add).issuperset(set(resources.fake_dir_hashes)) + + # Clean up + client.pin.rm(resources.fake_dir_hash) + pins_end = client.pin.ls(type="recursive")["Keys"].keys() + assert resources.fake_dir_hash not in pins_end + + +def test_add_update_verify_rm(client, resources): + # Get pinned objects at start + pins_begin = client.pin.ls(type="recursive")["Keys"].keys() + + # Remove fake_dir and demo resource if it had previously been pinned + if resources.fake_dir_hash in pins_begin: + client.pin.rm(resources.fake_dir_hash) + if resources.fake_dir_test2_hash in pins_begin: + client.pin.rm(resources.fake_dir_test2_hash) + + # Ensure that none of the above are pinned anymore + pins_after_rm = client.pin.ls(type="recursive")["Keys"].keys() + assert resources.fake_dir_hash not in pins_after_rm + assert resources.fake_dir_test2_hash not in pins_after_rm + + # Add pin for sub-directory + client.pin.add(resources.fake_dir_test2_hash) + + # Replace it by pin for the entire fake dir + client.pin.update(resources.fake_dir_test2_hash, resources.fake_dir_hash) + + # Ensure that the sub-directory is not pinned directly anymore + pins_after_update = 
client.pin.ls(type="recursive")["Keys"].keys() + assert resources.fake_dir_test2_hash not in pins_after_update + assert resources.fake_dir_hash in pins_after_update + + # Now add a pin to the sub-directory from the parent directory + client.pin.update(resources.fake_dir_hash, resources.fake_dir_test2_hash, unpin=False) + + # Check integrity of all directory content hashes and whether all + # directory contents have been processed in doing this + hashes = [] + for result in client.pin.verify(resources.fake_dir_hash, verbose=True): + assert result["Ok"] + hashes.append(result["Cid"]) + assert resources.fake_dir_hash in hashes + + # Ensure that both directories are now recursively pinned + pins_after_update2 = client.pin.ls(type="recursive")["Keys"].keys() + assert resources.fake_dir_test2_hash in pins_after_update2 + assert resources.fake_dir_hash in pins_after_update2 + + # Clean up + client.pin.rm(resources.fake_dir_hash, resources.fake_dir_test2_hash) \ No newline at end of file diff --git a/test/functional/test_pubsub.py b/test/functional/test_pubsub.py new file mode 100644 index 00000000..c5d587ae --- /dev/null +++ b/test/functional/test_pubsub.py @@ -0,0 +1,85 @@ +# _*_ coding: utf-8 -*- +import uuid + +import pytest + + + +@pytest.fixture +def pubsub_topic(): + """ + Creates a unique topic for testing purposes + """ + return "{}.testing".format(uuid.uuid4()) + + + +def test_publish_subscribe(client, pubsub_topic): + """ + We test both publishing and subscribing at + the same time because we cannot verify that + something has been properly published unless + we subscribe to that channel and receive it. + Likewise, we cannot accurately test a subscription + without publishing something on the topic we are subscribed + to. + """ + # the message that will be published + message = "hello" + + expected_data = "aGVsbG8=" + expected_topicIDs = [pubsub_topic] + + # get the subscription stream + with client.pubsub.subscribe(pubsub_topic) as sub: + # make sure something was actually returned from the subscription + assert sub is not None + + # publish a message to topic + client.pubsub.publish(pubsub_topic, message) + + # get the message + sub_data = sub.read_message() + + # assert that the returned dict has the following keys + assert "data" in sub_data + assert "topicIDs" in sub_data + + assert sub_data["data"] == expected_data + assert sub_data["topicIDs"] == expected_topicIDs + + +def test_ls(client, pubsub_topic): + """ + Testing the ls, assumes we are able + to at least subscribe to a topic + """ + expected_return = {"Strings": [pubsub_topic]} + + # subscribe to the topic testing + sub = client.pubsub.subscribe(pubsub_topic) + + channels = None + try: + # grab the channels we"re subscribed to + channels = client.pubsub.ls() + finally: + sub.close() + + assert channels == expected_return + + +def test_peers(client): + """ + Not sure how to test this since it fully depends + on who we"re connected to. We may not even have + any peers + """ + peers = client.pubsub.peers() + + # make sure the Strings key is in the map thats returned + assert "Strings" in peers + + # ensure the value of "Strings" is a list. + # The list may or may not be empty. 
+ assert isinstance(peers["Strings"], list) \ No newline at end of file diff --git a/test/functional/test_repo.py b/test/functional/test_repo.py new file mode 100644 index 00000000..8bf15347 --- /dev/null +++ b/test/functional/test_repo.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- + + +def test_stat(client): + # Verify that the correct key-value pairs are returned + stat = client.repo.stat() + assert sorted(stat.keys()) == [ + u"NumObjects", u"RepoPath", u"RepoSize", + u"StorageMax", u"Version" + ] + + +def test_gc(client): + # Add and unpin an object to be garbage collected + garbage = client.add_str("Test String") + client.pin.rm(garbage) + + # Collect the garbage object, recording the object count before and after + orig_objs = client.repo.stat()["NumObjects"] + gc = client.repo.gc() + cur_objs = client.repo.stat()["NumObjects"] + + # Verify the garbage object was collected + assert orig_objs > cur_objs + keys = [el["Key"]["/"] for el in gc] + assert garbage in keys \ No newline at end of file diff --git a/test/functional/test_unstable.py b/test/functional/test_unstable.py new file mode 100644 index 00000000..76a279f9 --- /dev/null +++ b/test/functional/test_unstable.py @@ -0,0 +1,70 @@ +# -*- coding: utf-8 -*- +import conftest + + +################## +# Daemon Logging # +################## + +def test_log_ls_level(client): + """ + Unfortunately there is no way of knowing the logging levels prior + to this test. This makes it impossible to guarantee that the logging + levels are the same as before the test was run. + """ + # Retrieves the list of logging subsystems for a running daemon. + resp_ls = client.unstable.log.ls() + # The response should be a dictionary with only one key ('Strings'). + assert "Strings" in resp_ls + + # Sets the logging level to 'error' for the first subsystem found. + sub = resp_ls["Strings"][0] + resp_level = client.unstable.log.level(sub, "error") + assert resp_level["Message"] == "Changed log level of '{0}' to 'error'\n".format(sub) + + +def test_log_tail(client): + # Gets the response object. + tail = client.unstable.log.tail() + + # The log should have been parsed into a dictionary object with + # various keys depending on the event that occurred.
+ assert type(next(tail)) is dict + + +############ +# Refs API # +############ + +REFS_RESULT = [ + {"Err": "", "Ref": "QmQcCtMgLVwvMQGu6mvsRYLjwqrZJcYtH4mboM9urWW9vX"}, + {"Err": "", "Ref": "QmYAhvKYu46rh5NcHzeu6Bhc7NG9SqkF9wySj2jvB74Rkv"}, + {"Err": "", "Ref": "QmStL6TPbJfMHQhHjoVT93kCynVx3GwLf7xwgrtScqABhU"}, + {"Err": "", "Ref": "QmRphRr6ULDEj7YnXpLdnxhnPiVjv5RDtGX3er94Ec6v4Q"} +] + + +def test_refs_local_1(client): + with open(str(conftest.TEST_DIR / "fake_dir" / "fsdfgh"), "rb") as fp: + res = client.add(fp, pin=False) + + assert res["Hash"] == "QmQcCtMgLVwvMQGu6mvsRYLjwqrZJcYtH4mboM9urWW9vX" + + assert res["Hash"] not in client.pin.ls(type="recursive") + assert res["Hash"] in list(map(lambda i: i["Ref"], client.unstable.refs.local())) + + +def test_refs_local_2(client): + res = client.add(conftest.TEST_DIR / "fake_dir" / "fsdfgh", pin=False) + + assert res["Hash"] == "QmQcCtMgLVwvMQGu6mvsRYLjwqrZJcYtH4mboM9urWW9vX" + + assert res["Hash"] not in client.pin.ls(type="recursive") + assert res["Hash"] in list(map(lambda i: i["Ref"], client.unstable.refs.local())) + + +def test_refs(client, cleanup_pins): + client.add(conftest.TEST_DIR / "fake_dir", recursive=True) + + refs = client.unstable.refs("QmNx8xVu9mpdz9k6etbh2S8JwZygatsZVCH4XhgtfUYAJi") + assert conftest.sort_by_key(REFS_RESULT, "Ref") == conftest.sort_by_key(refs, "Ref") \ No newline at end of file diff --git a/test/functional/tests.py b/test/functional/tests.py deleted file mode 100644 index f50720a4..00000000 --- a/test/functional/tests.py +++ /dev/null @@ -1,1017 +0,0 @@ -# _*_ coding: utf-8 -*- -import os -import json -import shutil -import socket -import sys -import time -import unittest -import logging -import uuid - -import pytest - -import ipfshttpclient - - -__is_available = None -def is_available(): - """ - Return whether the IPFS daemon is reachable or not - """ - global __is_available - - if not isinstance(__is_available, bool): - try: - ipfshttpclient.connect() - except ipfshttpclient.exceptions.Error as error: - __is_available = False - - # Make sure version incompatiblity is displayed to the user - if isinstance(error, ipfshttpclient.exceptions.VersionMismatch): - raise - else: - __is_available = True - - return __is_available - - -def skipIfOffline(): - if is_available(): - return lambda func: func - else: - return unittest.skip("IPFS node is not available") - -def skipUnlessCI(): - have_ci = os.environ.get("CI", "false") == "true" - have_pid = os.environ.get("PY_IPFS_HTTP_CLIENT_TEST_DAEMON_PID", "").isdigit() - return unittest.skipUnless(have_ci and have_pid, "CI-only test") - - -def test_ipfs_node_available(): - addr = "[{0}]:{1}".format(ipfshttpclient.DEFAULT_HOST, ipfshttpclient.DEFAULT_PORT) - assert is_available(), "Functional tests require an IPFS node to be available at: " + addr - - - -HERE = os.path.dirname(os.path.abspath(__file__)) - -class AssertVersionTest(unittest.TestCase): - def test_assert_version(self): - # Minimum required version - ipfshttpclient.assert_version("0.1.0", "0.1.0", "0.2.0") - - # Too high version - with self.assertRaises(ipfshttpclient.exceptions.VersionMismatch): - ipfshttpclient.assert_version("0.2.0", "0.1.0", "0.2.0") - - # Too low version - with self.assertRaises(ipfshttpclient.exceptions.VersionMismatch): - ipfshttpclient.assert_version("0.0.5", "0.1.0", "0.2.0") - -@skipIfOffline() -class IpfsHttpClientTest(unittest.TestCase): - - http_client = ipfshttpclient.Client() - - fake = [{'Hash': u'QmQcCtMgLVwvMQGu6mvsRYLjwqrZJcYtH4mboM9urWW9vX', - 'Name': 'fake_dir/fsdfgh'}, - 
{'Hash': u'QmYAhvKYu46rh5NcHzeu6Bhc7NG9SqkF9wySj2jvB74Rkv', - 'Name': 'fake_dir/popoiopiu'}, - {'Hash': u'QmeMbJSHNCesAh7EeopackUdjutTJznum1Fn7knPm873Fe', - 'Name': 'fake_dir/test3/ppppoooooooooo'}, - {'Hash': u'QmRphRr6ULDEj7YnXpLdnxhnPiVjv5RDtGX3er94Ec6v4Q', - 'Name': 'fake_dir/test3'}, - {'Hash': u'QmNuvmuFeeWWpxjCQwLkHshr8iqhGLWXFzSGzafBeawTTZ', - 'Name': 'fake_dir/test2/llllg'}, - {'Hash': u'Qmb1NPqPzdHCMvHRfCkk6TWLcnpGJ71KnafacCMm6TKLcD', - 'Name': 'fake_dir/test2/fssdf'}, - {'Hash': u'QmX1dd5DtkgoiYRKaPQPTCtXArUu4jEZ62rJBUcd5WhxAZ', - 'Name': 'fake_dir/test2'}, - {'Hash': u'QmRphRr6ULDEj7YnXpLdnxhnPiVjv5RDtGX3er94Ec6v4Q', - 'Name': 'fake_dir/test3'}, - {'Hash': u'QmNx8xVu9mpdz9k6etbh2S8JwZygatsZVCH4XhgtfUYAJi', - 'Name': 'fake_dir'}] - - fake_lookup = dict((i['Name'], i['Hash']) for i in fake) - - ## test_add_multiple_from_list - fake_file = 'fake_dir/fsdfgh' - fake_file_only_res = {'Hash': 'QmQcCtMgLVwvMQGu6mvsRYLjwqrZJcYtH4mboM9urWW9vX', - 'Name': 'fsdfgh', 'Size': '16'} - fake_file_dir_res = [ - {'Hash': 'QmQcCtMgLVwvMQGu6mvsRYLjwqrZJcYtH4mboM9urWW9vX', - 'Name': 'fsdfgh', 'Size': '16'}, - {'Hash': 'Qme7vmxd4LAAYL7vpho3suQeT3gvMeLLtPdp7myCb9Db55', - 'Name': '', 'Size': '68'} - ] - fake_file2 = 'fake_dir/popoiopiu' - fake_files_res = [ - {'Hash': 'QmQcCtMgLVwvMQGu6mvsRYLjwqrZJcYtH4mboM9urWW9vX', - 'Name': 'fsdfgh', 'Size': '16'}, - {'Hash': 'QmYAhvKYu46rh5NcHzeu6Bhc7NG9SqkF9wySj2jvB74Rkv', - 'Name': 'popoiopiu', 'Size': '23'}] - - ## test_add_multiple_from_dirname - fake_dir_test2 = 'fake_dir/test2' - fake_dir_res = [ - {'Hash': 'QmStL6TPbJfMHQhHjoVT93kCynVx3GwLf7xwgrtScqABhU', - 'Name': 'test2', 'Size': '297'}, - {'Hash': 'QmV3n14G8iQoNG8zpHCUZnmQpcQbhEfhQZ8NHvUEdoiXAN', - 'Name': 'test2/high', 'Size': '114'}, - {'Hash': 'QmZazHsY4nbhRTHTEp5SUWd4At6aSXia1kxEuywHTicayE', - 'Name': 'test2/high/five', 'Size': '64'}, - {'Hash': 'QmW8tRcpqy5siMNAU9Lx3GADAxQbVUrx8XJGFDjkd6vqLT', - 'Name': 'test2/high/five/dummy', 'Size': '13'}, - {'Hash': 'Qmb1NPqPzdHCMvHRfCkk6TWLcnpGJ71KnafacCMm6TKLcD', - 'Name': 'test2/fssdf', 'Size': '22'}, - {'Hash': 'QmNuvmuFeeWWpxjCQwLkHshr8iqhGLWXFzSGzafBeawTTZ', - 'Name': 'test2/llllg', 'Size': '17'}] - - ## test_add_filepattern_from_dirname - pattern = '**/fss*' - # the hash of the folder is not same as above because the content of the folder - # added is not same. 
- fake_dir_fnpattern_res = [ - {'Name': 'fake_dir/test2/fssdf', 'Hash': 'Qmb1NPqPzdHCMvHRfCkk6TWLcnpGJ71KnafacCMm6TKLcD', 'Size': '22'}, - {'Name': 'fake_dir/test2', 'Hash': 'QmT5rV6EsKNSW619SntLrkCxbUXXQh4BrKm3JazF2zEgEe', 'Size': '73'}, - {'Name': 'fake_dir', 'Hash': 'QmbPzQruAEFjUU3gQfupns6b8USr8VrD9H71GrqGDXQSxm', 'Size': '124'}] - - ## test_add_filepattern_subdir_wildcard - pattern2 = 'test2/**/high' - fake_dir_fnpattern2_res = [ - {'Hash': 'QmUXuNHpV6cdeTngSkEMbP2nQDPuyE2MFXNYtTXzZvLZHf', - 'Name': 'fake_dir', 'Size': '216'}, - {'Hash': 'QmZGuwqaXMmSwJcfTsvseHwy3mvDPD9zrs9WVowAZcQN4W', - 'Name': 'fake_dir/test2', 'Size': '164'}, - {'Hash': 'QmV3n14G8iQoNG8zpHCUZnmQpcQbhEfhQZ8NHvUEdoiXAN', - 'Name': 'fake_dir/test2/high', 'Size': '114'}, - {'Hash': 'QmZazHsY4nbhRTHTEp5SUWd4At6aSXia1kxEuywHTicayE', - 'Name': 'fake_dir/test2/high/five', 'Size': '64'}, - {'Hash': 'QmW8tRcpqy5siMNAU9Lx3GADAxQbVUrx8XJGFDjkd6vqLT', - 'Name': 'fake_dir/test2/high/five/dummy', 'Size': '13'}] - - - ## test_add_recursive - fake_dir = 'fake_dir' - fake_dir_recursive_res = [ - {'Hash': 'QmNx8xVu9mpdz9k6etbh2S8JwZygatsZVCH4XhgtfUYAJi', - 'Name': 'fake_dir', 'Size': '610'}, - {'Hash': 'QmQcCtMgLVwvMQGu6mvsRYLjwqrZJcYtH4mboM9urWW9vX', - 'Name': 'fake_dir/fsdfgh', 'Size': '16'}, - {'Hash': 'QmYAhvKYu46rh5NcHzeu6Bhc7NG9SqkF9wySj2jvB74Rkv', - 'Name': 'fake_dir/popoiopiu', 'Size': '23'}, - {'Hash': 'QmStL6TPbJfMHQhHjoVT93kCynVx3GwLf7xwgrtScqABhU', - 'Name': 'fake_dir/test2', 'Size': '297'}, - {'Hash': 'Qmb1NPqPzdHCMvHRfCkk6TWLcnpGJ71KnafacCMm6TKLcD', - 'Name': 'fake_dir/test2/fssdf', 'Size': '22'}, - {'Hash': 'QmV3n14G8iQoNG8zpHCUZnmQpcQbhEfhQZ8NHvUEdoiXAN', - 'Name': 'fake_dir/test2/high', 'Size': '114'}, - {'Hash': 'QmZazHsY4nbhRTHTEp5SUWd4At6aSXia1kxEuywHTicayE', - 'Name': 'fake_dir/test2/high/five', 'Size': '64'}, - {'Hash': 'QmW8tRcpqy5siMNAU9Lx3GADAxQbVUrx8XJGFDjkd6vqLT', - 'Name': 'fake_dir/test2/high/five/dummy', 'Size': '13'}, - {'Hash': 'QmNuvmuFeeWWpxjCQwLkHshr8iqhGLWXFzSGzafBeawTTZ', - 'Name': 'fake_dir/test2/llllg', 'Size': '17'}, - {'Hash': 'QmRphRr6ULDEj7YnXpLdnxhnPiVjv5RDtGX3er94Ec6v4Q', - 'Name': 'fake_dir/test3', 'Size': '76'}, - {'Hash': 'QmeMbJSHNCesAh7EeopackUdjutTJznum1Fn7knPm873Fe', - 'Name': 'fake_dir/test3/ppppoooooooooo', 'Size': '16'}] - - ## test_refs - refs_res = [{'Err': '', 'Ref': 'QmQcCtMgLVwvMQGu6mvsRYLjwqrZJcYtH4mboM9urWW9vX'}, - {'Err': '', 'Ref': 'QmYAhvKYu46rh5NcHzeu6Bhc7NG9SqkF9wySj2jvB74Rkv'}, - {'Err': '', 'Ref': 'QmStL6TPbJfMHQhHjoVT93kCynVx3GwLf7xwgrtScqABhU'}, - {'Err': '', 'Ref': 'QmRphRr6ULDEj7YnXpLdnxhnPiVjv5RDtGX3er94Ec6v4Q'}] - - def setUp(self): - self._olddir = os.getcwd() - os.chdir(HERE) - - # Makes all of the diff visible if the hashes change for some reason - self.maxDiff = None - - self.pinned = set(self.http_client.pin_ls(type="recursive")["Keys"]) - - def tearDown(self): - os.chdir(self._olddir) - - def _clean_up_pins(self): - for multihash in self.http_client.pin_ls(type="recursive")["Keys"]: - if multihash not in self.pinned: - self.http_client.pin_rm(multihash) - - @staticmethod - def _sort_by_key(items, key="Name"): - return sorted(items, key=lambda x: x[key]) - - ######### - # TESTS # - ######### - - def test_version(self): - expected = ['Repo', 'Commit', 'Version'] - resp_version = self.http_client.version() - for key in expected: - assert key in resp_version - - def test_id(self): - expected = ['PublicKey', 'ProtocolVersion', - 'ID', 'AgentVersion', 'Addresses'] - resp_id = self.http_client.id() - for key in expected: - assert key in resp_id - - def 
test_add_single_from_str(self): - res = self.http_client.add(self.fake_file, pin=False) - - assert self.fake_file_only_res == res - - assert res["Hash"] not in self.http_client.pin_ls(type="recursive") - assert res["Hash"] in list(map(lambda i: i["Ref"], self.http_client.refs_local())) - - def test_add_single_from_fp(self): - with open(self.fake_file, 'rb') as fp: - res = self.http_client.add(fp, pin=False) - - assert self.fake_file_only_res == res - - assert res["Hash"] not in self.http_client.pin_ls(type="recursive") - assert res["Hash"] in list(map(lambda i: i["Ref"], self.http_client.refs_local())) - - def test_add_single_from_str_with_dir(self): - res = self.http_client.add(self.fake_file, wrap_with_directory=True) - - try: - assert self.fake_file_dir_res == res - - dir_hash = None - for item in res: - if item["Name"] == "": - dir_hash = item["Hash"] - assert dir_hash in self.http_client.pin_ls(type="recursive")["Keys"] - finally: - self._clean_up_pins() - - def test_only_hash_file(self): - self.http_client.repo_gc() - - res = self.http_client.add(self.fake_file, only_hash=True) - - assert self.fake_file_only_res == res - - assert res["Hash"] not in self.http_client.pin_ls(type="recursive") - assert res["Hash"] not in list(map(lambda i: i["Ref"], self.http_client.refs_local())) - - def test_add_multiple_from_list(self): - res = self.http_client.add([self.fake_file, self.fake_file2]) - - try: - assert self.fake_files_res == res - finally: - self._clean_up_pins() - - def test_add_multiple_from_dirname(self): - res = self.http_client.add(self.fake_dir_test2) - - try: - assert self._sort_by_key(self.fake_dir_res) == self._sort_by_key(res) - finally: - self._clean_up_pins() - - def test_add_filepattern_from_dirname(self): - res = self.http_client.add(self.fake_dir, pattern=self.pattern) - - try: - assert self._sort_by_key(self.fake_dir_fnpattern_res) == self._sort_by_key(res) - finally: - self._clean_up_pins() - - - def test_add_filepattern_subdir_wildcard(self): - res = self.http_client.add(self.fake_dir, pattern=self.pattern2) - - try: - assert self._sort_by_key(self.fake_dir_fnpattern2_res) == self._sort_by_key(res) - finally: - self._clean_up_pins() - - def test_add_recursive(self): - res = self.http_client.add(self.fake_dir, recursive=True) - - try: - assert self._sort_by_key(self.fake_dir_recursive_res) == self._sort_by_key(res) - finally: - self._clean_up_pins() - - def test_add_json(self): - data = {'Action': 'Open', 'Type': 'PR', 'Name': 'IPFS', 'Pubkey': 7} - res = self.http_client.add_json(data) - - try: - assert data == self.http_client.get_json(res) - - # have to test the string added to IPFS, deserializing JSON will not - # test order of keys - assert '{"Action":"Open","Name":"IPFS","Pubkey":7,"Type":"PR"}' == self.http_client.cat(res).decode('utf-8') - finally: - self._clean_up_pins() - - def test_add_get_pyobject(self): - data = [-1, 3.14, u'Hän€', b'23' ] - res = self.http_client.add_pyobj(data) - - try: - assert data == self.http_client.get_pyobj(res) - finally: - self._clean_up_pins() - - def test_get_file(self): - self.http_client.add(self.fake_file) - - try: - test_hash = self.fake[0]['Hash'] - - self.http_client.get(test_hash) - assert test_hash in os.listdir(os.getcwd()) - - os.remove(test_hash) - assert test_hash not in os.listdir(os.getcwd()) - finally: - self._clean_up_pins() - - def test_get_dir(self): - self.http_client.add(self.fake_dir, recursive=True) - - try: - test_hash = self.fake[8]['Hash'] - - self.http_client.get(test_hash) - assert test_hash in 
os.listdir(os.getcwd()) - - shutil.rmtree(test_hash) - assert test_hash not in os.listdir(os.getcwd()) - finally: - self._clean_up_pins() - - def test_get_path(self): - self.http_client.add(self.fake_file) - - try: - test_hash = self.fake[8]['Hash'] + '/fsdfgh' - - self.http_client.get(test_hash) - assert 'fsdfgh' in os.listdir(os.getcwd()) - - os.remove('fsdfgh') - assert 'fsdfgh' not in os.listdir(os.getcwd()) - finally: - self._clean_up_pins() - - def test_refs(self): - self.http_client.add(self.fake_dir, recursive=True) - - try: - refs = self.http_client.refs(self.fake[8]['Hash']) - assert self._sort_by_key(self.refs_res, "Ref") == self._sort_by_key(refs, "Ref") - finally: - self._clean_up_pins() - - def test_cat_single_file_str(self): - self.http_client.add(self.fake_file) - - try: - content = self.http_client.cat('QmQcCtMgLVwvMQGu6mvsRYLjwqrZJcYtH4mboM9urWW9vX') - assert content == b"dsadsad\n" - finally: - self._clean_up_pins() - - def test_cat_file_block(self): - self.http_client.add(self.fake_file) - - content = b"dsadsad\n" - try: - for offset in range(len(content)): - for length in range(len(content)): - block = self.http_client.cat('QmQcCtMgLVwvMQGu6mvsRYLjwqrZJcYtH4mboM9urWW9vX', offset=offset, length=length) - assert block == content[offset:offset+length] - finally: - self._clean_up_pins() - - -@skipIfOffline() -class IpfsHttpClientLogTest(unittest.TestCase): - - def setUp(self): - self.http_client = ipfshttpclient.Client() - - def test_log_ls_level(self): - """ - Unfortunately there is no way of knowing the logging levels prior - to this test. This makes it impossible to guarantee that the logging - levels are the same as before the test was run. - """ - # Retrieves the list of logging subsystems for a running daemon. - resp_ls = self.http_client.log_ls() - # The response should be a dictionary with only one key ('Strings'). - self.assertTrue('Strings' in resp_ls) - - # Sets the logging level to 'error' for the first subsystem found. - sub = resp_ls['Strings'][0] - resp_level = self.http_client.log_level(sub, 'error') - self.assertEqual(resp_level['Message'], - "Changed log level of \'%s\' to 'error'\n" % sub) - - def test_log_tail(self): - # Gets the response object. - tail = self.http_client.log_tail() - - # The log should have been parsed into a dictionary object with - # various keys depending on the event that occured. - self.assertIs(type(next(tail)), dict) - - -@skipIfOffline() -class IpfsHttpClientPinTest(unittest.TestCase): - def setUp(self): - self.http_client = ipfshttpclient.Client() - - # Add resources to be pinned. - self.resource = self.http_client.add_str('Mary had a little lamb') - resp_add = self.http_client.add('test/functional/fake_dir', recursive=True) - self.fake_dir_hashes = [el['Hash'] for el in resp_add if 'Hash' in el] - for resp in resp_add: - if resp["Name"] == "fake_dir": - self.fake_dir_hash = resp["Hash"] - elif resp["Name"] == "fake_dir/test2": - self.fake_dir_test2_hash = resp["Hash"] - - def test_pin_ls_add_rm_single(self): - # Get pinned objects at start. - pins_begin = self.http_client.pin_ls()['Keys'] - - # Unpin the resource if already pinned. - if self.resource in pins_begin.keys(): - self.http_client.pin_rm(self.resource) - - # No matter what, the resource should not be pinned at this point. - self.assertNotIn(self.resource, self.http_client.pin_ls()['Keys']) - - for option in [True, False]: - # Pin the resource. 
- resp_add = self.http_client.pin_add(self.resource, recursive=option) - pins_afer_add = self.http_client.pin_ls()['Keys'] - self.assertEqual(resp_add['Pins'], [self.resource]) - self.assertTrue(self.resource in pins_afer_add) - self.assertEqual(pins_afer_add[self.resource]['Type'] == 'recursive', - option) - - # Unpin the resource. - resp_rm = self.http_client.pin_rm(self.resource) - pins_afer_rm = self.http_client.pin_ls()['Keys'] - self.assertEqual(resp_rm['Pins'], [self.resource]) - self.assertFalse(self.resource in pins_afer_rm) - - # Get pinned objects at end. - pins_end = self.http_client.pin_ls()['Keys'] - - # Compare pinned items from start to finish of test. - self.assertFalse(self.resource in pins_end.keys()) - - def test_pin_ls_add_rm_directory(self): - # Remove fake_dir if it had previously been pinned. - if self.fake_dir_hash in self.http_client.pin_ls(type="recursive")['Keys'].keys(): - self.http_client.pin_rm(self.fake_dir_hash) - - # Make sure I removed it - assert self.fake_dir_hash not in self.http_client.pin_ls()['Keys'].keys() - - # Add 'fake_dir' recursively. - self.http_client.pin_add(self.fake_dir_hash) - - # Make sure all appear on the list of pinned objects. - pins_after_add = self.http_client.pin_ls()['Keys'].keys() - for el in self.fake_dir_hashes: - assert el in pins_after_add - - # Clean up. - self.http_client.pin_rm(self.fake_dir_hash) - pins_end = self.http_client.pin_ls(type="recursive")['Keys'].keys() - assert self.fake_dir_hash not in pins_end - - def test_pin_add_update_verify_rm(self): - # Get pinned objects at start. - pins_begin = self.http_client.pin_ls(type="recursive")['Keys'].keys() - - # Remove fake_dir and demo resource if it had previously been pinned. - if self.fake_dir_hash in pins_begin: - self.http_client.pin_rm(self.fake_dir_hash) - if self.fake_dir_test2_hash in pins_begin: - self.http_client.pin_rm(self.fake_dir_test2_hash) - - # Ensure that none of the above are pinned anymore. 
- pins_after_rm = self.http_client.pin_ls(type="recursive")['Keys'].keys() - assert self.fake_dir_hash not in pins_after_rm - assert self.fake_dir_test2_hash not in pins_after_rm - - # Add pin for sub-directory - self.http_client.pin_add(self.fake_dir_test2_hash) - - # Replace it by pin for the entire fake dir - self.http_client.pin_update(self.fake_dir_test2_hash, self.fake_dir_hash) - - # Ensure that the sub-directory is not pinned directly anymore - pins_after_update = self.http_client.pin_ls(type="recursive")["Keys"].keys() - assert self.fake_dir_test2_hash not in pins_after_update - assert self.fake_dir_hash in pins_after_update - - # Now add a pin to the sub-directory from the parent directory - self.http_client.pin_update(self.fake_dir_hash, self.fake_dir_test2_hash, unpin=False) - - # Check integrity of all directory content hashes and whether all - # directory contents have been processed in doing this - hashes = [] - for result in self.http_client.pin_verify(self.fake_dir_hash, verbose=True): - assert result["Ok"] - hashes.append(result["Cid"]) - assert self.fake_dir_hash in hashes - - # Ensure that both directories are now recursively pinned - pins_after_update2 = self.http_client.pin_ls(type="recursive")["Keys"].keys() - assert self.fake_dir_test2_hash in pins_after_update2 - assert self.fake_dir_hash in pins_after_update2 - - # Clean up - self.http_client.pin_rm(self.fake_dir_hash, self.fake_dir_test2_hash) - - - - -@skipIfOffline() -class IpfsHttpClientMFSTest(unittest.TestCase): - - test_files = { - 'test_file1': { - u'Name': u'fake_dir/popoiopiu', - u'Stat': {u'Type': 'file', - u'Hash': 'QmUvobKqcCE56brA8pGTRRRsGy2SsDEKSxFLZkBQFv7Vvv', - u'Blocks': 1, - u'CumulativeSize': 73, - u'Size': 15} - } - } - - test_directory_path = '/test_dir' - - def setUp(self): - self.http_client = ipfshttpclient.Client() - self._olddir = os.getcwd() - os.chdir(HERE) - - def tearDown(self): - os.chdir(self._olddir) - - def test_file_write_stat_read_delete(self): - for filename, desc in self.test_files.items(): - filepath = "/" + filename - - # Create target file - self.http_client.files_write(filepath, desc[u'Name'], create=True) - - # Verify stat information of file - stat = self.http_client.files_stat(filepath) - self.assertEqual(sorted(desc[u'Stat'].items()), - sorted(stat.items())) - - # Read back (and compare file contents) - with open(desc[u'Name'], 'rb') as file: - content = self.http_client.files_read(filepath) - self.assertEqual(content, file.read()) - - # Remove file - self.http_client.files_rm(filepath) - - def test_dir_make_fill_list_delete(self): - self.http_client.files_mkdir(self.test_directory_path) - for filename, desc in self.test_files.items(): - # Create target file in directory - self.http_client.files_write( - self.test_directory_path + "/" + filename, - desc[u'Name'], create=True - ) - - # Verify directory contents - contents = self.http_client.files_ls(self.test_directory_path)[u'Entries'] - filenames1 = list(map(lambda d: d[u'Name'], contents)) - filenames2 = list(self.test_files.keys()) - self.assertEqual(filenames1, filenames2) - - # Remove directory - self.http_client.files_rm(self.test_directory_path, recursive=True) - - with self.assertRaises(ipfshttpclient.exceptions.Error): - self.http_client.files_stat(self.test_directory_path) - - -skipIfOffline() -class TestBlockFunctions(unittest.TestCase): - def setUp(self): - self.http_client = ipfshttpclient.Client() - self.multihash = 'QmYA2fn8cMbVWo4v95RwcwJVyQsNtnEwHerfWR8UNtEwoE' - self.content_size = 248 - - def 
test_block_stat(self): - expected_keys = ['Key', 'Size'] - res = self.http_client.block_stat(self.multihash) - for key in expected_keys: - self.assertTrue(key in res) - - def test_block_get(self): - self.assertEqual(len(self.http_client.block_get(self.multihash)), self.content_size) - - def test_block_put(self): - path = os.path.join(os.path.dirname(os.path.dirname(__file__)), - "functional", "fake_dir", "fsdfgh") - expected_block_multihash = 'QmPevo2B1pwvDyuZyJbWVfhwkaGPee3f1kX36wFmqx1yna' - expected_keys = ['Key', 'Size'] - res = self.http_client.block_put(path) - for key in expected_keys: - self.assertTrue(key in res) - self.assertEqual(res['Key'], expected_block_multihash) - - -@skipIfOffline() -class IpfsHttpClientRepoTest(unittest.TestCase): - - def setUp(self): - self.http_client = ipfshttpclient.Client() - - def test_repo_stat(self): - # Verify that the correct key-value pairs are returned - stat = self.http_client.repo_stat() - self.assertEqual(sorted(stat.keys()), [u'NumObjects', u'RepoPath', u'RepoSize', - u'StorageMax', u'Version']) - - def test_repo_gc(self): - # Add and unpin an object to be garbage collected - garbage = self.http_client.add_str('Test String') - self.http_client.pin_rm(garbage) - - # Collect the garbage object with object count before and after - orig_objs = self.http_client.repo_stat()['NumObjects'] - gc = self.http_client.repo_gc() - cur_objs = self.http_client.repo_stat()['NumObjects'] - - # Verify the garbage object was collected - self.assertGreater(orig_objs, cur_objs) - keys = [el['Key']['/'] for el in gc] - self.assertTrue(garbage in keys) - - -@skipIfOffline() -class IpfsHttpClientKeyTest(unittest.TestCase): - def setUp(self): - self.http_client = ipfshttpclient.Client() - - def test_key_add_list_rename_rm(self): - # Remove keys if they already exist - key_list = list(map(lambda k: k["Name"], self.http_client.key_list()["Keys"])) - if "ipfshttpclient-test-rsa" in key_list: - self.http_client.key_rm("ipfshttpclient-test-rsa") - if "ipfshttpclient-test-ed" in key_list: - self.http_client.key_rm("ipfshttpclient-test-ed") - - # Add new RSA and ED25519 key - key1 = self.http_client.key_gen("ipfshttpclient-test-rsa", "rsa")["Name"] - key2 = self.http_client.key_gen("ipfshttpclient-test-ed", "ed25519")["Name"] - - # Validate the keys exist now - key_list = list(map(lambda k: k["Name"], self.http_client.key_list()["Keys"])) - assert key1 in key_list - assert key2 in key_list - - # Rename the EC key - key2_new = self.http_client.key_rename(key2, "ipfshttpclient-test-ed2")["Now"] - - # Validate that the key was successfully renamed - key_list = list(map(lambda k: k["Name"], self.http_client.key_list()["Keys"])) - assert key1 in key_list - assert key2 not in key_list - assert key2_new in key_list - - # Drop both keys with one request - self.http_client.key_rm(key1, key2_new) - - # Validate that the keys are gone again - key_list = list(map(lambda k: k["Name"], self.http_client.key_list()["Keys"])) - assert key1 not in key_list - assert key2_new not in key_list - - -@skipIfOffline() -class IpfsHttpClientObjectTest(unittest.TestCase): - - def setUp(self): - self.http_client = ipfshttpclient.Client() - self._olddir = os.getcwd() - os.chdir(HERE) - # Add a resource to get the stats for. 
- self.resource = self.http_client.add_str('Mary had a little lamb') - - def tearDown(self): - os.chdir(self._olddir) - - def test_object_new(self): - expected_keys = ['Hash'] - res = self.http_client.object_new() - for key in expected_keys: - self.assertTrue(key in res) - - def test_object_stat(self): - expected = ['Hash', 'CumulativeSize', 'DataSize', - 'NumLinks', 'LinksSize', 'BlockSize'] - resp_stat = self.http_client.object_stat(self.resource) - for key in expected: - self.assertTrue(key in resp_stat) - - def test_object_put_get(self): - # Set paths to test json files - path_no_links = os.path.join(os.path.dirname(__file__), - "fake_json", "no_links.json") - path_links = os.path.join(os.path.dirname(__file__), - "fake_json", "links.json") - - # Put the json objects on the DAG - no_links = self.http_client.object_put(path_no_links) - links = self.http_client.object_put(path_links) - - # Verify the correct content was put - self.assertEqual(no_links['Hash'], 'QmXg9Pp2ytZ14xgmQjYEiHjVjMFXzCVVEcRTWJBmLgR39V') - self.assertEqual(links['Hash'], 'QmZZmY4KCu9r3e7M2Pcn46Fc5qbn6NpzaAGaYb22kbfTqm') - - # Get the objects from the DAG - get_no_links = self.http_client.object_get('QmXg9Pp2ytZ14xgmQjYEiHjVjMFXzCVVEcRTWJBmLgR39V') - get_links = self.http_client.object_get('QmZZmY4KCu9r3e7M2Pcn46Fc5qbn6NpzaAGaYb22kbfTqm') - - # Verify the objects we put have been gotten - self.assertEqual(get_no_links['Data'], 'abc') - self.assertEqual(get_links['Data'], 'another') - self.assertEqual(get_links['Links'][0]['Name'], 'some link') - - def test_object_links(self): - # Set paths to test json files - path_links = os.path.join(os.path.dirname(__file__), - "fake_json", "links.json") - - # Put json object on the DAG and get its links - self.http_client.object_put(path_links) - links = self.http_client.object_links('QmZZmY4KCu9r3e7M2Pcn46Fc5qbn6NpzaAGaYb22kbfTqm') - - # Verify the correct link has been gotten - self.assertEqual(links['Links'][0]['Hash'], 'QmXg9Pp2ytZ14xgmQjYEiHjVjMFXzCVVEcRTWJBmLgR39V') - - def test_object_data(self): - # Set paths to test json files - path_links = os.path.join(os.path.dirname(__file__), - "fake_json", "links.json") - - # Put json objects on the DAG and get its data - self.http_client.object_put(path_links) - data = self.http_client.object_data('QmZZmY4KCu9r3e7M2Pcn46Fc5qbn6NpzaAGaYb22kbfTqm') - - # Verify the correct bytes have been gotten - self.assertEqual(data, b'another') - - def test_object_patch_append_data(self): - """Warning, this test depends on the contents of - test/functional/fake_dir/fsdfgh - """ - result = self.http_client.object_patch_append_data( - 'QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n', 'fake_dir/fsdfgh') - self.assertEqual(result, - {'Hash': 'QmcUsyoGVxWoQgYKgmLaDBGm8J3eHWfchMh3oDUD5FrrtN'}) - - def test_object_patch_add_link(self): - """Warning, this test depends on the contents of - test/functional/fake_dir/fsdfgh - """ - result = self.http_client.object_patch_add_link( - 'QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n', 'self', - 'QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n') - self.assertEqual(result, - {'Hash': 'QmbWSr7YXBLcF23VVb7yPvUuogUPn46GD7gXftXC6mmsNM'}) - - def test_object_patch_rm_link(self): - """Warning, this test depends on the contents of - test/functional/fake_dir/fsdfgh - """ - result = self.http_client.object_patch_rm_link( - 'QmbWSr7YXBLcF23VVb7yPvUuogUPn46GD7gXftXC6mmsNM', 'self') - self.assertEqual(result, - {'Hash': 'QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n'}) - - def test_object_patch_set_data(self): - """Warning, 
this test depends on the contents of - test/functional/fake_dir/popoiopiu - """ - result = self.http_client.object_patch_set_data( - 'QmdfTbBqBPQ7VNxZEYEj14VmRuZBkqFbiwReogJgS1zR1n', 'fake_dir/popoiopiu') - self.assertEqual(result, - {'Hash': 'QmV4QR7MCBj5VTi6ddHmXPyjWGzbaKEtX2mx7axA5PA13G'}) - -@skipIfOffline() -class IpfsHttpClientBitswapTest(unittest.TestCase): - - def setUp(self): - self.http_client = ipfshttpclient.Client() - - def test_bitswap_wantlist(self): - result = self.http_client.bitswap_wantlist(peer='QmdkJZUWnVkEc6yfptVu4LWY8nHkEnGwsxqQ233QSGj8UP') - self.assertTrue(result and type(result) is dict and 'Keys' in result) - - def test_bitswap_stat(self): - result = self.http_client.bitswap_stat() - self.assertTrue(result and type(result) is dict and 'Wantlist' in result) - - def test_bitswap_unwant(self): - """ - Cannot ensure what is present in the wantlist prior to execution, so just ensure - something comes back. - """ - - result = self.http_client.bitswap_unwant(key='QmZTR5bcpQD7cFgTorqxZDYaew1Wqgfbd2ud9QqGPAkK2V') - self.assertTrue(result is not None) - -@skipIfOffline() -class IpfsHttpClientPubSubTest(unittest.TestCase): - - def setUp(self): - self.http_client = ipfshttpclient.Client() - - def createTestChannel(self): - """ - Creates a unique topic for testing purposes - """ - return "{}.testing".format(uuid.uuid4()) - - def test_pubsub_pub_sub(self): - """ - We test both publishing and subscribing at - the same time because we cannot verify that - something has been properly published unless - we subscribe to that channel and receive it. - Likewise, we cannot accurately test a subscription - without publishing something on the topic we are subscribed - to. - """ - # the topic that will be published/subscribed to - topic = self.createTestChannel() - # the message that will be published - message = 'hello' - - expected_data = 'aGVsbG8=' - expected_topicIDs = [topic] - - - # get the subscription stream - with self.http_client.pubsub_sub(topic) as sub: - - # make sure something was actually returned from the subscription - assert sub is not None - - # publish a message to topic - self.http_client.pubsub_pub(topic, message) - - # get the message - sub_data = sub.read_message() - - # assert that the returned dict has the following keys - assert 'data' in sub_data - assert 'topicIDs' in sub_data - - assert sub_data['data'] == expected_data - assert sub_data['topicIDs'] == expected_topicIDs - - def test_pubsub_ls(self): - """ - Testing the ls, assumes we are able - to at least subscribe to a topic - """ - topic = self.createTestChannel() - expected_return = { 'Strings': [topic] } - - # subscribe to the topic testing - sub = self.http_client.pubsub_sub(topic) - - channels = None - try: - # grab the channels we're subscribed to - channels = self.http_client.pubsub_ls() - finally: - sub.close() - - assert channels == expected_return - - def test_pubsub_peers(self): - """ - Not sure how to test this since it fully depends - on who we're connected to. We may not even have - any peers - """ - peers = self.http_client.pubsub_peers() - - expected_return = { - 'Strings': [] - } - - # make sure the Strings key is in the map thats returned - assert 'Strings' in peers - - # ensure the value of 'Strings' is a list. - # The list may or may not be empty. 
- assert isinstance(peers['Strings'], list) - - -# Run test for `.shutdown()` only as the last test in CI environments – it would be to annoying -# during normal testing -@skipIfOffline() -@skipUnlessCI() -@pytest.mark.last -class IpfsHttpClientShutdownTest(unittest.TestCase): - def setUp(self): - self.http_client = ipfshttpclient.Client() - self.pid = int(os.environ["PY_IPFS_HTTP_CLIENT_TEST_DAEMON_PID"]) - - @staticmethod - def _pid_exists(pid): - """ - Check whether pid exists in the current process table - - Source: https://stackoverflow.com/a/23409343/277882 - """ - if os.name == 'posix': - import errno - if pid < 0: - return False - try: - os.kill(pid, 0) - except OSError as e: - return e.errno == errno.EPERM - else: - return True - else: - import ctypes - kernel32 = ctypes.windll.kernel32 - HANDLE = ctypes.c_void_p - DWORD = ctypes.c_ulong - LPDWORD = ctypes.POINTER(DWORD) - class ExitCodeProcess(ctypes.Structure): - _fields_ = [ ('hProcess', HANDLE), - ('lpExitCode', LPDWORD)] - - SYNCHRONIZE = 0x100000 - process = kernel32.OpenProcess(SYNCHRONIZE, 0, pid) - if not process: - return False - - ec = ExitCodeProcess() - out = kernel32.GetExitCodeProcess(process, ctypes.byref(ec)) - if not out: - err = kernel32.GetLastError() - if kernel32.GetLastError() == 5: - # Access is denied. - logging.warning("Access is denied to get pid info.") - kernel32.CloseHandle(process) - return False - elif bool(ec.lpExitCode): - # There is an exit code, it quit - kernel32.CloseHandle(process) - return False - # No exit code, it's running. - kernel32.CloseHandle(process) - return True - - def _is_ipfs_running(self): - return self._pid_exists(self.pid) - - - def test_daemon_shutdown(self): - # Daemon should still be running at this point - assert self._is_ipfs_running() - - # Send stop request - self.http_client.shutdown() - - # Wait for daemon process to disappear - for _ in range(10000): - if not self._is_ipfs_running(): - break - time.sleep(0.001) - - # Daemon should not be running anymore - assert not self._is_ipfs_running() - - - -if __name__ == "__main__": - unittest.main() diff --git a/test/run-tests.py b/test/run-tests.py index 1fbff48e..0f0d8763 100755 --- a/test/run-tests.py +++ b/test/run-tests.py @@ -72,6 +72,8 @@ def _contextlib_suppress(*exceptions): # Start daemon # ################ +import ipfshttpclient # noqa + # Spawn IPFS daemon in data directory DAEMON = subprocess.Popen(["ipfs", "daemon", "--enable-pubsub-experiment"]) os.environ["PY_IPFS_HTTP_CLIENT_TEST_DAEMON_PID"] = str(DAEMON.pid) @@ -83,7 +85,6 @@ def _contextlib_suppress(*exceptions): signal.signal(signal.SIGCHLD, lambda *a: DAEMON.poll()) # Wait for daemon to start up -import ipfshttpclient while True: try: ipfshttpclient.connect(HOST, PORT) diff --git a/test/unit/test_client.py b/test/unit/test_client.py new file mode 100644 index 00000000..9ef3ec8e --- /dev/null +++ b/test/unit/test_client.py @@ -0,0 +1,16 @@ +import pytest + +import ipfshttpclient + + +def test_assert_version(): + # Minimum required version + ipfshttpclient.assert_version("0.1.0", "0.1.0", "0.2.0") + + # Too high version + with pytest.raises(ipfshttpclient.exceptions.VersionMismatch): + ipfshttpclient.assert_version("0.2.0", "0.1.0", "0.2.0") + + # Too low version + with pytest.raises(ipfshttpclient.exceptions.VersionMismatch): + ipfshttpclient.assert_version("0.0.5", "0.1.0", "0.2.0") \ No newline at end of file diff --git a/test/unit/test_encoding.py b/test/unit/test_encoding.py index 15b72ee2..a8991dc2 100644 --- a/test/unit/test_encoding.py +++ 
b/test/unit/test_encoding.py @@ -6,7 +6,6 @@ import unittest import json -import pickle import pytest import six @@ -32,7 +31,6 @@ class TestEncoding(unittest.TestCase): def setUp(self): """create a Json encoder""" self.encoder_json = ipfshttpclient.encoding.Json() - self.encoder_pickle = ipfshttpclient.encoding.Pickle() def test_json_parse(self): """Asserts parsed key/value json matches expected output.""" @@ -109,24 +107,6 @@ def test_json_encode(self): data = {'key': 'value'} assert self.encoder_json.encode(data) == b'{"key":"value"}' - def test_encode_pickle(self): - """Tests serilization of pickle formatted string into an object.""" - # In Python 2, data appears to be encoded differently based on the - # context from which pickle.dumps() is called. For this reason we are - # encoding and then decoding data to ensure that the decoded values are - # equivalent after the original data has been serialized. - data = {'key': 'value'} - encoder_res = pickle.loads(self.encoder_pickle.encode(data)) - pickle_res = pickle.loads(pickle.dumps(data)) - assert encoder_res == pickle_res - - def test_parse_pickle(self): - """Tests if pickled Python object matches expected output.""" - data = {'key': 'value'} - raw = pickle.dumps(data) - res = self.encoder_pickle.parse(raw) - assert res['key'] == 'value' - def test_get_encoder_by_name(self): """Tests the process of obtaining an Encoder object given the named encoding.""" encoder = ipfshttpclient.encoding.get_encoding('json') diff --git a/tox.ini b/tox.ini index c98ac381..ef3d7a88 100644 --- a/tox.ini +++ b/tox.ini @@ -4,7 +4,8 @@ envlist = py27, py34, py35, - py36 + py36, + py37 # Tox' sdist feature presumes that `./setup.py sdist` is available # Disable this feature until PEP-517 is implemented by both tox and flit. @@ -27,30 +28,31 @@ commands = flake8 {posargs} [flake8] -exclude = .git,.tox,+junk,dist,doc,*egg,build,tools,test,docs,*__init__.py +exclude = .git,.tox,+junk,dist,doc,*egg,build,tools,test/unit,docs,*__init__.py # E221: Multiple spaces before operator -# E222: Multiple spaces after operator +# E241: Multiple spaces after ',': Breaks element alignment in collections # E262: Inline comment should start with '# ': Breaks tagged comments (ie: '#TODO: ') # E265: Block comment should start with '# ': ^ +# E266: Too many leading '#' for block comment: Breaks declaring mega-blocks (ie: '### Section') # E303: More than 2 consecutive newlines # W292: No newline at end of file # W391: Blank line at end of file (sometimes trigged instead of the above!?) # F403: `from <module> import *` used; unable to detect undefined names ←– Probably should be fixed… -ignore = E221,E222,E262,E265,E303,W292,W391,F403 +ignore = E221,E241,E262,E265,E266,E303,W292,W391,F403 max-line-length = 100 tab-width = 4 # E701: Multiple statements on one line # - multipart.py: Lots of `yield from` polyfills using `for chunk in X: yield chunk` +# - test_*.py: Aligning `assert … not in …` and `assert … in …` statements per-file-ignores = /ipfshttpclient/multipart.py: E701 + /test/functional/test_*.py: E272 [pytest] python_files = test_*.py - *_test.py - tests.py addopts = # --doctest-modules / Totally useless since it cannot properly check the `client` package ipfshttpclient
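
All of the new functional test modules above receive their `client` through a shared pytest fixture, and some additionally use `conftest.TEST_DIR`, `conftest.sort_by_key` and a `cleanup_pins` fixture; the `test/functional/conftest.py` that provides these is not included in this diff. The sketch below is only a plausible reconstruction under those assumptions: the names are taken from the tests above, but every body is a guess, not the file actually shipped with this change.

# test/functional/conftest.py -- illustrative sketch only, not part of this diff
import pathlib

import pytest

import ipfshttpclient

# Assumed to resolve to test/functional/, the directory containing fake_dir
TEST_DIR = pathlib.Path(__file__).parent


def sort_by_key(items, key="Name"):
	# Order-independent comparison helper for lists of result dictionaries
	return sorted(items, key=lambda x: x[key])


@pytest.fixture(scope="module")
def client():
	# One daemon connection per test module; ipfshttpclient.connect() raises
	# if no IPFS daemon is reachable at the default address
	return ipfshttpclient.connect()


@pytest.fixture
def cleanup_pins(client):
	# Snapshot the recursively pinned objects before the test runs, then
	# unpin anything the test added -- the same strategy as the
	# _clean_up_pins() helper of the removed tests.py
	pinned = set(client.pin.ls(type="recursive")["Keys"])
	yield
	for multihash in client.pin.ls(type="recursive")["Keys"]:
		if multihash not in pinned:
			client.pin.rm(multihash)

Keeping the fixture module-scoped avoids re-validating the daemon version for every single test function while still letting these plain pytest functions replace the removed unittest classes.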