diff --git a/.github/actions/latest-ipfs-tag/action.yml b/.github/actions/latest-ipfs-tag/action.yml deleted file mode 100644 index c16af5f33..000000000 --- a/.github/actions/latest-ipfs-tag/action.yml +++ /dev/null @@ -1,7 +0,0 @@ -name: 'Find latest go-ipfs tag' -outputs: - latest_tag: - description: "latest go-ipfs tag name" -runs: - using: 'docker' - image: 'Dockerfile' diff --git a/.github/actions/latest-ipfs-tag/Dockerfile b/.github/actions/latest-kubo-tag/Dockerfile similarity index 100% rename from .github/actions/latest-ipfs-tag/Dockerfile rename to .github/actions/latest-kubo-tag/Dockerfile diff --git a/.github/actions/latest-kubo-tag/action.yml b/.github/actions/latest-kubo-tag/action.yml new file mode 100644 index 000000000..42f2508a3 --- /dev/null +++ b/.github/actions/latest-kubo-tag/action.yml @@ -0,0 +1,7 @@ +name: 'Find latest Kubo tag' +outputs: + latest_tag: + description: "latest Kubo tag name" +runs: + using: 'docker' + image: 'Dockerfile' diff --git a/.github/actions/latest-ipfs-tag/entrypoint.sh b/.github/actions/latest-kubo-tag/entrypoint.sh similarity index 53% rename from .github/actions/latest-ipfs-tag/entrypoint.sh rename to .github/actions/latest-kubo-tag/entrypoint.sh index aaec647d6..777e0b6bd 100755 --- a/.github/actions/latest-ipfs-tag/entrypoint.sh +++ b/.github/actions/latest-kubo-tag/entrypoint.sh @@ -2,16 +2,16 @@ set -eu # extract tag name from latest stable release -REPO="ipfs/go-ipfs" -LATEST_IPFS_TAG=$(curl -H "Accept: application/vnd.github.v3+json" "https://api.github.com/repos/${REPO}/releases/latest" | jq --raw-output ".tag_name") +REPO="ipfs/kubo" +LATEST_IPFS_TAG=$(curl -L -H "Accept: application/vnd.github.v3+json" "https://api.github.com/repos/${REPO}/releases/latest" | jq --raw-output ".tag_name") # extract IPFS release cd /tmp git clone "https://github.com/$REPO.git" -cd go-ipfs +cd kubo # confirm tag is valid git describe --tags "${LATEST_IPFS_TAG}" -echo "The latest IPFS tag is ${LATEST_IPFS_TAG}" +echo "The latest Kubo tag is ${LATEST_IPFS_TAG}" echo "::set-output name=latest_tag::${LATEST_IPFS_TAG}" diff --git a/.github/actions/update-with-latest-versions/action.yml b/.github/actions/update-with-latest-versions/action.yml index 53de97d6f..4af9e51a7 100644 --- a/.github/actions/update-with-latest-versions/action.yml +++ b/.github/actions/update-with-latest-versions/action.yml @@ -1,7 +1,7 @@ name: 'Update when a new tag or a new release is available' inputs: latest_ipfs_tag: - description: "latest go ipfs tag" + description: "latest Kubo tag" required: true outputs: updated_branch: diff --git a/.github/actions/update-with-latest-versions/entrypoint.sh b/.github/actions/update-with-latest-versions/entrypoint.sh index dca42a77a..0287e3036 100755 --- a/.github/actions/update-with-latest-versions/entrypoint.sh +++ b/.github/actions/update-with-latest-versions/entrypoint.sh @@ -4,27 +4,27 @@ set -eu BRANCH=bump-documentation-to-latest-versions LATEST_IPFS_TAG=$INPUT_LATEST_IPFS_TAG -echo "The latest IPFS tag is ${LATEST_IPFS_TAG}" +echo "The latest Kubo tag is ${LATEST_IPFS_TAG}" ROOT=`pwd` git checkout -b ${BRANCH} -API_FILE=`pwd`/docs/reference/http/api.md +API_FILE="$(pwd)/docs/reference/kubo/rpc.md" # Update http api docs and cli docs cd tools/http-api-docs -# extract go-ipfs release tag used in http-api-docs from go.mod in this repo -CURRENT_IPFS_TAG=`grep 'github.com/ipfs/go-ipfs ' ./go.mod | awk '{print $2}'` -echo "The currently used go-ipfs tag in http-api-docs is ${CURRENT_IPFS_TAG}" +# extract kubo release tag used in 
http-api-docs from go.mod in this repo +CURRENT_IPFS_TAG=$(grep 'github.com/ipfs/kubo ' ./go.mod | awk '{print $2}') +echo "The currently used Kubo tag in http-api-docs is ${CURRENT_IPFS_TAG}" -# make the upgrade, if newer go-ipfs tags exist +# make the upgrade, if newer Kubo tags exist if [ "$CURRENT_IPFS_TAG" = "$LATEST_IPFS_TAG" ]; then - echo "http-api-docs already uses the latest go-ipfs tag." + echo "http-api-docs already uses the latest Kubo tag." else # update http-api-docs - sed "s/^\s*github.com\/ipfs\/go-ipfs\s\+$CURRENT_IPFS_TAG\s*$/ github.com\/ipfs\/go-ipfs $LATEST_IPFS_TAG/" go.mod > go.mod2 + sed "s/^\s*github.com\/ipfs\/kubo\s\+$CURRENT_IPFS_TAG\s*$/ github.com\/ipfs\/kubo $LATEST_IPFS_TAG/" go.mod > go.mod2 mv go.mod2 go.mod go mod tidy make @@ -32,12 +32,12 @@ else # update cli docs cd "$ROOT" # go back to root of ipfs-docs repo - git clone https://github.com/ipfs/go-ipfs.git - cd go-ipfs + git clone https://github.com/ipfs/kubo.git + cd kubo git fetch --all --tags git checkout "tags/$LATEST_IPFS_TAG" go install ./cmd/ipfs - cd "$ROOT/docs/reference" + cd "$ROOT/docs/reference/kubo" ./generate-cli-docs.sh fi @@ -64,7 +64,7 @@ update_version() { cd "${ROOT}" update_version ipfs/ipfs-update current-ipfs-updater-version update_version ipfs-cluster/ipfs-cluster current-ipfs-cluster-version -update_version ipfs/go-ipfs current-ipfs-version +update_version ipfs/kubo current-ipfs-version # Push on change diff --git a/.github/workflows/update-on-new-ipfs-tag.yml b/.github/workflows/update-on-new-ipfs-tag.yml index 5a1101b13..cf1f291d4 100644 --- a/.github/workflows/update-on-new-ipfs-tag.yml +++ b/.github/workflows/update-on-new-ipfs-tag.yml @@ -11,9 +11,9 @@ jobs: steps: - name: Checkout ipfs-docs uses: actions/checkout@v2 - - name: Find latest go-ipfs tag + - name: Find latest kubo tag id: latest_ipfs - uses: ./.github/actions/latest-ipfs-tag + uses: ./.github/actions/latest-kubo-tag - name: Update docs id: update uses: ./.github/actions/update-with-latest-versions @@ -26,7 +26,7 @@ jobs: github_token: ${{ secrets.GITHUB_TOKEN }} source_branch: ${{ steps.update.outputs.updated_branch }} destination_branch: "main" - pr_title: "Update documentation ${{ steps.latest_ipfs.outputs.latest_tag }}" - pr_body: "Release Notes: https://github.com/ipfs/go-ipfs/releases/${{ steps.latest_ipfs.outputs.latest_tag }}" + pr_title: "Update release version numbers" + pr_body: "This PR was opened from update-on-new-ipfs-tag.yml workflow." pr_label: "needs/triage,P0" diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index ab640ca79..05d7e7ded 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -64,9 +64,9 @@ Write everything in using the [GitHub Flavored Markdown](https://github.github.c ### Project specific titles -When referring to projects by name, use proper noun capitalization: Go-IPFS and JS-IPFS. +When referring to projects by name, use proper noun capitalization: Kubo (GO-IPFS) and JS-IPFS. -Cases inside code blocks refer to commands and are not capitalized: `go-ipfs` or `js-ipfs`. +Cases inside code blocks refer to commands and are not capitalized: `kubo` (`go-ipfs`) or `js-ipfs`. 
### Style and tone diff --git a/docs/.vuepress/config.js b/docs/.vuepress/config.js index d5d8bf4f3..92bd828c9 100644 --- a/docs/.vuepress/config.js +++ b/docs/.vuepress/config.js @@ -263,10 +263,11 @@ module.exports = { title: 'API & CLI', path: '/reference/', children: [ - '/reference/go/api', + '/reference/http/gateway', '/reference/js/api', - '/reference/http/api', - '/reference/cli' + '/reference/go/api', + '/reference/kubo/cli', + '/reference/kubo/rpc' ] }, { diff --git a/docs/.vuepress/redirects b/docs/.vuepress/redirects index dcfdc1b55..0075ee73a 100644 --- a/docs/.vuepress/redirects +++ b/docs/.vuepress/redirects @@ -51,7 +51,10 @@ /recent-releases/go-ipfs-0-7/install/ /install/recent-releases /recent-releases/go-ipfs-0-7/update-procedure/ /install/recent-releases /reference/api/ /reference -/reference/api/cli/ /reference/cli +/reference/api/cli/ /reference/kubo/cli +/reference/cli/ /reference/kubo/cli +/reference/kubo/ /reference +/reference/http/ /reference/http/api /reference/api/http/ /reference/http/api /reference/go/overview/ /reference/go/api /reference/js/overview/ /reference/js/api diff --git a/docs/.vuepress/theme/components/Page.vue b/docs/.vuepress/theme/components/Page.vue index 18008ad4a..202c31e4b 100644 --- a/docs/.vuepress/theme/components/Page.vue +++ b/docs/.vuepress/theme/components/Page.vue @@ -54,10 +54,21 @@ export default { return root.scrollHeight < 15000 ? root.classList.add('smooth-scroll') : root.classList.remove('smooth-scroll') + }, + advancedRedirect: async function () { + // Advanced redirect that is aware of URL #hash + const url = window.location.href + // https://github.com/ipfs/ipfs-docs/pull/1185 + if (url.includes('/reference/http/api')) { + if (window.location.hash.startsWith('#api-v0')) { + window.location.replace(url.replace('/reference/http/api','/reference/kubo/rpc')) + } + } } }, mounted: function () { this.smoothScroll() + this.advancedRedirect() }, updated: function () { this.smoothScroll() diff --git a/docs/basics/command-line.md b/docs/basics/command-line.md index 6195410cf..85888ab9e 100644 --- a/docs/basics/command-line.md +++ b/docs/basics/command-line.md @@ -42,7 +42,7 @@ This will output something like: ```plaintext Initializing daemon... -go-ipfs version: 0.12.0 +Kubo version: 0.12.0 Repo version: 12 System version: arm64/darwin [...] diff --git a/docs/community/contribute/grammar-formatting-and-style.md b/docs/community/contribute/grammar-formatting-and-style.md index 43f18ab90..84039dc93 100755 --- a/docs/community/contribute/grammar-formatting-and-style.md +++ b/docs/community/contribute/grammar-formatting-and-style.md @@ -36,9 +36,9 @@ If you have to use an acronym, spell the full phrase first and include the acron ### Project specific titles -When referring to projects by name, use proper noun capitalization: Go-IPFS and JS-IPFS. +When referring to projects by name, use proper noun capitalization: Kubo and JS-IPFS. -Cases inside code blocks refer to commands and are not capitalized: `go-ipfs` or `js-ipfs`. +Cases inside code blocks refer to commands and are not capitalized: `kubo` or `js-ipfs`. 
### _Using_ IPFS, not _on_ IPFS diff --git a/docs/community/contribute/ways-to-contribute.md b/docs/community/contribute/ways-to-contribute.md index 40a72d294..71f7cd7ea 100644 --- a/docs/community/contribute/ways-to-contribute.md +++ b/docs/community/contribute/ways-to-contribute.md @@ -14,7 +14,7 @@ IPFS and its sister-projects are big, with lots of code written in multiple lang The biggest and most active repositories we have today are: -- [ipfs/go-ipfs](https://github.com/ipfs/go-ipfs) +- [ipfs/kubo](https://github.com/ipfs/kubo) - [ipfs/js-ipfs](https://github.com/ipfs/js-ipfs) - [libp2p/go-libp2p](https://github.com/libp2p/go-libp2p) - [libp2p/js-libp2p](https://github.com/libp2p/js-libp2p) diff --git a/docs/concepts/case-study-arbol.md b/docs/concepts/case-study-arbol.md index 33e07ec52..be732d561 100644 --- a/docs/concepts/case-study-arbol.md +++ b/docs/concepts/case-study-arbol.md @@ -80,13 +80,13 @@ Arbol's end users enjoy the "it just works" benefits of parametric protection, b 4. **Compression:** This step is the final one before data is imported to IPFS. Arbol compresses each file to save on disk space and reduce sync time. -5. **Hashing:** Arbol uses the stock IPFS recursive add operation ([`ipfs add -r`](./reference/cli/#ipfs-add)) for hashing, as well as the experimental `no-copy` feature. This feature cuts down on disk space used by the hashing node, especially on the initial build of the dataset. Without it, an entire dataset would be copied into the local IPFS datastore directory. This can create problems, since the default flat file system datastore (`flatfs`) can start to run out of index nodes (the software representation of disk locations) after a few million files, leading to hashing failure. Arbol is also experimenting with [Badger](https://github.com/ipfs/go-ipfs/releases/tag/v0.5.0), an alternative to flat file storage, in collaboration with the IPFS core team as the core team considers incorporating this change into IPFS itself. +5. **Hashing:** Arbol uses the stock IPFS recursive add operation ([`ipfs add -r`](./reference/kubo/cli/#ipfs-add)) for hashing, as well as the experimental `no-copy` feature. This feature cuts down on disk space used by the hashing node, especially on the initial build of the dataset. Without it, an entire dataset would be copied into the local IPFS datastore directory. This can create problems, since the default flat file system datastore (`flatfs`) can start to run out of index nodes (the software representation of disk locations) after a few million files, leading to hashing failure. Arbol is also experimenting with [Badger](https://github.com/ipfs/kubo/releases/tag/v0.5.0), an alternative to flat file storage, in collaboration with the IPFS core team as the core team considers incorporating this change into IPFS itself. 6. **Verification:** To ensure no errors were introduced to files during the parsing stage, queries are made to the source data files and compared against the results of an identical query made to the parsed, hashed data. 7. **Publishing:** Once a hash has been verified, it is posted to Arbol's master heads reference file, and is at this point accessible via Arbol's gateway and available for use in contracts. -8. **Pinning and syncing:** When storage nodes in the Arbol network detect that a new hash has been added to the heads file, they run the standard, recursive [`ipfs pin -r`](./reference/cli.md#ipfs-pin) command on it. 
Arbol's primary active nodes don't need to be large in number: The network includes a single [gateway node](ipfs-gateway.md) that bootstraps with all the parsing/hashing nodes, and a few large storage nodes that serve as the primary data storage backup. However, data is also regularly synced with "cold nodes" — archival storage nodes that are mostly kept offline — as well as on individual IPFS nodes on Arbol's developers' and agronomists' personal computers. +8. **Pinning and syncing:** When storage nodes in the Arbol network detect that a new hash has been added to the heads file, they run the standard, recursive [`ipfs pin -r`](./reference/kubo/cli.md#ipfs-pin) command on it. Arbol's primary active nodes don't need to be large in number: The network includes a single [gateway node](ipfs-gateway.md) that bootstraps with all the parsing/hashing nodes, and a few large storage nodes that serve as the primary data storage backup. However, data is also regularly synced with "cold nodes" — archival storage nodes that are mostly kept offline — as well as on individual IPFS nodes on Arbol's developers' and agronomists' personal computers. 9. **Garbage collection:** Some older Arbol datasets require [garbage collection](glossary.md#garbage-collection) whenever new data is added, due to a legacy method of overwriting old hashes with new hashes. However, all of Arbol's newer datasets use an architecture where old hashes are preserved and new posts reference the previous post. This methodology creates a linked list of hashes, with each hash containing a reference to the previous hash. As the length of the list becomes computationally burdensome, the system consolidates intermediate nodes and adds a new route to the head, creating a [DAG (directed acyclic graph)](merkle-dag.md) structure. Heads are always stored in a master [heads.json reference file](https://gateway.arbolmarket.com/climate/hashes/heads.json) located on Arbol's command server. @@ -94,7 +94,7 @@ Arbol's end users enjoy the "it just works" benefits of parametric protection, b ![Arbol high-level architecture](./images/case-studies/img-arbol-arch.svg) -In addition to out-of-the-box [`go-ipfs`](https://github.com/ipfs/go-ipfs), Arbol relies heavily on custom written libraries and a number of weather-specialized Python libraries such as [netCDF4](https://pypi.org/project/netCDF4/) (an interface to netCDF, a self-describing format for array-oriented data) and [rasterio](https://pypi.org/project/rasterio) (for geospatial raster data). Additionally, Docker and Digital Ocean are important tools in Arbol's box for continuous integration and deployment. +In addition to out-of-the-box [`kubo`](https://github.com/ipfs/kubo), Arbol relies heavily on custom written libraries and a number of weather-specialized Python libraries such as [netCDF4](https://pypi.org/project/netCDF4/) (an interface to netCDF, a self-describing format for array-oriented data) and [rasterio](https://pypi.org/project/rasterio) (for geospatial raster data). Additionally, Docker and Digital Ocean are important tools in Arbol's box for continuous integration and deployment. As described above, Arbol datasets are ingested and augmented via either push or pull. For pulling data, Arbol uses a command server to query dataset release pages for new content. When new data is found, the command server spins up a Digital Ocean droplet (a Linux-based virtual machine) and deploys a "parse-interpret-compress-hash-verify" Docker container to it. 
This is done using a custom-built library that Arbol describes as "homebrew Lambda." Because Amazon's Lambda serverless compute has disk storage, CPU, and RAM limitations that make it unsuitable for the scale and complexity of Arbol's pipeline, the team has created their own tool. diff --git a/docs/concepts/case-study-audius.md b/docs/concepts/case-study-audius.md index c1c64c20d..a6964a9fd 100644 --- a/docs/concepts/case-study-audius.md +++ b/docs/concepts/case-study-audius.md @@ -94,15 +94,15 @@ IPFS has provided Audius the full benefits of decentralized storage with no hass ## How Audius uses IPFS -All files and metadata on Audius are _shared_ using IPFS by creator node services, _registered_ on Audius smart contracts, _indexed_ by discovery services, and _served_ through the client to end users. Audius runs nodes internally to test new changes, and there are a dozen public hosts running nodes for specific services and geographies. However, content creators and listeners don’t need to know anything about the back end; they use the Audius client and client libraries to upload and stream audio. Each IPFS node within the Audius network is currently a [`go-ipfs`](https://github.com/ipfs/go-ipfs) container co-located with service logic. Audius implements the services interface with `go-ipfs` using [`py-ipfs-api`](https://github.com/ipfs-shipyard/py-ipfs-http-client) or [`ipfs-http-client`](https://github.com/ipfs/js-ipfs/tree/master/packages/ipfs-http-client) (JavaScript) to perform read and write operations. +All files and metadata on Audius are _shared_ using IPFS by creator node services, _registered_ on Audius smart contracts, _indexed_ by discovery services, and _served_ through the client to end users. Audius runs nodes internally to test new changes, and there are a dozen public hosts running nodes for specific services and geographies. However, content creators and listeners don’t need to know anything about the back end; they use the Audius client and client libraries to upload and stream audio. Each IPFS node within the Audius network is currently a [`kubo`](https://github.com/ipfs/kubo) container co-located with service logic. Audius implements the services interface with `kubo` using [`py-ipfs-api`](https://github.com/ipfs-shipyard/py-ipfs-http-client) or [`ipfs-http-client`](https://github.com/ipfs/js-ipfs/tree/master/packages/ipfs-http-client) (JavaScript) to perform read and write operations. ### The tooling Audius uses the following IPFS implementations with no modification: - **IPFS core** -- [`go-ipfs`](https://github.com/ipfs/go-ipfs) - - _All individual nodes are `go-ipfs` containers_ +- [`kubo`](https://github.com/ipfs/kubo) + - _All individual nodes are `kubo` containers_ - [`py-ipfs-api`](https://github.com/ipfs-shipyard/py-ipfs-http-client) - _Discovery provider is a Python application_ - _Python application uses a Flask server + Celery worker queue + PostgreSQL database_ diff --git a/docs/concepts/case-study-fleek.md b/docs/concepts/case-study-fleek.md index 8852f9124..9324bf747 100644 --- a/docs/concepts/case-study-fleek.md +++ b/docs/concepts/case-study-fleek.md @@ -82,7 +82,7 @@ In short, a developer on Fleek can use familiar tools and workflows without worr ### The tooling -At the core of all of Fleek's offerings are [`go-ipfs`](https://github.com/ipfs/go-ipfs) and [`ipfs-http-client`](https://github.com/ipfs/js-ipfs), used out of the box. 
Nodes on `go-ipfs` appear throughout the Fleek ecosystem, including: +At the core of all of Fleek's offerings are [`kubo`](https://github.com/ipfs/kubo) and [`ipfs-http-client`](https://github.com/ipfs/js-ipfs), used out of the box. Nodes on `kubo` appear throughout the Fleek ecosystem, including: - Pinning nodes used by Fleek Hosting and Fleek Storage - Nodes for encrypted backups @@ -97,26 +97,26 @@ Additionally, Fleek relies upon building blocks from [Textile](https://textile.i #### Fleek Hosting -- Fleek Hosting deploys built sites onto `go-ipfs` nodes directly, and also replicates on at least one additional node. +- Fleek Hosting deploys built sites onto `kubo` nodes directly, and also replicates on at least one additional node. - Additionally, the service augments these nodes with a traditional HTTP CDN in order to improve performance on secondary fetching. Says Shear: "Websites these days are used to 20ms times for fetching sites past the first load, so we want to offer at least that." - This CDN's cache is cleared, at minimum, for each new Git commit and its resulting Fleek Hosting auto-deployment. - Whenever the CDN's cache is cleared, content is re-fetched from IPFS (with all requests made to Fleek's gateway) in the same manner in which it was originally fetched — e.g. from the TXT record of an IPFS hash added to a user's DNS, via [DNSLink](https://dnslink.io/), or via [Ethereum Name Service (ENS)](https://ens.domains/) domains. #### Fleek Storage -- Fleek Storage uses `go-ipfs` nodes in combination with [MinIO](https://min.io/) S3-equivalent APIs; the S3-like APIs give full compatibility with any AWS tooling and a familiar bucket structure to files. +- Fleek Storage uses `kubo` nodes in combination with [MinIO](https://min.io/) S3-equivalent APIs; the S3-like APIs give full compatibility with any AWS tooling and a familiar bucket structure to files. - Fleek-built handlers create a root bucket hash, as well as folder hashes that Fleek Storage files live within. - Any change to files or folders updates all the way back up to the root bucket hash. #### Space Daemon -- The Space Daemon developer toolset code combines a `go-ipfs` IPFS node with offerings from Textile (particularly its [Threads](http://docs.textile.io/threads) multi-party database architecture) and a key management tool in a single, easy-to-install Go daemon. +- The Space Daemon developer toolset code combines a `kubo` IPFS node with offerings from Textile (particularly its [Threads](http://docs.textile.io/threads) multi-party database architecture) and a key management tool in a single, easy-to-install Go daemon. - Third-party integrations for enhanced functionality include — but aren't limited to — Filecoin (via [Textile Powergate](https://docs.textile.io/powergate/)) for encrypted backups, [Ceramic](https://www.ceramic.network/) for identity management, and [Handshake](https://handshake.org/) for naming and domain functionality. - The Space Daemon JavaScript library acts as an abstraction layer to the [gRPC](https://grpc.io/docs/) methods of the Go daemon, providing developers with a convenient JavaScript interface that they can install locally in their applications. #### Fleek Gateway -- While not a Fleek "product" per se, Fleek extensively uses its own HTTP gateway (ipfs.fleek.co) created from their `go-ipfs` node infrastructure. +- While not a Fleek "product" per se, Fleek extensively uses its own HTTP gateway (ipfs.fleek.co) created from their `kubo` node infrastructure. 
- When gateway-based delivery is needed — for example, for verification and other links in Fleek product GUIs — links to CIDs of hosting or storage content use this gateway, but can also be fetched using any other IPFS gateway. ## Fleek + IPFS: the future diff --git a/docs/concepts/case-study-likecoin.md b/docs/concepts/case-study-likecoin.md index 81e2fadc1..c6ef1118c 100644 --- a/docs/concepts/case-study-likecoin.md +++ b/docs/concepts/case-study-likecoin.md @@ -68,7 +68,7 @@ The LikeCoin team's core goals of providing a quantifiably rewardable space for - **Performant distributed storage:** IPFS provides a reliable, proven solution for distributed storage out of the box, backed by active core development and an engaged international [user and developer community](../community/README.md). - **Data integrity:** Thanks to its inherent use of [content addressing](content-addressing.md), IPFS generates a unique content identifier (CID) for every artifact stored on IPFS — meaning that if an item is modified, its CID changes, too. Creators, curators, and users can share and view content in Liker Land with assurance that items haven't been modified by third parties. - **Censorship resistance:** API gateways to the LikeCoin blockchain itself can potentially be blocked by governments or other infrastructure players. By contrast, content stored and provided using IPFS can be accessed as long as a copy exists on an IPFS node somewhere on the network. -- **IPLD as a blockchain intermediary:** Using the [IPLD plugin](https://github.com/ipfs/go-ipfs/tree/master/plugin) included in `go-ipfs`, any IPFS node can be used to access data stored on the LikeCoin blockchain. This makes it much harder for anyone to block access to LikeCoin-affiliated content. And because the plugin enables users to retrieve ISCN metadata through a CID — just like with any other piece of content on IPFS — the user experience of interacting with the LikeCoin blockchain is simplified even further. +- **IPLD as a blockchain intermediary:** Using the [IPLD plugin](https://github.com/ipfs/kubo/tree/master/plugin) included in `kubo`, any IPFS node can be used to access data stored on the LikeCoin blockchain. This makes it much harder for anyone to block access to LikeCoin-affiliated content. And because the plugin enables users to retrieve ISCN metadata through a CID — just like with any other piece of content on IPFS — the user experience of interacting with the LikeCoin blockchain is simplified even further. ## How LikeCoin uses IPFS @@ -79,7 +79,7 @@ The LikeCoin ecosystem is made up of four primary components: - The **Liker Land app and browser extensions**, through which users participate in the LikeCoin ecosystem - The **LikeCoin button** for third-party blogs and other publishing platforms, connecting likes on those platforms to the LikeCoin reward system -Critical to all of these is LikeCoin's use of IPFS and its integration with the ISCN blockchain. This takes place via the [IPLD plugin](https://github.com/ipfs/go-ipfs/tree/master/plugin) included out of the box with [`go-ipfs`](https://github.com/ipfs/go-ipfs), enabling ISCN metadata for content items to associate with the items themselves stored on IPFS. This enables an integrated data structure in which LikeCoin's blockchain stores content metadata, while IPFS is used for querying and distributing it. This process takes place using a separate custom datastore plugin created by the LikeCoin team. 
+Critical to all of these is LikeCoin's use of IPFS and its integration with the ISCN blockchain. This takes place via the [IPLD plugin](https://github.com/ipfs/kubo/tree/master/plugin) included out of the box with [`kubo`](https://github.com/ipfs/kubo), enabling ISCN metadata for content items to associate with the items themselves stored on IPFS. This enables an integrated data structure in which LikeCoin's blockchain stores content metadata, while IPFS is used for querying and distributing it. This process takes place using a separate custom datastore plugin created by the LikeCoin team. LikeCoin's [datastore plugin](https://github.com/likecoin/likecoin-ipfs-cosmosds/blob/master/cosmosds/datastore.go) is based on IPFS's [`go-ds-leveldb`](https://github.com/ipfs/go-ds-leveldb) datastore plugin, which implements the [`go-datastore`](https://github.com/ipfs/go-datastore) key-value datastore interface using a LevelDB back end. LikeCoin's custom version implements the `get`, `getSize`, and `has` functions in order to make one primary modification to `go-ds-leveldb`: Since IPFS CIDs can also [include pointers to IPLD objects](https://proto.school/anatomy-of-a-cid), the plugin can identify whether a given CID references data on the LikeCoin blockchain, and if so, delegate it to the LikeCoin chain via an internal HTTP request. If on-chain data is queried, the query request is forwarded to the LikeCoin chain through Remote Procedure Call (RPC) endpoints exposed by the chain. @@ -87,7 +87,7 @@ Just as IPFS is tightly integrated with the LikeCoin ISCN blockchain, chain node ### The tooling -Because the LikeCoin blockchain itself is built in Go, the team was able to easily utilize `go-ipfs` to achieve most of their required functionality out of the box. Just two additional custom plugins were needed in order to successfully integrate `go-ipfs`: +Because the LikeCoin blockchain itself is built in Go, the team was able to easily utilize `kubo` to achieve most of their required functionality out of the box. Just two additional custom plugins were needed in order to successfully integrate `kubo`: - [`ipfs-cosmosds`](https://github.com/likecoin/likecoin-ipfs-cosmosds) (noted above), which delegates ISCN metadata queries to the LikeCoin chain - [`iscn-ipld`](https://github.com/likecoin/iscn-ipld), which parses ISCN-related IPLD data in order to associate ISCN metadata with IPFS artifacts diff --git a/docs/concepts/case-study-morpheus.md b/docs/concepts/case-study-morpheus.md index 7b934e250..1071bd06e 100644 --- a/docs/concepts/case-study-morpheus.md +++ b/docs/concepts/case-study-morpheus.md @@ -52,7 +52,7 @@ From the perspective of the user, all of this is transparent. Shippers and other ## IPFS benefits -The decision to use IPFS was an easy one for the Morpheus.Network team. Not only was their integration of out-of-the-box [`go-ipfs`](https://github.com/ipfs/go-ipfs) and [`ipfs-http-client`](https://github.com/ipfs/js-ipfs/tree/master/packages/ipfs-http-client) a straightforward process that didn't require additional customization, but many of the benefits provided by IPFS were critical to the success of the platform: +The decision to use IPFS was an easy one for the Morpheus.Network team. 
Not only was their integration of out-of-the-box [`kubo`](https://github.com/ipfs/kubo) and [`ipfs-http-client`](https://github.com/ipfs/js-ipfs/tree/master/packages/ipfs-http-client) a straightforward process that didn't require additional customization, but many of the benefits provided by IPFS were critical to the success of the platform: - Proof of data integrity (or, if appropriate, revision) via IPFS content addressing - Middleman-free architecture enabling multiple parties to access official documents without a central data broker @@ -82,7 +82,7 @@ The Morpheus.Network team chose to run their own private IPFS nodes for two prim ### The tooling -- [`go-ipfs`](https://github.com/ipfs/go-ipfs) core +- [`kubo`](https://github.com/ipfs/kubo) core - [`ipfs-http-client`](https://github.com/ipfs/js-ipfs/tree/master/packages/ipfs-http-client) for integrating with the Digital Footprint web app - Private nodes for document storage diff --git a/docs/concepts/case-study-openbazaar.md b/docs/concepts/case-study-openbazaar.md index aaf9e9c2d..3c7df2e5c 100644 --- a/docs/concepts/case-study-openbazaar.md +++ b/docs/concepts/case-study-openbazaar.md @@ -72,18 +72,18 @@ In terms of concrete benefits, the OpenBazaar team cites the following as key wi ## How OpenBazaar uses IPFS -OpenBazaar's implementation is built upon a fork of the [`go-ipfs`](https://github.com/ipfs/go-ipfs) reference implementation, customized for their specific needs: +OpenBazaar's implementation is built upon a fork of the [`kubo`](https://github.com/ipfs/kubo) reference implementation, customized for their specific needs: - Persistent storage of at least a week (to remember buyers' shopping carts and allow for merchants who may not be online 24/7) - Support for Bitcoin wallets - A customized search engine that crawls the IPFS network to update shop content (the crawler maintains the inventory for OpenBazaar's and Haven's front end) - Circuit relay capabilities for working around firewalls and mobile iOS networks -- The use of `go-ipfs` without IPNS, to optimize for OpenBazaar's extremely large number of product images +- The use of `kubo` without IPNS, to optimize for OpenBazaar's extremely large number of product images - The use of [`libp2p`](https://libp2p.io/), but with its original addressing scheme ### The architecture -At its highest level, the OpenBazaar network is made up of many nodes, whether desktop/laptop (OpenBazaar) or mobile (Haven). Each node is either a merchant or a seller, with a few exceptions; OB1 does run some gateway nodes and caching nodes themselves. Every node has either the OpenBazaar or Haven app installed, and "inside" the respective app is a complete `go-ipfs` node. +At its highest level, the OpenBazaar network is made up of many nodes, whether desktop/laptop (OpenBazaar) or mobile (Haven). Each node is either a merchant or a seller, with a few exceptions; OB1 does run some gateway nodes and caching nodes themselves. Every node has either the OpenBazaar or Haven app installed, and "inside" the respective app is a complete `kubo` node. 
OpenBazaar high-level architecture diff --git a/docs/concepts/dht.md b/docs/concepts/dht.md index 2c30ea14d..c2e710af9 100644 --- a/docs/concepts/dht.md +++ b/docs/concepts/dht.md @@ -158,4 +158,4 @@ IPFS tries to connect to the peer with ID `H` as soon as we learn addresses abou If you're eager for more information about the DHT, take a look at these resources: - [_Content Routing Improvements: Deep Dive_ blog post](https://blog.ipfs.io/2020-07-20-dht-deep-dive/) -- [Go-IPFS 0.5.0 release highlights](https://www.youtube.com/watch?v=G8FvB_0HlCE) +- [Kubo 0.5.0 release highlights](https://www.youtube.com/watch?v=G8FvB_0HlCE) diff --git a/docs/concepts/file-systems.md b/docs/concepts/file-systems.md index 9097c18ca..8bbe3739c 100644 --- a/docs/concepts/file-systems.md +++ b/docs/concepts/file-systems.md @@ -220,7 +220,7 @@ await ipfs.files.rm('/my/beautiful/directory') When you add a _file_ to IPFS, it might be too big to fit in a single block, so it needs metadata to link all its blocks together. UnixFS is a [protocol-buffers](https://developers.google.com/protocol-buffers/)-based format for describing files, directories, and symlinks in IPFS. This data format is used to represent files and all their links and metadata in IPFS. UnixFS creates a block (or a tree of blocks) of linked objects. -UnixFS currently has [Javascript](https://github.com/ipfs/js-ipfs-unixfs) and [Go](https://github.com/ipfs/go-ipfs/tree/b3faaad1310bcc32dc3dd24e1919e9edf51edba8/unixfs) implementations. These implementations have modules written in to run different functions: +UnixFS currently has [Javascript](https://github.com/ipfs/js-ipfs-unixfs) and [Go](https://github.com/ipfs/kubo/tree/b3faaad1310bcc32dc3dd24e1919e9edf51edba8/unixfs) implementations. These implementations have modules written in to run different functions: - **Data Formats**: manage the serialization/deserialization of UnixFS objects to protocol buffers diff --git a/docs/concepts/glossary.md b/docs/concepts/glossary.md index a49e6a19d..3e133351f 100644 --- a/docs/concepts/glossary.md +++ b/docs/concepts/glossary.md @@ -66,7 +66,7 @@ A Block is a binary blob of data identified by a [CID](#cid). It could be raw by ### Bootstrap node -A Bootstrap Node is a trusted peer on the IPFS network through which an IPFS node learns about other peers on the network. Both go-ipfs and js-ipfs use bootstrap nodes to enter the Distributed Hash Table (DHT). See [Bootstrap](../concepts/nodes/#bootstrap) +A Bootstrap Node is a trusted peer on the IPFS network through which an IPFS node learns about other peers on the network. Both Kubo and js-ipfs use bootstrap nodes to enter the Distributed Hash Table (DHT). See [Bootstrap](../concepts/nodes/#bootstrap) ## C @@ -108,7 +108,7 @@ Unlimited relay that requires some external ACL to control resource usage. [See ### Circuit relay v2 -Truly decentralized relay implementation that provides a limited relay for things like [hole punching](#hole-punching). Support for this type of relay was introduced in go-ipfs 0.11. [See specification](https://github.com/libp2p/specs/blob/master/relay/circuit-v2.md). +Truly decentralized relay implementation that provides a limited relay for things like [hole punching](#hole-punching). Support for this type of relay was introduced in Kubo 0.11. [See specification](https://github.com/libp2p/specs/blob/master/relay/circuit-v2.md). ### Codec @@ -154,7 +154,7 @@ Did you mean [IPLD Data Model](https://ipld.io/glossary/#data-model)? 
### DataStore -The Datastore is the on-disk storage system used by an IPFS node. Configuration parameters control the location, size, construction, and operation of the datastore. [More about Datastore](https://github.com/ipfs/go-ipfs/blob/master/docs/config.md#datastore) +The Datastore is the on-disk storage system used by an IPFS node. Configuration parameters control the location, size, construction, and operation of the datastore. [More about Datastore](https://github.com/ipfs/kubo/blob/master/docs/config.md#datastore) ### DCUtR @@ -162,7 +162,7 @@ Direct Connection Upgrade through Relay (DCUtR) protocol enables [hole punching] ### Delegate routing node -GO-IPFS nodes with their API ports exposed and some HTTP API commands accessible. JS-IPFS nodes use them to query the DHT and also publish content without having to actually run DHT logic on their own. See [Delegate routing](../concepts/nodes/#types) +[Kubo](#kubo) nodes with a subset of RPC API commands exposed. JS-IPFS nodes use them to query the DHT and also publish content without having to actually run DHT logic on their own. See [Delegate routing](../concepts/nodes/#types) ### DHT @@ -194,7 +194,7 @@ The Decentralized Web (DWeb) looks like today's World Wide Web, but it is built ### Filestore -An experimental data store used when `--nocopy` is passed to `ipfs add`. It stores the [UnixFS](#unixfs) data components of blocks as files on the file system instead of as blocks. This allows adding content to IPFS without duplicating the content in the IPFS datastore. [More about Filestore experiment](https://github.com/ipfs/go-ipfs/blob/master/docs/experimental-features.md#ipfs-filestore) +An experimental data store used when `--nocopy` is passed to `ipfs add`. It stores the [UnixFS](#unixfs) data components of blocks as files on the file system instead of as blocks. This allows adding content to IPFS without duplicating the content in the IPFS datastore. [More about Filestore experiment](https://github.com/ipfs/kubo/blob/master/docs/experimental-features.md#ipfs-filestore) ## G @@ -206,9 +206,9 @@ An IPFS Gateway acts as a bridge between traditional web browsers and IPFS. Thro Garbage Collection (GC) is the process within each IPFS node of clearing out cached files and blocks. Nodes need to clear out previously cached resources to make room for new resources. [Pinned resources](#pinning) are never deleted. -### GO-IPFS node +### GO-IPFS -The primary IPFS reference implementation, i.e., implements all requirements from the corresponding IPFS specification. It runs on servers and user machines with full IPFS capabilities, enabling experimentation. See [Nodes > GO-IPFS](../concepts/nodes/#go-ipfs). +Old name of [Kubo](#kubo). ### Graph @@ -248,9 +248,9 @@ The InterPlanetary Name System (IPNS) is a system for creating and updating muta ## J -### JS-IPFS node +### JS-IPFS -* Runs in the browser with a limited set of capabilities. See [Nodes > JS-IPFS](../concepts/nodes/#implementations). +An implementation of IPFS written entirely in JavaScript. It runs in a Browser, a Service Worker, Electron and Node.js. See [Nodes > JS-IPFS](../concepts/nodes/#js-ipfs) ### JSON @@ -258,6 +258,10 @@ JavaScript Object Notation (JSON) is a lightweight data-interchange format. JSON ## K +### Kubo + +AKA [go-ipfs](#go-ipfs). The earliest and most widely used implementation of IPFS, written in Go. It runs on servers and user machines with full IPFS capabilities. See [Nodes > Kubo](../concepts/nodes/#kubo). 
+ ## L ### LAN @@ -456,7 +460,7 @@ The Unix File System (UnixFS) is the data format used to represent files and all ### Urlstore -An experimental data store similar to [`filestore`](#filestore), but it retrieves blocks contents via a HTTP URL instead of a local filesystem. [More about urlstore experiment](https://github.com/ipfs/go-ipfs/blob/master/docs/experimental-features.md#ipfs-urlstore) +An experimental data store similar to [`filestore`](#filestore), but it retrieves blocks contents via a HTTP URL instead of a local filesystem. [More about urlstore experiment](https://github.com/ipfs/kubo/blob/master/docs/experimental-features.md#ipfs-urlstore) ## V diff --git a/docs/concepts/ipfs-gateway.md b/docs/concepts/ipfs-gateway.md index 60b753c08..fcd39f4f6 100644 --- a/docs/concepts/ipfs-gateway.md +++ b/docs/concepts/ipfs-gateway.md @@ -4,7 +4,7 @@ description: Learn why gateways are an important part of using IPFS in conjuncti related: 'IPFS Docs: Address IPFS on the Web': /how-to/address-ipfs-on-web/ 'IPFS public gateway checker': https://ipfs.github.io/public-gateway-checker/ - 'GitHub repo: Gateway summary from go-ipfs': https://github.com/ipfs/go-ipfs/blob/master/docs/gateway.md + 'Gateway specifications': https://github.com/ipfs/specs/blob/main/http-gateways/#readme 'Article: Solving the IPFS Gateway Problem (Pinata)': https://medium.com/pinata/the-ipfs-gateway-problem-64bbe7eb8170 'Tutorial: Setting up an IPFS gateway on Google Cloud Platform (Stacktical)': https://blog.stacktical.com/ipfs/gateway/dapp/2019/09/21/ipfs-server-google-cloud-platform.html --- @@ -103,7 +103,7 @@ Path-resolving gateways, however, violate the [same-origin policy](https://en.wi Subdomain resolution style maintains compliance with the [single-origin policy](https://en.wikipedia.org/wiki/Same-origin_policy). The canonical form of access, `https://{CID}.ipfs.{gatewayURL}/{optional path to resource}`, causes the browser to interpret each returned file as being from a different origin. -Subdomain resolution support began with [Go-IPFS](https://github.com/ipfs/go-ipfs) release `0.5.0`. +Subdomain resolution support began with [Kubo](https://github.com/ipfs/kubo) release `0.5.0`. #### DNSlink @@ -276,5 +276,6 @@ No. The ipfs.io gateway is one of many portals used to view content stored by th ## Learning more -- [Gateway configuration options](https://github.com/ipfs/go-ipfs/blob/master/docs/config.md#gateway) - [A Practical Explainer for IPFS Gateways – Part 1](https://blog.ipfs.io/2022-06-09-practical-explainer-ipfs-gateways-1/), [Part 2](https://blog.ipfs.io/2022-06-30-practical-explainer-ipfs-gateways-2/) +- [Kubo: Gateway configuration options](https://github.com/ipfs/kubo/blob/master/docs/config.md#gateway) +- [Gateway specifications](https://github.com/ipfs/specs/blob/main/http-gateways/#readme) \ No newline at end of file diff --git a/docs/concepts/nodes.md b/docs/concepts/nodes.md index 5a94b5ea3..c52cc56e5 100644 --- a/docs/concepts/nodes.md +++ b/docs/concepts/nodes.md @@ -13,7 +13,7 @@ You're likely to see the term _node_ throughout the IPFS docs, issues, and relat * _node_: Use _node_ when you're referring to an individual point on the network. It's a very general term. For example, when you open IPFS Desktop, you establish yourself as a node with the potential to interact with other nodes. See [Configure a node](https://docs.ipfs.io/how-to/configure-node/). 
* _peer_: Use _peer_ when you're talking about the relationship of one node (even your own) to other nodes. It refers to their relationship as equals, with no central authority, so your node is a peer to other peers. See [Observe peers](../how-to/observe-peers/), [Exchange files between nodes](../how-to/exchange-files-between-nodes/), and [Peering with content providers](https://docs.ipfs.io/how-to/peering-with-content-providers/). * _daemon_: Use _daemon_ when talking about a node's activity status. When a node is online and running in the background, listening for requests for its data, it's called a _daemon_. See [Take your node online](../how-to/command-line-quick-start/#take-your-node-online) - * _instance_: Use _instance_ when talking about a library or program, such as a Go or JS version, running on an IPFS node at a particular point in time. The peer ID is the same, so it's still the same _node_ as far as the IPFS network is concerned. See [Go-IPFS](../reference/go/api/) and [JS-IPFS](../reference/js/api/#ipfs-and-javascript). + * _instance_: Use _instance_ when talking about a library or program, such as a Go or JS version, running on an IPFS node at a particular point in time. The peer ID is the same, so it's still the same _node_ as far as the IPFS network is concerned. See [Kubo](../reference/go/api/) and [JS-IPFS](../reference/js/api/#ipfs-and-javascript). * __Data nodes__, Use _data nodes_ when talking about actual pieces of data on IPFS, such as DAG nodes, UnixFS nodes, and IPLD nodes. When you add a file with the `ipfs add myfile.txt` command, IPFS breaks them up into several nodes that each contain a chunk of the file and are linked to each other. See [Merkle Directed Acyclic Graphs (DAGs)](../concepts/merkle-dag/), [Unix File System (UnixFS)](../concepts/file-systems/#unix-file-system-unixfs), and stay tuned for [InterPlanetary Linked Data (IPLD) model](../concepts/ipld/) docs, which is in progress. @@ -35,7 +35,7 @@ Use to make a UnixFS DAG publicly available by calling `ipfs refs -r ` on a Features of a preload node: -- They are Go-IPFS nodes with API ports exposed. Some HTTP API commands are accessible. +- They are Kubo nodes with API ports exposed. Some HTTP API commands are accessible. - Used by JS-IPFS nodes running in browser contexts. - JS-ipfs nodes remain connected to the libp2p swarm ports of all preload nodes by having preload nodes on the bootstrap list. - Often on the same _server_ as a [delegate routing node](#delegate-routing), though both the delegate routing service and preload service are addressed differently. This is done by having different multiaddrs that resolve to the same machine. @@ -55,27 +55,27 @@ If an IPFS node deems itself unreachable by the public internet, IPFS nodes may Features of a relay node: - Implements either [v1](https://github.com/libp2p/specs/blob/master/relay/circuit-v1.md) or [v2](https://github.com/libp2p/specs/blob/master/relay/circuit-v2.md) of the Circuit Relay protocol. -- Can be either Go-IPFS or JS-IPFS nodes; however there are standalone implementations as well: +- Can be either Kubo or JS-IPFS nodes; however there are standalone implementations as well: - [js-libp2p-relay-server](https://github.com/libp2p/js-libp2p-relay-server) (supports circuit v1) - [go-libp2p-relay-daemon](https://github.com/libp2p/go-libp2p-relay-daemon) (supports circuit v1 & v2) -- They're used by both Go-IPFS and JS-IPFS nodes. 
- - JS-IPFS nodes can also use relay nodes to overcome the lack of transport compatibility within the JS-IPFS implementation. A browser node with WebSockets/webRTC transports can talk with a Go-IPFS node that only communicates through TCP using a relay that supports both transports. This is not enabled by default and needs to be set up. +- They're used by both Kubo and JS-IPFS nodes. + - JS-IPFS nodes can also use relay nodes to overcome the lack of transport compatibility within the JS-IPFS implementation. A browser node with WebSockets/webRTC transports can talk with a Kubo node that only communicates through TCP using a relay that supports both transports. This is not enabled by default and needs to be set up. Limitations of relay nodes: - v1 relays can be used by anyone without any limits, unless [go-libp2p-relay-daemon](https://github.com/libp2p/go-libp2p-relay-daemon) is used with ACLs (Access Control Lists) set up. - v2 relays are "limited relays" that are designed to be used for [Direct Connection Upgrade through Relay](https://github.com/libp2p/specs/blob/master/relay/DCUtR.md) (aka hole punching). -- Not configurable in go-ipfs; uses a preset list of relays +- Not configurable in Kubo; uses a preset list of relays See [p2p-circuit relay](https://github.com/libp2p/specs/tree/master/relay) ### Bootstrap -Both Go-IPFS and JS-IPFS nodes use bootstrap nodes to initially enter the DHT. +Both Kubo and JS-IPFS nodes use bootstrap nodes to initially enter the DHT. Features of a bootstrap node: - All default bootstrap nodes are part of the public DHT. -The list of bootstrap nodes a Go-IPFS or JS-IPFS node connects to is configurable in their config files. +The list of bootstrap nodes a Kubo or JS-IPFS node connects to is configurable in their config files. Limitations of a bootstrap node: @@ -89,8 +89,8 @@ When IPFS nodes are unable to run Distributed Hash Table (DHT) logic on their ow Features of a delegate routing node: -- They are Go-IPFS nodes with their API ports exposed and some API commands accessible under `/api/v0`. -- Usable by both Go-IPFS and JS-IPFS nodes. +- They are Kubo nodes with their API ports exposed and some API commands accessible under `/api/v0`. +- Usable by both Kubo and JS-IPFS nodes. - JS-IPFS nodes use them to query the DHT and also publish content without having to actually run DHT logic on their own. - Often on the same _server_ as a [preload](#preload) node, though both the delegate routing service and preload service are addressed differently. This is done by having different multiaddrs that resolve to the same machine. - Delegate routing nodes are in the default JS-IPFS configuration as bootstrap nodes, so they will maintain libp2p swarm connections to them at all times. @@ -102,26 +102,33 @@ Limitations of a delegate routing node: ## Implementations -Protocol Labs manages two primary implementations of the IPFS spec: Go-IPFS and JS-IPFS. These implementations use specific types of nodes to perform server, browser, and other client functions. +Protocol Labs manages two implementations of the IPFS spec: Kubo and JS-IPFS. These implementations use specific types of nodes to perform server, browser, and other client functions. -### Go-IPFS +### Kubo -The Go implementation is designed to run on servers and user machines with full IPFS capabilities, enabling experimentation. New IPFS features are usually created on Go-IPFS before any other implementation. +An implementation of IPFS in Go.
Designed to run on servers and user machines with full IPFS capabilities, enabling experimentation. New IPFS features are usually created on Kubo before any other implementation. +More at [GitHub](https://github.com/ipfs/kubo#readme). Features include: - TCP and QUIC transports are enabled by default. - `/ws/` transport disabled by default. - HTTP gateway with subdomain support for origin isolation between content roots. -- Various [experimental features](https://github.com/ipfs/go-ipfs/blob/master/docs/experimental-features.md) +- Various [experimental features](https://github.com/ipfs/kubo/blob/master/docs/experimental-features.md) -See [API > Working with Go](https://docs.ipfs.io/reference/go/api/#working-with-go) +See [Working with Go](../reference/go/api/#working-with-go) and [CLI Quick Start](../how-to/command-line-quick-start.md) ### JS-IPFS -The Javascript implementation is designed to run in the browser with a limited set of IPFS capabilities. +An implementation of IPFS written entirely in JavaScript. It runs in a browser, a Service Worker, Electron and Node.js. Capabilities depend on the runtime. -Features include: +More at [js.ipfs.io](https://js.ipfs.io) and [GitHub](https://github.com/ipfs/js-ipfs#readme). + +#### JS-IPFS node in a browser + +When in a browser, runs with a limited set of IPFS capabilities. + +Browser features include: - Can connect to server nodes using secure WebSockets. - WSS requires manual setup of TLS at the server. @@ -130,7 +137,7 @@ Specific limitations of the JS-IPFS implementation are: - Unless using WSS, a JS-IPFS node cannot connect to the main public DHT. They will only connect to other JS-IPFS nodes. -- The performance of the DHT is not on-par with the Go-IPFS implementation. +- The performance of the DHT is not on-par with the Kubo implementation. - The HTTP gateway is present, but it has no subdomain support (can't open TCP port) -See [More about IPFS Node](../how-to/command-line-quick-start.md#take-your-node-online) +More at [js.ipfs.io](https://js.ipfs.io) and [GitHub](https://github.com/ipfs/js-ipfs#readme). diff --git a/docs/concepts/persistence.md b/docs/concepts/persistence.md index a414a96ff..671d5349e 100644 --- a/docs/concepts/persistence.md +++ b/docs/concepts/persistence.md @@ -21,13 +21,13 @@ To ensure that data _persists_ on IPFS, and is not deleted during garbage collec [Garbage collection]() is a form of automatic resource management widely used in software development. The garbage collector attempts to reclaim memory occupied by objects that are no longer in use. IPFS uses garbage collection to free disk space on your IPFS node by deleting data that it thinks is no longer needed. -The IPFS garbage collector is configured in the `Datastore`section of [the go-ipfs config file](https://github.com/ipfs/go-ipfs/blob/master/docs/config.md). The important settings related to the garbage collector are: +The IPFS garbage collector is configured in the `Datastore` section of [the Kubo config file](https://github.com/ipfs/kubo/blob/master/docs/config.md). The important settings related to the garbage collector are: - `StorageGCWatermark`: The percentage of the `StorageMax` value at which a garbage collection will be triggered automatically, if the daemon is running with automatic garbage collection enabled. The default is 90. - `GCPeriod`: Specify how frequently garbage collection should run. Only used if automatic garbage collection is enabled. The default is 1 hour.
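These fields all live under `Datastore` in the Kubo config file. As a minimal sketch, assuming a stock Kubo install with its repo in the default location, the same values can be read and adjusted from the command line with `ipfs config`; the values shown are only illustrative:

```bash
# Read the current garbage-collection settings
ipfs config Datastore.StorageMax
ipfs config Datastore.StorageGCWatermark
ipfs config Datastore.GCPeriod

# Adjust them; numeric values need --json, plain strings do not
ipfs config Datastore.StorageMax "20GB"
ipfs config --json Datastore.StorageGCWatermark 80
ipfs config Datastore.GCPeriod "2h"
```

Edits made this way are picked up the next time the daemon starts.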
-To manually start garbage collection, [run `ipfs repo gc`](../reference/cli.md#ipfs-repo-gc): +To manually start garbage collection, [run `ipfs repo gc`](../reference/kubo/cli.md#ipfs-repo-gc): ```bash ipfs repo gc @@ -43,7 +43,7 @@ To enable automatic garbage collection use `--enable-gc` when starting the IPFS ipfs daemon --enable-gc > Initializing daemon... -> go-ipfs version: 0.9.0 +> Kubo version: 0.9.0 > Repo version: 10 > ... ``` @@ -55,7 +55,7 @@ If you use IPFS Desktop, you can trigger garbage collection by clicking on the t ## Pinning in context An IPFS node can protect data from garbage collection based on different kinds of user events. -- The universal way is by adding a low-level [local pin](../how-to/pin-files.md). This works for all data types and can be done manually, but if you add a file using the CLI command [`ipfs add`](../reference/cli.md#ipfs-add), your IPFS node will automatically pin that file for you. +- The universal way is by adding a low-level [local pin](../how-to/pin-files.md). This works for all data types and can be done manually, but if you add a file using the CLI command [`ipfs add`](../reference/kubo/cli.md#ipfs-add), your IPFS node will automatically pin that file for you. - When working with files and directories, a better way may be to add them to the local [Mutable File System (MFS)](glossary.md#mfs). This protects the data from garbage collection in the same way as local pinning, but is somewhat easier to manage. diff --git a/docs/concepts/privacy-and-encryption.md b/docs/concepts/privacy-and-encryption.md index c8429c56c..b25609f33 100644 --- a/docs/concepts/privacy-and-encryption.md +++ b/docs/concepts/privacy-and-encryption.md @@ -31,7 +31,7 @@ This is one of the advantages of IPFS over traditional legacy web hosting. It me The other half of the equation when considering the prospect of IPFS traffic monitoring is that nodes' unique identifiers are themselves public. Just like with CIDs, every individual IPFS node has its own public identifier (known as a PeerID), such as `QmRGgYP1P5bjgapLaShMVhGMSwGN9SfYG3CM2TfhpJ3igE`. -While a long string of letters and numbers may not be a "Johnny Appleseed" level of human-readable specificity, your PeerID is still a long-lived, unique identifier for your node. Keep in mind that it's possible to do a DHT lookup on your PeerID and, particularly if your node is regularly running from the same location (like your home), find your IP address. (It's possible to [reset your PeerID](../reference/cli.md#ipfs-key-rotate) if necessary, but similarly to changing your user ID on legacy web apps and services, is likely to involve extra effort.) Additionally, longer-term monitoring of the public IPFS network could yield information about what CIDs your node is requesting and/or reproviding and when. +While a long string of letters and numbers may not be a "Johnny Appleseed" level of human-readable specificity, your PeerID is still a long-lived, unique identifier for your node. Keep in mind that it's possible to do a DHT lookup on your PeerID and, particularly if your node is regularly running from the same location (like your home), find your IP address. (It's possible to [reset your PeerID](../reference/kubo/cli.md#ipfs-key-rotate) if necessary, but similarly to changing your user ID on legacy web apps and services, is likely to involve extra effort.) Additionally, longer-term monitoring of the public IPFS network could yield information about what CIDs your node is requesting and/or reproviding and when. 
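As a minimal sketch of what such a lookup looks like, assuming a running Kubo daemon (the PeerID is the illustrative one quoted above):

```bash
# Show this node's own PeerID and the addresses it announces
ipfs id

# Anyone on the public network can ask the DHT for the addresses
# currently announced for a given PeerID
ipfs dht findpeer QmRGgYP1P5bjgapLaShMVhGMSwGN9SfYG3CM2TfhpJ3igE
```

Newer Kubo releases expose the same lookup under `ipfs routing findpeer`.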
## Enhancing your privacy @@ -39,7 +39,7 @@ If there are situations in which you know you'll need to remain private but stil ### Controlling what you share -By default, an IPFS node announces to the rest of the network that it is willing to share every CID in its cache (in other words, _reproviding_ content that it's retrieved from other nodes), as well as CIDs that you've explicitly pinned or added to MFS to make them consistently available. If you'd like to disable this behavior, you can do so in the [reprovider settings](https://github.com/ipfs/go-ipfs/blob/master/docs/config.md#reprovider) of your node's config file. +By default, an IPFS node announces to the rest of the network that it is willing to share every CID in its cache (in other words, _reproviding_ content that it's retrieved from other nodes), as well as CIDs that you've explicitly pinned or added to MFS to make them consistently available. If you'd like to disable this behavior, you can do so in the [reprovider settings](https://github.com/ipfs/kubo/blob/master/docs/config.md#reprovider) of your node's config file. Changing your reprovider settings to "pinned" or "roots" will keep your node from announcing itself as a provider of non-pinned CIDs that are in your cache — so you can still use pinning to provide other nodes with content that you care about and want to make sure continues to be available over IPFS. @@ -90,7 +90,7 @@ If you're curious about implementing encryption with IPFS on a large scale, you ### Creating a private network -[Private IPFS networks](https://github.com/ipfs/go-ipfs/blob/release-v0.9.0/docs/experimental-features.md#private-networks) provide full protection from public monitoring but can lack the scale benefits provided by the public IPFS network. A private network operates identically to the public one, but with one critical difference: it can only be accessed by nodes that have been given access, and it will only ever scale to those nodes. This means that the benefits of the public IPFS network's massive scale, such as geographic resiliency and speedy retrieval of high-demand content, won't be realized unless the private network is explicitly designed and scaled with this in mind. +[Private IPFS networks](https://github.com/ipfs/kubo/blob/release-v0.9.0/docs/experimental-features.md#private-networks) provide full protection from public monitoring but can lack the scale benefits provided by the public IPFS network. A private network operates identically to the public one, but with one critical difference: it can only be accessed by nodes that have been given access, and it will only ever scale to those nodes. This means that the benefits of the public IPFS network's massive scale, such as geographic resiliency and speedy retrieval of high-demand content, won't be realized unless the private network is explicitly designed and scaled with this in mind. Running a private network can be a great option for corporate implementations of IPFS — for one example, see [this case study on Morpheus.Network](case-study-morpheus.md) — because the network's topology can be specified and built exactly as desired. 
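A rough sketch of how such a private network is bootstrapped, following the experimental private-networks feature linked above (the key-generation command, example address, and placeholder peer ID are assumptions for illustration only):

```shell
# Generate a shared swarm key; every node in the private network
# must carry the same file at $IPFS_PATH/swarm.key.
printf '/key/swarm/psk/1.0.0/\n/base16/\n%s\n' "$(openssl rand -hex 32)" > ~/.ipfs/swarm.key

# Drop the public bootstrap peers and add one of your own nodes instead,
# then restart the daemon on every member of the network.
ipfs bootstrap rm --all
ipfs bootstrap add /ip4/203.0.113.10/tcp/4001/p2p/<peer-id-of-a-private-node>
```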
diff --git a/docs/how-to/README.md b/docs/how-to/README.md index d95a364a6..6eee0331b 100644 --- a/docs/how-to/README.md +++ b/docs/how-to/README.md @@ -15,7 +15,7 @@ No matter what you're looking to do with IPFS, you can find how-tos and tutorial See the site navigation menu for all our how-tos, organized by topic area, including favorites like these: - **Customize your install** by [configuring a node](configure-node.md), modifying the [bootstrap list](modify-bootstrap-list.md), and more -- **Learn how to manage files** in IPFS with tutorials on concepts like [pinning](pin-files.md), how to [work with blocks](work-with-blocks.md), learning how to [troubleshoot file transfers](https://github.com/ipfs/go-ipfs/blob/master/docs/file-transfer.md), and understanding [working with large datasets](https://github.com/ipfs/archives/tree/master/tutorials/replicating-large-datasets) +- **Learn how to manage files** in IPFS with tutorials on concepts like [pinning](pin-files.md), how to [work with blocks](work-with-blocks.md), learning how to [troubleshoot file transfers](https://github.com/ipfs/kubo/blob/master/docs/file-transfer.md), and understanding [working with large datasets](https://github.com/ipfs/archives/tree/master/tutorials/replicating-large-datasets) - **See how to work with peers** using methods like [customizing libp2p bundles](https://github.com/ipfs-examples/js-ipfs-examples/tree/master/examples/custom-libp2p) and using circuit relay - **Understand website hosting** by starting with how to [host a simple single-page site](websites-on-ipfs/single-page-website.md) - **Learn how to build apps** on IPFS, starting with [exploring the IPFS API](https://github.com/ipfs/camp/tree/master/CORE_AND_ELECTIVE_COURSES/CORE_COURSE_C) and [making a basic libp2p app](https://github.com/ipfs/camp/tree/master/CORE_AND_ELECTIVE_COURSES/CORE_COURSE_B) diff --git a/docs/how-to/address-ipfs-on-web.md b/docs/how-to/address-ipfs-on-web.md index ef4cf0f7e..19530489f 100644 --- a/docs/how-to/address-ipfs-on-web.md +++ b/docs/how-to/address-ipfs-on-web.md @@ -55,7 +55,7 @@ For example, a website can load static assets from content-addressed paths: User agents that support IPFS, such as a browser with [ipfs-companion](https://docs.ipfs.io/install/ipfs-companion/), may recognize the `/ipfs/` content path and load the related asset over IPFS instead of HTTP. User agents without IPFS support still get the correct data from the original HTTP server. ::: -### Path gateway +## Path gateway In the most basic scheme, a URL path used for content addressing is effectively a resource name without a canonical location. 
The HTTP server provides the location part, which makes it possible for browsers to interpret an IPFS content path as relative to the current server and just work without a need for any conversion: @@ -78,7 +78,7 @@ https://ipfs.io/ipfs/QmT5NvUtoM5nWFfrQdVrFtvGfKFmG7AHE8P34isapyhCxX/wiki/Mars.ht https://ipfs.io/ipns/tr.wikipedia-on-ipfs.org/wiki/Anasayfa.html ``` -### Subdomain gateway +## Subdomain gateway When [origin-based security](https://en.wikipedia.org/wiki/Same-origin_policy) is needed, [CIDv1](../concepts/content-addressing.md#identifier-formats) in case-insensitive encoding such as Base32 or Base36 should be used in the subdomain: @@ -94,14 +94,14 @@ https://bafybeiemxf5abjwjbikoz4mc3a3dla6ual3jsgpdr4cjr3oz3evfyavhwq.ipfs.cf-ipfs https://bafybeiemxf5abjwjbikoz4mc3a3dla6ual3jsgpdr4cjr3oz3evfyavhwq.ipfs.localhost:8080/wiki/ ``` -#### Native support in go-ipfs 0.5+ +#### Native support in Kubo 0.5+ -[go-ipfs](https://dist.ipfs.io/#go-ipfs) provides native support for subdomain gateways on hostnames defined in the [`Gateway.PublicGateways`](https://github.com/ipfs/go-ipfs/blob/master/docs/config.md#gatewaypublicgateways) configuration map. +[Kubo](https://dist.ipfs.io/#kubo) provides native support for subdomain gateways on hostnames defined in the [`Gateway.PublicGateways`](https://github.com/ipfs/kubo/blob/master/docs/config.md#gatewaypublicgateways) configuration map. Learn more about daemon configuration for hosting a public gateway: -- [`Gateway.PublicGateways` docs](https://github.com/ipfs/go-ipfs/blob/master/docs/config.md#gatewaypublicgateways) for defining gateway behavior on specified hostnames -- [`Gateway` recipes](https://github.com/ipfs/go-ipfs/blob/master/docs/config.md#gateway-recipes) with ready to use one-liners for most common use cases +- [`Gateway.PublicGateways` docs](https://github.com/ipfs/kubo/blob/master/docs/config.md#gatewaypublicgateways) for defining gateway behavior on specified hostnames +- [`Gateway` recipes](https://github.com/ipfs/kubo/blob/master/docs/config.md#gateway-recipes) with ready to use one-liners for most common use cases ::: warning Known issues @@ -119,7 +119,7 @@ See the next section to learn how to convert an existing CIDv0 to a DNS-safe rep If you have content identified by an older CIDv0, there are easy ways to safely represent it as CIDv1 for use in subdomains and other case-insensitive contexts. -#### Automatic — leverage the gateway in go-ipfs +#### Automatic — leverage the gateway in Kubo **TL;DR:** Using a subdomain gateway as a drop-in replacement for a path one removes the need for manual CID conversion. @@ -157,7 +157,7 @@ $ ipfs cid format -v 1 -b base36 --codec libp2p-key QmNnooDu7bfjPFoTZYxMNLWUQJyr k2k4r8jl0yz8qjgqbmc2cdu5hkqek5rj6flgnlkyywynci20j0iuyfuj ``` -### DNSLink gateway +## DNSLink gateway The gateway provided by the IPFS daemon understands the `Host` header present in HTTP requests and will check if [DNSLink](../concepts/dnslink.md) exists for a specified [domain name](https://en.wikipedia.org/wiki/Fully_qualified_domain_name). If DNSLink is present, the gateway will return content from a path resolved via DNS TXT record. 
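To make the DNSLink resolution step concrete: the gateway looks for a TXT record on the `_dnslink.` subdomain of the requested host. A quick way to check what it would resolve, using a domain that already appears as an example in these docs:

```shell
# A DNSLink record maps a hostname to a content path:
#   _dnslink.<your-domain>.  TXT  "dnslink=/ipfs/<cid>"

# Inspect the record a gateway would use for a given host:
dig +short TXT _dnslink.tr.wikipedia-on-ipfs.org
```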
diff --git a/docs/how-to/best-practices-for-ipfs-builders.md b/docs/how-to/best-practices-for-ipfs-builders.md index 9dc5e34cc..5e177b54f 100644 --- a/docs/how-to/best-practices-for-ipfs-builders.md +++ b/docs/how-to/best-practices-for-ipfs-builders.md @@ -15,12 +15,12 @@ There are two versions of CIDs ([Content Identifiers](../concepts/content-addres CIDv0 is simpler but much less flexible than CIDv1. It doesn't offer the future-proof and case-insensitive addressing that CIDv1 offers. You can quickly tell the difference between v0 and v1 CIDs, because v0 CIDs always start with `Qm`. Many of the existing IPFS tools still generate CIDs in v0, for example: - [IPFS Desktop](../install/ipfs-desktop/#ipfs-desktop) -- [/api/v0/add](..//reference/http/api/#api-v0-add), where the `cid-version` defaults to 0 unless an option that depends on CIDv1 is passed. +- [/api/v0/add](../reference/kubo/rpc/#api-v0-add), where the `cid-version` defaults to 0 unless an option that depends on CIDv1 is passed. Some features use CIDv1 by default: - `files` ([Mutable File System](../concepts/file-systems/#mutable-file-system-mfs)) -- `dag` operations ([ipfs object](https://docs.ipfs.io/reference/cli/#ipfs-dag)) +- `dag` operations ([ipfs object](../reference/kubo/cli/#ipfs-dag)) Use CIDv1 when you want: @@ -50,8 +50,8 @@ ipfs daemon From this point on, IPNS will be resolved using both the `pubsub` and the DHT. Learn more about the limitations of this experimental feature here:  -- [Experimental features > IPNS pubsub](https://github.com/ipfs/go-ipfs/blob/master/docs/experimental-features.md#ipns-pubsub) -- [Enable IPNS over pubsub by default, Issue 8591](https://github.com/ipfs/go-ipfs/issues/8591) +- [Experimental features > IPNS pubsub](https://github.com/ipfs/kubo/blob/master/docs/experimental-features.md#ipns-pubsub) +- [Enable IPNS over pubsub by default, Issue 8591](https://github.com/ipfs/kubo/issues/8591) ## Enable Garbage Collection if your data churn is expected to be high @@ -68,7 +68,7 @@ However, you may also have data that you value. To make sure that you keep data Then you can safely enable garbage collection for all other data. See: - [Garbage collection](../concepts/persistence/#garbage-collection) -- [api/v0/repo/gc](../reference/http/api/#api-v0-repo-gc) +- [api/v0/repo/gc](../reference/kubo/rpc/#api-v0-repo-gc) ## Use subdomain gateways or DNSLink when publishing apps for secure context and origin isolation diff --git a/docs/how-to/command-line-quick-start.md b/docs/how-to/command-line-quick-start.md index 513e2da32..a844aa72c 100644 --- a/docs/how-to/command-line-quick-start.md +++ b/docs/how-to/command-line-quick-start.md @@ -1,12 +1,12 @@ --- title: Command-line quick start legacyUrl: https://docs.ipfs.io/introduction/usage/ -description: Quick-start guide for installing and getting started with IPFS from the command line. +description: Quick-start guide for installing and getting started with Kubo IPFS from the command line. --- # Command-line quick start -If you're command-line savvy and just want to get up and running with IPFS right away, follow this quick-start guide. Please note that this guide assumes that you'll be installing go-ipfs, the reference implementation written in Go. +If you're command-line savvy and just want to get up and running with IPFS right away, follow this quick-start guide. Please note that this guide assumes that you'll be installing Kubo, the reference implementation written in Go. ::: tip Don’t want to use the command line right now? 
Give the desktop-app implementation of IPFS a try. It also does all the steps listed on this page automatically, so you can run IPFS from the terminal later whenever you want. [Download IPFS Desktop now](https://github.com/ipfs/ipfs-desktop) @@ -14,7 +14,7 @@ Don’t want to use the command line right now? Give the desktop-app implementat ## Prerequisites -If you have not yet installed Go-IPFS, follow the [install instructions](../install/command-line.md). +If you have not yet installed Kubo, follow the [install instructions](../install/command-line.md). ## Initialize the repository @@ -41,7 +41,7 @@ If you are running on a server in a data center, you should initialize IPFS with ipfs init --profile server ``` -There are a whole host of other configuration options you may want to set — check [the full reference](https://github.com/ipfs/go-ipfs/blob/master/docs/config.md) for more. +There are a whole host of other configuration options you may want to set — check [the full reference](https://github.com/ipfs/kubo/blob/master/docs/config.md) for more. The hash after `peer identity:` is your node’s ID and will be different from the one shown in the above output. Other nodes on the network use it to find and connect to you. You can run `ipfs id` at any time to get it again if you need it. @@ -100,7 +100,7 @@ Make a note of the TCP ports you receive. If they are different, use yours in th ::: danger NEVER EXPOSE THE RPC API TO THE PUBLIC INTERNET -The API port provides admin-level access to your IPFS node. See [RPC API docs](/reference/http/api/) for more information. +The API port provides admin-level access to your Kubo IPFS node. See [RPC API v0 docs](/reference/kubo/rpc/) for more information. ::: @@ -191,8 +191,8 @@ If you need to update, we recommend you install from the [canonical Go packages] ### Check that FUSE is installed -You need to install and set up FUSE in order to mount the file system. For more details on setting up FUSE, see [github.com/ipfs/go-ipfs/blob/master/docs/fuse.md](https://github.com/ipfs/go-ipfs/blob/master/docs/fuse.md) +You need to install and set up FUSE in order to mount the file system. For more details on setting up FUSE, see [github.com/ipfs/kubo/blob/master/docs/fuse.md](https://github.com/ipfs/kubo/blob/master/docs/fuse.md) ### Further help -The IPFS community is friendly and able to help! Get support from other IPFS developers in the official [IPFS forums](https://discuss.ipfs.io/), or join the conversation on [Discord](https://discord.com/invite/KKucsCpZmY). +The IPFS community is friendly and able to help! Get support from other IPFS developers in the official [IPFS forums](https://discuss.ipfs.io/), or join [community chat channels](../community/chat/). diff --git a/docs/how-to/companion-node-types.md b/docs/how-to/companion-node-types.md index c770ca7e4..581bf628e 100644 --- a/docs/how-to/companion-node-types.md +++ b/docs/how-to/companion-node-types.md @@ -23,25 +23,25 @@ An _external_ node can be any instance of an IPFS daemon that: - Runs outside of your web browser. - Exposes a _gateway_ and writeable _API_ over HTTP at TCP ports. -The [go-ipfs](https://github.com/ipfs/go-ipfs) implementation of IPFS is the recommended choice for running an external IPFS node. It's less power-hungry than other implementations and uses the `dhtclient` mode to decrease ambient bandwidth use and reduce battery drain. 
+The [Kubo](https://github.com/ipfs/kubo) implementation of IPFS is the recommended choice for running an external IPFS node. It's less power-hungry than other implementations and uses the `dhtclient` mode to decrease ambient bandwidth use and reduce battery drain. -A good practice is to run your go-ipfs daemon on localhost (`127.0.0.1`), as it provides: +A good practice is to run your Kubo daemon on localhost (`127.0.0.1`), as it provides: - Increased security: native IPFS used as end-to-end transport. - Better UX in the browser: no mixed-content warnings. - Improved performance: local loopback is used, so no network overhead. -You can get started with running a go-ipfs node on your local machine in several ways: +You can get started with running a Kubo node on your local machine in several ways: - [IPFS Desktop](https://github.com/ipfs-shipyard/ipfs-desktop) installs and manages a local node for you, as well as offering an easy, convenient user interface for managing files and peers. - If you're comfortable with the command line and don't need the convenience of the IPFS Desktop UI, follow the directions in the [command line quick-start guide](command-line-quick-start.md). -- Docker fans can run and use go-ipfs from [inside a Docker container](https://github.com/ipfs/go-ipfs#running-ipfs-inside-docker). +- Docker fans can run and use Kubo from [inside a Docker container](https://github.com/ipfs/kubo#running-ipfs-inside-docker). ## Native ### Provided by Brave -Users of the [Brave](https://brave.com/) browser (v1.19 or later) can enable native support for IPFS using a go-ipfs node built directly into the browser itself. This is a great way to experiment with IPFS without having to install or run IPFS Desktop or the command-line daemon. +Users of the [Brave](https://brave.com/) browser (v1.19 or later) can enable native support for IPFS using a Kubo node built directly into the browser itself. This is a great way to experiment with IPFS without having to install or run IPFS Desktop or the command-line daemon. This node type offers the same benefits as an [external](#external) node, with additional features provided within Brave itself: @@ -50,7 +50,7 @@ This node type offers the same benefits as an [external](#external) node, with a - Ability to change your preferred public gateway from Brave's settings page. - Options for default resolution of IPFS resources: through a public gateway, through a local node, or asking each time. - The IPFS node is managed by Brave itself: - - Automatic go-ipfs updates and migrations. + - Automatic Kubo updates and migrations. - Your node is only running when Brave is open. - You can start/stop your Brave-based node by clicking the power button icon in IPFS Companion's main menu. @@ -61,7 +61,7 @@ This node type offers the same benefits as an [external](#external) node, with a - `brave://settings/extensions` One-click Companion install and URI resolution settings - `brave://ipfs` - Status page for Brave's built-in go-ipfs node + Status page for Brave's built-in Kubo node ::: diff --git a/docs/how-to/companion-window-ipfs.md b/docs/how-to/companion-window-ipfs.md index 000b3afd8..e5ce7ce57 100644 --- a/docs/how-to/companion-window-ipfs.md +++ b/docs/how-to/companion-window-ipfs.md @@ -92,7 +92,7 @@ Optional `scope` and `permissions` attributes provide detailed information: ### What is a `window.ipfs`? -It is an IPFS proxy endpoint that enables you to obtain an IPFS API instance. 
Depending how IPFS Companion is configured, you may be talking directly to a `js-ipfs` node running in Companion, a `go-ipfs` daemon over `js-ipfs-http-client`, or a `js-ipfs` daemon over `js-ipfs-http-client` ... and potentially others in the future. Note that object returned by `window.ipfs.enable` is _not_ an instance of `js-ipfs` or `js-ipfs-http-client`, but is a proxy to one of them, so don't expect to be able to detect either of them or be able to use any undocumented or instance-specific functions. +It is an IPFS proxy endpoint that enables you to obtain an IPFS API instance. Depending how IPFS Companion is configured, you may be talking directly to a `js-ipfs` node running in Companion, a `kubo` daemon over `js-ipfs-http-client`, or a `js-ipfs` daemon over `js-ipfs-http-client` ... and potentially others in the future. Note that object returned by `window.ipfs.enable` is _not_ an instance of `js-ipfs` or `js-ipfs-http-client`, but is a proxy to one of them, so don't expect to be able to detect either of them or be able to use any undocumented or instance-specific functions. For information on available functions, see the [js-ipfs](https://github.com/ipfs/js-ipfs) and [js-ipfs-http-client](https://github.com/ipfs/js-ipfs-http-client) docs for available functions. If you find that some new functions are missing, the proxy might be out of date. Please check the [current status](https://github.com/tableflip/ipfs-postmsg-proxy#current-status) and submit a PR. diff --git a/docs/how-to/configure-node.md b/docs/how-to/configure-node.md index 54e94ed1d..8153d1647 100644 --- a/docs/how-to/configure-node.md +++ b/docs/how-to/configure-node.md @@ -5,5 +5,5 @@ description: IPFS nodes can be customzied using the configuration file. The defa # Configure a node -IPFS is configured through a json formatted text file, located by default at `~/.ipfs/config`. Implementation-specific information can be found within the [go-ipfs](https://github.com/ipfs/go-ipfs/blob/master/docs/config.md) and [js-ipfs](https://github.com/ipfs/js-ipfs/blob/master/docs/CONFIG.md) repositories. It is read once at node instantiation, either for an offline command, or when starting the daemon. Commands that execute on a running daemon do not read the config file at runtime. +IPFS is configured through a json formatted text file, located by default at `~/.ipfs/config`. Implementation-specific information can be found within the [Kubo](https://github.com/ipfs/kubo/blob/master/docs/config.md) and [js-ipfs](https://github.com/ipfs/js-ipfs/blob/master/docs/CONFIG.md) repositories. It is read once at node instantiation, either for an offline command, or when starting the daemon. Commands that execute on a running daemon do not read the config file at runtime. 
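A short sketch of the usual way to inspect and change that config file without editing the JSON by hand (the key shown is only an example):

```shell
# Print the whole config, or a single key:
ipfs config show
ipfs config Addresses.Gateway

# Set a value; because the file is read only at startup,
# restart the daemon for the change to take effect.
ipfs config Addresses.Gateway /ip4/127.0.0.1/tcp/8081
```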
diff --git a/docs/how-to/create-simple-chat-app.md b/docs/how-to/create-simple-chat-app.md index ac2bfd32d..a493d6925 100644 --- a/docs/how-to/create-simple-chat-app.md +++ b/docs/how-to/create-simple-chat-app.md @@ -34,7 +34,7 @@ To test and deploy your own version with your own nodes (recommended for deploym Besides [IPFS](/concepts/what-is-ipfs) (with CIDv1) and JavaScript, our chat app uses these technologies: -- [Libp2p](https://libp2p.io/)’s [WebRTC](https://www.npmjs.com/package/libp2p-webrtc-star)-Star and [circuit relay](https://docs.libp2p.io/concepts/circuit-relay) for discovery and connecting (with two libraries:  [js-ipfs](https://github.com/ipfs/js-ipfs/blob/master/docs/BROWSERS.md),  Bootstrap–with minified CSS), and [go-ipfs](https://docs.ipfs.io/reference/go/api/%23working-with-go) for p2p circuit connecting with websockets, +- [Libp2p](https://libp2p.io/)’s [WebRTC](https://www.npmjs.com/package/libp2p-webrtc-star)-Star and [circuit relay](https://docs.libp2p.io/concepts/circuit-relay) for discovery and connecting (with two libraries:  [js-ipfs](https://github.com/ipfs/js-ipfs/blob/master/docs/BROWSERS.md),  Bootstrap–with minified CSS), and [Kubo](https://docs.ipfs.io/reference/go/api/%23working-with-go) for p2p circuit connecting with websockets, - some [Python code](#advertising) that we supply for advertising, and - Libp2p’s experimental [PubSub](https://docs.libp2p.io/concepts/publish-subscribe) feature for publishing, with some tips for staying connected. @@ -75,7 +75,7 @@ This diagram demonstrates what a three-user network can look like. ![Network graph showing the paths nodes can use to discover and communicate with each other](./create-simple-chat-app/discovery-diagram.png) -The browser nodes can communicate with go-ipfs as well, so BrowserC doesn't have to be a browser at all, but instead could be a go-ipfs node! +The browser nodes can communicate with Kubo as well, so BrowserC doesn't have to be a browser at all, but instead could be a Kubo node! ### WebRTC-Star @@ -108,7 +108,7 @@ Please note that this how-to uses example star nodes — however, those won't ne ### p2p-circuit :::warning -This section is currently only relevant for go-ipfs versions **before** v0.11.0 as it's about circuit relay v1. There is currently no solution available to directly replace it, though most of the work [has been completed](https://github.com/libp2p/go-libp2p-relay-daemon). +This section is currently only relevant for Kubo versions **before** v0.11.0 as it's about circuit relay v1. There is currently no solution available to directly replace it, though most of the work [has been completed](https://github.com/libp2p/go-libp2p-relay-daemon). ::: WebRTC-Star is a very clean and effective method of P2P communications; however, sometimes NATs get in the way, so we use [p2p-circuit](https://docs.libp2p.io/concepts/circuit-relay) to get around that. @@ -204,7 +204,7 @@ setInterval(function(){ipfs.pubsub.publish("announce-circuit", "peer-alive");}, Like the star nodes, it will be important to host your own things, as the ones in this how-to could go offline at any moment. -For the purposes of this example, you'll need to do a few things on a server hosting your own [go-ipfs](https://github.com/ipfs/go-ipfs) node. You'll also need a working Nginx install setup, which will be used for SSL, which is a requirement for browsers. +For the purposes of this example, you'll need to do a few things on a server hosting your own [Kubo](https://github.com/ipfs/kubo) node. 
You'll also need a working Nginx install setup, which will be used for SSL, which is a requirement for browsers. First configure the Go node, enabling [WebSocket](https://en.wikipedia.org/wiki/WebSocket) support, and designate it as a relay so we can communicate with it from a browser by editing `~/.ipfs/config` to add the following settings: @@ -223,16 +223,16 @@ First configure the Go node, enabling [WebSocket](https://en.wikipedia.org/wiki/ ``` :::tip -Restart your go-ipfs node however you normally would (such as by using `systemctl --user restart ipfs`), and you're mostly set up! You've enabled regular WebSockets with relaying support; however, you still need secure WebSockets (outlined in the [SSL](https://docs.ipfs.io/how-to/create-simple-chat-app/%23ssl-nginx) section below) — otherwise browsers won't be able to connect to us. +Restart your Kubo node however you normally would (such as by using `systemctl --user restart ipfs`), and you're mostly set up! You've enabled regular WebSockets with relaying support; however, you still need secure WebSockets (outlined in the [SSL](https://docs.ipfs.io/how-to/create-simple-chat-app/%23ssl-nginx) section below) — otherwise browsers won't be able to connect to us. ::: ## Advertising -Using p2p-circuit can be a bit tricky. Once you connect to the relay from a browser, you're still not advertising that you're able to be reached through it\! For this purpose, this how-to includes a Python script that runs alongside go-ipfs and advertises the browser js-ipfs peers it encounters over [PubSub](https://docs.libp2p.io/concepts/publish-subscribe/) with a p2p-circuit [multiaddress](https://docs.libp2p.io/concepts/addressing/). +Using p2p-circuit can be a bit tricky. Once you connect to the relay from a browser, you're still not advertising that you're able to be reached through it\! For this purpose, this how-to includes a Python script that runs alongside Kubo and advertises the browser js-ipfs peers it encounters over [PubSub](https://docs.libp2p.io/concepts/publish-subscribe/) with a p2p-circuit [multiaddress](https://docs.libp2p.io/concepts/addressing/). Here is  the [Python script](https://gist.github.com/TheDiscordian/51962fea72f8d5a5c3bba79dd7009e1c). You can  run it with python `ipfs_peeradvertiser.py`. However, first ensure that you edit CIRCUITS with your own node's information, or you won't announce peers correctly, and they won't know how to use your relay to connect to other peers. -You can retrieve your own circuit info by running ipfs id on your go-ipfs node to get your PeerID. Then form the circuit URL like so: +You can retrieve your own circuit info by running ipfs id on your Kubo node to get your PeerID. 
Then form the circuit URL like so: ```shell /dns6/YOURDOMAIN.COM/tcp/4430/p2p/YOUR\_PEERID/p2p-circuit/p2p/ diff --git a/docs/how-to/make-service.md b/docs/how-to/make-service.md index 7f54b51b7..05e0f1510 100644 --- a/docs/how-to/make-service.md +++ b/docs/how-to/make-service.md @@ -19,9 +19,9 @@ package main import ( "fmt" - core "github.com/ipfs/go-ipfs/core" - corenet "github.com/ipfs/go-ipfs/core/corenet" - fsrepo "github.com/ipfs/go-ipfs/repo/fsrepo" + core "github.com/ipfs/kubo/core" + corenet "github.com/ipfs/kubo/core/corenet" + fsrepo "github.com/ipfs/kubo/repo/fsrepo" "code.google.com/p/go.net/context" ) @@ -93,10 +93,10 @@ import ( "io" "os" - core "github.com/ipfs/go-ipfs/core" - corenet "github.com/ipfs/go-ipfs/core/corenet" - peer "github.com/ipfs/go-ipfs/p2p/peer" - fsrepo "github.com/ipfs/go-ipfs/repo/fsrepo" + core "github.com/ipfs/kubo/core" + corenet "github.com/ipfs/kubo/core/corenet" + peer "github.com/ipfs/kubo/p2p/peer" + fsrepo "github.com/ipfs/kubo/repo/fsrepo" "golang.org/x/net/context" ) diff --git a/docs/how-to/mint-nfts-with-ipfs.md b/docs/how-to/mint-nfts-with-ipfs.md index 1f38c9dc9..fe0e5424c 100644 --- a/docs/how-to/mint-nfts-with-ipfs.md +++ b/docs/how-to/mint-nfts-with-ipfs.md @@ -523,7 +523,7 @@ We do a check to see if we've already pinned this CID since the API will return Finally, we call `ipfs.pin.remote.add`, passing in the name of the pinning service. When the pinning service receives the request, it will try to connect to our local IPFS node, and our local node will also try to connect to their IPFS nodes. Once they're connected, the service will fetch the CIDs we asked it to pin and store the data on their infrastructure. -To verify that the data was pinned, you can run `ipfs pin remote ls --service=pinata` to see a list of the content you've pinned to Pinata. If you don't already have a copy of IPFS installed on your machine, you can use the one bundled with Minty by running `npx go-ipfs pin remote ls --service=pinata` instead. Alternatively, you can log into the [Pinata website](https://pinata.cloud) and use the **Pin explorer** to view your data. +To verify that the data was pinned, you can run `ipfs pin remote ls --service=pinata` to see a list of the content you've pinned to Pinata. If you don't already have a copy of IPFS installed on your machine, you can use the one bundled with Minty by running `npx kubo pin remote ls --service=pinata` instead. Alternatively, you can log into the [Pinata website](https://pinata.cloud) and use the **Pin explorer** to view your data. ## Next steps diff --git a/docs/how-to/pin-files.md b/docs/how-to/pin-files.md index 79f03a817..0a266dd8e 100644 --- a/docs/how-to/pin-files.md +++ b/docs/how-to/pin-files.md @@ -161,6 +161,6 @@ While you can use a remote pinning service's own GUI, CLI, or other dev tools to - The [IPFS Pinning Service API](https://ipfs.github.io/pinning-services-api-spec/) offers a specification that enables developers to integrate any pinning service that supports the spec, or create their own. Thanks to the OpenAPI spec format, both clients and servers can be [generated](https://github.com/ipfs/pinning-services-api-spec#code-generation) from the YAML spec file. -- If you use go-ipfs 0.8+ from the command line, you have access to `ipfs pin remote` commands acting as a client for interacting with pinning service APIs. 
Add your favorite pinning service(s), pin CIDs under human-readable names, get pin statuses, and more, straight from the CLI. [Learn how →](work-with-pinning-services.md) +- If you use Kubo 0.8+ from the command line, you have access to `ipfs pin remote` commands acting as a client for interacting with pinning service APIs. Add your favorite pinning service(s), pin CIDs under human-readable names, get pin statuses, and more, straight from the CLI. [Learn how →](work-with-pinning-services.md) - [IPFS Desktop](https://github.com/ipfs-shipyard/ipfs-desktop) and its equivalent in-browser IPFS web interface, the [IPFS Web UI](https://github.com/ipfs-shipyard/ipfs-webui), both support remote pinning services, so you can pin to your favorite pinning service(s) straight from the UI. [Learn how →](work-with-pinning-services.md) diff --git a/docs/how-to/run-ipfs-inside-docker.md b/docs/how-to/run-ipfs-inside-docker.md index af56c684f..29ccea7de 100644 --- a/docs/how-to/run-ipfs-inside-docker.md +++ b/docs/how-to/run-ipfs-inside-docker.md @@ -1,15 +1,15 @@ --- -title: Run IPFS inside Docker +title: Run Kubo IPFS inside Docker description: You can run IPFS inside Docker to simplify your deployment processes, and horizontally scale your IPFS infrastructure. --- # Run IPFS inside Docker -You can run IPFS inside Docker to simplify your deployment processes, as well as horizontally scale your IPFS infrastructure. +You can run Kubo IPFS inside Docker to simplify your deployment processes, as well as horizontally scale your IPFS infrastructure. ## Set up -1. Grab the IPFS docker image hosted at [hub.docker.com/r/ipfs/go-ipfs](https://hub.docker.com/r/ipfs/go-ipfs/). +1. Grab the IPFS docker image hosted at [hub.docker.com/r/ipfs/kubo](https://hub.docker.com/r/ipfs/kubo/). 1. To make files visible inside the container, you need to mount a host directory with the `-v` option to Docker. Choose a directory that you want to use to import and export files from IPFS. You should also choose a directory to store IPFS files that will persist when you restart the container. ```shell @@ -20,12 +20,12 @@ You can run IPFS inside Docker to simplify your deployment processes, as well as 1. Start a container running ipfs and expose ports `4001` (P2P TCP/QUIC transports), `5001` (RPC API) and `8080` (Gateway): ```shell - docker run -d --name ipfs_host -v $ipfs_staging:/export -v $ipfs_data:/data/ipfs -p 4001:4001 -p 4001:4001/udp -p 127.0.0.1:8080:8080 -p 127.0.0.1:5001:5001 ipfs/go-ipfs:latest + docker run -d --name ipfs_host -v $ipfs_staging:/export -v $ipfs_data:/data/ipfs -p 4001:4001 -p 4001:4001/udp -p 127.0.0.1:8080:8080 -p 127.0.0.1:5001:5001 ipfs/kubo:latest ``` ::: danger NEVER EXPOSE THE RPC API TO THE PUBLIC INTERNET - The API port provides admin-level access to your IPFS node. See [RPC API docs](/reference/http/api/) for more information. + The API port provides admin-level access to your IPFS node. See [RPC API docs](/reference/kubo/rpc/) for more information. ::: @@ -68,7 +68,7 @@ You can run IPFS inside Docker to simplify your deployment processes, as well as When starting a container running ipfs for the first time with an empty data directory, it will call `ipfs init` to initialize configuration files and generate a new keypair. 
At this time, you can choose which profile to apply using the `IPFS_PROFILE` environment variable: ```shell -docker run -d --name ipfs_host -e IPFS_PROFILE=server -v $ipfs_staging:/export -v $ipfs_data:/data/ipfs -p 4001:4001 -p 4001:4001/udp -p 127.0.0.1:8080:8080 -p 127.0.0.1:5001:5001 ipfs/go-ipfs:latest +docker run -d --name ipfs_host -e IPFS_PROFILE=server -v $ipfs_staging:/export -v $ipfs_data:/data/ipfs -p 4001:4001 -p 4001:4001/udp -p 127.0.0.1:8080:8080 -p 127.0.0.1:5001:5001 ipfs/kubo:latest ``` ## Customizing your node @@ -96,11 +96,11 @@ docker run -d --name ipfs \ -p 4001:4001 \ -p 127.0.0.1:8080:8080 \ -p 127.0.0.1:5001:5001 \ - ipfs/go-ipfs + ipfs/kubo ``` :::tip Use in custom images -See `gateway` example at[github.com/ipfs-shipyard/go-ipfs-docker-examples](https://github.com/ipfs-shipyard/go-ipfs-docker-examples) +See `gateway` example at[github.com/ipfs-shipyard/kubo-docker-examples](https://github.com/ipfs-shipyard/kubo-docker-examples) ::: ## Private swarms inside Docker @@ -108,14 +108,14 @@ See `gateway` example at[github.com/ipfs-shipyard/go-ipfs-docker-examples](https It is possible to initialize the container with a swarm key file (`/data/ipfs/swarm.key`) using the variables `IPFS_SWARM_KEY` and `IPFS_SWARM_KEY_FILE`. The `IPFS_SWARM_KEY` creates `swarm.key` with the contents of the variable itself, while `IPFS_SWARM_KEY_FILE` copies the key from a path stored in the variable. The `IPFS_SWARM_KEY_FILE` **overwrites** the key generated by `IPFS_SWARM_KEY`. ```shell -docker run -d --name ipfs_host -e IPFS_SWARM_KEY= -v $ipfs_staging:/export -v $ipfs_data:/data/ipfs -p 4001:4001 -p 4001:4001/udp -p 127.0.0.1:8080:8080 -p 127.0.0.1:5001:5001 ipfs/go-ipfs:latest +docker run -d --name ipfs_host -e IPFS_SWARM_KEY= -v $ipfs_staging:/export -v $ipfs_data:/data/ipfs -p 4001:4001 -p 4001:4001/udp -p 127.0.0.1:8080:8080 -p 127.0.0.1:5001:5001 ipfs/kubo:latest ``` The swarm key initialization can also be done using docker secrets, and requires `docker swarm` or `docker-compose`: ```shell cat your_swarm.key | docker secret create swarm_key_secret - -docker run -d --name ipfs_host --secret swarm_key_secret -e IPFS_SWARM_KEY_FILE=/run/secrets/swarm_key_secret -v $ipfs_staging:/export -v $ipfs_data:/data/ipfs -p 4001:4001 -p 4001:4001/udp -p 127.0.0.1:8080:8080 -p 127.0.0.1:5001:5001 ipfs/go-ipfs:latest +docker run -d --name ipfs_host --secret swarm_key_secret -e IPFS_SWARM_KEY_FILE=/run/secrets/swarm_key_secret -v $ipfs_staging:/export -v $ipfs_data:/data/ipfs -p 4001:4001 -p 4001:4001/udp -p 127.0.0.1:8080:8080 -p 127.0.0.1:5001:5001 ipfs/kubo:latest ``` ## Key rotation inside Docker @@ -124,10 +124,10 @@ It is possible to do key rotation in an ephemeral container that is temporarily ```shell # given container named 'ipfs-test' that persists repo at /path/to/persisted/.ipfs -docker run -d --name ipfs-test -v /path/to/persisted/.ipfs:/data/ipfs ipfs/go-ipfs:v0.7.0 +docker run -d --name ipfs-test -v /path/to/persisted/.ipfs:/data/ipfs ipfs/kubo:latest docker stop ipfs-test # key rotation works like this (old key saved under 'old-self') -docker run --rm -it -v /path/to/persisted/.ipfs:/data/ipfs ipfs/go-ipfs:v0.7.0 key rotate -o old-self -t ed25519 +docker run --rm -it -v /path/to/persisted/.ipfs:/data/ipfs ipfs/kubo:latest key rotate -o old-self -t ed25519 docker start ipfs-test # will start with the new key ``` diff --git a/docs/how-to/take-snapshot.md b/docs/how-to/take-snapshot.md index 6db9a697c..b4d8c3860 100644 --- a/docs/how-to/take-snapshot.md +++ 
b/docs/how-to/take-snapshot.md @@ -35,7 +35,7 @@ sudo mkdir /ipfs /ipns sudo chown `whoami` /ipfs /ipns ``` -You will need to have `FUSE` (Filesystem in Userspace) installed on your machine in order to be able to `mount` directories from the IPFS. You can find instructions for how to install `FUSE` [in the `go-ipfs` docs](https://github.com/ipfs/go-ipfs/blob/master/docs/fuse.md). +You will need to have `FUSE` (Filesystem in Userspace) installed on your machine in order to be able to `mount` directories from the IPFS. You can find instructions for how to install `FUSE` [in the `kubo` docs](https://github.com/ipfs/kubo/blob/master/docs/fuse.md). View your snapshots live: diff --git a/docs/how-to/troubleshooting.md b/docs/how-to/troubleshooting.md index 0a1484653..eec77d052 100644 --- a/docs/how-to/troubleshooting.md +++ b/docs/how-to/troubleshooting.md @@ -13,7 +13,7 @@ If you're getting unexpected behavior when trying to run common commands such as ## IPFS is running slowly -Commands like `ipfs ls` are going to the network to try and find data. If for some reason, that data is not _findable_ then Go-IPFS will just keep looking for who has the data forever. Common reasons for data not being _findable_ are that: +Commands like `ipfs ls` are going to the network to try and find data. If for some reason, that data is not _findable_ then Kubo will just keep looking for who has the data forever. Common reasons for data not being _findable_ are that: - Nobody online has it. - There is one node that has the data, but it's behind a NAT. @@ -23,7 +23,7 @@ You can take a look at what's going on with Bitswap using `ipfs bitswap stat` to Some functions also have flags like `--stream` or `--progress` that can help you see incremental updates. For logging behavior, there is `ipfs log`, where `ipfs log level` can help you inspect subsystems further. -You can pass a timeout flag to basically all Go-IPFS commands if you're concerned about your CLI not responding quickly enough when the data just isn't available on the network. +You can pass a timeout flag to basically all Kubo commands if you're concerned about your CLI not responding quickly enough when the data just isn't available on the network. ## File transfers @@ -58,7 +58,7 @@ The first thing to do is to double-check that both nodes are, in fact, running a "/ip4/192.168.2.131/tcp/4001/p2p/QmTNwsFkLAed15kQEC1ZJWPfoNbBQnMFojfJKQ9sZj1dk8", "/ip4/192.168.2.131/udp/4001/quic/p2p/QmTNwsFkLAed15kQEC1ZJWPfoNbBQnMFojfJKQ9sZj1dk8" ], - "AgentVersion": "go-ipfs/0.4.11-dev/", + "AgentVersion": "kubo/0.4.11-dev/", "ProtocolVersion": "ipfs/0.1.0" } ``` diff --git a/docs/how-to/use-existing-private-key.md b/docs/how-to/use-existing-private-key.md index 2402efe61..daf8f7dbc 100644 --- a/docs/how-to/use-existing-private-key.md +++ b/docs/how-to/use-existing-private-key.md @@ -9,7 +9,7 @@ There are two locations in which IPFS stores private keys: | Location | Purpose | Specification | | ------------------------------ | ----------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------- | -| Config file (`~/.ipfs/config`) | Holds a single private key; used as the default for initializing the IPFS node, IPNS publishing, etc. 
| [config#identityprivkey](https://github.com/ipfs/go-ipfs/blob/master/docs/config.md#identityprivkey) | +| Config file (`~/.ipfs/config`) | Holds a single private key; used as the default for initializing the IPFS node, IPNS publishing, etc. | [config#identityprivkey](https://github.com/ipfs/kubo/blob/master/docs/config.md#identityprivkey) | | Keystore (`~/.ipfs/keystore`) | Holds additional private keys the node has access to; can be used for IPNS signing, publishing, etc. | [fs-repo#keystore](https://github.com/ipfs/specs/blob/master/REPO_FS.md#keystore) | Private keys are handled programmatically through code or through the CLI. Here we will show you how to use your existing private keys using both methods. @@ -20,7 +20,7 @@ Use the CLI to manage your private keys. ### Go-IPFS -There's no way of directly initializing an IPFS node using your private keys from the Go-IPFS CLI at this time. To learn more and look for possible workarounds, see the discussion on issue [ipfs/go-ipfs#4240](https://github.com/ipfs/go-ipfs/issues/4240). +There's no way of directly initializing an IPFS node using your private keys from the Go-IPFS CLI at this time. To learn more and look for possible workarounds, see the discussion on issue [ipfs/kubo#4240](https://github.com/ipfs/kubo/issues/4240). You can, however, import private keys into the IPFS keystore: diff --git a/docs/how-to/websites-on-ipfs/link-a-domain.md b/docs/how-to/websites-on-ipfs/link-a-domain.md index e60d63edb..550328d24 100644 --- a/docs/how-to/websites-on-ipfs/link-a-domain.md +++ b/docs/how-to/websites-on-ipfs/link-a-domain.md @@ -143,7 +143,7 @@ If you want to create your website on a subdomain rather than a bare TLD (e.g. ` You should now be able to visit your IPFS website at your Handshake domain! If your Handshake name is `sub.yourname/`, you can visit your website at http://sub.yourname/ ::: tip -If you prefer to use Handshake-powered DNSLink with your custom gateway, run [`ipfs daemon`](/install/command-line/) with config that has a Handshake resolver enabled for your domain: `ipfs config --json DNS.Resolvers '{ "yourname": "https://query.hdns.io/dns-query" }`. Learn how to run a self-hosted, site-specific DNSLink gateway from [gateway recipes in go-ipfs config docs](https://github.com/ipfs/go-ipfs/blob/master/docs/config.md#gateway-recipes). +If you prefer to use Handshake-powered DNSLink with your custom gateway, run [`ipfs daemon`](/install/command-line/) with config that has a Handshake resolver enabled for your domain: `ipfs config --json DNS.Resolvers '{ "yourname": "https://query.hdns.io/dns-query" }`. Learn how to run a self-hosted, site-specific DNSLink gateway from [gateway recipes in Kubo config docs](https://github.com/ipfs/kubo/blob/master/docs/config.md#gateway-recipes). ::: ## Up next diff --git a/docs/install/README.md b/docs/install/README.md index b83a735d2..c48d113bc 100644 --- a/docs/install/README.md +++ b/docs/install/README.md @@ -9,7 +9,7 @@ IPFS is a collection of protocols, packages, and specifications that allow compu ## File storage -Anyone can use IPFS to store files in a _decentralized_ way. The easiest way to get up and running is by installing the IPFS Desktop application. This app has Go-IPFS built-in and lets you interact with the network through a simple user-interface. [Check it out →](./ipfs-desktop.md) +Anyone can use IPFS to store files in a _decentralized_ way. The easiest way to get up and running is by installing the IPFS Desktop application. 
This app has Kubo built-in and lets you interact with the network through a simple user-interface. [Check it out →](./ipfs-desktop.md) For long-term storage, users can use the Filecoin network! Filecoin is a peer-to-peer network that stores files on the internet, with built-in economic incentives to ensure files are stored reliably over time. Available storage and pricing are not controlled by any single company. Instead, Filecoin facilitates open markets for storing and retrieving files that anyone can participate in. Learn more over on the [Filecoin docs website.](https://docs.filecoin.io/) diff --git a/docs/install/command-line.md b/docs/install/command-line.md index c4edcb8f3..9bf12f4d8 100644 --- a/docs/install/command-line.md +++ b/docs/install/command-line.md @@ -1,7 +1,7 @@ --- title: Command-line description: Using IPFS through the command-line allows you to do everything that IPFS Desktop can do, but at a more granular level since you can specify which commands to run. Learn how to install it here. -current-ipfs-version: v0.13.1 +current-ipfs-version: v0.14.0 --- # Command-line @@ -12,11 +12,11 @@ Installing IPFS through the command-line is handy if you plan on building applic ## System requirements -IPFS requires 512MiB of memory and can run an IPFS node on a Raspberry Pi. However, how much disk space your IPFS installation takes up depends on how much data you're sharing. A base installation takes up about 12MB of disk space. One can enable automatic garbage collection via [--enable-gc](/reference/cli/#ipfs-daemon) and adjust the [default maximum disk storage](https://github.com/ipfs/go-ipfs/blob/v0.13.1/docs/config.md#datastorestoragemax) for data retrieved from other peers. +Kubo IPFS requires 512MiB of memory and can run an IPFS node on a Raspberry Pi. However, how much disk space your IPFS installation takes up depends on how much data you're sharing. A base installation takes up about 12MB of disk space. One can enable automatic garbage collection via [--enable-gc](/reference/kubo/cli/#ipfs-daemon) and adjust the [default maximum disk storage](https://github.com/ipfs/kubo/blob/v0.14.0/docs/config.md#datastorestoragemax) for data retrieved from other peers. ## Official distributions -The IPFS team manages the [dist.ipfs.io website](https://dist.ipfs.io/) to help users quickly find the latest version of every IPFS package. As soon as a new release of an IPFS package comes out, it is automatically shown on `dist.ipfs.io`, so you can be sure you're getting the latest software. These steps detail how to download and install the latest `go-ipfs` from `dist.ipfs.io` using the command-line. +The IPFS team manages the [dist.ipfs.io website](https://dist.ipfs.io/) to help users quickly find the latest version of every IPFS package. As soon as a new release of an IPFS package comes out, it is automatically shown on `dist.ipfs.io`, so you can be sure you're getting the latest software. These steps detail how to download and install the latest `kubo` from `dist.ipfs.io` using the command-line. | [Windows](#windows) | [macOS](#macos) | [Linux](#linux) | | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------- | ---------------------------------------------------------------------- | @@ -24,26 +24,26 @@ The IPFS team manages the [dist.ipfs.io website](https://dist.ipfs.io/) to help ### Windows -1. Download the Windows binary from [`dist.ipfs.io`](https://dist.ipfs.io/#go-ipfs). +1. 
Download the Windows binary from [`dist.ipfs.io`](https://dist.ipfs.io/#kubo). ```powershell cd ~\ - wget https://dist.ipfs.io/go-ipfs/v0.13.1/go-ipfs_v0.13.1_windows-amd64.zip -Outfile go-ipfs_v0.13.1.zip + wget https://dist.ipfs.io/kubo/v0.14.0/kubo_v0.14.0_windows-amd64.zip -Outfile kubo_v0.14.0.zip ``` 1. Unzip the file and move it somewhere handy. ```powershell - Expand-Archive -Path go-ipfs_v0.13.1.zip -DestinationPath ~\Apps\go-ipfs_v0.13.1 + Expand-Archive -Path kubo_v0.14.0.zip -DestinationPath ~\Apps\kubo_v0.14.0 ``` -1. Move into the `go-ipfs_v0.13.1` folder and check that the `ipfs.exe` works: +1. Move into the `kubo_v0.14.0` folder and check that the `ipfs.exe` works: ```powershell - cd ~\Apps\go-ipfs_v0.13.1\go-ipfs + cd ~\Apps\kubo_v0.14.0\kubo .\ipfs.exe --version - > ipfs version 0.13.1 + > ipfs version 0.14.0 ``` While you can use IPFS right now, it's better to add `ipfs.exe` to your `PATH` by using the following steps. @@ -66,7 +66,7 @@ The IPFS team manages the [dist.ipfs.io website](https://dist.ipfs.io/) to help notepad $PROFILE ``` -1. Add the location of your Go-IPFS daemon and add it to PowerShell's `PATH` by truncating it to the end of your PowerShell profile: +1. Add the location of your Kubo daemon and add it to PowerShell's `PATH` by truncating it to the end of your PowerShell profile: ```powershell Add-Content $PROFILE "`n[System.Environment]::SetEnvironmentVariable('PATH',`$Env:PATH+';;$GO_IPFS_LOCATION')" @@ -84,7 +84,7 @@ The IPFS team manages the [dist.ipfs.io website](https://dist.ipfs.io/) to help cd ~ ipfs --version - > ipfs version 0.13.1 + > ipfs version 0.14.0 ``` ### macOS @@ -93,29 +93,29 @@ The IPFS team manages the [dist.ipfs.io website](https://dist.ipfs.io/) to help You can install IPFS on M1-based Macs by using the `darwin-arm64` binary instead of the `amd64` binary listed in these instructions. ::: -1. Download the macOS binary from [`dist.ipfs.io`](https://dist.ipfs.io/#go-ipfs). +1. Download the macOS binary from [`dist.ipfs.io`](https://dist.ipfs.io/#kubo). ```bash - curl -O https://dist.ipfs.io/go-ipfs/v0.13.1/go-ipfs_v0.13.1_darwin-amd64.tar.gz + curl -O https://dist.ipfs.io/kubo/v0.14.0/kubo_v0.14.0_darwin-amd64.tar.gz ``` 1. Unzip the file: ```bash - tar -xvzf go-ipfs_v0.13.1_darwin-amd64.tar.gz - - > x go-ipfs/install.sh - > x go-ipfs/ipfs - > x go-ipfs/LICENSE - > x go-ipfs/LICENSE-APACHE - > x go-ipfs/LICENSE-MIT - > x go-ipfs/README.md + tar -xvzf kubo_v0.14.0_darwin-amd64.tar.gz + + > x kubo/install.sh + > x kubo/ipfs + > x kubo/LICENSE + > x kubo/LICENSE-APACHE + > x kubo/LICENSE-MIT + > x kubo/README.md ``` -1. Move into the `go-ipfs` folder and run the install script: +1. Move into the `kubo` folder and run the install script: ```bash - cd go-ipfs + cd kubo bash install.sh > Moved ./ipfs to /usr/local/bin @@ -126,34 +126,34 @@ You can install IPFS on M1-based Macs by using the `darwin-arm64` binary instead ```bash ipfs --version - > ipfs version 0.13.1 + > ipfs version 0.14.0 ``` ### Linux -1. Download the Linux binary from [`dist.ipfs.io`](https://dist.ipfs.io/#go-ipfs). +1. Download the Linux binary from [`dist.ipfs.io`](https://dist.ipfs.io/#kubo). ```bash - wget https://dist.ipfs.io/go-ipfs/v0.13.1/go-ipfs_v0.13.1_linux-amd64.tar.gz + wget https://dist.ipfs.io/kubo/v0.14.0/kubo_v0.14.0_linux-amd64.tar.gz ``` 1. 
Unzip the file: ```bash - tar -xvzf go-ipfs_v0.13.1_linux-amd64.tar.gz - - > x go-ipfs/install.sh - > x go-ipfs/ipfs - > x go-ipfs/LICENSE - > x go-ipfs/LICENSE-APACHE - > x go-ipfs/LICENSE-MIT - > x go-ipfs/README.md + tar -xvzf kubo_v0.14.0_linux-amd64.tar.gz + + > x kubo/install.sh + > x kubo/ipfs + > x kubo/LICENSE + > x kubo/LICENSE-APACHE + > x kubo/LICENSE-MIT + > x kubo/README.md ``` -1. Move into the `go-ipfs` folder and run the install script: +1. Move into the `kubo` folder and run the install script: ```bash - cd go-ipfs + cd kubo sudo bash install.sh > Moved ./ipfs to /usr/local/bin @@ -164,12 +164,13 @@ You can install IPFS on M1-based Macs by using the `darwin-arm64` binary instead ```bash ipfs --version - > ipfs version 0.13.1 + > ipfs version 0.14.0 ``` ## Compile manually -Manually compiling IPFS is a fairly involved process that changes frequently. It can be handy if you'd like to build a specific branch or use the _bleeding-edge_ version of Go-IPFS. See the [`ipfs/go-ipfs` GitHub repository for details →](https://github.com/ipfs/go-ipfs) +Manually compiling IPFS is a fairly involved process that changes frequently. It can be handy if you'd like to build a specific branch or use the _bleeding-edge_ version of Kubo +. See the [`ipfs/kubo` GitHub repository for details →](https://github.com/ipfs/kubo) ## Which node should you use with the command line @@ -177,11 +178,11 @@ The command line can detect and use any node that's running, unless it's configu ### Local daemon -The local daemon process is automatically started in the CLI with the command `ipfs daemon`. It creates an `$IPFS_PATH/api` file with an [RPC API](./reference/http/api/#http-rpc-api-reference) address. +The local daemon process is automatically started in the CLI with the command `ipfs daemon`. It creates an `$IPFS_PATH/api` file with an [RPC API](./reference/kubo/rpc/#http-rpc-api-reference) address. ### Remote client -You can install the standalone IPFS CLI client independently and use it to talk to an IPFS Desktop node or a Brave node. Use the [RPC API](./reference/http/api/#http-rpc-api-reference) to talk to the `ipfs` daemon. +You can install the standalone IPFS CLI client independently and use it to talk to an IPFS Desktop node or a Brave node. Use the [RPC API](./reference/kubo/rpc/#http-rpc-api-reference) to talk to the `ipfs` daemon. When an IPFS command is executed without parameters, the CLI client checks whether the `$IPFS_PATH/api` file exists and connects to the address listed there. diff --git a/docs/install/ipfs-updater.md b/docs/install/ipfs-updater.md index 4d014fe39..eda6fe240 100644 --- a/docs/install/ipfs-updater.md +++ b/docs/install/ipfs-updater.md @@ -1,12 +1,12 @@ --- title: IPFS updater -description: The IPFS updater is a command-line tool originally used to help users update their IPFS version. Learn how to install, upgrade, and downgrade Go-IPFS using the IPFS updater. +description: The IPFS updater is a command-line tool originally used to help users update their IPFS version. Learn how to install, upgrade, and downgrade Kubo using the IPFS updater. current-ipfs-updater-version: v1.8.0 --- # IPFS updater -The IPFS updater is a command-line tool originally used to help users update their IPFS version. It has since been upgraded to allow users to _install_ Go-IPFS as well. The easiest way to install the IPFS updater is by using the pre-built binaries, detailed below. 
See the [project repository](https://github.com/ipfs/ipfs-update#from-source) if you'd prefer to build it from source. +The IPFS updater is a command-line tool originally used to help users update their IPFS version. It has since been upgraded to allow users to _install_ Kubo as well. The easiest way to install the IPFS updater is by using the pre-built binaries, detailed below. See the [project repository](https://github.com/ipfs/ipfs-update#from-source) if you'd prefer to build it from source. ## Install updater @@ -155,29 +155,29 @@ You can download pre-built binaries from [`dist.ipfs.io`](https://dist.ipfs.io/# ## Install IPFS -Run `ipfs-update install` followed by the version of Go-IPFS you want to install: +Run `ipfs-update install` followed by the version of Kubo you want to install: ```bash ipfs-update install 0.9.0 ``` -To install the latest release of Go-IPFS use the `latest` tag: +To install the latest release of Kubo use the `latest` tag: ```bash ipfs-update install latest ``` -`ipfs-update install` downloads, tests, and installs the specified version of Go-IPFS. If a version of IPFS is already installed, that version is _stashed_ and can be reverted to later. +`ipfs-update install` downloads, tests, and installs the specified version of Kubo. If a version of IPFS is already installed, that version is _stashed_ and can be reverted to later. ## Downgrade IPFS -Use the `revert` function to roll-back to a previous version of Go-IPFS: +Use the `revert` function to roll-back to a previous version of Kubo: ```bash ipfs-update revert ``` -`ipfs-update revert` reverts to the previously installed version of Go-IPFS. This is useful if the newly installed version has issues and you would like to switch back to your older stable installation. +`ipfs-update revert` reverts to the previously installed version of Kubo. This is useful if the newly installed version has issues and you would like to switch back to your older stable installation. ## Uninstall updater diff --git a/docs/install/recent-releases.md b/docs/install/recent-releases.md index 7bbba644d..f94ef357f 100644 --- a/docs/install/recent-releases.md +++ b/docs/install/recent-releases.md @@ -6,26 +6,26 @@ title: Recent releases This section contains information about recent IPFS releases. You can find installation instructions, update information, and release notes here. -## [Go-IPFS 0.10](https://github.com/ipfs/go-ipfs/releases/tag/v0.10.0) +## [Go-IPFS 0.10](https://github.com/ipfs/kubo/releases/tag/v0.10.0) This release brings modern IPLD to IPFS, and some useful utilities. -## [Go-IPFS 0.9](https://github.com/ipfs/go-ipfs/releases/tag/v0.9.0) +## [Go-IPFS 0.9](https://github.com/ipfs/kubo/releases/tag/v0.9.0) This release makes Go IPFS even more configurable, with some fun experiments to boot! We're also deprecating or removing some uncommonly used features to make it easier for users to discover the easy ways to use Go IPFS safely and efficiently. -## [Go-IPFS 0.8](https://github.com/ipfs/go-ipfs/releases/tag/v0.8.0) +## [Go-IPFS 0.8](https://github.com/ipfs/kubo/releases/tag/v0.8.0) This release is focused on making it easier to work with pins. We have some snazzy new features around being able to ask remote services to pin data for you, and modifying large pin sets is much faster than ever before. 
-## [Go-IPFS 0.7](https://github.com/ipfs/go-ipfs/releases/tag/v0.7.0) +## [Go-IPFS 0.7](https://github.com/ipfs/kubo/releases/tag/v0.7.0) With a focus on security and stability, this release removes support for SECIO, and enables `Ed25519` keys by default. -## [Go-IPFS 0.6](https://github.com/ipfs/go-ipfs/releases/tag/v0.6.0) +## [Go-IPFS 0.6](https://github.com/ipfs/kubo/releases/tag/v0.6.0) This is a relatively small release in terms of code changes, but it contains some significant changes to the IPFS protocol. -## [Go-IPFS 0.5](https://github.com/ipfs/go-ipfs/releases/tag/v0.5.0) +## [Go-IPFS 0.5](https://github.com/ipfs/kubo/releases/tag/v0.5.0) IPFS 0.5 features major performance upgrades to adding, providing, finding, and fetching data on IPFS. diff --git a/docs/project/README.md b/docs/project/README.md index 773f10498..96caf117b 100644 --- a/docs/project/README.md +++ b/docs/project/README.md @@ -21,7 +21,7 @@ See the overall roadmap of [IPFS project requirements](https://github.com/ipfs/r ## Implementation status -Want to know the current state of your favorite IPFS feature? See the [current implementation status](implementation-status.md) for go-ipfs and js-ipfs, listed by commands and endpoints. +Want to know the current state of your favorite IPFS feature? See the [current implementation status](implementation-status.md) for Kubo and js-ipfs, listed by commands and endpoints. ## IPFS specifications diff --git a/docs/project/contribute.md b/docs/project/contribute.md index 0215ae919..cb3598a4d 100644 --- a/docs/project/contribute.md +++ b/docs/project/contribute.md @@ -14,7 +14,7 @@ to start contributing code. The biggest and most active repositories we have today are: -- [https://github.com/ipfs/go-ipfs](https://github.com/ipfs/go-ipfs) +- [https://github.com/ipfs/kubo](https://github.com/ipfs/kubo) (go-ipfs) - [https://github.com/ipfs/js-ipfs](https://github.com/ipfs/js-ipfs) - [https://github.com/libp2p/go-libp2p](https://github.com/libp2p/go-libp2p) - [https://github.com/libp2p/js-libp2p](https://github.com/libp2p/js-libp2p) diff --git a/docs/project/history.md b/docs/project/history.md index 20a1c14fd..54119219e 100644 --- a/docs/project/history.md +++ b/docs/project/history.md @@ -46,11 +46,11 @@ Protocol Labs entered the [Y Combinator Summer 2014 Class](https://www.ycombinat The whitepaper was published in July 2014. It caught the attention of P2P and internet enthusiasts, including Jeromy Johnson (aka [whyrusleeping](https://github.com/whyrusleeping)). "Why" and other early contributors shared Juan's vision for a distributed, uncensorable, and permissionless file system. They worked nights and weekends, and initially for free, because they believed in the positive impact that open networks like IPFS could have on the world. -Juan, Why, and other contributors spent many late nights in Juan's living room with takeout food and too many coffees ([Philz Mint Mojitos](https://www.philzcoffee.com/menu) FTW!) to create the alpha release of [go-ipfs](https://github.com/ipfs/go-ipfs/blob/master/CHANGELOG.md#023---2015-03-01). IPFS was ready to begin its growth journey in the open. +Juan, Why, and other contributors spent many late nights in Juan's living room with takeout food and too many coffees ([Philz Mint Mojitos](https://www.philzcoffee.com/menu) FTW!) to create the alpha release of [kubo](https://github.com/ipfs/kubo/blob/master/CHANGELOG.md#023---2015-03-01). 
IPFS was ready to begin its growth journey in the open. In the summer of 2015, the small but growing IPFS team (about five or six full-time contributors) settled into a coworking space in Seattle. They hammered out improvements to the Go and JavaScript implementations of IPFS as interest in the project grew. Satoshi Nakamoto's 2009 [Bitcoin whitepaper](https://bitcoin.org/bitcoin.pdf) had ushered in a renaissance of P2P innovation. A P2P summer was in full swing. IPFS gained usage in the [Ethereum](https://ethereum.org/) and wider blockchain communities. In September 2015, [Neocities](https://neocities.org/) became the first major site to [implement IPFS in production](https://blog.neocities.org/blog/2015/09/08/its-time-for-the-distributed-web.html). -The work done in Seattle and the lessons learned working with Neocities culminated in the 0.4.0 release of go-ipfs in April 2016. The improvements of 0.4.0 transitioned IPFS from an "exciting demo" to a genuinely useful tool for early adopters. +The work done in Seattle and the lessons learned working with Neocities culminated in the 0.4.0 release of Kubo in April 2016. The improvements of 0.4.0 transitioned IPFS from an "exciting demo" to a genuinely useful tool for early adopters. The project saw further technical and community growth in 2016. [Multiformats](https://multiformats.io/), [libp2p](https://libp2p.io/), and [IPLD](https://ipld.io/) were spun out as separate projects from IPFS. [OpenBazaar](https://docs.ipfs.io/concepts/case-study-openbazaar/) began [integrating IPFS](https://bitcoinmagazine.com/articles/openbazaar-integrating-interplanetary-file-system-to-help-keep-stores-open-longer-1460660998) into their decentralized online marketplace. The IPFS team attended and hosted many community gatherings highlighted by the [Decentralized Web Summit](https://2016.decentralizedweb.net/). @@ -62,7 +62,7 @@ According to [Uncle Ben](https://en.wikipedia.org/wiki/Uncle_Ben#%22With_great_p This focus bore significant results in the IPFS community in 2019. Protocol Labs hosted the [first IPFS Camp](https://camp.ipfs.io/) in Barcelona in June. The retreat brought together 150 distributed-web pioneers to learn, collaborate, and build. It inspired a [successful collaboration](https://blog.ipfs.io/2020-02-14-improved-bitswap-for-container-distribution/) with one of the biggest, most innovative corporations in world, Netflix. By the end of 2019, the IPFS network had grown by more than 30x. The community of open-source contributors stood at more than 4,000. -The April 2020 [go-ipfs 0.5.0 release](https://blog.ipfs.io/2020-04-28-go-ipfs-0-5-0/) provided the largest performance upgrades to the network yet: faster file adding (2x), providing (2.5x), finding (2-6x), and fetching (2-5x). For the ever-growing [IPFS ecosystem](https://ipfs.io/images/ipfs-applications-diagram.png), reliability is just as important as speed. For that, Protocol Labs developed, used, and released [Testground](https://blog.ipfs.io/2020-05-06-launching-testground/). Testground is a huge step forward in testing and hardening P2P systems not just for IPFS, but the community at-large. +The April 2020 [Kubo 0.5.0 release](https://blog.ipfs.io/2020-04-28-go-ipfs-0-5-0/) provided the largest performance upgrades to the network yet: faster file adding (2x), providing (2.5x), finding (2-6x), and fetching (2-5x). For the ever-growing [IPFS ecosystem](https://ipfs.io/images/ipfs-applications-diagram.png), reliability is just as important as speed.
For that, Protocol Labs developed, used, and released [Testground](https://blog.ipfs.io/2020-05-06-launching-testground/). Testground is a huge step forward in testing and hardening P2P systems not just for IPFS, but the community at-large. Major collaborations with [Opera](https://blog.ipfs.io/2020-03-30-ipfs-in-opera-for-android/), [Microsoft ION](https://techcommunity.microsoft.com/t5/azure-active-directory-identity/toward-scalable-decentralized-identifier-systems/ba-p/560168), and [Cloudflare](https://www.cloudflare.com/distributed-web-gateway/) just scratch the surface of possibilities for IPFS. The H2 2020 Filecoin Mainnet launch is poised to fundamentally shift economic incentives of the P2P IPFS network to compete with the entrenched client-server web. diff --git a/docs/project/implementation-status.md b/docs/project/implementation-status.md index 492af556e..49e5a380f 100644 --- a/docs/project/implementation-status.md +++ b/docs/project/implementation-status.md @@ -538,7 +538,7 @@ See [object](https://github.com/ipfs/js-ipfs/blob/master/docs/core-api/OBJECT.md ## p2p (libp2p exposed API) -> **This is blocked until there is a formalized `interface-libp2p`**. Currently, js-ipfs exposes libp2p directly while go-ipfs exposes a subset of commands that use libp2p. +> **This is blocked until there is a formalized `interface-libp2p`**. Currently, js-ipfs exposes libp2p directly while Kubo exposes a subset of commands that use libp2p. ### CLI diff --git a/docs/project/repository-guide.md b/docs/project/repository-guide.md index bf290f702..f2f82cf98 100644 --- a/docs/project/repository-guide.md +++ b/docs/project/repository-guide.md @@ -19,7 +19,7 @@ Organization and repository links for the top-level projects shipped as part of ### Protocol implementations -- [go-ipfs](https://github.com/ipfs/go-ipfs): The reference implementation written in Go. +- [Kubo](https://github.com/ipfs/kubo): The reference implementation written in Go. - [js-ipfs](https://github.com/ipfs/js-ipfs): The JavaScript implementation of IPFS. - [rust-ipfs](https://github.com/rs-ipfs/rust-ipfs): Alpha implementation in Rust. - [Other implementations](https://github.com/ipfs/ipfs#protocol-implementations): Up-to-date links to all other protocol implementations. diff --git a/docs/reference/README.md b/docs/reference/README.md index 7bd95a47d..8f3884fef 100644 --- a/docs/reference/README.md +++ b/docs/reference/README.md @@ -6,22 +6,34 @@ description: API and CLI reference materials for IPFS, the InterPlanetary File S # API & CLI reference -Looking for developer references for IPFS? Find them here. +Looking for user and developer references for IPFS? Find them here. -## Go (go-ipfs) +## HTTP Gateway -[API resources for go-ipfs](go/api.md) (the reference implementation of IPFS), including the Go CoreAPI, the Go embedded client, and the experimental Go CoreAPI implementation using HTTP API. +The [Gateway API](http/gateway.md) provides an implementation-agnostic HTTP interface for retrieving [content-addressed](../concepts/glossary/#content-addressing) data from IPFS with regular HTTP clients and libraries. Use it for future-proofing your applications. -## JavaScript (js-ipfs) +## Custom APIs -[API resources for js-ipfs](js/api.md), including the JS core API reference and the JS HTTP client library. +Implementation- and language-specific interfaces can be used when the [HTTP Gateway API](http/gateway.md) is not enough, or when you need additional flexibility.
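As a rough illustration of the Gateway API described in the paragraph above: any plain HTTP client can retrieve content-addressed data by requesting an `/ipfs/<CID>` path from a gateway. The sketch below uses only the Go standard library and assumes a local Kubo daemon with its gateway on the default address `127.0.0.1:8080`; the CID is a placeholder, not a real identifier.

```go
// Minimal sketch: fetch content-addressed data from an IPFS HTTP gateway.
// Assumes a local gateway on the default 127.0.0.1:8080 and a placeholder CID.
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	cid := "<CID>" // placeholder: replace with a real content identifier
	resp, err := http.Get("http://127.0.0.1:8080/ipfs/" + cid)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	data, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Printf("fetched %d bytes (status %s)\n", len(data), resp.Status)
}
```

The same request works with `curl` or a browser against any public gateway, which is what makes the Gateway API implementation-agnostic.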
-Explore the Mutable File System, Regular Files API, and DAG API through ProtoSchool's [coding challenges](https://proto.school/course/ipfs). +### Kubo (go-ipfs) + +Kubo is the earliest and most widely used implementation of IPFS, written in Go. + +Use it as: + +- **CLI tool** + Working in the terminal? Here's where you'll find [Kubo's command-line interface (CLI) reference](kubo/cli.md). -## HTTP +- **HTTP RPC endpoint** + [RPC API v0 reference for Kubo](kubo/rpc.md) — control your node over HTTP using the same commands you can from the command line! -[HTTP API reference](http/api.md) for IPFS — control your node using the same commands you can from the command line! +- **Go library** + See [Go API reference for Kubo](go/api.md), including the Go CoreAPI, the Go embedded client, and a Go client for interacting with Kubo over HTTP RPC API. -## CLI commands -Working in the terminal? Here's where you'll find the IPFS [command-line interface (CLI) reference](cli.md). +### JavaScript (js-ipfs) + +[API resources for js-ipfs](js/api.md), including the JS core API reference and the JS HTTP client library. + +Explore the Mutable File System, Regular Files API, and DAG API through ProtoSchool's [coding challenges](https://proto.school/course/ipfs). diff --git a/docs/reference/go/api.md b/docs/reference/go/api.md index a1dc68ab1..6c7ff4800 100644 --- a/docs/reference/go/api.md +++ b/docs/reference/go/api.md @@ -1,37 +1,37 @@ --- -title: Go-IPFS +title: IPFS in Go legacyUrl: https://docs.ipfs.io/reference/go/overview/ description: Developer resources for working in Go with IPFS, the InterPlanetary File System. --- -# Go-IPFS +# IPFS in Go ## Working with Go -Go-IPFS is the primary reference implementation of IPFS. It is a command-line application, but can also be used as a library in other Go programs. +Kubo (go-ipfs) is the oldest implementation of IPFS. It is a command-line application, but can also be used as a library in other Go programs. 
-For more about using Go-IPFS, see any of the following reference documents: +For more about using Kubo, see any of the following reference documents: -- [Configuration reference](https://github.com/ipfs/go-ipfs/blob/master/docs/config.md) - - [Datastore configuration](https://github.com/ipfs/go-ipfs/blob/master/docs/datastores.md) - - [Experimental features](https://github.com/ipfs/go-ipfs/blob/master/docs/experimental-features.md) -- [Installing command completion](https://github.com/ipfs/go-ipfs/blob/master/docs/command-completion.md) -- [Mounting IPFS with FUSE](https://github.com/ipfs/go-ipfs/blob/master/docs/fuse.md) -- [Installing plugins](https://github.com/ipfs/go-ipfs/blob/master/docs/plugins.md) +- [Configuration reference](https://github.com/ipfs/kubo/blob/master/docs/config.md) + - [Datastore configuration](https://github.com/ipfs/kubo/blob/master/docs/datastores.md) + - [Experimental features](https://github.com/ipfs/kubo/blob/master/docs/experimental-features.md) +- [Installing command completion](https://github.com/ipfs/kubo/blob/master/docs/command-completion.md) +- [Mounting IPFS with FUSE](https://github.com/ipfs/kubo/blob/master/docs/fuse.md) +- [Installing plugins](https://github.com/ipfs/kubo/blob/master/docs/plugins.md) For more technical information about building, debugging or using the API, see: -- [Performance Debugging Guidelines](https://github.com/ipfs/go-ipfs/blob/master/docs/debug-guide.md) -- [IPFS API Implementation](https://github.com/ipfs/go-ipfs/blob/master/docs/implement-api-bindings.md) -- [Connecting with Websockets](https://github.com/ipfs/go-ipfs/blob/master/docs/transports.md) -- Building on [Windows](https://github.com/ipfs/go-ipfs/blob/master/docs/windows.md) -- [Additional guides](https://github.com/ipfs/go-ipfs/blob/master/docs/) +- [Performance Debugging Guidelines](https://github.com/ipfs/kubo/blob/master/docs/debug-guide.md) +- [IPFS API Implementation](https://github.com/ipfs/kubo/blob/master/docs/implement-api-bindings.md) +- [Connecting with Websockets](https://github.com/ipfs/kubo/blob/master/docs/transports.md) +- Building on [Windows](https://github.com/ipfs/kubo/blob/master/docs/windows.md) +- [Additional guides](https://github.com/ipfs/kubo/blob/master/docs/) -If you plan to use Go-IPFS as a package in your own Go application, you can take any of three main approaches: +If you plan to use Kubo as a package in your own Go application, you can take any of three main approaches: -- Use [Go-IPFS](https://github.com/ipfs/go-ipfs) to run IPFS directly in your own process. -- Use [Go-IPFS](https://github.com/ipfs/go-ipfs-http-client) to communicate with an IPFS daemon in a separate process via its HTTP API (this is what Go-IPFS does if a daemon is already running). -- Use other Go packages to communicate with the HTTP API directly. See the [HTTP API reference](/reference/http/api/). +- Use [kubo](https://github.com/ipfs/kubo) to run Kubo directly in your own process. +- Use the [Kubo RPC client](https://github.com/ipfs/go-ipfs-http-client) to communicate with a Kubo daemon in a separate process via its HTTP RPC API (this is what Kubo does if a daemon is already running). +- Use other Go packages to communicate with the HTTP RPC API directly. See the [RPC API reference](/reference/kubo/rpc/).
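To make the third approach in the list above concrete, here is a minimal sketch that talks to a running Kubo daemon's HTTP RPC API using only the Go standard library. It assumes the default RPC address `127.0.0.1:5001`; RPC endpoints are invoked with POST requests.

```go
// Minimal sketch of calling the Kubo HTTP RPC API directly with net/http.
// Assumes a daemon listening on the default RPC address 127.0.0.1:5001.
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// RPC endpoints expect POST; this one lists currently connected peers.
	resp, err := http.Post("http://127.0.0.1:5001/api/v0/swarm/peers", "", nil)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body)) // JSON object describing connected peers
}
```

The equivalent `curl -X POST http://127.0.0.1:5001/api/v0/swarm/peers` call appears in the RPC reference, and client libraries such as `go-ipfs-api` wrap these same endpoints for you.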
## Go CoreAPI @@ -39,9 +39,9 @@ If you plan to use Go-IPFS as a package in your own Go application, you can take ## Go embedded client -[Package coreapi](https://godoc.org/github.com/ipfs/go-ipfs/core/coreapi) provides direct access to the core commands in IPFS. If you are embedding IPFS directly in your Go program, this package is the public interface you should use to read and write files or otherwise control IPFS. **This package is experimental and subject to change.** +[Package coreapi](https://godoc.org/github.com/ipfs/kubo/core/coreapi) provides direct access to the core commands in IPFS. If you are embedding IPFS directly in your Go program, this package is the public interface you should use to read and write files or otherwise control IPFS. **This package is experimental and subject to change.** -If you are running IPFS as a separate process, you should use `go-ipfs-api` to work with it via HTTP. As we finalize the interfaces in this embedded client, `go-ipfs-api` will transparently adopt them so you can use the same code with either package. +If you are running IPFS as a separate process, you should use `go-ipfs-api` to work with it via RPC. As we finalize the interfaces in this embedded client, `go-ipfs-api` will transparently adopt them so you can use the same code with either package. ## Go HTTP clients @@ -49,6 +49,6 @@ If you are running IPFS as a separate process, you should use `go-ipfs-api` to w ## Hands-on examples -There are use-case examples in the [`ipfs/go-ipfs` GitHub repository](https://github.com/ipfs/go-ipfs). They're all self-contained projects that let your spin up and test environments quickly. [Check them out →](https://github.com/ipfs/go-ipfs/tree/master/docs/examples). +There are use-case examples in the [`ipfs/kubo` GitHub repository](https://github.com/ipfs/kubo). They're all self-contained projects that let you spin up and test environments quickly. [Check them out →](https://github.com/ipfs/kubo/tree/master/docs/examples). -A good starting place is the [Use go-ipfs as a library to spawn a node and add a file](https://github.com/ipfs/go-ipfs/blob/master/docs/examples/go-ipfs-as-a-library/README.md). +A good starting place is the [Use Kubo as a library to spawn a node and add a file](https://github.com/ipfs/kubo/blob/master/docs/examples/go-ipfs-as-a-library/README.md). diff --git a/docs/reference/http/api.md b/docs/reference/http/api.md index 4097d33a6..bd8ecf2cd 100644 --- a/docs/reference/http/api.md +++ b/docs/reference/http/api.md @@ -1,5166 +1,24 @@ --- -title: HTTP RPC API +title: HTTP APIs for IPFS legacyUrl: https://docs.ipfs.io/reference/api/http/ -description: HTTP RPC API reference for IPFS daemon. +description: HTTP APIs provided by IPFS implementations. --- -# HTTP RPC API reference +# HTTP APIs - -::: tip Generated on 2022-07-06, from go-ipfs v0.13.1 -This document was autogenerated. -For issues and support, check out the [http-api-docs](https://github.com/ipfs/ipfs-docs/tree/main/tools/http-api-docs) generator on GitHub. -::: - -When an IPFS node is running as a daemon, it exposes an HTTP RPC API that allows you to control the node and run the same commands you can from the command line. - -In many cases, using this RPC API is preferable to embedding IPFS directly in your program — it allows you to maintain peer connections that are longer lived than your app and you can keep a single IPFS node running instead of several if your app can be launched multiple times.
In fact, the `ipfs` CLI commands use this RPC API when operating in online mode. - -::: danger NEVER EXPOSE THE RPC API TO THE PUBLIC INTERNET - -The RPC API provides admin-level access to your IPFS node, including `/api/v0/config`. - -It is bound to `localhost` by default on purpose. You should never expose it to the public internet, just like you would never expose a SQL database or other backend service. - -If you are looking for an interface designed for browsers and public internet, consider [Gateway](/concepts/glossary/#gateway) instead. -::: - -## Getting started - -### Alignment with CLI commands - -The HTTP API under `/api/v0/` is an RPC-style API over HTTP, not a REST API. - -[Every command](/reference/cli/) usable from the CLI is also available through the HTTP RPC API. For example: -```sh -> ipfs swarm peers -/ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ -/ip4/104.236.151.122/tcp/4001/p2p/QmSoLju6m7xTh3DuokvT3886QRYqxAzb1kShaanJgW36yx -/ip4/104.236.176.52/tcp/4001/p2p/QmSoLnSGccFuZQJzRadHn95W2CrSFmZuTdDWP8HXaHca9z - -> curl -X POST http://127.0.0.1:5001/api/v0/swarm/peers -{ - "Strings": [ - "/ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ", - "/ip4/104.236.151.122/tcp/4001/p2p/QmSoLju6m7xTh3DuokvT3886QRYqxAzb1kShaanJgW36yx", - "/ip4/104.236.176.52/tcp/4001/p2p/QmSoLnSGccFuZQJzRadHn95W2CrSFmZuTdDWP8HXaHca9z", - ] -} -``` - -### Arguments - -Arguments are added through the special query string key "arg": - -``` -> curl -X POST "http://127.0.0.1:5001/api/v0/swarm/disconnect?arg=/ip4/54.93.113.247/tcp/48131/p2p/QmUDS3nsBD1X4XK5Jo836fed7SErTyTuQzRqWaiQAyBYMP" -{ - "Strings": [ - "disconnect QmUDS3nsBD1X4XK5Jo836fed7SErTyTuQzRqWaiQAyBYMP success", - ] -} -``` - -Note that it can be used multiple times to signify multiple arguments. - -### Flags - -Flags are added through the query string. For example, the `--encoding=json` flag is the `&encoding=json` query parameter below: - -``` -> curl -X POST "http://127.0.0.1:5001/api/v0/object/get?arg=QmaaqrHyAQm7gALkRW8DcfGX3u8q9rWKnxEMmf7m9z515w&encoding=json" -{ - "Links": [ - { - "Name": "index.html", - "Hash": "QmYftndCvcEiuSZRX7njywX2AGSeHY2ASa7VryCq1mKwEw", - "Size": 1700 - }, - { - "Name": "static", - "Hash": "QmdtWFiasJeh2ymW3TD2cLHYxn1ryTuWoNpwieFyJriGTS", - "Size": 2428803 - } - ], - "Data": "CAE=" -} -``` - -Some flags may be repeated. For example, the `--status` flag may be reused as below: - -``` -> curl -X POST "http://127.0.0.1:5001/api/v0/pin/remote/service/ls?name=myservice&status=pinned&status=pinning" -``` - -::: tip -Some arguments may belong only to the CLI but appear here too. These usually belong to client-side processing of input, particularly in the `add` command. - -Additionally, as a convenience certain CLI commands may allow passing repeated flags as delimited lists such as -`ipfs pin remote service ls --status=pinned,pinning`; however, this does not apply to the HTTP API. -::: - -## HTTP status codes - -Status codes used at the RPC layer are simple: - -- `200` - The request was processed or is being processed (streaming) -- `500` - RPC endpoint returned an error -- `400` - Malformed RPC, argument type error, etc -- `403` - RPC call forbidden -- `404` - RPC endpoint doesn't exist -- `405` - HTTP Method Not Allowed - -Status code `500` means that the function _does_ exist, but IPFS was not able to fulfil the request because of an error. 
To know that reason, you have to look at the error message that is usually returned with the body of the response (if no error, check the daemon logs). - -Streaming endpoints fail as above, unless they have started streaming. That means they will have sent a `200` status code already. If an error happens during the stream, it will be included in a Trailer response header (some endpoints may additionally include an error in the last streamed object). - -A `405` error may mean that you are using the wrong HTTP method (i.e. GET instead of POST), and a `403` error occurs in a browser due to Origin / CORS. - -## Origin-based security - -When a request is sent from a browser, HTTP RPC API follows the [Origin-based security model](https://en.wikipedia.org/wiki/Same-origin_policy), and expects the `Origin` HTTP header to be present. -The API will return HTTP Error 403 when Origin is missing, does not match the API port, or is not safelisted via `API.HTTPHeaders.Access-Control-Allow-Origin` in the config. - - -## RPC commands - - - - - -## /api/v0/add - -Add a file or directory to IPFS. - -### Arguments - - -- `quiet` [bool]: Write minimal output. Required: no. -- `quieter` [bool]: Write only final hash. Required: no. -- `silent` [bool]: Write no output. Required: no. -- `progress` [bool]: Stream progress data. Required: no. -- `trickle` [bool]: Use trickle-dag format for dag generation. Required: no. -- `only-hash` [bool]: Only chunk and hash - do not write to disk. Required: no. -- `wrap-with-directory` [bool]: Wrap files with a directory object. Required: no. -- `chunker` [string]: Chunking algorithm, size-[bytes], rabin-[min]-[avg]-[max] or buzhash. Default: `size-262144`. Required: no. -- `pin` [bool]: Pin this object when adding. Default: `true`. Required: no. -- `raw-leaves` [bool]: Use raw blocks for leaf nodes. Required: no. -- `nocopy` [bool]: Add the file using filestore. Implies raw-leaves. (experimental). Required: no. -- `fscache` [bool]: Check the filestore for pre-existing blocks. (experimental). Required: no. -- `cid-version` [int]: CID version. Defaults to 0 unless an option that depends on CIDv1 is passed. Passing version 1 will cause the raw-leaves option to default to true. Required: no. -- `hash` [string]: Hash function to use. Implies CIDv1 if not sha2-256. (experimental). Default: `sha2-256`. Required: no. -- `inline` [bool]: Inline small blocks into CIDs. (experimental). Required: no. -- `inline-limit` [int]: Maximum block size to inline. (experimental). Default: `32`. Required: no. - - -### Request Body - -Argument `path` is of file type. This endpoint expects one or several files (depending on the command) in the body of the request as 'multipart/form-data'. - - - -The `add` command not only allows adding files, but also uploading directories and complex hierarchies. - -This happens as follows: Every part in the multipart request is a *directory* or a *file* to be added to IPFS. - -Directory parts have a special content type `application/x-directory`. These parts do not carry any data. The part headers look as follows: - -``` -Content-Disposition: form-data; name="file"; filename="folderName" -Content-Type: application/x-directory -``` - -File parts carry the file payload after the following headers: - -``` -Abspath: /absolute/path/to/file.txt -Content-Disposition: form-data; name="file"; filename="folderName%2Ffile.txt" -Content-Type: application/octet-stream - -...contents... 
-``` - -The above file includes its path in the "folderName/file.txt" hierarchy and IPFS will therefore be able to add it inside "folderName". The parts declaring the directories are optional when they have files inside and will be inferred from the filenames. In any case, a depth-first traversal of the directory tree is recommended to order the different parts making the request. - -The `Abspath` header is included for filestore/urlstore features that are enabled with the `nocopy` option and it can be set to the location of the file in the filesystem (within the IPFS root), or to its full web URL. - - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -{ - "Bytes": "", - "Hash": "", - "Name": "", - "Size": "" -} - -``` - -### cURL Example - -`curl -X POST -F file=@myfile "http://127.0.0.1:5001/api/v0/add?quiet=&quieter=&silent=&progress=&trickle=&only-hash=&wrap-with-directory=&chunker=size-262144&pin=true&raw-leaves=&nocopy=&fscache=&cid-version=&hash=sha2-256&inline=&inline-limit=32"` - ---- - - -## /api/v0/bitswap/ledger - -Show the current ledger for a peer. - -### Arguments - -- `arg` [string]: The PeerID (B58) of the ledger to inspect. Required: **yes**. - - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -{ - "Exchanged": "", - "Peer": "", - "Recv": "", - "Sent": "", - "Value": "" -} - -``` - -### cURL Example - -`curl -X POST "http://127.0.0.1:5001/api/v0/bitswap/ledger?arg="` - ---- - - -## /api/v0/bitswap/reprovide - -Trigger reprovider. - -### Arguments - -This endpoint takes no arguments. - - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -This endpoint returns a `text/plain` response body. -``` - -### cURL Example - -`curl -X POST "http://127.0.0.1:5001/api/v0/bitswap/reprovide"` - ---- - - -## /api/v0/bitswap/stat - -Show some diagnostic information on the bitswap agent. - -### Arguments - -- `verbose` [bool]: Print extra information. Required: no. -- `human` [bool]: Print sizes in human readable format (e.g., 1K 234M 2G). Required: no. - - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -{ - "BlocksReceived": "", - "BlocksSent": "", - "DataReceived": "", - "DataSent": "", - "DupBlksReceived": "", - "DupDataReceived": "", - "MessagesReceived": "", - "Peers": [ - "" - ], - "ProvideBufLen": "", - "Wantlist": [ - { - "/": "" - } - ] -} - -``` - -### cURL Example - -`curl -X POST "http://127.0.0.1:5001/api/v0/bitswap/stat?verbose=&human="` - ---- - - -## /api/v0/bitswap/wantlist - -Show blocks currently on the wantlist. - -### Arguments - -- `peer` [string]: Specify which peer to show wantlist for. Required: no. - - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -{ - "Keys": [ - { - "/": "" - } - ] -} - -``` - -### cURL Example - -`curl -X POST "http://127.0.0.1:5001/api/v0/bitswap/wantlist?peer="` - ---- - - -## /api/v0/block/get - -Get a raw IPFS block. - -### Arguments - -- `arg` [string]: The CID of an existing block to get. Required: **yes**. - - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -This endpoint returns a `text/plain` response body. -``` - -### cURL Example - -`curl -X POST "http://127.0.0.1:5001/api/v0/block/get?arg="` - ---- - - -## /api/v0/block/put - -Store input as an IPFS block. 
- -### Arguments - - -- `cid-codec` [string]: Multicodec to use in returned CID. Default: `raw`. Required: no. -- `mhtype` [string]: Multihash hash function. Default: `sha2-256`. Required: no. -- `mhlen` [int]: Multihash hash length. Default: `-1`. Required: no. -- `pin` [bool]: Pin added blocks recursively. Default: `false`. Required: no. -- `allow-big-block` [bool]: Disable block size check and allow creation of blocks bigger than 1MiB. WARNING: such blocks won't be transferable over the standard bitswap. Default: `false`. Required: no. -- `format` [string]: Use legacy format for returned CID (DEPRECATED). Required: no. - - -### Request Body - -Argument `data` is of file type. This endpoint expects one or several files (depending on the command) in the body of the request as 'multipart/form-data'. - - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -{ - "Key": "", - "Size": "" -} - -``` - -### cURL Example - -`curl -X POST -F file=@myfile "http://127.0.0.1:5001/api/v0/block/put?cid-codec=raw&mhtype=sha2-256&mhlen=-1&pin=false&allow-big-block=false&format="` - ---- - - -## /api/v0/block/rm - -Remove IPFS block(s) from the local datastore. - -### Arguments - -- `arg` [string]: CIDs of block(s) to remove. Required: **yes**. -- `force` [bool]: Ignore nonexistent blocks. Required: no. -- `quiet` [bool]: Write minimal output. Required: no. - - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -{ - "Error": "", - "Hash": "" -} - -``` - -### cURL Example - -`curl -X POST "http://127.0.0.1:5001/api/v0/block/rm?arg=&force=&quiet="` - ---- - - -## /api/v0/block/stat - -Print information of a raw IPFS block. - -### Arguments - -- `arg` [string]: The CID of an existing block to stat. Required: **yes**. - - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -{ - "Key": "", - "Size": "" -} - -``` - -### cURL Example - -`curl -X POST "http://127.0.0.1:5001/api/v0/block/stat?arg="` - ---- - - -## /api/v0/bootstrap - -Show or edit the list of bootstrap peers. - -### Arguments - -This endpoint takes no arguments. - - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -{ - "Peers": [ - "" - ] -} - -``` - -### cURL Example - -`curl -X POST "http://127.0.0.1:5001/api/v0/bootstrap"` - ---- - - -## /api/v0/bootstrap/add - -Add peers to the bootstrap list. - -### Arguments - -- `arg` [string]: A peer to add to the bootstrap list (in the format '<multiaddr>/<peerID>') Required: no. -- `default` [bool]: Add default bootstrap nodes. (Deprecated, use 'default' subcommand instead). Required: no. - - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -{ - "Peers": [ - "" - ] -} - -``` - -### cURL Example - -`curl -X POST "http://127.0.0.1:5001/api/v0/bootstrap/add?arg=&default="` - ---- - - -## /api/v0/bootstrap/add/default - -Add default peers to the bootstrap list. - -### Arguments - -This endpoint takes no arguments. - - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -{ - "Peers": [ - "" - ] -} - -``` - -### cURL Example - -`curl -X POST "http://127.0.0.1:5001/api/v0/bootstrap/add/default"` - ---- - - -## /api/v0/bootstrap/list - -Show peers in the bootstrap list. - -### Arguments - -This endpoint takes no arguments. 
- - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -{ - "Peers": [ - "" - ] -} - -``` - -### cURL Example - -`curl -X POST "http://127.0.0.1:5001/api/v0/bootstrap/list"` - ---- - - -## /api/v0/bootstrap/rm - -Remove peers from the bootstrap list. - -### Arguments - -- `arg` [string]: A peer to add to the bootstrap list (in the format '<multiaddr>/<peerID>') Required: no. -- `all` [bool]: Remove all bootstrap peers. (Deprecated, use 'all' subcommand). Required: no. - - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -{ - "Peers": [ - "" - ] -} - -``` - -### cURL Example - -`curl -X POST "http://127.0.0.1:5001/api/v0/bootstrap/rm?arg=&all="` - ---- - - -## /api/v0/bootstrap/rm/all - -Remove all peers from the bootstrap list. - -### Arguments - -This endpoint takes no arguments. - - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -{ - "Peers": [ - "" - ] -} - -``` - -### cURL Example - -`curl -X POST "http://127.0.0.1:5001/api/v0/bootstrap/rm/all"` - ---- - - -## /api/v0/cat - -Show IPFS object data. - -### Arguments - -- `arg` [string]: The path to the IPFS object(s) to be outputted. Required: **yes**. -- `offset` [int64]: Byte offset to begin reading from. Required: no. -- `length` [int64]: Maximum number of bytes to read. Required: no. -- `progress` [bool]: Stream progress data. Default: `true`. Required: no. - - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -This endpoint returns a `text/plain` response body. -``` - -### cURL Example - -`curl -X POST "http://127.0.0.1:5001/api/v0/cat?arg=&offset=&length=&progress=true"` - ---- - - -## /api/v0/cid/base32 - -Convert CIDs to Base32 CID version 1. - -### Arguments - -- `arg` [string]: CIDs to convert. Required: **yes**. - - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -{ - "CidStr": "", - "ErrorMsg": "", - "Formatted": "" -} - -``` - -### cURL Example - -`curl -X POST "http://127.0.0.1:5001/api/v0/cid/base32?arg="` - ---- - - -## /api/v0/cid/bases - -List available multibase encodings. - -### Arguments - -- `prefix` [bool]: also include the single letter prefixes in addition to the code. Required: no. -- `numeric` [bool]: also include numeric codes. Required: no. - - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -[ - { - "Code": "", - "Name": "" - } -] - -``` - -### cURL Example - -`curl -X POST "http://127.0.0.1:5001/api/v0/cid/bases?prefix=&numeric="` - ---- - - -## /api/v0/cid/codecs - -List available CID multicodecs. - -### Arguments - -- `numeric` [bool]: also include numeric codes. Required: no. -- `supported` [bool]: list only codecs supported by go-ipfs commands. Required: no. - - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -[ - { - "Code": "", - "Name": "" - } -] - -``` - -### cURL Example - -`curl -X POST "http://127.0.0.1:5001/api/v0/cid/codecs?numeric=&supported="` - ---- - - -## /api/v0/cid/format - -Format and convert a CID in various useful ways. - -### Arguments - -- `arg` [string]: CIDs to format. Required: **yes**. -- `f` [string]: Printf style format string. Default: %s. Default: `%s`. Required: no. -- `v` [string]: CID version to convert to. Required: no. 
-- `mc` [string]: CID multicodec to convert to. Required: no. -- `b` [string]: Multibase to display CID in. Required: no. - - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -{ - "CidStr": "", - "ErrorMsg": "", - "Formatted": "" -} - -``` - -### cURL Example - -`curl -X POST "http://127.0.0.1:5001/api/v0/cid/format?arg=&f=%s&v=&mc=&b="` - ---- - - -## /api/v0/cid/hashes - -List available multihashes. - -### Arguments - -- `numeric` [bool]: also include numeric codes. Required: no. -- `supported` [bool]: list only codecs supported by go-ipfs commands. Required: no. - - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -[ - { - "Code": "", - "Name": "" - } -] - -``` - -### cURL Example - -`curl -X POST "http://127.0.0.1:5001/api/v0/cid/hashes?numeric=&supported="` - ---- - - -## /api/v0/commands - -List all available commands. - -### Arguments - -- `flags` [bool]: Show command flags. Required: no. - - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -{ - "Name": "", - "Options": [ - { - "Names": [ - "" - ] - } - ], - "Subcommands": [ - { - "Name": "", - "Options": [ - { - "Names": [ - "" - ] - } - ], - "Subcommands": [ - "..." - ] - } - ] -} - -``` - -### cURL Example - -`curl -X POST "http://127.0.0.1:5001/api/v0/commands?flags="` - ---- - - -## /api/v0/commands/completion/bash - -Generate bash shell completions. - -### Arguments - -This endpoint takes no arguments. - - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -This endpoint returns a `text/plain` response body. -``` - -### cURL Example - -`curl -X POST "http://127.0.0.1:5001/api/v0/commands/completion/bash"` - ---- - - -## /api/v0/config - -Get and set IPFS config values. - -### Arguments - -- `arg` [string]: The key of the config entry (e.g. "Addresses.API"). Required: **yes**. -- `arg` [string]: The value to set the config entry to. Required: no. -- `bool` [bool]: Set a boolean value. Required: no. -- `json` [bool]: Parse stringified JSON. Required: no. - - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -{ - "Key": "", - "Value": "" -} - -``` - -### cURL Example - -`curl -X POST "http://127.0.0.1:5001/api/v0/config?arg=&arg=&bool=&json="` - ---- - - -## /api/v0/config/edit - -Open the config file for editing in $EDITOR. - -### Arguments - -This endpoint takes no arguments. - - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -This endpoint returns a `text/plain` response body. -``` - -### cURL Example - -`curl -X POST "http://127.0.0.1:5001/api/v0/config/edit"` - ---- - - -## /api/v0/config/profile/apply - -Apply profile to config. - -### Arguments - -- `arg` [string]: The profile to apply to the config. Required: **yes**. -- `dry-run` [bool]: print difference between the current config and the config that would be generated. Required: no. - - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -{ - "NewCfg": { - "": "" - }, - "OldCfg": { - "": "" - } -} - -``` - -### cURL Example - -`curl -X POST "http://127.0.0.1:5001/api/v0/config/profile/apply?arg=&dry-run="` - ---- - - -## /api/v0/config/replace - -Replace the config with <file>. 
- -### Arguments - - - - -### Request Body - -Argument `file` is of file type. This endpoint expects one or several files (depending on the command) in the body of the request as 'multipart/form-data'. - - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -This endpoint returns a `text/plain` response body. -``` - -### cURL Example - -`curl -X POST -F file=@myfile "http://127.0.0.1:5001/api/v0/config/replace"` - ---- - - -## /api/v0/config/show - -Output config file contents. - -### Arguments - -This endpoint takes no arguments. - - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -{ - "": "" -} - -``` - -### cURL Example - -`curl -X POST "http://127.0.0.1:5001/api/v0/config/show"` - ---- - - -## /api/v0/dag/export - -Streams the selected DAG as a .car stream on stdout. - -### Arguments - -- `arg` [string]: CID of a root to recursively export Required: **yes**. -- `progress` [bool]: Display progress on CLI. Defaults to true when STDERR is a TTY. Required: no. - - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -This endpoint returns a `text/plain` response body. -``` - -### cURL Example - -`curl -X POST "http://127.0.0.1:5001/api/v0/dag/export?arg=&progress="` - ---- - - -## /api/v0/dag/get - -Get a DAG node from IPFS. - -### Arguments - -- `arg` [string]: The object to get Required: **yes**. -- `output-codec` [string]: Format that the object will be encoded as. Default: `dag-json`. Required: no. - - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -This endpoint returns a `text/plain` response body. -``` - -### cURL Example - -`curl -X POST "http://127.0.0.1:5001/api/v0/dag/get?arg=&output-codec=dag-json"` - ---- - - -## /api/v0/dag/import - -Import the contents of .car files - -### Arguments - - -- `pin-roots` [bool]: Pin optional roots listed in the .car headers after importing. Default: `true`. Required: no. -- `silent` [bool]: No output. Required: no. -- `stats` [bool]: Output stats. Required: no. -- `allow-big-block` [bool]: Disable block size check and allow creation of blocks bigger than 1MiB. WARNING: such blocks won't be transferable over the standard bitswap. Default: `false`. Required: no. - - -### Request Body - -Argument `path` is of file type. This endpoint expects one or several files (depending on the command) in the body of the request as 'multipart/form-data'. - - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -{ - "Root": { - "Cid": { - "/": "" - }, - "PinErrorMsg": "" - }, - "Stats": { - "BlockBytesCount": "", - "BlockCount": "" - } -} - -``` - -### cURL Example - -`curl -X POST -F file=@myfile "http://127.0.0.1:5001/api/v0/dag/import?pin-roots=true&silent=&stats=&allow-big-block=false"` - ---- - - -## /api/v0/dag/put - -Add a DAG node to IPFS. - -### Arguments - - -- `store-codec` [string]: Codec that the stored object will be encoded with. Default: `dag-cbor`. Required: no. -- `input-codec` [string]: Codec that the input object is encoded in. Default: `dag-json`. Required: no. -- `pin` [bool]: Pin this object when adding. Required: no. -- `hash` [string]: Hash function to use. Default: `sha2-256`. Required: no. -- `allow-big-block` [bool]: Disable block size check and allow creation of blocks bigger than 1MiB. 
WARNING: such blocks won't be transferable over the standard bitswap. Default: `false`. Required: no. - - -### Request Body - -Argument `object data` is of file type. This endpoint expects one or several files (depending on the command) in the body of the request as 'multipart/form-data'. - - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -{ - "Cid": { - "/": "" - } -} - -``` - -### cURL Example - -`curl -X POST -F file=@myfile "http://127.0.0.1:5001/api/v0/dag/put?store-codec=dag-cbor&input-codec=dag-json&pin=&hash=sha2-256&allow-big-block=false"` - ---- - - -## /api/v0/dag/resolve - -Resolve IPLD block. - -### Arguments - -- `arg` [string]: The path to resolve Required: **yes**. - - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -{ - "Cid": { - "/": "" - }, - "RemPath": "" -} - -``` - -### cURL Example - -`curl -X POST "http://127.0.0.1:5001/api/v0/dag/resolve?arg="` - ---- - - -## /api/v0/dag/stat - -Gets stats for a DAG. - -### Arguments - -- `arg` [string]: CID of a DAG root to get statistics for Required: **yes**. -- `progress` [bool]: Return progressive data while reading through the DAG. Default: `true`. Required: no. - - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -{ - "NumBlocks": "", - "Size": "" -} - -``` - -### cURL Example - -`curl -X POST "http://127.0.0.1:5001/api/v0/dag/stat?arg=&progress=true"` - ---- - - -## /api/v0/dht/findpeer - -Find the multiaddresses associated with a Peer ID. - -### Arguments - -- `arg` [string]: The ID of the peer to search for. Required: **yes**. -- `verbose` [bool]: Print extra information. Required: no. - - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -{ - "Extra": "", - "ID": "", - "Responses": [ - { - "Addrs": [ - "" - ], - "ID": "peer-id" - } - ], - "Type": "" -} - -``` - -### cURL Example - -`curl -X POST "http://127.0.0.1:5001/api/v0/dht/findpeer?arg=&verbose="` - ---- - - -## /api/v0/dht/findprovs - -Find peers that can provide a specific value, given a key. - -### Arguments - -- `arg` [string]: The key to find providers for. Required: **yes**. -- `verbose` [bool]: Print extra information. Required: no. -- `num-providers` [int]: The number of providers to find. Default: `20`. Required: no. - - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -{ - "Extra": "", - "ID": "", - "Responses": [ - { - "Addrs": [ - "" - ], - "ID": "peer-id" - } - ], - "Type": "" -} - -``` - -### cURL Example - -`curl -X POST "http://127.0.0.1:5001/api/v0/dht/findprovs?arg=&verbose=&num-providers=20"` - ---- - - -## /api/v0/dht/get - -Given a key, query the routing system for its best value. - -### Arguments - -- `arg` [string]: The key to find a value for. Required: **yes**. -- `verbose` [bool]: Print extra information. Required: no. - - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -{ - "Extra": "", - "ID": "", - "Responses": [ - { - "Addrs": [ - "" - ], - "ID": "peer-id" - } - ], - "Type": "" -} - -``` - -### cURL Example - -`curl -X POST "http://127.0.0.1:5001/api/v0/dht/get?arg=&verbose="` - ---- - - -## /api/v0/dht/provide - -Announce to the network that you are providing given values. - -### Arguments - -- `arg` [string]: The key[s] to send provide records for. 
Required: **yes**. -- `verbose` [bool]: Print extra information. Required: no. -- `recursive` [bool]: Recursively provide entire graph. Required: no. - - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -{ - "Extra": "", - "ID": "", - "Responses": [ - { - "Addrs": [ - "" - ], - "ID": "peer-id" - } - ], - "Type": "" -} - -``` - -### cURL Example - -`curl -X POST "http://127.0.0.1:5001/api/v0/dht/provide?arg=&verbose=&recursive="` - ---- - - -## /api/v0/dht/put - -Write a key/value pair to the routing system. - -### Arguments - -- `arg` [string]: The key to store the value at. Required: **yes**. - -- `verbose` [bool]: Print extra information. Required: no. - - -### Request Body - -Argument `value-file` is of file type. This endpoint expects one or several files (depending on the command) in the body of the request as 'multipart/form-data'. - - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -{ - "Extra": "", - "ID": "", - "Responses": [ - { - "Addrs": [ - "" - ], - "ID": "peer-id" - } - ], - "Type": "" -} - -``` - -### cURL Example - -`curl -X POST -F file=@myfile "http://127.0.0.1:5001/api/v0/dht/put?arg=&verbose="` - ---- - - -## /api/v0/dht/query - -Find the closest Peer IDs to a given Peer ID by querying the DHT. - -### Arguments - -- `arg` [string]: The peerID to run the query against. Required: **yes**. -- `verbose` [bool]: Print extra information. Required: no. - - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -{ - "Extra": "", - "ID": "", - "Responses": [ - { - "Addrs": [ - "" - ], - "ID": "peer-id" - } - ], - "Type": "" -} - -``` - -### cURL Example - -`curl -X POST "http://127.0.0.1:5001/api/v0/dht/query?arg=&verbose="` - ---- - - -## /api/v0/diag/cmds - -List commands run on this IPFS node. - -### Arguments - -- `verbose` [bool]: Print extra information. Required: no. - - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -[ - { - "Active": "", - "Args": [ - "" - ], - "Command": "", - "EndTime": "", - "ID": "", - "Options": { - "": "" - }, - "StartTime": "" - } -] - -``` - -### cURL Example - -`curl -X POST "http://127.0.0.1:5001/api/v0/diag/cmds?verbose="` - ---- - - -## /api/v0/diag/cmds/clear - -Clear inactive requests from the log. - -### Arguments - -This endpoint takes no arguments. - - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -This endpoint returns a `text/plain` response body. -``` - -### cURL Example - -`curl -X POST "http://127.0.0.1:5001/api/v0/diag/cmds/clear"` - ---- - - -## /api/v0/diag/cmds/set-time - -Set how long to keep inactive requests in the log. - -### Arguments - -- `arg` [string]: Time to keep inactive requests in log. Required: **yes**. - - -### Response - -On success, the call to this endpoint will return with 200 and the following body: - -```json -This endpoint returns a `text/plain` response body. -``` - -### cURL Example - -`curl -X POST "http://127.0.0.1:5001/api/v0/diag/cmds/set-time?arg=