 /**
  * @packageDocumentation
  *
- * The `performanceService` implements the [perf protocol](https://github.com/libp2p/specs/blob/master/perf/perf.md), which is used to measure performance within and across libp2p implementations
- * addresses.
+ * The {@link PerfService} implements the [perf protocol](https://github.com/libp2p/specs/blob/master/perf/perf.md), which can be used to measure transfer performance within and across libp2p implementations.
  *
  * @example
  *
  * ```typescript
- * import { createLibp2p } from 'libp2p'
- * import { perfService } from '@libp2p/perf'
+ * import { noise } from '@chainsafe/libp2p-noise'
+ * import { yamux } from '@chainsafe/libp2p-yamux'
+ * import { mplex } from '@libp2p/mplex'
+ * import { tcp } from '@libp2p/tcp'
+ * import { createLibp2p, type Libp2p } from 'libp2p'
+ * import { plaintext } from 'libp2p/insecure'
+ * import { perfService, type PerfService } from '@libp2p/perf'
  *
- * const node = await createLibp2p({
- *   service: [
- *     perfService()
- *   ]
- * })
- * ```
- *
- * The `measurePerformance` function can be used to measure the latency and throughput of a connection.
- * server. This will not work in browsers.
+ * const ONE_MEG = 1024 * 1024
+ * const UPLOAD_BYTES = ONE_MEG * 1024
+ * const DOWNLOAD_BYTES = ONE_MEG * 1024
  *
- * @example
- *
- * ```typescript
- * import { createLibp2p } from 'libp2p'
- * import { perfService } from 'libp2p/perf'
+ * async function createNode (): Promise<Libp2p<{ perf: PerfService }>> {
+ *   return createLibp2p({
+ *     addresses: {
+ *       listen: [
+ *         '/ip4/0.0.0.0/tcp/0'
+ *       ]
+ *     },
+ *     transports: [
+ *       tcp()
+ *     ],
+ *     connectionEncryption: [
+ *       noise(), plaintext()
+ *     ],
+ *     streamMuxers: [
+ *       yamux(), mplex()
+ *     ],
+ *     services: {
+ *       perf: perfService()
+ *     }
+ *   })
+ * }
  *
- * const node = await createLibp2p({
- *   services: [
- *     perf: perfService()
- *   ]
- * })
+ * const libp2p1 = await createNode()
+ * const libp2p2 = await createNode()
  *
- * const connection = await node.dial(multiaddr(multiaddrAddress))
- *
- * const startTime = Date.now()
- *
- * await node.services.perf.measurePerformance(startTime, connection, BigInt(uploadBytes), BigInt(downloadBytes))
+ * for await (const output of libp2p1.services.perf.measurePerformance(libp2p2.getMultiaddrs()[0], UPLOAD_BYTES, DOWNLOAD_BYTES)) {
+ *   console.info(output)
+ * }
  *
+ * await libp2p1.stop()
+ * await libp2p2.stop()
  * ```
  */

-import { logger } from '@libp2p/logger'
-import { PROTOCOL_NAME, WRITE_BLOCK_SIZE } from './constants.js'
-import type { Connection } from '@libp2p/interface/connection'
-import type { Startable } from '@libp2p/interface/startable'
+import { PerfService as PerfServiceClass } from './perf-service.js'
+import type { AbortOptions } from '@libp2p/interface'
 import type { ConnectionManager } from '@libp2p/interface-internal/connection-manager'
-import type { IncomingStreamData, Registrar } from '@libp2p/interface-internal/registrar'
-import type { AbortOptions } from '@libp2p/interfaces'
-
-const log = logger('libp2p:perf')
-
-export const defaultInit: PerfServiceInit = {
-  protocolName: '/perf/1.0.0',
-  writeBlockSize: BigInt(64 << 10)
+import type { Registrar } from '@libp2p/interface-internal/registrar'
+import type { Multiaddr } from '@multiformats/multiaddr'
+
+export interface PerfOptions extends AbortOptions {
+  /**
+   * By default measuring perf should include the time it takes to establish a
+   * connection, so a new connection will be opened for every performance run.
+   *
+   * To override this and re-use an existing connection if one is present, pass
+   * `true` here. (default: false)
+   */
+  reuseExistingConnection?: boolean
 }

 export interface PerfService {
-  measurePerformance(startTime: number, connection: Connection, sendBytes: bigint, recvBytes: bigint, options?: AbortOptions): Promise<number>
+  measurePerformance(multiaddr: Multiaddr, sendBytes: number, recvBytes: number, options?: PerfOptions): AsyncGenerator<PerfOutput>
+}
+
+export interface PerfOutput {
+  type: 'intermediary' | 'final'
+  timeSeconds: number
+  uploadBytes: number
+  downloadBytes: number
 }

 export interface PerfServiceInit {
   protocolName?: string
   maxInboundStreams?: number
   maxOutboundStreams?: number
-  timeout?: number
-  writeBlockSize?: bigint
+  runOnTransientConnection?: boolean
+
+  /**
+   * Data sent/received will be sent in chunks of this size (default: 64KiB)
+   */
+  writeBlockSize?: number
 }

 export interface PerfServiceComponents {
   registrar: Registrar
   connectionManager: ConnectionManager
 }

-class DefaultPerfService implements Startable, PerfService {
-  public readonly protocol: string
-  private readonly components: PerfServiceComponents
-  private started: boolean
-  private readonly databuf: ArrayBuffer
-  private readonly writeBlockSize: bigint
-
-  constructor (components: PerfServiceComponents, init: PerfServiceInit) {
-    this.components = components
-    this.started = false
-    this.protocol = init.protocolName ?? PROTOCOL_NAME
-    this.writeBlockSize = init.writeBlockSize ?? WRITE_BLOCK_SIZE
-    this.databuf = new ArrayBuffer(Number(init.writeBlockSize))
-  }
-
-  async start (): Promise<void> {
-    await this.components.registrar.handle(this.protocol, (data: IncomingStreamData) => {
-      void this.handleMessage(data).catch((err) => {
-        log.error('error handling perf protocol message', err)
-      })
-    })
-    this.started = true
-  }
-
-  async stop (): Promise<void> {
-    await this.components.registrar.unhandle(this.protocol)
-    this.started = false
-  }
-
-  isStarted (): boolean {
-    return this.started
-  }
-
-  async handleMessage (data: IncomingStreamData): Promise<void> {
-    const { stream } = data
-
-    const writeBlockSize = this.writeBlockSize
-
-    let bytesToSendBack: bigint | null = null
-
-    for await (const buf of stream.source) {
-      if (bytesToSendBack === null) {
-        bytesToSendBack = BigInt(buf.getBigUint64(0, false))
-      }
-      // Ingest all the bufs and wait for the read side to close
-    }
-
-    const uint8Buf = new Uint8Array(this.databuf)
-
-    if (bytesToSendBack === null) {
-      throw new Error('bytesToSendBack was not set')
-    }
-
-    await stream.sink(async function * () {
-      while (bytesToSendBack > 0n) {
-        let toSend: bigint = writeBlockSize
-        if (toSend > bytesToSendBack) {
-          toSend = bytesToSendBack
-        }
-        bytesToSendBack = bytesToSendBack - toSend
-        yield uint8Buf.subarray(0, Number(toSend))
-      }
-    }())
-  }
-
-  async measurePerformance (startTime: number, connection: Connection, sendBytes: bigint, recvBytes: bigint, options: AbortOptions = {}): Promise<number> {
-    log('opening stream on protocol %s to %p', this.protocol, connection.remotePeer)
-
-    const uint8Buf = new Uint8Array(this.databuf)
-
-    const writeBlockSize = this.writeBlockSize
-
-    const stream = await connection.newStream([this.protocol], options)
-
-    // Convert sendBytes to uint64 big endian buffer
-    const view = new DataView(this.databuf)
-    view.setBigInt64(0, recvBytes, false)
-
-    log('sending %i bytes to %p', sendBytes, connection.remotePeer)
-    try {
-      await stream.sink((async function * () {
-        // Send the number of bytes to receive
-        yield uint8Buf.subarray(0, 8)
-        // Send the number of bytes to send
-        while (sendBytes > 0n) {
-          let toSend: bigint = writeBlockSize
-          if (toSend > sendBytes) {
-            toSend = sendBytes
-          }
-          sendBytes = sendBytes - toSend
-          yield uint8Buf.subarray(0, Number(toSend))
-        }
-      })())
-
-      // Read the received bytes
-      let actualRecvdBytes = BigInt(0)
-      for await (const buf of stream.source) {
-        actualRecvdBytes += BigInt(buf.length)
-      }
-
-      if (actualRecvdBytes !== recvBytes) {
-        throw new Error(`Expected to receive ${recvBytes} bytes, but received ${actualRecvdBytes}`)
-      }
-    } catch (err) {
-      log('error sending %i bytes to %p: %s', sendBytes, connection.remotePeer, err)
-      throw err
-    } finally {
-      log('performed %s to %p', this.protocol, connection.remotePeer)
-      await stream.close()
-    }
-
-    // Return the latency
-    return Date.now() - startTime
-  }
-}
-
 export function perfService (init: PerfServiceInit = {}): (components: PerfServiceComponents) => PerfService {
-  return (components) => new DefaultPerfService(components, init)
+  return (components) => new PerfServiceClass(components, init)
 }
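
This change replaces the promise-returning `measurePerformance(startTime, connection, ...)` with an async generator that dials a multiaddr itself and yields `PerfOutput` updates as the transfer runs. As a minimal sketch of how a consumer of the new API might look (the `runPerf` helper, the node setup it assumes, and the byte counts are illustrative only, not part of the package):

```typescript
import { multiaddr } from '@multiformats/multiaddr'
import type { PerfOutput, PerfService } from '@libp2p/perf'
import type { Libp2p } from 'libp2p'

// Assumes `node` was created as in the @packageDocumentation example above,
// i.e. with `services: { perf: perfService() }`.
async function runPerf (node: Libp2p<{ perf: PerfService }>, addr: string): Promise<PerfOutput | undefined> {
  let final: PerfOutput | undefined

  // reuseExistingConnection excludes connection establishment from the measurement;
  // omit it (the default) to measure the full dial + transfer time
  for await (const output of node.services.perf.measurePerformance(multiaddr(addr), 1024 * 1024, 1024 * 1024, { reuseExistingConnection: true })) {
    if (output.type === 'intermediary') {
      // periodic progress updates while the transfer is still running
      console.info(`${output.uploadBytes} bytes up / ${output.downloadBytes} bytes down in ${output.timeSeconds}s`)
    } else {
      // the last value yielded covers the whole run
      final = output
    }
  }

  return final
}
```

Throughput can then be derived from the final output, e.g. `(uploadBytes + downloadBytes) / timeSeconds`, and per-run tuning such as `writeBlockSize` is passed to `perfService(init)` when the node is created.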