Integrate the pipe() syscall implementation originally written by @cynecx #4935
Changes from all commits
library_pipefs.js (new file):

@@ -0,0 +1,209 @@
mergeInto(LibraryManager.library, {
  $PIPEFS__postset: '__ATINIT__.push(function() { PIPEFS.root = FS.mount(PIPEFS, {}, null); });',
  $PIPEFS__deps: ['$FS'],
  $PIPEFS: {
    BUCKET_BUFFER_SIZE: 1024 * 8, // 8KiB Buffer
    mount: function (mount) {
      // Do not pollute the real root directory or its child nodes with pipes
      // Looks like it is OK to create another pseudo-root node not linked to the FS.root hierarchy this way
      return FS.createNode(null, '/', {{{ cDefine('S_IFDIR') }}} | 511 /* 0777 */, 0);
    },
    createPipe: function () {
      var pipe = {
        buckets: []
      };

      pipe.buckets.push({
        buffer: new Uint8Array(PIPEFS.BUCKET_BUFFER_SIZE),
        offset: 0,
        roffset: 0
      });

      var rName = PIPEFS.nextname();
      var wName = PIPEFS.nextname();
      var rNode = FS.createNode(PIPEFS.root, rName, {{{ cDefine('S_IFIFO') }}}, 0);
      var wNode = FS.createNode(PIPEFS.root, wName, {{{ cDefine('S_IFIFO') }}}, 0);

      rNode.pipe = pipe;
      wNode.pipe = pipe;

      var readableStream = FS.createStream({
        path: rName,
        node: rNode,
        flags: FS.modeStringToFlags('r'),
        seekable: false,
        stream_ops: PIPEFS.stream_ops
      });
      rNode.stream = readableStream;

      var writableStream = FS.createStream({
        path: wName,
        node: wNode,
        flags: FS.modeStringToFlags('w'),
        seekable: false,
        stream_ops: PIPEFS.stream_ops
      });
      wNode.stream = writableStream;

      return {
        readable_fd: readableStream.fd,
        writable_fd: writableStream.fd
      };
    },
    stream_ops: {
      poll: function (stream) {
        var pipe = stream.node.pipe;

        if ((stream.flags & {{{ cDefine('O_ACCMODE') }}}) === {{{ cDefine('O_WRONLY') }}}) {
          return ({{{ cDefine('POLLWRNORM') }}} | {{{ cDefine('POLLOUT') }}});
        } else {
          if (pipe.buckets.length > 0) {
            for (var i = 0; i < pipe.buckets.length; i++) {
              var bucket = pipe.buckets[i];
              if (bucket.offset - bucket.roffset > 0) {
                return ({{{ cDefine('POLLRDNORM') }}} | {{{ cDefine('POLLIN') }}});
              }
            }
          }
        }

        return 0;
      },
      ioctl: function (stream, request, varargs) {
        return ERRNO_CODES.EINVAL;
      },
      read: function (stream, buffer, offset, length, position /* ignored */) {
        var pipe = stream.node.pipe;
        var currentLength = 0;

        for (var i = 0; i < pipe.buckets.length; i++) {
          var bucket = pipe.buckets[i];
          currentLength += bucket.offset - bucket.roffset;
        }

        assert(buffer instanceof ArrayBuffer || ArrayBuffer.isView(buffer));
        var data = buffer.subarray(offset, offset + length);

        if (length <= 0) {
          return 0;
        }
        if (currentLength == 0) {
          // Behave as if the read end is always non-blocking
          throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
        }
        var toRead = Math.min(currentLength, length);

        var totalRead = toRead;
        var toRemove = 0;

        for (var i = 0; i < pipe.buckets.length; i++) {
          var currBucket = pipe.buckets[i];
          var bucketSize = currBucket.offset - currBucket.roffset;

          if (toRead <= bucketSize) {
            var tmpSlice = currBucket.buffer.subarray(currBucket.roffset, currBucket.offset);
            if (toRead < bucketSize) {
              tmpSlice = tmpSlice.subarray(0, toRead);
              currBucket.roffset += toRead;
            } else {
              toRemove++;
            }
            data.set(tmpSlice);
            break;
          } else {
            var tmpSlice = currBucket.buffer.subarray(currBucket.roffset, currBucket.offset);
            data.set(tmpSlice);
            data = data.subarray(tmpSlice.byteLength);
            toRead -= tmpSlice.byteLength;
            toRemove++;
          }
        }
Review comment (@cynecx): I think we could add something like this to prevent deallocating all buffers and leave one bucket alive:

Reply: Thank you @cynecx, integrated this code too :)
        if (toRemove && toRemove == pipe.buckets.length) {
          // Do not generate excessive garbage in use cases such as
          // write several bytes, read everything, write several bytes, read everything...
          toRemove--;
          pipe.buckets[toRemove].offset = 0;
          pipe.buckets[toRemove].roffset = 0;
        }

        pipe.buckets.splice(0, toRemove);

        return totalRead;
      },
      write: function (stream, buffer, offset, length, position /* ignored */) {
        var pipe = stream.node.pipe;

        assert(buffer instanceof ArrayBuffer || ArrayBuffer.isView(buffer));
        var data = buffer.subarray(offset, offset + length);

        var dataLen = data.byteLength;
        if (dataLen <= 0) {
          return 0;
        }

        var currBucket = null;

        if (pipe.buckets.length == 0) {
          currBucket = {
            buffer: new Uint8Array(PIPEFS.BUCKET_BUFFER_SIZE),
            offset: 0,
            roffset: 0
          };
          pipe.buckets.push(currBucket);
        } else {
          currBucket = pipe.buckets[pipe.buckets.length - 1];
        }

        assert(currBucket.offset <= PIPEFS.BUCKET_BUFFER_SIZE);

        var freeBytesInCurrBuffer = PIPEFS.BUCKET_BUFFER_SIZE - currBucket.offset;
        if (freeBytesInCurrBuffer >= dataLen) {
          currBucket.buffer.set(data, currBucket.offset);
          currBucket.offset += dataLen;
          return dataLen;
        } else if (freeBytesInCurrBuffer > 0) {
          currBucket.buffer.set(data.subarray(0, freeBytesInCurrBuffer), currBucket.offset);
          currBucket.offset += freeBytesInCurrBuffer;
          data = data.subarray(freeBytesInCurrBuffer, data.byteLength);
        }

        var numBuckets = (data.byteLength / PIPEFS.BUCKET_BUFFER_SIZE) | 0;
        var remElements = data.byteLength % PIPEFS.BUCKET_BUFFER_SIZE;

        for (var i = 0; i < numBuckets; i++) {
          var newBucket = {
            buffer: new Uint8Array(PIPEFS.BUCKET_BUFFER_SIZE),
            offset: PIPEFS.BUCKET_BUFFER_SIZE,
            roffset: 0
          };
          pipe.buckets.push(newBucket);
          newBucket.buffer.set(data.subarray(0, PIPEFS.BUCKET_BUFFER_SIZE));
          data = data.subarray(PIPEFS.BUCKET_BUFFER_SIZE, data.byteLength);
        }

        if (remElements > 0) {
          var newBucket = {
            buffer: new Uint8Array(PIPEFS.BUCKET_BUFFER_SIZE),
            offset: data.byteLength,
            roffset: 0
          };
          pipe.buckets.push(newBucket);
          newBucket.buffer.set(data);
        }

        return dataLen;
      },
      close: function (stream) {
        var pipe = stream.node.pipe;
        pipe.buckets = null;
      }
    },
    nextname: function () {
      if (!PIPEFS.nextname.current) {
        PIPEFS.nextname.current = 0;
      }
      return 'pipe[' + (PIPEFS.nextname.current++) + ']';
    },
  },
});
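The readable_fd/writable_fd pair returned by createPipe() is what the pipe() syscall ultimately stores into the caller's int fds[2] array; the syscall glue itself lives in a file of this PR whose diff is not shown here. As a rough, hypothetical sketch only (the name do_pipe and the fdPtr argument are placeholders rather than this PR's actual code, and it assumes the usual Emscripten HEAP32 and ERRNO_CODES globals), a handler consuming createPipe() could look roughly like this:

// Hypothetical sketch, not part of this diff: fdPtr is the pointer to the caller's
// int fds[2] array, as passed to pipe() on the C side.
function do_pipe(fdPtr) {
  if (!fdPtr) {
    return -ERRNO_CODES.EFAULT; // pipe(NULL) fails with EFAULT
  }
  var res = PIPEFS.createPipe();
  HEAP32[fdPtr >> 2] = res.readable_fd;       // fds[0]: read end
  HEAP32[(fdPtr + 4) >> 2] = res.writable_fd; // fds[1]: write end
  return 0;
}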
@@ -113,6 +113,7 @@ var LibraryManager = {
   'library_fs.js',
   'library_memfs.js',
   'library_tty.js',
+  'library_pipefs.js',
Review comment: Oh, I presume the […]

Reply: Unfortunately, moving […]

Reply: It is likely that these names go to a dictionary later on, so the alphabetical ordering will be restored later even if they are not present now. However, I think there is no need to worry if it works, since issues of not having initialized the filesystem should be apparent if that happens. (For curiosity, you could try renaming the file to […])
 ]);

 // Additional filesystem libraries (in strict mode, link to these explicitly via -lxxx.js)
Large diffs are not rendered by default.
@@ -153,6 +153,7 @@
   "POLLHUP",
   "POLLERR",
   "POLLRDNORM",
+  "POLLWRNORM",
   "POLLPRI",
   "POLLIN",
   "POLLOUT",
Review comment: Hmm, this is actually a bit suspect: I don't think we have any type of ordering guaranteed in our JS dependencies, so calling FS.mount() here as a postset to PIPEFS might not guarantee that FS.staticInit() would have been called before. I think if it does, then it's due to 'f' < 'p' alphabetically by accident?

Reply: I think it may be worth looking at sockfs -- it seems to have the same dependency.

Reply: That is a good observation. Reading closer, it looks like FS.init.initialized does not need to be true in order to be able to FS.mount() (FS does that itself in FS.staticInit() as well). However, FS.staticInit() must have been called before FS.mount() can be. I think if this does not hold, then errors will naturally manifest through the absence of FS.nameTable, so things should be good here.
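For reference, the sockfs declaration mentioned above follows the same pattern. The lines below are an approximate reconstruction of library_sockfs.js around this time, quoted from memory, so treat the exact contents as an assumption rather than as part of this diff:

mergeInto(LibraryManager.library, {
  // SOCKFS mounts its own pseudo-root through the same __postset/__ATINIT__ mechanism
  // and declares the same '$FS' dependency that PIPEFS uses in this PR.
  $SOCKFS__postset: '__ATINIT__.push(function() { SOCKFS.root = FS.mount(SOCKFS, {}, null); });',
  $SOCKFS__deps: ['$FS'],
  $SOCKFS: {
    mount: function (mount) {
      // Create a detached pseudo-root for socket nodes, analogous to PIPEFS.mount above.
      return FS.createNode(null, '/', {{{ cDefine('S_IFDIR') }}} | 511 /* 0777 */, 0);
    },
    // ... socket-specific stream_ops and websocket handling omitted ...
  },
});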