Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
209 changes: 209 additions & 0 deletions src/library_pipefs.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,209 @@
mergeInto(LibraryManager.library, {
$PIPEFS__postset: '__ATINIT__.push(function() { PIPEFS.root = FS.mount(PIPEFS, {}, null); });',
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hmm, this is actually a bit suspect: I don't think we have any type of ordering guaranteed in our JS dependencies, so calling FS.mount() here as a postset to PIPEFS might not guarantee that FS.staticInit() would have been called before. I think if it does, then it's due to 'f' < 'p' alphabetically by accident?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think it may be worth looking at sockfs — it seems to have the same dependency.

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

That is a good observation.

Reading closer, it looks like FS.init.initialized does not need to be true for FS.mount() to work (FS calls mount itself from FS.staticInit() as well). However, FS.staticInit() must have been called before FS.mount() can be. I think if this does not hold, errors will naturally manifest through the absence of FS.nameTable, so things should be good here.

$PIPEFS__deps: ['$FS'],
$PIPEFS: {
BUCKET_BUFFER_SIZE: 1024 * 8, // 8KiB Buffer
mount: function (mount) {
// Do not pollute the real root directory or its child nodes with pipes
// Looks like it is OK to create another pseudo-root node not linked to the FS.root hierarchy this way
return FS.createNode(null, '/', {{{ cDefine('S_IFDIR') }}} | 511 /* 0777 */, 0);
},
createPipe: function () {
var pipe = {
buckets: []
};

pipe.buckets.push({
buffer: new Uint8Array(PIPEFS.BUCKET_BUFFER_SIZE),
offset: 0,
roffset: 0
});

var rName = PIPEFS.nextname();
var wName = PIPEFS.nextname();
var rNode = FS.createNode(PIPEFS.root, rName, {{{ cDefine('S_IFIFO') }}}, 0);
var wNode = FS.createNode(PIPEFS.root, wName, {{{ cDefine('S_IFIFO') }}}, 0);

rNode.pipe = pipe;
wNode.pipe = pipe;

var readableStream = FS.createStream({
path: rName,
node: rNode,
flags: FS.modeStringToFlags('r'),
seekable: false,
stream_ops: PIPEFS.stream_ops
});
rNode.stream = readableStream;

var writableStream = FS.createStream({
path: wName,
node: wNode,
flags: FS.modeStringToFlags('w'),
seekable: false,
stream_ops: PIPEFS.stream_ops
});
wNode.stream = writableStream;

return {
readable_fd: readableStream.fd,
writable_fd: writableStream.fd
};
},
stream_ops: {
poll: function (stream) {
var pipe = stream.node.pipe;

if ((stream.flags & {{{ cDefine('O_ACCMODE') }}}) === {{{ cDefine('O_WRONLY') }}}) {
return ({{{ cDefine('POLLWRNORM') }}} | {{{ cDefine('POLLOUT') }}});
} else {
if (pipe.buckets.length > 0) {
for (var i = 0; i < pipe.buckets.length; i++) {
var bucket = pipe.buckets[i];
if (bucket.offset - bucket.roffset > 0) {
return ({{{ cDefine('POLLRDNORM') }}} | {{{ cDefine('POLLIN') }}});
}
}
}
}

return 0;
},
ioctl: function (stream, request, varargs) {
// Pipes support no ioctl requests.
// NOTE(review): this returns a positive errno value, while the other ops
// in this object signal errors by throwing FS.ErrnoError — confirm that
// callers of stream_ops.ioctl expect a plain return value here.
return ERRNO_CODES.EINVAL;
},
read: function (stream, buffer, offset, length, position /* ignored */) {
// Reads up to `length` bytes from the pipe into `buffer` starting at
// `offset`. Returns the number of bytes read; throws EAGAIN when the pipe
// is empty (the read end behaves as always non-blocking).
var pipe = stream.node.pipe;
var currentLength = 0;

// Count the unread bytes buffered across all buckets.
for (var i = 0; i < pipe.buckets.length; i++) {
var bucket = pipe.buckets[i];
currentLength += bucket.offset - bucket.roffset;
}

// NOTE(review): the assert admits a raw ArrayBuffer, but ArrayBuffer has
// no subarray(), so such an argument would throw on the next line —
// confirm callers only ever pass typed-array views.
assert(buffer instanceof ArrayBuffer || ArrayBuffer.isView(buffer));
var data = buffer.subarray(offset, offset + length);

if (length <= 0) {
return 0;
}
if (currentLength == 0) {
// Behave as if the read end is always non-blocking
throw new FS.ErrnoError(ERRNO_CODES.EAGAIN);
}
var toRead = Math.min(currentLength, length);

var totalRead = toRead;
// Number of fully-drained buckets to drop from the head of the queue.
var toRemove = 0;

// Copy bucket contents into `data`, advancing each bucket's read offset.
for (var i = 0; i < pipe.buckets.length; i++) {
var currBucket = pipe.buckets[i];
var bucketSize = currBucket.offset - currBucket.roffset;

if (toRead <= bucketSize) {
// The remainder of the request fits in this bucket.
var tmpSlice = currBucket.buffer.subarray(currBucket.roffset, currBucket.offset);
if (toRead < bucketSize) {
tmpSlice = tmpSlice.subarray(0, toRead);
currBucket.roffset += toRead;
} else {
// The bucket is consumed exactly; mark it for removal.
toRemove++;
}
data.set(tmpSlice);
break;
} else {
// Consume this bucket entirely and continue with the next one.
var tmpSlice = currBucket.buffer.subarray(currBucket.roffset, currBucket.offset);
data.set(tmpSlice);
data = data.subarray(tmpSlice.byteLength);
toRead -= tmpSlice.byteLength;
toRemove++;
}
}

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think we could add something like this to prevent deallocating all buffers and leave one bucket alive:

if (toRemove && toRemove == pipe.buckets.length) {
    toRemove--;
    pipe.buckets[toRemove].offset = 0;
    pipe.buckets[toRemove].roffset = 0;
}

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Thank you @cynecx, integrated this code too :)

if (toRemove && toRemove == pipe.buckets.length) {
// Do not generate excessive garbage in use cases such as
// write several bytes, read everything, write several bytes, read everything...
// Keep the last bucket alive, just reset its offsets.
toRemove--;
pipe.buckets[toRemove].offset = 0;
pipe.buckets[toRemove].roffset = 0;
}

// Drop the fully-drained buckets from the head of the queue.
pipe.buckets.splice(0, toRemove);

return totalRead;
},
write: function (stream, buffer, offset, length, position /* ignored */) {
var pipe = stream.node.pipe;

assert(buffer instanceof ArrayBuffer || ArrayBuffer.isView(buffer));
var data = buffer.subarray(offset, offset + length);

var dataLen = data.byteLength;
if (dataLen <= 0) {
return 0;
}

var currBucket = null;

if (pipe.buckets.length == 0) {
currBucket = {
buffer: new Uint8Array(PIPEFS.BUCKET_BUFFER_SIZE),
offset: 0,
roffset: 0
};
pipe.buckets.push(currBucket);
} else {
currBucket = pipe.buckets[pipe.buckets.length - 1];
}

assert(currBucket.offset <= PIPEFS.BUCKET_BUFFER_SIZE);

var freeBytesInCurrBuffer = PIPEFS.BUCKET_BUFFER_SIZE - currBucket.offset;
if (freeBytesInCurrBuffer >= dataLen) {
currBucket.buffer.set(data, currBucket.offset);
currBucket.offset += dataLen;
return dataLen;
} else if (freeBytesInCurrBuffer > 0) {
currBucket.buffer.set(data.subarray(0, freeBytesInCurrBuffer), currBucket.offset);
currBucket.offset += freeBytesInCurrBuffer;
data = data.subarray(freeBytesInCurrBuffer, data.byteLength);
}

var numBuckets = (data.byteLength / PIPEFS.BUCKET_BUFFER_SIZE) | 0;
var remElements = data.byteLength % PIPEFS.BUCKET_BUFFER_SIZE;

for (var i = 0; i < numBuckets; i++) {
var newBucket = {
buffer: new Uint8Array(PIPEFS.BUCKET_BUFFER_SIZE),
offset: PIPEFS.BUCKET_BUFFER_SIZE,
roffset: 0
};
pipe.buckets.push(newBucket);
newBucket.buffer.set(data.subarray(0, PIPEFS.BUCKET_BUFFER_SIZE));
data = data.subarray(PIPEFS.BUCKET_BUFFER_SIZE, data.byteLength);
}

if (remElements > 0) {
var newBucket = {
buffer: new Uint8Array(PIPEFS.BUCKET_BUFFER_SIZE),
offset: data.byteLength,
roffset: 0
};
pipe.buckets.push(newBucket);
newBucket.buffer.set(data);
}

return dataLen;
},
close: function (stream) {
// Release the buffered data when a pipe end is closed.
// NOTE(review): `pipe` is shared by both ends, so nulling `buckets` here
// also affects the other, possibly still-open end — a subsequent
// read/write on it would fail on pipe.buckets. Consider reference-counting
// the ends before freeing. TODO confirm intended close semantics.
var pipe = stream.node.pipe;
pipe.buckets = null;
}
},
nextname: function () {
if (!PIPEFS.nextname.current) {
PIPEFS.nextname.current = 0;
}
return 'pipe[' + (PIPEFS.nextname.current++) + ']';
},
},
});
17 changes: 15 additions & 2 deletions src/library_syscall.js
Original file line number Diff line number Diff line change
Expand Up @@ -294,7 +294,21 @@ var SyscallsLibrary = {
var old = SYSCALLS.getStreamFromFD();
return FS.open(old.path, old.flags, 0).fd;
},
__syscall42: '__syscall51', // pipe
__syscall42__deps: ['$PIPEFS'],
// pipe(2): create a pipe and store the two new file descriptors
// (read end first, then write end) into the int[2] array at fdPtr.
__syscall42: function(which, varargs) { // pipe
var fdPtr = SYSCALLS.get();

if (fdPtr == 0) {
// NULL pipefd pointer: fault, per POSIX.
throw new FS.ErrnoError(ERRNO_CODES.EFAULT);
}

var res = PIPEFS.createPipe();

// pipefd[0] = readable fd, pipefd[1] = writable fd (4-byte ints).
{{{ makeSetValue('fdPtr', 0, 'res.readable_fd', 'i32') }}};
{{{ makeSetValue('fdPtr', 4, 'res.writable_fd', 'i32') }}};

return 0;
},
__syscall51: function(which, varargs) { // acct
return -ERRNO_CODES.ENOSYS; // unsupported features
},
Expand Down Expand Up @@ -1696,4 +1710,3 @@ SyscallsLibrary.emscripten_syscall = eval('(' + switcher + ')');
#endif

mergeInto(LibraryManager.library, SyscallsLibrary);

1 change: 1 addition & 0 deletions src/modules.js
Original file line number Diff line number Diff line change
Expand Up @@ -113,6 +113,7 @@ var LibraryManager = {
'library_fs.js',
'library_memfs.js',
'library_tty.js',
'library_pipefs.js',
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Oh, I presume the fs < pipefs order does end up coming from here. In that case, probably worth to add a note here that pipefs static init depends on fs static init being called before, so the ordering in this list is fixed.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Unfortunately, moving library_pipefs.js above library_fs.js here does not seem to change the order in which postsets are emitted in the resulting js. Reversing the __deps to $FS -> $PIPEFS does not seem to change it either. So it works, but no one knows how. :(

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It is likely that these names go to a dictionary later on, so the alphabetical ordering will be restored later even if they are not present now. However I think no need to worry if it works, since issues of not having initialized the filesystem should be apparent if that happens. (For curiosity, you could try renaming the file to library_aipefs.js or something similar and see if that causes issues)

]);

// Additional filesystem libraries (in strict mode, link to these explicitly via -lxxx.js)
Expand Down
2 changes: 1 addition & 1 deletion src/struct_info.compiled.json

Large diffs are not rendered by default.

1 change: 1 addition & 0 deletions src/struct_info.json
Original file line number Diff line number Diff line change
Expand Up @@ -153,6 +153,7 @@
"POLLHUP",
"POLLERR",
"POLLRDNORM",
"POLLWRNORM",
"POLLPRI",
"POLLIN",
"POLLOUT",
Expand Down
4 changes: 4 additions & 0 deletions tests/test_core.py
Original file line number Diff line number Diff line change
Expand Up @@ -4466,6 +4466,10 @@ def test_unistd_ttyname(self):
src = open(path_from_root('tests', 'unistd', 'ttyname.c'), 'r').read()
self.do_run(src, 'success', force_c=True)

def test_unistd_pipe(self):
src = open(path_from_root('tests', 'unistd', 'pipe.c'), 'r').read()
self.do_run(src, 'success', force_c=True)

def test_unistd_dup(self):
src = open(path_from_root('tests', 'unistd', 'dup.c'), 'r').read()
expected = open(path_from_root('tests', 'unistd', 'dup.out'), 'r').read()
Expand Down
4 changes: 2 additions & 2 deletions tests/unistd/misc.out
Original file line number Diff line number Diff line change
Expand Up @@ -11,8 +11,8 @@ lockf(good): 0, errno: 0
lockf(bad): -1, errno: 9
nice: -1, errno: 1
pause: -1, errno: 4
pipe(good): -1, errno: 38
pipe(bad): -1, errno: 38
pipe(good): 0, errno: 0
pipe(bad): -1, errno: 14
execl: -1, errno: 8
execle: -1, errno: 8
execlp: -1, errno: 8
Expand Down
Loading