// FIXME: Maybe also an abstraction for 'handle queue of requests', as this is used in multiple stream implementations
// TODO: Improve robustness of stream-end handling using https://nodejs.org/dist/latest-v14.x/docs/api/stream.html#stream_stream_finished_stream_options_callback?
// FIXME: Sequentialize all of these?
// readable
// writable
// transform
// duplex
// Auto-detects what kind of Node stream `stream` is, and wraps it with the
// matching conversion helper. Duplex streams are rejected outright (see the
// error message below), except for the special-cased standard I/O streams.
// NOTE(review): whitespace has been stripped from this chunk, and one or more
// `if (...) {` lines appear to be missing between the comments and the `throw`
// below — the closing braces visible here do not balance with the visible
// opening braces. Code tokens are preserved byte-for-byte; confirm against the
// original file before editing the logic.
module.exports=functionconvert(stream){
// FIXME: Proper validation and tagging
// FIXME: Wrap v1 streams
// NOTE: Standard I/O streams are specialcased here because they may be Duplex streams; even though the other half is never actually used. We're only interested in the interface that *is* being used.
thrownewError(`Duplex streams cannot be converted with the auto-detection API. Instead, use 'fromReadable' and/or 'fromWritable' manually, depending on which parts of the Duplex stream you are interested in.`);
}
}else{
// Writable-only stream: wrap just the writable interface.
returnfromWritable(stream);
}
}elseif(stream.readable!=null){
// Readable-only stream: wrap just the readable interface.
returnfromReadable(stream);
}else{
// Neither `readable` nor `writable` is present — not a Node stream at all.
thrownewError(`Not a Node stream`);
}
};
// FIXME: Duplex APIs
// Wraps a Node readable stream as a simple-stream source: read requests and
// aborts are forwarded to the wired-up readable interface.
// NOTE(review): `wireUpReadableInterface` and `simpleSource` are defined
// elsewhere in this module; only their usage is visible here.
function fromReadable(stream) {
	let nodeReadable = wireUpReadableInterface(stream);

	return simpleSource({
		// Forward every read request to the underlying Node stream.
		onRequest: function () {
			return nodeReadable.request();
		},
		// An abort tears down the underlying Node stream.
		onAbort: function () {
			return nodeReadable.destroy();
		}
	});
}
// Wraps a Node writable stream as a simple-stream sink: incoming results are
// written to the stream, end/abort are forwarded, and unexpected closes or
// errors on the Node stream abort the converted pipeline.
function fromWritable(stream) {
	// Declared up-front so the `simpleSink` callbacks below can close over it;
	// it is only assigned after the sink exists, because the two objects
	// reference each other cyclically.
	let writable;
	let sourceHasEnded = false;
	// FIXME: Replace with a proper spec-compliant dummy stream
	let lastSeenSource = { abort: function () {} };

	let convertedStream = simpleSink({
		onResult: function (result) {
			return writable.write(result);
		},
		onEnd: function () {
			sourceHasEnded = true;
			return writable.end();
		},
		onAbort: function (_reason) {
			return writable.destroy();
		},
		// Track the most recent upstream source, so that error notifications
		// below can reference it.
		onSourceChanged: function (newSource) {
			lastSeenSource = newSource;
		}
	});

	writable = wireUpWritableInterface(stream, {
		onClose: function () {
			// A close before the upstream ended is unexpected; abort the pipeline.
			if (!sourceHasEnded) {
				convertedStream.abort(true);
			}
		},
		onError: function (error) {
			// Make sure we notify the pipeline, if any, by passing in the most recent source stream that we've seen.
			convertedStream.abort(lastSeenSource, error);
		}
	});

	return convertedStream;
}
functionfromTransform(stream){
letcompletionDefer;
letendHandled=false;
// FIXME: we need to specifically watch for the `error` and `end` events on the readable interface, to know when the transform stream has fully completed processing
// Respond to the EndOfStream produced by the pushbuffer in this case
// This will quite possibly return an empty buffer, but that is fine; the `buffer` stream downstream from us will just keep reading (and therefore queueing up new items to be transformed) until it gets some results.
returnreadable.consumeImmediateBuffer();
}).catch(isEndOfStream,(marker)=>{
// Wait for transform stream to drain fully, `error`/`end` event, and then return whatever buffer remains.
// FIXME: Error propagation logic is pretty shaky here. Verify that we don't end up with double error reports.