Complete internals refactor for readability

Branch: master
Author: Sven Slootweg, 3 years ago
Parent: 923e87efaa
Commit: 13ba7a2641

async-while.js (new file)
@@ -0,0 +1,21 @@
"use strict";
const Promise = require("bluebird");
module.exports = function asyncWhile(predicate, body) {
function doTick() {
return Promise.try(() => {
return predicate();
}).then((shouldRun) => {
if (shouldRun) {
return Promise.try(() => {
return body();
}).then(() => {
return doTick();
});
}
});
}
return doTick();
};
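
asyncWhile is a small Bluebird-based asynchronous while loop: it evaluates predicate, and as long as that resolves to something truthy it runs body and loops again, settling only once the predicate turns falsy. A minimal usage sketch (the counter and log lines are made up for illustration):

// Usage sketch only; not part of this commit.
const asyncWhile = require("./async-while");

let remaining = 3;

asyncWhile(
    () => remaining > 0,                        // predicate; may also return a Promise
    () => {
        console.log(`ticks left: ${remaining}`);
        remaining -= 1;                         // body; may also return a Promise
    }
).then(() => {
    console.log("done");                        // runs after three iterations
});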

index.js
@@ -1,186 +1,156 @@
"use strict";
const Promise = require("bluebird");
const consumable = require("@joepie91/consumable");
const defaultValue = require("default-value");
const debug = require("debug")("promistream:parallelize");
const isEndOfStream = require("@promistream/is-end-of-stream");
const isAborted = require("@promistream/is-aborted");
const propagateAbort = require("@promistream/propagate-abort");
const pipe = require("@promistream/pipe");
const sequentialize = require("@promistream/sequentialize");
const defaultValue = require("default-value");
const debug = require("debug")("promistream:parallelize");
const debugListener = require("debug")("promistream:parallelize:queue-listener");
const createPromiseListener = require("./promise-listener");
const createRequestQueue = require("./request-queue");
const asyncWhile = require("./async-while");
// FIXME: Verify that if an EndOfStream or Aborted marker comes in, it is left queued up until all of the in-flight non-marker results have been processed; otherwise the downstream may erroneously believe that the stream has already ended, while more items are still on their way
function isRejectedWithMarker(promise) {
if (promise.isRejected()) {
let reason = promise.reason();
return (isAborted(reason) || isEndOfStream(reason));
} else {
return false;
}
}
module.exports = function parallelizeStream(threadCount, options = {}) {
let ordered = defaultValue(options.ordered, true);
/* TODO: Does this need a more efficient FIFO queue implementation? */
let signals = [];
let queueListener = createPromiseListener();
let ended = false;
let parallelMode = true;
let filling = false;
let currentSource = null;
let threadsRunning = 0;
let peekPointer = 0;
let markerBuffer = consumable([]);
function stopFilling() {
debug(`Paused internal queue fill (parallel mode = ${parallelMode}, thread count = ${threadCount}, in-flight requests = ${signals.length})`);
filling = false;
}
let requestQueue = createRequestQueue({
onMatch: () => {
tryStartFilling();
}
});
function canStartRead() {
let maximumThreads = (parallelMode === true)
? threadCount
: 1;
function fillRequest() {
if (parallelMode === true && signals.length < threadCount) {
if (threadsRunning >= maximumThreads) {
return false;
} else if (ended) {
// Special case: never optimistically read when the stream has been ended, because it will lead to an unnecessary read
let canRead = (requestQueue.requestCount() > 0);
debug(` [filling] stream ended; is there a request in queue? ${canRead}`);
return canRead;
} else {
return Promise.try(() => {
return currentSource.peek();
}).then((valueAvailable) => {
debug(`Result of upstream peek: ${valueAvailable}`);
}).then((dataAvailable) => {
if (dataAvailable && parallelMode === false && ended === false) {
switchToParallelMode();
} else if (!dataAvailable && parallelMode === true) {
switchToSequentialMode();
}
if (valueAvailable) {
queueRead();
debug(` [filling] data available upstream: ${dataAvailable}`);
return fillRequest(currentSource);
} else {
return switchToSequentialMode();
}
return dataAvailable;
});
} else {
stopFilling();
}
}
function tryStartFilling() {
if (!filling) {
if (filling === false) {
debug(` [filling] started`);
filling = true;
Promise.try(() => {
debug("Starting internal queue fill...");
filling = true;
return fillRequest(currentSource);
}).catch((err) => {
debugListener("Rejecting", err);
queueListener.reject(err);
debug(`Error occurred during filling: ${err.stack}`);
return asyncWhile(canStartRead, () => {
return startRead();
});
}).then(() => {
debug(` [filling] completed`);
filling = false;
}).catch((error) => {
debug(` [filling] error:`, error);
requestQueue.failAllRequests(error);
});
}
}
function switchToParallelMode() {
debug("Switching to parallel mode");
parallelMode = true;
return tryStartFilling();
}
function switchToSequentialMode() {
debug("Switching to sequential mode");
parallelMode = false;
function startRead() {
threadsRunning += 1;
debug(`[read] started (parallel mode = ${parallelMode}, thread count = ${threadCount}, in-flight requests = ${threadsRunning})`);
return stopFilling();
}
let readOperation = Promise.try(() => {
return currentSource.read();
}).finally(() => {
threadsRunning -= 1;
debug(`[read] completed (parallel mode = ${parallelMode}, thread count = ${threadCount}, in-flight requests = ${threadsRunning})`);
}).tapCatch(isEndOfStream, isAborted, () => {
if (ended === false) {
debug(" [mode] marking stream as ended");
ended = true;
switchToSequentialMode();
}
});
function bufferNotEmpty() {
/* NOTE: This should *only* take into account items that have not been peeked yet! */
let peekedSignal;
// This noop is just used to silence unhandled rejection warnings - those get handled by the read request that they are attached to instead. But because they may only actually get attached in a later event loop tick, Bluebird will incorrectly believe them to be 'unhandled'.
// TODO: Is the assertion that they get handled always true? Is it possible for some read results to never get read out? Will that re-emit a warning elsewhere somehow?
function noop() {}
if (ordered) {
/* This will return only if there is a contiguous sequence of settled signals from the start, of which *at least one* has not been peeked yet. */
for (let signal of signals) {
if (signal.trackingPromise.isPending()) {
break;
} else if (signal.peeked) {
continue;
} else {
/* Settled, and not peeked yet. */
peekedSignal = signal;
break;
}
}
} else {
peekedSignal = signals.find((signal) => {
return !(signal.trackingPromise.isPending()) && !signal.peeked;
});
}
if (peekedSignal != null) {
peekedSignal.peeked = true;
return true;
} else {
return false;
}
}
// In ordered mode, (a Promise for) the result is reported to the queue immediately so that it gets matched to a read request in-order
debug("[response] pushed in-flight read operation");
requestQueue.pushResponse(readOperation);
function awaitInFlightRequests() {
if (signals.length > 0) {
return true;
// Unhandled rejection silencer
readOperation.catch(noop);
} else {
debug("Waiting for queue to be non-empty...");
// In unordered mode, results are reported to the queue as and when they come in
readOperation.finally(() => {
if (isRejectedWithMarker(readOperation)) {
// We place Aborted/EndOfStream markers in a separate queue in unordered mode; they can occur *before* some other successful reads complete, and we don't want the downstream to prematurely stop reading, so we need to make sure that all non-marker results are processed before throwing the markers downstream.
debug("[response] pushed read result (to marker buffer)");
markerBuffer.peek().push(readOperation);
} else {
debug("[response] pushed read result");
requestQueue.pushResponse(readOperation);
}
return Promise.try(() => {
debugListener("Listening...");
return queueListener.listen();
}).tap(() => {
debug("Got queue-filled notification");
}).tapCatch((error) => {
debug(`Queue listener rejected: ${error.stack}`);
});
if (ended === true && threadsRunning === 0 && markerBuffer.peek().length > 0) {
for (let marker of markerBuffer.replace([])) {
requestQueue.pushResponse(marker);
}
}
}).catch(noop); // Unhandled rejection silencer
}
}
function queueRead() {
let promise = Promise.try(() => {
return currentSource.read();
});
let signalObject = { promise: promise };
signals.push({
peeked: false,
object: signalObject,
trackingPromise: Promise.try(() => {
return promise.reflect();
}).then(() => {
return signalObject;
})
});
function switchToParallelMode() {
debug(" [mode] switching to parallel");
parallelMode = true;
debugListener("Resolving");
queueListener.resolve();
return tryStartFilling();
}
function awaitResult() {
return Promise.try(() => {
return awaitInFlightRequests();
}).then(() => {
debug("Awaiting next finished result...");
if (ordered) {
return signals[0].trackingPromise;
} else {
return Promise.race(signals.map((item) => item.trackingPromise));
}
}).then((signalObject) => {
debug("A read attempt completed!");
let resultPromise = signalObject.promise;
signals = signals.filter((signal) => (signal.object !== signalObject));
let isRejected = resultPromise.isRejected();
let isEndOfStream_ = isRejected && isEndOfStream(resultPromise.reason());
let isAborted_ = isRejected && isAborted(resultPromise.reason());
if (isEndOfStream_ || isAborted_) {
switchToSequentialMode();
if (signals.length > 0) {
/* Throw away the marker, and wait for the next result */
return awaitResult();
} else {
/* Queue has been exhausted, so this marker will be the final result; pass it through */
return signalObject.promise;
}
} else {
return signalObject.promise;
}
});
function switchToSequentialMode() {
debug(" [mode] switching to sequential");
parallelMode = false;
}
let parallelizer = {
@@ -188,49 +158,38 @@ module.exports = function parallelizeStream(threadCount, options = {}) {
description: `parallelize (${threadCount} threads)`,
abort: propagateAbort,
peek: async function (source) {
debug("Processing peek...");
debug("[peek] requested");
if (requestQueue.responseCount() > peekPointer) {
peekPointer += 1;
if (bufferNotEmpty()) {
return true;
} else if (parallelMode === true) {
return source.peek();
} else {
if (parallelMode === true) {
return source.peek();
} else {
return false;
}
return false;
}
},
read: function (source) {
return Promise.try(() => {
debug("Processing read...");
currentSource = source;
if (parallelMode) {
/* This runs in the background, potentially perpetually */
tryStartFilling();
return awaitResult();
} else {
/* Sequential mode */
if (signals.length > 0) {
/* Clear out the remaining in-flight reads from the previous parallel-mode operation, first. */
debug(`Awaiting in-flight read... (in-flight reads = ${signals.length})`);
return awaitResult();
} else {
debug("Passing through read to upstream...");
return Promise.try(() => {
return source.read();
}).then((result) => {
switchToParallelMode();
return result;
});
}
if (peekPointer > 0) {
peekPointer -= 1;
}
// We leave it up to the request queue to match a read request to a result - we don't assume that it will be the next read, because in unordered mode reads can complete out-of-order
let request = requestQueue.pushRequest();
debug("[request] started");
// NOTE: This should always happen *after* creating a request in the queue, to ensure correct behaviour when the stream has ended; otherwise canStartRead may be called while the requestCount is still 0, and it will fail to initiate a read to satisfy the request
tryStartFilling();
return request;
}).tap(() => {
debug("Read request was satisfied with a value");
debug("[request] satisfied with a value");
}).tapCatch((error) => {
debug("Read request was satisfied with an error:", error.message);
debug("[request] satisfied with an error:", error.message);
});
}
};
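
For context, this is roughly how the parallelize stream sits in a pipeline: it issues up to threadCount concurrent reads against its upstream, and with the default ordered: true it hands results downstream in their original order. In the sketch below, pipe and collect come from this package's own dependencies, while from-iterable, map and slowLookup are illustrative assumptions standing in for any slow, promise-returning upstream work:

// Rough usage sketch; the upstream stages are assumptions, not part of this commit.
const pipe = require("@promistream/pipe");
const collect = require("@promistream/collect");
const parallelize = require("@promistream/parallelize");
const fromIterable = require("@promistream/from-iterable"); // assumed source stage
const map = require("@promistream/map");                    // assumed transform stage

function slowLookup(id) {
    // Stand-in for some slow, promise-returning operation.
    return new Promise((resolve) => setTimeout(() => resolve(id * 10), 100));
}

pipe([
    fromIterable([ 1, 2, 3, 4, 5 ]),
    map((id) => slowLookup(id)),
    parallelize(4), // keep up to 4 lookups in flight at once
    collect()
]).read().then((results) => {
    console.log(results); // with the default { ordered: true }, input order is preserved
});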

package.json
@@ -14,7 +14,8 @@
"@promistream/sequentialize": "^0.1.0",
"bluebird": "^3.5.4",
"debug": "^4.1.1",
"default-value": "^1.0.0"
"default-value": "^1.0.0",
"p-defer": "^3"
},
"devDependencies": {
"@promistream/collect": "^0.1.1",

promise-listener.js (deleted file)
@@ -1,30 +0,0 @@
"use strict";
const consumable = require("@joepie91/consumable");
module.exports = function createPromiseListener() {
let listeners = consumable([]);
function consumeListeners() {
/* To ensure that a resolve/reject is atomic, and can never apply to the same listener queue twice. */
return listeners.replace([]);
}
return {
listen: function () {
return new Promise((resolve, reject) => {
listeners.peek().push({ resolve, reject });
});
},
resolve: function (value) {
consumeListeners().forEach((listener) => {
listener.resolve(value);
});
},
reject: function (value) {
consumeListeners().forEach((listener) => {
listener.reject(value);
});
},
}
};
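
The deleted helper was a broadcast notification primitive: every call to listen() returns a Promise, and the next resolve() or reject() settles all Promises handed out since the previous notification (the consumable swap makes that consume step atomic). An illustrative sketch of the behaviour being replaced:

// Illustrative only; this helper is removed by this commit.
const createPromiseListener = require("./promise-listener");

const queueListener = createPromiseListener();

queueListener.listen().then(() => console.log("woken up"));
queueListener.listen().then(() => console.log("also woken up"));

queueListener.resolve(); // settles both pending listen() Promises at once

queueListener.listen().then(() => console.log("waits for the next resolve()"));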

request-queue.js (new file)
@@ -0,0 +1,63 @@
"use strict";
const Promise = require("bluebird");
const pDefer = require("p-defer");
module.exports = function createRequestQueue(options = {}) {
let requestBuffer = [];
let responseBuffer = [];
let seenError;
function failAllRequests(error) {
let failedRequests = requestBuffer;
requestBuffer = [];
seenError = error;
for (let request of failedRequests) {
request.reject(error);
}
}
function maybeOnMatch() {
if (options.onMatch != null) {
return Promise.try(() => {
return options.onMatch();
}).catch((error) => {
failAllRequests(error);
});
}
}
return {
pushRequest: function () {
if (responseBuffer.length > 0) {
let returnValue = Promise.resolve(responseBuffer.shift());
maybeOnMatch();
return returnValue;
} else if (seenError !== undefined) {
return Promise.reject(seenError);
} else {
let { resolve, reject, promise } = pDefer();
requestBuffer.push({ resolve, reject });
return promise;
}
},
pushResponse: function (response) {
if (requestBuffer.length > 0) {
let request = requestBuffer.shift();
request.resolve(response);
maybeOnMatch();
} else {
responseBuffer.push(response);
}
},
failAllRequests: failAllRequests,
requestCount: function () {
return requestBuffer.length;
},
responseCount: function () {
return responseBuffer.length;
}
};
};
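
The request queue is what the refactored index.js uses to match read requests to read results: pushRequest() returns a Promise for the next available response (immediately, if one is already buffered), onMatch fires after every successful match so the parallelizer can top its internal read queue back up, and failAllRequests() rejects everything pending as well as any later requests. A minimal usage sketch, based on the implementation above:

// Minimal usage sketch; the string values are placeholders.
const createRequestQueue = require("./request-queue");

const queue = createRequestQueue({
    onMatch: () => {
        // Called after every request/response match, eg. to refill the read queue.
    }
});

// A request made before any response exists stays pending...
queue.pushRequest().then((value) => console.log("first:", value));

// ... and settles once a response is pushed and matched to it.
queue.pushResponse("hello");

// Responses can also arrive first and wait for a later request.
queue.pushResponse("world");
queue.pushRequest().then((value) => console.log("second:", value));

// After this, any pending and future requests reject with the given error.
queue.failAllRequests(new Error("upstream broke"));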

yarn.lock
@@ -71,12 +71,7 @@
"@validatem/required" "^0.1.1"
"@validatem/wrap-error" "^0.3.0"
"@promistream/propagate-abort@^0.1.2":
version "0.1.6"
resolved "https://registry.yarnpkg.com/@promistream/propagate-abort/-/propagate-abort-0.1.6.tgz#dfc3c78c2e22662b9e5d548afce2180c40584ef5"
integrity sha512-Ap4eDFiIcLb4yuJdin2tQM1+2ZJZm78sYWkKVdqECJY0UGkwNsbaMMeYyfZpFRpJGmW8mCCuOkWs0fQl5H9DGA==
"@promistream/propagate-abort@^0.1.6":
"@promistream/propagate-abort@^0.1.2", "@promistream/propagate-abort@^0.1.6":
version "0.1.7"
resolved "https://registry.yarnpkg.com/@promistream/propagate-abort/-/propagate-abort-0.1.7.tgz#06a5af16adb433ae27b25bb38b957b01619bf9e8"
integrity sha512-BR0XZMirAjO1IRpyTtOG4n0fGuuvRGJsO8Hmn4HOJXhi10onX3GlfCNZN2tqe4Mq/5fEDgRNGNUHjCY7naDYUA==
@@ -426,12 +421,7 @@ assure-array@^1.0.0:
resolved "https://registry.yarnpkg.com/assure-array/-/assure-array-1.0.0.tgz#4f4ad16a87659d6200a4fb7103462033d216ec1f"
integrity sha1-T0rRaodlnWIApPtxA0YgM9IW7B8=
bluebird@^3.5.4:
version "3.5.4"
resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-3.5.4.tgz#d6cc661595de30d5b3af5fcedd3c0b3ef6ec5714"
integrity sha512-FG+nFEZChJrbQ9tIccIfZJBz3J7mLrAhxakAbnrJWn8d7aKOC+LWifa0G+p4ZqKp4y13T7juYvdhq9NzKdsrjw==
bluebird@^3.7.2:
bluebird@^3.5.4, bluebird@^3.7.2:
version "3.7.2"
resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-3.7.2.tgz#9f229c15be272454ffa973ace0dbee79a1b0c36f"
integrity sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==
@@ -478,11 +468,11 @@ create-error@^0.3.1:
integrity sha1-aYECRaYp5lRDK/BDdzYAA6U1GiM=
debug@^4.1.1:
version "4.1.1"
resolved "https://registry.yarnpkg.com/debug/-/debug-4.1.1.tgz#3b72260255109c6b589cee050f1d516139664791"
integrity sha512-pYAIzeRo8J6KPEaJ0VWOh5Pzkbw/RetuzehGM7QRRX5he4fPHx2rdKMB256ehJCkX+XRQm16eZLqLNS8RSZXZw==
version "4.3.2"
resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.2.tgz#f0a49c18ac8779e31d4a0c6029dfb76873c7428b"
integrity sha512-mOp8wKcvj7XxC78zLgw/ZA+6TSgkoE2C/ienthhRD298T7UNwAg9diBpLRxC0mOezLl4B0xV7M0cCO6P/O0Xhw==
dependencies:
ms "^2.1.1"
ms "2.1.2"
default-value@^1.0.0:
version "1.0.0"
@@ -621,12 +611,12 @@ is.object@^1.0.0:
resolved "https://registry.yarnpkg.com/is.object/-/is.object-1.0.0.tgz#e4f4117e9f083b35c8df5cf817ea3efb0452fdfa"
integrity sha1-5PQRfp8IOzXI31z4F+o++wRS/fo=
ms@^2.1.1:
version "2.1.1"
resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.1.tgz#30a5864eb3ebb0a66f2ebe6d727af06a09d86e0a"
integrity sha512-tgp+dl5cGk28utYktBsrFqA7HKgrhgPsg6Z/EfhWI4gl1Hwq8B/GmY/0oXZ6nF8hDVesS/FpnYaD/kOWhYQvyg==
ms@2.1.2:
version "2.1.2"
resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.2.tgz#d09d1f357b443f493382a8eb3ccd183872ae6009"
integrity sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==
p-defer@^3.0.0:
p-defer@^3, p-defer@^3.0.0:
version "3.0.0"
resolved "https://registry.yarnpkg.com/p-defer/-/p-defer-3.0.0.tgz#d1dceb4ee9b2b604b1d94ffec83760175d4e6f83"
integrity sha512-ugZxsxmtTln604yeYd29EGrNhazN2lywetzpKhfmQjW/VJmhpDmWbiX+h0zL8V91R0UXkhb3KtPmyq9PZw3aYw==
