Updated the files.
This commit is contained in:
parent
1553e6b971
commit
753967d4f5
23418 changed files with 3784666 additions and 0 deletions
52
my-app/node_modules/piscina/dist/src/common.d.ts
generated
vendored
Executable file
52
my-app/node_modules/piscina/dist/src/common.d.ts
generated
vendored
Executable file
|
@ -0,0 +1,52 @@
|
|||
/// <reference types="node" />
|
||||
import type { MessagePort } from 'worker_threads';
|
||||
export declare const READY = "_WORKER_READY";
|
||||
export interface StartupMessage {
|
||||
filename: string | null;
|
||||
name: string;
|
||||
port: MessagePort;
|
||||
sharedBuffer: Int32Array;
|
||||
useAtomics: boolean;
|
||||
niceIncrement: number;
|
||||
}
|
||||
export interface RequestMessage {
|
||||
taskId: number;
|
||||
task: any;
|
||||
filename: string;
|
||||
name: string;
|
||||
}
|
||||
export interface ReadyMessage {
|
||||
[READY]: true;
|
||||
}
|
||||
export interface ResponseMessage {
|
||||
taskId: number;
|
||||
result: any;
|
||||
error: Error | null;
|
||||
}
|
||||
export declare const commonState: {
|
||||
isWorkerThread: boolean;
|
||||
workerData: undefined;
|
||||
};
|
||||
export declare const kTransferable: unique symbol;
|
||||
export declare const kValue: unique symbol;
|
||||
export declare const kQueueOptions: unique symbol;
|
||||
export declare function isTransferable(value: any): boolean;
|
||||
export declare function isMovable(value: any): boolean;
|
||||
export declare function markMovable(value: object): void;
|
||||
export interface Transferable {
|
||||
readonly [kTransferable]: object;
|
||||
readonly [kValue]: object;
|
||||
}
|
||||
export interface Task {
|
||||
readonly [kQueueOptions]: object | null;
|
||||
}
|
||||
export interface TaskQueue {
|
||||
readonly size: number;
|
||||
shift(): Task | null;
|
||||
remove(task: Task): void;
|
||||
push(task: Task): void;
|
||||
}
|
||||
export declare function isTaskQueue(value: any): boolean;
|
||||
export declare const kRequestCountField = 0;
|
||||
export declare const kResponseCountField = 1;
|
||||
export declare const kFieldCount = 2;
|
51
my-app/node_modules/piscina/dist/src/common.js
generated
vendored
Executable file
51
my-app/node_modules/piscina/dist/src/common.js
generated
vendored
Executable file
|
@ -0,0 +1,51 @@
|
|||
"use strict";
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
exports.kFieldCount = exports.kResponseCountField = exports.kRequestCountField = exports.isTaskQueue = exports.markMovable = exports.isMovable = exports.isTransferable = exports.kQueueOptions = exports.kValue = exports.kTransferable = exports.commonState = exports.READY = void 0;
|
||||
exports.READY = '_WORKER_READY';
|
||||
;
|
||||
exports.commonState = {
|
||||
isWorkerThread: false,
|
||||
workerData: undefined
|
||||
};
|
||||
// Internal symbol used to mark Transferable objects returned
|
||||
// by the Piscina.move() function
|
||||
const kMovable = Symbol('Piscina.kMovable');
|
||||
exports.kTransferable = Symbol.for('Piscina.transferable');
|
||||
exports.kValue = Symbol.for('Piscina.valueOf');
|
||||
exports.kQueueOptions = Symbol.for('Piscina.queueOptions');
|
||||
// True if the object implements the Transferable interface
|
||||
function isTransferable(value) {
|
||||
return value != null &&
|
||||
typeof value === 'object' &&
|
||||
exports.kTransferable in value &&
|
||||
exports.kValue in value;
|
||||
}
|
||||
exports.isTransferable = isTransferable;
|
||||
// True if object implements Transferable and has been returned
|
||||
// by the Piscina.move() function
|
||||
function isMovable(value) {
|
||||
return isTransferable(value) && value[kMovable] === true;
|
||||
}
|
||||
exports.isMovable = isMovable;
|
||||
function markMovable(value) {
|
||||
Object.defineProperty(value, kMovable, {
|
||||
enumerable: false,
|
||||
configurable: true,
|
||||
writable: true,
|
||||
value: true
|
||||
});
|
||||
}
|
||||
exports.markMovable = markMovable;
|
||||
function isTaskQueue(value) {
|
||||
return typeof value === 'object' &&
|
||||
value !== null &&
|
||||
'size' in value &&
|
||||
typeof value.shift === 'function' &&
|
||||
typeof value.remove === 'function' &&
|
||||
typeof value.push === 'function';
|
||||
}
|
||||
exports.isTaskQueue = isTaskQueue;
|
||||
exports.kRequestCountField = 0;
|
||||
exports.kResponseCountField = 1;
|
||||
exports.kFieldCount = 2;
|
||||
//# sourceMappingURL=common.js.map
|
1
my-app/node_modules/piscina/dist/src/common.js.map
generated
vendored
Executable file
1
my-app/node_modules/piscina/dist/src/common.js.map
generated
vendored
Executable file
|
@ -0,0 +1 @@
|
|||
{"version":3,"file":"common.js","sourceRoot":"","sources":["../../src/common.ts"],"names":[],"mappings":";;;AAEa,QAAA,KAAK,GAAG,eAAe,CAAC;AAoBpC,CAAC;AAOW,QAAA,WAAW,GAAG;IACzB,cAAc,EAAE,KAAK;IACrB,UAAU,EAAE,SAAS;CACtB,CAAC;AAEF,6DAA6D;AAC7D,iCAAiC;AACjC,MAAM,QAAQ,GAAG,MAAM,CAAC,kBAAkB,CAAC,CAAC;AAC/B,QAAA,aAAa,GAAG,MAAM,CAAC,GAAG,CAAC,sBAAsB,CAAC,CAAC;AACnD,QAAA,MAAM,GAAG,MAAM,CAAC,GAAG,CAAC,iBAAiB,CAAC,CAAC;AACvC,QAAA,aAAa,GAAG,MAAM,CAAC,GAAG,CAAC,sBAAsB,CAAC,CAAC;AAEhE,2DAA2D;AAC3D,SAAgB,cAAc,CAAE,KAAW;IACzC,OAAO,KAAK,IAAI,IAAI;QACb,OAAO,KAAK,KAAK,QAAQ;QACzB,qBAAa,IAAI,KAAK;QACtB,cAAM,IAAI,KAAK,CAAC;AACzB,CAAC;AALD,wCAKC;AAED,+DAA+D;AAC/D,iCAAiC;AACjC,SAAgB,SAAS,CAAE,KAAW;IACpC,OAAO,cAAc,CAAC,KAAK,CAAC,IAAI,KAAK,CAAC,QAAQ,CAAC,KAAK,IAAI,CAAC;AAC3D,CAAC;AAFD,8BAEC;AAED,SAAgB,WAAW,CAAE,KAAc;IACzC,MAAM,CAAC,cAAc,CAAC,KAAK,EAAE,QAAQ,EAAE;QACrC,UAAU,EAAE,KAAK;QACjB,YAAY,EAAE,IAAI;QAClB,QAAQ,EAAE,IAAI;QACd,KAAK,EAAE,IAAI;KACZ,CAAC,CAAC;AACL,CAAC;AAPD,kCAOC;AAkBD,SAAgB,WAAW,CAAE,KAAW;IACtC,OAAO,OAAO,KAAK,KAAK,QAAQ;QACzB,KAAK,KAAK,IAAI;QACd,MAAM,IAAI,KAAK;QACf,OAAO,KAAK,CAAC,KAAK,KAAK,UAAU;QACjC,OAAO,KAAK,CAAC,MAAM,KAAK,UAAU;QAClC,OAAO,KAAK,CAAC,IAAI,KAAK,UAAU,CAAC;AAC1C,CAAC;AAPD,kCAOC;AAEY,QAAA,kBAAkB,GAAG,CAAC,CAAC;AACvB,QAAA,mBAAmB,GAAG,CAAC,CAAC;AACxB,QAAA,WAAW,GAAG,CAAC,CAAC"}
|
107
my-app/node_modules/piscina/dist/src/index.d.ts
generated
vendored
Executable file
107
my-app/node_modules/piscina/dist/src/index.d.ts
generated
vendored
Executable file
|
@ -0,0 +1,107 @@
|
|||
/// <reference types="node" />
|
||||
/// <reference types="node" />
|
||||
import { Worker, MessagePort } from 'worker_threads';
|
||||
import { EventEmitterAsyncResource } from 'events';
|
||||
import { Transferable, TaskQueue } from './common';
|
||||
interface AbortSignalEventTargetAddOptions {
|
||||
once: boolean;
|
||||
}
|
||||
interface AbortSignalEventTarget {
|
||||
addEventListener: (name: 'abort', listener: () => void, options?: AbortSignalEventTargetAddOptions) => void;
|
||||
removeEventListener: (name: 'abort', listener: () => void) => void;
|
||||
aborted?: boolean;
|
||||
reason?: unknown;
|
||||
}
|
||||
interface AbortSignalEventEmitter {
|
||||
off: (name: 'abort', listener: () => void) => void;
|
||||
once: (name: 'abort', listener: () => void) => void;
|
||||
}
|
||||
type AbortSignalAny = AbortSignalEventTarget | AbortSignalEventEmitter;
|
||||
type ResourceLimits = Worker extends {
|
||||
resourceLimits?: infer T;
|
||||
} ? T : {};
|
||||
type EnvSpecifier = typeof Worker extends {
|
||||
new (filename: never, options?: {
|
||||
env: infer T;
|
||||
}): Worker;
|
||||
} ? T : never;
|
||||
interface Options {
|
||||
filename?: string | null;
|
||||
name?: string;
|
||||
minThreads?: number;
|
||||
maxThreads?: number;
|
||||
idleTimeout?: number;
|
||||
maxQueue?: number | 'auto';
|
||||
concurrentTasksPerWorker?: number;
|
||||
useAtomics?: boolean;
|
||||
resourceLimits?: ResourceLimits;
|
||||
argv?: string[];
|
||||
execArgv?: string[];
|
||||
env?: EnvSpecifier;
|
||||
workerData?: any;
|
||||
taskQueue?: TaskQueue;
|
||||
niceIncrement?: number;
|
||||
trackUnmanagedFds?: boolean;
|
||||
closeTimeout?: number;
|
||||
}
|
||||
interface FilledOptions extends Options {
|
||||
filename: string | null;
|
||||
name: string;
|
||||
minThreads: number;
|
||||
maxThreads: number;
|
||||
idleTimeout: number;
|
||||
maxQueue: number;
|
||||
concurrentTasksPerWorker: number;
|
||||
useAtomics: boolean;
|
||||
taskQueue: TaskQueue;
|
||||
niceIncrement: number;
|
||||
closeTimeout: number;
|
||||
}
|
||||
interface RunOptions {
|
||||
transferList?: TransferList;
|
||||
filename?: string | null;
|
||||
signal?: AbortSignalAny | null;
|
||||
name?: string | null;
|
||||
}
|
||||
interface CloseOptions {
|
||||
force?: boolean;
|
||||
}
|
||||
type TransferList = MessagePort extends {
|
||||
postMessage(value: any, transferList: infer T): any;
|
||||
} ? T : never;
|
||||
type TransferListItem = TransferList extends (infer T)[] ? T : never;
|
||||
declare class Piscina extends EventEmitterAsyncResource {
|
||||
#private;
|
||||
constructor(options?: Options);
|
||||
/** @deprecated Use run(task, options) instead **/
|
||||
runTask(task: any, transferList?: TransferList, filename?: string, abortSignal?: AbortSignalAny): Promise<any>;
|
||||
/** @deprecated Use run(task, options) instead **/
|
||||
runTask(task: any, transferList?: TransferList, filename?: AbortSignalAny, abortSignal?: undefined): Promise<any>;
|
||||
/** @deprecated Use run(task, options) instead **/
|
||||
runTask(task: any, transferList?: string, filename?: AbortSignalAny, abortSignal?: undefined): Promise<any>;
|
||||
/** @deprecated Use run(task, options) instead **/
|
||||
runTask(task: any, transferList?: AbortSignalAny, filename?: undefined, abortSignal?: undefined): Promise<any>;
|
||||
run(task: any, options?: RunOptions): Promise<any>;
|
||||
close(options?: CloseOptions): Promise<void>;
|
||||
destroy(): Promise<void>;
|
||||
get maxThreads(): number;
|
||||
get minThreads(): number;
|
||||
get options(): FilledOptions;
|
||||
get threads(): Worker[];
|
||||
get queueSize(): number;
|
||||
get completed(): number;
|
||||
get waitTime(): any;
|
||||
get runTime(): any;
|
||||
get utilization(): number;
|
||||
get duration(): number;
|
||||
get needsDrain(): boolean;
|
||||
static get isWorkerThread(): boolean;
|
||||
static get workerData(): any;
|
||||
static get version(): string;
|
||||
static get Piscina(): typeof Piscina;
|
||||
static move(val: Transferable | TransferListItem | ArrayBufferView | ArrayBuffer | MessagePort): ArrayBuffer | ArrayBufferView | MessagePort | Transferable;
|
||||
static get transferableSymbol(): symbol;
|
||||
static get valueSymbol(): symbol;
|
||||
static get queueOptionsSymbol(): symbol;
|
||||
}
|
||||
export = Piscina;
|
992
my-app/node_modules/piscina/dist/src/index.js
generated
vendored
Executable file
992
my-app/node_modules/piscina/dist/src/index.js
generated
vendored
Executable file
|
@ -0,0 +1,992 @@
|
|||
"use strict";
|
||||
var __classPrivateFieldSet = (this && this.__classPrivateFieldSet) || function (receiver, state, value, kind, f) {
|
||||
if (kind === "m") throw new TypeError("Private method is not writable");
|
||||
if (kind === "a" && !f) throw new TypeError("Private accessor was defined without a setter");
|
||||
if (typeof state === "function" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError("Cannot write private member to an object whose class did not declare it");
|
||||
return (kind === "a" ? f.call(receiver, value) : f ? f.value = value : state.set(receiver, value)), value;
|
||||
};
|
||||
var __classPrivateFieldGet = (this && this.__classPrivateFieldGet) || function (receiver, state, kind, f) {
|
||||
if (kind === "a" && !f) throw new TypeError("Private accessor was defined without a getter");
|
||||
if (typeof state === "function" ? receiver !== state || !f : !state.has(receiver)) throw new TypeError("Cannot read private member from an object whose class did not declare it");
|
||||
return kind === "m" ? f : kind === "a" ? f.call(receiver) : f ? f.value : state.get(receiver);
|
||||
};
|
||||
var __importDefault = (this && this.__importDefault) || function (mod) {
|
||||
return (mod && mod.__esModule) ? mod : { "default": mod };
|
||||
};
|
||||
var _DirectlyTransferable_value, _ArrayBufferViewTransferable_view, _Piscina_pool;
|
||||
const worker_threads_1 = require("worker_threads");
|
||||
const events_1 = require("events");
|
||||
const async_hooks_1 = require("async_hooks");
|
||||
const os_1 = require("os");
|
||||
const url_1 = require("url");
|
||||
const path_1 = require("path");
|
||||
const util_1 = require("util");
|
||||
const assert_1 = __importDefault(require("assert"));
|
||||
const hdr_histogram_js_1 = require("hdr-histogram-js");
|
||||
const perf_hooks_1 = require("perf_hooks");
|
||||
const hdr_histogram_percentiles_obj_1 = __importDefault(require("hdr-histogram-percentiles-obj"));
|
||||
const common_1 = require("./common");
|
||||
const package_json_1 = require("../package.json");
|
||||
const promises_1 = require("timers/promises");
|
||||
const cpuCount = (() => {
|
||||
try {
|
||||
return (0, os_1.cpus)().length;
|
||||
}
|
||||
catch {
|
||||
/* istanbul ignore next */
|
||||
return 1;
|
||||
}
|
||||
})();
|
||||
;
|
||||
function onabort(abortSignal, listener) {
|
||||
if ('addEventListener' in abortSignal) {
|
||||
abortSignal.addEventListener('abort', listener, { once: true });
|
||||
}
|
||||
else {
|
||||
abortSignal.once('abort', listener);
|
||||
}
|
||||
}
|
||||
class AbortError extends Error {
|
||||
constructor(reason) {
|
||||
// TS does not recognizes the cause clause
|
||||
// @ts-expect-error
|
||||
super('The task has been aborted', { cause: reason });
|
||||
}
|
||||
get name() { return 'AbortError'; }
|
||||
}
|
||||
class ArrayTaskQueue {
|
||||
constructor() {
|
||||
this.tasks = [];
|
||||
}
|
||||
get size() { return this.tasks.length; }
|
||||
shift() {
|
||||
return this.tasks.shift();
|
||||
}
|
||||
push(task) {
|
||||
this.tasks.push(task);
|
||||
}
|
||||
remove(task) {
|
||||
const index = this.tasks.indexOf(task);
|
||||
assert_1.default.notStrictEqual(index, -1);
|
||||
this.tasks.splice(index, 1);
|
||||
}
|
||||
}
|
||||
const kDefaultOptions = {
|
||||
filename: null,
|
||||
name: 'default',
|
||||
minThreads: Math.max(Math.floor(cpuCount / 2), 1),
|
||||
maxThreads: cpuCount * 1.5,
|
||||
idleTimeout: 0,
|
||||
maxQueue: Infinity,
|
||||
concurrentTasksPerWorker: 1,
|
||||
useAtomics: true,
|
||||
taskQueue: new ArrayTaskQueue(),
|
||||
niceIncrement: 0,
|
||||
trackUnmanagedFds: true,
|
||||
closeTimeout: 30000
|
||||
};
|
||||
const kDefaultRunOptions = {
|
||||
transferList: undefined,
|
||||
filename: null,
|
||||
signal: null,
|
||||
name: null
|
||||
};
|
||||
const kDefaultCloseOptions = {
|
||||
force: false
|
||||
};
|
||||
class DirectlyTransferable {
|
||||
constructor(value) {
|
||||
_DirectlyTransferable_value.set(this, void 0);
|
||||
__classPrivateFieldSet(this, _DirectlyTransferable_value, value, "f");
|
||||
}
|
||||
get [(_DirectlyTransferable_value = new WeakMap(), common_1.kTransferable)]() { return __classPrivateFieldGet(this, _DirectlyTransferable_value, "f"); }
|
||||
get [common_1.kValue]() { return __classPrivateFieldGet(this, _DirectlyTransferable_value, "f"); }
|
||||
}
|
||||
class ArrayBufferViewTransferable {
|
||||
constructor(view) {
|
||||
_ArrayBufferViewTransferable_view.set(this, void 0);
|
||||
__classPrivateFieldSet(this, _ArrayBufferViewTransferable_view, view, "f");
|
||||
}
|
||||
get [(_ArrayBufferViewTransferable_view = new WeakMap(), common_1.kTransferable)]() { return __classPrivateFieldGet(this, _ArrayBufferViewTransferable_view, "f").buffer; }
|
||||
get [common_1.kValue]() { return __classPrivateFieldGet(this, _ArrayBufferViewTransferable_view, "f"); }
|
||||
}
|
||||
let taskIdCounter = 0;
|
||||
function maybeFileURLToPath(filename) {
|
||||
return filename.startsWith('file:')
|
||||
? (0, url_1.fileURLToPath)(new url_1.URL(filename))
|
||||
: filename;
|
||||
}
|
||||
// Extend AsyncResource so that async relations between posting a task and
|
||||
// receiving its result are visible to diagnostic tools.
|
||||
class TaskInfo extends async_hooks_1.AsyncResource {
|
||||
constructor(task, transferList, filename, name, callback, abortSignal, triggerAsyncId) {
|
||||
super('Piscina.Task', { requireManualDestroy: true, triggerAsyncId });
|
||||
this.abortListener = null;
|
||||
this.workerInfo = null;
|
||||
this.callback = callback;
|
||||
this.task = task;
|
||||
this.transferList = transferList;
|
||||
// If the task is a Transferable returned by
|
||||
// Piscina.move(), then add it to the transferList
|
||||
// automatically
|
||||
if ((0, common_1.isMovable)(task)) {
|
||||
// This condition should never be hit but typescript
|
||||
// complains if we dont do the check.
|
||||
/* istanbul ignore if */
|
||||
if (this.transferList == null) {
|
||||
this.transferList = [];
|
||||
}
|
||||
this.transferList =
|
||||
this.transferList.concat(task[common_1.kTransferable]);
|
||||
this.task = task[common_1.kValue];
|
||||
}
|
||||
this.filename = filename;
|
||||
this.name = name;
|
||||
this.taskId = taskIdCounter++;
|
||||
this.abortSignal = abortSignal;
|
||||
this.created = perf_hooks_1.performance.now();
|
||||
this.started = 0;
|
||||
}
|
||||
releaseTask() {
|
||||
const ret = this.task;
|
||||
this.task = null;
|
||||
return ret;
|
||||
}
|
||||
done(err, result) {
|
||||
this.runInAsyncScope(this.callback, null, err, result);
|
||||
this.emitDestroy(); // `TaskInfo`s are used only once.
|
||||
// If an abort signal was used, remove the listener from it when
|
||||
// done to make sure we do not accidentally leak.
|
||||
if (this.abortSignal && this.abortListener) {
|
||||
if ('removeEventListener' in this.abortSignal && this.abortListener) {
|
||||
this.abortSignal.removeEventListener('abort', this.abortListener);
|
||||
}
|
||||
else {
|
||||
this.abortSignal.off('abort', this.abortListener);
|
||||
}
|
||||
}
|
||||
}
|
||||
get [common_1.kQueueOptions]() {
|
||||
return common_1.kQueueOptions in this.task ? this.task[common_1.kQueueOptions] : null;
|
||||
}
|
||||
}
|
||||
class AsynchronouslyCreatedResource {
|
||||
constructor() {
|
||||
this.onreadyListeners = [];
|
||||
}
|
||||
markAsReady() {
|
||||
const listeners = this.onreadyListeners;
|
||||
(0, assert_1.default)(listeners !== null);
|
||||
this.onreadyListeners = null;
|
||||
for (const listener of listeners) {
|
||||
listener();
|
||||
}
|
||||
}
|
||||
isReady() {
|
||||
return this.onreadyListeners === null;
|
||||
}
|
||||
onReady(fn) {
|
||||
if (this.onreadyListeners === null) {
|
||||
fn(); // Zalgo is okay here.
|
||||
return;
|
||||
}
|
||||
this.onreadyListeners.push(fn);
|
||||
}
|
||||
}
|
||||
class AsynchronouslyCreatedResourcePool {
|
||||
constructor(maximumUsage) {
|
||||
this.pendingItems = new Set();
|
||||
this.readyItems = new Set();
|
||||
this.maximumUsage = maximumUsage;
|
||||
this.onAvailableListeners = [];
|
||||
}
|
||||
add(item) {
|
||||
this.pendingItems.add(item);
|
||||
item.onReady(() => {
|
||||
/* istanbul ignore else */
|
||||
if (this.pendingItems.has(item)) {
|
||||
this.pendingItems.delete(item);
|
||||
this.readyItems.add(item);
|
||||
this.maybeAvailable(item);
|
||||
}
|
||||
});
|
||||
}
|
||||
delete(item) {
|
||||
this.pendingItems.delete(item);
|
||||
this.readyItems.delete(item);
|
||||
}
|
||||
findAvailable() {
|
||||
let minUsage = this.maximumUsage;
|
||||
let candidate = null;
|
||||
for (const item of this.readyItems) {
|
||||
const usage = item.currentUsage();
|
||||
if (usage === 0)
|
||||
return item;
|
||||
if (usage < minUsage) {
|
||||
candidate = item;
|
||||
minUsage = usage;
|
||||
}
|
||||
}
|
||||
return candidate;
|
||||
}
|
||||
*[Symbol.iterator]() {
|
||||
yield* this.pendingItems;
|
||||
yield* this.readyItems;
|
||||
}
|
||||
get size() {
|
||||
return this.pendingItems.size + this.readyItems.size;
|
||||
}
|
||||
maybeAvailable(item) {
|
||||
/* istanbul ignore else */
|
||||
if (item.currentUsage() < this.maximumUsage) {
|
||||
for (const listener of this.onAvailableListeners) {
|
||||
listener(item);
|
||||
}
|
||||
}
|
||||
}
|
||||
onAvailable(fn) {
|
||||
this.onAvailableListeners.push(fn);
|
||||
}
|
||||
}
|
||||
const Errors = {
|
||||
ThreadTermination: () => new Error('Terminating worker thread'),
|
||||
FilenameNotProvided: () => new Error('filename must be provided to run() or in options object'),
|
||||
TaskQueueAtLimit: () => new Error('Task queue is at limit'),
|
||||
NoTaskQueueAvailable: () => new Error('No task queue available and all Workers are busy'),
|
||||
CloseTimeout: () => new Error('Close operation timed out')
|
||||
};
|
||||
class WorkerInfo extends AsynchronouslyCreatedResource {
|
||||
constructor(worker, port, onMessage) {
|
||||
super();
|
||||
this.idleTimeout = null; // eslint-disable-line no-undef
|
||||
this.lastSeenResponseCount = 0;
|
||||
this.worker = worker;
|
||||
this.port = port;
|
||||
this.port.on('message', (message) => this._handleResponse(message));
|
||||
this.onMessage = onMessage;
|
||||
this.taskInfos = new Map();
|
||||
this.sharedBuffer = new Int32Array(new SharedArrayBuffer(common_1.kFieldCount * Int32Array.BYTES_PER_ELEMENT));
|
||||
}
|
||||
destroy() {
|
||||
this.worker.terminate();
|
||||
this.port.close();
|
||||
this.clearIdleTimeout();
|
||||
for (const taskInfo of this.taskInfos.values()) {
|
||||
taskInfo.done(Errors.ThreadTermination());
|
||||
}
|
||||
this.taskInfos.clear();
|
||||
}
|
||||
clearIdleTimeout() {
|
||||
if (this.idleTimeout !== null) {
|
||||
clearTimeout(this.idleTimeout);
|
||||
this.idleTimeout = null;
|
||||
}
|
||||
}
|
||||
ref() {
|
||||
this.port.ref();
|
||||
return this;
|
||||
}
|
||||
unref() {
|
||||
// Note: Do not call ref()/unref() on the Worker itself since that may cause
|
||||
// a hard crash, see https://github.com/nodejs/node/pull/33394.
|
||||
this.port.unref();
|
||||
return this;
|
||||
}
|
||||
_handleResponse(message) {
|
||||
this.onMessage(message);
|
||||
if (this.taskInfos.size === 0) {
|
||||
// No more tasks running on this Worker means it should not keep the
|
||||
// process running.
|
||||
this.unref();
|
||||
}
|
||||
}
|
||||
postTask(taskInfo) {
|
||||
(0, assert_1.default)(!this.taskInfos.has(taskInfo.taskId));
|
||||
const message = {
|
||||
task: taskInfo.releaseTask(),
|
||||
taskId: taskInfo.taskId,
|
||||
filename: taskInfo.filename,
|
||||
name: taskInfo.name
|
||||
};
|
||||
try {
|
||||
this.port.postMessage(message, taskInfo.transferList);
|
||||
}
|
||||
catch (err) {
|
||||
// This would mostly happen if e.g. message contains unserializable data
|
||||
// or transferList is invalid.
|
||||
taskInfo.done(err);
|
||||
return;
|
||||
}
|
||||
taskInfo.workerInfo = this;
|
||||
this.taskInfos.set(taskInfo.taskId, taskInfo);
|
||||
this.ref();
|
||||
this.clearIdleTimeout();
|
||||
// Inform the worker that there are new messages posted, and wake it up
|
||||
// if it is waiting for one.
|
||||
Atomics.add(this.sharedBuffer, common_1.kRequestCountField, 1);
|
||||
Atomics.notify(this.sharedBuffer, common_1.kRequestCountField, 1);
|
||||
}
|
||||
processPendingMessages() {
|
||||
// If we *know* that there are more messages than we have received using
|
||||
// 'message' events yet, then try to load and handle them synchronously,
|
||||
// without the need to wait for more expensive events on the event loop.
|
||||
// This would usually break async tracking, but in our case, we already have
|
||||
// the extra TaskInfo/AsyncResource layer that rectifies that situation.
|
||||
const actualResponseCount = Atomics.load(this.sharedBuffer, common_1.kResponseCountField);
|
||||
if (actualResponseCount !== this.lastSeenResponseCount) {
|
||||
this.lastSeenResponseCount = actualResponseCount;
|
||||
let entry;
|
||||
while ((entry = (0, worker_threads_1.receiveMessageOnPort)(this.port)) !== undefined) {
|
||||
this._handleResponse(entry.message);
|
||||
}
|
||||
}
|
||||
}
|
||||
isRunningAbortableTask() {
|
||||
// If there are abortable tasks, we are running one at most per Worker.
|
||||
if (this.taskInfos.size !== 1)
|
||||
return false;
|
||||
const [[, task]] = this.taskInfos;
|
||||
return task.abortSignal !== null;
|
||||
}
|
||||
currentUsage() {
|
||||
if (this.isRunningAbortableTask())
|
||||
return Infinity;
|
||||
return this.taskInfos.size;
|
||||
}
|
||||
}
|
||||
class ThreadPool {
|
||||
constructor(publicInterface, options) {
|
||||
var _a;
|
||||
this.skipQueue = [];
|
||||
this.completed = 0;
|
||||
this.start = perf_hooks_1.performance.now();
|
||||
this.inProcessPendingMessages = false;
|
||||
this.startingUp = false;
|
||||
this.closingUp = false;
|
||||
this.workerFailsDuringBootstrap = false;
|
||||
this.destroying = false;
|
||||
this.publicInterface = publicInterface;
|
||||
this.taskQueue = options.taskQueue || new ArrayTaskQueue();
|
||||
this.runTime = (0, hdr_histogram_js_1.build)({ lowestDiscernibleValue: 1 });
|
||||
this.waitTime = (0, hdr_histogram_js_1.build)({ lowestDiscernibleValue: 1 });
|
||||
const filename = options.filename ? maybeFileURLToPath(options.filename) : null;
|
||||
this.options = { ...kDefaultOptions, ...options, filename, maxQueue: 0 };
|
||||
// The >= and <= could be > and < but this way we get 100 % coverage 🙃
|
||||
if (options.maxThreads !== undefined &&
|
||||
this.options.minThreads >= options.maxThreads) {
|
||||
this.options.minThreads = options.maxThreads;
|
||||
}
|
||||
if (options.minThreads !== undefined &&
|
||||
this.options.maxThreads <= options.minThreads) {
|
||||
this.options.maxThreads = options.minThreads;
|
||||
}
|
||||
if (options.maxQueue === 'auto') {
|
||||
this.options.maxQueue = this.options.maxThreads ** 2;
|
||||
}
|
||||
else {
|
||||
this.options.maxQueue = (_a = options.maxQueue) !== null && _a !== void 0 ? _a : kDefaultOptions.maxQueue;
|
||||
}
|
||||
this.workers = new AsynchronouslyCreatedResourcePool(this.options.concurrentTasksPerWorker);
|
||||
this.workers.onAvailable((w) => this._onWorkerAvailable(w));
|
||||
this.startingUp = true;
|
||||
this._ensureMinimumWorkers();
|
||||
this.startingUp = false;
|
||||
this.needsDrain = false;
|
||||
}
|
||||
_ensureMinimumWorkers() {
|
||||
if (this.closingUp || this.destroying) {
|
||||
return;
|
||||
}
|
||||
while (this.workers.size < this.options.minThreads) {
|
||||
this._addNewWorker();
|
||||
}
|
||||
}
|
||||
_addNewWorker() {
|
||||
const pool = this;
|
||||
const worker = new worker_threads_1.Worker((0, path_1.resolve)(__dirname, 'worker.js'), {
|
||||
env: this.options.env,
|
||||
argv: this.options.argv,
|
||||
execArgv: this.options.execArgv,
|
||||
resourceLimits: this.options.resourceLimits,
|
||||
workerData: this.options.workerData,
|
||||
trackUnmanagedFds: this.options.trackUnmanagedFds
|
||||
});
|
||||
const { port1, port2 } = new worker_threads_1.MessageChannel();
|
||||
const workerInfo = new WorkerInfo(worker, port1, onMessage);
|
||||
if (this.startingUp) {
|
||||
// There is no point in waiting for the initial set of Workers to indicate
|
||||
// that they are ready, we just mark them as such from the start.
|
||||
workerInfo.markAsReady();
|
||||
}
|
||||
const message = {
|
||||
filename: this.options.filename,
|
||||
name: this.options.name,
|
||||
port: port2,
|
||||
sharedBuffer: workerInfo.sharedBuffer,
|
||||
useAtomics: this.options.useAtomics,
|
||||
niceIncrement: this.options.niceIncrement
|
||||
};
|
||||
worker.postMessage(message, [port2]);
|
||||
function onMessage(message) {
|
||||
const { taskId, result } = message;
|
||||
// In case of success: Call the callback that was passed to `runTask`,
|
||||
// remove the `TaskInfo` associated with the Worker, which marks it as
|
||||
// free again.
|
||||
const taskInfo = workerInfo.taskInfos.get(taskId);
|
||||
workerInfo.taskInfos.delete(taskId);
|
||||
pool.workers.maybeAvailable(workerInfo);
|
||||
/* istanbul ignore if */
|
||||
if (taskInfo === undefined) {
|
||||
const err = new Error(`Unexpected message from Worker: ${(0, util_1.inspect)(message)}`);
|
||||
pool.publicInterface.emit('error', err);
|
||||
}
|
||||
else {
|
||||
taskInfo.done(message.error, result);
|
||||
}
|
||||
pool._processPendingMessages();
|
||||
}
|
||||
function onReady() {
|
||||
if (workerInfo.currentUsage() === 0) {
|
||||
workerInfo.unref();
|
||||
}
|
||||
if (!workerInfo.isReady()) {
|
||||
workerInfo.markAsReady();
|
||||
}
|
||||
}
|
||||
function onEventMessage(message) {
|
||||
pool.publicInterface.emit('message', message);
|
||||
}
|
||||
worker.on('message', (message) => {
|
||||
message instanceof Object && common_1.READY in message ? onReady() : onEventMessage(message);
|
||||
});
|
||||
worker.on('error', (err) => {
|
||||
this._onError(worker, workerInfo, err, false);
|
||||
});
|
||||
worker.on('exit', (exitCode) => {
|
||||
if (this.destroying) {
|
||||
return;
|
||||
}
|
||||
const err = new Error(`worker exited with code: ${exitCode}`);
|
||||
// Only error unfinished tasks on process exit, since there are legitimate
|
||||
// reasons to exit workers and we want to handle that gracefully when possible.
|
||||
this._onError(worker, workerInfo, err, true);
|
||||
});
|
||||
worker.unref();
|
||||
port1.on('close', () => {
|
||||
// The port is only closed if the Worker stops for some reason, but we
|
||||
// always .unref() the Worker itself. We want to receive e.g. 'error'
|
||||
// events on it, so we ref it once we know it's going to exit anyway.
|
||||
worker.ref();
|
||||
});
|
||||
this.workers.add(workerInfo);
|
||||
}
|
||||
_onError(worker, workerInfo, err, onlyErrorUnfinishedTasks) {
|
||||
// Work around the bug in https://github.com/nodejs/node/pull/33394
|
||||
worker.ref = () => { };
|
||||
const taskInfos = [...workerInfo.taskInfos.values()];
|
||||
workerInfo.taskInfos.clear();
|
||||
// Remove the worker from the list and potentially start a new Worker to
|
||||
// replace the current one.
|
||||
this._removeWorker(workerInfo);
|
||||
if (workerInfo.isReady() && !this.workerFailsDuringBootstrap) {
|
||||
this._ensureMinimumWorkers();
|
||||
}
|
||||
else {
|
||||
// Do not start new workers over and over if they already fail during
|
||||
// bootstrap, there's no point.
|
||||
this.workerFailsDuringBootstrap = true;
|
||||
}
|
||||
if (taskInfos.length > 0) {
|
||||
// If there are remaining unfinished tasks, call the callback that was
|
||||
// passed to `postTask` with the error
|
||||
for (const taskInfo of taskInfos) {
|
||||
taskInfo.done(err, null);
|
||||
}
|
||||
}
|
||||
else if (!onlyErrorUnfinishedTasks) {
|
||||
// If there are no unfinished tasks, instead emit an 'error' event
|
||||
this.publicInterface.emit('error', err);
|
||||
}
|
||||
}
|
||||
_processPendingMessages() {
|
||||
if (this.inProcessPendingMessages || !this.options.useAtomics) {
|
||||
return;
|
||||
}
|
||||
this.inProcessPendingMessages = true;
|
||||
try {
|
||||
for (const workerInfo of this.workers) {
|
||||
workerInfo.processPendingMessages();
|
||||
}
|
||||
}
|
||||
finally {
|
||||
this.inProcessPendingMessages = false;
|
||||
}
|
||||
}
|
||||
_removeWorker(workerInfo) {
|
||||
workerInfo.destroy();
|
||||
this.workers.delete(workerInfo);
|
||||
}
|
||||
_onWorkerAvailable(workerInfo) {
|
||||
while ((this.taskQueue.size > 0 || this.skipQueue.length > 0) &&
|
||||
workerInfo.currentUsage() < this.options.concurrentTasksPerWorker) {
|
||||
// The skipQueue will have tasks that we previously shifted off
|
||||
// the task queue but had to skip over... we have to make sure
|
||||
// we drain that before we drain the taskQueue.
|
||||
const taskInfo = this.skipQueue.shift() ||
|
||||
this.taskQueue.shift();
|
||||
// If the task has an abortSignal and the worker has any other
|
||||
// tasks, we cannot distribute the task to it. Skip for now.
|
||||
if (taskInfo.abortSignal && workerInfo.taskInfos.size > 0) {
|
||||
this.skipQueue.push(taskInfo);
|
||||
break;
|
||||
}
|
||||
const now = perf_hooks_1.performance.now();
|
||||
this.waitTime.recordValue(now - taskInfo.created);
|
||||
taskInfo.started = now;
|
||||
workerInfo.postTask(taskInfo);
|
||||
this._maybeDrain();
|
||||
return;
|
||||
}
|
||||
if (workerInfo.taskInfos.size === 0 &&
|
||||
this.workers.size > this.options.minThreads) {
|
||||
workerInfo.idleTimeout = setTimeout(() => {
|
||||
assert_1.default.strictEqual(workerInfo.taskInfos.size, 0);
|
||||
if (this.workers.size > this.options.minThreads) {
|
||||
this._removeWorker(workerInfo);
|
||||
}
|
||||
}, this.options.idleTimeout).unref();
|
||||
}
|
||||
}
|
||||
runTask(task, options) {
|
||||
var _a;
|
||||
let { filename, name } = options;
|
||||
const { transferList = [] } = options;
|
||||
if (filename == null) {
|
||||
filename = this.options.filename;
|
||||
}
|
||||
if (name == null) {
|
||||
name = this.options.name;
|
||||
}
|
||||
if (typeof filename !== 'string') {
|
||||
return Promise.reject(Errors.FilenameNotProvided());
|
||||
}
|
||||
filename = maybeFileURLToPath(filename);
|
||||
let signal;
|
||||
if (this.closingUp) {
|
||||
const closingUpAbortController = new AbortController();
|
||||
closingUpAbortController.abort('queue is closing up');
|
||||
signal = closingUpAbortController.signal;
|
||||
}
|
||||
else {
|
||||
signal = (_a = options.signal) !== null && _a !== void 0 ? _a : null;
|
||||
}
|
||||
let resolve;
|
||||
let reject;
|
||||
// eslint-disable-next-line
|
||||
const ret = new Promise((res, rej) => { resolve = res; reject = rej; });
|
||||
const taskInfo = new TaskInfo(task, transferList, filename, name, (err, result) => {
|
||||
this.completed++;
|
||||
if (taskInfo.started) {
|
||||
this.runTime.recordValue(perf_hooks_1.performance.now() - taskInfo.started);
|
||||
}
|
||||
if (err !== null) {
|
||||
reject(err);
|
||||
}
|
||||
else {
|
||||
resolve(result);
|
||||
}
|
||||
this._maybeDrain();
|
||||
}, signal, this.publicInterface.asyncResource.asyncId());
|
||||
if (signal !== null) {
|
||||
// If the AbortSignal has an aborted property and it's truthy,
|
||||
// reject immediately.
|
||||
if (signal.aborted) {
|
||||
return Promise.reject(new AbortError(signal.reason));
|
||||
}
|
||||
taskInfo.abortListener = () => {
|
||||
// Call reject() first to make sure we always reject with the AbortError
|
||||
// if the task is aborted, not with an Error from the possible
|
||||
// thread termination below.
|
||||
reject(new AbortError(signal.reason));
|
||||
if (taskInfo.workerInfo !== null) {
|
||||
// Already running: We cancel the Worker this is running on.
|
||||
this._removeWorker(taskInfo.workerInfo);
|
||||
this._ensureMinimumWorkers();
|
||||
}
|
||||
else {
|
||||
// Not yet running: Remove it from the queue.
|
||||
this.taskQueue.remove(taskInfo);
|
||||
}
|
||||
};
|
||||
onabort(signal, taskInfo.abortListener);
|
||||
}
|
||||
// If there is a task queue, there's no point in looking for an available
|
||||
// Worker thread. Add this task to the queue, if possible.
|
||||
if (this.taskQueue.size > 0) {
|
||||
const totalCapacity = this.options.maxQueue + this.pendingCapacity();
|
||||
if (this.taskQueue.size >= totalCapacity) {
|
||||
if (this.options.maxQueue === 0) {
|
||||
return Promise.reject(Errors.NoTaskQueueAvailable());
|
||||
}
|
||||
else {
|
||||
return Promise.reject(Errors.TaskQueueAtLimit());
|
||||
}
|
||||
}
|
||||
else {
|
||||
if (this.workers.size < this.options.maxThreads) {
|
||||
this._addNewWorker();
|
||||
}
|
||||
this.taskQueue.push(taskInfo);
|
||||
}
|
||||
this._maybeDrain();
|
||||
return ret;
|
||||
}
|
||||
// Look for a Worker with a minimum number of tasks it is currently running.
|
||||
let workerInfo = this.workers.findAvailable();
|
||||
// If we want the ability to abort this task, use only workers that have
|
||||
// no running tasks.
|
||||
if (workerInfo !== null && workerInfo.currentUsage() > 0 && signal) {
|
||||
workerInfo = null;
|
||||
}
|
||||
// If no Worker was found, or that Worker was handling another task in some
|
||||
// way, and we still have the ability to spawn new threads, do so.
|
||||
let waitingForNewWorker = false;
|
||||
if ((workerInfo === null || workerInfo.currentUsage() > 0) &&
|
||||
this.workers.size < this.options.maxThreads) {
|
||||
this._addNewWorker();
|
||||
waitingForNewWorker = true;
|
||||
}
|
||||
// If no Worker is found, try to put the task into the queue.
|
||||
if (workerInfo === null) {
|
||||
if (this.options.maxQueue <= 0 && !waitingForNewWorker) {
|
||||
return Promise.reject(Errors.NoTaskQueueAvailable());
|
||||
}
|
||||
else {
|
||||
this.taskQueue.push(taskInfo);
|
||||
}
|
||||
this._maybeDrain();
|
||||
return ret;
|
||||
}
|
||||
// TODO(addaleax): Clean up the waitTime/runTime recording.
|
||||
const now = perf_hooks_1.performance.now();
|
||||
this.waitTime.recordValue(now - taskInfo.created);
|
||||
taskInfo.started = now;
|
||||
workerInfo.postTask(taskInfo);
|
||||
this._maybeDrain();
|
||||
return ret;
|
||||
}
|
||||
pendingCapacity() {
|
||||
return this.workers.pendingItems.size *
|
||||
this.options.concurrentTasksPerWorker;
|
||||
}
|
||||
_maybeDrain() {
|
||||
const totalCapacity = this.options.maxQueue + this.pendingCapacity();
|
||||
const totalQueueSize = this.taskQueue.size + this.skipQueue.length;
|
||||
if (totalQueueSize === 0) {
|
||||
this.needsDrain = false;
|
||||
this.publicInterface.emit('drain');
|
||||
}
|
||||
if (totalQueueSize >= totalCapacity) {
|
||||
this.needsDrain = true;
|
||||
this.publicInterface.emit('needsDrain');
|
||||
}
|
||||
}
|
||||
async destroy() {
|
||||
this.destroying = true;
|
||||
while (this.skipQueue.length > 0) {
|
||||
const taskInfo = this.skipQueue.shift();
|
||||
taskInfo.done(new Error('Terminating worker thread'));
|
||||
}
|
||||
while (this.taskQueue.size > 0) {
|
||||
const taskInfo = this.taskQueue.shift();
|
||||
taskInfo.done(new Error('Terminating worker thread'));
|
||||
}
|
||||
const exitEvents = [];
|
||||
while (this.workers.size > 0) {
|
||||
const [workerInfo] = this.workers;
|
||||
exitEvents.push((0, events_1.once)(workerInfo.worker, 'exit'));
|
||||
this._removeWorker(workerInfo);
|
||||
}
|
||||
try {
|
||||
await Promise.all(exitEvents);
|
||||
}
|
||||
finally {
|
||||
this.destroying = false;
|
||||
}
|
||||
}
|
||||
async close(options) {
|
||||
this.closingUp = true;
|
||||
if (options.force) {
|
||||
const skipQueueLength = this.skipQueue.length;
|
||||
for (let i = 0; i < skipQueueLength; i++) {
|
||||
const taskInfo = this.skipQueue.shift();
|
||||
if (taskInfo.workerInfo === null) {
|
||||
taskInfo.done(new AbortError('pool is closed'));
|
||||
}
|
||||
else {
|
||||
this.skipQueue.push(taskInfo);
|
||||
}
|
||||
}
|
||||
const taskQueueLength = this.taskQueue.size;
|
||||
for (let i = 0; i < taskQueueLength; i++) {
|
||||
const taskInfo = this.taskQueue.shift();
|
||||
if (taskInfo.workerInfo === null) {
|
||||
taskInfo.done(new AbortError('pool is closed'));
|
||||
}
|
||||
else {
|
||||
this.taskQueue.push(taskInfo);
|
||||
}
|
||||
}
|
||||
}
|
||||
const onPoolFlushed = () => new Promise((resolve) => {
|
||||
const numberOfWorkers = this.workers.size;
|
||||
let numberOfWorkersDone = 0;
|
||||
const checkIfWorkerIsDone = (workerInfo) => {
|
||||
if (workerInfo.taskInfos.size === 0) {
|
||||
numberOfWorkersDone++;
|
||||
}
|
||||
if (numberOfWorkers === numberOfWorkersDone) {
|
||||
resolve();
|
||||
}
|
||||
};
|
||||
for (const workerInfo of this.workers) {
|
||||
checkIfWorkerIsDone(workerInfo);
|
||||
workerInfo.port.on('message', () => checkIfWorkerIsDone(workerInfo));
|
||||
}
|
||||
});
|
||||
const throwOnTimeOut = async (timeout) => {
|
||||
await (0, promises_1.setTimeout)(timeout);
|
||||
throw Errors.CloseTimeout();
|
||||
};
|
||||
try {
|
||||
await Promise.race([
|
||||
onPoolFlushed(),
|
||||
throwOnTimeOut(this.options.closeTimeout)
|
||||
]);
|
||||
}
|
||||
catch (error) {
|
||||
this.publicInterface.emit('error', error);
|
||||
}
|
||||
finally {
|
||||
await this.destroy();
|
||||
this.publicInterface.emit('close');
|
||||
this.closingUp = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
class Piscina extends events_1.EventEmitterAsyncResource {
|
||||
constructor(options = {}) {
|
||||
super({ ...options, name: 'Piscina' });
|
||||
_Piscina_pool.set(this, void 0);
|
||||
if (typeof options.filename !== 'string' && options.filename != null) {
|
||||
throw new TypeError('options.filename must be a string or null');
|
||||
}
|
||||
if (typeof options.name !== 'string' && options.name != null) {
|
||||
throw new TypeError('options.name must be a string or null');
|
||||
}
|
||||
if (options.minThreads !== undefined &&
|
||||
(typeof options.minThreads !== 'number' || options.minThreads < 0)) {
|
||||
throw new TypeError('options.minThreads must be a non-negative integer');
|
||||
}
|
||||
if (options.maxThreads !== undefined &&
|
||||
(typeof options.maxThreads !== 'number' || options.maxThreads < 1)) {
|
||||
throw new TypeError('options.maxThreads must be a positive integer');
|
||||
}
|
||||
if (options.minThreads !== undefined && options.maxThreads !== undefined &&
|
||||
options.minThreads > options.maxThreads) {
|
||||
throw new RangeError('options.minThreads and options.maxThreads must not conflict');
|
||||
}
|
||||
if (options.idleTimeout !== undefined &&
|
||||
(typeof options.idleTimeout !== 'number' || options.idleTimeout < 0)) {
|
||||
throw new TypeError('options.idleTimeout must be a non-negative integer');
|
||||
}
|
||||
if (options.maxQueue !== undefined &&
|
||||
options.maxQueue !== 'auto' &&
|
||||
(typeof options.maxQueue !== 'number' || options.maxQueue < 0)) {
|
||||
throw new TypeError('options.maxQueue must be a non-negative integer');
|
||||
}
|
||||
if (options.concurrentTasksPerWorker !== undefined &&
|
||||
(typeof options.concurrentTasksPerWorker !== 'number' ||
|
||||
options.concurrentTasksPerWorker < 1)) {
|
||||
throw new TypeError('options.concurrentTasksPerWorker must be a positive integer');
|
||||
}
|
||||
if (options.useAtomics !== undefined &&
|
||||
typeof options.useAtomics !== 'boolean') {
|
||||
throw new TypeError('options.useAtomics must be a boolean value');
|
||||
}
|
||||
if (options.resourceLimits !== undefined &&
|
||||
(typeof options.resourceLimits !== 'object' ||
|
||||
options.resourceLimits === null)) {
|
||||
throw new TypeError('options.resourceLimits must be an object');
|
||||
}
|
||||
if (options.taskQueue !== undefined && !(0, common_1.isTaskQueue)(options.taskQueue)) {
|
||||
throw new TypeError('options.taskQueue must be a TaskQueue object');
|
||||
}
|
||||
if (options.niceIncrement !== undefined &&
|
||||
(typeof options.niceIncrement !== 'number' || options.niceIncrement < 0)) {
|
||||
throw new TypeError('options.niceIncrement must be a non-negative integer');
|
||||
}
|
||||
if (options.trackUnmanagedFds !== undefined &&
|
||||
typeof options.trackUnmanagedFds !== 'boolean') {
|
||||
throw new TypeError('options.trackUnmanagedFds must be a boolean value');
|
||||
}
|
||||
if (options.closeTimeout !== undefined && (typeof options.closeTimeout !== 'number' || options.closeTimeout < 0)) {
|
||||
throw new TypeError('options.closeTimeout must be a non-negative integer');
|
||||
}
|
||||
__classPrivateFieldSet(this, _Piscina_pool, new ThreadPool(this, options), "f");
|
||||
}
|
||||
/** @deprecated Use run(task, options) instead **/
|
||||
runTask(task, transferList, filename, signal) {
|
||||
// If transferList is a string or AbortSignal, shift it.
|
||||
if ((typeof transferList === 'object' && !Array.isArray(transferList)) ||
|
||||
typeof transferList === 'string') {
|
||||
signal = filename;
|
||||
filename = transferList;
|
||||
transferList = undefined;
|
||||
}
|
||||
// If filename is an AbortSignal, shift it.
|
||||
if (typeof filename === 'object' && !Array.isArray(filename)) {
|
||||
signal = filename;
|
||||
filename = undefined;
|
||||
}
|
||||
if (transferList !== undefined && !Array.isArray(transferList)) {
|
||||
return Promise.reject(new TypeError('transferList argument must be an Array'));
|
||||
}
|
||||
if (filename !== undefined && typeof filename !== 'string') {
|
||||
return Promise.reject(new TypeError('filename argument must be a string'));
|
||||
}
|
||||
if (signal !== undefined && typeof signal !== 'object') {
|
||||
return Promise.reject(new TypeError('signal argument must be an object'));
|
||||
}
|
||||
return __classPrivateFieldGet(this, _Piscina_pool, "f").runTask(task, {
|
||||
transferList,
|
||||
filename: filename || null,
|
||||
name: 'default',
|
||||
signal: signal || null
|
||||
});
|
||||
}
|
||||
run(task, options = kDefaultRunOptions) {
|
||||
if (options === null || typeof options !== 'object') {
|
||||
return Promise.reject(new TypeError('options must be an object'));
|
||||
}
|
||||
const { transferList, filename, name, signal } = options;
|
||||
if (transferList !== undefined && !Array.isArray(transferList)) {
|
||||
return Promise.reject(new TypeError('transferList argument must be an Array'));
|
||||
}
|
||||
if (filename != null && typeof filename !== 'string') {
|
||||
return Promise.reject(new TypeError('filename argument must be a string'));
|
||||
}
|
||||
if (name != null && typeof name !== 'string') {
|
||||
return Promise.reject(new TypeError('name argument must be a string'));
|
||||
}
|
||||
if (signal != null && typeof signal !== 'object') {
|
||||
return Promise.reject(new TypeError('signal argument must be an object'));
|
||||
}
|
||||
return __classPrivateFieldGet(this, _Piscina_pool, "f").runTask(task, { transferList, filename, name, signal });
|
||||
}
|
||||
async close(options = kDefaultCloseOptions) {
|
||||
if (options === null || typeof options !== 'object') {
|
||||
throw TypeError('options must be an object');
|
||||
}
|
||||
let { force } = options;
|
||||
if (force !== undefined && typeof force !== 'boolean') {
|
||||
return Promise.reject(new TypeError('force argument must be a boolean'));
|
||||
}
|
||||
force !== null && force !== void 0 ? force : (force = kDefaultCloseOptions.force);
|
||||
return __classPrivateFieldGet(this, _Piscina_pool, "f").close({
|
||||
force
|
||||
});
|
||||
}
|
||||
destroy() {
|
||||
return __classPrivateFieldGet(this, _Piscina_pool, "f").destroy();
|
||||
}
|
||||
get maxThreads() {
|
||||
return __classPrivateFieldGet(this, _Piscina_pool, "f").options.maxThreads;
|
||||
}
|
||||
get minThreads() {
|
||||
return __classPrivateFieldGet(this, _Piscina_pool, "f").options.minThreads;
|
||||
}
|
||||
get options() {
|
||||
return __classPrivateFieldGet(this, _Piscina_pool, "f").options;
|
||||
}
|
||||
get threads() {
|
||||
const ret = [];
|
||||
for (const workerInfo of __classPrivateFieldGet(this, _Piscina_pool, "f").workers) {
|
||||
ret.push(workerInfo.worker);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
get queueSize() {
|
||||
const pool = __classPrivateFieldGet(this, _Piscina_pool, "f");
|
||||
return Math.max(pool.taskQueue.size - pool.pendingCapacity(), 0);
|
||||
}
|
||||
get completed() {
|
||||
return __classPrivateFieldGet(this, _Piscina_pool, "f").completed;
|
||||
}
|
||||
get waitTime() {
|
||||
const result = hdr_histogram_percentiles_obj_1.default.histAsObj(__classPrivateFieldGet(this, _Piscina_pool, "f").waitTime);
|
||||
return hdr_histogram_percentiles_obj_1.default.addPercentiles(__classPrivateFieldGet(this, _Piscina_pool, "f").waitTime, result);
|
||||
}
|
||||
get runTime() {
|
||||
const result = hdr_histogram_percentiles_obj_1.default.histAsObj(__classPrivateFieldGet(this, _Piscina_pool, "f").runTime);
|
||||
return hdr_histogram_percentiles_obj_1.default.addPercentiles(__classPrivateFieldGet(this, _Piscina_pool, "f").runTime, result);
|
||||
}
|
||||
get utilization() {
|
||||
// The capacity is the max compute time capacity of the
|
||||
// pool to this point in time as determined by the length
|
||||
// of time the pool has been running multiplied by the
|
||||
// maximum number of threads.
|
||||
const capacity = this.duration * __classPrivateFieldGet(this, _Piscina_pool, "f").options.maxThreads;
|
||||
const totalMeanRuntime = __classPrivateFieldGet(this, _Piscina_pool, "f").runTime.mean *
|
||||
__classPrivateFieldGet(this, _Piscina_pool, "f").runTime.totalCount;
|
||||
// We calculate the appoximate pool utilization by multiplying
|
||||
// the mean run time of all tasks by the number of runtime
|
||||
// samples taken and dividing that by the capacity. The
|
||||
// theory here is that capacity represents the absolute upper
|
||||
// limit of compute time this pool could ever attain (but
|
||||
// never will for a variety of reasons. Multiplying the
|
||||
// mean run time by the number of tasks sampled yields an
|
||||
// approximation of the realized compute time. The utilization
|
||||
// then becomes a point-in-time measure of how active the
|
||||
// pool is.
|
||||
return totalMeanRuntime / capacity;
|
||||
}
|
||||
get duration() {
|
||||
return perf_hooks_1.performance.now() - __classPrivateFieldGet(this, _Piscina_pool, "f").start;
|
||||
}
|
||||
get needsDrain() {
|
||||
return __classPrivateFieldGet(this, _Piscina_pool, "f").needsDrain;
|
||||
}
|
||||
static get isWorkerThread() {
|
||||
return common_1.commonState.isWorkerThread;
|
||||
}
|
||||
static get workerData() {
|
||||
return common_1.commonState.workerData;
|
||||
}
|
||||
static get version() {
|
||||
return package_json_1.version;
|
||||
}
|
||||
static get Piscina() {
|
||||
return Piscina;
|
||||
}
|
||||
static move(val) {
|
||||
if (val != null && typeof val === 'object' && typeof val !== 'function') {
|
||||
if (!(0, common_1.isTransferable)(val)) {
|
||||
if (util_1.types.isArrayBufferView(val)) {
|
||||
val = new ArrayBufferViewTransferable(val);
|
||||
}
|
||||
else {
|
||||
val = new DirectlyTransferable(val);
|
||||
}
|
||||
}
|
||||
(0, common_1.markMovable)(val);
|
||||
}
|
||||
return val;
|
||||
}
|
||||
static get transferableSymbol() { return common_1.kTransferable; }
|
||||
static get valueSymbol() { return common_1.kValue; }
|
||||
static get queueOptionsSymbol() { return common_1.kQueueOptions; }
|
||||
}
|
||||
_Piscina_pool = new WeakMap();
|
||||
module.exports = Piscina;
|
||||
//# sourceMappingURL=index.js.map
|
1
my-app/node_modules/piscina/dist/src/index.js.map
generated
vendored
Executable file
1
my-app/node_modules/piscina/dist/src/index.js.map
generated
vendored
Executable file
File diff suppressed because one or more lines are too long
0
my-app/node_modules/piscina/dist/src/typescript.d.ts
generated
vendored
Executable file
0
my-app/node_modules/piscina/dist/src/typescript.d.ts
generated
vendored
Executable file
2
my-app/node_modules/piscina/dist/src/typescript.js
generated
vendored
Executable file
2
my-app/node_modules/piscina/dist/src/typescript.js
generated
vendored
Executable file
|
@ -0,0 +1,2 @@
|
|||
"use strict";
|
||||
//# sourceMappingURL=typescript.js.map
|
1
my-app/node_modules/piscina/dist/src/typescript.js.map
generated
vendored
Executable file
1
my-app/node_modules/piscina/dist/src/typescript.js.map
generated
vendored
Executable file
|
@ -0,0 +1 @@
|
|||
{"version":3,"file":"typescript.js","sourceRoot":"","sources":["../../src/typescript.ts"],"names":[],"mappings":""}
|
1
my-app/node_modules/piscina/dist/src/worker.d.ts
generated
vendored
Executable file
1
my-app/node_modules/piscina/dist/src/worker.d.ts
generated
vendored
Executable file
|
@ -0,0 +1 @@
|
|||
export {};
|
187
my-app/node_modules/piscina/dist/src/worker.js
generated
vendored
Executable file
187
my-app/node_modules/piscina/dist/src/worker.js
generated
vendored
Executable file
|
@ -0,0 +1,187 @@
|
|||
"use strict";
|
||||
var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
|
||||
if (k2 === undefined) k2 = k;
|
||||
var desc = Object.getOwnPropertyDescriptor(m, k);
|
||||
if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
|
||||
desc = { enumerable: true, get: function() { return m[k]; } };
|
||||
}
|
||||
Object.defineProperty(o, k2, desc);
|
||||
}) : (function(o, m, k, k2) {
|
||||
if (k2 === undefined) k2 = k;
|
||||
o[k2] = m[k];
|
||||
}));
|
||||
var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
|
||||
Object.defineProperty(o, "default", { enumerable: true, value: v });
|
||||
}) : function(o, v) {
|
||||
o["default"] = v;
|
||||
});
|
||||
var __importStar = (this && this.__importStar) || function (mod) {
|
||||
if (mod && mod.__esModule) return mod;
|
||||
var result = {};
|
||||
if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
|
||||
__setModuleDefault(result, mod);
|
||||
return result;
|
||||
};
|
||||
Object.defineProperty(exports, "__esModule", { value: true });
|
||||
const worker_threads_1 = require("worker_threads");
|
||||
const url_1 = require("url");
|
||||
const common_1 = require("./common");
|
||||
common_1.commonState.isWorkerThread = true;
|
||||
common_1.commonState.workerData = worker_threads_1.workerData;
|
||||
const handlerCache = new Map();
|
||||
let useAtomics = process.env.PISCINA_DISABLE_ATOMICS !== '1';
|
||||
// Get `import(x)` as a function that isn't transpiled to `require(x)` by
|
||||
// TypeScript for dual ESM/CJS support.
|
||||
// Load this lazily, so that there is no warning about the ESM loader being
|
||||
// experimental (on Node v12.x) until we actually try to use it.
|
||||
let importESMCached;
|
||||
function getImportESM() {
|
||||
if (importESMCached === undefined) {
|
||||
// eslint-disable-next-line no-new-func
|
||||
importESMCached = new Function('specifier', 'return import(specifier)');
|
||||
}
|
||||
return importESMCached;
|
||||
}
|
||||
// Look up the handler function that we call when a task is posted.
|
||||
// This is either going to be "the" export from a file, or the default export.
|
||||
async function getHandler(filename, name) {
|
||||
let handler = handlerCache.get(`${filename}/${name}`);
|
||||
if (handler !== undefined) {
|
||||
return handler;
|
||||
}
|
||||
try {
|
||||
// With our current set of TypeScript options, this is transpiled to
|
||||
// `require(filename)`.
|
||||
handler = await Promise.resolve(`${filename}`).then(s => __importStar(require(s)));
|
||||
if (typeof handler !== 'function') {
|
||||
handler = await (handler[name]);
|
||||
}
|
||||
}
|
||||
catch { }
|
||||
if (typeof handler !== 'function') {
|
||||
handler = await getImportESM()((0, url_1.pathToFileURL)(filename).href);
|
||||
if (typeof handler !== 'function') {
|
||||
handler = await (handler[name]);
|
||||
}
|
||||
}
|
||||
if (typeof handler !== 'function') {
|
||||
return null;
|
||||
}
|
||||
// Limit the handler cache size. This should not usually be an issue and is
|
||||
// only provided for pathological cases.
|
||||
if (handlerCache.size > 1000) {
|
||||
const [[key]] = handlerCache;
|
||||
handlerCache.delete(key);
|
||||
}
|
||||
handlerCache.set(`${filename}/${name}`, handler);
|
||||
return handler;
|
||||
}
|
||||
// We should only receive this message once, when the Worker starts. It gives
|
||||
// us the MessagePort used for receiving tasks, a SharedArrayBuffer for fast
|
||||
// communication using Atomics, and the name of the default filename for tasks
|
||||
// (so we can pre-load and cache the handler).
|
||||
worker_threads_1.parentPort.on('message', (message) => {
|
||||
useAtomics = process.env.PISCINA_DISABLE_ATOMICS === '1' ? false : message.useAtomics;
|
||||
const { port, sharedBuffer, filename, name, niceIncrement } = message;
|
||||
(async function () {
|
||||
try {
|
||||
if (niceIncrement !== 0 && process.platform === 'linux') {
|
||||
// ts-ignore because the dependency is not installed on Windows.
|
||||
// @ts-ignore
|
||||
(await Promise.resolve().then(() => __importStar(require('nice-napi')))).default(niceIncrement);
|
||||
}
|
||||
}
|
||||
catch { }
|
||||
if (filename !== null) {
|
||||
await getHandler(filename, name);
|
||||
}
|
||||
const readyMessage = { [common_1.READY]: true };
|
||||
worker_threads_1.parentPort.postMessage(readyMessage);
|
||||
port.on('message', onMessage.bind(null, port, sharedBuffer));
|
||||
atomicsWaitLoop(port, sharedBuffer);
|
||||
})().catch(throwInNextTick);
|
||||
});
|
||||
let currentTasks = 0;
|
||||
let lastSeenRequestCount = 0;
|
||||
function atomicsWaitLoop(port, sharedBuffer) {
|
||||
if (!useAtomics)
|
||||
return;
|
||||
// This function is entered either after receiving the startup message, or
|
||||
// when we are done with a task. In those situations, the *only* thing we
|
||||
// expect to happen next is a 'message' on `port`.
|
||||
// That call would come with the overhead of a C++ → JS boundary crossing,
|
||||
// including async tracking. So, instead, if there is no task currently
|
||||
// running, we wait for a signal from the parent thread using Atomics.wait(),
|
||||
// and read the message from the port instead of generating an event,
|
||||
// in order to avoid that overhead.
|
||||
// The one catch is that this stops asynchronous operations that are still
|
||||
// running from proceeding. Generally, tasks should not spawn asynchronous
|
||||
// operations without waiting for them to finish, though.
|
||||
while (currentTasks === 0) {
|
||||
// Check whether there are new messages by testing whether the current
|
||||
// number of requests posted by the parent thread matches the number of
|
||||
// requests received.
|
||||
Atomics.wait(sharedBuffer, common_1.kRequestCountField, lastSeenRequestCount);
|
||||
lastSeenRequestCount = Atomics.load(sharedBuffer, common_1.kRequestCountField);
|
||||
// We have to read messages *after* updating lastSeenRequestCount in order
|
||||
// to avoid race conditions.
|
||||
let entry;
|
||||
while ((entry = (0, worker_threads_1.receiveMessageOnPort)(port)) !== undefined) {
|
||||
onMessage(port, sharedBuffer, entry.message);
|
||||
}
|
||||
}
|
||||
}
|
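The parent-thread side of this handshake is not part of this file. Roughly, after posting a task on the port the pool bumps the shared request counter and wakes any worker blocked in Atomics.wait(). A minimal sketch of that counterpart, stated as an assumption rather than the pool's actual code, using the kRequestCountField index exported by common.js:

// assumed parent-side counterpart (illustration, not taken from this package)
function postTaskToWorker(port, sharedBuffer, requestMessage) {
    port.postMessage(requestMessage);
    // bump the request counter and wake a worker waiting on it
    Atomics.add(sharedBuffer, common_1.kRequestCountField, 1);
    Atomics.notify(sharedBuffer, common_1.kRequestCountField, 1);
}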
function onMessage(port, sharedBuffer, message) {
    currentTasks++;
    const { taskId, task, filename, name } = message;
    (async function () {
        let response;
        let transferList = [];
        try {
            const handler = await getHandler(filename, name);
            if (handler === null) {
                throw new Error(`No handler function exported from ${filename}`);
            }
            let result = await handler(task);
            if ((0, common_1.isMovable)(result)) {
                transferList = transferList.concat(result[common_1.kTransferable]);
                result = result[common_1.kValue];
            }
            response = {
                taskId,
                result: result,
                error: null
            };
            // If the task used e.g. console.log(), wait for the stream to drain
            // before potentially entering the `Atomics.wait()` loop, and before
            // returning the result so that messages will always be printed even
            // if the process would otherwise be ready to exit.
            if (process.stdout.writableLength > 0) {
                await new Promise((resolve) => process.stdout.write('', resolve));
            }
            if (process.stderr.writableLength > 0) {
                await new Promise((resolve) => process.stderr.write('', resolve));
            }
        }
        catch (error) {
            response = {
                taskId,
                result: null,
                // It may be worth taking a look at the error cloning algorithm we
                // use in Node.js core here, it's quite a bit more flexible
                error: error instanceof Error ? error : null
            };
        }
        currentTasks--;
        // Post the response to the parent thread, and let it know that we have
        // an additional message available. If possible, use Atomics.wait()
        // to wait for the next message.
        port.postMessage(response, transferList);
        Atomics.add(sharedBuffer, common_1.kResponseCountField, 1);
        atomicsWaitLoop(port, sharedBuffer);
    })().catch(throwInNextTick);
}
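The isMovable branch above lets a handler hand its result back with a transfer list instead of a structured-clone copy. A hedged example of a task file exercising that path via the documented Piscina.move() helper; the filename is hypothetical:

// hypothetical task file returning a movable result (illustration only)
const { move } = require('piscina');

module.exports = ({ size }) => {
    // the underlying ArrayBuffer ends up in transferList rather than being cloned
    return move(new ArrayBuffer(size));
};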
function throwInNextTick(error) {
    process.nextTick(() => { throw error; });
}
//# sourceMappingURL=worker.js.map
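For completeness, the worker file above is driven by a pool on the main thread. A minimal, hedged usage sketch of the public API it serves; the paths are illustrative and the expected result assumes the hypothetical adding task file sketched earlier:

// illustrative main-thread usage (not part of the generated file above)
const path = require('path');
const Piscina = require('piscina');

const pool = new Piscina({
    filename: path.resolve(__dirname, 'worker-tasks.js') // hypothetical task file
});

pool.run({ a: 4, b: 6 }).then((result) => {
    console.log(result); // 10, if the task file adds its inputs
});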
1
my-app/node_modules/piscina/dist/src/worker.js.map
generated
vendored
Executable file
@ -0,0 +1 @@
{"version":3,"file":"worker.js","sourceRoot":"","sources":["../../src/worker.ts"],"names":[],"mappings":";;;;;;;;;;;;;;;;;;;;;;;;;AAAA,mDAA2F;AAC3F,6BAAoC;AACpC,qCAYkB;AAElB,oBAAW,CAAC,cAAc,GAAG,IAAI,CAAC;AAClC,oBAAW,CAAC,UAAU,GAAG,2BAAU,CAAC;AAEpC,MAAM,YAAY,GAA2B,IAAI,GAAG,EAAE,CAAC;AACvD,IAAI,UAAU,GAAa,OAAO,CAAC,GAAG,CAAC,uBAAuB,KAAK,GAAG,CAAC;AAEvE,yEAAyE;AACzE,uCAAuC;AACvC,2EAA2E;AAC3E,gEAAgE;AAChE,IAAI,eAAkE,CAAC;AACvE,SAAS,YAAY;IACnB,IAAI,eAAe,KAAK,SAAS,EAAE;QACjC,uCAAuC;QACvC,eAAe,GAAG,IAAI,QAAQ,CAAC,WAAW,EAAE,0BAA0B,CAA2B,CAAC;KACnG;IACD,OAAO,eAAe,CAAC;AACzB,CAAC;AAED,mEAAmE;AACnE,8EAA8E;AAC9E,KAAK,UAAU,UAAU,CAAE,QAAiB,EAAE,IAAa;IACzD,IAAI,OAAO,GAAG,YAAY,CAAC,GAAG,CAAC,GAAG,QAAQ,IAAI,IAAI,EAAE,CAAC,CAAC;IACtD,IAAI,OAAO,KAAK,SAAS,EAAE;QACzB,OAAO,OAAO,CAAC;KAChB;IAED,IAAI;QACF,oEAAoE;QACpE,uBAAuB;QACvB,OAAO,GAAG,yBAAa,QAAQ,uCAAC,CAAC;QACjC,IAAI,OAAO,OAAO,KAAK,UAAU,EAAE;YACjC,OAAO,GAAG,MAAM,CAAE,OAAe,CAAC,IAAI,CAAC,CAAC,CAAC;SAC1C;KACF;IAAC,MAAM,GAAE;IACV,IAAI,OAAO,OAAO,KAAK,UAAU,EAAE;QACjC,OAAO,GAAG,MAAM,YAAY,EAAE,CAAC,IAAA,mBAAa,EAAC,QAAQ,CAAC,CAAC,IAAI,CAAC,CAAC;QAC7D,IAAI,OAAO,OAAO,KAAK,UAAU,EAAE;YACjC,OAAO,GAAG,MAAM,CAAE,OAAe,CAAC,IAAI,CAAC,CAAC,CAAC;SAC1C;KACF;IACD,IAAI,OAAO,OAAO,KAAK,UAAU,EAAE;QACjC,OAAO,IAAI,CAAC;KACb;IAED,2EAA2E;IAC3E,wCAAwC;IACxC,IAAI,YAAY,CAAC,IAAI,GAAG,IAAI,EAAE;QAC5B,MAAM,CAAC,CAAC,GAAG,CAAC,CAAC,GAAG,YAAY,CAAC;QAC7B,YAAY,CAAC,MAAM,CAAC,GAAG,CAAC,CAAC;KAC1B;IAED,YAAY,CAAC,GAAG,CAAC,GAAG,QAAQ,IAAI,IAAI,EAAE,EAAE,OAAO,CAAC,CAAC;IACjD,OAAO,OAAO,CAAC;AACjB,CAAC;AAED,6EAA6E;AAC7E,4EAA4E;AAC5E,8EAA8E;AAC9E,8CAA8C;AAC9C,2BAAW,CAAC,EAAE,CAAC,SAAS,EAAE,CAAC,OAAwB,EAAE,EAAE;IACrD,UAAU,GAAG,OAAO,CAAC,GAAG,CAAC,uBAAuB,KAAK,GAAG,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,OAAO,CAAC,UAAU,CAAC;IACtF,MAAM,EAAE,IAAI,EAAE,YAAY,EAAE,QAAQ,EAAE,IAAI,EAAE,aAAa,EAAE,GAAG,OAAO,CAAC;IACtE,CAAC,KAAK;QACJ,IAAI;YACF,IAAI,aAAa,KAAK,CAAC,IAAI,OAAO,CAAC,QAAQ,KAAK,OAAO,EAAE;gBACvD,gEAAgE;gBAChE,aAAa;gBACb,CAAC,wDAAa,WAAW,GAAC,CAAC,CAAC,OAAO,CAAC,aAAa,CAAC,CAAC;aACpD;SACF;QAAC,MAAM,GAAE;QAEV,IAAI,QAAQ,KAAK,IAAI,EAAE;YACrB,MAAM,UAAU,CAAC,QAAQ,EAAE,IAAI,CAAC,CAAC;SAClC;QAED,MAAM,YAAY,GAAkB,EAAE,CAAC,cAAK,CAAC,EAAE,IAAI,EAAE,CAAC;QACtD,2BAAW,CAAC,WAAW,CAAC,YAAY,CAAC,CAAC;QAEtC,IAAI,CAAC,EAAE,CAAC,SAAS,EAAE,SAAS,CAAC,IAAI,CAAC,IAAI,EAAE,IAAI,EAAE,YAAY,CAAC,CAAC,CAAC;QAC7D,eAAe,CAAC,IAAI,EAAE,YAAY,CAAC,CAAC;IACtC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,eAAe,CAAC,CAAC;AAC9B,CAAC,CAAC,CAAC;AAEH,IAAI,YAAY,GAAY,CAAC,CAAC;AAC9B,IAAI,oBAAoB,GAAY,CAAC,CAAC;AACtC,SAAS,eAAe,CAAE,IAAkB,EAAE,YAAyB;IACrE,IAAI,CAAC,UAAU;QAAE,OAAO;IAExB,0EAA0E;IAC1E,yEAAyE;IACzE,kDAAkD;IAClD,0EAA0E;IAC1E,uEAAuE;IACvE,6EAA6E;IAC7E,qEAAqE;IACrE,mCAAmC;IACnC,0EAA0E;IAC1E,0EAA0E;IAC1E,yDAAyD;IACzD,OAAO,YAAY,KAAK,CAAC,EAAE;QACzB,sEAAsE;QACtE,uEAAuE;QACvE,qBAAqB;QACrB,OAAO,CAAC,IAAI,CAAC,YAAY,EAAE,2BAAkB,EAAE,oBAAoB,CAAC,CAAC;QACrE,oBAAoB,GAAG,OAAO,CAAC,IAAI,CAAC,YAAY,EAAE,2BAAkB,CAAC,CAAC;QAEtE,0EAA0E;QAC1E,4BAA4B;QAC5B,IAAI,KAAK,CAAC;QACV,OAAO,CAAC,KAAK,GAAG,IAAA,qCAAoB,EAAC,IAAI,CAAC,CAAC,KAAK,SAAS,EAAE;YACzD,SAAS,CAAC,IAAI,EAAE,YAAY,EAAE,KAAK,CAAC,OAAO,CAAC,CAAC;SAC9C;KACF;AACH,CAAC;AAED,SAAS,SAAS,CAChB,IAAkB,EAClB,YAAyB,EACzB,OAAwB;IACxB,YAAY,EAAE,CAAC;IACf,MAAM,EAAE,MAAM,EAAE,IAAI,EAAE,QAAQ,EAAE,IAAI,EAAE,GAAG,OAAO,CAAC;IAEjD,CAAC,KAAK;QACJ,IAAI,QAA0B,CAAC;QAC/B,IAAI,YAAY,GAAW,EAAE,CAAC;QAC9B,IAAI;YACF,MAAM,OAAO,GAAG,MAAM,UAAU,CAAC,QAAQ,EAAE,IAAI,CAAC,CAAC;YACjD,IAAI,OAAO,KAAK,IAAI,EAAE;gBACpB,MAAM,IAAI,KAAK,CAAC,qCAAqC,QAAQ,EAAE,CAAC,CAAC;aAClE;YACD,IAAI,MAAM,GAAG,MAAM,OAAO,CAAC,IAAI,CAAC,CAAC;YACjC,IAAI,IAAA,kBAAS,EAAC,MAAM,CAAC,EAAE;gBACrB
,YAAY,GAAG,YAAY,CAAC,MAAM,CAAC,MAAM,CAAC,sBAAa,CAAC,CAAC,CAAC;gBAC1D,MAAM,GAAG,MAAM,CAAC,eAAM,CAAC,CAAC;aACzB;YACD,QAAQ,GAAG;gBACT,MAAM;gBACN,MAAM,EAAE,MAAM;gBACd,KAAK,EAAE,IAAI;aACZ,CAAC;YAEF,oEAAoE;YACpE,oEAAoE;YACpE,oEAAoE;YACpE,mDAAmD;YACnD,IAAI,OAAO,CAAC,MAAM,CAAC,cAAc,GAAG,CAAC,EAAE;gBACrC,MAAM,IAAI,OAAO,CAAC,CAAC,OAAO,EAAE,EAAE,CAAC,OAAO,CAAC,MAAM,CAAC,KAAK,CAAC,EAAE,EAAE,OAAO,CAAC,CAAC,CAAC;aACnE;YACD,IAAI,OAAO,CAAC,MAAM,CAAC,cAAc,GAAG,CAAC,EAAE;gBACrC,MAAM,IAAI,OAAO,CAAC,CAAC,OAAO,EAAE,EAAE,CAAC,OAAO,CAAC,MAAM,CAAC,KAAK,CAAC,EAAE,EAAE,OAAO,CAAC,CAAC,CAAC;aACnE;SACF;QAAC,OAAO,KAAK,EAAE;YACd,QAAQ,GAAG;gBACT,MAAM;gBACN,MAAM,EAAE,IAAI;gBACZ,kEAAkE;gBAClE,2DAA2D;gBAC3D,KAAK,EAAE,KAAK,YAAY,KAAK,CAAC,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC,IAAI;aAC7C,CAAC;SACH;QACD,YAAY,EAAE,CAAC;QAEf,uEAAuE;QACvE,mEAAmE;QACnE,gCAAgC;QAChC,IAAI,CAAC,WAAW,CAAC,QAAQ,EAAE,YAAY,CAAC,CAAC;QACzC,OAAO,CAAC,GAAG,CAAC,YAAY,EAAE,4BAAmB,EAAE,CAAC,CAAC,CAAC;QAClD,eAAe,CAAC,IAAI,EAAE,YAAY,CAAC,CAAC;IACtC,CAAC,CAAC,EAAE,CAAC,KAAK,CAAC,eAAe,CAAC,CAAC;AAC9B,CAAC;AAED,SAAS,eAAe,CAAE,KAAa;IACrC,OAAO,CAAC,QAAQ,CAAC,GAAG,EAAE,GAAG,MAAM,KAAK,CAAC,CAAC,CAAC,CAAC,CAAC;AAC3C,CAAC"}