Deployed the page to GitHub Pages.

commit 2c89899458 (parent 1d79754e93)
62797 changed files with 6551425 additions and 15279 deletions
node_modules/tar/LICENSE (15 lines; generated, vendored, normal file)
@@ -0,0 +1,15 @@
The ISC License

Copyright (c) Isaac Z. Schlueter and Contributors

Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
node_modules/tar/README.md (1080 lines; generated, vendored, normal file)
File diff suppressed because it is too large.
node_modules/tar/index.js (18 lines; generated, vendored, normal file)
@@ -0,0 +1,18 @@
'use strict'

// high-level commands
exports.c = exports.create = require('./lib/create.js')
exports.r = exports.replace = require('./lib/replace.js')
exports.t = exports.list = require('./lib/list.js')
exports.u = exports.update = require('./lib/update.js')
exports.x = exports.extract = require('./lib/extract.js')

// classes
exports.Pack = require('./lib/pack.js')
exports.Unpack = require('./lib/unpack.js')
exports.Parse = require('./lib/parse.js')
exports.ReadEntry = require('./lib/read-entry.js')
exports.WriteEntry = require('./lib/write-entry.js')
exports.Header = require('./lib/header.js')
exports.Pax = require('./lib/pax.js')
exports.types = require('./lib/types.js')
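
The single-letter exports mirror the classic tar(1) verbs (c/r/t/u/x). A minimal usage sketch of this high-level API, assuming the package is installed as `tar` and the paths shown are placeholders:

    const tar = require('tar')
    // tar -czf out.tgz src  ->  create a gzipped archive from ./src
    tar.c({ gzip: true, file: 'out.tgz' }, ['src'])
      .then(() => console.log('packed'))
    // tar -xzf out.tgz -C dest  ->  extract it somewhere else
    // tar.x({ file: 'out.tgz', C: 'dest' })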
node_modules/tar/lib/create.js (111 lines; generated, vendored, normal file)
@@ -0,0 +1,111 @@
'use strict'

// tar -c
const hlo = require('./high-level-opt.js')

const Pack = require('./pack.js')
const fsm = require('fs-minipass')
const t = require('./list.js')
const path = require('path')

module.exports = (opt_, files, cb) => {
  if (typeof files === 'function') {
    cb = files
  }

  if (Array.isArray(opt_)) {
    files = opt_, opt_ = {}
  }

  if (!files || !Array.isArray(files) || !files.length) {
    throw new TypeError('no files or directories specified')
  }

  files = Array.from(files)

  const opt = hlo(opt_)

  if (opt.sync && typeof cb === 'function') {
    throw new TypeError('callback not supported for sync tar functions')
  }

  if (!opt.file && typeof cb === 'function') {
    throw new TypeError('callback only supported with file option')
  }

  return opt.file && opt.sync ? createFileSync(opt, files)
    : opt.file ? createFile(opt, files, cb)
    : opt.sync ? createSync(opt, files)
    : create(opt, files)
}

const createFileSync = (opt, files) => {
  const p = new Pack.Sync(opt)
  const stream = new fsm.WriteStreamSync(opt.file, {
    mode: opt.mode || 0o666,
  })
  p.pipe(stream)
  addFilesSync(p, files)
}

const createFile = (opt, files, cb) => {
  const p = new Pack(opt)
  const stream = new fsm.WriteStream(opt.file, {
    mode: opt.mode || 0o666,
  })
  p.pipe(stream)

  const promise = new Promise((res, rej) => {
    stream.on('error', rej)
    stream.on('close', res)
    p.on('error', rej)
  })

  addFilesAsync(p, files)

  return cb ? promise.then(cb, cb) : promise
}

const addFilesSync = (p, files) => {
  files.forEach(file => {
    if (file.charAt(0) === '@') {
      t({
        file: path.resolve(p.cwd, file.slice(1)),
        sync: true,
        noResume: true,
        onentry: entry => p.add(entry),
      })
    } else {
      p.add(file)
    }
  })
  p.end()
}

const addFilesAsync = (p, files) => {
  while (files.length) {
    const file = files.shift()
    if (file.charAt(0) === '@') {
      return t({
        file: path.resolve(p.cwd, file.slice(1)),
        noResume: true,
        onentry: entry => p.add(entry),
      }).then(_ => addFilesAsync(p, files))
    } else {
      p.add(file)
    }
  }
  p.end()
}

const createSync = (opt, files) => {
  const p = new Pack.Sync(opt)
  addFilesSync(p, files)
  return p
}

const create = (opt, files) => {
  const p = new Pack(opt)
  addFilesAsync(p, files)
  return p
}
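
Note the `@` handling in addFilesSync/addFilesAsync above: an argument beginning with `@` is read as an existing archive whose entries are re-added one by one via p.add(entry). A small sketch of that behavior, with hypothetical file names:

    const tar = require('tar')
    // copies every entry of old.tar into new.tar, then appends ./extra-dir
    tar.c({ file: 'new.tar' }, ['@old.tar', 'extra-dir'])
      .then(() => console.log('done'))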
node_modules/tar/lib/extract.js (113 lines; generated, vendored, normal file)
@@ -0,0 +1,113 @@
'use strict'

// tar -x
const hlo = require('./high-level-opt.js')
const Unpack = require('./unpack.js')
const fs = require('fs')
const fsm = require('fs-minipass')
const path = require('path')
const stripSlash = require('./strip-trailing-slashes.js')

module.exports = (opt_, files, cb) => {
  if (typeof opt_ === 'function') {
    cb = opt_, files = null, opt_ = {}
  } else if (Array.isArray(opt_)) {
    files = opt_, opt_ = {}
  }

  if (typeof files === 'function') {
    cb = files, files = null
  }

  if (!files) {
    files = []
  } else {
    files = Array.from(files)
  }

  const opt = hlo(opt_)

  if (opt.sync && typeof cb === 'function') {
    throw new TypeError('callback not supported for sync tar functions')
  }

  if (!opt.file && typeof cb === 'function') {
    throw new TypeError('callback only supported with file option')
  }

  if (files.length) {
    filesFilter(opt, files)
  }

  return opt.file && opt.sync ? extractFileSync(opt)
    : opt.file ? extractFile(opt, cb)
    : opt.sync ? extractSync(opt)
    : extract(opt)
}

// construct a filter that limits the file entries listed
// include child entries if a dir is included
const filesFilter = (opt, files) => {
  const map = new Map(files.map(f => [stripSlash(f), true]))
  const filter = opt.filter

  const mapHas = (file, r) => {
    const root = r || path.parse(file).root || '.'
    const ret = file === root ? false
      : map.has(file) ? map.get(file)
      : mapHas(path.dirname(file), root)

    map.set(file, ret)
    return ret
  }

  opt.filter = filter
    ? (file, entry) => filter(file, entry) && mapHas(stripSlash(file))
    : file => mapHas(stripSlash(file))
}

const extractFileSync = opt => {
  const u = new Unpack.Sync(opt)

  const file = opt.file
  const stat = fs.statSync(file)
  // This trades a zero-byte read() syscall for a stat
  // However, it will usually result in less memory allocation
  const readSize = opt.maxReadSize || 16 * 1024 * 1024
  const stream = new fsm.ReadStreamSync(file, {
    readSize: readSize,
    size: stat.size,
  })
  stream.pipe(u)
}

const extractFile = (opt, cb) => {
  const u = new Unpack(opt)
  const readSize = opt.maxReadSize || 16 * 1024 * 1024

  const file = opt.file
  const p = new Promise((resolve, reject) => {
    u.on('error', reject)
    u.on('close', resolve)

    // This trades a zero-byte read() syscall for a stat
    // However, it will usually result in less memory allocation
    fs.stat(file, (er, stat) => {
      if (er) {
        reject(er)
      } else {
        const stream = new fsm.ReadStream(file, {
          readSize: readSize,
          size: stat.size,
        })
        stream.on('error', reject)
        stream.pipe(u)
      }
    })
  })
  return cb ? p.then(cb, cb) : p
}

const extractSync = opt => new Unpack.Sync(opt)

const extract = opt => new Unpack(opt)
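
filesFilter means a requested directory implicitly selects its children: mapHas() walks each entry path upward via path.dirname() until it hits a listed path or the root, memoizing the verdict in the map. A sketch of the resulting behavior, with hypothetical names:

    const tar = require('tar')
    // extracts docs/ and everything under it into ./out, nothing else
    tar.x({ file: 'site.tar', C: 'out' }, ['docs'])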
node_modules/tar/lib/get-write-flag.js (20 lines; generated, vendored, normal file)
@@ -0,0 +1,20 @@
// Get the appropriate flag to use for creating files
// We use fmap on Windows platforms for files less than
// 512kb. This is a fairly low limit, but avoids making
// things slower in some cases. Since most of what this
// library is used for is extracting tarballs of many
// relatively small files in npm packages and the like,
// it can be a big boost on Windows platforms.
// Only supported in Node v12.9.0 and above.
const platform = process.env.__FAKE_PLATFORM__ || process.platform
const isWindows = platform === 'win32'
const fs = global.__FAKE_TESTING_FS__ || require('fs')

/* istanbul ignore next */
const { O_CREAT, O_TRUNC, O_WRONLY, UV_FS_O_FILEMAP = 0 } = fs.constants

const fMapEnabled = isWindows && !!UV_FS_O_FILEMAP
const fMapLimit = 512 * 1024
const fMapFlag = UV_FS_O_FILEMAP | O_TRUNC | O_CREAT | O_WRONLY
module.exports = !fMapEnabled ? () => 'w'
  : size => size < fMapLimit ? fMapFlag : 'w'
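
The export is a function from file size to an open flag, so a caller can opt into the memory-mapped fast path per file. A minimal sketch of how it would be used, assuming a local getFlag binding for the module (Node's fs.open accepts numeric flags like fMapFlag as well as the string 'w'):

    const fs = require('fs')
    const getFlag = require('./lib/get-write-flag.js')
    // on Windows with UV_FS_O_FILEMAP available, small files get the fmap flag;
    // everywhere else this is just the plain 'w' flag.
    fs.open('dest.txt', getFlag(4096), 0o644, (er, fd) => { /* write the body... */ })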
node_modules/tar/lib/header.js (304 lines; generated, vendored, normal file)
@@ -0,0 +1,304 @@
'use strict'
// parse a 512-byte header block to a data object, or vice-versa
// encode returns `true` if a pax extended header is needed, because
// the data could not be faithfully encoded in a simple header.
// (Also, check header.needPax to see if it needs a pax header.)

const types = require('./types.js')
const pathModule = require('path').posix
const large = require('./large-numbers.js')

const SLURP = Symbol('slurp')
const TYPE = Symbol('type')

class Header {
  constructor (data, off, ex, gex) {
    this.cksumValid = false
    this.needPax = false
    this.nullBlock = false

    this.block = null
    this.path = null
    this.mode = null
    this.uid = null
    this.gid = null
    this.size = null
    this.mtime = null
    this.cksum = null
    this[TYPE] = '0'
    this.linkpath = null
    this.uname = null
    this.gname = null
    this.devmaj = 0
    this.devmin = 0
    this.atime = null
    this.ctime = null

    if (Buffer.isBuffer(data)) {
      this.decode(data, off || 0, ex, gex)
    } else if (data) {
      this.set(data)
    }
  }

  decode (buf, off, ex, gex) {
    if (!off) {
      off = 0
    }

    if (!buf || !(buf.length >= off + 512)) {
      throw new Error('need 512 bytes for header')
    }

    this.path = decString(buf, off, 100)
    this.mode = decNumber(buf, off + 100, 8)
    this.uid = decNumber(buf, off + 108, 8)
    this.gid = decNumber(buf, off + 116, 8)
    this.size = decNumber(buf, off + 124, 12)
    this.mtime = decDate(buf, off + 136, 12)
    this.cksum = decNumber(buf, off + 148, 12)

    // if we have extended or global extended headers, apply them now
    // See https://github.com/npm/node-tar/pull/187
    this[SLURP](ex)
    this[SLURP](gex, true)

    // old tar versions marked dirs as a file with a trailing /
    this[TYPE] = decString(buf, off + 156, 1)
    if (this[TYPE] === '') {
      this[TYPE] = '0'
    }
    if (this[TYPE] === '0' && this.path.slice(-1) === '/') {
      this[TYPE] = '5'
    }

    // tar implementations sometimes incorrectly put the stat(dir).size
    // as the size in the tarball, even though Directory entries are
    // not able to have any body at all. In the very rare chance that
    // it actually DOES have a body, we weren't going to do anything with
    // it anyway, and it'll just be a warning about an invalid header.
    if (this[TYPE] === '5') {
      this.size = 0
    }

    this.linkpath = decString(buf, off + 157, 100)
    if (buf.slice(off + 257, off + 265).toString() === 'ustar\u000000') {
      this.uname = decString(buf, off + 265, 32)
      this.gname = decString(buf, off + 297, 32)
      this.devmaj = decNumber(buf, off + 329, 8)
      this.devmin = decNumber(buf, off + 337, 8)
      if (buf[off + 475] !== 0) {
        // definitely a prefix, definitely >130 chars.
        const prefix = decString(buf, off + 345, 155)
        this.path = prefix + '/' + this.path
      } else {
        const prefix = decString(buf, off + 345, 130)
        if (prefix) {
          this.path = prefix + '/' + this.path
        }
        this.atime = decDate(buf, off + 476, 12)
        this.ctime = decDate(buf, off + 488, 12)
      }
    }

    let sum = 8 * 0x20
    for (let i = off; i < off + 148; i++) {
      sum += buf[i]
    }

    for (let i = off + 156; i < off + 512; i++) {
      sum += buf[i]
    }

    this.cksumValid = sum === this.cksum
    if (this.cksum === null && sum === 8 * 0x20) {
      this.nullBlock = true
    }
  }

  [SLURP] (ex, global) {
    for (const k in ex) {
      // we slurp in everything except for the path attribute in
      // a global extended header, because that's weird.
      if (ex[k] !== null && ex[k] !== undefined &&
          !(global && k === 'path')) {
        this[k] = ex[k]
      }
    }
  }

  encode (buf, off) {
    if (!buf) {
      buf = this.block = Buffer.alloc(512)
      off = 0
    }

    if (!off) {
      off = 0
    }

    if (!(buf.length >= off + 512)) {
      throw new Error('need 512 bytes for header')
    }

    const prefixSize = this.ctime || this.atime ? 130 : 155
    const split = splitPrefix(this.path || '', prefixSize)
    const path = split[0]
    const prefix = split[1]
    this.needPax = split[2]

    this.needPax = encString(buf, off, 100, path) || this.needPax
    this.needPax = encNumber(buf, off + 100, 8, this.mode) || this.needPax
    this.needPax = encNumber(buf, off + 108, 8, this.uid) || this.needPax
    this.needPax = encNumber(buf, off + 116, 8, this.gid) || this.needPax
    this.needPax = encNumber(buf, off + 124, 12, this.size) || this.needPax
    this.needPax = encDate(buf, off + 136, 12, this.mtime) || this.needPax
    buf[off + 156] = this[TYPE].charCodeAt(0)
    this.needPax = encString(buf, off + 157, 100, this.linkpath) || this.needPax
    buf.write('ustar\u000000', off + 257, 8)
    this.needPax = encString(buf, off + 265, 32, this.uname) || this.needPax
    this.needPax = encString(buf, off + 297, 32, this.gname) || this.needPax
    this.needPax = encNumber(buf, off + 329, 8, this.devmaj) || this.needPax
    this.needPax = encNumber(buf, off + 337, 8, this.devmin) || this.needPax
    this.needPax = encString(buf, off + 345, prefixSize, prefix) || this.needPax
    if (buf[off + 475] !== 0) {
      this.needPax = encString(buf, off + 345, 155, prefix) || this.needPax
    } else {
      this.needPax = encString(buf, off + 345, 130, prefix) || this.needPax
      this.needPax = encDate(buf, off + 476, 12, this.atime) || this.needPax
      this.needPax = encDate(buf, off + 488, 12, this.ctime) || this.needPax
    }

    let sum = 8 * 0x20
    for (let i = off; i < off + 148; i++) {
      sum += buf[i]
    }

    for (let i = off + 156; i < off + 512; i++) {
      sum += buf[i]
    }

    this.cksum = sum
    encNumber(buf, off + 148, 8, this.cksum)
    this.cksumValid = true

    return this.needPax
  }

  set (data) {
    for (const i in data) {
      if (data[i] !== null && data[i] !== undefined) {
        this[i] = data[i]
      }
    }
  }

  get type () {
    return types.name.get(this[TYPE]) || this[TYPE]
  }

  get typeKey () {
    return this[TYPE]
  }

  set type (type) {
    if (types.code.has(type)) {
      this[TYPE] = types.code.get(type)
    } else {
      this[TYPE] = type
    }
  }
}

const splitPrefix = (p, prefixSize) => {
  const pathSize = 100
  let pp = p
  let prefix = ''
  let ret
  const root = pathModule.parse(p).root || '.'

  if (Buffer.byteLength(pp) < pathSize) {
    ret = [pp, prefix, false]
  } else {
    // first set prefix to the dir, and path to the base
    prefix = pathModule.dirname(pp)
    pp = pathModule.basename(pp)

    do {
      if (Buffer.byteLength(pp) <= pathSize &&
          Buffer.byteLength(prefix) <= prefixSize) {
        // both fit!
        ret = [pp, prefix, false]
      } else if (Buffer.byteLength(pp) > pathSize &&
          Buffer.byteLength(prefix) <= prefixSize) {
        // prefix fits in prefix, but path doesn't fit in path
        ret = [pp.slice(0, pathSize - 1), prefix, true]
      } else {
        // make path take a bit from prefix
        pp = pathModule.join(pathModule.basename(prefix), pp)
        prefix = pathModule.dirname(prefix)
      }
    } while (prefix !== root && !ret)

    // at this point, found no resolution, just truncate
    if (!ret) {
      ret = [p.slice(0, pathSize - 1), '', true]
    }
  }
  return ret
}

const decString = (buf, off, size) =>
  buf.slice(off, off + size).toString('utf8').replace(/\0.*/, '')

const decDate = (buf, off, size) =>
  numToDate(decNumber(buf, off, size))

const numToDate = num => num === null ? null : new Date(num * 1000)

const decNumber = (buf, off, size) =>
  buf[off] & 0x80 ? large.parse(buf.slice(off, off + size))
  : decSmallNumber(buf, off, size)

const nanNull = value => isNaN(value) ? null : value

const decSmallNumber = (buf, off, size) =>
  nanNull(parseInt(
    buf.slice(off, off + size)
      .toString('utf8').replace(/\0.*$/, '').trim(), 8))

// the maximum encodable as a null-terminated octal, by field size
const MAXNUM = {
  12: 0o77777777777,
  8: 0o7777777,
}

const encNumber = (buf, off, size, number) =>
  number === null ? false :
  number > MAXNUM[size] || number < 0
    ? (large.encode(number, buf.slice(off, off + size)), true)
    : (encSmallNumber(buf, off, size, number), false)

const encSmallNumber = (buf, off, size, number) =>
  buf.write(octalString(number, size), off, size, 'ascii')

const octalString = (number, size) =>
  padOctal(Math.floor(number).toString(8), size)

const padOctal = (string, size) =>
  (string.length === size - 1 ? string
  : new Array(size - string.length - 1).join('0') + string + ' ') + '\0'

const encDate = (buf, off, size, date) =>
  date === null ? false :
  encNumber(buf, off, size, date.getTime() / 1000)

// enough to fill the longest string we've got
const NULLS = new Array(156).join('\0')
// pad with nulls, return true if it's longer or non-ascii
const encString = (buf, off, size, string) =>
  string === null ? false :
  (buf.write(string + NULLS, off, size, 'utf8'),
    string.length !== Buffer.byteLength(string) || string.length > size)

module.exports = Header
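
Header is symmetric: set fields and encode() into a 512-byte block, or hand decode() a block (plus optional pax objects) to get the fields back. A round-trip sketch with made-up field values:

    const Header = require('./lib/header.js')
    const h = new Header({ path: 'a.txt', mode: 0o644, size: 5, mtime: new Date(), type: 'File' })
    const needPax = h.encode()        // fills h.block, returns true if a pax header is needed
    const h2 = new Header(h.block)    // decode the same 512 bytes
    console.log(h2.path, h2.size, h2.cksumValid)   // 'a.txt' 5 true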
node_modules/tar/lib/high-level-opt.js (29 lines; generated, vendored, normal file)
@@ -0,0 +1,29 @@
'use strict'

// turn tar(1) style args like `C` into the more verbose things like `cwd`

const argmap = new Map([
  ['C', 'cwd'],
  ['f', 'file'],
  ['z', 'gzip'],
  ['P', 'preservePaths'],
  ['U', 'unlink'],
  ['strip-components', 'strip'],
  ['stripComponents', 'strip'],
  ['keep-newer', 'newer'],
  ['keepNewer', 'newer'],
  ['keep-newer-files', 'newer'],
  ['keepNewerFiles', 'newer'],
  ['k', 'keep'],
  ['keep-existing', 'keep'],
  ['keepExisting', 'keep'],
  ['m', 'noMtime'],
  ['no-mtime', 'noMtime'],
  ['p', 'preserveOwner'],
  ['L', 'follow'],
  ['h', 'follow'],
])

module.exports = opt => opt ? Object.keys(opt).map(k => [
  argmap.has(k) ? argmap.get(k) : k, opt[k],
]).reduce((set, kv) => (set[kv[0]] = kv[1], set), Object.create(null)) : {}
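
So hlo() is a pure rename pass over the option bag; keys not in argmap pass through untouched. For example:

    const hlo = require('./lib/high-level-opt.js')
    hlo({ C: '/tmp/x', z: true, file: 'a.tgz' })
    // => { cwd: '/tmp/x', gzip: true, file: 'a.tgz' }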
node_modules/tar/lib/large-numbers.js (104 lines; generated, vendored, normal file)
@@ -0,0 +1,104 @@
'use strict'
// Tar can encode large and negative numbers using a leading byte of
// 0xff for negative, and 0x80 for positive.

const encode = (num, buf) => {
  if (!Number.isSafeInteger(num)) {
    // The number is so large that javascript cannot represent it with integer
    // precision.
    throw Error('cannot encode number outside of javascript safe integer range')
  } else if (num < 0) {
    encodeNegative(num, buf)
  } else {
    encodePositive(num, buf)
  }
  return buf
}

const encodePositive = (num, buf) => {
  buf[0] = 0x80

  for (var i = buf.length; i > 1; i--) {
    buf[i - 1] = num & 0xff
    num = Math.floor(num / 0x100)
  }
}

const encodeNegative = (num, buf) => {
  buf[0] = 0xff
  var flipped = false
  num = num * -1
  for (var i = buf.length; i > 1; i--) {
    var byte = num & 0xff
    num = Math.floor(num / 0x100)
    if (flipped) {
      buf[i - 1] = onesComp(byte)
    } else if (byte === 0) {
      buf[i - 1] = 0
    } else {
      flipped = true
      buf[i - 1] = twosComp(byte)
    }
  }
}

const parse = (buf) => {
  const pre = buf[0]
  const value = pre === 0x80 ? pos(buf.slice(1, buf.length))
    : pre === 0xff ? twos(buf)
    : null
  if (value === null) {
    throw Error('invalid base256 encoding')
  }

  if (!Number.isSafeInteger(value)) {
    // The number is so large that javascript cannot represent it with integer
    // precision.
    throw Error('parsed number outside of javascript safe integer range')
  }

  return value
}

const twos = (buf) => {
  var len = buf.length
  var sum = 0
  var flipped = false
  for (var i = len - 1; i > -1; i--) {
    var byte = buf[i]
    var f
    if (flipped) {
      f = onesComp(byte)
    } else if (byte === 0) {
      f = byte
    } else {
      flipped = true
      f = twosComp(byte)
    }
    if (f !== 0) {
      sum -= f * Math.pow(256, len - i - 1)
    }
  }
  return sum
}

const pos = (buf) => {
  var len = buf.length
  var sum = 0
  for (var i = len - 1; i > -1; i--) {
    var byte = buf[i]
    if (byte !== 0) {
      sum += byte * Math.pow(256, len - i - 1)
    }
  }
  return sum
}

const onesComp = byte => (0xff ^ byte) & 0xff

const twosComp = byte => ((0xff ^ byte) + 1) & 0xff

module.exports = {
  encode,
  parse,
}
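
This is GNU tar's base-256 extension: a 0x80 lead byte marks a positive big-endian integer, 0xff a negative one stored in two's complement, for values that overflow the null-terminated octal fields. A round-trip sketch:

    const large = require('./lib/large-numbers.js')
    const buf = Buffer.alloc(12)            // a 12-byte size field
    large.encode(8 * 1024 ** 4, buf)        // 8 TiB, too big for 11 octal digits
    console.log(large.parse(buf))           // 8796093022208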
node_modules/tar/lib/list.js (139 lines; generated, vendored, normal file)
@@ -0,0 +1,139 @@
'use strict'

// XXX: This shares a lot in common with extract.js
// maybe some DRY opportunity here?

// tar -t
const hlo = require('./high-level-opt.js')
const Parser = require('./parse.js')
const fs = require('fs')
const fsm = require('fs-minipass')
const path = require('path')
const stripSlash = require('./strip-trailing-slashes.js')

module.exports = (opt_, files, cb) => {
  if (typeof opt_ === 'function') {
    cb = opt_, files = null, opt_ = {}
  } else if (Array.isArray(opt_)) {
    files = opt_, opt_ = {}
  }

  if (typeof files === 'function') {
    cb = files, files = null
  }

  if (!files) {
    files = []
  } else {
    files = Array.from(files)
  }

  const opt = hlo(opt_)

  if (opt.sync && typeof cb === 'function') {
    throw new TypeError('callback not supported for sync tar functions')
  }

  if (!opt.file && typeof cb === 'function') {
    throw new TypeError('callback only supported with file option')
  }

  if (files.length) {
    filesFilter(opt, files)
  }

  if (!opt.noResume) {
    onentryFunction(opt)
  }

  return opt.file && opt.sync ? listFileSync(opt)
    : opt.file ? listFile(opt, cb)
    : list(opt)
}

const onentryFunction = opt => {
  const onentry = opt.onentry
  opt.onentry = onentry ? e => {
    onentry(e)
    e.resume()
  } : e => e.resume()
}

// construct a filter that limits the file entries listed
// include child entries if a dir is included
const filesFilter = (opt, files) => {
  const map = new Map(files.map(f => [stripSlash(f), true]))
  const filter = opt.filter

  const mapHas = (file, r) => {
    const root = r || path.parse(file).root || '.'
    const ret = file === root ? false
      : map.has(file) ? map.get(file)
      : mapHas(path.dirname(file), root)

    map.set(file, ret)
    return ret
  }

  opt.filter = filter
    ? (file, entry) => filter(file, entry) && mapHas(stripSlash(file))
    : file => mapHas(stripSlash(file))
}

const listFileSync = opt => {
  const p = list(opt)
  const file = opt.file
  let threw = true
  let fd
  try {
    const stat = fs.statSync(file)
    const readSize = opt.maxReadSize || 16 * 1024 * 1024
    if (stat.size < readSize) {
      p.end(fs.readFileSync(file))
    } else {
      let pos = 0
      const buf = Buffer.allocUnsafe(readSize)
      fd = fs.openSync(file, 'r')
      while (pos < stat.size) {
        const bytesRead = fs.readSync(fd, buf, 0, readSize, pos)
        pos += bytesRead
        p.write(buf.slice(0, bytesRead))
      }
      p.end()
    }
    threw = false
  } finally {
    if (threw && fd) {
      try {
        fs.closeSync(fd)
      } catch (er) {}
    }
  }
}

const listFile = (opt, cb) => {
  const parse = new Parser(opt)
  const readSize = opt.maxReadSize || 16 * 1024 * 1024

  const file = opt.file
  const p = new Promise((resolve, reject) => {
    parse.on('error', reject)
    parse.on('end', resolve)

    fs.stat(file, (er, stat) => {
      if (er) {
        reject(er)
      } else {
        const stream = new fsm.ReadStream(file, {
          readSize: readSize,
          size: stat.size,
        })
        stream.on('error', reject)
        stream.pipe(parse)
      }
    })
  })
  return cb ? p.then(cb, cb) : p
}

const list = opt => new Parser(opt)
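
Because onentryFunction wraps every onentry callback with e.resume(), entry bodies are drained automatically and a plain listing never stalls on an unread entry. A usage sketch with a hypothetical archive name:

    const tar = require('tar')
    tar.t({
      file: 'site.tar',
      onentry: entry => console.log(entry.path, entry.size),
    })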
node_modules/tar/lib/mkdir.js (229 lines; generated, vendored, normal file)
@@ -0,0 +1,229 @@
'use strict'
// wrapper around mkdirp for tar's needs.

// TODO: This should probably be a class, not functionally
// passing around state in a gazillion args.

const mkdirp = require('mkdirp')
const fs = require('fs')
const path = require('path')
const chownr = require('chownr')
const normPath = require('./normalize-windows-path.js')

class SymlinkError extends Error {
  constructor (symlink, path) {
    super('Cannot extract through symbolic link')
    this.path = path
    this.symlink = symlink
  }

  get name () {
    return 'SylinkError'
  }
}

class CwdError extends Error {
  constructor (path, code) {
    super(code + ': Cannot cd into \'' + path + '\'')
    this.path = path
    this.code = code
  }

  get name () {
    return 'CwdError'
  }
}

const cGet = (cache, key) => cache.get(normPath(key))
const cSet = (cache, key, val) => cache.set(normPath(key), val)

const checkCwd = (dir, cb) => {
  fs.stat(dir, (er, st) => {
    if (er || !st.isDirectory()) {
      er = new CwdError(dir, er && er.code || 'ENOTDIR')
    }
    cb(er)
  })
}

module.exports = (dir, opt, cb) => {
  dir = normPath(dir)

  // if there's any overlap between mask and mode,
  // then we'll need an explicit chmod
  const umask = opt.umask
  const mode = opt.mode | 0o0700
  const needChmod = (mode & umask) !== 0

  const uid = opt.uid
  const gid = opt.gid
  const doChown = typeof uid === 'number' &&
    typeof gid === 'number' &&
    (uid !== opt.processUid || gid !== opt.processGid)

  const preserve = opt.preserve
  const unlink = opt.unlink
  const cache = opt.cache
  const cwd = normPath(opt.cwd)

  const done = (er, created) => {
    if (er) {
      cb(er)
    } else {
      cSet(cache, dir, true)
      if (created && doChown) {
        chownr(created, uid, gid, er => done(er))
      } else if (needChmod) {
        fs.chmod(dir, mode, cb)
      } else {
        cb()
      }
    }
  }

  if (cache && cGet(cache, dir) === true) {
    return done()
  }

  if (dir === cwd) {
    return checkCwd(dir, done)
  }

  if (preserve) {
    return mkdirp(dir, { mode }).then(made => done(null, made), done)
  }

  const sub = normPath(path.relative(cwd, dir))
  const parts = sub.split('/')
  mkdir_(cwd, parts, mode, cache, unlink, cwd, null, done)
}

const mkdir_ = (base, parts, mode, cache, unlink, cwd, created, cb) => {
  if (!parts.length) {
    return cb(null, created)
  }
  const p = parts.shift()
  const part = normPath(path.resolve(base + '/' + p))
  if (cGet(cache, part)) {
    return mkdir_(part, parts, mode, cache, unlink, cwd, created, cb)
  }
  fs.mkdir(part, mode, onmkdir(part, parts, mode, cache, unlink, cwd, created, cb))
}

const onmkdir = (part, parts, mode, cache, unlink, cwd, created, cb) => er => {
  if (er) {
    fs.lstat(part, (statEr, st) => {
      if (statEr) {
        statEr.path = statEr.path && normPath(statEr.path)
        cb(statEr)
      } else if (st.isDirectory()) {
        mkdir_(part, parts, mode, cache, unlink, cwd, created, cb)
      } else if (unlink) {
        fs.unlink(part, er => {
          if (er) {
            return cb(er)
          }
          fs.mkdir(part, mode, onmkdir(part, parts, mode, cache, unlink, cwd, created, cb))
        })
      } else if (st.isSymbolicLink()) {
        return cb(new SymlinkError(part, part + '/' + parts.join('/')))
      } else {
        cb(er)
      }
    })
  } else {
    created = created || part
    mkdir_(part, parts, mode, cache, unlink, cwd, created, cb)
  }
}

const checkCwdSync = dir => {
  let ok = false
  let code = 'ENOTDIR'
  try {
    ok = fs.statSync(dir).isDirectory()
  } catch (er) {
    code = er.code
  } finally {
    if (!ok) {
      throw new CwdError(dir, code)
    }
  }
}

module.exports.sync = (dir, opt) => {
  dir = normPath(dir)
  // if there's any overlap between mask and mode,
  // then we'll need an explicit chmod
  const umask = opt.umask
  const mode = opt.mode | 0o0700
  const needChmod = (mode & umask) !== 0

  const uid = opt.uid
  const gid = opt.gid
  const doChown = typeof uid === 'number' &&
    typeof gid === 'number' &&
    (uid !== opt.processUid || gid !== opt.processGid)

  const preserve = opt.preserve
  const unlink = opt.unlink
  const cache = opt.cache
  const cwd = normPath(opt.cwd)

  const done = (created) => {
    cSet(cache, dir, true)
    if (created && doChown) {
      chownr.sync(created, uid, gid)
    }
    if (needChmod) {
      fs.chmodSync(dir, mode)
    }
  }

  if (cache && cGet(cache, dir) === true) {
    return done()
  }

  if (dir === cwd) {
    checkCwdSync(cwd)
    return done()
  }

  if (preserve) {
    return done(mkdirp.sync(dir, mode))
  }

  const sub = normPath(path.relative(cwd, dir))
  const parts = sub.split('/')
  let created = null
  for (let p = parts.shift(), part = cwd;
    p && (part += '/' + p);
    p = parts.shift()) {
    part = normPath(path.resolve(part))
    if (cGet(cache, part)) {
      continue
    }

    try {
      fs.mkdirSync(part, mode)
      created = created || part
      cSet(cache, part, true)
    } catch (er) {
      const st = fs.lstatSync(part)
      if (st.isDirectory()) {
        cSet(cache, part, true)
        continue
      } else if (unlink) {
        fs.unlinkSync(part)
        fs.mkdirSync(part, mode)
        created = created || part
        cSet(cache, part, true)
        continue
      } else if (st.isSymbolicLink()) {
        return new SymlinkError(part, part + '/' + parts.join('/'))
      }
    }
  }

  return done(created)
}
node_modules/tar/lib/mode-fix.js (27 lines; generated, vendored, normal file)
@@ -0,0 +1,27 @@
'use strict'
module.exports = (mode, isDir, portable) => {
  mode &= 0o7777

  // in portable mode, use the minimum reasonable umask
  // if this system creates files with 0o664 by default
  // (as some linux distros do), then we'll write the
  // archive with 0o644 instead. Also, don't ever create
  // a file that is not readable/writable by the owner.
  if (portable) {
    mode = (mode | 0o600) & ~0o22
  }

  // if dirs are readable, then they should be listable
  if (isDir) {
    if (mode & 0o400) {
      mode |= 0o100
    }
    if (mode & 0o40) {
      mode |= 0o10
    }
    if (mode & 0o4) {
      mode |= 0o1
    }
  }
  return mode
}
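
A quick worked example of the directory rule: each read bit that is set gains the matching execute bit, so a 0o640 directory becomes 0o750 (owner and group gain x, world stays empty):

    const modeFix = require('./lib/mode-fix.js')
    modeFix(0o640, true, false)   // => 0o750 (488 decimal)
    modeFix(0o666, false, true)   // portable file: => 0o644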
node_modules/tar/lib/normalize-unicode.js (12 lines; generated, vendored, normal file)
@@ -0,0 +1,12 @@
// warning: extremely hot code path.
// This has been meticulously optimized for use
// within npm install on large package trees.
// Do not edit without careful benchmarking.
const normalizeCache = Object.create(null)
const { hasOwnProperty } = Object.prototype
module.exports = s => {
  if (!hasOwnProperty.call(normalizeCache, s)) {
    normalizeCache[s] = s.normalize('NFD')
  }
  return normalizeCache[s]
}
node_modules/tar/lib/normalize-windows-path.js (8 lines; generated, vendored, normal file)
@@ -0,0 +1,8 @@
// on windows, either \ or / are valid directory separators.
// on unix, \ is a valid character in filenames.
// so, on windows, and only on windows, we replace all \ chars with /,
// so that we can use / as our one and only directory separator char.

const platform = process.env.TESTING_TAR_FAKE_PLATFORM || process.platform
module.exports = platform !== 'win32' ? p => p
  : p => p && p.replace(/\\/g, '/')
node_modules/tar/lib/pack.js (432 lines; generated, vendored, normal file)
@@ -0,0 +1,432 @@
'use strict'

// A readable tar stream creator
// Technically, this is a transform stream that you write paths into,
// and tar format comes out of.
// The `add()` method is like `write()` but returns this,
// and end() return `this` as well, so you can
// do `new Pack(opt).add('files').add('dir').end().pipe(output)
// You could also do something like:
// streamOfPaths().pipe(new Pack()).pipe(new fs.WriteStream('out.tar'))

class PackJob {
  constructor (path, absolute) {
    this.path = path || './'
    this.absolute = absolute
    this.entry = null
    this.stat = null
    this.readdir = null
    this.pending = false
    this.ignore = false
    this.piped = false
  }
}

const { Minipass } = require('minipass')
const zlib = require('minizlib')
const ReadEntry = require('./read-entry.js')
const WriteEntry = require('./write-entry.js')
const WriteEntrySync = WriteEntry.Sync
const WriteEntryTar = WriteEntry.Tar
const Yallist = require('yallist')
const EOF = Buffer.alloc(1024)
const ONSTAT = Symbol('onStat')
const ENDED = Symbol('ended')
const QUEUE = Symbol('queue')
const CURRENT = Symbol('current')
const PROCESS = Symbol('process')
const PROCESSING = Symbol('processing')
const PROCESSJOB = Symbol('processJob')
const JOBS = Symbol('jobs')
const JOBDONE = Symbol('jobDone')
const ADDFSENTRY = Symbol('addFSEntry')
const ADDTARENTRY = Symbol('addTarEntry')
const STAT = Symbol('stat')
const READDIR = Symbol('readdir')
const ONREADDIR = Symbol('onreaddir')
const PIPE = Symbol('pipe')
const ENTRY = Symbol('entry')
const ENTRYOPT = Symbol('entryOpt')
const WRITEENTRYCLASS = Symbol('writeEntryClass')
const WRITE = Symbol('write')
const ONDRAIN = Symbol('ondrain')

const fs = require('fs')
const path = require('path')
const warner = require('./warn-mixin.js')
const normPath = require('./normalize-windows-path.js')

const Pack = warner(class Pack extends Minipass {
  constructor (opt) {
    super(opt)
    opt = opt || Object.create(null)
    this.opt = opt
    this.file = opt.file || ''
    this.cwd = opt.cwd || process.cwd()
    this.maxReadSize = opt.maxReadSize
    this.preservePaths = !!opt.preservePaths
    this.strict = !!opt.strict
    this.noPax = !!opt.noPax
    this.prefix = normPath(opt.prefix || '')
    this.linkCache = opt.linkCache || new Map()
    this.statCache = opt.statCache || new Map()
    this.readdirCache = opt.readdirCache || new Map()

    this[WRITEENTRYCLASS] = WriteEntry
    if (typeof opt.onwarn === 'function') {
      this.on('warn', opt.onwarn)
    }

    this.portable = !!opt.portable
    this.zip = null

    if (opt.gzip || opt.brotli) {
      if (opt.gzip && opt.brotli) {
        throw new TypeError('gzip and brotli are mutually exclusive')
      }
      if (opt.gzip) {
        if (typeof opt.gzip !== 'object') {
          opt.gzip = {}
        }
        if (this.portable) {
          opt.gzip.portable = true
        }
        this.zip = new zlib.Gzip(opt.gzip)
      }
      if (opt.brotli) {
        if (typeof opt.brotli !== 'object') {
          opt.brotli = {}
        }
        this.zip = new zlib.BrotliCompress(opt.brotli)
      }
      this.zip.on('data', chunk => super.write(chunk))
      this.zip.on('end', _ => super.end())
      this.zip.on('drain', _ => this[ONDRAIN]())
      this.on('resume', _ => this.zip.resume())
    } else {
      this.on('drain', this[ONDRAIN])
    }

    this.noDirRecurse = !!opt.noDirRecurse
    this.follow = !!opt.follow
    this.noMtime = !!opt.noMtime
    this.mtime = opt.mtime || null

    this.filter = typeof opt.filter === 'function' ? opt.filter : _ => true

    this[QUEUE] = new Yallist()
    this[JOBS] = 0
    this.jobs = +opt.jobs || 4
    this[PROCESSING] = false
    this[ENDED] = false
  }

  [WRITE] (chunk) {
    return super.write(chunk)
  }

  add (path) {
    this.write(path)
    return this
  }

  end (path) {
    if (path) {
      this.write(path)
    }
    this[ENDED] = true
    this[PROCESS]()
    return this
  }

  write (path) {
    if (this[ENDED]) {
      throw new Error('write after end')
    }

    if (path instanceof ReadEntry) {
      this[ADDTARENTRY](path)
    } else {
      this[ADDFSENTRY](path)
    }
    return this.flowing
  }

  [ADDTARENTRY] (p) {
    const absolute = normPath(path.resolve(this.cwd, p.path))
    // in this case, we don't have to wait for the stat
    if (!this.filter(p.path, p)) {
      p.resume()
    } else {
      const job = new PackJob(p.path, absolute, false)
      job.entry = new WriteEntryTar(p, this[ENTRYOPT](job))
      job.entry.on('end', _ => this[JOBDONE](job))
      this[JOBS] += 1
      this[QUEUE].push(job)
    }

    this[PROCESS]()
  }

  [ADDFSENTRY] (p) {
    const absolute = normPath(path.resolve(this.cwd, p))
    this[QUEUE].push(new PackJob(p, absolute))
    this[PROCESS]()
  }

  [STAT] (job) {
    job.pending = true
    this[JOBS] += 1
    const stat = this.follow ? 'stat' : 'lstat'
    fs[stat](job.absolute, (er, stat) => {
      job.pending = false
      this[JOBS] -= 1
      if (er) {
        this.emit('error', er)
      } else {
        this[ONSTAT](job, stat)
      }
    })
  }

  [ONSTAT] (job, stat) {
    this.statCache.set(job.absolute, stat)
    job.stat = stat

    // now we have the stat, we can filter it.
    if (!this.filter(job.path, stat)) {
      job.ignore = true
    }

    this[PROCESS]()
  }

  [READDIR] (job) {
    job.pending = true
    this[JOBS] += 1
    fs.readdir(job.absolute, (er, entries) => {
      job.pending = false
      this[JOBS] -= 1
      if (er) {
        return this.emit('error', er)
      }
      this[ONREADDIR](job, entries)
    })
  }

  [ONREADDIR] (job, entries) {
    this.readdirCache.set(job.absolute, entries)
    job.readdir = entries
    this[PROCESS]()
  }

  [PROCESS] () {
    if (this[PROCESSING]) {
      return
    }

    this[PROCESSING] = true
    for (let w = this[QUEUE].head;
      w !== null && this[JOBS] < this.jobs;
      w = w.next) {
      this[PROCESSJOB](w.value)
      if (w.value.ignore) {
        const p = w.next
        this[QUEUE].removeNode(w)
        w.next = p
      }
    }

    this[PROCESSING] = false

    if (this[ENDED] && !this[QUEUE].length && this[JOBS] === 0) {
      if (this.zip) {
        this.zip.end(EOF)
      } else {
        super.write(EOF)
        super.end()
      }
    }
  }

  get [CURRENT] () {
    return this[QUEUE] && this[QUEUE].head && this[QUEUE].head.value
  }

  [JOBDONE] (job) {
    this[QUEUE].shift()
    this[JOBS] -= 1
    this[PROCESS]()
  }

  [PROCESSJOB] (job) {
    if (job.pending) {
      return
    }

    if (job.entry) {
      if (job === this[CURRENT] && !job.piped) {
        this[PIPE](job)
      }
      return
    }

    if (!job.stat) {
      if (this.statCache.has(job.absolute)) {
        this[ONSTAT](job, this.statCache.get(job.absolute))
      } else {
        this[STAT](job)
      }
    }
    if (!job.stat) {
      return
    }

    // filtered out!
    if (job.ignore) {
      return
    }

    if (!this.noDirRecurse && job.stat.isDirectory() && !job.readdir) {
      if (this.readdirCache.has(job.absolute)) {
        this[ONREADDIR](job, this.readdirCache.get(job.absolute))
      } else {
        this[READDIR](job)
      }
      if (!job.readdir) {
        return
      }
    }

    // we know it doesn't have an entry, because that got checked above
    job.entry = this[ENTRY](job)
    if (!job.entry) {
      job.ignore = true
      return
    }

    if (job === this[CURRENT] && !job.piped) {
      this[PIPE](job)
    }
  }

  [ENTRYOPT] (job) {
    return {
      onwarn: (code, msg, data) => this.warn(code, msg, data),
      noPax: this.noPax,
      cwd: this.cwd,
      absolute: job.absolute,
      preservePaths: this.preservePaths,
      maxReadSize: this.maxReadSize,
      strict: this.strict,
      portable: this.portable,
      linkCache: this.linkCache,
      statCache: this.statCache,
      noMtime: this.noMtime,
      mtime: this.mtime,
      prefix: this.prefix,
    }
  }

  [ENTRY] (job) {
    this[JOBS] += 1
    try {
      return new this[WRITEENTRYCLASS](job.path, this[ENTRYOPT](job))
        .on('end', () => this[JOBDONE](job))
        .on('error', er => this.emit('error', er))
    } catch (er) {
      this.emit('error', er)
    }
  }

  [ONDRAIN] () {
    if (this[CURRENT] && this[CURRENT].entry) {
      this[CURRENT].entry.resume()
    }
  }

  // like .pipe() but using super, because our write() is special
  [PIPE] (job) {
    job.piped = true

    if (job.readdir) {
      job.readdir.forEach(entry => {
        const p = job.path
        const base = p === './' ? '' : p.replace(/\/*$/, '/')
        this[ADDFSENTRY](base + entry)
      })
    }

    const source = job.entry
    const zip = this.zip

    if (zip) {
      source.on('data', chunk => {
        if (!zip.write(chunk)) {
          source.pause()
        }
      })
    } else {
      source.on('data', chunk => {
        if (!super.write(chunk)) {
          source.pause()
        }
      })
    }
  }

  pause () {
    if (this.zip) {
      this.zip.pause()
    }
    return super.pause()
  }
})

class PackSync extends Pack {
  constructor (opt) {
    super(opt)
    this[WRITEENTRYCLASS] = WriteEntrySync
  }

  // pause/resume are no-ops in sync streams.
  pause () {}
  resume () {}

  [STAT] (job) {
    const stat = this.follow ? 'statSync' : 'lstatSync'
    this[ONSTAT](job, fs[stat](job.absolute))
  }

  [READDIR] (job, stat) {
    this[ONREADDIR](job, fs.readdirSync(job.absolute))
  }

  // gotta get it all in this tick
  [PIPE] (job) {
    const source = job.entry
    const zip = this.zip

    if (job.readdir) {
      job.readdir.forEach(entry => {
        const p = job.path
        const base = p === './' ? '' : p.replace(/\/*$/, '/')
        this[ADDFSENTRY](base + entry)
      })
    }

    if (zip) {
      source.on('data', chunk => {
        zip.write(chunk)
      })
    } else {
      source.on('data', chunk => {
        super[WRITE](chunk)
      })
    }
  }
}

Pack.Sync = PackSync

module.exports = Pack
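
The chaining style described in the file's header comment works because add() and end() return this while the Minipass side keeps the stream pipeable. A sketch along those lines, with placeholder paths:

    const fs = require('fs')
    const Pack = require('tar').Pack
    new Pack({ cwd: process.cwd(), gzip: true })
      .add('src')
      .add('package.json')
      .end()
      .pipe(fs.createWriteStream('out.tgz'))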
node_modules/tar/lib/parse.js (552 lines; generated, vendored, normal file)
@@ -0,0 +1,552 @@
'use strict'

// this[BUFFER] is the remainder of a chunk if we're waiting for
// the full 512 bytes of a header to come in. We will Buffer.concat()
// it to the next write(), which is a mem copy, but a small one.
//
// this[QUEUE] is a Yallist of entries that haven't been emitted
// yet this can only get filled up if the user keeps write()ing after
// a write() returns false, or does a write() with more than one entry
//
// We don't buffer chunks, we always parse them and either create an
// entry, or push it into the active entry. The ReadEntry class knows
// to throw data away if .ignore=true
//
// Shift entry off the buffer when it emits 'end', and emit 'entry' for
// the next one in the list.
//
// At any time, we're pushing body chunks into the entry at WRITEENTRY,
// and waiting for 'end' on the entry at READENTRY
//
// ignored entries get .resume() called on them straight away

const warner = require('./warn-mixin.js')
const Header = require('./header.js')
const EE = require('events')
const Yallist = require('yallist')
const maxMetaEntrySize = 1024 * 1024
const Entry = require('./read-entry.js')
const Pax = require('./pax.js')
const zlib = require('minizlib')
const { nextTick } = require('process')

const gzipHeader = Buffer.from([0x1f, 0x8b])
const STATE = Symbol('state')
const WRITEENTRY = Symbol('writeEntry')
const READENTRY = Symbol('readEntry')
const NEXTENTRY = Symbol('nextEntry')
const PROCESSENTRY = Symbol('processEntry')
const EX = Symbol('extendedHeader')
const GEX = Symbol('globalExtendedHeader')
const META = Symbol('meta')
const EMITMETA = Symbol('emitMeta')
const BUFFER = Symbol('buffer')
const QUEUE = Symbol('queue')
const ENDED = Symbol('ended')
const EMITTEDEND = Symbol('emittedEnd')
const EMIT = Symbol('emit')
const UNZIP = Symbol('unzip')
const CONSUMECHUNK = Symbol('consumeChunk')
const CONSUMECHUNKSUB = Symbol('consumeChunkSub')
const CONSUMEBODY = Symbol('consumeBody')
const CONSUMEMETA = Symbol('consumeMeta')
const CONSUMEHEADER = Symbol('consumeHeader')
const CONSUMING = Symbol('consuming')
const BUFFERCONCAT = Symbol('bufferConcat')
const MAYBEEND = Symbol('maybeEnd')
const WRITING = Symbol('writing')
const ABORTED = Symbol('aborted')
const DONE = Symbol('onDone')
const SAW_VALID_ENTRY = Symbol('sawValidEntry')
const SAW_NULL_BLOCK = Symbol('sawNullBlock')
const SAW_EOF = Symbol('sawEOF')
const CLOSESTREAM = Symbol('closeStream')

const noop = _ => true

module.exports = warner(class Parser extends EE {
  constructor (opt) {
    opt = opt || {}
    super(opt)

    this.file = opt.file || ''

    // set to boolean false when an entry starts. 1024 bytes of \0
    // is technically a valid tarball, albeit a boring one.
    this[SAW_VALID_ENTRY] = null

    // these BADARCHIVE errors can't be detected early. listen on DONE.
    this.on(DONE, _ => {
      if (this[STATE] === 'begin' || this[SAW_VALID_ENTRY] === false) {
        // either less than 1 block of data, or all entries were invalid.
        // Either way, probably not even a tarball.
        this.warn('TAR_BAD_ARCHIVE', 'Unrecognized archive format')
      }
    })

    if (opt.ondone) {
      this.on(DONE, opt.ondone)
    } else {
      this.on(DONE, _ => {
        this.emit('prefinish')
        this.emit('finish')
        this.emit('end')
      })
    }

    this.strict = !!opt.strict
    this.maxMetaEntrySize = opt.maxMetaEntrySize || maxMetaEntrySize
    this.filter = typeof opt.filter === 'function' ? opt.filter : noop
    // Unlike gzip, brotli doesn't have any magic bytes to identify it
    // Users need to explicitly tell us they're extracting a brotli file
    // Or we infer from the file extension
    const isTBR = (opt.file && (
      opt.file.endsWith('.tar.br') || opt.file.endsWith('.tbr')))
    // if it's a tbr file it MIGHT be brotli, but we don't know until
    // we look at it and verify it's not a valid tar file.
    this.brotli = !opt.gzip && opt.brotli !== undefined ? opt.brotli
      : isTBR ? undefined
      : false

    // have to set this so that streams are ok piping into it
    this.writable = true
    this.readable = false

    this[QUEUE] = new Yallist()
    this[BUFFER] = null
    this[READENTRY] = null
    this[WRITEENTRY] = null
    this[STATE] = 'begin'
    this[META] = ''
    this[EX] = null
    this[GEX] = null
    this[ENDED] = false
    this[UNZIP] = null
    this[ABORTED] = false
    this[SAW_NULL_BLOCK] = false
    this[SAW_EOF] = false

    this.on('end', () => this[CLOSESTREAM]())

    if (typeof opt.onwarn === 'function') {
      this.on('warn', opt.onwarn)
    }
    if (typeof opt.onentry === 'function') {
      this.on('entry', opt.onentry)
    }
  }

  [CONSUMEHEADER] (chunk, position) {
    if (this[SAW_VALID_ENTRY] === null) {
      this[SAW_VALID_ENTRY] = false
    }
    let header
    try {
      header = new Header(chunk, position, this[EX], this[GEX])
    } catch (er) {
      return this.warn('TAR_ENTRY_INVALID', er)
    }

    if (header.nullBlock) {
      if (this[SAW_NULL_BLOCK]) {
        this[SAW_EOF] = true
        // ending an archive with no entries. pointless, but legal.
        if (this[STATE] === 'begin') {
          this[STATE] = 'header'
        }
        this[EMIT]('eof')
      } else {
        this[SAW_NULL_BLOCK] = true
        this[EMIT]('nullBlock')
      }
    } else {
      this[SAW_NULL_BLOCK] = false
      if (!header.cksumValid) {
        this.warn('TAR_ENTRY_INVALID', 'checksum failure', { header })
      } else if (!header.path) {
        this.warn('TAR_ENTRY_INVALID', 'path is required', { header })
      } else {
        const type = header.type
        if (/^(Symbolic)?Link$/.test(type) && !header.linkpath) {
          this.warn('TAR_ENTRY_INVALID', 'linkpath required', { header })
        } else if (!/^(Symbolic)?Link$/.test(type) && header.linkpath) {
          this.warn('TAR_ENTRY_INVALID', 'linkpath forbidden', { header })
        } else {
          const entry = this[WRITEENTRY] = new Entry(header, this[EX], this[GEX])

          // we do this for meta & ignored entries as well, because they
          // are still valid tar, or else we wouldn't know to ignore them
          if (!this[SAW_VALID_ENTRY]) {
            if (entry.remain) {
              // this might be the one!
              const onend = () => {
                if (!entry.invalid) {
                  this[SAW_VALID_ENTRY] = true
                }
              }
              entry.on('end', onend)
            } else {
              this[SAW_VALID_ENTRY] = true
            }
          }

          if (entry.meta) {
            if (entry.size > this.maxMetaEntrySize) {
              entry.ignore = true
              this[EMIT]('ignoredEntry', entry)
              this[STATE] = 'ignore'
              entry.resume()
            } else if (entry.size > 0) {
              this[META] = ''
              entry.on('data', c => this[META] += c)
              this[STATE] = 'meta'
            }
          } else {
            this[EX] = null
            entry.ignore = entry.ignore || !this.filter(entry.path, entry)

            if (entry.ignore) {
              // probably valid, just not something we care about
              this[EMIT]('ignoredEntry', entry)
              this[STATE] = entry.remain ? 'ignore' : 'header'
              entry.resume()
            } else {
              if (entry.remain) {
                this[STATE] = 'body'
              } else {
                this[STATE] = 'header'
                entry.end()
              }

              if (!this[READENTRY]) {
                this[QUEUE].push(entry)
                this[NEXTENTRY]()
              } else {
                this[QUEUE].push(entry)
              }
            }
          }
        }
      }
    }
  }

  [CLOSESTREAM] () {
    nextTick(() => this.emit('close'))
  }

  [PROCESSENTRY] (entry) {
    let go = true

    if (!entry) {
      this[READENTRY] = null
      go = false
    } else if (Array.isArray(entry)) {
      this.emit.apply(this, entry)
    } else {
      this[READENTRY] = entry
      this.emit('entry', entry)
      if (!entry.emittedEnd) {
        entry.on('end', _ => this[NEXTENTRY]())
        go = false
      }
    }

    return go
  }

  [NEXTENTRY] () {
    do {} while (this[PROCESSENTRY](this[QUEUE].shift()))

    if (!this[QUEUE].length) {
      // At this point, there's nothing in the queue, but we may have an
      // entry which is being consumed (readEntry).
      // If we don't, then we definitely can handle more data.
      // If we do, and either it's flowing, or it has never had any data
      // written to it, then it needs more.
      // The only other possibility is that it has returned false from a
      // write() call, so we wait for the next drain to continue.
      const re = this[READENTRY]
      const drainNow = !re || re.flowing || re.size === re.remain
      if (drainNow) {
        if (!this[WRITING]) {
          this.emit('drain')
        }
      } else {
        re.once('drain', _ => this.emit('drain'))
      }
    }
  }

  [CONSUMEBODY] (chunk, position) {
    // write up to but no more than writeEntry.blockRemain
    const entry = this[WRITEENTRY]
    const br = entry.blockRemain
    const c = (br >= chunk.length && position === 0) ? chunk
      : chunk.slice(position, position + br)

    entry.write(c)

    if (!entry.blockRemain) {
      this[STATE] = 'header'
      this[WRITEENTRY] = null
      entry.end()
    }

    return c.length
  }

  [CONSUMEMETA] (chunk, position) {
    const entry = this[WRITEENTRY]
    const ret = this[CONSUMEBODY](chunk, position)

    // if we finished, then the entry is reset
    if (!this[WRITEENTRY]) {
      this[EMITMETA](entry)
    }

    return ret
  }

  [EMIT] (ev, data, extra) {
    if (!this[QUEUE].length && !this[READENTRY]) {
      this.emit(ev, data, extra)
    } else {
      this[QUEUE].push([ev, data, extra])
    }
  }

  [EMITMETA] (entry) {
    this[EMIT]('meta', this[META])
    switch (entry.type) {
      case 'ExtendedHeader':
      case 'OldExtendedHeader':
        this[EX] = Pax.parse(this[META], this[EX], false)
        break

      case 'GlobalExtendedHeader':
        this[GEX] = Pax.parse(this[META], this[GEX], true)
        break

      case 'NextFileHasLongPath':
      case 'OldGnuLongPath':
        this[EX] = this[EX] || Object.create(null)
        this[EX].path = this[META].replace(/\0.*/, '')
        break

      case 'NextFileHasLongLinkpath':
        this[EX] = this[EX] || Object.create(null)
        this[EX].linkpath = this[META].replace(/\0.*/, '')
        break

      /* istanbul ignore next */
      default: throw new Error('unknown meta: ' + entry.type)
    }
  }

  abort (error) {
    this[ABORTED] = true
    this.emit('abort', error)
    // always throws, even in non-strict mode
    this.warn('TAR_ABORT', error, { recoverable: false })
  }
|
||||
|
||||
write (chunk) {
|
||||
if (this[ABORTED]) {
|
||||
return
|
||||
}
|
||||
|
||||
// first write, might be gzipped
|
||||
const needSniff = this[UNZIP] === null ||
|
||||
this.brotli === undefined && this[UNZIP] === false
|
||||
if (needSniff && chunk) {
|
||||
if (this[BUFFER]) {
|
||||
chunk = Buffer.concat([this[BUFFER], chunk])
|
||||
this[BUFFER] = null
|
||||
}
|
||||
if (chunk.length < gzipHeader.length) {
|
||||
this[BUFFER] = chunk
|
||||
return true
|
||||
}
|
||||
|
||||
// look for gzip header
|
||||
for (let i = 0; this[UNZIP] === null && i < gzipHeader.length; i++) {
|
||||
if (chunk[i] !== gzipHeader[i]) {
|
||||
this[UNZIP] = false
|
||||
}
|
||||
}
|
||||
|
||||
const maybeBrotli = this.brotli === undefined
|
||||
if (this[UNZIP] === false && maybeBrotli) {
|
||||
// read the first header to see if it's a valid tar file. If so,
|
||||
// we can safely assume that it's not actually brotli, despite the
|
||||
// .tbr or .tar.br file extension.
|
||||
// if we ended before getting a full chunk, yes, def brotli
|
||||
if (chunk.length < 512) {
|
||||
if (this[ENDED]) {
|
||||
this.brotli = true
|
||||
} else {
|
||||
this[BUFFER] = chunk
|
||||
return true
|
||||
}
|
||||
} else {
|
||||
// if it's tar, it's pretty reliably not brotli, chances of
|
||||
// that happening are astronomical.
|
||||
try {
|
||||
new Header(chunk.slice(0, 512))
|
||||
this.brotli = false
|
||||
} catch (_) {
|
||||
this.brotli = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (this[UNZIP] === null || (this[UNZIP] === false && this.brotli)) {
|
||||
const ended = this[ENDED]
|
||||
this[ENDED] = false
|
||||
this[UNZIP] = this[UNZIP] === null
|
||||
? new zlib.Unzip()
|
||||
: new zlib.BrotliDecompress()
|
||||
this[UNZIP].on('data', chunk => this[CONSUMECHUNK](chunk))
|
||||
this[UNZIP].on('error', er => this.abort(er))
|
||||
this[UNZIP].on('end', _ => {
|
||||
this[ENDED] = true
|
||||
this[CONSUMECHUNK]()
|
||||
})
|
||||
this[WRITING] = true
|
||||
const ret = this[UNZIP][ended ? 'end' : 'write'](chunk)
|
||||
this[WRITING] = false
|
||||
return ret
|
||||
}
|
||||
}
|
||||
|
||||
this[WRITING] = true
|
||||
if (this[UNZIP]) {
|
||||
this[UNZIP].write(chunk)
|
||||
} else {
|
||||
this[CONSUMECHUNK](chunk)
|
||||
}
|
||||
this[WRITING] = false
|
||||
|
||||
// return false if there's a queue, or if the current entry isn't flowing
|
||||
const ret =
|
||||
this[QUEUE].length ? false :
|
||||
this[READENTRY] ? this[READENTRY].flowing :
|
||||
true
|
||||
|
||||
// if we have no queue, then that means a clogged READENTRY
|
||||
if (!ret && !this[QUEUE].length) {
|
||||
this[READENTRY].once('drain', _ => this.emit('drain'))
|
||||
}
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
[BUFFERCONCAT] (c) {
|
||||
if (c && !this[ABORTED]) {
|
||||
this[BUFFER] = this[BUFFER] ? Buffer.concat([this[BUFFER], c]) : c
|
||||
}
|
||||
}
|
||||
|
||||
[MAYBEEND] () {
|
||||
if (this[ENDED] &&
|
||||
!this[EMITTEDEND] &&
|
||||
!this[ABORTED] &&
|
||||
!this[CONSUMING]) {
|
||||
this[EMITTEDEND] = true
|
||||
const entry = this[WRITEENTRY]
|
||||
if (entry && entry.blockRemain) {
|
||||
// truncated, likely a damaged file
|
||||
const have = this[BUFFER] ? this[BUFFER].length : 0
|
||||
this.warn('TAR_BAD_ARCHIVE', `Truncated input (needed ${
|
||||
entry.blockRemain} more bytes, only ${have} available)`, { entry })
|
||||
if (this[BUFFER]) {
|
||||
entry.write(this[BUFFER])
|
||||
}
|
||||
entry.end()
|
||||
}
|
||||
this[EMIT](DONE)
|
||||
}
|
||||
}
|
||||
|
||||
[CONSUMECHUNK] (chunk) {
|
||||
if (this[CONSUMING]) {
|
||||
this[BUFFERCONCAT](chunk)
|
||||
} else if (!chunk && !this[BUFFER]) {
|
||||
this[MAYBEEND]()
|
||||
} else {
|
||||
this[CONSUMING] = true
|
||||
if (this[BUFFER]) {
|
||||
this[BUFFERCONCAT](chunk)
|
||||
const c = this[BUFFER]
|
||||
this[BUFFER] = null
|
||||
this[CONSUMECHUNKSUB](c)
|
||||
} else {
|
||||
this[CONSUMECHUNKSUB](chunk)
|
||||
}
|
||||
|
||||
while (this[BUFFER] &&
|
||||
this[BUFFER].length >= 512 &&
|
||||
!this[ABORTED] &&
|
||||
!this[SAW_EOF]) {
|
||||
const c = this[BUFFER]
|
||||
this[BUFFER] = null
|
||||
this[CONSUMECHUNKSUB](c)
|
||||
}
|
||||
this[CONSUMING] = false
|
||||
}
|
||||
|
||||
if (!this[BUFFER] || this[ENDED]) {
|
||||
this[MAYBEEND]()
|
||||
}
|
||||
}
|
||||
|
||||
[CONSUMECHUNKSUB] (chunk) {
|
||||
// we know that we are in CONSUMING mode, so anything written goes into
|
||||
// the buffer. Advance the position and put any remainder in the buffer.
|
||||
let position = 0
|
||||
const length = chunk.length
|
||||
while (position + 512 <= length && !this[ABORTED] && !this[SAW_EOF]) {
|
||||
switch (this[STATE]) {
|
||||
case 'begin':
|
||||
case 'header':
|
||||
this[CONSUMEHEADER](chunk, position)
|
||||
position += 512
|
||||
break
|
||||
|
||||
case 'ignore':
|
||||
case 'body':
|
||||
position += this[CONSUMEBODY](chunk, position)
|
||||
break
|
||||
|
||||
case 'meta':
|
||||
position += this[CONSUMEMETA](chunk, position)
|
||||
break
|
||||
|
||||
/* istanbul ignore next */
|
||||
default:
|
||||
throw new Error('invalid state: ' + this[STATE])
|
||||
}
|
||||
}
|
||||
|
||||
if (position < length) {
|
||||
if (this[BUFFER]) {
|
||||
this[BUFFER] = Buffer.concat([chunk.slice(position), this[BUFFER]])
|
||||
} else {
|
||||
this[BUFFER] = chunk.slice(position)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
end (chunk) {
|
||||
if (!this[ABORTED]) {
|
||||
if (this[UNZIP]) {
|
||||
this[UNZIP].end(chunk)
|
||||
} else {
|
||||
this[ENDED] = true
|
||||
if (this.brotli === undefined) chunk = chunk || Buffer.alloc(0)
|
||||
this.write(chunk)
|
||||
}
|
||||
}
|
||||
}
|
||||
})
156
node_modules/tar/lib/path-reservations.js
generated
vendored
Normal file
@@ -0,0 +1,156 @@
// A path exclusive reservation system
// reserve([list, of, paths], fn)
// When the fn is first in line for all its paths, it
// is called with a cb that clears the reservation.
//
// Used by async unpack to avoid clobbering paths in use,
// while still allowing maximal safe parallelization.

const assert = require('assert')
const normalize = require('./normalize-unicode.js')
const stripSlashes = require('./strip-trailing-slashes.js')
const { join } = require('path')

const platform = process.env.TESTING_TAR_FAKE_PLATFORM || process.platform
const isWindows = platform === 'win32'

module.exports = () => {
  // path => [function or Set]
  // A Set object means a directory reservation
  // A fn is a direct reservation on that path
  const queues = new Map()

  // fn => {paths:[path,...], dirs:[path, ...]}
  const reservations = new Map()

  // return a set of parent dirs for a given path
  // '/a/b/c/d' -> ['/', '/a', '/a/b', '/a/b/c', '/a/b/c/d']
  const getDirs = path => {
    const dirs = path.split('/').slice(0, -1).reduce((set, path) => {
      if (set.length) {
        path = join(set[set.length - 1], path)
      }
      set.push(path || '/')
      return set
    }, [])
    return dirs
  }

  // functions currently running
  const running = new Set()

  // return the queues for each path the function cares about
  // fn => {paths, dirs}
  const getQueues = fn => {
    const res = reservations.get(fn)
    /* istanbul ignore if - unpossible */
    if (!res) {
      throw new Error('function does not have any path reservations')
    }
    return {
      paths: res.paths.map(path => queues.get(path)),
      dirs: [...res.dirs].map(path => queues.get(path)),
    }
  }

  // check if fn is first in line for all its paths, and is
  // included in the first set for all its dir queues
  const check = fn => {
    const { paths, dirs } = getQueues(fn)
    return paths.every(q => q[0] === fn) &&
      dirs.every(q => q[0] instanceof Set && q[0].has(fn))
  }

  // run the function if it's first in line and not already running
  const run = fn => {
    if (running.has(fn) || !check(fn)) {
      return false
    }
    running.add(fn)
    fn(() => clear(fn))
    return true
  }

  const clear = fn => {
    if (!running.has(fn)) {
      return false
    }

    const { paths, dirs } = reservations.get(fn)
    const next = new Set()

    paths.forEach(path => {
      const q = queues.get(path)
      assert.equal(q[0], fn)
      if (q.length === 1) {
        queues.delete(path)
      } else {
        q.shift()
        if (typeof q[0] === 'function') {
          next.add(q[0])
        } else {
          q[0].forEach(fn => next.add(fn))
        }
      }
    })

    dirs.forEach(dir => {
      const q = queues.get(dir)
      assert(q[0] instanceof Set)
      if (q[0].size === 1 && q.length === 1) {
        queues.delete(dir)
      } else if (q[0].size === 1) {
        q.shift()

        // must be a function or else the Set would've been reused
        next.add(q[0])
      } else {
        q[0].delete(fn)
      }
    })
    running.delete(fn)

    next.forEach(fn => run(fn))
    return true
  }

  const reserve = (paths, fn) => {
    // collide on matches across case and unicode normalization
    // On windows, thanks to the magic of 8.3 shortnames, it is fundamentally
    // impossible to determine whether two paths refer to the same thing on
    // disk, without asking the kernel for a shortname.
    // So, we just pretend that every path matches every other path here,
    // effectively removing all parallelization on windows.
    paths = isWindows ? ['win32 parallelization disabled'] : paths.map(p => {
      // don't need normPath, because we skip this entirely for windows
      return stripSlashes(join(normalize(p))).toLowerCase()
    })

    const dirs = new Set(
      paths.map(path => getDirs(path)).reduce((a, b) => a.concat(b))
    )
    reservations.set(fn, { dirs, paths })
    paths.forEach(path => {
      const q = queues.get(path)
      if (!q) {
        queues.set(path, [fn])
      } else {
        q.push(fn)
      }
    })
    dirs.forEach(dir => {
      const q = queues.get(dir)
      if (!q) {
        queues.set(dir, [new Set([fn])])
      } else if (q[q.length - 1] instanceof Set) {
        q[q.length - 1].add(fn)
      } else {
        q.push(new Set([fn]))
      }
    })

    return run(fn)
  }

  return { check, reserve }
}
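A minimal sketch of how this reservation system behaves (the paths and callbacks here are illustrative, not from the vendored code): two operations touching the same path run strictly in order, while operations on disjoint paths are free to run concurrently.

// sketch: illustrative use of the reservation factory above
const pathReservations = require('./path-reservations.js')
const reservations = pathReservations()

// first reservation on 'a/b' runs immediately
reservations.reserve(['a/b'], clear => {
  console.log('first writer for a/b')
  // holding the reservation; release it so queued fns can run
  setTimeout(clear, 100)
})

// a second reservation on the same path waits for the first clear()
reservations.reserve(['a/b'], clear => {
  console.log('second writer for a/b')
  clear()
})

// a path that doesn't overlap is not blocked
reservations.reserve(['c/d'], clear => {
  console.log('writer for c/d runs without waiting')
  clear()
})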
150
node_modules/tar/lib/pax.js
generated
vendored
Normal file
@@ -0,0 +1,150 @@
'use strict'
const Header = require('./header.js')
const path = require('path')

class Pax {
  constructor (obj, global) {
    this.atime = obj.atime || null
    this.charset = obj.charset || null
    this.comment = obj.comment || null
    this.ctime = obj.ctime || null
    this.gid = obj.gid || null
    this.gname = obj.gname || null
    this.linkpath = obj.linkpath || null
    this.mtime = obj.mtime || null
    this.path = obj.path || null
    this.size = obj.size || null
    this.uid = obj.uid || null
    this.uname = obj.uname || null
    this.dev = obj.dev || null
    this.ino = obj.ino || null
    this.nlink = obj.nlink || null
    this.global = global || false
  }

  encode () {
    const body = this.encodeBody()
    if (body === '') {
      return null
    }

    const bodyLen = Buffer.byteLength(body)
    // round up to 512 bytes
    // add 512 for header
    const bufLen = 512 * Math.ceil(1 + bodyLen / 512)
    const buf = Buffer.allocUnsafe(bufLen)

    // 0-fill the header section, it might not hit every field
    for (let i = 0; i < 512; i++) {
      buf[i] = 0
    }

    new Header({
      // XXX split the path
      // then the path should be PaxHeader + basename, but less than 99,
      // prepend with the dirname
      path: ('PaxHeader/' + path.basename(this.path)).slice(0, 99),
      mode: this.mode || 0o644,
      uid: this.uid || null,
      gid: this.gid || null,
      size: bodyLen,
      mtime: this.mtime || null,
      type: this.global ? 'GlobalExtendedHeader' : 'ExtendedHeader',
      linkpath: '',
      uname: this.uname || '',
      gname: this.gname || '',
      devmaj: 0,
      devmin: 0,
      atime: this.atime || null,
      ctime: this.ctime || null,
    }).encode(buf)

    buf.write(body, 512, bodyLen, 'utf8')

    // null pad after the body
    for (let i = bodyLen + 512; i < buf.length; i++) {
      buf[i] = 0
    }

    return buf
  }

  encodeBody () {
    return (
      this.encodeField('path') +
      this.encodeField('ctime') +
      this.encodeField('atime') +
      this.encodeField('dev') +
      this.encodeField('ino') +
      this.encodeField('nlink') +
      this.encodeField('charset') +
      this.encodeField('comment') +
      this.encodeField('gid') +
      this.encodeField('gname') +
      this.encodeField('linkpath') +
      this.encodeField('mtime') +
      this.encodeField('size') +
      this.encodeField('uid') +
      this.encodeField('uname')
    )
  }

  encodeField (field) {
    if (this[field] === null || this[field] === undefined) {
      return ''
    }
    const v = this[field] instanceof Date ? this[field].getTime() / 1000
      : this[field]
    const s = ' ' +
      (field === 'dev' || field === 'ino' || field === 'nlink'
        ? 'SCHILY.' : '') +
      field + '=' + v + '\n'
    const byteLen = Buffer.byteLength(s)
    // the digits includes the length of the digits in ascii base-10
    // so if it's 9 characters, then adding 1 for the 9 makes it 10
    // which makes it 11 chars.
    let digits = Math.floor(Math.log(byteLen) / Math.log(10)) + 1
    if (byteLen + digits >= Math.pow(10, digits)) {
      digits += 1
    }
    const len = digits + byteLen
    return len + s
  }
}

Pax.parse = (string, ex, g) => new Pax(merge(parseKV(string), ex), g)

const merge = (a, b) =>
  b ? Object.keys(a).reduce((s, k) => (s[k] = a[k], s), b) : a

const parseKV = string =>
  string
    .replace(/\n$/, '')
    .split('\n')
    .reduce(parseKVLine, Object.create(null))

const parseKVLine = (set, line) => {
  const n = parseInt(line, 10)

  // XXX Values with \n in them will fail this.
  // Refactor to not be a naive line-by-line parse.
  if (n !== Buffer.byteLength(line) + 1) {
    return set
  }

  line = line.slice((n + ' ').length)
  const kv = line.split('=')
  const k = kv.shift().replace(/^SCHILY\.(dev|ino|nlink)/, '$1')
  if (!k) {
    return set
  }

  const v = kv.join('=')
  set[k] = /^([A-Z]+\.)?([mac]|birth|creation)time$/.test(k)
    ? new Date(v * 1000)
    : /^[0-9]+$/.test(v) ? +v
    : v
  return set
}

module.exports = Pax
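The length arithmetic in encodeField above is self-referential: the decimal prefix of each pax record counts its own digits. A small worked example (standalone, mirroring the logic rather than importing the vendored file):

// worked example of the pax record length computation in encodeField
const s = ' path=foo\n'                  // record body, leading space included
const byteLen = Buffer.byteLength(s)     // 10 bytes
let digits = Math.floor(Math.log(byteLen) / Math.log(10)) + 1  // 2
if (byteLen + digits >= Math.pow(10, digits)) {
  digits += 1                            // bump when the prefix itself adds a digit
}
const record = (digits + byteLen) + s    // '12 path=foo\n', 12 bytes total
console.log(record.length === digits + byteLen)  // true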
107
node_modules/tar/lib/read-entry.js
generated
vendored
Normal file
@@ -0,0 +1,107 @@
'use strict'
const { Minipass } = require('minipass')
const normPath = require('./normalize-windows-path.js')

const SLURP = Symbol('slurp')
module.exports = class ReadEntry extends Minipass {
  constructor (header, ex, gex) {
    super()
    // read entries always start life paused.  this is to avoid the
    // situation where Minipass's auto-ending empty streams results
    // in an entry ending before we're ready for it.
    this.pause()
    this.extended = ex
    this.globalExtended = gex
    this.header = header
    this.startBlockSize = 512 * Math.ceil(header.size / 512)
    this.blockRemain = this.startBlockSize
    this.remain = header.size
    this.type = header.type
    this.meta = false
    this.ignore = false
    switch (this.type) {
      case 'File':
      case 'OldFile':
      case 'Link':
      case 'SymbolicLink':
      case 'CharacterDevice':
      case 'BlockDevice':
      case 'Directory':
      case 'FIFO':
      case 'ContiguousFile':
      case 'GNUDumpDir':
        break

      case 'NextFileHasLongLinkpath':
      case 'NextFileHasLongPath':
      case 'OldGnuLongPath':
      case 'GlobalExtendedHeader':
      case 'ExtendedHeader':
      case 'OldExtendedHeader':
        this.meta = true
        break

      // NOTE: gnutar and bsdtar treat unrecognized types as 'File'
      // it may be worth doing the same, but with a warning.
      default:
        this.ignore = true
    }

    this.path = normPath(header.path)
    this.mode = header.mode
    if (this.mode) {
      this.mode = this.mode & 0o7777
    }
    this.uid = header.uid
    this.gid = header.gid
    this.uname = header.uname
    this.gname = header.gname
    this.size = header.size
    this.mtime = header.mtime
    this.atime = header.atime
    this.ctime = header.ctime
    this.linkpath = normPath(header.linkpath)
    this.uname = header.uname
    this.gname = header.gname

    if (ex) {
      this[SLURP](ex)
    }
    if (gex) {
      this[SLURP](gex, true)
    }
  }

  write (data) {
    const writeLen = data.length
    if (writeLen > this.blockRemain) {
      throw new Error('writing more to entry than is appropriate')
    }

    const r = this.remain
    const br = this.blockRemain
    this.remain = Math.max(0, r - writeLen)
    this.blockRemain = Math.max(0, br - writeLen)
    if (this.ignore) {
      return true
    }

    if (r >= writeLen) {
      return super.write(data)
    }

    // r < writeLen
    return super.write(data.slice(0, r))
  }

  [SLURP] (ex, global) {
    for (const k in ex) {
      // we slurp in everything except for the path attribute in
      // a global extended header, because that's weird.
      if (ex[k] !== null && ex[k] !== undefined &&
          !(global && k === 'path')) {
        this[k] = k === 'path' || k === 'linkpath' ? normPath(ex[k]) : ex[k]
      }
    }
  }
}
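To illustrate the block accounting in write() above: tar bodies arrive in whole 512-byte blocks, and the entry forwards only the real bytes, dropping the NUL padding. A standalone worked example for a 5-byte file:

// worked example of ReadEntry block accounting for a 5-byte file
const size = 5
const startBlockSize = 512 * Math.ceil(size / 512)  // 512: one full block
// a single 512-byte block arrives; write() forwards only `remain` bytes
const writeLen = 512
const forwarded = Math.min(size, writeLen)           // 5 bytes of real data
const padding = writeLen - forwarded                 // 507 NUL padding bytes dropped
console.log(startBlockSize, forwarded, padding)      // 512 5 507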
246
node_modules/tar/lib/replace.js
generated
vendored
Normal file
@@ -0,0 +1,246 @@
'use strict'

// tar -r
const hlo = require('./high-level-opt.js')
const Pack = require('./pack.js')
const fs = require('fs')
const fsm = require('fs-minipass')
const t = require('./list.js')
const path = require('path')

// starting at the head of the file, read a Header
// If the checksum is invalid, that's our position to start writing
// If it is, jump forward by the specified size (round up to 512)
// and try again.
// Write the new Pack stream starting there.

const Header = require('./header.js')

module.exports = (opt_, files, cb) => {
  const opt = hlo(opt_)

  if (!opt.file) {
    throw new TypeError('file is required')
  }

  if (opt.gzip || opt.brotli || opt.file.endsWith('.br') || opt.file.endsWith('.tbr')) {
    throw new TypeError('cannot append to compressed archives')
  }

  if (!files || !Array.isArray(files) || !files.length) {
    throw new TypeError('no files or directories specified')
  }

  files = Array.from(files)

  return opt.sync ? replaceSync(opt, files)
    : replace(opt, files, cb)
}

const replaceSync = (opt, files) => {
  const p = new Pack.Sync(opt)

  let threw = true
  let fd
  let position

  try {
    try {
      fd = fs.openSync(opt.file, 'r+')
    } catch (er) {
      if (er.code === 'ENOENT') {
        fd = fs.openSync(opt.file, 'w+')
      } else {
        throw er
      }
    }

    const st = fs.fstatSync(fd)
    const headBuf = Buffer.alloc(512)

    POSITION: for (position = 0; position < st.size; position += 512) {
      for (let bufPos = 0, bytes = 0; bufPos < 512; bufPos += bytes) {
        bytes = fs.readSync(
          fd, headBuf, bufPos, headBuf.length - bufPos, position + bufPos
        )

        if (position === 0 && headBuf[0] === 0x1f && headBuf[1] === 0x8b) {
          throw new Error('cannot append to compressed archives')
        }

        if (!bytes) {
          break POSITION
        }
      }

      const h = new Header(headBuf)
      if (!h.cksumValid) {
        break
      }
      const entryBlockSize = 512 * Math.ceil(h.size / 512)
      if (position + entryBlockSize + 512 > st.size) {
        break
      }
      // the 512 for the header we just parsed will be added as well
      // also jump ahead all the blocks for the body
      position += entryBlockSize
      if (opt.mtimeCache) {
        opt.mtimeCache.set(h.path, h.mtime)
      }
    }
    threw = false

    streamSync(opt, p, position, fd, files)
  } finally {
    if (threw) {
      try {
        fs.closeSync(fd)
      } catch (er) {}
    }
  }
}

const streamSync = (opt, p, position, fd, files) => {
  const stream = new fsm.WriteStreamSync(opt.file, {
    fd: fd,
    start: position,
  })
  p.pipe(stream)
  addFilesSync(p, files)
}

const replace = (opt, files, cb) => {
  files = Array.from(files)
  const p = new Pack(opt)

  const getPos = (fd, size, cb_) => {
    const cb = (er, pos) => {
      if (er) {
        fs.close(fd, _ => cb_(er))
      } else {
        cb_(null, pos)
      }
    }

    let position = 0
    if (size === 0) {
      return cb(null, 0)
    }

    let bufPos = 0
    const headBuf = Buffer.alloc(512)
    const onread = (er, bytes) => {
      if (er) {
        return cb(er)
      }
      bufPos += bytes
      if (bufPos < 512 && bytes) {
        return fs.read(
          fd, headBuf, bufPos, headBuf.length - bufPos,
          position + bufPos, onread
        )
      }

      if (position === 0 && headBuf[0] === 0x1f && headBuf[1] === 0x8b) {
        return cb(new Error('cannot append to compressed archives'))
      }

      // truncated header
      if (bufPos < 512) {
        return cb(null, position)
      }

      const h = new Header(headBuf)
      if (!h.cksumValid) {
        return cb(null, position)
      }

      const entryBlockSize = 512 * Math.ceil(h.size / 512)
      if (position + entryBlockSize + 512 > size) {
        return cb(null, position)
      }

      position += entryBlockSize + 512
      if (position >= size) {
        return cb(null, position)
      }

      if (opt.mtimeCache) {
        opt.mtimeCache.set(h.path, h.mtime)
      }
      bufPos = 0
      fs.read(fd, headBuf, 0, 512, position, onread)
    }
    fs.read(fd, headBuf, 0, 512, position, onread)
  }

  const promise = new Promise((resolve, reject) => {
    p.on('error', reject)
    let flag = 'r+'
    const onopen = (er, fd) => {
      if (er && er.code === 'ENOENT' && flag === 'r+') {
        flag = 'w+'
        return fs.open(opt.file, flag, onopen)
      }

      if (er) {
        return reject(er)
      }

      fs.fstat(fd, (er, st) => {
        if (er) {
          return fs.close(fd, () => reject(er))
        }

        getPos(fd, st.size, (er, position) => {
          if (er) {
            return reject(er)
          }
          const stream = new fsm.WriteStream(opt.file, {
            fd: fd,
            start: position,
          })
          p.pipe(stream)
          stream.on('error', reject)
          stream.on('close', resolve)
          addFilesAsync(p, files)
        })
      })
    }
    fs.open(opt.file, flag, onopen)
  })

  return cb ? promise.then(cb, cb) : promise
}

const addFilesSync = (p, files) => {
  files.forEach(file => {
    if (file.charAt(0) === '@') {
      t({
        file: path.resolve(p.cwd, file.slice(1)),
        sync: true,
        noResume: true,
        onentry: entry => p.add(entry),
      })
    } else {
      p.add(file)
    }
  })
  p.end()
}

const addFilesAsync = (p, files) => {
  while (files.length) {
    const file = files.shift()
    if (file.charAt(0) === '@') {
      return t({
        file: path.resolve(p.cwd, file.slice(1)),
        noResume: true,
        onentry: entry => p.add(entry),
      }).then(_ => addFilesAsync(p, files))
    } else {
      p.add(file)
    }
  }
  p.end()
}
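A minimal usage sketch of this module through the public tar.r / tar.replace entry point (the file names here are hypothetical): it appends entries to an existing uncompressed archive, writing over the trailing null blocks located by the header scan above.

// sketch: append to an existing archive; 'archive.tar' and 'notes.txt' are hypothetical
const tar = require('tar')

tar.r({ file: 'archive.tar' }, ['notes.txt'])
  .then(() => console.log('appended'))
  .catch(er => console.error('replace failed:', er.message))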
24
node_modules/tar/lib/strip-absolute-path.js
generated
vendored
Normal file
@@ -0,0 +1,24 @@
// unix absolute paths are also absolute on win32, so we use this for both
const { isAbsolute, parse } = require('path').win32

// returns [root, stripped]
// Note that windows will think that //x/y/z/a has a "root" of //x/y, and in
// those cases, we want to sanitize it to x/y/z/a, not z/a, so we strip /
// explicitly if it's the first character.
// drive-specific relative paths on Windows get their root stripped off even
// though they are not absolute, so `c:../foo` becomes ['c:', '../foo']
module.exports = path => {
  let r = ''

  let parsed = parse(path)
  while (isAbsolute(path) || parsed.root) {
    // windows will think that //x/y/z has a "root" of //x/y/
    // but strip the //?/C:/ off of //?/C:/path
    const root = path.charAt(0) === '/' && path.slice(0, 4) !== '//?/' ? '/'
      : parsed.root
    path = path.slice(root.length)
    r += root
    parsed = parse(path)
  }
  return [r, path]
}
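Illustrative input/output pairs for this helper, following the comments above:

// illustrative results of the root-stripping helper above
const stripAbsolutePath = require('./strip-absolute-path.js')
console.log(stripAbsolutePath('/foo/bar'))  // ['/', 'foo/bar']
console.log(stripAbsolutePath('c:../foo'))  // ['c:', '../foo']
console.log(stripAbsolutePath('rel/path'))  // ['', 'rel/path'] (nothing to strip)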
13
node_modules/tar/lib/strip-trailing-slashes.js
generated
vendored
Normal file
@@ -0,0 +1,13 @@
// warning: extremely hot code path.
// This has been meticulously optimized for use
// within npm install on large package trees.
// Do not edit without careful benchmarking.
module.exports = str => {
  let i = str.length - 1
  let slashesStart = -1
  while (i > -1 && str.charAt(i) === '/') {
    slashesStart = i
    i--
  }
  return slashesStart === -1 ? str : str.slice(0, slashesStart)
}
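For illustration, the loop above scans backwards over trailing slashes instead of using a regex or allocation per call; sample results:

// illustrative results of the trailing-slash stripper above
const stripSlashes = require('./strip-trailing-slashes.js')
console.log(stripSlashes('foo/bar///'))  // 'foo/bar'
console.log(stripSlashes('foo'))         // 'foo' (unchanged)
console.log(stripSlashes('///'))         // '' (all slashes)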
44
node_modules/tar/lib/types.js
generated
vendored
Normal file
@@ -0,0 +1,44 @@
'use strict'
// map types from key to human-friendly name
exports.name = new Map([
  ['0', 'File'],
  // same as File
  ['', 'OldFile'],
  ['1', 'Link'],
  ['2', 'SymbolicLink'],
  // Devices and FIFOs aren't fully supported
  // they are parsed, but skipped when unpacking
  ['3', 'CharacterDevice'],
  ['4', 'BlockDevice'],
  ['5', 'Directory'],
  ['6', 'FIFO'],
  // same as File
  ['7', 'ContiguousFile'],
  // pax headers
  ['g', 'GlobalExtendedHeader'],
  ['x', 'ExtendedHeader'],
  // vendor-specific stuff
  // skip
  ['A', 'SolarisACL'],
  // like 5, but with data, which should be skipped
  ['D', 'GNUDumpDir'],
  // metadata only, skip
  ['I', 'Inode'],
  // data = link path of next file
  ['K', 'NextFileHasLongLinkpath'],
  // data = path of next file
  ['L', 'NextFileHasLongPath'],
  // skip
  ['M', 'ContinuationFile'],
  // like L
  ['N', 'OldGnuLongPath'],
  // skip
  ['S', 'SparseFile'],
  // skip
  ['V', 'TapeVolumeHeader'],
  // like x
  ['X', 'OldExtendedHeader'],
])

// map the other direction
exports.code = new Map(Array.from(exports.name).map(kv => [kv[1], kv[0]]))
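Illustrative lookups against the two maps above:

// illustrative lookups on the type maps above
const types = require('./types.js')
console.log(types.name.get('5'))          // 'Directory'
console.log(types.code.get('Directory'))  // '5'
console.log(types.name.get('?'))          // undefined (unrecognized type)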
923
node_modules/tar/lib/unpack.js
generated
vendored
Normal file
@@ -0,0 +1,923 @@
'use strict'

// the PEND/UNPEND stuff tracks whether we're ready to emit end/close yet.
// but the path reservations are required to avoid race conditions where
// parallelized unpack ops may mess with one another, due to dependencies
// (like a Link depending on its target) or destructive operations (like
// clobbering an fs object to create one of a different type.)

const assert = require('assert')
const Parser = require('./parse.js')
const fs = require('fs')
const fsm = require('fs-minipass')
const path = require('path')
const mkdir = require('./mkdir.js')
const wc = require('./winchars.js')
const pathReservations = require('./path-reservations.js')
const stripAbsolutePath = require('./strip-absolute-path.js')
const normPath = require('./normalize-windows-path.js')
const stripSlash = require('./strip-trailing-slashes.js')
const normalize = require('./normalize-unicode.js')

const ONENTRY = Symbol('onEntry')
const CHECKFS = Symbol('checkFs')
const CHECKFS2 = Symbol('checkFs2')
const PRUNECACHE = Symbol('pruneCache')
const ISREUSABLE = Symbol('isReusable')
const MAKEFS = Symbol('makeFs')
const FILE = Symbol('file')
const DIRECTORY = Symbol('directory')
const LINK = Symbol('link')
const SYMLINK = Symbol('symlink')
const HARDLINK = Symbol('hardlink')
const UNSUPPORTED = Symbol('unsupported')
const CHECKPATH = Symbol('checkPath')
const MKDIR = Symbol('mkdir')
const ONERROR = Symbol('onError')
const PENDING = Symbol('pending')
const PEND = Symbol('pend')
const UNPEND = Symbol('unpend')
const ENDED = Symbol('ended')
const MAYBECLOSE = Symbol('maybeClose')
const SKIP = Symbol('skip')
const DOCHOWN = Symbol('doChown')
const UID = Symbol('uid')
const GID = Symbol('gid')
const CHECKED_CWD = Symbol('checkedCwd')
const crypto = require('crypto')
const getFlag = require('./get-write-flag.js')
const platform = process.env.TESTING_TAR_FAKE_PLATFORM || process.platform
const isWindows = platform === 'win32'
const DEFAULT_MAX_DEPTH = 1024

// Unlinks on Windows are not atomic.
//
// This means that if you have a file entry, followed by another
// file entry with an identical name, and you cannot re-use the file
// (because it's a hardlink, or because unlink:true is set, or it's
// Windows, which does not have useful nlink values), then the unlink
// will be committed to the disk AFTER the new file has been written
// over the old one, deleting the new file.
//
// To work around this, on Windows systems, we rename the file and then
// delete the renamed file.  It's a sloppy kludge, but frankly, I do not
// know of a better way to do this, given windows' non-atomic unlink
// semantics.
//
// See: https://github.com/npm/node-tar/issues/183
/* istanbul ignore next */
const unlinkFile = (path, cb) => {
  if (!isWindows) {
    return fs.unlink(path, cb)
  }

  const name = path + '.DELETE.' + crypto.randomBytes(16).toString('hex')
  fs.rename(path, name, er => {
    if (er) {
      return cb(er)
    }
    fs.unlink(name, cb)
  })
}

/* istanbul ignore next */
const unlinkFileSync = path => {
  if (!isWindows) {
    return fs.unlinkSync(path)
  }

  const name = path + '.DELETE.' + crypto.randomBytes(16).toString('hex')
  fs.renameSync(path, name)
  fs.unlinkSync(name)
}

// this.gid, entry.gid, this.processUid
const uint32 = (a, b, c) =>
  a === a >>> 0 ? a
  : b === b >>> 0 ? b
  : c

// clear the cache if it's a case-insensitive unicode-squashing match.
// we can't know if the current file system is case-sensitive or supports
// unicode fully, so we check for similarity on the maximally compatible
// representation.  Err on the side of pruning, since all it's doing is
// preventing lstats, and it's not the end of the world if we get a false
// positive.
// Note that on windows, we always drop the entire cache whenever a
// symbolic link is encountered, because 8.3 filenames are impossible
// to reason about, and collisions are hazards rather than just failures.
const cacheKeyNormalize = path => stripSlash(normPath(normalize(path)))
  .toLowerCase()

const pruneCache = (cache, abs) => {
  abs = cacheKeyNormalize(abs)
  for (const path of cache.keys()) {
    const pnorm = cacheKeyNormalize(path)
    if (pnorm === abs || pnorm.indexOf(abs + '/') === 0) {
      cache.delete(path)
    }
  }
}

const dropCache = cache => {
  for (const key of cache.keys()) {
    cache.delete(key)
  }
}

class Unpack extends Parser {
  constructor (opt) {
    if (!opt) {
      opt = {}
    }

    opt.ondone = _ => {
      this[ENDED] = true
      this[MAYBECLOSE]()
    }

    super(opt)

    this[CHECKED_CWD] = false

    this.reservations = pathReservations()

    this.transform = typeof opt.transform === 'function' ? opt.transform : null

    this.writable = true
    this.readable = false

    this[PENDING] = 0
    this[ENDED] = false

    this.dirCache = opt.dirCache || new Map()

    if (typeof opt.uid === 'number' || typeof opt.gid === 'number') {
      // need both or neither
      if (typeof opt.uid !== 'number' || typeof opt.gid !== 'number') {
        throw new TypeError('cannot set owner without number uid and gid')
      }
      if (opt.preserveOwner) {
        throw new TypeError(
          'cannot preserve owner in archive and also set owner explicitly')
      }
      this.uid = opt.uid
      this.gid = opt.gid
      this.setOwner = true
    } else {
      this.uid = null
      this.gid = null
      this.setOwner = false
    }

    // default true for root
    if (opt.preserveOwner === undefined && typeof opt.uid !== 'number') {
      this.preserveOwner = process.getuid && process.getuid() === 0
    } else {
      this.preserveOwner = !!opt.preserveOwner
    }

    this.processUid = (this.preserveOwner || this.setOwner) && process.getuid ?
      process.getuid() : null
    this.processGid = (this.preserveOwner || this.setOwner) && process.getgid ?
      process.getgid() : null

    // prevent excessively deep nesting of subfolders
    // set to `Infinity` to remove this restriction
    this.maxDepth = typeof opt.maxDepth === 'number'
      ? opt.maxDepth
      : DEFAULT_MAX_DEPTH

    // mostly just for testing, but useful in some cases.
    // Forcibly trigger a chown on every entry, no matter what
    this.forceChown = opt.forceChown === true

    // turn ><?| in filenames into 0xf000-higher encoded forms
    this.win32 = !!opt.win32 || isWindows

    // do not unpack over files that are newer than what's in the archive
    this.newer = !!opt.newer

    // do not unpack over ANY files
    this.keep = !!opt.keep

    // do not set mtime/atime of extracted entries
    this.noMtime = !!opt.noMtime

    // allow .., absolute path entries, and unpacking through symlinks
    // without this, warn and skip .., relativize absolutes, and error
    // on symlinks in extraction path
    this.preservePaths = !!opt.preservePaths

    // unlink files and links before writing. This breaks existing hard
    // links, and removes symlink directories rather than erroring
    this.unlink = !!opt.unlink

    this.cwd = normPath(path.resolve(opt.cwd || process.cwd()))
    this.strip = +opt.strip || 0
    // if we're not chmodding, then we don't need the process umask
    this.processUmask = opt.noChmod ? 0 : process.umask()
    this.umask = typeof opt.umask === 'number' ? opt.umask : this.processUmask

    // default mode for dirs created as parents
    this.dmode = opt.dmode || (0o0777 & (~this.umask))
    this.fmode = opt.fmode || (0o0666 & (~this.umask))

    this.on('entry', entry => this[ONENTRY](entry))
  }

  // a bad or damaged archive is a warning for Parser, but an error
  // when extracting.  Mark those errors as unrecoverable, because
  // the Unpack contract cannot be met.
  warn (code, msg, data = {}) {
    if (code === 'TAR_BAD_ARCHIVE' || code === 'TAR_ABORT') {
      data.recoverable = false
    }
    return super.warn(code, msg, data)
  }

  [MAYBECLOSE] () {
    if (this[ENDED] && this[PENDING] === 0) {
      this.emit('prefinish')
      this.emit('finish')
      this.emit('end')
    }
  }

  [CHECKPATH] (entry) {
    const p = normPath(entry.path)
    const parts = p.split('/')

    if (this.strip) {
      if (parts.length < this.strip) {
        return false
      }
      if (entry.type === 'Link') {
        const linkparts = normPath(entry.linkpath).split('/')
        if (linkparts.length >= this.strip) {
          entry.linkpath = linkparts.slice(this.strip).join('/')
        } else {
          return false
        }
      }
      parts.splice(0, this.strip)
      entry.path = parts.join('/')
    }

    if (isFinite(this.maxDepth) && parts.length > this.maxDepth) {
      this.warn('TAR_ENTRY_ERROR', 'path excessively deep', {
        entry,
        path: p,
        depth: parts.length,
        maxDepth: this.maxDepth,
      })
      return false
    }

    if (!this.preservePaths) {
      if (parts.includes('..') || isWindows && /^[a-z]:\.\.$/i.test(parts[0])) {
        this.warn('TAR_ENTRY_ERROR', `path contains '..'`, {
          entry,
          path: p,
        })
        return false
      }

      // strip off the root
      const [root, stripped] = stripAbsolutePath(p)
      if (root) {
        entry.path = stripped
        this.warn('TAR_ENTRY_INFO', `stripping ${root} from absolute path`, {
          entry,
          path: p,
        })
      }
    }

    if (path.isAbsolute(entry.path)) {
      entry.absolute = normPath(path.resolve(entry.path))
    } else {
      entry.absolute = normPath(path.resolve(this.cwd, entry.path))
    }

    // if we somehow ended up with a path that escapes the cwd, and we are
    // not in preservePaths mode, then something is fishy!  This should have
    // been prevented above, so ignore this for coverage.
    /* istanbul ignore if - defense in depth */
    if (!this.preservePaths &&
        entry.absolute.indexOf(this.cwd + '/') !== 0 &&
        entry.absolute !== this.cwd) {
      this.warn('TAR_ENTRY_ERROR', 'path escaped extraction target', {
        entry,
        path: normPath(entry.path),
        resolvedPath: entry.absolute,
        cwd: this.cwd,
      })
      return false
    }

    // an archive can set properties on the extraction directory, but it
    // may not replace the cwd with a different kind of thing entirely.
    if (entry.absolute === this.cwd &&
        entry.type !== 'Directory' &&
        entry.type !== 'GNUDumpDir') {
      return false
    }

    // only encode : chars that aren't drive letter indicators
    if (this.win32) {
      const { root: aRoot } = path.win32.parse(entry.absolute)
      entry.absolute = aRoot + wc.encode(entry.absolute.slice(aRoot.length))
      const { root: pRoot } = path.win32.parse(entry.path)
      entry.path = pRoot + wc.encode(entry.path.slice(pRoot.length))
    }

    return true
  }

  [ONENTRY] (entry) {
    if (!this[CHECKPATH](entry)) {
      return entry.resume()
    }

    assert.equal(typeof entry.absolute, 'string')

    switch (entry.type) {
      case 'Directory':
      case 'GNUDumpDir':
        if (entry.mode) {
          entry.mode = entry.mode | 0o700
        }

      // eslint-disable-next-line no-fallthrough
      case 'File':
      case 'OldFile':
      case 'ContiguousFile':
      case 'Link':
      case 'SymbolicLink':
        return this[CHECKFS](entry)

      case 'CharacterDevice':
      case 'BlockDevice':
      case 'FIFO':
      default:
        return this[UNSUPPORTED](entry)
    }
  }

  [ONERROR] (er, entry) {
    // Cwd has to exist, or else nothing works. That's serious.
    // Other errors are warnings, which raise the error in strict
    // mode, but otherwise continue on.
    if (er.name === 'CwdError') {
      this.emit('error', er)
    } else {
      this.warn('TAR_ENTRY_ERROR', er, { entry })
      this[UNPEND]()
      entry.resume()
    }
  }

  [MKDIR] (dir, mode, cb) {
    mkdir(normPath(dir), {
      uid: this.uid,
      gid: this.gid,
      processUid: this.processUid,
      processGid: this.processGid,
      umask: this.processUmask,
      preserve: this.preservePaths,
      unlink: this.unlink,
      cache: this.dirCache,
      cwd: this.cwd,
      mode: mode,
      noChmod: this.noChmod,
    }, cb)
  }

  [DOCHOWN] (entry) {
    // in preserve owner mode, chown if the entry doesn't match process
    // in set owner mode, chown if setting doesn't match process
    return this.forceChown ||
      this.preserveOwner &&
      (typeof entry.uid === 'number' && entry.uid !== this.processUid ||
        typeof entry.gid === 'number' && entry.gid !== this.processGid)
      ||
      (typeof this.uid === 'number' && this.uid !== this.processUid ||
        typeof this.gid === 'number' && this.gid !== this.processGid)
  }

  [UID] (entry) {
    return uint32(this.uid, entry.uid, this.processUid)
  }

  [GID] (entry) {
    return uint32(this.gid, entry.gid, this.processGid)
  }

  [FILE] (entry, fullyDone) {
    const mode = entry.mode & 0o7777 || this.fmode
    const stream = new fsm.WriteStream(entry.absolute, {
      flags: getFlag(entry.size),
      mode: mode,
      autoClose: false,
    })
    stream.on('error', er => {
      if (stream.fd) {
        fs.close(stream.fd, () => {})
      }

      // flush all the data out so that we aren't left hanging
      // if the error wasn't actually fatal.  otherwise the parse
      // is blocked, and we never proceed.
      stream.write = () => true
      this[ONERROR](er, entry)
      fullyDone()
    })

    let actions = 1
    const done = er => {
      if (er) {
        /* istanbul ignore else - we should always have a fd by now */
        if (stream.fd) {
          fs.close(stream.fd, () => {})
        }

        this[ONERROR](er, entry)
        fullyDone()
        return
      }

      if (--actions === 0) {
        fs.close(stream.fd, er => {
          if (er) {
            this[ONERROR](er, entry)
          } else {
            this[UNPEND]()
          }
          fullyDone()
        })
      }
    }

    stream.on('finish', _ => {
      // if futimes fails, try utimes
      // if utimes fails, fail with the original error
      // same for fchown/chown
      const abs = entry.absolute
      const fd = stream.fd

      if (entry.mtime && !this.noMtime) {
        actions++
        const atime = entry.atime || new Date()
        const mtime = entry.mtime
        fs.futimes(fd, atime, mtime, er =>
          er ? fs.utimes(abs, atime, mtime, er2 => done(er2 && er))
          : done())
      }

      if (this[DOCHOWN](entry)) {
        actions++
        const uid = this[UID](entry)
        const gid = this[GID](entry)
        fs.fchown(fd, uid, gid, er =>
          er ? fs.chown(abs, uid, gid, er2 => done(er2 && er))
          : done())
      }

      done()
    })

    const tx = this.transform ? this.transform(entry) || entry : entry
    if (tx !== entry) {
      tx.on('error', er => {
        this[ONERROR](er, entry)
        fullyDone()
      })
      entry.pipe(tx)
    }
    tx.pipe(stream)
  }

  [DIRECTORY] (entry, fullyDone) {
    const mode = entry.mode & 0o7777 || this.dmode
    this[MKDIR](entry.absolute, mode, er => {
      if (er) {
        this[ONERROR](er, entry)
        fullyDone()
        return
      }

      let actions = 1
      const done = _ => {
        if (--actions === 0) {
          fullyDone()
          this[UNPEND]()
          entry.resume()
        }
      }

      if (entry.mtime && !this.noMtime) {
        actions++
        fs.utimes(entry.absolute, entry.atime || new Date(), entry.mtime, done)
      }

      if (this[DOCHOWN](entry)) {
        actions++
        fs.chown(entry.absolute, this[UID](entry), this[GID](entry), done)
      }

      done()
    })
  }

  [UNSUPPORTED] (entry) {
    entry.unsupported = true
    this.warn('TAR_ENTRY_UNSUPPORTED',
      `unsupported entry type: ${entry.type}`, { entry })
    entry.resume()
  }

  [SYMLINK] (entry, done) {
    this[LINK](entry, entry.linkpath, 'symlink', done)
  }

  [HARDLINK] (entry, done) {
    const linkpath = normPath(path.resolve(this.cwd, entry.linkpath))
    this[LINK](entry, linkpath, 'link', done)
  }

  [PEND] () {
    this[PENDING]++
  }

  [UNPEND] () {
    this[PENDING]--
    this[MAYBECLOSE]()
  }

  [SKIP] (entry) {
    this[UNPEND]()
    entry.resume()
  }

  // Check if we can reuse an existing filesystem entry safely and
  // overwrite it, rather than unlinking and recreating
  // Windows doesn't report a useful nlink, so we just never reuse entries
  [ISREUSABLE] (entry, st) {
    return entry.type === 'File' &&
      !this.unlink &&
      st.isFile() &&
      st.nlink <= 1 &&
      !isWindows
  }

  // check if a thing is there, and if so, try to clobber it
  [CHECKFS] (entry) {
    this[PEND]()
    const paths = [entry.path]
    if (entry.linkpath) {
      paths.push(entry.linkpath)
    }
    this.reservations.reserve(paths, done => this[CHECKFS2](entry, done))
  }

  [PRUNECACHE] (entry) {
    // if we are not creating a directory, and the path is in the dirCache,
    // then that means we are about to delete the directory we created
    // previously, and it is no longer going to be a directory, and neither
    // is any of its children.
    // If a symbolic link is encountered, all bets are off.  There is no
    // reasonable way to sanitize the cache in such a way we will be able to
    // avoid having filesystem collisions.  If this happens with a non-symlink
    // entry, it'll just fail to unpack, but a symlink to a directory, using an
    // 8.3 shortname or certain unicode attacks, can evade detection and lead
    // to arbitrary writes to anywhere on the system.
    if (entry.type === 'SymbolicLink') {
      dropCache(this.dirCache)
    } else if (entry.type !== 'Directory') {
      pruneCache(this.dirCache, entry.absolute)
    }
  }

  [CHECKFS2] (entry, fullyDone) {
    this[PRUNECACHE](entry)

    const done = er => {
      this[PRUNECACHE](entry)
      fullyDone(er)
    }

    const checkCwd = () => {
      this[MKDIR](this.cwd, this.dmode, er => {
        if (er) {
          this[ONERROR](er, entry)
          done()
          return
        }
        this[CHECKED_CWD] = true
        start()
      })
    }

    const start = () => {
      if (entry.absolute !== this.cwd) {
        const parent = normPath(path.dirname(entry.absolute))
        if (parent !== this.cwd) {
          return this[MKDIR](parent, this.dmode, er => {
            if (er) {
              this[ONERROR](er, entry)
              done()
              return
            }
            afterMakeParent()
          })
        }
      }
      afterMakeParent()
    }

    const afterMakeParent = () => {
      fs.lstat(entry.absolute, (lstatEr, st) => {
        if (st && (this.keep || this.newer && st.mtime > entry.mtime)) {
          this[SKIP](entry)
          done()
          return
        }
        if (lstatEr || this[ISREUSABLE](entry, st)) {
          return this[MAKEFS](null, entry, done)
        }

        if (st.isDirectory()) {
          if (entry.type === 'Directory') {
            const needChmod = !this.noChmod &&
              entry.mode &&
              (st.mode & 0o7777) !== entry.mode
            const afterChmod = er => this[MAKEFS](er, entry, done)
            if (!needChmod) {
              return afterChmod()
            }
            return fs.chmod(entry.absolute, entry.mode, afterChmod)
          }
          // Not a dir entry, have to remove it.
          // NB: the only way to end up with an entry that is the cwd
          // itself, in such a way that == does not detect, is a
          // tricky windows absolute path with UNC or 8.3 parts (and
          // preservePaths:true, or else it will have been stripped).
          // In that case, the user has opted out of path protections
          // explicitly, so if they blow away the cwd, c'est la vie.
          if (entry.absolute !== this.cwd) {
            return fs.rmdir(entry.absolute, er =>
              this[MAKEFS](er, entry, done))
          }
        }

        // not a dir, and not reusable
        // don't remove if the cwd, we want that error
        if (entry.absolute === this.cwd) {
          return this[MAKEFS](null, entry, done)
        }

        unlinkFile(entry.absolute, er =>
          this[MAKEFS](er, entry, done))
      })
    }

    if (this[CHECKED_CWD]) {
      start()
    } else {
      checkCwd()
    }
  }

  [MAKEFS] (er, entry, done) {
    if (er) {
      this[ONERROR](er, entry)
      done()
      return
    }

    switch (entry.type) {
      case 'File':
      case 'OldFile':
      case 'ContiguousFile':
        return this[FILE](entry, done)

      case 'Link':
        return this[HARDLINK](entry, done)

      case 'SymbolicLink':
        return this[SYMLINK](entry, done)

      case 'Directory':
      case 'GNUDumpDir':
        return this[DIRECTORY](entry, done)
    }
  }

  [LINK] (entry, linkpath, link, done) {
    // XXX: get the type ('symlink' or 'junction') for windows
    fs[link](linkpath, entry.absolute, er => {
      if (er) {
        this[ONERROR](er, entry)
      } else {
        this[UNPEND]()
        entry.resume()
      }
      done()
    })
  }
}

const callSync = fn => {
  try {
    return [null, fn()]
  } catch (er) {
    return [er, null]
  }
}
class UnpackSync extends Unpack {
  [MAKEFS] (er, entry) {
    return super[MAKEFS](er, entry, () => {})
  }

  [CHECKFS] (entry) {
    this[PRUNECACHE](entry)

    if (!this[CHECKED_CWD]) {
      const er = this[MKDIR](this.cwd, this.dmode)
      if (er) {
        return this[ONERROR](er, entry)
      }
      this[CHECKED_CWD] = true
    }

    // don't bother to make the parent if the current entry is the cwd,
    // we've already checked it.
    if (entry.absolute !== this.cwd) {
      const parent = normPath(path.dirname(entry.absolute))
      if (parent !== this.cwd) {
        const mkParent = this[MKDIR](parent, this.dmode)
        if (mkParent) {
          return this[ONERROR](mkParent, entry)
        }
      }
    }

    const [lstatEr, st] = callSync(() => fs.lstatSync(entry.absolute))
    if (st && (this.keep || this.newer && st.mtime > entry.mtime)) {
      return this[SKIP](entry)
    }

    if (lstatEr || this[ISREUSABLE](entry, st)) {
      return this[MAKEFS](null, entry)
    }

    if (st.isDirectory()) {
      if (entry.type === 'Directory') {
        const needChmod = !this.noChmod &&
          entry.mode &&
          (st.mode & 0o7777) !== entry.mode
        const [er] = needChmod ? callSync(() => {
          fs.chmodSync(entry.absolute, entry.mode)
        }) : []
        return this[MAKEFS](er, entry)
      }
      // not a dir entry, have to remove it
      const [er] = callSync(() => fs.rmdirSync(entry.absolute))
      this[MAKEFS](er, entry)
    }

    // not a dir, and not reusable.
    // don't remove if it's the cwd, since we want that error.
    const [er] = entry.absolute === this.cwd ? []
      : callSync(() => unlinkFileSync(entry.absolute))
    this[MAKEFS](er, entry)
  }

  [FILE] (entry, done) {
    const mode = entry.mode & 0o7777 || this.fmode

    const oner = er => {
      let closeError
      try {
        fs.closeSync(fd)
      } catch (e) {
        closeError = e
      }
      if (er || closeError) {
        this[ONERROR](er || closeError, entry)
      }
      done()
    }

    let fd
    try {
      fd = fs.openSync(entry.absolute, getFlag(entry.size), mode)
    } catch (er) {
      return oner(er)
    }
    const tx = this.transform ? this.transform(entry) || entry : entry
    if (tx !== entry) {
      tx.on('error', er => this[ONERROR](er, entry))
      entry.pipe(tx)
    }

    tx.on('data', chunk => {
      try {
        fs.writeSync(fd, chunk, 0, chunk.length)
      } catch (er) {
        oner(er)
      }
    })

    tx.on('end', _ => {
      let er = null
      // try both, falling futimes back to utimes
      // if either fails, handle the first error
      if (entry.mtime && !this.noMtime) {
        const atime = entry.atime || new Date()
        const mtime = entry.mtime
        try {
          fs.futimesSync(fd, atime, mtime)
        } catch (futimeser) {
          try {
            fs.utimesSync(entry.absolute, atime, mtime)
          } catch (utimeser) {
            er = futimeser
          }
        }
      }

      if (this[DOCHOWN](entry)) {
        const uid = this[UID](entry)
        const gid = this[GID](entry)

        try {
          fs.fchownSync(fd, uid, gid)
        } catch (fchowner) {
          try {
            fs.chownSync(entry.absolute, uid, gid)
          } catch (chowner) {
            er = er || fchowner
          }
        }
      }

      oner(er)
    })
  }

  [DIRECTORY] (entry, done) {
    const mode = entry.mode & 0o7777 || this.dmode
    const er = this[MKDIR](entry.absolute, mode)
    if (er) {
      this[ONERROR](er, entry)
      done()
      return
    }
    if (entry.mtime && !this.noMtime) {
      try {
        fs.utimesSync(entry.absolute, entry.atime || new Date(), entry.mtime)
      } catch (er) {}
    }
    if (this[DOCHOWN](entry)) {
      try {
        fs.chownSync(entry.absolute, this[UID](entry), this[GID](entry))
      } catch (er) {}
    }
    done()
    entry.resume()
  }

  [MKDIR] (dir, mode) {
    try {
      return mkdir.sync(normPath(dir), {
        uid: this.uid,
        gid: this.gid,
        processUid: this.processUid,
        processGid: this.processGid,
        umask: this.processUmask,
        preserve: this.preservePaths,
        unlink: this.unlink,
        cache: this.dirCache,
        cwd: this.cwd,
        mode: mode,
      })
    } catch (er) {
      return er
    }
  }

  [LINK] (entry, linkpath, link, done) {
    try {
      fs[link + 'Sync'](linkpath, entry.absolute)
      done()
      entry.resume()
    } catch (er) {
      return this[ONERROR](er, entry)
    }
  }
}

Unpack.Sync = UnpackSync
module.exports = Unpack
|
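
Taken together, these methods decide whether the path on disk can be reused, removed, or must be created before an entry is written. For orientation, a minimal sketch of driving the exported `Unpack` stream directly (the `archive.tar` and `output-dir` names are illustrative; `output-dir` is assumed to exist):

```js
// Sketch: pipe raw tar bytes into Unpack; entries land under cwd.
const fs = require('fs')
const tar = require('tar')

fs.createReadStream('archive.tar')
  .pipe(new tar.Unpack({ cwd: 'output-dir' }))
  .on('close', () => console.log('done extracting'))

// UnpackSync (tar.Unpack.Sync) does the same work without deferring
// to the event loop, e.g. unpack.end(fs.readFileSync('archive.tar')).
```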

40 node_modules/tar/lib/update.js generated vendored Normal file
@@ -0,0 +1,40 @@
'use strict'

// tar -u

const hlo = require('./high-level-opt.js')
const r = require('./replace.js')
// just call tar.r with the filter and mtimeCache

module.exports = (opt_, files, cb) => {
  const opt = hlo(opt_)

  if (!opt.file) {
    throw new TypeError('file is required')
  }

  if (opt.gzip || opt.brotli || opt.file.endsWith('.br') || opt.file.endsWith('.tbr')) {
    throw new TypeError('cannot append to compressed archives')
  }

  if (!files || !Array.isArray(files) || !files.length) {
    throw new TypeError('no files or directories specified')
  }

  files = Array.from(files)

  mtimeFilter(opt)
  return r(opt, files, cb)
}

const mtimeFilter = opt => {
  const filter = opt.filter

  if (!opt.mtimeCache) {
    opt.mtimeCache = new Map()
  }

  opt.filter = filter ? (path, stat) =>
    filter(path, stat) && !(opt.mtimeCache.get(path) > stat.mtime)
    : (path, stat) => !(opt.mtimeCache.get(path) > stat.mtime)
}
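
In other words, `tar.u` is `tar.r` plus an mtime filter: an entry is only re-appended when the file on disk is newer than what the archive already records. A usage sketch (the archive and file names are illustrative):

```js
// Sketch: re-append entries only if they changed since the last update.
const tar = require('tar')

tar.u({ file: 'my-tarball.tar' }, ['bundle.js', 'styles.css'])
  .then(() => console.log('archive updated'))
```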

24 node_modules/tar/lib/warn-mixin.js generated vendored Normal file
@@ -0,0 +1,24 @@
'use strict'
module.exports = Base => class extends Base {
  warn (code, message, data = {}) {
    if (this.file) {
      data.file = this.file
    }
    if (this.cwd) {
      data.cwd = this.cwd
    }
    data.code = message instanceof Error && message.code || code
    data.tarCode = code
    if (!this.strict && data.recoverable !== false) {
      if (message instanceof Error) {
        data = Object.assign(message, data)
        message = message.message
      }
      this.emit('warn', data.tarCode, message, data)
    } else if (message instanceof Error) {
      this.emit('error', Object.assign(message, data))
    } else {
      this.emit('error', Object.assign(new Error(`${code}: ${message}`), data))
    }
  }
}
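
The net effect of the mixin: in non-strict mode a recoverable problem surfaces as a `'warn'` event with a structured `data` object, while strict mode (or `recoverable: false`) upgrades it to an `'error'`. A sketch of observing this on a parser, matching the `emit('warn', ...)` call above:

```js
// Sketch: handle recoverable tar problems as warnings.
const tar = require('tar')

const parser = new tar.Parse()
parser.on('warn', (tarCode, message, data) => {
  console.warn(`recoverable ${tarCode}: ${message}`, data.file || '')
})
```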

23 node_modules/tar/lib/winchars.js generated vendored Normal file
@@ -0,0 +1,23 @@
'use strict'

// When writing files on Windows, translate the characters to their
// 0xf000 higher-encoded versions.

const raw = [
  '|',
  '<',
  '>',
  '?',
  ':',
]

const win = raw.map(char =>
  String.fromCharCode(0xf000 + char.charCodeAt(0)))

const toWin = new Map(raw.map((char, i) => [char, win[i]]))
const toRaw = new Map(win.map((char, i) => [char, raw[i]]))

module.exports = {
  encode: s => raw.reduce((s, c) => s.split(c).join(toWin.get(c)), s),
  decode: s => win.reduce((s, c) => s.split(c).join(toRaw.get(c)), s),
}
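
So `encode` maps each reserved character onto the private-use codepoint at `0xf000` plus its char code, and `decode` reverses the mapping, making the translation lossless. A quick sketch (assuming the module is required directly from its vendored path, which is not part of tar's public API):

```js
// Sketch: reserved Windows filename characters survive a round trip.
const winchars = require('tar/lib/winchars.js')

const encoded = winchars.encode('a<b>c:d')
console.log(encoded === 'a<b>c:d')     // false: <, >, : were remapped
console.log(winchars.decode(encoded))  // 'a<b>c:d'
```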

546 node_modules/tar/lib/write-entry.js generated vendored Normal file
@@ -0,0 +1,546 @@
'use strict'
const { Minipass } = require('minipass')
const Pax = require('./pax.js')
const Header = require('./header.js')
const fs = require('fs')
const path = require('path')
const normPath = require('./normalize-windows-path.js')
const stripSlash = require('./strip-trailing-slashes.js')

const prefixPath = (path, prefix) => {
  if (!prefix) {
    return normPath(path)
  }
  path = normPath(path).replace(/^\.(\/|$)/, '')
  return stripSlash(prefix) + '/' + path
}

const maxReadSize = 16 * 1024 * 1024
const PROCESS = Symbol('process')
const FILE = Symbol('file')
const DIRECTORY = Symbol('directory')
const SYMLINK = Symbol('symlink')
const HARDLINK = Symbol('hardlink')
const HEADER = Symbol('header')
const READ = Symbol('read')
const LSTAT = Symbol('lstat')
const ONLSTAT = Symbol('onlstat')
const ONREAD = Symbol('onread')
const ONREADLINK = Symbol('onreadlink')
const OPENFILE = Symbol('openfile')
const ONOPENFILE = Symbol('onopenfile')
const CLOSE = Symbol('close')
const MODE = Symbol('mode')
const AWAITDRAIN = Symbol('awaitDrain')
const ONDRAIN = Symbol('ondrain')
const PREFIX = Symbol('prefix')
const HAD_ERROR = Symbol('hadError')
const warner = require('./warn-mixin.js')
const winchars = require('./winchars.js')
const stripAbsolutePath = require('./strip-absolute-path.js')

const modeFix = require('./mode-fix.js')

const WriteEntry = warner(class WriteEntry extends Minipass {
  constructor (p, opt) {
    opt = opt || {}
    super(opt)
    if (typeof p !== 'string') {
      throw new TypeError('path is required')
    }
    this.path = normPath(p)
    // suppress atime, ctime, uid, gid, uname, gname
    this.portable = !!opt.portable
    // until node has builtin pwnam functions, this'll have to do
    this.myuid = process.getuid && process.getuid() || 0
    this.myuser = process.env.USER || ''
    this.maxReadSize = opt.maxReadSize || maxReadSize
    this.linkCache = opt.linkCache || new Map()
    this.statCache = opt.statCache || new Map()
    this.preservePaths = !!opt.preservePaths
    this.cwd = normPath(opt.cwd || process.cwd())
    this.strict = !!opt.strict
    this.noPax = !!opt.noPax
    this.noMtime = !!opt.noMtime
    this.mtime = opt.mtime || null
    this.prefix = opt.prefix ? normPath(opt.prefix) : null

    this.fd = null
    this.blockLen = null
    this.blockRemain = null
    this.buf = null
    this.offset = null
    this.length = null
    this.pos = null
    this.remain = null

    if (typeof opt.onwarn === 'function') {
      this.on('warn', opt.onwarn)
    }

    let pathWarn = false
    if (!this.preservePaths) {
      const [root, stripped] = stripAbsolutePath(this.path)
      if (root) {
        this.path = stripped
        pathWarn = root
      }
    }

    this.win32 = !!opt.win32 || process.platform === 'win32'
    if (this.win32) {
      // force the \ to / normalization, since we might not *actually*
      // be on windows, but want \ to be considered a path separator.
      this.path = winchars.decode(this.path.replace(/\\/g, '/'))
      p = p.replace(/\\/g, '/')
    }

    this.absolute = normPath(opt.absolute || path.resolve(this.cwd, p))

    if (this.path === '') {
      this.path = './'
    }

    if (pathWarn) {
      this.warn('TAR_ENTRY_INFO', `stripping ${pathWarn} from absolute path`, {
        entry: this,
        path: pathWarn + this.path,
      })
    }

    if (this.statCache.has(this.absolute)) {
      this[ONLSTAT](this.statCache.get(this.absolute))
    } else {
      this[LSTAT]()
    }
  }

  emit (ev, ...data) {
    if (ev === 'error') {
      this[HAD_ERROR] = true
    }
    return super.emit(ev, ...data)
  }

  [LSTAT] () {
    fs.lstat(this.absolute, (er, stat) => {
      if (er) {
        return this.emit('error', er)
      }
      this[ONLSTAT](stat)
    })
  }

  [ONLSTAT] (stat) {
    this.statCache.set(this.absolute, stat)
    this.stat = stat
    if (!stat.isFile()) {
      stat.size = 0
    }
    this.type = getType(stat)
    this.emit('stat', stat)
    this[PROCESS]()
  }

  [PROCESS] () {
    switch (this.type) {
      case 'File': return this[FILE]()
      case 'Directory': return this[DIRECTORY]()
      case 'SymbolicLink': return this[SYMLINK]()
      // unsupported types are ignored.
      default: return this.end()
    }
  }

  [MODE] (mode) {
    return modeFix(mode, this.type === 'Directory', this.portable)
  }

  [PREFIX] (path) {
    return prefixPath(path, this.prefix)
  }

  [HEADER] () {
    if (this.type === 'Directory' && this.portable) {
      this.noMtime = true
    }

    this.header = new Header({
      path: this[PREFIX](this.path),
      // only apply the prefix to hard links.
      linkpath: this.type === 'Link' ? this[PREFIX](this.linkpath)
      : this.linkpath,
      // only the permissions and setuid/setgid/sticky bitflags
      // not the higher-order bits that specify file type
      mode: this[MODE](this.stat.mode),
      uid: this.portable ? null : this.stat.uid,
      gid: this.portable ? null : this.stat.gid,
      size: this.stat.size,
      mtime: this.noMtime ? null : this.mtime || this.stat.mtime,
      type: this.type,
      uname: this.portable ? null :
        this.stat.uid === this.myuid ? this.myuser : '',
      atime: this.portable ? null : this.stat.atime,
      ctime: this.portable ? null : this.stat.ctime,
    })

    if (this.header.encode() && !this.noPax) {
      super.write(new Pax({
        atime: this.portable ? null : this.header.atime,
        ctime: this.portable ? null : this.header.ctime,
        gid: this.portable ? null : this.header.gid,
        mtime: this.noMtime ? null : this.mtime || this.header.mtime,
        path: this[PREFIX](this.path),
        linkpath: this.type === 'Link' ? this[PREFIX](this.linkpath)
        : this.linkpath,
        size: this.header.size,
        uid: this.portable ? null : this.header.uid,
        uname: this.portable ? null : this.header.uname,
        dev: this.portable ? null : this.stat.dev,
        ino: this.portable ? null : this.stat.ino,
        nlink: this.portable ? null : this.stat.nlink,
      }).encode())
    }
    super.write(this.header.block)
  }

  [DIRECTORY] () {
    if (this.path.slice(-1) !== '/') {
      this.path += '/'
    }
    this.stat.size = 0
    this[HEADER]()
    this.end()
  }

  [SYMLINK] () {
    fs.readlink(this.absolute, (er, linkpath) => {
      if (er) {
        return this.emit('error', er)
      }
      this[ONREADLINK](linkpath)
    })
  }

  [ONREADLINK] (linkpath) {
    this.linkpath = normPath(linkpath)
    this[HEADER]()
    this.end()
  }

  [HARDLINK] (linkpath) {
    this.type = 'Link'
    this.linkpath = normPath(path.relative(this.cwd, linkpath))
    this.stat.size = 0
    this[HEADER]()
    this.end()
  }

  [FILE] () {
    if (this.stat.nlink > 1) {
      const linkKey = this.stat.dev + ':' + this.stat.ino
      if (this.linkCache.has(linkKey)) {
        const linkpath = this.linkCache.get(linkKey)
        if (linkpath.indexOf(this.cwd) === 0) {
          return this[HARDLINK](linkpath)
        }
      }
      this.linkCache.set(linkKey, this.absolute)
    }

    this[HEADER]()
    if (this.stat.size === 0) {
      return this.end()
    }

    this[OPENFILE]()
  }

  [OPENFILE] () {
    fs.open(this.absolute, 'r', (er, fd) => {
      if (er) {
        return this.emit('error', er)
      }
      this[ONOPENFILE](fd)
    })
  }

  [ONOPENFILE] (fd) {
    this.fd = fd
    if (this[HAD_ERROR]) {
      return this[CLOSE]()
    }

    this.blockLen = 512 * Math.ceil(this.stat.size / 512)
    this.blockRemain = this.blockLen
    const bufLen = Math.min(this.blockLen, this.maxReadSize)
    this.buf = Buffer.allocUnsafe(bufLen)
    this.offset = 0
    this.pos = 0
    this.remain = this.stat.size
    this.length = this.buf.length
    this[READ]()
  }

  [READ] () {
    const { fd, buf, offset, length, pos } = this
    fs.read(fd, buf, offset, length, pos, (er, bytesRead) => {
      if (er) {
        // ignoring the error from close(2) is a bad practice, but at
        // this point we already have an error, don't need another one
        return this[CLOSE](() => this.emit('error', er))
      }
      this[ONREAD](bytesRead)
    })
  }

  [CLOSE] (cb) {
    fs.close(this.fd, cb)
  }

  [ONREAD] (bytesRead) {
    if (bytesRead <= 0 && this.remain > 0) {
      const er = new Error('encountered unexpected EOF')
      er.path = this.absolute
      er.syscall = 'read'
      er.code = 'EOF'
      return this[CLOSE](() => this.emit('error', er))
    }

    if (bytesRead > this.remain) {
      const er = new Error('did not encounter expected EOF')
      er.path = this.absolute
      er.syscall = 'read'
      er.code = 'EOF'
      return this[CLOSE](() => this.emit('error', er))
    }

    // null out the rest of the buffer, if we could fit the block padding
    // at the end of this loop, we've incremented bytesRead and this.remain
    // to be incremented up to the blockRemain level, as if we had expected
    // to get a null-padded file, and read it until the end. then we will
    // decrement both remain and blockRemain by bytesRead, and know that we
    // reached the expected EOF, without any null buffer to append.
    if (bytesRead === this.remain) {
      for (let i = bytesRead; i < this.length && bytesRead < this.blockRemain; i++) {
        this.buf[i + this.offset] = 0
        bytesRead++
        this.remain++
      }
    }

    const writeBuf = this.offset === 0 && bytesRead === this.buf.length ?
      this.buf : this.buf.slice(this.offset, this.offset + bytesRead)

    const flushed = this.write(writeBuf)
    if (!flushed) {
      this[AWAITDRAIN](() => this[ONDRAIN]())
    } else {
      this[ONDRAIN]()
    }
  }

  [AWAITDRAIN] (cb) {
    this.once('drain', cb)
  }

  write (writeBuf) {
    if (this.blockRemain < writeBuf.length) {
      const er = new Error('writing more data than expected')
      er.path = this.absolute
      return this.emit('error', er)
    }
    this.remain -= writeBuf.length
    this.blockRemain -= writeBuf.length
    this.pos += writeBuf.length
    this.offset += writeBuf.length
    return super.write(writeBuf)
  }

  [ONDRAIN] () {
    if (!this.remain) {
      if (this.blockRemain) {
        super.write(Buffer.alloc(this.blockRemain))
      }
      return this[CLOSE](er => er ? this.emit('error', er) : this.end())
    }

    if (this.offset >= this.length) {
      // if we only have a smaller bit left to read, alloc a smaller buffer
      // otherwise, keep it the same length it was before.
      this.buf = Buffer.allocUnsafe(Math.min(this.blockRemain, this.buf.length))
      this.offset = 0
    }
    this.length = this.buf.length - this.offset
    this[READ]()
  }
})

class WriteEntrySync extends WriteEntry {
  [LSTAT] () {
    this[ONLSTAT](fs.lstatSync(this.absolute))
  }

  [SYMLINK] () {
    this[ONREADLINK](fs.readlinkSync(this.absolute))
  }

  [OPENFILE] () {
    this[ONOPENFILE](fs.openSync(this.absolute, 'r'))
  }

  [READ] () {
    let threw = true
    try {
      const { fd, buf, offset, length, pos } = this
      const bytesRead = fs.readSync(fd, buf, offset, length, pos)
      this[ONREAD](bytesRead)
      threw = false
    } finally {
      // ignoring the error from close(2) is a bad practice, but at
      // this point we already have an error, don't need another one
      if (threw) {
        try {
          this[CLOSE](() => {})
        } catch (er) {}
      }
    }
  }

  [AWAITDRAIN] (cb) {
    cb()
  }

  [CLOSE] (cb) {
    fs.closeSync(this.fd)
    cb()
  }
}

const WriteEntryTar = warner(class WriteEntryTar extends Minipass {
  constructor (readEntry, opt) {
    opt = opt || {}
    super(opt)
    this.preservePaths = !!opt.preservePaths
    this.portable = !!opt.portable
    this.strict = !!opt.strict
    this.noPax = !!opt.noPax
    this.noMtime = !!opt.noMtime

    this.readEntry = readEntry
    this.type = readEntry.type
    if (this.type === 'Directory' && this.portable) {
      this.noMtime = true
    }

    this.prefix = opt.prefix || null

    this.path = normPath(readEntry.path)
    this.mode = this[MODE](readEntry.mode)
    this.uid = this.portable ? null : readEntry.uid
    this.gid = this.portable ? null : readEntry.gid
    this.uname = this.portable ? null : readEntry.uname
    this.gname = this.portable ? null : readEntry.gname
    this.size = readEntry.size
    this.mtime = this.noMtime ? null : opt.mtime || readEntry.mtime
    this.atime = this.portable ? null : readEntry.atime
    this.ctime = this.portable ? null : readEntry.ctime
    this.linkpath = normPath(readEntry.linkpath)

    if (typeof opt.onwarn === 'function') {
      this.on('warn', opt.onwarn)
    }

    let pathWarn = false
    if (!this.preservePaths) {
      const [root, stripped] = stripAbsolutePath(this.path)
      if (root) {
        this.path = stripped
        pathWarn = root
      }
    }

    this.remain = readEntry.size
    this.blockRemain = readEntry.startBlockSize

    this.header = new Header({
      path: this[PREFIX](this.path),
      linkpath: this.type === 'Link' ? this[PREFIX](this.linkpath)
      : this.linkpath,
      // only the permissions and setuid/setgid/sticky bitflags
      // not the higher-order bits that specify file type
      mode: this.mode,
      uid: this.portable ? null : this.uid,
      gid: this.portable ? null : this.gid,
      size: this.size,
      mtime: this.noMtime ? null : this.mtime,
      type: this.type,
      uname: this.portable ? null : this.uname,
      atime: this.portable ? null : this.atime,
      ctime: this.portable ? null : this.ctime,
    })

    if (pathWarn) {
      this.warn('TAR_ENTRY_INFO', `stripping ${pathWarn} from absolute path`, {
        entry: this,
        path: pathWarn + this.path,
      })
    }

    if (this.header.encode() && !this.noPax) {
      super.write(new Pax({
        atime: this.portable ? null : this.atime,
        ctime: this.portable ? null : this.ctime,
        gid: this.portable ? null : this.gid,
        mtime: this.noMtime ? null : this.mtime,
        path: this[PREFIX](this.path),
        linkpath: this.type === 'Link' ? this[PREFIX](this.linkpath)
        : this.linkpath,
        size: this.size,
        uid: this.portable ? null : this.uid,
        uname: this.portable ? null : this.uname,
        dev: this.portable ? null : this.readEntry.dev,
        ino: this.portable ? null : this.readEntry.ino,
        nlink: this.portable ? null : this.readEntry.nlink,
      }).encode())
    }

    super.write(this.header.block)
    readEntry.pipe(this)
  }

  [PREFIX] (path) {
    return prefixPath(path, this.prefix)
  }

  [MODE] (mode) {
    return modeFix(mode, this.type === 'Directory', this.portable)
  }

  write (data) {
    const writeLen = data.length
    if (writeLen > this.blockRemain) {
      throw new Error('writing more to entry than is appropriate')
    }
    this.blockRemain -= writeLen
    return super.write(data)
  }

  end () {
    if (this.blockRemain) {
      super.write(Buffer.alloc(this.blockRemain))
    }
    return super.end()
  }
})

WriteEntry.Sync = WriteEntrySync
WriteEntry.Tar = WriteEntryTar

const getType = stat =>
  stat.isFile() ? 'File'
  : stat.isDirectory() ? 'Directory'
  : stat.isSymbolicLink() ? 'SymbolicLink'
  : 'Unsupported'

module.exports = WriteEntry
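
Putting the pieces together: a `WriteEntry` reads one path from disk and emits the header block plus the 512-byte-padded body, so its output can be collected or piped wherever raw tar bytes are needed. A minimal sketch (the `hello.txt` path is illustrative, and constructing entries directly like this is normally `Pack`'s job):

```js
// Sketch: turn one on-disk file into raw tar entry bytes.
const tar = require('tar')

const entry = new tar.WriteEntry('hello.txt', { cwd: process.cwd() })
entry.on('data', chunk => console.log('tar bytes:', chunk.length))
entry.on('end', () => console.log('entry complete'))
```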

1 node_modules/tar/node_modules/.bin/mkdirp generated vendored Symbolic link
@@ -0,0 +1 @@
../mkdirp/bin/cmd.js

15 node_modules/tar/node_modules/minipass/LICENSE generated vendored Normal file
@@ -0,0 +1,15 @@
The ISC License

Copyright (c) 2017-2023 npm, Inc., Isaac Z. Schlueter, and Contributors

Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

769 node_modules/tar/node_modules/minipass/README.md generated vendored Normal file
@@ -0,0 +1,769 @@
# minipass

A _very_ minimal implementation of a [PassThrough
stream](https://nodejs.org/api/stream.html#stream_class_stream_passthrough)

[It's very
fast](https://docs.google.com/spreadsheets/d/1K_HR5oh3r80b8WVMWCPPjfuWXUgfkmhlX7FGI6JJ8tY/edit?usp=sharing)
for objects, strings, and buffers.

Supports `pipe()`ing (including multi-`pipe()` and backpressure
transmission), buffering data until either a `data` event handler
or `pipe()` is added (so you don't lose the first chunk), and
most other cases where PassThrough is a good idea.

There is a `read()` method, but it's much more efficient to
consume data from this stream via `'data'` events or by calling
`pipe()` into some other stream. Calling `read()` requires the
buffer to be flattened in some cases, which requires copying
memory.

If you set `objectMode: true` in the options, then whatever is
written will be emitted. Otherwise, it'll do a minimal amount of
Buffer copying to ensure proper Streams semantics when `read(n)`
is called.

`objectMode` can also be set by doing `stream.objectMode = true`,
or by writing any non-string/non-buffer data. `objectMode` cannot
be set to false once it is set.

This is not a `through` or `through2` stream. It doesn't
transform the data, it just passes it right through. If you want
to transform the data, extend the class, and override the
`write()` method. Once you're done transforming the data however
you want, call `super.write()` with the transform output.

For some examples of streams that extend Minipass in various
ways, check out:

- [minizlib](http://npm.im/minizlib)
- [fs-minipass](http://npm.im/fs-minipass)
- [tar](http://npm.im/tar)
- [minipass-collect](http://npm.im/minipass-collect)
- [minipass-flush](http://npm.im/minipass-flush)
- [minipass-pipeline](http://npm.im/minipass-pipeline)
- [tap](http://npm.im/tap)
- [tap-parser](http://npm.im/tap-parser)
- [treport](http://npm.im/treport)
- [minipass-fetch](http://npm.im/minipass-fetch)
- [pacote](http://npm.im/pacote)
- [make-fetch-happen](http://npm.im/make-fetch-happen)
- [cacache](http://npm.im/cacache)
- [ssri](http://npm.im/ssri)
- [npm-registry-fetch](http://npm.im/npm-registry-fetch)
- [minipass-json-stream](http://npm.im/minipass-json-stream)
- [minipass-sized](http://npm.im/minipass-sized)

## Differences from Node.js Streams

There are several things that make Minipass streams different
from (and in some ways superior to) Node.js core streams.

Please read these caveats if you are familiar with node-core
streams and intend to use Minipass streams in your programs.

You can avoid most of these differences entirely (for a very
small performance penalty) by setting `{async: true}` in the
constructor options.

### Timing

Minipass streams are designed to support synchronous use-cases.
Thus, data is emitted as soon as it is available, always. It is
buffered until read, but no longer. Another way to look at it is
that Minipass streams are exactly as synchronous as the logic
that writes into them.

This can be surprising if your code relies on
`PassThrough.write()` always providing data on the next tick
rather than the current one, or being able to call `resume()` and
not have the entire buffer disappear immediately.

However, without this synchronicity guarantee, there would be no
way for Minipass to achieve the speeds it does, or support the
synchronous use cases that it does. Simply put, waiting takes
time.

This non-deferring approach makes Minipass streams much easier to
reason about, especially in the context of Promises and other
flow-control mechanisms.

Example:

```js
// hybrid module, either works
import { Minipass } from 'minipass'
// or:
const { Minipass } = require('minipass')

const stream = new Minipass()
stream.on('data', () => console.log('data event'))
console.log('before write')
stream.write('hello')
console.log('after write')
// output:
// before write
// data event
// after write
```

### Exception: Async Opt-In

If you wish to have a Minipass stream with behavior that more
closely mimics Node.js core streams, you can set the stream in
async mode either by setting `async: true` in the constructor
options, or by setting `stream.async = true` later on.

```js
// hybrid module, either works
import { Minipass } from 'minipass'
// or:
const { Minipass } = require('minipass')

const asyncStream = new Minipass({ async: true })
asyncStream.on('data', () => console.log('data event'))
console.log('before write')
asyncStream.write('hello')
console.log('after write')
// output:
// before write
// after write
// data event <-- this is deferred until the next tick
```

Switching _out_ of async mode is unsafe, as it could cause data
corruption, and so is not enabled. Example:

```js
import { Minipass } from 'minipass'
const stream = new Minipass({ encoding: 'utf8' })
stream.on('data', chunk => console.log(chunk))
stream.async = true
console.log('before writes')
stream.write('hello')
setStreamSyncAgainSomehow(stream) // <-- this doesn't actually exist!
stream.write('world')
console.log('after writes')
// hypothetical output would be:
// before writes
// world
// after writes
// hello
// NOT GOOD!
```

To avoid this problem, once set into async mode, any attempt to
make the stream sync again will be ignored.

```js
const { Minipass } = require('minipass')
const stream = new Minipass({ encoding: 'utf8' })
stream.on('data', chunk => console.log(chunk))
stream.async = true
console.log('before writes')
stream.write('hello')
stream.async = false // <-- no-op, stream already async
stream.write('world')
console.log('after writes')
// actual output:
// before writes
// after writes
// hello
// world
```

### No High/Low Water Marks

Node.js core streams will optimistically fill up a buffer,
returning `true` on all writes until the limit is hit, even if
the data has nowhere to go. Then, they will not attempt to draw
more data in until the buffer size dips below a minimum value.

Minipass streams are much simpler. The `write()` method will
return `true` if the data has somewhere to go (which is to say,
given the timing guarantees, that the data is already there by
the time `write()` returns).

If the data has nowhere to go, then `write()` returns false, and
the data sits in a buffer, to be drained out immediately as soon
as anyone consumes it.

Since nothing is ever buffered unnecessarily, there is much less
copying of data, and less bookkeeping about buffer capacity levels.
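
A short sketch of that contract in practice, using a plain Minipass with no consumer attached at first (event ordering here reflects the docs above):

```js
const { Minipass } = require('minipass')

const mp = new Minipass()
console.log(mp.write('queued')) // false: nothing is consuming yet
mp.on('drain', () => console.log('drained'))
// attaching a 'data' listener consumes the buffer, then 'drain' fires
mp.on('data', c => console.log('got', c.toString()))
```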

### Hazards of Buffering (or: Why Minipass Is So Fast)

Since data written to a Minipass stream is immediately written
all the way through the pipeline, and `write()` always returns
true/false based on whether the data was fully flushed,
backpressure is communicated immediately to the upstream caller.
This minimizes buffering.

Consider this case:

```js
const { PassThrough } = require('stream')
const p1 = new PassThrough({ highWaterMark: 1024 })
const p2 = new PassThrough({ highWaterMark: 1024 })
const p3 = new PassThrough({ highWaterMark: 1024 })
const p4 = new PassThrough({ highWaterMark: 1024 })

p1.pipe(p2).pipe(p3).pipe(p4)
p4.on('data', () => console.log('made it through'))

// this returns false and buffers, then writes to p2 on next tick (1)
// p2 returns false and buffers, pausing p1, then writes to p3 on next tick (2)
// p3 returns false and buffers, pausing p2, then writes to p4 on next tick (3)
// p4 returns false and buffers, pausing p3, then emits 'data' and 'drain'
// on next tick (4)
// p3 sees p4's 'drain' event, and calls resume(), emitting 'resume' and
// 'drain' on next tick (5)
// p2 sees p3's 'drain', calls resume(), emits 'resume' and 'drain' on next tick (6)
// p1 sees p2's 'drain', calls resume(), emits 'resume' and 'drain' on next
// tick (7)

p1.write(Buffer.alloc(2048)) // returns false
```

Along the way, the data was buffered and deferred at each stage,
and multiple event deferrals happened, for an unblocked pipeline
where it was perfectly safe to write all the way through!

Furthermore, setting a `highWaterMark` of `1024` might lead
someone reading the code to think an advisory maximum of 1KiB is
being set for the pipeline. However, the actual advisory
buffering level is the _sum_ of `highWaterMark` values, since
each one has its own bucket.

Consider the Minipass case:

```js
const m1 = new Minipass()
const m2 = new Minipass()
const m3 = new Minipass()
const m4 = new Minipass()

m1.pipe(m2).pipe(m3).pipe(m4)
m4.on('data', () => console.log('made it through'))

// m1 is flowing, so it writes the data to m2 immediately
// m2 is flowing, so it writes the data to m3 immediately
// m3 is flowing, so it writes the data to m4 immediately
// m4 is flowing, so it fires the 'data' event immediately, returns true
// m4's write returned true, so m3 is still flowing, returns true
// m3's write returned true, so m2 is still flowing, returns true
// m2's write returned true, so m1 is still flowing, returns true
// No event deferrals or buffering along the way!

m1.write(Buffer.alloc(2048)) // returns true
```

It is extremely unlikely that you _don't_ want to buffer any data
written, or _ever_ buffer data that can be flushed all the way
through. Neither node-core streams nor Minipass ever fail to
buffer written data, but node-core streams do a lot of
unnecessary buffering and pausing.

As always, the faster implementation is the one that does less
stuff and waits less time to do it.

### Immediately emit `end` for empty streams (when not paused)

If a stream is not paused, and `end()` is called before writing
any data into it, then it will emit `end` immediately.

If you have logic that occurs on the `end` event which you don't
want to potentially happen immediately (for example, closing file
descriptors, moving on to the next entry in an archive parse
stream, etc.) then be sure to call `stream.pause()` on creation,
and then `stream.resume()` once you are ready to respond to the
`end` event.

However, this is _usually_ not a problem because:

### Emit `end` When Asked

One hazard of immediately emitting `'end'` is that you may not
yet have had a chance to add a listener. In order to avoid this
hazard, Minipass streams safely re-emit the `'end'` event if a
new listener is added after `'end'` has been emitted.

Ie, if you do `stream.on('end', someFunction)`, and the stream
has already emitted `end`, then it will call the handler right
away. (You can think of this somewhat like attaching a new
`.then(fn)` to a previously-resolved Promise.)

To keep handlers from being called multiple times when they would
not expect multiple ends to occur, all listeners are removed from
the `'end'` event whenever it is emitted.
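
A sketch of the re-emit behavior described above:

```js
const { Minipass } = require('minipass')

const mp = new Minipass()
mp.end() // empty and not paused, so 'end' fires immediately
// the late listener is still called, like .then() on a settled promise
mp.on('end', () => console.log('still told about the end'))
```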

### Emit `error` When Asked

The most recent error object passed to the `'error'` event is
stored on the stream. If a new `'error'` event handler is added,
and an error was previously emitted, then the event handler will
be called immediately (or on `process.nextTick` in the case of
async streams).

This makes it much more difficult to end up trying to interact
with a broken stream, if the error handler is added after an
error was previously emitted.
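
A sketch of the stored-error behavior:

```js
const { Minipass } = require('minipass')

const mp = new Minipass()
mp.on('error', () => {}) // swallow the first emit so the process survives
mp.emit('error', new Error('boom'))
// a handler added after the fact still sees the stored error
mp.on('error', er => console.log('late handler saw:', er.message))
```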

### Impact of "immediate flow" on Tee-streams

A "tee stream" is a stream piping to multiple destinations:

```js
const tee = new Minipass()
tee.pipe(dest1)
tee.pipe(dest2)
tee.write('foo') // goes to both destinations
```

Since Minipass streams _immediately_ process any pending data
through the pipeline when a new pipe destination is added, this
can have surprising effects, especially when a stream comes in
from some other function and may or may not have data in its
buffer.

```js
// WARNING! WILL LOSE DATA!
const src = new Minipass()
src.write('foo')
src.pipe(dest1) // 'foo' chunk flows to dest1 immediately, and is gone
src.pipe(dest2) // gets nothing!
```

One solution is to create a dedicated tee-stream junction that
pipes to both locations, and then pipe to _that_ instead.

```js
// Safe example: tee to both places
const src = new Minipass()
src.write('foo')
const tee = new Minipass()
tee.pipe(dest1)
tee.pipe(dest2)
src.pipe(tee) // tee gets 'foo', pipes to both locations
```

The same caveat applies to `on('data')` event listeners. The
first one added will _immediately_ receive all of the data,
leaving nothing for the second:

```js
// WARNING! WILL LOSE DATA!
const src = new Minipass()
src.write('foo')
src.on('data', handler1) // receives 'foo' right away
src.on('data', handler2) // nothing to see here!
```

A dedicated tee-stream can be used in this case as well:

```js
// Safe example: tee to both data handlers
const src = new Minipass()
src.write('foo')
const tee = new Minipass()
tee.on('data', handler1)
tee.on('data', handler2)
src.pipe(tee)
```

All of the hazards in this section are avoided by setting `{
async: true }` in the Minipass constructor, or by setting
`stream.async = true` afterwards. Note that this does add some
overhead, so should only be done in cases where you are willing
to lose a bit of performance in order to avoid having to refactor
program logic.

## USAGE

It's a stream! Use it like a stream and it'll most likely do what
you want.

```js
import { Minipass } from 'minipass'
const mp = new Minipass(options) // optional: { encoding, objectMode }
mp.write('foo')
mp.pipe(someOtherStream)
mp.end('bar')
```

### OPTIONS

- `encoding` How would you like the data coming _out_ of the
  stream to be encoded? Accepts any values that can be passed to
  `Buffer.toString()`.
- `objectMode` Emit data exactly as it comes in. This will be
  flipped on by default if you write() something other than a
  string or Buffer at any point. Setting `objectMode: true` will
  prevent setting any encoding value.
- `async` Defaults to `false`. Set to `true` to defer data
  emission until next tick. This reduces performance slightly,
  but makes Minipass streams use timing behavior closer to Node
  core streams. See [Timing](#timing) for more details.
- `signal` An `AbortSignal` that will cause the stream to unhook
  itself from everything and become as inert as possible. Note
  that providing a `signal` parameter will make `'error'` events
  no longer throw if they are unhandled, but they will still be
  emitted to handlers if any are attached.
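
A sketch combining the options listed above (assumes a modern Node with a global `AbortController`):

```js
const { Minipass } = require('minipass')

const ac = new AbortController()
// utf8 strings out, node-core-like deferred timing, abortable
const mp = new Minipass({ encoding: 'utf8', async: true, signal: ac.signal })
mp.on('data', s => console.log(typeof s, s)) // 'string', on next tick
mp.write(Buffer.from('hi'))
```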

### API

Implements the user-facing portions of Node.js's `Readable` and
`Writable` streams.

### Methods

- `write(chunk, [encoding], [callback])` - Put data in. (Note
  that, in the base Minipass class, the same data will come out.)
  Returns `false` if the stream will buffer the next write, or
  true if it's still in "flowing" mode.
- `end([chunk, [encoding]], [callback])` - Signal that you have
  no more data to write. This will queue an `end` event to be
  fired when all the data has been consumed.
- `setEncoding(encoding)` - Set the encoding for data coming off
  the stream. This can only be done once.
- `pause()` - No more data for a while, please. This also
  prevents `end` from being emitted for empty streams until the
  stream is resumed.
- `resume()` - Resume the stream. If there's data in the buffer,
  it is all discarded. Any buffered events are immediately
  emitted.
- `pipe(dest)` - Send all output to the stream provided. When
  data is emitted, it is immediately written to any and all pipe
  destinations. (Or written on next tick in `async` mode.)
- `unpipe(dest)` - Stop piping to the destination stream. This is
  immediate, meaning that any asynchronously queued data will
  _not_ make it to the destination when running in `async` mode.
  - `options.end` - Boolean, end the destination stream when the
    source stream ends. Default `true`.
  - `options.proxyErrors` - Boolean, proxy `error` events from
    the source stream to the destination stream. Note that errors
    are _not_ proxied after the pipeline terminates, either due
    to the source emitting `'end'` or manually unpiping with
    `src.unpipe(dest)`. Default `false`.
- `on(ev, fn)`, `emit(ev, fn)` - Minipass streams are
  EventEmitters. Some events are given special treatment,
  however. (See below under "events".)
- `promise()` - Returns a Promise that resolves when the stream
  emits `end`, or rejects if the stream emits `error`.
- `collect()` - Return a Promise that resolves on `end` with an
  array containing each chunk of data that was emitted, or
  rejects if the stream emits `error`. Note that this consumes
  the stream data.
- `concat()` - Same as `collect()`, but concatenates the data
  into a single Buffer object. Will reject the returned promise
  if the stream is in objectMode, or if it goes into objectMode
  by the end of the data.
- `read(n)` - Consume `n` bytes of data out of the buffer. If `n`
  is not provided, then consume all of it. If `n` bytes are not
  available, then it returns null. **Note** consuming streams in
  this way is less efficient, and can lead to unnecessary Buffer
  copying.
- `destroy([er])` - Destroy the stream. If an error is provided,
  then an `'error'` event is emitted. If the stream has a
  `close()` method, and has not emitted a `'close'` event yet,
  then `stream.close()` will be called. Any Promises returned by
  `.promise()`, `.collect()` or `.concat()` will be rejected.
  After being destroyed, writing to the stream will emit an
  error. No more data will be emitted if the stream is destroyed,
  even if it was previously buffered.

### Properties

- `bufferLength` Read-only. Total number of bytes buffered, or in
  the case of objectMode, the total number of objects.
- `encoding` The encoding that has been set. (Setting this is
  equivalent to calling `setEncoding(enc)` and has the same
  prohibition against setting multiple times.)
- `flowing` Read-only. Boolean indicating whether a chunk written
  to the stream will be immediately emitted.
- `emittedEnd` Read-only. Boolean indicating whether the end-ish
  events (ie, `end`, `prefinish`, `finish`) have been emitted.
  Note that listening on any end-ish event will immediately
  re-emit it if it has already been emitted.
- `writable` Whether the stream is writable. Default `true`. Set
  to `false` when `end()` is called.
- `readable` Whether the stream is readable. Default `true`.
- `pipes` An array of Pipe objects referencing streams that this
  stream is piping into.
- `destroyed` A getter that indicates whether the stream was
  destroyed.
- `paused` True if the stream has been explicitly paused,
  otherwise false.
- `objectMode` Indicates whether the stream is in `objectMode`.
  Once set to `true`, it cannot be set to `false`.
- `aborted` Readonly property set when the `AbortSignal`
  dispatches an `abort` event.

### Events

- `data` Emitted when there's data to read. Argument is the data
  to read. This is never emitted while not flowing. If a listener
  is attached, that will resume the stream.
- `end` Emitted when there's no more data to read. This will be
  emitted immediately for empty streams when `end()` is called.
  If a listener is attached, and `end` was already emitted, then
  it will be emitted again. All listeners are removed when `end`
  is emitted.
- `prefinish` An end-ish event that follows the same logic as
  `end` and is emitted in the same conditions where `end` is
  emitted. Emitted after `'end'`.
- `finish` An end-ish event that follows the same logic as `end`
  and is emitted in the same conditions where `end` is emitted.
  Emitted after `'prefinish'`.
- `close` An indication that an underlying resource has been
  released. Minipass does not emit this event, but will defer it
  until after `end` has been emitted, since it throws off some
  stream libraries otherwise.
- `drain` Emitted when the internal buffer empties, and it is
  again suitable to `write()` into the stream.
- `readable` Emitted when data is buffered and ready to be read
  by a consumer.
- `resume` Emitted when stream changes state from buffering to
  flowing mode. (Ie, when `resume` is called, `pipe` is called,
  or a `data` event listener is added.)

### Static Methods

- `Minipass.isStream(stream)` Returns `true` if the argument is a
  stream, and false otherwise. To be considered a stream, the
  object must be either an instance of Minipass, or an
  EventEmitter that has either a `pipe()` method, or both
  `write()` and `end()` methods. (Pretty much any stream in
  node-land will return `true` for this.)
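
A sketch of the duck-typing rules above:

```js
const { Minipass } = require('minipass')
const fs = require('fs')

console.log(Minipass.isStream(new Minipass()))                     // true
console.log(Minipass.isStream(fs.createReadStream(__filename)))    // true
console.log(Minipass.isStream({ write: () => {}, end: () => {} })) // false: not an EventEmitter
```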

## EXAMPLES

Here are some examples of things you can do with Minipass
streams.

### simple "are you done yet" promise

```js
mp.promise().then(
  () => {
    // stream is finished
  },
  er => {
    // stream emitted an error
  }
)
```

### collecting

```js
mp.collect().then(all => {
  // all is an array of all the data emitted
  // encoding is supported in this case, so
  // the result will be a collection of strings if
  // an encoding is specified, or buffers/objects if not.
  //
  // In an async function, you may do
  // const data = await stream.collect()
})
```

### collecting into a single blob

This is a bit slower because it concatenates the data into one
chunk for you, but if you're going to do it yourself anyway, it's
convenient this way:

```js
mp.concat().then(onebigchunk => {
  // onebigchunk is a string if the stream
  // had an encoding set, or a buffer otherwise.
})
```

### iteration

You can iterate over streams synchronously or asynchronously in
platforms that support it.

Synchronous iteration will end when the currently available data
is consumed, even if the `end` event has not been reached. In
string and buffer mode, the data is concatenated, so unless
multiple writes are occurring in the same tick as the `read()`,
sync iteration loops will generally only have a single iteration.

To consume chunks in this way exactly as they have been written,
with no flattening, create the stream with the `{ objectMode:
true }` option.

```js
const mp = new Minipass({ objectMode: true })
mp.write('a')
mp.write('b')
for (let letter of mp) {
  console.log(letter) // a, b
}
mp.write('c')
mp.write('d')
for (let letter of mp) {
  console.log(letter) // c, d
}
mp.write('e')
mp.end()
for (let letter of mp) {
  console.log(letter) // e
}
for (let letter of mp) {
  console.log(letter) // nothing
}
```

Asynchronous iteration will continue until the end event is reached,
consuming all of the data.

```js
const mp = new Minipass({ encoding: 'utf8' })

// some source of some data
let i = 5
const inter = setInterval(() => {
  if (i-- > 0) mp.write(Buffer.from('foo\n', 'utf8'))
  else {
    mp.end()
    clearInterval(inter)
  }
}, 100)

// consume the data with asynchronous iteration
async function consume() {
  for await (let chunk of mp) {
    console.log(chunk)
  }
  return 'ok'
}

consume().then(res => console.log(res))
// logs `foo\n` 5 times, and then `ok`
```

### subclass that `console.log()`s everything written into it

```js
class Logger extends Minipass {
  write(chunk, encoding, callback) {
    console.log('WRITE', chunk, encoding)
    return super.write(chunk, encoding, callback)
  }
  end(chunk, encoding, callback) {
    console.log('END', chunk, encoding)
    return super.end(chunk, encoding, callback)
  }
}

someSource.pipe(new Logger()).pipe(someDest)
```

### same thing, but using an inline anonymous class

```js
// js classes are fun
someSource
  .pipe(
    new (class extends Minipass {
      emit(ev, ...data) {
        // let's also log events, because debugging some weird thing
        console.log('EMIT', ev)
        return super.emit(ev, ...data)
      }
      write(chunk, encoding, callback) {
        console.log('WRITE', chunk, encoding)
        return super.write(chunk, encoding, callback)
      }
      end(chunk, encoding, callback) {
        console.log('END', chunk, encoding)
        return super.end(chunk, encoding, callback)
      }
    })()
  )
  .pipe(someDest)
```

### subclass that defers 'end' for some reason

```js
class SlowEnd extends Minipass {
  emit(ev, ...args) {
    if (ev === 'end') {
      console.log('going to end, hold on a sec')
      setTimeout(() => {
        console.log('ok, ready to end now')
        super.emit('end', ...args)
      }, 100)
    } else {
      return super.emit(ev, ...args)
    }
  }
}
```

### transform that creates newline-delimited JSON

```js
class NDJSONEncode extends Minipass {
  write(obj, cb) {
    try {
      // JSON.stringify can throw, emit an error on that
      return super.write(JSON.stringify(obj) + '\n', 'utf8', cb)
    } catch (er) {
      this.emit('error', er)
    }
  }
  end(obj, cb) {
    if (typeof obj === 'function') {
      cb = obj
      obj = undefined
    }
    if (obj !== undefined) {
      this.write(obj)
    }
    return super.end(cb)
  }
}
```

### transform that parses newline-delimited JSON

```js
class NDJSONDecode extends Minipass {
  constructor (options) {
    // always be in object mode, as far as Minipass is concerned
    super({ objectMode: true })
    this._jsonBuffer = ''
  }
  write (chunk, encoding, cb) {
    if (typeof chunk === 'string' &&
        typeof encoding === 'string' &&
        encoding !== 'utf8') {
      chunk = Buffer.from(chunk, encoding).toString()
    } else if (Buffer.isBuffer(chunk)) {
      chunk = chunk.toString()
    }
    if (typeof encoding === 'function') {
      cb = encoding
    }
    const jsonData = (this._jsonBuffer + chunk).split('\n')
    this._jsonBuffer = jsonData.pop()
    for (let i = 0; i < jsonData.length; i++) {
      try {
        // JSON.parse can throw, emit an error on that
        super.write(JSON.parse(jsonData[i]))
      } catch (er) {
        this.emit('error', er)
        continue
      }
    }
    if (cb)
      cb()
  }
}
```
152
node_modules/tar/node_modules/minipass/index.d.ts
generated
vendored
Normal file
152
node_modules/tar/node_modules/minipass/index.d.ts
generated
vendored
Normal file
|
@ -0,0 +1,152 @@
|
|||
/// <reference types="node" />

// Note: marking anything protected or private in the exported
// class will limit Minipass's ability to be used as the base
// for mixin classes.
import { EventEmitter } from 'events'
import { Stream } from 'stream'

export namespace Minipass {
  export type Encoding = BufferEncoding | 'buffer' | null

  export interface Writable extends EventEmitter {
    end(): any
    write(chunk: any, ...args: any[]): any
  }

  export interface Readable extends EventEmitter {
    pause(): any
    resume(): any
    pipe(): any
  }

  export type DualIterable<T> = Iterable<T> & AsyncIterable<T>

  export type ContiguousData =
    | Buffer
    | ArrayBufferLike
    | ArrayBufferView
    | string

  export type BufferOrString = Buffer | string

  export interface SharedOptions {
    async?: boolean
    signal?: AbortSignal
  }

  export interface StringOptions extends SharedOptions {
    encoding: BufferEncoding
    objectMode?: boolean
  }

  export interface BufferOptions extends SharedOptions {
    encoding?: null | 'buffer'
    objectMode?: boolean
  }

  export interface ObjectModeOptions extends SharedOptions {
    objectMode: true
  }

  export interface PipeOptions {
    end?: boolean
    proxyErrors?: boolean
  }

  export type Options<T> = T extends string
    ? StringOptions
    : T extends Buffer
    ? BufferOptions
    : ObjectModeOptions
}

export class Minipass<
    RType extends any = Buffer,
    WType extends any = RType extends Minipass.BufferOrString
      ? Minipass.ContiguousData
      : RType
  >
  extends Stream
  implements Minipass.DualIterable<RType>
{
  static isStream(stream: any): stream is Minipass.Readable | Minipass.Writable

  readonly bufferLength: number
  readonly flowing: boolean
  readonly writable: boolean
  readonly readable: boolean
  readonly aborted: boolean
  readonly paused: boolean
  readonly emittedEnd: boolean
  readonly destroyed: boolean

  /**
   * Technically writable, but mutating it can change the type,
   * so is not safe to do in TypeScript.
   */
  readonly objectMode: boolean
  async: boolean

  /**
   * Note: encoding is not actually read-only, and setEncoding(enc)
   * exists. However, this type definition will insist that TypeScript
   * programs declare the type of a Minipass stream up front, and if
   * that type is string, then an encoding MUST be set in the ctor. If
   * the type is Buffer, then the encoding must be missing, or set to
   * 'buffer' or null. If the type is anything else, then objectMode
   * must be set in the constructor options. So there is effectively
   * no allowed way that a TS program can set the encoding after
   * construction, as doing so will destroy any hope of type safety.
   * TypeScript does not provide many options for changing the type of
   * an object at run-time, which is what changing the encoding does.
   */
  readonly encoding: Minipass.Encoding
  // setEncoding(encoding: Encoding): void

  // Options required if not reading buffers
  constructor(
    ...args: RType extends Buffer
      ? [] | [Minipass.Options<RType>]
      : [Minipass.Options<RType>]
  )

  write(chunk: WType, cb?: () => void): boolean
  write(chunk: WType, encoding?: Minipass.Encoding, cb?: () => void): boolean
  read(size?: number): RType
  end(cb?: () => void): this
  end(chunk: any, cb?: () => void): this
  end(chunk: any, encoding?: Minipass.Encoding, cb?: () => void): this
  pause(): void
  resume(): void
  promise(): Promise<void>
  collect(): Promise<RType[]>

  concat(): RType extends Minipass.BufferOrString ? Promise<RType> : never
  destroy(er?: any): void
  pipe<W extends Minipass.Writable>(dest: W, opts?: Minipass.PipeOptions): W
  unpipe<W extends Minipass.Writable>(dest: W): void

  /**
   * alias for on()
   */
  addEventHandler(event: string, listener: (...args: any[]) => any): this

  on(event: string, listener: (...args: any[]) => any): this
  on(event: 'data', listener: (chunk: RType) => any): this
  on(event: 'error', listener: (error: any) => any): this
  on(
    event:
      | 'readable'
      | 'drain'
      | 'resume'
      | 'end'
      | 'prefinish'
      | 'finish'
      | 'close',
    listener: () => any
  ): this

  [Symbol.iterator](): Generator<RType, void, void>
  [Symbol.asyncIterator](): AsyncGenerator<RType, void, void>
}
702 node_modules/tar/node_modules/minipass/index.js generated vendored Normal file
@@ -0,0 +1,702 @@
'use strict'
const proc =
  typeof process === 'object' && process
    ? process
    : {
        stdout: null,
        stderr: null,
      }
const EE = require('events')
const Stream = require('stream')
const stringdecoder = require('string_decoder')
const SD = stringdecoder.StringDecoder

const EOF = Symbol('EOF')
const MAYBE_EMIT_END = Symbol('maybeEmitEnd')
const EMITTED_END = Symbol('emittedEnd')
const EMITTING_END = Symbol('emittingEnd')
const EMITTED_ERROR = Symbol('emittedError')
const CLOSED = Symbol('closed')
const READ = Symbol('read')
const FLUSH = Symbol('flush')
const FLUSHCHUNK = Symbol('flushChunk')
const ENCODING = Symbol('encoding')
const DECODER = Symbol('decoder')
const FLOWING = Symbol('flowing')
const PAUSED = Symbol('paused')
const RESUME = Symbol('resume')
const BUFFER = Symbol('buffer')
const PIPES = Symbol('pipes')
const BUFFERLENGTH = Symbol('bufferLength')
const BUFFERPUSH = Symbol('bufferPush')
const BUFFERSHIFT = Symbol('bufferShift')
const OBJECTMODE = Symbol('objectMode')
// internal event when stream is destroyed
const DESTROYED = Symbol('destroyed')
// internal event when stream has an error
const ERROR = Symbol('error')
const EMITDATA = Symbol('emitData')
const EMITEND = Symbol('emitEnd')
const EMITEND2 = Symbol('emitEnd2')
const ASYNC = Symbol('async')
const ABORT = Symbol('abort')
const ABORTED = Symbol('aborted')
const SIGNAL = Symbol('signal')

const defer = fn => Promise.resolve().then(fn)

// TODO remove when Node v8 support drops
const doIter = global._MP_NO_ITERATOR_SYMBOLS_ !== '1'
const ASYNCITERATOR =
  (doIter && Symbol.asyncIterator) || Symbol('asyncIterator not implemented')
const ITERATOR =
  (doIter && Symbol.iterator) || Symbol('iterator not implemented')

// events that mean 'the stream is over'
// these are treated specially, and re-emitted
// if they are listened for after emitting.
const isEndish = ev => ev === 'end' || ev === 'finish' || ev === 'prefinish'

const isArrayBuffer = b =>
  b instanceof ArrayBuffer ||
  (typeof b === 'object' &&
    b.constructor &&
    b.constructor.name === 'ArrayBuffer' &&
    b.byteLength >= 0)

const isArrayBufferView = b => !Buffer.isBuffer(b) && ArrayBuffer.isView(b)

class Pipe {
  constructor(src, dest, opts) {
    this.src = src
    this.dest = dest
    this.opts = opts
    this.ondrain = () => src[RESUME]()
    dest.on('drain', this.ondrain)
  }
  unpipe() {
    this.dest.removeListener('drain', this.ondrain)
  }
  // istanbul ignore next - only here for the prototype
  proxyErrors() {}
  end() {
    this.unpipe()
    if (this.opts.end) this.dest.end()
  }
}

class PipeProxyErrors extends Pipe {
  unpipe() {
    this.src.removeListener('error', this.proxyErrors)
    super.unpipe()
  }
  constructor(src, dest, opts) {
    super(src, dest, opts)
    this.proxyErrors = er => dest.emit('error', er)
    src.on('error', this.proxyErrors)
  }
}

class Minipass extends Stream {
  constructor(options) {
    super()
    this[FLOWING] = false
    // whether we're explicitly paused
    this[PAUSED] = false
    this[PIPES] = []
    this[BUFFER] = []
    this[OBJECTMODE] = (options && options.objectMode) || false
    if (this[OBJECTMODE]) this[ENCODING] = null
    else this[ENCODING] = (options && options.encoding) || null
    if (this[ENCODING] === 'buffer') this[ENCODING] = null
    this[ASYNC] = (options && !!options.async) || false
    this[DECODER] = this[ENCODING] ? new SD(this[ENCODING]) : null
    this[EOF] = false
    this[EMITTED_END] = false
    this[EMITTING_END] = false
    this[CLOSED] = false
    this[EMITTED_ERROR] = null
    this.writable = true
    this.readable = true
    this[BUFFERLENGTH] = 0
    this[DESTROYED] = false
    if (options && options.debugExposeBuffer === true) {
      Object.defineProperty(this, 'buffer', { get: () => this[BUFFER] })
    }
    if (options && options.debugExposePipes === true) {
      Object.defineProperty(this, 'pipes', { get: () => this[PIPES] })
    }
    this[SIGNAL] = options && options.signal
    this[ABORTED] = false
    if (this[SIGNAL]) {
      this[SIGNAL].addEventListener('abort', () => this[ABORT]())
      if (this[SIGNAL].aborted) {
        this[ABORT]()
      }
    }
  }

  get bufferLength() {
    return this[BUFFERLENGTH]
  }

  get encoding() {
    return this[ENCODING]
  }
  set encoding(enc) {
    if (this[OBJECTMODE]) throw new Error('cannot set encoding in objectMode')

    if (
      this[ENCODING] &&
      enc !== this[ENCODING] &&
      ((this[DECODER] && this[DECODER].lastNeed) || this[BUFFERLENGTH])
    )
      throw new Error('cannot change encoding')

    if (this[ENCODING] !== enc) {
      this[DECODER] = enc ? new SD(enc) : null
      if (this[BUFFER].length)
        this[BUFFER] = this[BUFFER].map(chunk => this[DECODER].write(chunk))
    }

    this[ENCODING] = enc
  }

  setEncoding(enc) {
    this.encoding = enc
  }

  get objectMode() {
    return this[OBJECTMODE]
  }
  set objectMode(om) {
    this[OBJECTMODE] = this[OBJECTMODE] || !!om
  }

  get ['async']() {
    return this[ASYNC]
  }
  set ['async'](a) {
    this[ASYNC] = this[ASYNC] || !!a
  }

  // drop everything and get out of the flow completely
  [ABORT]() {
    this[ABORTED] = true
    this.emit('abort', this[SIGNAL].reason)
    this.destroy(this[SIGNAL].reason)
  }

  get aborted() {
    return this[ABORTED]
  }
  set aborted(_) {}

  write(chunk, encoding, cb) {
    if (this[ABORTED]) return false
    if (this[EOF]) throw new Error('write after end')

    if (this[DESTROYED]) {
      this.emit(
        'error',
        Object.assign(
          new Error('Cannot call write after a stream was destroyed'),
          { code: 'ERR_STREAM_DESTROYED' }
        )
      )
      return true
    }

    if (typeof encoding === 'function') (cb = encoding), (encoding = 'utf8')

    if (!encoding) encoding = 'utf8'

    const fn = this[ASYNC] ? defer : f => f()

    // convert array buffers and typed array views into buffers
    // at some point in the future, we may want to do the opposite!
    // leave strings and buffers as-is
    // anything else switches us into object mode
    if (!this[OBJECTMODE] && !Buffer.isBuffer(chunk)) {
      if (isArrayBufferView(chunk))
        chunk = Buffer.from(chunk.buffer, chunk.byteOffset, chunk.byteLength)
      else if (isArrayBuffer(chunk)) chunk = Buffer.from(chunk)
      else if (typeof chunk !== 'string')
        // use the setter so we throw if we have encoding set
        this.objectMode = true
    }

    // handle object mode up front, since it's simpler
    // this yields better performance, fewer checks later.
    if (this[OBJECTMODE]) {
      /* istanbul ignore if - maybe impossible? */
      if (this.flowing && this[BUFFERLENGTH] !== 0) this[FLUSH](true)

      if (this.flowing) this.emit('data', chunk)
      else this[BUFFERPUSH](chunk)

      if (this[BUFFERLENGTH] !== 0) this.emit('readable')

      if (cb) fn(cb)

      return this.flowing
    }

    // at this point the chunk is a buffer or string
    // don't buffer it up or send it to the decoder
    if (!chunk.length) {
      if (this[BUFFERLENGTH] !== 0) this.emit('readable')
      if (cb) fn(cb)
      return this.flowing
    }

    // fast-path writing strings of same encoding to a stream with
    // an empty buffer, skipping the buffer/decoder dance
    if (
      typeof chunk === 'string' &&
      // unless it is a string already ready for us to use
      !(encoding === this[ENCODING] && !this[DECODER].lastNeed)
    ) {
      chunk = Buffer.from(chunk, encoding)
    }

    if (Buffer.isBuffer(chunk) && this[ENCODING])
      chunk = this[DECODER].write(chunk)

    // Note: flushing CAN potentially switch us into not-flowing mode
    if (this.flowing && this[BUFFERLENGTH] !== 0) this[FLUSH](true)

    if (this.flowing) this.emit('data', chunk)
    else this[BUFFERPUSH](chunk)

    if (this[BUFFERLENGTH] !== 0) this.emit('readable')

    if (cb) fn(cb)

    return this.flowing
  }

  read(n) {
    if (this[DESTROYED]) return null

    if (this[BUFFERLENGTH] === 0 || n === 0 || n > this[BUFFERLENGTH]) {
      this[MAYBE_EMIT_END]()
      return null
    }

    if (this[OBJECTMODE]) n = null

    if (this[BUFFER].length > 1 && !this[OBJECTMODE]) {
      if (this.encoding) this[BUFFER] = [this[BUFFER].join('')]
      else this[BUFFER] = [Buffer.concat(this[BUFFER], this[BUFFERLENGTH])]
    }

    const ret = this[READ](n || null, this[BUFFER][0])
    this[MAYBE_EMIT_END]()
    return ret
  }

  [READ](n, chunk) {
    if (n === chunk.length || n === null) this[BUFFERSHIFT]()
    else {
      this[BUFFER][0] = chunk.slice(n)
      chunk = chunk.slice(0, n)
      this[BUFFERLENGTH] -= n
    }

    this.emit('data', chunk)

    if (!this[BUFFER].length && !this[EOF]) this.emit('drain')

    return chunk
  }

  end(chunk, encoding, cb) {
    if (typeof chunk === 'function') (cb = chunk), (chunk = null)
    if (typeof encoding === 'function') (cb = encoding), (encoding = 'utf8')
    if (chunk) this.write(chunk, encoding)
    if (cb) this.once('end', cb)
    this[EOF] = true
    this.writable = false

    // if we haven't written anything, then go ahead and emit,
    // even if we're not reading.
    // we'll re-emit if a new 'end' listener is added anyway.
    // This makes MP more suitable to write-only use cases.
    if (this.flowing || !this[PAUSED]) this[MAYBE_EMIT_END]()
    return this
  }

  // don't let the internal resume be overwritten
  [RESUME]() {
    if (this[DESTROYED]) return

    this[PAUSED] = false
    this[FLOWING] = true
    this.emit('resume')
    if (this[BUFFER].length) this[FLUSH]()
    else if (this[EOF]) this[MAYBE_EMIT_END]()
    else this.emit('drain')
  }

  resume() {
    return this[RESUME]()
  }

  pause() {
    this[FLOWING] = false
    this[PAUSED] = true
  }

  get destroyed() {
    return this[DESTROYED]
  }

  get flowing() {
    return this[FLOWING]
  }

  get paused() {
    return this[PAUSED]
  }

  [BUFFERPUSH](chunk) {
    if (this[OBJECTMODE]) this[BUFFERLENGTH] += 1
    else this[BUFFERLENGTH] += chunk.length
    this[BUFFER].push(chunk)
  }

  [BUFFERSHIFT]() {
    if (this[OBJECTMODE]) this[BUFFERLENGTH] -= 1
    else this[BUFFERLENGTH] -= this[BUFFER][0].length
    return this[BUFFER].shift()
  }

  [FLUSH](noDrain) {
    do {} while (this[FLUSHCHUNK](this[BUFFERSHIFT]()) && this[BUFFER].length)

    if (!noDrain && !this[BUFFER].length && !this[EOF]) this.emit('drain')
  }

  [FLUSHCHUNK](chunk) {
    this.emit('data', chunk)
    return this.flowing
  }

  pipe(dest, opts) {
    if (this[DESTROYED]) return

    const ended = this[EMITTED_END]
    opts = opts || {}
    if (dest === proc.stdout || dest === proc.stderr) opts.end = false
    else opts.end = opts.end !== false
    opts.proxyErrors = !!opts.proxyErrors

    // piping an ended stream ends immediately
    if (ended) {
      if (opts.end) dest.end()
    } else {
      this[PIPES].push(
        !opts.proxyErrors
          ? new Pipe(this, dest, opts)
          : new PipeProxyErrors(this, dest, opts)
      )
      if (this[ASYNC]) defer(() => this[RESUME]())
      else this[RESUME]()
    }

    return dest
  }

  unpipe(dest) {
    const p = this[PIPES].find(p => p.dest === dest)
    if (p) {
      this[PIPES].splice(this[PIPES].indexOf(p), 1)
      p.unpipe()
    }
  }

  addListener(ev, fn) {
    return this.on(ev, fn)
  }

  on(ev, fn) {
    const ret = super.on(ev, fn)
    if (ev === 'data' && !this[PIPES].length && !this.flowing) this[RESUME]()
    else if (ev === 'readable' && this[BUFFERLENGTH] !== 0)
      super.emit('readable')
    else if (isEndish(ev) && this[EMITTED_END]) {
      super.emit(ev)
      this.removeAllListeners(ev)
    } else if (ev === 'error' && this[EMITTED_ERROR]) {
      if (this[ASYNC]) defer(() => fn.call(this, this[EMITTED_ERROR]))
      else fn.call(this, this[EMITTED_ERROR])
    }
    return ret
  }

  get emittedEnd() {
    return this[EMITTED_END]
  }

  [MAYBE_EMIT_END]() {
    if (
      !this[EMITTING_END] &&
      !this[EMITTED_END] &&
      !this[DESTROYED] &&
      this[BUFFER].length === 0 &&
      this[EOF]
    ) {
      this[EMITTING_END] = true
      this.emit('end')
      this.emit('prefinish')
      this.emit('finish')
      if (this[CLOSED]) this.emit('close')
      this[EMITTING_END] = false
    }
  }

  emit(ev, data, ...extra) {
    // error and close are only events allowed after calling destroy()
    if (ev !== 'error' && ev !== 'close' && ev !== DESTROYED && this[DESTROYED])
      return
    else if (ev === 'data') {
      return !this[OBJECTMODE] && !data
        ? false
        : this[ASYNC]
        ? defer(() => this[EMITDATA](data))
        : this[EMITDATA](data)
    } else if (ev === 'end') {
      return this[EMITEND]()
    } else if (ev === 'close') {
      this[CLOSED] = true
      // don't emit close before 'end' and 'finish'
      if (!this[EMITTED_END] && !this[DESTROYED]) return
      const ret = super.emit('close')
      this.removeAllListeners('close')
      return ret
    } else if (ev === 'error') {
      this[EMITTED_ERROR] = data
      super.emit(ERROR, data)
      const ret =
        !this[SIGNAL] || this.listeners('error').length
          ? super.emit('error', data)
          : false
      this[MAYBE_EMIT_END]()
      return ret
    } else if (ev === 'resume') {
      const ret = super.emit('resume')
      this[MAYBE_EMIT_END]()
      return ret
    } else if (ev === 'finish' || ev === 'prefinish') {
      const ret = super.emit(ev)
      this.removeAllListeners(ev)
      return ret
    }

    // Some other unknown event
    const ret = super.emit(ev, data, ...extra)
    this[MAYBE_EMIT_END]()
    return ret
  }

  [EMITDATA](data) {
    for (const p of this[PIPES]) {
      if (p.dest.write(data) === false) this.pause()
    }
    const ret = super.emit('data', data)
    this[MAYBE_EMIT_END]()
    return ret
  }

  [EMITEND]() {
    if (this[EMITTED_END]) return

    this[EMITTED_END] = true
    this.readable = false
    if (this[ASYNC]) defer(() => this[EMITEND2]())
    else this[EMITEND2]()
  }

  [EMITEND2]() {
    if (this[DECODER]) {
      const data = this[DECODER].end()
      if (data) {
        for (const p of this[PIPES]) {
          p.dest.write(data)
        }
        super.emit('data', data)
      }
    }

    for (const p of this[PIPES]) {
      p.end()
    }
    const ret = super.emit('end')
    this.removeAllListeners('end')
    return ret
  }

  // const all = await stream.collect()
  collect() {
    const buf = []
    if (!this[OBJECTMODE]) buf.dataLength = 0
    // set the promise first, in case an error is raised
    // by triggering the flow here.
    const p = this.promise()
    this.on('data', c => {
      buf.push(c)
      if (!this[OBJECTMODE]) buf.dataLength += c.length
    })
    return p.then(() => buf)
  }

  // const data = await stream.concat()
  concat() {
    return this[OBJECTMODE]
      ? Promise.reject(new Error('cannot concat in objectMode'))
      : this.collect().then(buf =>
          this[OBJECTMODE]
            ? Promise.reject(new Error('cannot concat in objectMode'))
            : this[ENCODING]
            ? buf.join('')
            : Buffer.concat(buf, buf.dataLength)
        )
  }

  // stream.promise().then(() => done, er => emitted error)
  promise() {
    return new Promise((resolve, reject) => {
      this.on(DESTROYED, () => reject(new Error('stream destroyed')))
      this.on('error', er => reject(er))
      this.on('end', () => resolve())
    })
  }

  // for await (let chunk of stream)
  [ASYNCITERATOR]() {
    let stopped = false
    const stop = () => {
      this.pause()
      stopped = true
      return Promise.resolve({ done: true })
    }
    const next = () => {
      if (stopped) return stop()
      const res = this.read()
      if (res !== null) return Promise.resolve({ done: false, value: res })

      if (this[EOF]) return stop()

      let resolve = null
      let reject = null
      const onerr = er => {
        this.removeListener('data', ondata)
        this.removeListener('end', onend)
        this.removeListener(DESTROYED, ondestroy)
        stop()
        reject(er)
      }
      const ondata = value => {
        this.removeListener('error', onerr)
        this.removeListener('end', onend)
        this.removeListener(DESTROYED, ondestroy)
        this.pause()
        resolve({ value: value, done: !!this[EOF] })
      }
      const onend = () => {
        this.removeListener('error', onerr)
        this.removeListener('data', ondata)
        this.removeListener(DESTROYED, ondestroy)
        stop()
        resolve({ done: true })
      }
      const ondestroy = () => onerr(new Error('stream destroyed'))
      return new Promise((res, rej) => {
        reject = rej
        resolve = res
        this.once(DESTROYED, ondestroy)
        this.once('error', onerr)
        this.once('end', onend)
        this.once('data', ondata)
      })
    }

    return {
      next,
      throw: stop,
      return: stop,
      [ASYNCITERATOR]() {
        return this
      },
    }
  }

  // for (let chunk of stream)
  [ITERATOR]() {
    let stopped = false
    const stop = () => {
      this.pause()
      this.removeListener(ERROR, stop)
      this.removeListener(DESTROYED, stop)
      this.removeListener('end', stop)
      stopped = true
      return { done: true }
    }

    const next = () => {
      if (stopped) return stop()
      const value = this.read()
      return value === null ? stop() : { value }
    }
    this.once('end', stop)
    this.once(ERROR, stop)
    this.once(DESTROYED, stop)

    return {
      next,
      throw: stop,
      return: stop,
      [ITERATOR]() {
        return this
      },
    }
  }

  destroy(er) {
    if (this[DESTROYED]) {
      if (er) this.emit('error', er)
      else this.emit(DESTROYED)
      return this
    }

    this[DESTROYED] = true

    // throw away all buffered data, it's never coming out
    this[BUFFER].length = 0
    this[BUFFERLENGTH] = 0

    if (typeof this.close === 'function' && !this[CLOSED]) this.close()

    if (er) this.emit('error', er)
    // if no error to emit, still reject pending promises
    else this.emit(DESTROYED)

    return this
  }

  static isStream(s) {
    return (
      !!s &&
      (s instanceof Minipass ||
        s instanceof Stream ||
        (s instanceof EE &&
          // readable
          (typeof s.pipe === 'function' ||
            // writable
            (typeof s.write === 'function' && typeof s.end === 'function'))))
    )
  }
}

exports.Minipass = Minipass
76 node_modules/tar/node_modules/minipass/package.json generated vendored Normal file
@@ -0,0 +1,76 @@
{
  "name": "minipass",
  "version": "5.0.0",
  "description": "minimal implementation of a PassThrough stream",
  "main": "./index.js",
  "module": "./index.mjs",
  "types": "./index.d.ts",
  "exports": {
    ".": {
      "import": {
        "types": "./index.d.ts",
        "default": "./index.mjs"
      },
      "require": {
        "types": "./index.d.ts",
        "default": "./index.js"
      }
    },
    "./package.json": "./package.json"
  },
  "devDependencies": {
    "@types/node": "^17.0.41",
    "end-of-stream": "^1.4.0",
    "node-abort-controller": "^3.1.1",
    "prettier": "^2.6.2",
    "tap": "^16.2.0",
    "through2": "^2.0.3",
    "ts-node": "^10.8.1",
    "typedoc": "^0.23.24",
    "typescript": "^4.7.3"
  },
  "scripts": {
    "pretest": "npm run prepare",
    "presnap": "npm run prepare",
    "prepare": "node ./scripts/transpile-to-esm.js",
    "snap": "tap",
    "test": "tap",
    "preversion": "npm test",
    "postversion": "npm publish",
    "postpublish": "git push origin --follow-tags",
    "typedoc": "typedoc ./index.d.ts",
    "format": "prettier --write . --loglevel warn"
  },
  "repository": {
    "type": "git",
    "url": "git+https://github.com/isaacs/minipass.git"
  },
  "keywords": [
    "passthrough",
    "stream"
  ],
  "author": "Isaac Z. Schlueter <i@izs.me> (http://blog.izs.me/)",
  "license": "ISC",
  "files": [
    "index.d.ts",
    "index.js",
    "index.mjs"
  ],
  "tap": {
    "check-coverage": true
  },
  "engines": {
    "node": ">=8"
  },
  "prettier": {
    "semi": false,
    "printWidth": 80,
    "tabWidth": 2,
    "useTabs": false,
    "singleQuote": true,
    "jsxSingleQuote": false,
    "bracketSameLine": true,
    "arrowParens": "avoid",
    "endOfLine": "lf"
  }
}
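The `exports` map above exposes the same stream class to both module systems. A minimal consumer-side sketch (not part of the package itself) of the two equivalent entry points:

```js
// CommonJS consumers resolve the "require" condition (./index.js)
const { Minipass } = require('minipass')

// ESM consumers resolve the "import" condition (./index.mjs):
// import { Minipass } from 'minipass'

const mp = new Minipass({ encoding: 'utf8' })
mp.end('hello')
mp.concat().then(s => console.log(s)) // 'hello'
```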
15 node_modules/tar/node_modules/mkdirp/CHANGELOG.md generated vendored Normal file
@@ -0,0 +1,15 @@
# Changers Lorgs!

## 1.0

Full rewrite. Essentially a brand new module.

- Return a promise instead of taking a callback.
- Use native `fs.mkdir(path, { recursive: true })` when available.
- Drop support for outdated Node.js versions. (Technically still works on
  Node.js v8, but only 10 and above are officially supported.)

## 0.x

Original and most widely used recursive directory creation implementation
in JavaScript, dating back to 2010.
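The promise-based API called out in the 1.0 notes above fits in one line; a minimal sketch (the target path is invented for illustration):

```js
const mkdirp = require('mkdirp')

// resolves to the first directory actually created, or undefined
// if everything in the path already existed
mkdirp('/tmp/foo/bar/baz').then(made => console.log(`made: ${made}`))
```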
21 node_modules/tar/node_modules/mkdirp/LICENSE generated vendored Normal file
@@ -0,0 +1,21 @@
Copyright James Halliday (mail@substack.net) and Isaac Z. Schlueter (i@izs.me)

This project is free software released under the MIT license:

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
68 node_modules/tar/node_modules/mkdirp/bin/cmd.js generated vendored Executable file
@@ -0,0 +1,68 @@
#!/usr/bin/env node

const usage = () => `
usage: mkdirp [DIR1,DIR2..] {OPTIONS}

  Create each supplied directory including any necessary parent directories
  that don't yet exist.

  If the directory already exists, do nothing.

OPTIONS are:

  -m<mode>       If a directory needs to be created, set the mode as an octal
  --mode=<mode>  permission string.

  -v --version   Print the mkdirp version number

  -h --help      Print this helpful banner

  -p --print     Print the first directories created for each path provided

  --manual       Use manual implementation, even if native is available
`

const dirs = []
const opts = {}
let print = false
let dashdash = false
let manual = false
for (const arg of process.argv.slice(2)) {
  if (dashdash)
    dirs.push(arg)
  else if (arg === '--')
    dashdash = true
  else if (arg === '--manual')
    manual = true
  else if (/^-h/.test(arg) || /^--help/.test(arg)) {
    console.log(usage())
    process.exit(0)
  } else if (arg === '-v' || arg === '--version') {
    console.log(require('../package.json').version)
    process.exit(0)
  } else if (arg === '-p' || arg === '--print') {
    print = true
  } else if (/^-m/.test(arg) || /^--mode=/.test(arg)) {
    const mode = parseInt(arg.replace(/^(-m|--mode=)/, ''), 8)
    if (isNaN(mode)) {
      console.error(`invalid mode argument: ${arg}\nMust be an octal number.`)
      process.exit(1)
    }
    opts.mode = mode
  } else
    dirs.push(arg)
}

const mkdirp = require('../')
const impl = manual ? mkdirp.manual : mkdirp
if (dirs.length === 0)
  console.error(usage())

Promise.all(dirs.map(dir => impl(dir, opts)))
  .then(made => print ? made.forEach(m => m && console.log(m)) : null)
  .catch(er => {
    console.error(er.message)
    if (er.code)
      console.error('  code: ' + er.code)
    process.exit(1)
  })
31 node_modules/tar/node_modules/mkdirp/index.js generated vendored Normal file
@@ -0,0 +1,31 @@
const optsArg = require('./lib/opts-arg.js')
const pathArg = require('./lib/path-arg.js')

const {mkdirpNative, mkdirpNativeSync} = require('./lib/mkdirp-native.js')
const {mkdirpManual, mkdirpManualSync} = require('./lib/mkdirp-manual.js')
const {useNative, useNativeSync} = require('./lib/use-native.js')

const mkdirp = (path, opts) => {
  path = pathArg(path)
  opts = optsArg(opts)
  return useNative(opts)
    ? mkdirpNative(path, opts)
    : mkdirpManual(path, opts)
}

const mkdirpSync = (path, opts) => {
  path = pathArg(path)
  opts = optsArg(opts)
  return useNativeSync(opts)
    ? mkdirpNativeSync(path, opts)
    : mkdirpManualSync(path, opts)
}

mkdirp.sync = mkdirpSync
mkdirp.native = (path, opts) => mkdirpNative(pathArg(path), optsArg(opts))
mkdirp.manual = (path, opts) => mkdirpManual(pathArg(path), optsArg(opts))
mkdirp.nativeSync = (path, opts) => mkdirpNativeSync(pathArg(path), optsArg(opts))
mkdirp.manualSync = (path, opts) => mkdirpManualSync(pathArg(path), optsArg(opts))

module.exports = mkdirp
29 node_modules/tar/node_modules/mkdirp/lib/find-made.js generated vendored Normal file
@@ -0,0 +1,29 @@
const {dirname} = require('path')

const findMade = (opts, parent, path = undefined) => {
  // we never want the 'made' return value to be a root directory
  if (path === parent)
    return Promise.resolve()

  return opts.statAsync(parent).then(
    st => st.isDirectory() ? path : undefined, // will fail later
    er => er.code === 'ENOENT'
      ? findMade(opts, dirname(parent), parent)
      : undefined
  )
}

const findMadeSync = (opts, parent, path = undefined) => {
  if (path === parent)
    return undefined

  try {
    return opts.statSync(parent).isDirectory() ? path : undefined
  } catch (er) {
    return er.code === 'ENOENT'
      ? findMadeSync(opts, dirname(parent), parent)
      : undefined
  }
}

module.exports = {findMade, findMadeSync}
64 node_modules/tar/node_modules/mkdirp/lib/mkdirp-manual.js generated vendored Normal file
@@ -0,0 +1,64 @@
const {dirname} = require('path')
|
||||
|
||||
const mkdirpManual = (path, opts, made) => {
|
||||
opts.recursive = false
|
||||
const parent = dirname(path)
|
||||
if (parent === path) {
|
||||
return opts.mkdirAsync(path, opts).catch(er => {
|
||||
// swallowed by recursive implementation on posix systems
|
||||
// any other error is a failure
|
||||
if (er.code !== 'EISDIR')
|
||||
throw er
|
||||
})
|
||||
}
|
||||
|
||||
return opts.mkdirAsync(path, opts).then(() => made || path, er => {
|
||||
if (er.code === 'ENOENT')
|
||||
return mkdirpManual(parent, opts)
|
||||
.then(made => mkdirpManual(path, opts, made))
|
||||
if (er.code !== 'EEXIST' && er.code !== 'EROFS')
|
||||
throw er
|
||||
return opts.statAsync(path).then(st => {
|
||||
if (st.isDirectory())
|
||||
return made
|
||||
else
|
||||
throw er
|
||||
}, () => { throw er })
|
||||
})
|
||||
}
|
||||
|
||||
const mkdirpManualSync = (path, opts, made) => {
|
||||
const parent = dirname(path)
|
||||
opts.recursive = false
|
||||
|
||||
if (parent === path) {
|
||||
try {
|
||||
return opts.mkdirSync(path, opts)
|
||||
} catch (er) {
|
||||
// swallowed by recursive implementation on posix systems
|
||||
// any other error is a failure
|
||||
if (er.code !== 'EISDIR')
|
||||
throw er
|
||||
else
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
opts.mkdirSync(path, opts)
|
||||
return made || path
|
||||
} catch (er) {
|
||||
if (er.code === 'ENOENT')
|
||||
return mkdirpManualSync(path, opts, mkdirpManualSync(parent, opts, made))
|
||||
if (er.code !== 'EEXIST' && er.code !== 'EROFS')
|
||||
throw er
|
||||
try {
|
||||
if (!opts.statSync(path).isDirectory())
|
||||
throw er
|
||||
} catch (_) {
|
||||
throw er
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = {mkdirpManual, mkdirpManualSync}
39
node_modules/tar/node_modules/mkdirp/lib/mkdirp-native.js
generated
vendored
Normal file
@@ -0,0 +1,39 @@
const {dirname} = require('path')
const {findMade, findMadeSync} = require('./find-made.js')
const {mkdirpManual, mkdirpManualSync} = require('./mkdirp-manual.js')

const mkdirpNative = (path, opts) => {
  opts.recursive = true
  const parent = dirname(path)
  if (parent === path)
    return opts.mkdirAsync(path, opts)

  return findMade(opts, path).then(made =>
    opts.mkdirAsync(path, opts).then(() => made)
    .catch(er => {
      if (er.code === 'ENOENT')
        return mkdirpManual(path, opts)
      else
        throw er
    }))
}

const mkdirpNativeSync = (path, opts) => {
  opts.recursive = true
  const parent = dirname(path)
  if (parent === path)
    return opts.mkdirSync(path, opts)

  const made = findMadeSync(opts, path)
  try {
    opts.mkdirSync(path, opts)
    return made
  } catch (er) {
    if (er.code === 'ENOENT')
      return mkdirpManualSync(path, opts)
    else
      throw er
  }
}

module.exports = {mkdirpNative, mkdirpNativeSync}
23
node_modules/tar/node_modules/mkdirp/lib/opts-arg.js
generated
vendored
Normal file
@@ -0,0 +1,23 @@
const { promisify } = require('util')
const fs = require('fs')
const optsArg = opts => {
  if (!opts)
    opts = { mode: 0o777, fs }
  else if (typeof opts === 'object')
    opts = { mode: 0o777, fs, ...opts }
  else if (typeof opts === 'number')
    opts = { mode: opts, fs }
  else if (typeof opts === 'string')
    opts = { mode: parseInt(opts, 8), fs }
  else
    throw new TypeError('invalid options argument')

  opts.mkdir = opts.mkdir || opts.fs.mkdir || fs.mkdir
  opts.mkdirAsync = promisify(opts.mkdir)
  opts.stat = opts.stat || opts.fs.stat || fs.stat
  opts.statAsync = promisify(opts.stat)
  opts.statSync = opts.statSync || opts.fs.statSync || fs.statSync
  opts.mkdirSync = opts.mkdirSync || opts.fs.mkdirSync || fs.mkdirSync
  return opts
}
module.exports = optsArg
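opts-arg.js is what lets every public entry point accept a number, an octal string, or an options object interchangeably. A quick sketch of the normalization; the require path is illustrative for this vendored copy:

```js
const optsArg = require('./node_modules/tar/node_modules/mkdirp/lib/opts-arg.js')

// Three spellings of the same mode all normalize to { mode, fs, ... }:
console.log(optsArg(0o700).mode)           // 448
console.log(optsArg('700').mode)           // 448; strings are parseInt(s, 8)
console.log(optsArg({ mode: 0o700 }).mode) // 448

// The fs bindings are promisified once, up front:
console.log(typeof optsArg().mkdirAsync)   // 'function'
```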
29
node_modules/tar/node_modules/mkdirp/lib/path-arg.js
generated
vendored
Normal file
@@ -0,0 +1,29 @@
const platform = process.env.__TESTING_MKDIRP_PLATFORM__ || process.platform
const { resolve, parse } = require('path')
const pathArg = path => {
  if (/\0/.test(path)) {
    // simulate same failure that node raises
    throw Object.assign(
      new TypeError('path must be a string without null bytes'),
      {
        path,
        code: 'ERR_INVALID_ARG_VALUE',
      }
    )
  }

  path = resolve(path)
  if (platform === 'win32') {
    const badWinChars = /[*|"<>?:]/
    const {root} = parse(path)
    if (badWinChars.test(path.substr(root.length))) {
      throw Object.assign(new Error('Illegal characters in path.'), {
        path,
        code: 'EINVAL',
      })
    }
  }

  return path
}
module.exports = pathArg
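path-arg.js fails fast on inputs the underlying syscalls would reject anyway, with error shapes matching Node's own. A small sketch; the require path is illustrative:

```js
const pathArg = require('./node_modules/tar/node_modules/mkdirp/lib/path-arg.js')

// Valid input: resolved against the current working directory.
console.log(pathArg('a/b')) // e.g. '/home/user/project/a/b'

// Null bytes are rejected up front, mirroring Node's own error.
try {
  pathArg('a\0b')
} catch (er) {
  console.log(er.code) // 'ERR_INVALID_ARG_VALUE'
}
```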
10
node_modules/tar/node_modules/mkdirp/lib/use-native.js
generated
vendored
Normal file
@@ -0,0 +1,10 @@
const fs = require('fs')

const version = process.env.__TESTING_MKDIRP_NODE_VERSION__ || process.version
const versArr = version.replace(/^v/, '').split('.')
const hasNative = +versArr[0] > 10 || +versArr[0] === 10 && +versArr[1] >= 12

const useNative = !hasNative ? () => false : opts => opts.mkdir === fs.mkdir
const useNativeSync = !hasNative ? () => false : opts => opts.mkdirSync === fs.mkdirSync

module.exports = {useNative, useNativeSync}
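In other words: the native path is taken only when the runtime has recursive mkdir (v10.12+) and the caller hasn't swapped out `fs.mkdir`. A sketch, with illustrative require paths:

```js
const fs = require('fs')
const {useNative} = require('./node_modules/tar/node_modules/mkdirp/lib/use-native.js')
const optsArg = require('./node_modules/tar/node_modules/mkdirp/lib/opts-arg.js')

// Stock fs on Node >= 10.12: native recursive mkdir is trusted.
console.log(useNative(optsArg({})))  // true

// Any mkdir override disqualifies native mode, even a pass-through one,
// because the identity check opts.mkdir === fs.mkdir fails.
const wrapped = (path, opts, cb) => fs.mkdir(path, opts, cb)
console.log(useNative(optsArg({ mkdir: wrapped })))  // false
```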
44
node_modules/tar/node_modules/mkdirp/package.json
generated
vendored
Normal file
@@ -0,0 +1,44 @@
{
  "name": "mkdirp",
  "description": "Recursively mkdir, like `mkdir -p`",
  "version": "1.0.4",
  "main": "index.js",
  "keywords": [
    "mkdir",
    "directory",
    "make dir",
    "make",
    "dir",
    "recursive",
    "native"
  ],
  "repository": {
    "type": "git",
    "url": "https://github.com/isaacs/node-mkdirp.git"
  },
  "scripts": {
    "test": "tap",
    "snap": "tap",
    "preversion": "npm test",
    "postversion": "npm publish",
    "postpublish": "git push origin --follow-tags"
  },
  "tap": {
    "check-coverage": true,
    "coverage-map": "map.js"
  },
  "devDependencies": {
    "require-inject": "^1.4.4",
    "tap": "^14.10.7"
  },
  "bin": "bin/cmd.js",
  "license": "MIT",
  "engines": {
    "node": ">=10"
  },
  "files": [
    "bin",
    "lib",
    "index.js"
  ]
}
266
node_modules/tar/node_modules/mkdirp/readme.markdown
generated
vendored
Normal file
@@ -0,0 +1,266 @@
# mkdirp

Like `mkdir -p`, but in Node.js!

Now with a modern API and no\* bugs!

<small>\* may contain some bugs</small>

# example

## pow.js

```js
const mkdirp = require('mkdirp')

// return value is a Promise resolving to the first directory created
mkdirp('/tmp/foo/bar/baz').then(made =>
  console.log(`made directories, starting with ${made}`))
```

Output (where `/tmp/foo` already exists)

```
made directories, starting with /tmp/foo/bar
```

Or, if you don't have time to wait around for promises:

```js
const mkdirp = require('mkdirp')

// return value is the first directory created
const made = mkdirp.sync('/tmp/foo/bar/baz')
console.log(`made directories, starting with ${made}`)
```

And now /tmp/foo/bar/baz exists, huzzah!

# methods

```js
const mkdirp = require('mkdirp')
```

## mkdirp(dir, [opts]) -> Promise<String | undefined>

Create a new directory and any necessary subdirectories at `dir` with octal
permission string `opts.mode`. If `opts` is a string or number, it will be
treated as the `opts.mode`.

If `opts.mode` isn't specified, it defaults to `0o777 &
(~process.umask())`.

Promise resolves to first directory `made` that had to be created, or
`undefined` if everything already exists. Promise rejects if any errors
are encountered. Note that, in the case of promise rejection, some
directories _may_ have been created, as recursive directory creation is not
an atomic operation.

You can optionally pass in an alternate `fs` implementation by passing in
`opts.fs`. Your implementation should have `opts.fs.mkdir(path, opts, cb)`
and `opts.fs.stat(path, cb)`.

You can also override just one or the other of `mkdir` and `stat` by
passing in `opts.stat` or `opts.mkdir`, or providing an `fs` option that
only overrides one of these.
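If you do override `fs`, a thin wrapper is usually all you need. A minimal sketch, assuming you just want to trace every directory mkdirp actually creates (the `loggingFs` name is illustrative, not part of this package):

```js
const fs = require('fs')
const mkdirp = require('mkdirp')

// Illustrative wrapper: log each mkdir call, delegate the real work to fs.
const loggingFs = {
  mkdir: (path, opts, cb) => {
    console.log('mkdir %s', path)
    fs.mkdir(path, opts, cb)
  },
  stat: fs.stat,
}

mkdirp('/tmp/foo/bar/baz', { fs: loggingFs })
```

Note that overriding `mkdir` opts you out of the native `recursive: true` path (see `lib/use-native.js` earlier in this diff), so the wrapper is called once per missing level.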
## mkdirp.sync(dir, opts) -> String | undefined

Synchronously create a new directory and any necessary subdirectories at
`dir` with octal permission string `opts.mode`. If `opts` is a string or
number, it will be treated as the `opts.mode`.

If `opts.mode` isn't specified, it defaults to `0o777 &
(~process.umask())`.

Returns the first directory that had to be created, or undefined if
everything already exists.

You can optionally pass in an alternate `fs` implementation by passing in
`opts.fs`. Your implementation should have `opts.fs.mkdirSync(path, mode)`
and `opts.fs.statSync(path)`.

You can also override just one or the other of `mkdirSync` and `statSync`
by passing in `opts.statSync` or `opts.mkdirSync`, or providing an `fs`
option that only overrides one of these.

## mkdirp.manual, mkdirp.manualSync

Use the manual implementation (not the native one). This is the default
when the native implementation is not available or the stat/mkdir
implementation is overridden.

## mkdirp.native, mkdirp.nativeSync

Use the native implementation (not the manual one). This is the default
when the native implementation is available and stat/mkdir are not
overridden.
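Both selectors are plain exports, so pinning an implementation is a one-line change; a minimal sketch:

```js
const mkdirp = require('mkdirp')

// Let mkdirp decide (native when available and fs is not overridden):
mkdirp('/tmp/a/b/c')

// Or pin an implementation explicitly:
mkdirp.native('/tmp/a/b/c')   // one fs.mkdir with { recursive: true }
mkdirp.manual('/tmp/a/b/c')   // walks up and mkdirs level by level

// Sync variants mirror the async ones:
mkdirp.nativeSync('/tmp/a/b/c')
mkdirp.manualSync('/tmp/a/b/c')
```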
# implementation

On Node.js v10.12.0 and above, use the native `fs.mkdir(p,
{recursive:true})` option, unless `fs.mkdir`/`fs.mkdirSync` has been
overridden by an option.

## native implementation

- If the path is a root directory, then pass it to the underlying
  implementation and return the result/error. (In this case, it'll either
  succeed or fail, but we aren't actually creating any dirs.)
- Walk up the path statting each directory, to find the first path that
  will be created, `made`.
- Call `fs.mkdir(path, { recursive: true })` (or `fs.mkdirSync`)
- If error, raise it to the caller.
- Return `made`.

## manual implementation

- Call underlying `fs.mkdir` implementation, with `recursive: false`
- If error:
  - If path is a root directory, raise to the caller and do not handle it
  - If ENOENT, mkdirp parent dir, store result as `made`
  - stat(path)
    - If error, raise original `mkdir` error
    - If directory, return `made`
    - Else, raise original `mkdir` error
- else
  - return `undefined` if a root dir, or `made` if set, or `path`
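Condensed into code, the steps above amount to a recursion on `ENOENT` with a stat recheck on failure. A simplified sketch (root and `EROFS` handling trimmed; the full version is `lib/mkdirp-manual.js` earlier in this diff):

```js
const fs = require('fs').promises
const {dirname} = require('path')

// Simplified restatement of the algorithm above, not the shipped code.
const mkdirpManual = (path, made) =>
  fs.mkdir(path).then(() => made || path, er => {
    if (er.code === 'ENOENT') // parent missing: create it, then retry
      return mkdirpManual(dirname(path)).then(m => mkdirpManual(path, m))
    return fs.stat(path).then(st => {
      if (st.isDirectory()) return made // raced into existence: fine
      throw er
    }, () => { throw er })
  })
```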
## windows vs unix caveat

On Windows file systems, attempts to create a root directory (ie, a drive
letter or root UNC path) will fail. If the root directory exists, then it
will fail with `EPERM`. If the root directory does not exist, then it will
fail with `ENOENT`.

On posix file systems, attempts to create a root directory (in recursive
mode) will succeed silently, as it is treated like just another directory
that already exists. (In non-recursive mode, of course, it fails with
`EEXIST`.)

In order to preserve this system-specific behavior (and because it's not as
if we can create the parent of a root directory anyway), attempts to create
a root directory are passed directly to the `fs` implementation, and any
errors encountered are not handled.

## native error caveat

The native implementation (as of at least Node.js v13.4.0) does not provide
appropriate errors in some cases (see
[nodejs/node#31481](https://github.com/nodejs/node/issues/31481) and
[nodejs/node#28015](https://github.com/nodejs/node/issues/28015)).

In order to work around this issue, the native implementation will fall
back to the manual implementation if an `ENOENT` error is encountered.

# choosing a recursive mkdir implementation

There are a few to choose from! Use the one that suits your needs best :D

## use `fs.mkdir(path, {recursive: true}, cb)` if:

- You wish to optimize performance even at the expense of other factors.
- You don't need to know the first dir created.
- You are ok with getting `ENOENT` as the error when some other problem is
  the actual cause.
- You can limit your platforms to Node.js v10.12 and above.
- You're ok with using callbacks instead of promises.
- You don't need/want a CLI.
- You don't need to override the `fs` methods in use.

## use this module (mkdirp 1.x) if:

- You need to know the first directory that was created.
- You wish to use the native implementation if available, but fall back
  when it's not.
- You prefer promise-returning APIs to callback-taking APIs.
- You want more useful error messages than the native recursive mkdir
  provides (at least as of Node.js v13.4), and are ok with re-trying on
  `ENOENT` to achieve this.
- You need (or at least, are ok with) a CLI.
- You need to override the `fs` methods in use.

## use [`make-dir`](http://npm.im/make-dir) if:

- You do not need to know the first dir created (and wish to save a few
  `stat` calls when using the native implementation for this reason).
- You wish to use the native implementation if available, but fall back
  when it's not.
- You prefer promise-returning APIs to callback-taking APIs.
- You are ok with occasionally getting `ENOENT` errors for failures that
  are actually related to something other than a missing file system entry.
- You don't need/want a CLI.
- You need to override the `fs` methods in use.

## use mkdirp 0.x if:

- You need to know the first directory that was created.
- You need (or at least, are ok with) a CLI.
- You need to override the `fs` methods in use.
- You're ok with using callbacks instead of promises.
- You are not running on Windows, where the root-level ENOENT errors can
  lead to infinite regress.
- You think vinyl just sounds warmer and richer for some weird reason.
- You are supporting truly ancient Node.js versions, before even the advent
  of a `Promise` language primitive. (Please don't. You deserve better.)

# cli

This package also ships with a `mkdirp` command.

```
$ mkdirp -h

usage: mkdirp [DIR1,DIR2..] {OPTIONS}

  Create each supplied directory including any necessary parent directories
  that don't yet exist.

  If the directory already exists, do nothing.

OPTIONS are:

  -m<mode>       If a directory needs to be created, set the mode as an octal
  --mode=<mode>  permission string.

  -v --version   Print the mkdirp version number

  -h --help      Print this helpful banner

  -p --print     Print the first directories created for each path provided

  --manual       Use manual implementation, even if native is available
```

# install

With [npm](http://npmjs.org) do:

```
npm install mkdirp
```

to get the library locally, or

```
npm install -g mkdirp
```

to get the command everywhere, or

```
npx mkdirp ...
```

to run the command without installing it globally.

# platform support

This module works on node v8, but only v10 and above are officially
supported, as Node v8 reached its LTS end of life 2020-01-01, which is in
the past, as of this writing.

# license

MIT
15
node_modules/tar/node_modules/yallist/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,15 @@
The ISC License

Copyright (c) Isaac Z. Schlueter and Contributors

Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
204
node_modules/tar/node_modules/yallist/README.md
generated
vendored
Normal file
@@ -0,0 +1,204 @@
# yallist

Yet Another Linked List

There are many doubly-linked list implementations like it, but this
one is mine.

For when an array would be too big, and a Map can't be iterated in
reverse order.


[](https://travis-ci.org/isaacs/yallist) [](https://coveralls.io/github/isaacs/yallist)

## basic usage

```javascript
var yallist = require('yallist')
var myList = yallist.create([1, 2, 3])
myList.push('foo')
myList.unshift('bar')
// of course pop() and shift() are there, too
console.log(myList.toArray()) // ['bar', 1, 2, 3, 'foo']
myList.forEach(function (k) {
  // walk the list head to tail
})
myList.forEachReverse(function (k, index, list) {
  // walk the list tail to head
})
var myDoubledList = myList.map(function (k) {
  return k + k
})
// now myDoubledList contains ['barbar', 2, 4, 6, 'foofoo']
// mapReverse is also a thing
var myDoubledListReverse = myList.mapReverse(function (k) {
  return k + k
}) // ['foofoo', 6, 4, 2, 'barbar']

var reduced = myList.reduce(function (set, entry) {
  set += entry
  return set
}, 'start')
console.log(reduced) // 'startfoo123bar'
```
## api

The whole API is considered "public".

Functions with the same name as an Array method work more or less the
same way.

There are reverse versions of most things because that's the point.

### Yallist

Default export, the class that holds and manages a list.

Call it with either a forEach-able (like an array) or a set of
arguments, to initialize the list.

The Array-ish methods all act like you'd expect. No magic length,
though, so if you change that it won't automatically prune or add
empty spots.

### Yallist.create(..)

Alias for Yallist function. Some people like factories.

#### yallist.head

The first node in the list

#### yallist.tail

The last node in the list

#### yallist.length

The number of nodes in the list. (Change this at your peril. It is
not magic like Array length.)

#### yallist.toArray()

Convert the list to an array.

#### yallist.forEach(fn, [thisp])

Call a function on each item in the list.

#### yallist.forEachReverse(fn, [thisp])

Call a function on each item in the list, in reverse order.

#### yallist.get(n)

Get the data at position `n` in the list. If you use this a lot,
probably better off just using an Array.

#### yallist.getReverse(n)

Get the data at position `n`, counting from the tail.

#### yallist.map(fn, thisp)

Create a new Yallist with the result of calling the function on each
item.

#### yallist.mapReverse(fn, thisp)

Same as `map`, but in reverse.

#### yallist.pop()

Get the data from the list tail, and remove the tail from the list.

#### yallist.push(item, ...)

Insert one or more items to the tail of the list.

#### yallist.reduce(fn, initialValue)

Like Array.reduce.

#### yallist.reduceReverse

Like Array.reduce, but in reverse.

#### yallist.reverse

Reverse the list in place.

#### yallist.shift()

Get the data from the list head, and remove the head from the list.

#### yallist.slice([from], [to])

Just like Array.slice, but returns a new Yallist.

#### yallist.sliceReverse([from], [to])

Just like yallist.slice, but the result is returned in reverse.

#### yallist.toArray()

Create an array representation of the list.

#### yallist.toArrayReverse()

Create a reversed array representation of the list.

#### yallist.unshift(item, ...)

Insert one or more items to the head of the list.

#### yallist.unshiftNode(node)

Move a Node object to the front of the list. (That is, pull it out of
wherever it lives, and make it the new head.)

If the node belongs to a different list, then that list will remove it
first.

#### yallist.pushNode(node)

Move a Node object to the end of the list. (That is, pull it out of
wherever it lives, and make it the new tail.)

If the node belongs to a list already, then that list will remove it
first.

#### yallist.removeNode(node)

Remove a node from the list, preserving referential integrity of head
and tail and other nodes.

Will throw an error if you try to have a list remove a node that
doesn't belong to it.
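Taken together, the three node-level methods are what make yallist work as, say, an LRU backbone: a known node can be promoted or removed in O(1), without scanning. A small sketch:

```javascript
var Yallist = require('yallist')

var list = Yallist.create(['a', 'b', 'c'])
var node = list.head.next        // the Node holding 'b'

list.unshiftNode(node)           // promote 'b' to the head, O(1)
console.log(list.toArray())      // [ 'b', 'a', 'c' ]

list.removeNode(node)            // unlink it; neighbors are re-joined
console.log(list.toArray())      // [ 'a', 'c' ]
```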
### Yallist.Node

The class that holds the data and is actually the list.

Call with `var n = new Node(value, previousNode, nextNode)`

Note that if you do direct operations on Nodes themselves, it's very
easy to get into weird states where the list is broken. Be careful :)

#### node.next

The next node in the list.

#### node.prev

The previous node in the list.

#### node.value

The data the node contains.

#### node.list

The list to which this node belongs. (Null if it does not belong to
any list.)
8
node_modules/tar/node_modules/yallist/iterator.js
generated
vendored
Normal file
@@ -0,0 +1,8 @@
'use strict'
module.exports = function (Yallist) {
  Yallist.prototype[Symbol.iterator] = function* () {
    for (let walker = this.head; walker; walker = walker.next) {
      yield walker.value
    }
  }
}
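Because this augments the prototype with `Symbol.iterator`, a Yallist works anywhere an iterable does. A quick sketch:

```js
'use strict'
var Yallist = require('yallist')

var list = Yallist.create([1, 2, 3])

// for...of walks head to tail through the generator above
for (var value of list) {
  console.log(value) // 1, then 2, then 3
}

// spread syntax works the same way
console.log([...list]) // [ 1, 2, 3 ]
```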
29
node_modules/tar/node_modules/yallist/package.json
generated
vendored
Normal file
@@ -0,0 +1,29 @@
{
  "name": "yallist",
  "version": "4.0.0",
  "description": "Yet Another Linked List",
  "main": "yallist.js",
  "directories": {
    "test": "test"
  },
  "files": [
    "yallist.js",
    "iterator.js"
  ],
  "dependencies": {},
  "devDependencies": {
    "tap": "^12.1.0"
  },
  "scripts": {
    "test": "tap test/*.js --100",
    "preversion": "npm test",
    "postversion": "npm publish",
    "postpublish": "git push origin --all; git push origin --tags"
  },
  "repository": {
    "type": "git",
    "url": "git+https://github.com/isaacs/yallist.git"
  },
  "author": "Isaac Z. Schlueter <i@izs.me> (http://blog.izs.me/)",
  "license": "ISC"
}
426
node_modules/tar/node_modules/yallist/yallist.js
generated
vendored
Normal file
@@ -0,0 +1,426 @@
'use strict'
module.exports = Yallist

Yallist.Node = Node
Yallist.create = Yallist

function Yallist (list) {
  var self = this
  if (!(self instanceof Yallist)) {
    self = new Yallist()
  }

  self.tail = null
  self.head = null
  self.length = 0

  if (list && typeof list.forEach === 'function') {
    list.forEach(function (item) {
      self.push(item)
    })
  } else if (arguments.length > 0) {
    for (var i = 0, l = arguments.length; i < l; i++) {
      self.push(arguments[i])
    }
  }

  return self
}

Yallist.prototype.removeNode = function (node) {
  if (node.list !== this) {
    throw new Error('removing node which does not belong to this list')
  }

  var next = node.next
  var prev = node.prev

  if (next) {
    next.prev = prev
  }

  if (prev) {
    prev.next = next
  }

  if (node === this.head) {
    this.head = next
  }
  if (node === this.tail) {
    this.tail = prev
  }

  node.list.length--
  node.next = null
  node.prev = null
  node.list = null

  return next
}

Yallist.prototype.unshiftNode = function (node) {
  if (node === this.head) {
    return
  }

  if (node.list) {
    node.list.removeNode(node)
  }

  var head = this.head
  node.list = this
  node.next = head
  if (head) {
    head.prev = node
  }

  this.head = node
  if (!this.tail) {
    this.tail = node
  }
  this.length++
}

Yallist.prototype.pushNode = function (node) {
  if (node === this.tail) {
    return
  }

  if (node.list) {
    node.list.removeNode(node)
  }

  var tail = this.tail
  node.list = this
  node.prev = tail
  if (tail) {
    tail.next = node
  }

  this.tail = node
  if (!this.head) {
    this.head = node
  }
  this.length++
}

Yallist.prototype.push = function () {
  for (var i = 0, l = arguments.length; i < l; i++) {
    push(this, arguments[i])
  }
  return this.length
}

Yallist.prototype.unshift = function () {
  for (var i = 0, l = arguments.length; i < l; i++) {
    unshift(this, arguments[i])
  }
  return this.length
}

Yallist.prototype.pop = function () {
  if (!this.tail) {
    return undefined
  }

  var res = this.tail.value
  this.tail = this.tail.prev
  if (this.tail) {
    this.tail.next = null
  } else {
    this.head = null
  }
  this.length--
  return res
}

Yallist.prototype.shift = function () {
  if (!this.head) {
    return undefined
  }

  var res = this.head.value
  this.head = this.head.next
  if (this.head) {
    this.head.prev = null
  } else {
    this.tail = null
  }
  this.length--
  return res
}

Yallist.prototype.forEach = function (fn, thisp) {
  thisp = thisp || this
  for (var walker = this.head, i = 0; walker !== null; i++) {
    fn.call(thisp, walker.value, i, this)
    walker = walker.next
  }
}

Yallist.prototype.forEachReverse = function (fn, thisp) {
  thisp = thisp || this
  for (var walker = this.tail, i = this.length - 1; walker !== null; i--) {
    fn.call(thisp, walker.value, i, this)
    walker = walker.prev
  }
}

Yallist.prototype.get = function (n) {
  for (var i = 0, walker = this.head; walker !== null && i < n; i++) {
    // abort out of the list early if we hit a cycle
    walker = walker.next
  }
  if (i === n && walker !== null) {
    return walker.value
  }
}

Yallist.prototype.getReverse = function (n) {
  for (var i = 0, walker = this.tail; walker !== null && i < n; i++) {
    // abort out of the list early if we hit a cycle
    walker = walker.prev
  }
  if (i === n && walker !== null) {
    return walker.value
  }
}

Yallist.prototype.map = function (fn, thisp) {
  thisp = thisp || this
  var res = new Yallist()
  for (var walker = this.head; walker !== null;) {
    res.push(fn.call(thisp, walker.value, this))
    walker = walker.next
  }
  return res
}

Yallist.prototype.mapReverse = function (fn, thisp) {
  thisp = thisp || this
  var res = new Yallist()
  for (var walker = this.tail; walker !== null;) {
    res.push(fn.call(thisp, walker.value, this))
    walker = walker.prev
  }
  return res
}

Yallist.prototype.reduce = function (fn, initial) {
  var acc
  var walker = this.head
  if (arguments.length > 1) {
    acc = initial
  } else if (this.head) {
    walker = this.head.next
    acc = this.head.value
  } else {
    throw new TypeError('Reduce of empty list with no initial value')
  }

  for (var i = 0; walker !== null; i++) {
    acc = fn(acc, walker.value, i)
    walker = walker.next
  }

  return acc
}

Yallist.prototype.reduceReverse = function (fn, initial) {
  var acc
  var walker = this.tail
  if (arguments.length > 1) {
    acc = initial
  } else if (this.tail) {
    walker = this.tail.prev
    acc = this.tail.value
  } else {
    throw new TypeError('Reduce of empty list with no initial value')
  }

  for (var i = this.length - 1; walker !== null; i--) {
    acc = fn(acc, walker.value, i)
    walker = walker.prev
  }

  return acc
}

Yallist.prototype.toArray = function () {
  var arr = new Array(this.length)
  for (var i = 0, walker = this.head; walker !== null; i++) {
    arr[i] = walker.value
    walker = walker.next
  }
  return arr
}

Yallist.prototype.toArrayReverse = function () {
  var arr = new Array(this.length)
  for (var i = 0, walker = this.tail; walker !== null; i++) {
    arr[i] = walker.value
    walker = walker.prev
  }
  return arr
}

Yallist.prototype.slice = function (from, to) {
  to = to || this.length
  if (to < 0) {
    to += this.length
  }
  from = from || 0
  if (from < 0) {
    from += this.length
  }
  var ret = new Yallist()
  if (to < from || to < 0) {
    return ret
  }
  if (from < 0) {
    from = 0
  }
  if (to > this.length) {
    to = this.length
  }
  for (var i = 0, walker = this.head; walker !== null && i < from; i++) {
    walker = walker.next
  }
  for (; walker !== null && i < to; i++, walker = walker.next) {
    ret.push(walker.value)
  }
  return ret
}

Yallist.prototype.sliceReverse = function (from, to) {
  to = to || this.length
  if (to < 0) {
    to += this.length
  }
  from = from || 0
  if (from < 0) {
    from += this.length
  }
  var ret = new Yallist()
  if (to < from || to < 0) {
    return ret
  }
  if (from < 0) {
    from = 0
  }
  if (to > this.length) {
    to = this.length
  }
  for (var i = this.length, walker = this.tail; walker !== null && i > to; i--) {
    walker = walker.prev
  }
  for (; walker !== null && i > from; i--, walker = walker.prev) {
    ret.push(walker.value)
  }
  return ret
}

Yallist.prototype.splice = function (start, deleteCount, ...nodes) {
  if (start > this.length) {
    start = this.length - 1
  }
  if (start < 0) {
    start = this.length + start;
  }

  for (var i = 0, walker = this.head; walker !== null && i < start; i++) {
    walker = walker.next
  }

  var ret = []
  for (var i = 0; walker && i < deleteCount; i++) {
    ret.push(walker.value)
    walker = this.removeNode(walker)
  }
  if (walker === null) {
    walker = this.tail
  }

  if (walker !== this.head && walker !== this.tail) {
    walker = walker.prev
  }

  for (var i = 0; i < nodes.length; i++) {
    walker = insert(this, walker, nodes[i])
  }
  return ret;
}

Yallist.prototype.reverse = function () {
  var head = this.head
  var tail = this.tail
  for (var walker = head; walker !== null; walker = walker.prev) {
    var p = walker.prev
    walker.prev = walker.next
    walker.next = p
  }
  this.head = tail
  this.tail = head
  return this
}

function insert (self, node, value) {
  var inserted = node === self.head ?
    new Node(value, null, node, self) :
    new Node(value, node, node.next, self)

  if (inserted.next === null) {
    self.tail = inserted
  }
  if (inserted.prev === null) {
    self.head = inserted
  }

  self.length++

  return inserted
}

function push (self, item) {
  self.tail = new Node(item, self.tail, null, self)
  if (!self.head) {
    self.head = self.tail
  }
  self.length++
}

function unshift (self, item) {
  self.head = new Node(item, null, self.head, self)
  if (!self.tail) {
    self.tail = self.head
  }
  self.length++
}

function Node (value, prev, next, list) {
  if (!(this instanceof Node)) {
    return new Node(value, prev, next, list)
  }

  this.list = list
  this.value = value

  if (prev) {
    prev.next = this
    this.prev = prev
  } else {
    this.prev = null
  }

  if (next) {
    next.prev = this
    this.next = next
  } else {
    this.next = null
  }
}

try {
  // add if support for Symbol.iterator is present
  require('./iterator.js')(Yallist)
} catch (er) {}
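Of the Array lookalikes in the file above, `splice` is the only one that both mutates in place and returns the removed values; a short sketch of it alongside `reverse`:

```js
var Yallist = require('yallist')

var list = Yallist.create([1, 2, 3, 4, 5])

// Remove one entry at index 2 and insert 'x' where it was.
var removed = list.splice(2, 1, 'x')
console.log(removed)        // [ 3 ]
console.log(list.toArray()) // [ 1, 2, 'x', 4, 5 ]

list.reverse()              // in place: nodes are relinked, not copied
console.log(list.toArray()) // [ 5, 4, 'x', 2, 1 ]
```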
70
node_modules/tar/package.json
generated
vendored
Normal file
@@ -0,0 +1,70 @@
{
  "author": "GitHub Inc.",
  "name": "tar",
  "description": "tar for node",
  "version": "6.2.1",
  "repository": {
    "type": "git",
    "url": "https://github.com/isaacs/node-tar.git"
  },
  "scripts": {
    "genparse": "node scripts/generate-parse-fixtures.js",
    "snap": "tap",
    "test": "tap"
  },
  "dependencies": {
    "chownr": "^2.0.0",
    "fs-minipass": "^2.0.0",
    "minipass": "^5.0.0",
    "minizlib": "^2.1.1",
    "mkdirp": "^1.0.3",
    "yallist": "^4.0.0"
  },
  "devDependencies": {
    "@npmcli/eslint-config": "^4.0.0",
    "@npmcli/template-oss": "4.11.0",
    "chmodr": "^1.2.0",
    "end-of-stream": "^1.4.3",
    "events-to-array": "^2.0.3",
    "mutate-fs": "^2.1.1",
    "nock": "^13.2.9",
    "rimraf": "^3.0.2",
    "tap": "^16.0.1"
  },
  "license": "ISC",
  "engines": {
    "node": ">=10"
  },
  "files": [
    "bin/",
    "lib/",
    "index.js"
  ],
  "tap": {
    "coverage-map": "map.js",
    "timeout": 0,
    "nyc-arg": [
      "--exclude",
      "tap-snapshots/**"
    ]
  },
  "templateOSS": {
    "//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.",
    "version": "4.11.0",
    "content": "scripts/template-oss",
    "engines": ">=10",
    "distPaths": [
      "index.js"
    ],
    "allowPaths": [
      "/index.js"
    ],
    "ciVersions": [
      "10.x",
      "12.x",
      "14.x",
      "16.x",
      "18.x"
    ]
  }
}