Deployed the page to Github Pages.
This commit is contained in:
parent
1d79754e93
commit
2c89899458
62797 changed files with 6551425 additions and 15279 deletions
20
node_modules/npm-registry-fetch/LICENSE.md
generated
vendored
Normal file
20
node_modules/npm-registry-fetch/LICENSE.md
generated
vendored
Normal file
|
@ -0,0 +1,20 @@
|
|||
<!-- This file is automatically added by @npmcli/template-oss. Do not edit. -->
|
||||
|
||||
ISC License
|
||||
|
||||
Copyright npm, Inc.
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this
|
||||
software for any purpose with or without fee is hereby
|
||||
granted, provided that the above copyright notice and this
|
||||
permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND NPM DISCLAIMS ALL
|
||||
WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO
|
||||
EVENT SHALL NPM BE LIABLE FOR ANY SPECIAL, DIRECT,
|
||||
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
|
||||
WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
|
||||
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE
|
||||
USE OR PERFORMANCE OF THIS SOFTWARE.
|
616
node_modules/npm-registry-fetch/README.md
generated
vendored
Normal file
616
node_modules/npm-registry-fetch/README.md
generated
vendored
Normal file
|
@ -0,0 +1,616 @@
|
|||
# npm-registry-fetch
|
||||
|
||||
[`npm-registry-fetch`](https://github.com/npm/npm-registry-fetch) is a Node.js
|
||||
library that implements a `fetch`-like API for accessing npm registry APIs
|
||||
consistently. It's able to consume npm-style configuration values and has all
|
||||
the necessary logic for picking registries, handling scopes, and dealing with
|
||||
authentication details built-in.
|
||||
|
||||
This package is meant to replace the older
|
||||
[`npm-registry-client`](https://npm.im/npm-registry-client).
|
||||
|
||||
## Example
|
||||
|
||||
```javascript
|
||||
const npmFetch = require('npm-registry-fetch')
|
||||
|
||||
console.log(
|
||||
await npmFetch.json('/-/ping')
|
||||
)
|
||||
```
|
||||
|
||||
## Table of Contents
|
||||
|
||||
* [Installing](#install)
|
||||
* [Example](#example)
|
||||
* [Contributing](#contributing)
|
||||
* [API](#api)
|
||||
* [`fetch`](#fetch)
|
||||
* [`fetch.json`](#fetch-json)
|
||||
* [`fetch` options](#fetch-opts)
|
||||
|
||||
### Install
|
||||
|
||||
`$ npm install npm-registry-fetch`
|
||||
|
||||
### Contributing
|
||||
|
||||
The npm team enthusiastically welcomes contributions and project participation!
|
||||
There's a bunch of things you can do if you want to contribute! The [Contributor
|
||||
Guide](CONTRIBUTING.md) has all the information you need for everything from
|
||||
reporting bugs to contributing entire new features. Please don't hesitate to
|
||||
jump in if you'd like to, or even ask us questions if something isn't clear.
|
||||
|
||||
All participants and maintainers in this project are expected to follow [Code of
|
||||
Conduct](CODE_OF_CONDUCT.md), and just generally be excellent to each other.
|
||||
|
||||
Please refer to the [Changelog](CHANGELOG.md) for project history details, too.
|
||||
|
||||
Happy hacking!
|
||||
|
||||
### API
|
||||
|
||||
#### Caching and `write=true` query strings
|
||||
|
||||
Before performing any PUT or DELETE operation, npm clients first make a
|
||||
GET request to the registry resource being updated, which includes
|
||||
the query string `?write=true`.
|
||||
|
||||
The semantics of this are, effectively, "I intend to write to this thing,
|
||||
and need to know the latest current value, so that my write can land
|
||||
cleanly".
|
||||
|
||||
The public npm registry handles these `?write=true` requests by ensuring
|
||||
that the cache is re-validated before sending a response. In order to
|
||||
maintain the same behavior on the client, and not get tripped up by an
|
||||
overeager local cache when we intend to write data to the registry, any
|
||||
request that comes through `npm-registry-fetch` that contains `write=true`
|
||||
in the query string will forcibly set the `prefer-online` option to `true`,
|
||||
and set both `prefer-offline` and `offline` to false, so that any local
|
||||
cached value will be revalidated.
|
||||
|
||||
#### <a name="fetch"></a> `> fetch(url, [opts]) -> Promise<Response>`
|
||||
|
||||
Performs a request to a given URL.
|
||||
|
||||
The URL can be either a full URL, or a path to one. The appropriate registry
|
||||
will be automatically picked if only a URL path is given.
|
||||
|
||||
For available options, please see the section on [`fetch` options](#fetch-opts).
|
||||
|
||||
##### Example
|
||||
|
||||
```javascript
|
||||
const res = await fetch('/-/ping')
|
||||
console.log(res.headers)
|
||||
res.on('data', d => console.log(d.toString('utf8')))
|
||||
```
|
||||
|
||||
#### <a name="fetch-json"></a> `> fetch.json(url, [opts]) -> Promise<ResponseJSON>`
|
||||
|
||||
Performs a request to a given registry URL, parses the body of the response as
|
||||
JSON, and returns it as its final value. This is a utility shorthand for
|
||||
`fetch(url).then(res => res.json())`.
|
||||
|
||||
For available options, please see the section on [`fetch` options](#fetch-opts).
|
||||
|
||||
##### Example
|
||||
|
||||
```javascript
|
||||
const res = await fetch.json('/-/ping')
|
||||
console.log(res) // Body parsed as JSON
|
||||
```
|
||||
|
||||
#### <a name="fetch-json-stream"></a> `> fetch.json.stream(url, jsonPath, [opts]) -> Stream`
|
||||
|
||||
Performs a request to a given registry URL and parses the body of the response
|
||||
as JSON, with each entry being emitted through the stream.
|
||||
|
||||
The `jsonPath` argument is a [`JSONStream.parse()`
|
||||
path](https://github.com/dominictarr/JSONStream#jsonstreamparsepath), and the
|
||||
returned stream (unlike default `JSONStream`s), has a valid
|
||||
`Symbol.asyncIterator` implementation.
|
||||
|
||||
For available options, please see the section on [`fetch` options](#fetch-opts).
|
||||
|
||||
##### Example
|
||||
|
||||
```javascript
|
||||
console.log('https://npm.im/~zkat has access to the following packages:')
|
||||
for await (let {key, value} of fetch.json.stream('/-/user/zkat/package', '$*')) {
|
||||
console.log(`https://npm.im/${key} (perms: ${value})`)
|
||||
}
|
||||
```
|
||||
|
||||
#### <a name="fetch-opts"></a> `fetch` Options
|
||||
|
||||
Fetch options are optional, and can be passed in as either a Map-like object
|
||||
(one with a `.get()` method), a plain javascript object, or a
|
||||
[`figgy-pudding`](https://npm.im/figgy-pudding) instance.
|
||||
|
||||
##### <a name="opts-agent"></a> `opts.agent`
|
||||
|
||||
* Type: http.Agent
|
||||
* Default: an appropriate agent based on URL protocol and proxy settings
|
||||
|
||||
An [`Agent`](https://nodejs.org/api/http.html#http_class_http_agent) instance to
|
||||
be shared across requests. This allows multiple concurrent `fetch` requests to
|
||||
happen on the same socket.
|
||||
|
||||
You do _not_ need to provide this option unless you want something particularly
|
||||
specialized, since proxy configurations and http/https agents are already
|
||||
automatically managed internally when this option is not passed through.
|
||||
|
||||
##### <a name="opts-body"></a> `opts.body`
|
||||
|
||||
* Type: Buffer | Stream | Object
|
||||
* Default: null
|
||||
|
||||
Request body to send through the outgoing request. Buffers and Streams will be
|
||||
passed through as-is, with a default `content-type` of
|
||||
`application/octet-stream`. Plain JavaScript objects will be `JSON.stringify`ed
|
||||
and the `content-type` will default to `application/json`.
|
||||
|
||||
Use [`opts.headers`](#opts-headers) to set the content-type to something else.
|
||||
|
||||
##### <a name="opts-ca"></a> `opts.ca`
|
||||
|
||||
* Type: String, Array, or null
|
||||
* Default: null
|
||||
|
||||
The Certificate Authority signing certificate that is trusted for SSL
|
||||
connections to the registry. Values should be in PEM format (Windows calls it
|
||||
"Base-64 encoded X.509 (.CER)") with newlines replaced by the string `'\n'`. For
|
||||
example:
|
||||
|
||||
```
|
||||
{
|
||||
ca: '-----BEGIN CERTIFICATE-----\nXXXX\nXXXX\n-----END CERTIFICATE-----'
|
||||
}
|
||||
```
|
||||
|
||||
Set to `null` to only allow "known" registrars, or to a specific CA cert
|
||||
to trust only that specific signing authority.
|
||||
|
||||
Multiple CAs can be trusted by specifying an array of certificates instead of a
|
||||
single string.
|
||||
|
||||
See also [`opts.strictSSL`](#opts-strictSSL), [`opts.ca`](#opts-ca) and
|
||||
[`opts.key`](#opts-key)
|
||||
|
||||
##### <a name="opts-cache"></a> `opts.cache`
|
||||
|
||||
* Type: path
|
||||
* Default: null
|
||||
|
||||
The location of the http cache directory. If provided, certain cachable requests
|
||||
will be cached according to [IETF RFC 7234](https://tools.ietf.org/html/rfc7234)
|
||||
rules. This will speed up future requests, as well as make the cached data
|
||||
available offline if necessary/requested.
|
||||
|
||||
See also [`offline`](#opts-offline), [`preferOffline`](#opts-preferOffline),
|
||||
and [`preferOnline`](#opts-preferOnline).
|
||||
|
||||
##### <a name="opts-cert"></a> `opts.cert`
|
||||
|
||||
* Type: String
|
||||
* Default: null
|
||||
|
||||
A client certificate to pass when accessing the registry. Values should be in
|
||||
PEM format (Windows calls it "Base-64 encoded X.509 (.CER)") with newlines
|
||||
replaced by the string `'\n'`. For example:
|
||||
|
||||
```
|
||||
{
|
||||
cert: '-----BEGIN CERTIFICATE-----\nXXXX\nXXXX\n-----END CERTIFICATE-----'
|
||||
}
|
||||
```
|
||||
|
||||
It is _not_ the path to a certificate file (and there is no "certfile" option).
|
||||
|
||||
See also: [`opts.ca`](#opts-ca) and [`opts.key`](#opts-key)
|
||||
|
||||
##### <a name="opts-fetchRetries"></a> `opts.fetchRetries`
|
||||
|
||||
* Type: Number
|
||||
* Default: 2
|
||||
|
||||
The "retries" config for [`retry`](https://npm.im/retry) to use when fetching
|
||||
packages from the registry.
|
||||
|
||||
See also [`opts.retry`](#opts-retry) to provide all retry options as a single
|
||||
object.
|
||||
|
||||
##### <a name="opts-fetchRetryFactor"></a> `opts.fetchRetryFactor`
|
||||
|
||||
* Type: Number
|
||||
* Default: 10
|
||||
|
||||
The "factor" config for [`retry`](https://npm.im/retry) to use when fetching
|
||||
packages.
|
||||
|
||||
See also [`opts.retry`](#opts-retry) to provide all retry options as a single
|
||||
object.
|
||||
|
||||
##### <a name="opts-fetchRetryMintimeout"></a> `opts.fetchRetryMintimeout`
|
||||
|
||||
* Type: Number
|
||||
* Default: 10000 (10 seconds)
|
||||
|
||||
The "minTimeout" config for [`retry`](https://npm.im/retry) to use when fetching
|
||||
packages.
|
||||
|
||||
See also [`opts.retry`](#opts-retry) to provide all retry options as a single
|
||||
object.
|
||||
|
||||
##### <a name="opts-fetchRetryMaxtimeout"></a> `opts.fetchRetryMaxtimeout`
|
||||
|
||||
* Type: Number
|
||||
* Default: 60000 (1 minute)
|
||||
|
||||
The "maxTimeout" config for [`retry`](https://npm.im/retry) to use when fetching
|
||||
packages.
|
||||
|
||||
See also [`opts.retry`](#opts-retry) to provide all retry options as a single
|
||||
object.
|
||||
|
||||
##### <a name="opts-forceAuth"></a> `opts.forceAuth`
|
||||
|
||||
* Type: Object
|
||||
* Default: null
|
||||
|
||||
If present, other auth-related values in `opts` will be completely ignored,
|
||||
including `alwaysAuth`, `email`, and `otp`, when calculating auth for a request,
|
||||
and the auth details in `opts.forceAuth` will be used instead.
|
||||
|
||||
##### <a name="opts-gzip"></a> `opts.gzip`
|
||||
|
||||
* Type: Boolean
|
||||
* Default: false
|
||||
|
||||
If true, `npm-registry-fetch` will set the `Content-Encoding` header to `gzip`
|
||||
and use `zlib.gzip()` or `zlib.createGzip()` to gzip-encode
|
||||
[`opts.body`](#opts-body).
|
||||
|
||||
##### <a name="opts-headers"></a> `opts.headers`
|
||||
|
||||
* Type: Object
|
||||
* Default: null
|
||||
|
||||
Additional headers for the outgoing request. This option can also be used to
|
||||
override headers automatically generated by `npm-registry-fetch`, such as
|
||||
`Content-Type`.
|
||||
|
||||
##### <a name="opts-ignoreBody"></a> `opts.ignoreBody`
|
||||
|
||||
* Type: Boolean
|
||||
* Default: false
|
||||
|
||||
If true, the **response body** will be thrown away and `res.body` set to `null`.
|
||||
This will prevent dangling response sockets for requests where you don't usually
|
||||
care what the response body is.
|
||||
|
||||
##### <a name="opts-integrity"></a> `opts.integrity`
|
||||
|
||||
* Type: String | [SRI object](https://npm.im/ssri)
|
||||
* Default: null
|
||||
|
||||
If provided, the response body will be verified against this integrity string,
|
||||
using [`ssri`](https://npm.im/ssri). If verification succeeds, the response will
|
||||
complete as normal. If verification fails, the response body will error with an
|
||||
`EINTEGRITY` error.
|
||||
|
||||
Body integrity is only verified if the body is actually consumed to completion --
|
||||
that is, if you use `res.json()`/`res.buffer()`, or if you consume the default
|
||||
`res` stream data to its end.
|
||||
|
||||
Cached data will have its integrity automatically verified using the
|
||||
previously-generated integrity hash for the saved request information, so
|
||||
`EINTEGRITY` errors can happen if [`opts.cache`](#opts-cache) is used, even if
|
||||
`opts.integrity` is not passed in.
|
||||
|
||||
##### <a name="opts-key"></a> `opts.key`
|
||||
|
||||
* Type: String
|
||||
* Default: null
|
||||
|
||||
A client key to pass when accessing the registry. Values should be in PEM
|
||||
format with newlines replaced by the string `'\n'`. For example:
|
||||
|
||||
```
|
||||
{
|
||||
key: '-----BEGIN PRIVATE KEY-----\nXXXX\nXXXX\n-----END PRIVATE KEY-----'
|
||||
}
|
||||
```
|
||||
|
||||
It is _not_ the path to a key file (and there is no "keyfile" option).
|
||||
|
||||
See also: [`opts.ca`](#opts-ca) and [`opts.cert`](#opts-cert)
|
||||
|
||||
##### <a name="opts-localAddress"></a> `opts.localAddress`
|
||||
|
||||
* Type: IP Address String
|
||||
* Default: null
|
||||
|
||||
The IP address of the local interface to use when making connections
|
||||
to the registry.
|
||||
|
||||
See also [`opts.proxy`](#opts-proxy)
|
||||
|
||||
##### <a name="opts-mapJSON"></a> `opts.mapJSON`
|
||||
|
||||
* Type: Function
|
||||
* Default: undefined
|
||||
|
||||
When using `fetch.json.stream()` (NOT `fetch.json()`), this will be passed down
|
||||
to [`JSONStream`](https://npm.im/JSONStream) as the second argument to
|
||||
`JSONStream.parse`, and can be used to transform stream data before output.
|
||||
|
||||
##### <a name="opts-maxSockets"></a> `opts.maxSockets`
|
||||
|
||||
* Type: Integer
|
||||
* Default: 12
|
||||
|
||||
Maximum number of sockets to keep open during requests. Has no effect if
|
||||
[`opts.agent`](#opts-agent) is used.
|
||||
|
||||
##### <a name="opts-method"></a> `opts.method`
|
||||
|
||||
* Type: String
|
||||
* Default: 'GET'
|
||||
|
||||
HTTP method to use for the outgoing request. Case-insensitive.
|
||||
|
||||
##### <a name="opts-noProxy"></a> `opts.noProxy`
|
||||
|
||||
* Type: String | String[]
|
||||
* Default: process.env.NOPROXY
|
||||
|
||||
If present, should be a comma-separated string or an array of domain extensions
|
||||
that a proxy should _not_ be used for.
|
||||
|
||||
##### <a name="opts-npmSession"></a> `opts.npmSession`
|
||||
|
||||
* Type: String
|
||||
* Default: null
|
||||
|
||||
If provided, will be sent in the `npm-session` header. This header is used by
|
||||
the npm registry to identify individual user sessions (usually individual
|
||||
invocations of the CLI).
|
||||
|
||||
##### <a name="opts-npmCommand"></a> `opts.npmCommand`
|
||||
|
||||
* Type: String
|
||||
* Default: null
|
||||
|
||||
If provided, it will be sent in the `npm-command` header. This header is
|
||||
used by the npm registry to identify the npm command that caused this
|
||||
request to be made.
|
||||
|
||||
##### <a name="opts-offline"></a> `opts.offline`
|
||||
|
||||
* Type: Boolean
|
||||
* Default: false
|
||||
|
||||
Force offline mode: no network requests will be done during install. To allow
|
||||
`npm-registry-fetch` to fill in missing cache data, see
|
||||
[`opts.preferOffline`](#opts-preferOffline).
|
||||
|
||||
This option is only really useful if you're also using
|
||||
[`opts.cache`](#opts-cache).
|
||||
|
||||
This option is set to `false` when the request includes `write=true` in the
|
||||
query string.
|
||||
|
||||
##### <a name="opts-otp"></a> `opts.otp`
|
||||
|
||||
* Type: Number | String
|
||||
* Default: null
|
||||
|
||||
This is a one-time password from a two-factor authenticator. It is required for
|
||||
certain registry interactions when two-factor auth is enabled for a user
|
||||
account.
|
||||
|
||||
##### <a name="opts-otpPrompt"></a> `opts.otpPrompt`
|
||||
|
||||
* Type: Function
|
||||
* Default: null
|
||||
|
||||
This is a method which will be called to provide an OTP if the server
|
||||
responds with a 401 response indicating that a one-time-password is
|
||||
required.
|
||||
|
||||
It may return a promise, which must resolve to the OTP value to be used.
|
||||
If the method fails to provide an OTP value, then the fetch will fail with
|
||||
the auth error that indicated an OTP was needed.
|
||||
|
||||
##### <a name="opts-password"></a> `opts.password`
|
||||
|
||||
* Alias: `_password`
|
||||
* Type: String
|
||||
* Default: null
|
||||
|
||||
Password used for basic authentication. For the more modern authentication
|
||||
method, please use the (more secure) [`opts.token`](#opts-token)
|
||||
|
||||
Can optionally be scoped to a registry by using a "nerf dart" for that registry.
|
||||
That is:
|
||||
|
||||
```
|
||||
{
|
||||
'//registry.npmjs.org/:password': 't0k3nH34r'
|
||||
}
|
||||
```
|
||||
|
||||
See also [`opts.username`](#opts-username)
|
||||
|
||||
##### <a name="opts-preferOffline"></a> `opts.preferOffline`
|
||||
|
||||
* Type: Boolean
|
||||
* Default: false
|
||||
|
||||
If true, staleness checks for cached data will be bypassed, but missing data
|
||||
will be requested from the server. To force full offline mode, use
|
||||
[`opts.offline`](#opts-offline).
|
||||
|
||||
This option is generally only useful if you're also using
|
||||
[`opts.cache`](#opts-cache).
|
||||
|
||||
This option is set to `false` when the request includes `write=true` in the
|
||||
query string.
|
||||
|
||||
##### <a name="opts-preferOnline"></a> `opts.preferOnline`
|
||||
|
||||
* Type: Boolean
|
||||
* Default: false
|
||||
|
||||
If true, staleness checks for cached data will be forced, making the CLI look
|
||||
for updates immediately even for fresh package data.
|
||||
|
||||
This option is generally only useful if you're also using
|
||||
[`opts.cache`](#opts-cache).
|
||||
|
||||
This option is set to `true` when the request includes `write=true` in the
|
||||
query string.
|
||||
|
||||
##### <a name="opts-scope"></a> `opts.scope`
|
||||
|
||||
* Type: String
|
||||
* Default: null
|
||||
|
||||
If provided, will be sent in the `npm-scope` header. This header is used by the
|
||||
npm registry to identify the toplevel package scope that a particular project
|
||||
installation is using.
|
||||
|
||||
##### <a name="opts-proxy"></a> `opts.proxy`
|
||||
|
||||
* Type: url
|
||||
* Default: null
|
||||
|
||||
A proxy to use for outgoing http requests. If not passed in, the `HTTP(S)_PROXY`
|
||||
environment variable will be used.
|
||||
|
||||
##### <a name="opts-query"></a> `opts.query`
|
||||
|
||||
* Type: String | Object
|
||||
* Default: null
|
||||
|
||||
If provided, the request URI will have a query string appended to it using this
|
||||
query. If `opts.query` is an object, it will be converted to a query string
|
||||
using
|
||||
[`querystring.stringify()`](https://nodejs.org/api/querystring.html#querystring_querystring_stringify_obj_sep_eq_options).
|
||||
|
||||
If the request URI already has a query string, it will be merged with
|
||||
`opts.query`, preferring `opts.query` values.
|
||||
|
||||
##### <a name="opts-registry"></a> `opts.registry`
|
||||
|
||||
* Type: URL
|
||||
* Default: `'https://registry.npmjs.org'`
|
||||
|
||||
Registry configuration for a request. If a request URL only includes the URL
|
||||
path, this registry setting will be prepended.
|
||||
|
||||
See also [`opts.scope`](#opts-scope), [`opts.spec`](#opts-spec), and
|
||||
[`opts.<scope>:registry`](#opts-scope-registry) which can all affect the actual
|
||||
registry URL used by the outgoing request.
|
||||
|
||||
##### <a name="opts-retry"></a> `opts.retry`
|
||||
|
||||
* Type: Object
|
||||
* Default: null
|
||||
|
||||
Single-object configuration for request retry settings. If passed in, will
|
||||
override individually-passed `fetch-retry-*` settings.
|
||||
|
||||
##### <a name="opts-scope"></a> `opts.scope`
|
||||
|
||||
* Type: String
|
||||
* Default: null
|
||||
|
||||
Associate an operation with a scope for a scoped registry. This option can force
|
||||
lookup of scope-specific registries and authentication.
|
||||
|
||||
See also [`opts.<scope>:registry`](#opts-scope-registry) and
|
||||
[`opts.spec`](#opts-spec) for interactions with this option.
|
||||
|
||||
##### <a name="opts-scope-registry"></a> `opts.<scope>:registry`
|
||||
|
||||
* Type: String
|
||||
* Default: null
|
||||
|
||||
This option type can be used to configure the registry used for requests
|
||||
involving a particular scope. For example, `opts['@myscope:registry'] =
|
||||
'https://scope-specific.registry/'` will make it so requests go out to this
|
||||
registry instead of [`opts.registry`](#opts-registry) when
|
||||
[`opts.scope`](#opts-scope) is used, or when [`opts.spec`](#opts-spec) is a
|
||||
scoped package spec.
|
||||
|
||||
The `@` before the scope name is optional, but recommended.
|
||||
|
||||
##### <a name="opts-spec"></a> `opts.spec`
|
||||
|
||||
* Type: String | [`npm-package-arg`](https://npm.im/npm-package-arg) object.
|
||||
* Default: null
|
||||
|
||||
If provided, can be used to automatically configure [`opts.scope`](#opts-scope)
|
||||
based on a specific package name. Non-registry package specs will throw an
|
||||
error.
|
||||
|
||||
##### <a name="opts-strictSSL"></a> `opts.strictSSL`
|
||||
|
||||
* Type: Boolean
|
||||
* Default: true
|
||||
|
||||
Whether or not to do SSL key validation when making requests to the
|
||||
registry via https.
|
||||
|
||||
See also [`opts.ca`](#opts-ca).
|
||||
|
||||
##### <a name="opts-timeout"></a> `opts.timeout`
|
||||
|
||||
* Type: Milliseconds
|
||||
* Default: 300000 (5 minutes)
|
||||
|
||||
Time before a hanging request times out.
|
||||
|
||||
##### <a name="opts-authtoken"></a> `opts._authToken`
|
||||
|
||||
* Type: String
|
||||
* Default: null
|
||||
|
||||
Authentication token string.
|
||||
|
||||
Can be scoped to a registry by using a "nerf dart" for that registry. That is:
|
||||
|
||||
```
|
||||
{
|
||||
'//registry.npmjs.org/:_authToken': 't0k3nH34r'
|
||||
}
|
||||
```
|
||||
|
||||
##### <a name="opts-userAgent"></a> `opts.userAgent`
|
||||
|
||||
* Type: String
|
||||
* Default: `'npm-registry-fetch@<version>/node@<node-version>+<arch> (<platform>)'`
|
||||
|
||||
User agent string to send in the `User-Agent` header.
|
||||
|
||||
##### <a name="opts-username"></a> `opts.username`
|
||||
|
||||
* Type: String
|
||||
* Default: null
|
||||
|
||||
Username used for basic authentication. For the more modern authentication
|
||||
method, please use the (more secure) [`opts.authtoken`](#opts-authtoken)
|
||||
|
||||
Can optionally be scoped to a registry by using a "nerf dart" for that registry.
|
||||
That is:
|
||||
|
||||
```
|
||||
{
|
||||
'//registry.npmjs.org/:username': 't0k3nH34r'
|
||||
}
|
||||
```
|
||||
|
||||
See also [`opts.password`](#opts-password)
|
181
node_modules/npm-registry-fetch/lib/auth.js
generated
vendored
Normal file
181
node_modules/npm-registry-fetch/lib/auth.js
generated
vendored
Normal file
|
@ -0,0 +1,181 @@
|
|||
'use strict'
|
||||
const fs = require('fs')
|
||||
const npa = require('npm-package-arg')
|
||||
const { URL } = require('url')
|
||||
|
||||
// Find the longest registry key that is used for some kind of auth
|
||||
// in the options. Returns the registry key and the auth config.
|
||||
// Find the longest registry key prefix of `uri` that has some kind of
// auth configured in `opts`. Walks the URL path upward one segment (or
// bare trailing slash) at a time until only '//' remains.
// Returns { regKey, authKey }; regKey is false when nothing matched.
const regFromURI = (uri, opts) => {
  const { host, pathname } = new URL(uri)
  // config keys can be either //host/some/path/:_auth or
  // //host/some/path:_auth, so strip EITHER the final path segment OR
  // the trailing slash itself on each step
  const shorten = key => key.replace(/([^/]+|\/)$/, '')
  for (let regKey = `//${host}${pathname}`; regKey.length > '//'.length; regKey = shorten(regKey)) {
    // can be one of :_authToken, :_auth, :_password and :username, or
    // :certfile and :keyfile
    const authKey = hasAuth(regKey, opts)
    if (authKey) {
      // got some auth for this URI
      return { regKey, authKey }
    }
  }
  return { regKey: false, authKey: null }
}
|
||||
|
||||
// Not only do we want to know if there is auth, but if we are calling `npm
|
||||
// logout` we want to know what config value specifically provided it. This is
|
||||
// so we can look up where the config came from to delete it (i.e. user vs
|
||||
// project)
|
||||
// Report which config key provides auth for the given registry key
// prefix. Returning the specific key name (rather than a boolean) lets
// `npm logout` find where the config came from in order to delete it.
// Returns false when no auth is configured for this prefix.
const hasAuth = (regKey, opts) => {
  const has = suffix => Boolean(opts[`${regKey}:${suffix}`])
  if (has('_authToken')) {
    return '_authToken'
  }
  if (has('_auth')) {
    return '_auth'
  }
  // 'password' can be inferred to also be present
  if (has('username') && has('_password')) {
    return 'username'
  }
  // 'keyfile' can be inferred to also be present
  if (has('certfile') && has('keyfile')) {
    return 'certfile'
  }
  return false
}
|
||||
|
||||
// True when two URLs point at the same host (hostname plus port).
const sameHost = (a, b) => new URL(a).host === new URL(b).host
|
||||
|
||||
// Pick the registry URL to use for a request: a scope-specific
// `<scope>:registry` config (with the scope derived from opts.spec,
// when one is given) wins over the general opts.registry.
const getRegistry = opts => {
  let scope
  if (opts.spec) {
    const parsed = npa(opts.spec)
    // an aliased spec carries the real package's scope on subSpec
    scope = parsed.subSpec ? parsed.subSpec.scope : parsed.scope
  }
  const scopedRegistry = scope && opts[`${scope}:registry`]
  return scopedRegistry || opts.registry
}
|
||||
|
||||
// Synchronously read a file as utf8, returning null when it does not
// exist. Any error other than ENOENT is rethrown.
const maybeReadFile = file => {
  let contents = null
  try {
    contents = fs.readFileSync(file, 'utf8')
  } catch (er) {
    // a missing file is an expected outcome; anything else is a real failure
    if (er.code !== 'ENOENT') {
      throw er
    }
  }
  return contents
}
|
||||
|
||||
// Resolve the auth details for a request to `uri`.
// Resolution order:
//   1. opts.forceAuth, when present, is the only source consulted
//   2. auth configured for the longest matching prefix of `uri`
//   3. auth configured for the registry derived from opts.spec /
//      opts.registry — either resolved directly (same host) or tracked
//      as scopeAuthKey so a later 4xx can warn the user
// Throws when `uri` is missing. Always returns an Auth instance, which
// may be empty when no credentials are configured.
const getAuth = (uri, opts = {}) => {
  const { forceAuth } = opts
  if (!uri) {
    throw new Error('URI is required')
  }
  // look up auth config keyed by the longest matching URI prefix
  const { regKey, authKey } = regFromURI(uri, forceAuth || opts)

  // we are only allowed to use what's in forceAuth if specified
  if (forceAuth && !regKey) {
    return new Auth({
      // if we force auth we don't want to refer back to anything in config
      regKey: false,
      authKey: null,
      scopeAuthKey: null,
      token: forceAuth._authToken || forceAuth.token,
      username: forceAuth.username,
      password: forceAuth._password || forceAuth.password,
      auth: forceAuth._auth || forceAuth.auth,
      certfile: forceAuth.certfile,
      keyfile: forceAuth.keyfile,
    })
  }

  // no auth for this URI, but might have it for the registry
  if (!regKey) {
    const registry = getRegistry(opts)
    if (registry && uri !== registry && sameHost(uri, registry)) {
      // same host as the configured registry: re-resolve against it
      return getAuth(registry, opts)
    } else if (registry !== opts.registry) {
      // If making a tarball request to a different base URI than the
      // registry where we logged in, but the same auth SHOULD be sent
      // to that artifact host, then we track where it was coming in from,
      // and warn the user if we get a 4xx error on it.
      const { regKey: scopeAuthKey, authKey: _authKey } = regFromURI(registry, opts)
      return new Auth({ scopeAuthKey, regKey: scopeAuthKey, authKey: _authKey })
    }
  }

  // pull whichever credential values exist for the matched registry key;
  // with regKey === false these all come back undefined, yielding an
  // Auth with no credentials
  const {
    [`${regKey}:_authToken`]: token,
    [`${regKey}:username`]: username,
    [`${regKey}:_password`]: password,
    [`${regKey}:_auth`]: auth,
    [`${regKey}:certfile`]: certfile,
    [`${regKey}:keyfile`]: keyfile,
  } = opts

  return new Auth({
    scopeAuthKey: null,
    regKey,
    authKey,
    token,
    auth,
    username,
    password,
    certfile,
    keyfile,
  })
}
|
||||
|
||||
// Value object holding the credentials resolved for a request: at most
// one of token / auth (basic or raw) plus optional mTLS cert and key.
class Auth {
  constructor ({
    token,
    auth,
    username,
    password,
    scopeAuthKey,
    certfile,
    keyfile,
    regKey,
    authKey,
  }) {
    Object.assign(this, {
      // same as regKey but only present for scoped auth. Should have
      // been named scopeRegKey
      scopeAuthKey,
      // `${regKey}:${authKey}` locates the config entry that gave us auth
      regKey,
      authKey,
      token: null,
      auth: null,
      isBasicAuth: false,
      cert: null,
      key: null,
    })

    // exactly one credential style is kept, in priority order:
    // bearer token, then a pre-built auth string, then username+password
    if (token) {
      this.token = token
    } else if (auth) {
      this.auth = auth
    } else if (username && password) {
      // stored passwords are base64-encoded; decode before building
      // the basic-auth string
      const decoded = Buffer.from(password, 'base64').toString('utf8')
      this.auth = Buffer.from(`${username}:${decoded}`, 'utf8').toString('base64')
      this.isBasicAuth = true
    }
    // mTLS may be used in conjunction with another auth method above
    if (certfile && keyfile) {
      const cert = maybeReadFile(certfile, 'utf-8')
      const key = maybeReadFile(keyfile, 'utf-8')
      // only keep the pair when BOTH files were readable
      if (cert && key) {
        this.cert = cert
        this.key = key
      }
    }
  }
}
|
||||
|
||||
module.exports = getAuth
|
100
node_modules/npm-registry-fetch/lib/check-response.js
generated
vendored
Normal file
100
node_modules/npm-registry-fetch/lib/check-response.js
generated
vendored
Normal file
|
@ -0,0 +1,100 @@
|
|||
'use strict'
|
||||
|
||||
const errors = require('./errors.js')
|
||||
const { Response } = require('minipass-fetch')
|
||||
const defaultOpts = require('./default-opts.js')
|
||||
const { log } = require('proc-log')
|
||||
const { redact: cleanUrl } = require('@npmcli/redact')
|
||||
|
||||
/* eslint-disable-next-line max-len */
|
||||
const moreInfoUrl = 'https://github.com/npm/cli/wiki/No-auth-for-URI,-but-auth-present-for-scoped-registry'
|
||||
// Post-process a registry response: surface server `npm-notice`
// headers, log the request, warn when auth existed for the scoped
// registry but not for this URI, convert >=400 statuses into thrown
// errors (via checkErrors), and optionally discard the body.
const checkResponse =
  async ({ method, uri, res, startTime, auth, opts }) => {
    opts = { ...defaultOpts, ...opts }
    // surface server-sent notices, but not ones replayed from the local cache
    if (res.headers.has('npm-notice') && !res.headers.has('x-local-cache')) {
      log.notice('', res.headers.get('npm-notice'))
    }

    if (res.status >= 400) {
      logRequest(method, res, startTime)
      if (auth && auth.scopeAuthKey && !auth.token && !auth.auth) {
        // we didn't have auth for THIS request, but we do have auth for
        // requests to the registry indicated by the spec's scope value.
        // Warn the user.
        log.warn('registry', `No auth for URI, but auth present for scoped registry.

URI: ${uri}
Scoped Registry Key: ${auth.scopeAuthKey}

More info here: ${moreInfoUrl}`)
      }
      return checkErrors(method, res, startTime, opts)
    } else {
      // defer logging until the body has actually been fully consumed
      res.body.on('end', () => logRequest(method, res, startTime, opts))
      if (opts.ignoreBody) {
        // drain the stream so the socket does not dangle, and hand back
        // a Response with a null body
        res.body.resume()
        return new Response(null, res)
      }
      return res
    }
  }
|
||||
module.exports = checkResponse
|
||||
|
||||
function logRequest (method, res, startTime) {
|
||||
const elapsedTime = Date.now() - startTime
|
||||
const attempt = res.headers.get('x-fetch-attempts')
|
||||
const attemptStr = attempt && attempt > 1 ? ` attempt #${attempt}` : ''
|
||||
const cacheStatus = res.headers.get('x-local-cache-status')
|
||||
const cacheStr = cacheStatus ? ` (cache ${cacheStatus})` : ''
|
||||
const urlStr = cleanUrl(res.url)
|
||||
|
||||
log.http(
|
||||
'fetch',
|
||||
`${method.toUpperCase()} ${res.status} ${urlStr} ${elapsedTime}ms${attemptStr}${cacheStr}`
|
||||
)
|
||||
}
|
||||
|
||||
function checkErrors (method, res, startTime, opts) {
|
||||
return res.buffer()
|
||||
.catch(() => null)
|
||||
.then(body => {
|
||||
let parsed = body
|
||||
try {
|
||||
parsed = JSON.parse(body.toString('utf8'))
|
||||
} catch {
|
||||
// ignore errors
|
||||
}
|
||||
if (res.status === 401 && res.headers.get('www-authenticate')) {
|
||||
const auth = res.headers.get('www-authenticate')
|
||||
.split(/,\s*/)
|
||||
.map(s => s.toLowerCase())
|
||||
if (auth.indexOf('ipaddress') !== -1) {
|
||||
throw new errors.HttpErrorAuthIPAddress(
|
||||
method, res, parsed, opts.spec
|
||||
)
|
||||
} else if (auth.indexOf('otp') !== -1) {
|
||||
throw new errors.HttpErrorAuthOTP(
|
||||
method, res, parsed, opts.spec
|
||||
)
|
||||
} else {
|
||||
throw new errors.HttpErrorAuthUnknown(
|
||||
method, res, parsed, opts.spec
|
||||
)
|
||||
}
|
||||
} else if (
|
||||
res.status === 401 &&
|
||||
body != null &&
|
||||
/one-time pass/.test(body.toString('utf8'))
|
||||
) {
|
||||
// Heuristic for malformed OTP responses that don't include the
|
||||
// www-authenticate header.
|
||||
throw new errors.HttpErrorAuthOTP(
|
||||
method, res, parsed, opts.spec
|
||||
)
|
||||
} else {
|
||||
throw new errors.HttpErrorGeneral(
|
||||
method, res, parsed, opts.spec
|
||||
)
|
||||
}
|
||||
})
|
||||
}
|
19
node_modules/npm-registry-fetch/lib/default-opts.js
generated
vendored
Normal file
19
node_modules/npm-registry-fetch/lib/default-opts.js
generated
vendored
Normal file
|
@ -0,0 +1,19 @@
|
|||
const pkg = require('../package.json')
|
||||
module.exports = {
|
||||
maxSockets: 12,
|
||||
method: 'GET',
|
||||
registry: 'https://registry.npmjs.org/',
|
||||
timeout: 5 * 60 * 1000, // 5 minutes
|
||||
strictSSL: true,
|
||||
noProxy: process.env.NOPROXY,
|
||||
userAgent: `${pkg.name
|
||||
}@${
|
||||
pkg.version
|
||||
}/node@${
|
||||
process.version
|
||||
}+${
|
||||
process.arch
|
||||
} (${
|
||||
process.platform
|
||||
})`,
|
||||
}
|
80
node_modules/npm-registry-fetch/lib/errors.js
generated
vendored
Normal file
80
node_modules/npm-registry-fetch/lib/errors.js
generated
vendored
Normal file
|
@ -0,0 +1,80 @@
|
|||
'use strict'
|
||||
|
||||
const { URL } = require('node:url')
|
||||
|
||||
function packageName (href) {
|
||||
try {
|
||||
let basePath = new URL(href).pathname.slice(1)
|
||||
if (!basePath.match(/^-/)) {
|
||||
basePath = basePath.split('/')
|
||||
var index = basePath.indexOf('_rewrite')
|
||||
if (index === -1) {
|
||||
index = basePath.length - 1
|
||||
} else {
|
||||
index++
|
||||
}
|
||||
return decodeURIComponent(basePath[index])
|
||||
}
|
||||
} catch {
|
||||
// this is ok
|
||||
}
|
||||
}
|
||||
|
||||
class HttpErrorBase extends Error {
|
||||
constructor (method, res, body, spec) {
|
||||
super()
|
||||
this.name = this.constructor.name
|
||||
this.headers = typeof res.headers?.raw === 'function' ? res.headers.raw() : res.headers
|
||||
this.statusCode = res.status
|
||||
this.code = `E${res.status}`
|
||||
this.method = method
|
||||
this.uri = res.url
|
||||
this.body = body
|
||||
this.pkgid = spec ? spec.toString() : packageName(res.url)
|
||||
Error.captureStackTrace(this, this.constructor)
|
||||
}
|
||||
}
|
||||
|
||||
class HttpErrorGeneral extends HttpErrorBase {
|
||||
constructor (method, res, body, spec) {
|
||||
super(method, res, body, spec)
|
||||
this.message = `${res.status} ${res.statusText} - ${
|
||||
this.method.toUpperCase()
|
||||
} ${
|
||||
this.spec || this.uri
|
||||
}${
|
||||
(body && body.error) ? ' - ' + body.error : ''
|
||||
}`
|
||||
}
|
||||
}
|
||||
|
||||
class HttpErrorAuthOTP extends HttpErrorBase {
|
||||
constructor (method, res, body, spec) {
|
||||
super(method, res, body, spec)
|
||||
this.message = 'OTP required for authentication'
|
||||
this.code = 'EOTP'
|
||||
}
|
||||
}
|
||||
|
||||
class HttpErrorAuthIPAddress extends HttpErrorBase {
|
||||
constructor (method, res, body, spec) {
|
||||
super(method, res, body, spec)
|
||||
this.message = 'Login is not allowed from your IP address'
|
||||
this.code = 'EAUTHIP'
|
||||
}
|
||||
}
|
||||
|
||||
class HttpErrorAuthUnknown extends HttpErrorBase {
|
||||
constructor (method, res, body, spec) {
|
||||
super(method, res, body, spec)
|
||||
this.message = 'Unable to authenticate, need: ' + res.headers.get('www-authenticate')
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = {
|
||||
HttpErrorBase,
|
||||
HttpErrorGeneral,
|
||||
HttpErrorAuthOTP,
|
||||
HttpErrorAuthIPAddress,
|
||||
HttpErrorAuthUnknown,
|
||||
}
|
247
node_modules/npm-registry-fetch/lib/index.js
generated
vendored
Normal file
247
node_modules/npm-registry-fetch/lib/index.js
generated
vendored
Normal file
|
@ -0,0 +1,247 @@
|
|||
'use strict'
|
||||
|
||||
const { HttpErrorAuthOTP } = require('./errors.js')
|
||||
const checkResponse = require('./check-response.js')
|
||||
const getAuth = require('./auth.js')
|
||||
const fetch = require('make-fetch-happen')
|
||||
const JSONStream = require('./json-stream')
|
||||
const npa = require('npm-package-arg')
|
||||
const qs = require('querystring')
|
||||
const url = require('url')
|
||||
const zlib = require('minizlib')
|
||||
const { Minipass } = require('minipass')
|
||||
|
||||
const defaultOpts = require('./default-opts.js')
|
||||
|
||||
// WhatWG URL throws if it's not fully resolved
|
||||
const urlIsValid = u => {
|
||||
try {
|
||||
return !!new url.URL(u)
|
||||
} catch (_) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = regFetch
|
||||
function regFetch (uri, /* istanbul ignore next */ opts_ = {}) {
|
||||
const opts = {
|
||||
...defaultOpts,
|
||||
...opts_,
|
||||
}
|
||||
|
||||
// if we did not get a fully qualified URI, then we look at the registry
|
||||
// config or relevant scope to resolve it.
|
||||
const uriValid = urlIsValid(uri)
|
||||
let registry = opts.registry || defaultOpts.registry
|
||||
if (!uriValid) {
|
||||
registry = opts.registry = (
|
||||
(opts.spec && pickRegistry(opts.spec, opts)) ||
|
||||
opts.registry ||
|
||||
registry
|
||||
)
|
||||
uri = `${
|
||||
registry.trim().replace(/\/?$/g, '')
|
||||
}/${
|
||||
uri.trim().replace(/^\//, '')
|
||||
}`
|
||||
// asserts that this is now valid
|
||||
new url.URL(uri)
|
||||
}
|
||||
|
||||
const method = opts.method || 'GET'
|
||||
|
||||
// through that takes into account the scope, the prefix of `uri`, etc
|
||||
const startTime = Date.now()
|
||||
const auth = getAuth(uri, opts)
|
||||
const headers = getHeaders(uri, auth, opts)
|
||||
let body = opts.body
|
||||
const bodyIsStream = Minipass.isStream(body)
|
||||
const bodyIsPromise = body &&
|
||||
typeof body === 'object' &&
|
||||
typeof body.then === 'function'
|
||||
|
||||
if (
|
||||
body && !bodyIsStream && !bodyIsPromise && typeof body !== 'string' && !Buffer.isBuffer(body)
|
||||
) {
|
||||
headers['content-type'] = headers['content-type'] || 'application/json'
|
||||
body = JSON.stringify(body)
|
||||
} else if (body && !headers['content-type']) {
|
||||
headers['content-type'] = 'application/octet-stream'
|
||||
}
|
||||
|
||||
if (opts.gzip) {
|
||||
headers['content-encoding'] = 'gzip'
|
||||
if (bodyIsStream) {
|
||||
const gz = new zlib.Gzip()
|
||||
body.on('error', /* istanbul ignore next: unlikely and hard to test */
|
||||
err => gz.emit('error', err))
|
||||
body = body.pipe(gz)
|
||||
} else if (!bodyIsPromise) {
|
||||
body = new zlib.Gzip().end(body).concat()
|
||||
}
|
||||
}
|
||||
|
||||
const parsed = new url.URL(uri)
|
||||
|
||||
if (opts.query) {
|
||||
const q = typeof opts.query === 'string' ? qs.parse(opts.query)
|
||||
: opts.query
|
||||
|
||||
Object.keys(q).forEach(key => {
|
||||
if (q[key] !== undefined) {
|
||||
parsed.searchParams.set(key, q[key])
|
||||
}
|
||||
})
|
||||
uri = url.format(parsed)
|
||||
}
|
||||
|
||||
if (parsed.searchParams.get('write') === 'true' && method === 'GET') {
|
||||
// do not cache, because this GET is fetching a rev that will be
|
||||
// used for a subsequent PUT or DELETE, so we need to conditionally
|
||||
// update cache.
|
||||
opts.offline = false
|
||||
opts.preferOffline = false
|
||||
opts.preferOnline = true
|
||||
}
|
||||
|
||||
const doFetch = async fetchBody => {
|
||||
const p = fetch(uri, {
|
||||
agent: opts.agent,
|
||||
algorithms: opts.algorithms,
|
||||
body: fetchBody,
|
||||
cache: getCacheMode(opts),
|
||||
cachePath: opts.cache,
|
||||
ca: opts.ca,
|
||||
cert: auth.cert || opts.cert,
|
||||
headers,
|
||||
integrity: opts.integrity,
|
||||
key: auth.key || opts.key,
|
||||
localAddress: opts.localAddress,
|
||||
maxSockets: opts.maxSockets,
|
||||
memoize: opts.memoize,
|
||||
method: method,
|
||||
noProxy: opts.noProxy,
|
||||
proxy: opts.httpsProxy || opts.proxy,
|
||||
retry: opts.retry ? opts.retry : {
|
||||
retries: opts.fetchRetries,
|
||||
factor: opts.fetchRetryFactor,
|
||||
minTimeout: opts.fetchRetryMintimeout,
|
||||
maxTimeout: opts.fetchRetryMaxtimeout,
|
||||
},
|
||||
strictSSL: opts.strictSSL,
|
||||
timeout: opts.timeout || 30 * 1000,
|
||||
}).then(res => checkResponse({
|
||||
method,
|
||||
uri,
|
||||
res,
|
||||
registry,
|
||||
startTime,
|
||||
auth,
|
||||
opts,
|
||||
}))
|
||||
|
||||
if (typeof opts.otpPrompt === 'function') {
|
||||
return p.catch(async er => {
|
||||
if (er instanceof HttpErrorAuthOTP) {
|
||||
let otp
|
||||
// if otp fails to complete, we fail with that failure
|
||||
try {
|
||||
otp = await opts.otpPrompt()
|
||||
} catch (_) {
|
||||
// ignore this error
|
||||
}
|
||||
// if no otp provided, or otpPrompt errored, throw the original HTTP error
|
||||
if (!otp) {
|
||||
throw er
|
||||
}
|
||||
return regFetch(uri, { ...opts, otp })
|
||||
}
|
||||
throw er
|
||||
})
|
||||
} else {
|
||||
return p
|
||||
}
|
||||
}
|
||||
|
||||
return Promise.resolve(body).then(doFetch)
|
||||
}
|
||||
|
||||
module.exports.getAuth = getAuth
|
||||
|
||||
module.exports.json = fetchJSON
|
||||
function fetchJSON (uri, opts) {
|
||||
return regFetch(uri, opts).then(res => res.json())
|
||||
}
|
||||
|
||||
module.exports.json.stream = fetchJSONStream
|
||||
function fetchJSONStream (uri, jsonPath,
|
||||
/* istanbul ignore next */ opts_ = {}) {
|
||||
const opts = { ...defaultOpts, ...opts_ }
|
||||
const parser = JSONStream.parse(jsonPath, opts.mapJSON)
|
||||
regFetch(uri, opts).then(res =>
|
||||
res.body.on('error',
|
||||
/* istanbul ignore next: unlikely and difficult to test */
|
||||
er => parser.emit('error', er)).pipe(parser)
|
||||
).catch(er => parser.emit('error', er))
|
||||
return parser
|
||||
}
|
||||
|
||||
module.exports.pickRegistry = pickRegistry
|
||||
function pickRegistry (spec, opts = {}) {
|
||||
spec = npa(spec)
|
||||
let registry = spec.scope &&
|
||||
opts[spec.scope.replace(/^@?/, '@') + ':registry']
|
||||
|
||||
if (!registry && opts.scope) {
|
||||
registry = opts[opts.scope.replace(/^@?/, '@') + ':registry']
|
||||
}
|
||||
|
||||
if (!registry) {
|
||||
registry = opts.registry || defaultOpts.registry
|
||||
}
|
||||
|
||||
return registry
|
||||
}
|
||||
|
||||
function getCacheMode (opts) {
|
||||
return opts.offline ? 'only-if-cached'
|
||||
: opts.preferOffline ? 'force-cache'
|
||||
: opts.preferOnline ? 'no-cache'
|
||||
: 'default'
|
||||
}
|
||||
|
||||
function getHeaders (uri, auth, opts) {
|
||||
const headers = Object.assign({
|
||||
'user-agent': opts.userAgent,
|
||||
}, opts.headers || {})
|
||||
|
||||
if (opts.authType) {
|
||||
headers['npm-auth-type'] = opts.authType
|
||||
}
|
||||
|
||||
if (opts.scope) {
|
||||
headers['npm-scope'] = opts.scope
|
||||
}
|
||||
|
||||
if (opts.npmSession) {
|
||||
headers['npm-session'] = opts.npmSession
|
||||
}
|
||||
|
||||
if (opts.npmCommand) {
|
||||
headers['npm-command'] = opts.npmCommand
|
||||
}
|
||||
|
||||
// If a tarball is hosted on a different place than the manifest, only send
|
||||
// credentials on `alwaysAuth`
|
||||
if (auth.token) {
|
||||
headers.authorization = `Bearer ${auth.token}`
|
||||
} else if (auth.auth) {
|
||||
headers.authorization = `Basic ${auth.auth}`
|
||||
}
|
||||
|
||||
if (opts.otp) {
|
||||
headers['npm-otp'] = opts.otp
|
||||
}
|
||||
|
||||
return headers
|
||||
}
|
223
node_modules/npm-registry-fetch/lib/json-stream.js
generated
vendored
Normal file
223
node_modules/npm-registry-fetch/lib/json-stream.js
generated
vendored
Normal file
|
@ -0,0 +1,223 @@
|
|||
const Parser = require('jsonparse')
|
||||
const { Minipass } = require('minipass')
|
||||
|
||||
class JSONStreamError extends Error {
|
||||
constructor (err, caller) {
|
||||
super(err.message)
|
||||
Error.captureStackTrace(this, caller || this.constructor)
|
||||
}
|
||||
|
||||
get name () {
|
||||
return 'JSONStreamError'
|
||||
}
|
||||
}
|
||||
|
||||
const check = (x, y) =>
|
||||
typeof x === 'string' ? String(y) === x
|
||||
: x && typeof x.test === 'function' ? x.test(y)
|
||||
: typeof x === 'boolean' || typeof x === 'object' ? x
|
||||
: typeof x === 'function' ? x(y)
|
||||
: false
|
||||
|
||||
class JSONStream extends Minipass {
|
||||
#count = 0
|
||||
#ending = false
|
||||
#footer = null
|
||||
#header = null
|
||||
#map = null
|
||||
#onTokenOriginal
|
||||
#parser
|
||||
#path = null
|
||||
#root = null
|
||||
|
||||
constructor (opts) {
|
||||
super({
|
||||
...opts,
|
||||
objectMode: true,
|
||||
})
|
||||
|
||||
const parser = this.#parser = new Parser()
|
||||
parser.onValue = value => this.#onValue(value)
|
||||
this.#onTokenOriginal = parser.onToken
|
||||
parser.onToken = (token, value) => this.#onToken(token, value)
|
||||
parser.onError = er => this.#onError(er)
|
||||
|
||||
this.#path = typeof opts.path === 'string'
|
||||
? opts.path.split('.').map(e =>
|
||||
e === '$*' ? { emitKey: true }
|
||||
: e === '*' ? true
|
||||
: e === '' ? { recurse: true }
|
||||
: e)
|
||||
: Array.isArray(opts.path) && opts.path.length ? opts.path
|
||||
: null
|
||||
|
||||
if (typeof opts.map === 'function') {
|
||||
this.#map = opts.map
|
||||
}
|
||||
}
|
||||
|
||||
#setHeaderFooter (key, value) {
|
||||
// header has not been emitted yet
|
||||
if (this.#header !== false) {
|
||||
this.#header = this.#header || {}
|
||||
this.#header[key] = value
|
||||
}
|
||||
|
||||
// footer has not been emitted yet but header has
|
||||
if (this.#footer !== false && this.#header === false) {
|
||||
this.#footer = this.#footer || {}
|
||||
this.#footer[key] = value
|
||||
}
|
||||
}
|
||||
|
||||
#onError (er) {
|
||||
// error will always happen during a write() call.
|
||||
const caller = this.#ending ? this.end : this.write
|
||||
this.#ending = false
|
||||
return this.emit('error', new JSONStreamError(er, caller))
|
||||
}
|
||||
|
||||
#onToken (token, value) {
|
||||
const parser = this.#parser
|
||||
this.#onTokenOriginal.call(this.#parser, token, value)
|
||||
if (parser.stack.length === 0) {
|
||||
if (this.#root) {
|
||||
const root = this.#root
|
||||
if (!this.#path) {
|
||||
super.write(root)
|
||||
}
|
||||
this.#root = null
|
||||
this.#count = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#onValue (value) {
|
||||
const parser = this.#parser
|
||||
// the LAST onValue encountered is the root object.
|
||||
// just overwrite it each time.
|
||||
this.#root = value
|
||||
|
||||
if (!this.#path) {
|
||||
return
|
||||
}
|
||||
|
||||
let i = 0 // iterates on path
|
||||
let j = 0 // iterates on stack
|
||||
let emitKey = false
|
||||
while (i < this.#path.length) {
|
||||
const key = this.#path[i]
|
||||
j++
|
||||
|
||||
if (key && !key.recurse) {
|
||||
const c = (j === parser.stack.length) ? parser : parser.stack[j]
|
||||
if (!c) {
|
||||
return
|
||||
}
|
||||
if (!check(key, c.key)) {
|
||||
this.#setHeaderFooter(c.key, value)
|
||||
return
|
||||
}
|
||||
emitKey = !!key.emitKey
|
||||
i++
|
||||
} else {
|
||||
i++
|
||||
if (i >= this.#path.length) {
|
||||
return
|
||||
}
|
||||
const nextKey = this.#path[i]
|
||||
if (!nextKey) {
|
||||
return
|
||||
}
|
||||
while (true) {
|
||||
const c = (j === parser.stack.length) ? parser : parser.stack[j]
|
||||
if (!c) {
|
||||
return
|
||||
}
|
||||
if (check(nextKey, c.key)) {
|
||||
i++
|
||||
if (!Object.isFrozen(parser.stack[j])) {
|
||||
parser.stack[j].value = null
|
||||
}
|
||||
break
|
||||
} else {
|
||||
this.#setHeaderFooter(c.key, value)
|
||||
}
|
||||
j++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// emit header
|
||||
if (this.#header) {
|
||||
const header = this.#header
|
||||
this.#header = false
|
||||
this.emit('header', header)
|
||||
}
|
||||
if (j !== parser.stack.length) {
|
||||
return
|
||||
}
|
||||
|
||||
this.#count++
|
||||
const actualPath = parser.stack.slice(1)
|
||||
.map(e => e.key).concat([parser.key])
|
||||
if (value !== null && value !== undefined) {
|
||||
const data = this.#map ? this.#map(value, actualPath) : value
|
||||
if (data !== null && data !== undefined) {
|
||||
const emit = emitKey ? { value: data } : data
|
||||
if (emitKey) {
|
||||
emit.key = parser.key
|
||||
}
|
||||
super.write(emit)
|
||||
}
|
||||
}
|
||||
|
||||
if (parser.value) {
|
||||
delete parser.value[parser.key]
|
||||
}
|
||||
|
||||
for (const k of parser.stack) {
|
||||
k.value = null
|
||||
}
|
||||
}
|
||||
|
||||
write (chunk, encoding) {
|
||||
if (typeof chunk === 'string') {
|
||||
chunk = Buffer.from(chunk, encoding)
|
||||
} else if (!Buffer.isBuffer(chunk)) {
|
||||
return this.emit('error', new TypeError(
|
||||
'Can only parse JSON from string or buffer input'))
|
||||
}
|
||||
this.#parser.write(chunk)
|
||||
return this.flowing
|
||||
}
|
||||
|
||||
end (chunk, encoding) {
|
||||
this.#ending = true
|
||||
if (chunk) {
|
||||
this.write(chunk, encoding)
|
||||
}
|
||||
|
||||
const h = this.#header
|
||||
this.#header = null
|
||||
const f = this.#footer
|
||||
this.#footer = null
|
||||
if (h) {
|
||||
this.emit('header', h)
|
||||
}
|
||||
if (f) {
|
||||
this.emit('footer', f)
|
||||
}
|
||||
return super.end()
|
||||
}
|
||||
|
||||
static get JSONStreamError () {
|
||||
return JSONStreamError
|
||||
}
|
||||
|
||||
static parse (path, map) {
|
||||
return new JSONStream({ path, map })
|
||||
}
|
||||
}
|
||||
|
||||
module.exports = JSONStream
|
15
node_modules/npm-registry-fetch/node_modules/minipass/LICENSE
generated
vendored
Normal file
15
node_modules/npm-registry-fetch/node_modules/minipass/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,15 @@
|
|||
The ISC License
|
||||
|
||||
Copyright (c) 2017-2023 npm, Inc., Isaac Z. Schlueter, and Contributors
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
|
||||
IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
825
node_modules/npm-registry-fetch/node_modules/minipass/README.md
generated
vendored
Normal file
825
node_modules/npm-registry-fetch/node_modules/minipass/README.md
generated
vendored
Normal file
|
@ -0,0 +1,825 @@
|
|||
# minipass
|
||||
|
||||
A _very_ minimal implementation of a [PassThrough
|
||||
stream](https://nodejs.org/api/stream.html#stream_class_stream_passthrough)
|
||||
|
||||
[It's very
|
||||
fast](https://docs.google.com/spreadsheets/d/1K_HR5oh3r80b8WVMWCPPjfuWXUgfkmhlX7FGI6JJ8tY/edit?usp=sharing)
|
||||
for objects, strings, and buffers.
|
||||
|
||||
Supports `pipe()`ing (including multi-`pipe()` and backpressure
|
||||
transmission), buffering data until either a `data` event handler
|
||||
or `pipe()` is added (so you don't lose the first chunk), and
|
||||
most other cases where PassThrough is a good idea.
|
||||
|
||||
There is a `read()` method, but it's much more efficient to
|
||||
consume data from this stream via `'data'` events or by calling
|
||||
`pipe()` into some other stream. Calling `read()` requires the
|
||||
buffer to be flattened in some cases, which requires copying
|
||||
memory.
|
||||
|
||||
If you set `objectMode: true` in the options, then whatever is
|
||||
written will be emitted. Otherwise, it'll do a minimal amount of
|
||||
Buffer copying to ensure proper Streams semantics when `read(n)`
|
||||
is called.
|
||||
|
||||
`objectMode` can only be set at instantiation. Attempting to
|
||||
write something other than a String or Buffer without having set
|
||||
`objectMode` in the options will throw an error.
|
||||
|
||||
This is not a `through` or `through2` stream. It doesn't
|
||||
transform the data, it just passes it right through. If you want
|
||||
to transform the data, extend the class, and override the
|
||||
`write()` method. Once you're done transforming the data however
|
||||
you want, call `super.write()` with the transform output.
|
||||
|
||||
For some examples of streams that extend Minipass in various
|
||||
ways, check out:
|
||||
|
||||
- [minizlib](http://npm.im/minizlib)
|
||||
- [fs-minipass](http://npm.im/fs-minipass)
|
||||
- [tar](http://npm.im/tar)
|
||||
- [minipass-collect](http://npm.im/minipass-collect)
|
||||
- [minipass-flush](http://npm.im/minipass-flush)
|
||||
- [minipass-pipeline](http://npm.im/minipass-pipeline)
|
||||
- [tap](http://npm.im/tap)
|
||||
- [tap-parser](http://npm.im/tap-parser)
|
||||
- [treport](http://npm.im/treport)
|
||||
- [minipass-fetch](http://npm.im/minipass-fetch)
|
||||
- [pacote](http://npm.im/pacote)
|
||||
- [make-fetch-happen](http://npm.im/make-fetch-happen)
|
||||
- [cacache](http://npm.im/cacache)
|
||||
- [ssri](http://npm.im/ssri)
|
||||
- [npm-registry-fetch](http://npm.im/npm-registry-fetch)
|
||||
- [minipass-json-stream](http://npm.im/minipass-json-stream)
|
||||
- [minipass-sized](http://npm.im/minipass-sized)
|
||||
|
||||
## Usage in TypeScript
|
||||
|
||||
The `Minipass` class takes three type template definitions:
|
||||
|
||||
- `RType` the type being read, which defaults to `Buffer`. If
|
||||
`RType` is `string`, then the constructor _must_ get an options
|
||||
object specifying either an `encoding` or `objectMode: true`.
|
||||
If it's anything other than `string` or `Buffer`, then it
|
||||
_must_ get an options object specifying `objectMode: true`.
|
||||
- `WType` the type being written. If `RType` is `Buffer` or
|
||||
`string`, then this defaults to `ContiguousData` (Buffer,
|
||||
string, ArrayBuffer, or ArrayBufferView). Otherwise, it
|
||||
defaults to `RType`.
|
||||
- `Events` type mapping event names to the arguments emitted
|
||||
with that event, which extends `Minipass.Events`.
|
||||
|
||||
To declare types for custom events in subclasses, extend the
|
||||
third parameter with your own event signatures. For example:
|
||||
|
||||
```js
|
||||
import { Minipass } from 'minipass'
|
||||
|
||||
// a NDJSON stream that emits 'jsonError' when it can't stringify
|
||||
export interface Events extends Minipass.Events {
|
||||
jsonError: [e: Error]
|
||||
}
|
||||
|
||||
export class NDJSONStream extends Minipass<string, any, Events> {
|
||||
constructor() {
|
||||
super({ objectMode: true })
|
||||
}
|
||||
|
||||
// data is type `any` because that's WType
|
||||
write(data, encoding, cb) {
|
||||
try {
|
||||
const json = JSON.stringify(data)
|
||||
return super.write(json + '\n', encoding, cb)
|
||||
} catch (er) {
|
||||
if (!er instanceof Error) {
|
||||
er = Object.assign(new Error('json stringify failed'), {
|
||||
cause: er,
|
||||
})
|
||||
}
|
||||
// trying to emit with something OTHER than an error will
|
||||
// fail, because we declared the event arguments type.
|
||||
this.emit('jsonError', er)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const s = new NDJSONStream()
|
||||
s.on('jsonError', e => {
|
||||
// here, TS knows that e is an Error
|
||||
})
|
||||
```
|
||||
|
||||
Emitting/handling events that aren't declared in this way is
|
||||
fine, but the arguments will be typed as `unknown`.
|
||||
|
||||
## Differences from Node.js Streams
|
||||
|
||||
There are several things that make Minipass streams different
|
||||
from (and in some ways superior to) Node.js core streams.
|
||||
|
||||
Please read these caveats if you are familiar with node-core
|
||||
streams and intend to use Minipass streams in your programs.
|
||||
|
||||
You can avoid most of these differences entirely (for a very
|
||||
small performance penalty) by setting `{async: true}` in the
|
||||
constructor options.
|
||||
|
||||
### Timing
|
||||
|
||||
Minipass streams are designed to support synchronous use-cases.
|
||||
Thus, data is emitted as soon as it is available, always. It is
|
||||
buffered until read, but no longer. Another way to look at it is
|
||||
that Minipass streams are exactly as synchronous as the logic
|
||||
that writes into them.
|
||||
|
||||
This can be surprising if your code relies on
|
||||
`PassThrough.write()` always providing data on the next tick
|
||||
rather than the current one, or being able to call `resume()` and
|
||||
not have the entire buffer disappear immediately.
|
||||
|
||||
However, without this synchronicity guarantee, there would be no
|
||||
way for Minipass to achieve the speeds it does, or support the
|
||||
synchronous use cases that it does. Simply put, waiting takes
|
||||
time.
|
||||
|
||||
This non-deferring approach makes Minipass streams much easier to
|
||||
reason about, especially in the context of Promises and other
|
||||
flow-control mechanisms.
|
||||
|
||||
Example:
|
||||
|
||||
```js
|
||||
// hybrid module, either works
|
||||
import { Minipass } from 'minipass'
|
||||
// or:
|
||||
const { Minipass } = require('minipass')
|
||||
|
||||
const stream = new Minipass()
|
||||
stream.on('data', () => console.log('data event'))
|
||||
console.log('before write')
|
||||
stream.write('hello')
|
||||
console.log('after write')
|
||||
// output:
|
||||
// before write
|
||||
// data event
|
||||
// after write
|
||||
```
|
||||
|
||||
### Exception: Async Opt-In
|
||||
|
||||
If you wish to have a Minipass stream with behavior that more
|
||||
closely mimics Node.js core streams, you can set the stream in
|
||||
async mode either by setting `async: true` in the constructor
|
||||
options, or by setting `stream.async = true` later on.
|
||||
|
||||
```js
|
||||
// hybrid module, either works
|
||||
import { Minipass } from 'minipass'
|
||||
// or:
|
||||
const { Minipass } = require('minipass')
|
||||
|
||||
const asyncStream = new Minipass({ async: true })
|
||||
asyncStream.on('data', () => console.log('data event'))
|
||||
console.log('before write')
|
||||
asyncStream.write('hello')
|
||||
console.log('after write')
|
||||
// output:
|
||||
// before write
|
||||
// after write
|
||||
// data event <-- this is deferred until the next tick
|
||||
```
|
||||
|
||||
Switching _out_ of async mode is unsafe, as it could cause data
|
||||
corruption, and so is not enabled. Example:
|
||||
|
||||
```js
|
||||
import { Minipass } from 'minipass'
|
||||
const stream = new Minipass({ encoding: 'utf8' })
|
||||
stream.on('data', chunk => console.log(chunk))
|
||||
stream.async = true
|
||||
console.log('before writes')
|
||||
stream.write('hello')
|
||||
setStreamSyncAgainSomehow(stream) // <-- this doesn't actually exist!
|
||||
stream.write('world')
|
||||
console.log('after writes')
|
||||
// hypothetical output would be:
|
||||
// before writes
|
||||
// world
|
||||
// after writes
|
||||
// hello
|
||||
// NOT GOOD!
|
||||
```
|
||||
|
||||
To avoid this problem, once set into async mode, any attempt to
|
||||
make the stream sync again will be ignored.
|
||||
|
||||
```js
|
||||
const { Minipass } = require('minipass')
|
||||
const stream = new Minipass({ encoding: 'utf8' })
|
||||
stream.on('data', chunk => console.log(chunk))
|
||||
stream.async = true
|
||||
console.log('before writes')
|
||||
stream.write('hello')
|
||||
stream.async = false // <-- no-op, stream already async
|
||||
stream.write('world')
|
||||
console.log('after writes')
|
||||
// actual output:
|
||||
// before writes
|
||||
// after writes
|
||||
// hello
|
||||
// world
|
||||
```
|
||||
|
||||
### No High/Low Water Marks
|
||||
|
||||
Node.js core streams will optimistically fill up a buffer,
|
||||
returning `true` on all writes until the limit is hit, even if
|
||||
the data has nowhere to go. Then, they will not attempt to draw
|
||||
more data in until the buffer size dips below a minimum value.
|
||||
|
||||
Minipass streams are much simpler. The `write()` method will
|
||||
return `true` if the data has somewhere to go (which is to say,
|
||||
given the timing guarantees, that the data is already there by
|
||||
the time `write()` returns).
|
||||
|
||||
If the data has nowhere to go, then `write()` returns false, and
|
||||
the data sits in a buffer, to be drained out immediately as soon
|
||||
as anyone consumes it.
|
||||
|
||||
Since nothing is ever buffered unnecessarily, there is much less
|
||||
copying data, and less bookkeeping about buffer capacity levels.
|
||||
|
||||
### Hazards of Buffering (or: Why Minipass Is So Fast)
|
||||
|
||||
Since data written to a Minipass stream is immediately written
|
||||
all the way through the pipeline, and `write()` always returns
|
||||
true/false based on whether the data was fully flushed,
|
||||
backpressure is communicated immediately to the upstream caller.
|
||||
This minimizes buffering.
|
||||
|
||||
Consider this case:
|
||||
|
||||
```js
|
||||
const { PassThrough } = require('stream')
|
||||
const p1 = new PassThrough({ highWaterMark: 1024 })
|
||||
const p2 = new PassThrough({ highWaterMark: 1024 })
|
||||
const p3 = new PassThrough({ highWaterMark: 1024 })
|
||||
const p4 = new PassThrough({ highWaterMark: 1024 })
|
||||
|
||||
p1.pipe(p2).pipe(p3).pipe(p4)
|
||||
p4.on('data', () => console.log('made it through'))
|
||||
|
||||
// this returns false and buffers, then writes to p2 on next tick (1)
|
||||
// p2 returns false and buffers, pausing p1, then writes to p3 on next tick (2)
|
||||
// p3 returns false and buffers, pausing p2, then writes to p4 on next tick (3)
|
||||
// p4 returns false and buffers, pausing p3, then emits 'data' and 'drain'
|
||||
// on next tick (4)
|
||||
// p3 sees p4's 'drain' event, and calls resume(), emitting 'resume' and
|
||||
// 'drain' on next tick (5)
|
||||
// p2 sees p3's 'drain', calls resume(), emits 'resume' and 'drain' on next tick (6)
|
||||
// p1 sees p2's 'drain', calls resume(), emits 'resume' and 'drain' on next
|
||||
// tick (7)
|
||||
|
||||
p1.write(Buffer.alloc(2048)) // returns false
|
||||
```
|
||||
|
||||
Along the way, the data was buffered and deferred at each stage,
|
||||
and multiple event deferrals happened, for an unblocked pipeline
|
||||
where it was perfectly safe to write all the way through!
|
||||
|
||||
Furthermore, setting a `highWaterMark` of `1024` might lead
|
||||
someone reading the code to think an advisory maximum of 1KiB is
|
||||
being set for the pipeline. However, the actual advisory
|
||||
buffering level is the _sum_ of `highWaterMark` values, since
|
||||
each one has its own bucket.
|
||||
|
||||
Consider the Minipass case:
|
||||
|
||||
```js
|
||||
const m1 = new Minipass()
|
||||
const m2 = new Minipass()
|
||||
const m3 = new Minipass()
|
||||
const m4 = new Minipass()
|
||||
|
||||
m1.pipe(m2).pipe(m3).pipe(m4)
|
||||
m4.on('data', () => console.log('made it through'))
|
||||
|
||||
// m1 is flowing, so it writes the data to m2 immediately
|
||||
// m2 is flowing, so it writes the data to m3 immediately
|
||||
// m3 is flowing, so it writes the data to m4 immediately
|
||||
// m4 is flowing, so it fires the 'data' event immediately, returns true
|
||||
// m4's write returned true, so m3 is still flowing, returns true
|
||||
// m3's write returned true, so m2 is still flowing, returns true
|
||||
// m2's write returned true, so m1 is still flowing, returns true
|
||||
// No event deferrals or buffering along the way!
|
||||
|
||||
m1.write(Buffer.alloc(2048)) // returns true
|
||||
```
|
||||
|
||||
It is extremely unlikely that you _don't_ want to buffer any data
|
||||
written, or _ever_ buffer data that can be flushed all the way
|
||||
through. Neither node-core streams nor Minipass ever fail to
|
||||
buffer written data, but node-core streams do a lot of
|
||||
unnecessary buffering and pausing.
|
||||
|
||||
As always, the faster implementation is the one that does less
|
||||
stuff and waits less time to do it.
|
||||
|
||||
### Immediately emit `end` for empty streams (when not paused)
|
||||
|
||||
If a stream is not paused, and `end()` is called before writing
|
||||
any data into it, then it will emit `end` immediately.
|
||||
|
||||
If you have logic that occurs on the `end` event which you don't
|
||||
want to potentially happen immediately (for example, closing file
|
||||
descriptors, moving on to the next entry in an archive parse
|
||||
stream, etc.) then be sure to call `stream.pause()` on creation,
|
||||
and then `stream.resume()` once you are ready to respond to the
|
||||
`end` event.
|
||||
|
||||
However, this is _usually_ not a problem because:
|
||||
|
||||
### Emit `end` When Asked
|
||||
|
||||
One hazard of immediately emitting `'end'` is that you may not
|
||||
yet have had a chance to add a listener. In order to avoid this
|
||||
hazard, Minipass streams safely re-emit the `'end'` event if a
|
||||
new listener is added after `'end'` has been emitted.
|
||||
|
||||
Ie, if you do `stream.on('end', someFunction)`, and the stream
|
||||
has already emitted `end`, then it will call the handler right
|
||||
away. (You can think of this somewhat like attaching a new
|
||||
`.then(fn)` to a previously-resolved Promise.)
|
||||
|
||||
To prevent calling handlers multiple times who would not expect
|
||||
multiple ends to occur, all listeners are removed from the
|
||||
`'end'` event whenever it is emitted.
|
||||
|
||||
### Emit `error` When Asked
|
||||
|
||||
The most recent error object passed to the `'error'` event is
|
||||
stored on the stream. If a new `'error'` event handler is added,
|
||||
and an error was previously emitted, then the event handler will
|
||||
be called immediately (or on `process.nextTick` in the case of
|
||||
async streams).
|
||||
|
||||
This makes it much more difficult to end up trying to interact
|
||||
with a broken stream, if the error handler is added after an
|
||||
error was previously emitted.
|
||||
|
||||
### Impact of "immediate flow" on Tee-streams
|
||||
|
||||
A "tee stream" is a stream piping to multiple destinations:
|
||||
|
||||
```js
|
||||
const tee = new Minipass()
|
||||
tee.pipe(dest1)
|
||||
tee.pipe(dest2)
|
||||
tee.write('foo') // goes to both destinations
|
||||
```
|
||||
|
||||
Since Minipass streams _immediately_ process any pending data
|
||||
through the pipeline when a new pipe destination is added, this
|
||||
can have surprising effects, especially when a stream comes in
|
||||
from some other function and may or may not have data in its
|
||||
buffer.
|
||||
|
||||
```js
|
||||
// WARNING! WILL LOSE DATA!
|
||||
const src = new Minipass()
|
||||
src.write('foo')
|
||||
src.pipe(dest1) // 'foo' chunk flows to dest1 immediately, and is gone
|
||||
src.pipe(dest2) // gets nothing!
|
||||
```
|
||||
|
||||
One solution is to create a dedicated tee-stream junction that
|
||||
pipes to both locations, and then pipe to _that_ instead.
|
||||
|
||||
```js
|
||||
// Safe example: tee to both places
|
||||
const src = new Minipass()
|
||||
src.write('foo')
|
||||
const tee = new Minipass()
|
||||
tee.pipe(dest1)
|
||||
tee.pipe(dest2)
|
||||
src.pipe(tee) // tee gets 'foo', pipes to both locations
|
||||
```
|
||||
|
||||
The same caveat applies to `on('data')` event listeners. The
|
||||
first one added will _immediately_ receive all of the data,
|
||||
leaving nothing for the second:
|
||||
|
||||
```js
|
||||
// WARNING! WILL LOSE DATA!
|
||||
const src = new Minipass()
|
||||
src.write('foo')
|
||||
src.on('data', handler1) // receives 'foo' right away
|
||||
src.on('data', handler2) // nothing to see here!
|
||||
```
|
||||
|
||||
Using a dedicated tee-stream can be used in this case as well:
|
||||
|
||||
```js
|
||||
// Safe example: tee to both data handlers
|
||||
const src = new Minipass()
|
||||
src.write('foo')
|
||||
const tee = new Minipass()
|
||||
tee.on('data', handler1)
|
||||
tee.on('data', handler2)
|
||||
src.pipe(tee)
|
||||
```
|
||||
|
||||
All of the hazards in this section are avoided by setting `{
|
||||
async: true }` in the Minipass constructor, or by setting
|
||||
`stream.async = true` afterwards. Note that this does add some
|
||||
overhead, so should only be done in cases where you are willing
|
||||
to lose a bit of performance in order to avoid having to refactor
|
||||
program logic.
|
||||
|
||||
## USAGE
|
||||
|
||||
It's a stream! Use it like a stream and it'll most likely do what
|
||||
you want.
|
||||
|
||||
```js
|
||||
import { Minipass } from 'minipass'
|
||||
const mp = new Minipass(options) // options is optional
|
||||
mp.write('foo')
|
||||
mp.pipe(someOtherStream)
|
||||
mp.end('bar')
|
||||
```
|
||||
|
||||
### OPTIONS
|
||||
|
||||
- `encoding` How would you like the data coming _out_ of the
|
||||
stream to be encoded? Accepts any values that can be passed to
|
||||
`Buffer.toString()`.
|
||||
- `objectMode` Emit data exactly as it comes in. This will be
|
||||
flipped on by default if you write() something other than a
|
||||
string or Buffer at any point. Setting `objectMode: true` will
|
||||
prevent setting any encoding value.
|
||||
- `async` Defaults to `false`. Set to `true` to defer data
|
||||
emission until next tick. This reduces performance slightly,
|
||||
but makes Minipass streams use timing behavior closer to Node
|
||||
core streams. See [Timing](#timing) for more details.
|
||||
- `signal` An `AbortSignal` that will cause the stream to unhook
|
||||
itself from everything and become as inert as possible. Note
|
||||
that providing a `signal` parameter will make `'error'` events
|
||||
no longer throw if they are unhandled, but they will still be
|
||||
emitted to handlers if any are attached.
|
||||
|
||||
### API
|
||||
|
||||
Implements the user-facing portions of Node.js's `Readable` and
|
||||
`Writable` streams.
|
||||
|
||||
### Methods
|
||||
|
||||
- `write(chunk, [encoding], [callback])` - Put data in. (Note
|
||||
that, in the base Minipass class, the same data will come out.)
|
||||
Returns `false` if the stream will buffer the next write, or
|
||||
true if it's still in "flowing" mode.
|
||||
- `end([chunk, [encoding]], [callback])` - Signal that you have
|
||||
no more data to write. This will queue an `end` event to be
|
||||
fired when all the data has been consumed.
|
||||
- `pause()` - No more data for a while, please. This also
|
||||
prevents `end` from being emitted for empty streams until the
|
||||
stream is resumed.
|
||||
- `resume()` - Resume the stream. If there's data in the buffer,
|
||||
it is all discarded. Any buffered events are immediately
|
||||
emitted.
|
||||
- `pipe(dest)` - Send all output to the stream provided. When
|
||||
data is emitted, it is immediately written to any and all pipe
|
||||
destinations. (Or written on next tick in `async` mode.)
|
||||
- `unpipe(dest)` - Stop piping to the destination stream. This is
|
||||
immediate, meaning that any asynchronously queued data will
|
||||
_not_ make it to the destination when running in `async` mode.
|
||||
- `options.end` - Boolean, end the destination stream when the
|
||||
source stream ends. Default `true`.
|
||||
- `options.proxyErrors` - Boolean, proxy `error` events from
|
||||
the source stream to the destination stream. Note that errors
|
||||
are _not_ proxied after the pipeline terminates, either due
|
||||
to the source emitting `'end'` or manually unpiping with
|
||||
`src.unpipe(dest)`. Default `false`.
|
||||
- `on(ev, fn)`, `emit(ev, fn)` - Minipass streams are
|
||||
EventEmitters. Some events are given special treatment,
|
||||
however. (See below under "events".)
|
||||
- `promise()` - Returns a Promise that resolves when the stream
|
||||
emits `end`, or rejects if the stream emits `error`.
|
||||
- `collect()` - Return a Promise that resolves on `end` with an
|
||||
array containing each chunk of data that was emitted, or
|
||||
rejects if the stream emits `error`. Note that this consumes
|
||||
the stream data.
|
||||
- `concat()` - Same as `collect()`, but concatenates the data
|
||||
into a single Buffer object. Will reject the returned promise
|
||||
if the stream is in objectMode, or if it goes into objectMode
|
||||
by the end of the data.
|
||||
- `read(n)` - Consume `n` bytes of data out of the buffer. If `n`
|
||||
is not provided, then consume all of it. If `n` bytes are not
|
||||
available, then it returns null. **Note** consuming streams in
|
||||
this way is less efficient, and can lead to unnecessary Buffer
|
||||
copying.
|
||||
- `destroy([er])` - Destroy the stream. If an error is provided,
|
||||
then an `'error'` event is emitted. If the stream has a
|
||||
`close()` method, and has not emitted a `'close'` event yet,
|
||||
then `stream.close()` will be called. Any Promises returned by
|
||||
`.promise()`, `.collect()` or `.concat()` will be rejected.
|
||||
After being destroyed, writing to the stream will emit an
|
||||
error. No more data will be emitted if the stream is destroyed,
|
||||
even if it was previously buffered.
|
||||
|
||||
### Properties
|
||||
|
||||
- `bufferLength` Read-only. Total number of bytes buffered, or in
|
||||
the case of objectMode, the total number of objects.
|
||||
- `encoding` Read-only. The encoding that has been set.
|
||||
- `flowing` Read-only. Boolean indicating whether a chunk written
|
||||
to the stream will be immediately emitted.
|
||||
- `emittedEnd` Read-only. Boolean indicating whether the end-ish
|
||||
events (ie, `end`, `prefinish`, `finish`) have been emitted.
|
||||
Note that listening on any end-ish event will immediately
|
||||
re-emit it if it has already been emitted.
|
||||
- `writable` Whether the stream is writable. Default `true`. Set
|
||||
to `false` when `end()` is called.
|
||||
- `readable` Whether the stream is readable. Default `true`.
|
||||
- `pipes` An array of Pipe objects referencing streams that this
|
||||
stream is piping into.
|
||||
- `destroyed` A getter that indicates whether the stream was
|
||||
destroyed.
|
||||
- `paused` True if the stream has been explicitly paused,
|
||||
otherwise false.
|
||||
- `objectMode` Indicates whether the stream is in `objectMode`.
|
||||
- `aborted` Readonly property set when the `AbortSignal`
|
||||
dispatches an `abort` event.
|
||||
|
||||
### Events
|
||||
|
||||
- `data` Emitted when there's data to read. Argument is the data
|
||||
to read. This is never emitted while not flowing. If a listener
|
||||
is attached, that will resume the stream.
|
||||
- `end` Emitted when there's no more data to read. This will be
|
||||
emitted immediately for empty streams when `end()` is called.
|
||||
If a listener is attached, and `end` was already emitted, then
|
||||
it will be emitted again. All listeners are removed when `end`
|
||||
is emitted.
|
||||
- `prefinish` An end-ish event that follows the same logic as
|
||||
`end` and is emitted in the same conditions where `end` is
|
||||
emitted. Emitted after `'end'`.
|
||||
- `finish` An end-ish event that follows the same logic as `end`
|
||||
and is emitted in the same conditions where `end` is emitted.
|
||||
Emitted after `'prefinish'`.
|
||||
- `close` An indication that an underlying resource has been
|
||||
released. Minipass does not emit this event, but will defer it
|
||||
until after `end` has been emitted, since it throws off some
|
||||
stream libraries otherwise.
|
||||
- `drain` Emitted when the internal buffer empties, and it is
|
||||
again suitable to `write()` into the stream.
|
||||
- `readable` Emitted when data is buffered and ready to be read
|
||||
by a consumer.
|
||||
- `resume` Emitted when stream changes state from buffering to
|
||||
flowing mode. (Ie, when `resume` is called, `pipe` is called,
|
||||
or a `data` event listener is added.)
|
||||
|
||||
### Static Methods
|
||||
|
||||
- `Minipass.isStream(stream)` Returns `true` if the argument is a
|
||||
stream, and false otherwise. To be considered a stream, the
|
||||
object must be either an instance of Minipass, or an
|
||||
EventEmitter that has either a `pipe()` method, or both
|
||||
`write()` and `end()` methods. (Pretty much any stream in
|
||||
node-land will return `true` for this.)
|
||||
|
||||
## EXAMPLES
|
||||
|
||||
Here are some examples of things you can do with Minipass
|
||||
streams.
|
||||
|
||||
### simple "are you done yet" promise
|
||||
|
||||
```js
|
||||
mp.promise().then(
|
||||
() => {
|
||||
// stream is finished
|
||||
},
|
||||
er => {
|
||||
// stream emitted an error
|
||||
}
|
||||
)
|
||||
```
|
||||
|
||||
### collecting
|
||||
|
||||
```js
|
||||
mp.collect().then(all => {
|
||||
// all is an array of all the data emitted
|
||||
// encoding is supported in this case, so
|
||||
// the result will be a collection of strings if
|
||||
// an encoding is specified, or buffers/objects if not.
|
||||
//
|
||||
// In an async function, you may do
|
||||
// const data = await stream.collect()
|
||||
})
|
||||
```
|
||||
|
||||
### collecting into a single blob
|
||||
|
||||
This is a bit slower because it concatenates the data into one
|
||||
chunk for you, but if you're going to do it yourself anyway, it's
|
||||
convenient this way:
|
||||
|
||||
```js
|
||||
mp.concat().then(onebigchunk => {
|
||||
// onebigchunk is a string if the stream
|
||||
// had an encoding set, or a buffer otherwise.
|
||||
})
|
||||
```
|
||||
|
||||
### iteration
|
||||
|
||||
You can iterate over streams synchronously or asynchronously in
|
||||
platforms that support it.
|
||||
|
||||
Synchronous iteration will end when the currently available data
|
||||
is consumed, even if the `end` event has not been reached. In
|
||||
string and buffer mode, the data is concatenated, so unless
|
||||
multiple writes are occurring in the same tick as the `read()`,
|
||||
sync iteration loops will generally only have a single iteration.
|
||||
|
||||
To consume chunks in this way exactly as they have been written,
|
||||
with no flattening, create the stream with the `{ objectMode:
|
||||
true }` option.
|
||||
|
||||
```js
|
||||
const mp = new Minipass({ objectMode: true })
|
||||
mp.write('a')
|
||||
mp.write('b')
|
||||
for (let letter of mp) {
|
||||
console.log(letter) // a, b
|
||||
}
|
||||
mp.write('c')
|
||||
mp.write('d')
|
||||
for (let letter of mp) {
|
||||
console.log(letter) // c, d
|
||||
}
|
||||
mp.write('e')
|
||||
mp.end()
|
||||
for (let letter of mp) {
|
||||
console.log(letter) // e
|
||||
}
|
||||
for (let letter of mp) {
|
||||
console.log(letter) // nothing
|
||||
}
|
||||
```
|
||||
|
||||
Asynchronous iteration will continue until the end event is reached,
|
||||
consuming all of the data.
|
||||
|
||||
```js
|
||||
const mp = new Minipass({ encoding: 'utf8' })
|
||||
|
||||
// some source of some data
|
||||
let i = 5
|
||||
const inter = setInterval(() => {
|
||||
if (i-- > 0) mp.write(Buffer.from('foo\n', 'utf8'))
|
||||
else {
|
||||
mp.end()
|
||||
clearInterval(inter)
|
||||
}
|
||||
}, 100)
|
||||
|
||||
// consume the data with asynchronous iteration
|
||||
async function consume() {
|
||||
for await (let chunk of mp) {
|
||||
console.log(chunk)
|
||||
}
|
||||
return 'ok'
|
||||
}
|
||||
|
||||
consume().then(res => console.log(res))
|
||||
// logs `foo\n` 5 times, and then `ok`
|
||||
```
|
||||
|
||||
### subclass that `console.log()`s everything written into it
|
||||
|
||||
```js
|
||||
class Logger extends Minipass {
|
||||
write(chunk, encoding, callback) {
|
||||
console.log('WRITE', chunk, encoding)
|
||||
return super.write(chunk, encoding, callback)
|
||||
}
|
||||
end(chunk, encoding, callback) {
|
||||
console.log('END', chunk, encoding)
|
||||
return super.end(chunk, encoding, callback)
|
||||
}
|
||||
}
|
||||
|
||||
someSource.pipe(new Logger()).pipe(someDest)
|
||||
```
|
||||
|
||||
### same thing, but using an inline anonymous class
|
||||
|
||||
```js
|
||||
// js classes are fun
|
||||
someSource
|
||||
.pipe(
|
||||
new (class extends Minipass {
|
||||
emit(ev, ...data) {
|
||||
// let's also log events, because debugging some weird thing
|
||||
console.log('EMIT', ev)
|
||||
return super.emit(ev, ...data)
|
||||
}
|
||||
write(chunk, encoding, callback) {
|
||||
console.log('WRITE', chunk, encoding)
|
||||
return super.write(chunk, encoding, callback)
|
||||
}
|
||||
end(chunk, encoding, callback) {
|
||||
console.log('END', chunk, encoding)
|
||||
return super.end(chunk, encoding, callback)
|
||||
}
|
||||
})()
|
||||
)
|
||||
.pipe(someDest)
|
||||
```
|
||||
|
||||
### subclass that defers 'end' for some reason
|
||||
|
||||
```js
|
||||
class SlowEnd extends Minipass {
|
||||
emit(ev, ...args) {
|
||||
if (ev === 'end') {
|
||||
console.log('going to end, hold on a sec')
|
||||
setTimeout(() => {
|
||||
console.log('ok, ready to end now')
|
||||
super.emit('end', ...args)
|
||||
}, 100)
|
||||
return true
|
||||
} else {
|
||||
return super.emit(ev, ...args)
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### transform that creates newline-delimited JSON
|
||||
|
||||
```js
|
||||
class NDJSONEncode extends Minipass {
|
||||
write(obj, cb) {
|
||||
try {
|
||||
// JSON.stringify can throw, emit an error on that
|
||||
return super.write(JSON.stringify(obj) + '\n', 'utf8', cb)
|
||||
} catch (er) {
|
||||
this.emit('error', er)
|
||||
}
|
||||
}
|
||||
end(obj, cb) {
|
||||
if (typeof obj === 'function') {
|
||||
cb = obj
|
||||
obj = undefined
|
||||
}
|
||||
if (obj !== undefined) {
|
||||
this.write(obj)
|
||||
}
|
||||
return super.end(cb)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### transform that parses newline-delimited JSON
|
||||
|
||||
```js
|
||||
class NDJSONDecode extends Minipass {
|
||||
constructor(options) {
|
||||
// always be in object mode, as far as Minipass is concerned
|
||||
super({ objectMode: true })
|
||||
this._jsonBuffer = ''
|
||||
}
|
||||
write(chunk, encoding, cb) {
|
||||
if (
|
||||
typeof chunk === 'string' &&
|
||||
typeof encoding === 'string' &&
|
||||
encoding !== 'utf8'
|
||||
) {
|
||||
chunk = Buffer.from(chunk, encoding).toString()
|
||||
} else if (Buffer.isBuffer(chunk)) {
|
||||
chunk = chunk.toString()
|
||||
}
|
||||
if (typeof encoding === 'function') {
|
||||
cb = encoding
|
||||
}
|
||||
const jsonData = (this._jsonBuffer + chunk).split('\n')
|
||||
this._jsonBuffer = jsonData.pop()
|
||||
for (let i = 0; i < jsonData.length; i++) {
|
||||
try {
|
||||
// JSON.parse can throw, emit an error on that
|
||||
super.write(JSON.parse(jsonData[i]))
|
||||
} catch (er) {
|
||||
this.emit('error', er)
|
||||
continue
|
||||
}
|
||||
}
|
||||
if (cb) cb()
|
||||
}
|
||||
}
|
||||
```
|
549
node_modules/npm-registry-fetch/node_modules/minipass/dist/commonjs/index.d.ts
generated
vendored
Normal file
549
node_modules/npm-registry-fetch/node_modules/minipass/dist/commonjs/index.d.ts
generated
vendored
Normal file
|
@ -0,0 +1,549 @@
|
|||
/// <reference types="node" />
|
||||
/// <reference types="node" />
|
||||
/// <reference types="node" />
|
||||
/// <reference types="node" />
|
||||
import { EventEmitter } from 'node:events';
|
||||
import { StringDecoder } from 'node:string_decoder';
|
||||
/**
|
||||
* Same as StringDecoder, but exposing the `lastNeed` flag on the type
|
||||
*/
|
||||
type SD = StringDecoder & {
|
||||
lastNeed: boolean;
|
||||
};
|
||||
export type { SD, Pipe, PipeProxyErrors };
|
||||
/**
|
||||
* Return true if the argument is a Minipass stream, Node stream, or something
|
||||
* else that Minipass can interact with.
|
||||
*/
|
||||
export declare const isStream: (s: any) => s is NodeJS.WriteStream | NodeJS.ReadStream | Minipass<any, any, any> | (NodeJS.ReadStream & {
|
||||
fd: number;
|
||||
}) | (EventEmitter & {
|
||||
pause(): any;
|
||||
resume(): any;
|
||||
pipe(...destArgs: any[]): any;
|
||||
}) | (NodeJS.WriteStream & {
|
||||
fd: number;
|
||||
}) | (EventEmitter & {
|
||||
end(): any;
|
||||
write(chunk: any, ...args: any[]): any;
|
||||
});
|
||||
/**
|
||||
* Return true if the argument is a valid {@link Minipass.Readable}
|
||||
*/
|
||||
export declare const isReadable: (s: any) => s is Minipass.Readable;
|
||||
/**
|
||||
* Return true if the argument is a valid {@link Minipass.Writable}
|
||||
*/
|
||||
export declare const isWritable: (s: any) => s is Minipass.Writable;
|
||||
declare const EOF: unique symbol;
|
||||
declare const MAYBE_EMIT_END: unique symbol;
|
||||
declare const EMITTED_END: unique symbol;
|
||||
declare const EMITTING_END: unique symbol;
|
||||
declare const EMITTED_ERROR: unique symbol;
|
||||
declare const CLOSED: unique symbol;
|
||||
declare const READ: unique symbol;
|
||||
declare const FLUSH: unique symbol;
|
||||
declare const FLUSHCHUNK: unique symbol;
|
||||
declare const ENCODING: unique symbol;
|
||||
declare const DECODER: unique symbol;
|
||||
declare const FLOWING: unique symbol;
|
||||
declare const PAUSED: unique symbol;
|
||||
declare const RESUME: unique symbol;
|
||||
declare const BUFFER: unique symbol;
|
||||
declare const PIPES: unique symbol;
|
||||
declare const BUFFERLENGTH: unique symbol;
|
||||
declare const BUFFERPUSH: unique symbol;
|
||||
declare const BUFFERSHIFT: unique symbol;
|
||||
declare const OBJECTMODE: unique symbol;
|
||||
declare const DESTROYED: unique symbol;
|
||||
declare const ERROR: unique symbol;
|
||||
declare const EMITDATA: unique symbol;
|
||||
declare const EMITEND: unique symbol;
|
||||
declare const EMITEND2: unique symbol;
|
||||
declare const ASYNC: unique symbol;
|
||||
declare const ABORT: unique symbol;
|
||||
declare const ABORTED: unique symbol;
|
||||
declare const SIGNAL: unique symbol;
|
||||
declare const DATALISTENERS: unique symbol;
|
||||
declare const DISCARDED: unique symbol;
|
||||
/**
|
||||
* Options that may be passed to stream.pipe()
|
||||
*/
|
||||
export interface PipeOptions {
|
||||
/**
|
||||
* end the destination stream when the source stream ends
|
||||
*/
|
||||
end?: boolean;
|
||||
/**
|
||||
* proxy errors from the source stream to the destination stream
|
||||
*/
|
||||
proxyErrors?: boolean;
|
||||
}
|
||||
/**
|
||||
* Internal class representing a pipe to a destination stream.
|
||||
*
|
||||
* @internal
|
||||
*/
|
||||
declare class Pipe<T extends unknown> {
|
||||
src: Minipass<T>;
|
||||
dest: Minipass<any, T>;
|
||||
opts: PipeOptions;
|
||||
ondrain: () => any;
|
||||
constructor(src: Minipass<T>, dest: Minipass.Writable, opts: PipeOptions);
|
||||
unpipe(): void;
|
||||
proxyErrors(_er: any): void;
|
||||
end(): void;
|
||||
}
|
||||
/**
|
||||
* Internal class representing a pipe to a destination stream where
|
||||
* errors are proxied.
|
||||
*
|
||||
* @internal
|
||||
*/
|
||||
declare class PipeProxyErrors<T> extends Pipe<T> {
|
||||
unpipe(): void;
|
||||
constructor(src: Minipass<T>, dest: Minipass.Writable, opts: PipeOptions);
|
||||
}
|
||||
export declare namespace Minipass {
|
||||
/**
|
||||
* Encoding used to create a stream that outputs strings rather than
|
||||
* Buffer objects.
|
||||
*/
|
||||
export type Encoding = BufferEncoding | 'buffer' | null;
|
||||
/**
|
||||
* Any stream that Minipass can pipe into
|
||||
*/
|
||||
export type Writable = Minipass<any, any, any> | NodeJS.WriteStream | (NodeJS.WriteStream & {
|
||||
fd: number;
|
||||
}) | (EventEmitter & {
|
||||
end(): any;
|
||||
write(chunk: any, ...args: any[]): any;
|
||||
});
|
||||
/**
|
||||
* Any stream that can be read from
|
||||
*/
|
||||
export type Readable = Minipass<any, any, any> | NodeJS.ReadStream | (NodeJS.ReadStream & {
|
||||
fd: number;
|
||||
}) | (EventEmitter & {
|
||||
pause(): any;
|
||||
resume(): any;
|
||||
pipe(...destArgs: any[]): any;
|
||||
});
|
||||
/**
|
||||
* Utility type that can be iterated sync or async
|
||||
*/
|
||||
export type DualIterable<T> = Iterable<T> & AsyncIterable<T>;
|
||||
type EventArguments = Record<string | symbol, unknown[]>;
|
||||
/**
|
||||
* The listing of events that a Minipass class can emit.
|
||||
* Extend this when extending the Minipass class, and pass as
|
||||
* the third template argument. The key is the name of the event,
|
||||
* and the value is the argument list.
|
||||
*
|
||||
* Any undeclared events will still be allowed, but the handler will get
|
||||
* arguments as `unknown[]`.
|
||||
*/
|
||||
export interface Events<RType extends any = Buffer> extends EventArguments {
|
||||
readable: [];
|
||||
data: [chunk: RType];
|
||||
error: [er: unknown];
|
||||
abort: [reason: unknown];
|
||||
drain: [];
|
||||
resume: [];
|
||||
end: [];
|
||||
finish: [];
|
||||
prefinish: [];
|
||||
close: [];
|
||||
[DESTROYED]: [er?: unknown];
|
||||
[ERROR]: [er: unknown];
|
||||
}
|
||||
/**
|
||||
* String or buffer-like data that can be joined and sliced
|
||||
*/
|
||||
export type ContiguousData = Buffer | ArrayBufferLike | ArrayBufferView | string;
|
||||
export type BufferOrString = Buffer | string;
|
||||
/**
|
||||
* Options passed to the Minipass constructor.
|
||||
*/
|
||||
export type SharedOptions = {
|
||||
/**
|
||||
* Defer all data emission and other events until the end of the
|
||||
* current tick, similar to Node core streams
|
||||
*/
|
||||
async?: boolean;
|
||||
/**
|
||||
* A signal which will abort the stream
|
||||
*/
|
||||
signal?: AbortSignal;
|
||||
/**
|
||||
* Output string encoding. Set to `null` or `'buffer'` (or omit) to
|
||||
* emit Buffer objects rather than strings.
|
||||
*
|
||||
* Conflicts with `objectMode`
|
||||
*/
|
||||
encoding?: BufferEncoding | null | 'buffer';
|
||||
/**
|
||||
* Output data exactly as it was written, supporting non-buffer/string
|
||||
* data (such as arbitrary objects, falsey values, etc.)
|
||||
*
|
||||
* Conflicts with `encoding`
|
||||
*/
|
||||
objectMode?: boolean;
|
||||
};
|
||||
/**
|
||||
* Options for a string encoded output
|
||||
*/
|
||||
export type EncodingOptions = SharedOptions & {
|
||||
encoding: BufferEncoding;
|
||||
objectMode?: false;
|
||||
};
|
||||
/**
|
||||
* Options for contiguous data buffer output
|
||||
*/
|
||||
export type BufferOptions = SharedOptions & {
|
||||
encoding?: null | 'buffer';
|
||||
objectMode?: false;
|
||||
};
|
||||
/**
|
||||
* Options for objectMode arbitrary output
|
||||
*/
|
||||
export type ObjectModeOptions = SharedOptions & {
|
||||
objectMode: true;
|
||||
encoding?: null;
|
||||
};
|
||||
/**
|
||||
* Utility type to determine allowed options based on read type
|
||||
*/
|
||||
export type Options<T> = ObjectModeOptions | (T extends string ? EncodingOptions : T extends Buffer ? BufferOptions : SharedOptions);
|
||||
export {};
|
||||
}
|
||||
/**
|
||||
* Main export, the Minipass class
|
||||
*
|
||||
* `RType` is the type of data emitted, defaults to Buffer
|
||||
*
|
||||
* `WType` is the type of data to be written, if RType is buffer or string,
|
||||
* then any {@link Minipass.ContiguousData} is allowed.
|
||||
*
|
||||
* `Events` is the set of event handler signatures that this object
|
||||
* will emit, see {@link Minipass.Events}
|
||||
*/
|
||||
export declare class Minipass<RType extends unknown = Buffer, WType extends unknown = RType extends Minipass.BufferOrString ? Minipass.ContiguousData : RType, Events extends Minipass.Events<RType> = Minipass.Events<RType>> extends EventEmitter implements Minipass.DualIterable<RType> {
|
||||
[FLOWING]: boolean;
|
||||
[PAUSED]: boolean;
|
||||
[PIPES]: Pipe<RType>[];
|
||||
[BUFFER]: RType[];
|
||||
[OBJECTMODE]: boolean;
|
||||
[ENCODING]: BufferEncoding | null;
|
||||
[ASYNC]: boolean;
|
||||
[DECODER]: SD | null;
|
||||
[EOF]: boolean;
|
||||
[EMITTED_END]: boolean;
|
||||
[EMITTING_END]: boolean;
|
||||
[CLOSED]: boolean;
|
||||
[EMITTED_ERROR]: unknown;
|
||||
[BUFFERLENGTH]: number;
|
||||
[DESTROYED]: boolean;
|
||||
[SIGNAL]?: AbortSignal;
|
||||
[ABORTED]: boolean;
|
||||
[DATALISTENERS]: number;
|
||||
[DISCARDED]: boolean;
|
||||
/**
|
||||
* true if the stream can be written
|
||||
*/
|
||||
writable: boolean;
|
||||
/**
|
||||
* true if the stream can be read
|
||||
*/
|
||||
readable: boolean;
|
||||
/**
|
||||
* If `RType` is Buffer, then options do not need to be provided.
|
||||
* Otherwise, an options object must be provided to specify either
|
||||
* {@link Minipass.SharedOptions.objectMode} or
|
||||
* {@link Minipass.SharedOptions.encoding}, as appropriate.
|
||||
*/
|
||||
constructor(...args: [Minipass.ObjectModeOptions] | (RType extends Buffer ? [] | [Minipass.Options<RType>] : [Minipass.Options<RType>]));
|
||||
/**
|
||||
* The amount of data stored in the buffer waiting to be read.
|
||||
*
|
||||
* For Buffer strings, this will be the total byte length.
|
||||
* For string encoding streams, this will be the string character length,
|
||||
* according to JavaScript's `string.length` logic.
|
||||
* For objectMode streams, this is a count of the items waiting to be
|
||||
* emitted.
|
||||
*/
|
||||
get bufferLength(): number;
|
||||
/**
|
||||
* The `BufferEncoding` currently in use, or `null`
|
||||
*/
|
||||
get encoding(): BufferEncoding | null;
|
||||
/**
|
||||
* @deprecated - This is a read only property
|
||||
*/
|
||||
set encoding(_enc: BufferEncoding | null);
|
||||
/**
|
||||
* @deprecated - Encoding may only be set at instantiation time
|
||||
*/
|
||||
setEncoding(_enc: Minipass.Encoding): void;
|
||||
/**
|
||||
* True if this is an objectMode stream
|
||||
*/
|
||||
get objectMode(): boolean;
|
||||
/**
|
||||
* @deprecated - This is a read-only property
|
||||
*/
|
||||
set objectMode(_om: boolean);
|
||||
/**
|
||||
* true if this is an async stream
|
||||
*/
|
||||
get ['async'](): boolean;
|
||||
/**
|
||||
* Set to true to make this stream async.
|
||||
*
|
||||
* Once set, it cannot be unset, as this would potentially cause incorrect
|
||||
* behavior. Ie, a sync stream can be made async, but an async stream
|
||||
* cannot be safely made sync.
|
||||
*/
|
||||
set ['async'](a: boolean);
|
||||
[ABORT](): void;
|
||||
/**
|
||||
* True if the stream has been aborted.
|
||||
*/
|
||||
get aborted(): boolean;
|
||||
/**
|
||||
* No-op setter. Stream aborted status is set via the AbortSignal provided
|
||||
* in the constructor options.
|
||||
*/
|
||||
set aborted(_: boolean);
|
||||
/**
|
||||
* Write data into the stream
|
||||
*
|
||||
* If the chunk written is a string, and encoding is not specified, then
|
||||
* `utf8` will be assumed. If the stream encoding matches the encoding of
|
||||
* a written string, and the state of the string decoder allows it, then
|
||||
* the string will be passed through to either the output or the internal
|
||||
* buffer without any processing. Otherwise, it will be turned into a
|
||||
* Buffer object for processing into the desired encoding.
|
||||
*
|
||||
* If provided, `cb` function is called immediately before return for
|
||||
* sync streams, or on next tick for async streams, because for this
|
||||
* base class, a chunk is considered "processed" once it is accepted
|
||||
* and either emitted or buffered. That is, the callback does not indicate
|
||||
* that the chunk has been eventually emitted, though of course child
|
||||
* classes can override this function to do whatever processing is required
|
||||
* and call `super.write(...)` only once processing is completed.
|
||||
*/
|
||||
write(chunk: WType, cb?: () => void): boolean;
|
||||
write(chunk: WType, encoding?: Minipass.Encoding, cb?: () => void): boolean;
|
||||
/**
|
||||
* Low-level explicit read method.
|
||||
*
|
||||
* In objectMode, the argument is ignored, and one item is returned if
|
||||
* available.
|
||||
*
|
||||
* `n` is the number of bytes (or in the case of encoding streams,
|
||||
* characters) to consume. If `n` is not provided, then the entire buffer
|
||||
* is returned, or `null` is returned if no data is available.
|
||||
*
|
||||
* If `n` is greater that the amount of data in the internal buffer,
|
||||
* then `null` is returned.
|
||||
*/
|
||||
read(n?: number | null): RType | null;
|
||||
[READ](n: number | null, chunk: RType): RType;
|
||||
/**
|
||||
* End the stream, optionally providing a final write.
|
||||
*
|
||||
* See {@link Minipass#write} for argument descriptions
|
||||
*/
|
||||
end(cb?: () => void): this;
|
||||
end(chunk: WType, cb?: () => void): this;
|
||||
end(chunk: WType, encoding?: Minipass.Encoding, cb?: () => void): this;
|
||||
[RESUME](): void;
|
||||
/**
|
||||
* Resume the stream if it is currently in a paused state
|
||||
*
|
||||
* If called when there are no pipe destinations or `data` event listeners,
|
||||
* this will place the stream in a "discarded" state, where all data will
|
||||
* be thrown away. The discarded state is removed if a pipe destination or
|
||||
* data handler is added, if pause() is called, or if any synchronous or
|
||||
* asynchronous iteration is started.
|
||||
*/
|
||||
resume(): void;
|
||||
/**
|
||||
* Pause the stream
|
||||
*/
|
||||
pause(): void;
|
||||
/**
|
||||
* true if the stream has been forcibly destroyed
|
||||
*/
|
||||
get destroyed(): boolean;
|
||||
/**
|
||||
* true if the stream is currently in a flowing state, meaning that
|
||||
* any writes will be immediately emitted.
|
||||
*/
|
||||
get flowing(): boolean;
|
||||
/**
|
||||
* true if the stream is currently in a paused state
|
||||
*/
|
||||
get paused(): boolean;
|
||||
[BUFFERPUSH](chunk: RType): void;
|
||||
[BUFFERSHIFT](): RType;
|
||||
[FLUSH](noDrain?: boolean): void;
|
||||
[FLUSHCHUNK](chunk: RType): boolean;
|
||||
/**
|
||||
* Pipe all data emitted by this stream into the destination provided.
|
||||
*
|
||||
* Triggers the flow of data.
|
||||
*/
|
||||
pipe<W extends Minipass.Writable>(dest: W, opts?: PipeOptions): W;
|
||||
/**
|
||||
* Fully unhook a piped destination stream.
|
||||
*
|
||||
* If the destination stream was the only consumer of this stream (ie,
|
||||
* there are no other piped destinations or `'data'` event listeners)
|
||||
* then the flow of data will stop until there is another consumer or
|
||||
* {@link Minipass#resume} is explicitly called.
|
||||
*/
|
||||
unpipe<W extends Minipass.Writable>(dest: W): void;
|
||||
/**
|
||||
* Alias for {@link Minipass#on}
|
||||
*/
|
||||
addListener<Event extends keyof Events>(ev: Event, handler: (...args: Events[Event]) => any): this;
|
||||
/**
|
||||
* Mostly identical to `EventEmitter.on`, with the following
|
||||
* behavior differences to prevent data loss and unnecessary hangs:
|
||||
*
|
||||
* - Adding a 'data' event handler will trigger the flow of data
|
||||
*
|
||||
* - Adding a 'readable' event handler when there is data waiting to be read
|
||||
* will cause 'readable' to be emitted immediately.
|
||||
*
|
||||
* - Adding an 'endish' event handler ('end', 'finish', etc.) which has
|
||||
* already passed will cause the event to be emitted immediately and all
|
||||
* handlers removed.
|
||||
*
|
||||
* - Adding an 'error' event handler after an error has been emitted will
|
||||
* cause the event to be re-emitted immediately with the error previously
|
||||
* raised.
|
||||
*/
|
||||
on<Event extends keyof Events>(ev: Event, handler: (...args: Events[Event]) => any): this;
|
||||
/**
|
||||
* Alias for {@link Minipass#off}
|
||||
*/
|
||||
removeListener<Event extends keyof Events>(ev: Event, handler: (...args: Events[Event]) => any): this;
|
||||
/**
|
||||
* Mostly identical to `EventEmitter.off`
|
||||
*
|
||||
* If a 'data' event handler is removed, and it was the last consumer
|
||||
* (ie, there are no pipe destinations or other 'data' event listeners),
|
||||
* then the flow of data will stop until there is another consumer or
|
||||
* {@link Minipass#resume} is explicitly called.
|
||||
*/
|
||||
off<Event extends keyof Events>(ev: Event, handler: (...args: Events[Event]) => any): this;
|
||||
/**
|
||||
* Mostly identical to `EventEmitter.removeAllListeners`
|
||||
*
|
||||
* If all 'data' event handlers are removed, and they were the last consumer
|
||||
* (ie, there are no pipe destinations), then the flow of data will stop
|
||||
* until there is another consumer or {@link Minipass#resume} is explicitly
|
||||
* called.
|
||||
*/
|
||||
removeAllListeners<Event extends keyof Events>(ev?: Event): this;
|
||||
/**
|
||||
* true if the 'end' event has been emitted
|
||||
*/
|
||||
get emittedEnd(): boolean;
|
||||
[MAYBE_EMIT_END](): void;
|
||||
/**
|
||||
* Mostly identical to `EventEmitter.emit`, with the following
|
||||
* behavior differences to prevent data loss and unnecessary hangs:
|
||||
*
|
||||
* If the stream has been destroyed, and the event is something other
|
||||
* than 'close' or 'error', then `false` is returned and no handlers
|
||||
* are called.
|
||||
*
|
||||
* If the event is 'end', and has already been emitted, then the event
|
||||
* is ignored. If the stream is in a paused or non-flowing state, then
|
||||
* the event will be deferred until data flow resumes. If the stream is
|
||||
* async, then handlers will be called on the next tick rather than
|
||||
* immediately.
|
||||
*
|
||||
* If the event is 'close', and 'end' has not yet been emitted, then
|
||||
* the event will be deferred until after 'end' is emitted.
|
||||
*
|
||||
* If the event is 'error', and an AbortSignal was provided for the stream,
|
||||
* and there are no listeners, then the event is ignored, matching the
|
||||
* behavior of node core streams in the presense of an AbortSignal.
|
||||
*
|
||||
* If the event is 'finish' or 'prefinish', then all listeners will be
|
||||
* removed after emitting the event, to prevent double-firing.
|
||||
*/
|
||||
emit<Event extends keyof Events>(ev: Event, ...args: Events[Event]): boolean;
|
||||
[EMITDATA](data: RType): boolean;
|
||||
[EMITEND](): boolean;
|
||||
[EMITEND2](): boolean;
|
||||
/**
|
||||
* Return a Promise that resolves to an array of all emitted data once
|
||||
* the stream ends.
|
||||
*/
|
||||
collect(): Promise<RType[] & {
|
||||
dataLength: number;
|
||||
}>;
|
||||
/**
|
||||
* Return a Promise that resolves to the concatenation of all emitted data
|
||||
* once the stream ends.
|
||||
*
|
||||
* Not allowed on objectMode streams.
|
||||
*/
|
||||
concat(): Promise<RType>;
|
||||
/**
|
||||
* Return a void Promise that resolves once the stream ends.
|
||||
*/
|
||||
promise(): Promise<void>;
|
||||
/**
|
||||
* Asynchronous `for await of` iteration.
|
||||
*
|
||||
* This will continue emitting all chunks until the stream terminates.
|
||||
*/
|
||||
[Symbol.asyncIterator](): AsyncGenerator<RType, void, void>;
|
||||
/**
|
||||
* Synchronous `for of` iteration.
|
||||
*
|
||||
* The iteration will terminate when the internal buffer runs out, even
|
||||
* if the stream has not yet terminated.
|
||||
*/
|
||||
[Symbol.iterator](): Generator<RType, void, void>;
|
||||
/**
|
||||
* Destroy a stream, preventing it from being used for any further purpose.
|
||||
*
|
||||
* If the stream has a `close()` method, then it will be called on
|
||||
* destruction.
|
||||
*
|
||||
* After destruction, any attempt to write data, read data, or emit most
|
||||
* events will be ignored.
|
||||
*
|
||||
* If an error argument is provided, then it will be emitted in an
|
||||
* 'error' event.
|
||||
*/
|
||||
destroy(er?: unknown): this;
|
||||
/**
|
||||
* Alias for {@link isStream}
|
||||
*
|
||||
* Former export location, maintained for backwards compatibility.
|
||||
*
|
||||
* @deprecated
|
||||
*/
|
||||
static get isStream(): (s: any) => s is NodeJS.WriteStream | NodeJS.ReadStream | Minipass<any, any, any> | (NodeJS.ReadStream & {
|
||||
fd: number;
|
||||
}) | (EventEmitter & {
|
||||
pause(): any;
|
||||
resume(): any;
|
||||
pipe(...destArgs: any[]): any;
|
||||
}) | (NodeJS.WriteStream & {
|
||||
fd: number;
|
||||
}) | (EventEmitter & {
|
||||
end(): any;
|
||||
write(chunk: any, ...args: any[]): any;
|
||||
});
|
||||
}
|
||||
//# sourceMappingURL=index.d.ts.map
|
1
node_modules/npm-registry-fetch/node_modules/minipass/dist/commonjs/index.d.ts.map
generated
vendored
Normal file
1
node_modules/npm-registry-fetch/node_modules/minipass/dist/commonjs/index.d.ts.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
1028
node_modules/npm-registry-fetch/node_modules/minipass/dist/commonjs/index.js
generated
vendored
Normal file
1028
node_modules/npm-registry-fetch/node_modules/minipass/dist/commonjs/index.js
generated
vendored
Normal file
File diff suppressed because it is too large
Load diff
1
node_modules/npm-registry-fetch/node_modules/minipass/dist/commonjs/index.js.map
generated
vendored
Normal file
1
node_modules/npm-registry-fetch/node_modules/minipass/dist/commonjs/index.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
3
node_modules/npm-registry-fetch/node_modules/minipass/dist/commonjs/package.json
generated
vendored
Normal file
3
node_modules/npm-registry-fetch/node_modules/minipass/dist/commonjs/package.json
generated
vendored
Normal file
|
@ -0,0 +1,3 @@
|
|||
{
|
||||
"type": "commonjs"
|
||||
}
|
549
node_modules/npm-registry-fetch/node_modules/minipass/dist/esm/index.d.ts
generated
vendored
Normal file
549
node_modules/npm-registry-fetch/node_modules/minipass/dist/esm/index.d.ts
generated
vendored
Normal file
|
@ -0,0 +1,549 @@
|
|||
/// <reference types="node" resolution-mode="require"/>
|
||||
/// <reference types="node" resolution-mode="require"/>
|
||||
/// <reference types="node" resolution-mode="require"/>
|
||||
/// <reference types="node" resolution-mode="require"/>
|
||||
import { EventEmitter } from 'node:events';
|
||||
import { StringDecoder } from 'node:string_decoder';
|
||||
/**
|
||||
* Same as StringDecoder, but exposing the `lastNeed` flag on the type
|
||||
*/
|
||||
type SD = StringDecoder & {
|
||||
lastNeed: boolean;
|
||||
};
|
||||
export type { SD, Pipe, PipeProxyErrors };
|
||||
/**
|
||||
* Return true if the argument is a Minipass stream, Node stream, or something
|
||||
* else that Minipass can interact with.
|
||||
*/
|
||||
export declare const isStream: (s: any) => s is NodeJS.WriteStream | NodeJS.ReadStream | Minipass<any, any, any> | (NodeJS.ReadStream & {
|
||||
fd: number;
|
||||
}) | (EventEmitter & {
|
||||
pause(): any;
|
||||
resume(): any;
|
||||
pipe(...destArgs: any[]): any;
|
||||
}) | (NodeJS.WriteStream & {
|
||||
fd: number;
|
||||
}) | (EventEmitter & {
|
||||
end(): any;
|
||||
write(chunk: any, ...args: any[]): any;
|
||||
});
|
||||
/**
|
||||
* Return true if the argument is a valid {@link Minipass.Readable}
|
||||
*/
|
||||
export declare const isReadable: (s: any) => s is Minipass.Readable;
|
||||
/**
|
||||
* Return true if the argument is a valid {@link Minipass.Writable}
|
||||
*/
|
||||
export declare const isWritable: (s: any) => s is Minipass.Readable;
|
||||
declare const EOF: unique symbol;
|
||||
declare const MAYBE_EMIT_END: unique symbol;
|
||||
declare const EMITTED_END: unique symbol;
|
||||
declare const EMITTING_END: unique symbol;
|
||||
declare const EMITTED_ERROR: unique symbol;
|
||||
declare const CLOSED: unique symbol;
|
||||
declare const READ: unique symbol;
|
||||
declare const FLUSH: unique symbol;
|
||||
declare const FLUSHCHUNK: unique symbol;
|
||||
declare const ENCODING: unique symbol;
|
||||
declare const DECODER: unique symbol;
|
||||
declare const FLOWING: unique symbol;
|
||||
declare const PAUSED: unique symbol;
|
||||
declare const RESUME: unique symbol;
|
||||
declare const BUFFER: unique symbol;
|
||||
declare const PIPES: unique symbol;
|
||||
declare const BUFFERLENGTH: unique symbol;
|
||||
declare const BUFFERPUSH: unique symbol;
|
||||
declare const BUFFERSHIFT: unique symbol;
|
||||
declare const OBJECTMODE: unique symbol;
|
||||
declare const DESTROYED: unique symbol;
|
||||
declare const ERROR: unique symbol;
|
||||
declare const EMITDATA: unique symbol;
|
||||
declare const EMITEND: unique symbol;
|
||||
declare const EMITEND2: unique symbol;
|
||||
declare const ASYNC: unique symbol;
|
||||
declare const ABORT: unique symbol;
|
||||
declare const ABORTED: unique symbol;
|
||||
declare const SIGNAL: unique symbol;
|
||||
declare const DATALISTENERS: unique symbol;
|
||||
declare const DISCARDED: unique symbol;
|
||||
/**
|
||||
* Options that may be passed to stream.pipe()
|
||||
*/
|
||||
export interface PipeOptions {
|
||||
/**
|
||||
* end the destination stream when the source stream ends
|
||||
*/
|
||||
end?: boolean;
|
||||
/**
|
||||
* proxy errors from the source stream to the destination stream
|
||||
*/
|
||||
proxyErrors?: boolean;
|
||||
}
|
||||
/**
|
||||
* Internal class representing a pipe to a destination stream.
|
||||
*
|
||||
* @internal
|
||||
*/
|
||||
declare class Pipe<T extends unknown> {
|
||||
src: Minipass<T>;
|
||||
dest: Minipass<any, T>;
|
||||
opts: PipeOptions;
|
||||
ondrain: () => any;
|
||||
constructor(src: Minipass<T>, dest: Minipass.Writable, opts: PipeOptions);
|
||||
unpipe(): void;
|
||||
proxyErrors(_er: any): void;
|
||||
end(): void;
|
||||
}
|
||||
/**
|
||||
* Internal class representing a pipe to a destination stream where
|
||||
* errors are proxied.
|
||||
*
|
||||
* @internal
|
||||
*/
|
||||
declare class PipeProxyErrors<T> extends Pipe<T> {
|
||||
unpipe(): void;
|
||||
constructor(src: Minipass<T>, dest: Minipass.Writable, opts: PipeOptions);
|
||||
}
|
||||
export declare namespace Minipass {
|
||||
/**
|
||||
* Encoding used to create a stream that outputs strings rather than
|
||||
* Buffer objects.
|
||||
*/
|
||||
export type Encoding = BufferEncoding | 'buffer' | null;
|
||||
/**
|
||||
* Any stream that Minipass can pipe into
|
||||
*/
|
||||
export type Writable = Minipass<any, any, any> | NodeJS.WriteStream | (NodeJS.WriteStream & {
|
||||
fd: number;
|
||||
}) | (EventEmitter & {
|
||||
end(): any;
|
||||
write(chunk: any, ...args: any[]): any;
|
||||
});
|
||||
/**
|
||||
* Any stream that can be read from
|
||||
*/
|
||||
export type Readable = Minipass<any, any, any> | NodeJS.ReadStream | (NodeJS.ReadStream & {
|
||||
fd: number;
|
||||
}) | (EventEmitter & {
|
||||
pause(): any;
|
||||
resume(): any;
|
||||
pipe(...destArgs: any[]): any;
|
||||
});
|
||||
/**
|
||||
* Utility type that can be iterated sync or async
|
||||
*/
|
||||
export type DualIterable<T> = Iterable<T> & AsyncIterable<T>;
|
||||
type EventArguments = Record<string | symbol, unknown[]>;
|
||||
/**
|
||||
* The listing of events that a Minipass class can emit.
|
||||
* Extend this when extending the Minipass class, and pass as
|
||||
* the third template argument. The key is the name of the event,
|
||||
* and the value is the argument list.
|
||||
*
|
||||
* Any undeclared events will still be allowed, but the handler will get
|
||||
* arguments as `unknown[]`.
|
||||
*/
|
||||
export interface Events<RType extends any = Buffer> extends EventArguments {
|
||||
readable: [];
|
||||
data: [chunk: RType];
|
||||
error: [er: unknown];
|
||||
abort: [reason: unknown];
|
||||
drain: [];
|
||||
resume: [];
|
||||
end: [];
|
||||
finish: [];
|
||||
prefinish: [];
|
||||
close: [];
|
||||
[DESTROYED]: [er?: unknown];
|
||||
[ERROR]: [er: unknown];
|
||||
}
|
||||
/**
|
||||
* String or buffer-like data that can be joined and sliced
|
||||
*/
|
||||
export type ContiguousData = Buffer | ArrayBufferLike | ArrayBufferView | string;
|
||||
export type BufferOrString = Buffer | string;
|
||||
/**
|
||||
* Options passed to the Minipass constructor.
|
||||
*/
|
||||
export type SharedOptions = {
|
||||
/**
|
||||
* Defer all data emission and other events until the end of the
|
||||
* current tick, similar to Node core streams
|
||||
*/
|
||||
async?: boolean;
|
||||
/**
|
||||
* A signal which will abort the stream
|
||||
*/
|
||||
signal?: AbortSignal;
|
||||
/**
|
||||
* Output string encoding. Set to `null` or `'buffer'` (or omit) to
|
||||
* emit Buffer objects rather than strings.
|
||||
*
|
||||
* Conflicts with `objectMode`
|
||||
*/
|
||||
encoding?: BufferEncoding | null | 'buffer';
|
||||
/**
|
||||
* Output data exactly as it was written, supporting non-buffer/string
|
||||
* data (such as arbitrary objects, falsey values, etc.)
|
||||
*
|
||||
* Conflicts with `encoding`
|
||||
*/
|
||||
objectMode?: boolean;
|
||||
};
|
||||
/**
|
||||
* Options for a string encoded output
|
||||
*/
|
||||
export type EncodingOptions = SharedOptions & {
|
||||
encoding: BufferEncoding;
|
||||
objectMode?: false;
|
||||
};
|
||||
/**
|
||||
* Options for contiguous data buffer output
|
||||
*/
|
||||
export type BufferOptions = SharedOptions & {
|
||||
encoding?: null | 'buffer';
|
||||
objectMode?: false;
|
||||
};
|
||||
/**
|
||||
* Options for objectMode arbitrary output
|
||||
*/
|
||||
export type ObjectModeOptions = SharedOptions & {
|
||||
objectMode: true;
|
||||
encoding?: null;
|
||||
};
|
||||
/**
|
||||
* Utility type to determine allowed options based on read type
|
||||
*/
|
||||
export type Options<T> = ObjectModeOptions | (T extends string ? EncodingOptions : T extends Buffer ? BufferOptions : SharedOptions);
|
||||
export {};
|
||||
}
|
||||
/**
|
||||
* Main export, the Minipass class
|
||||
*
|
||||
* `RType` is the type of data emitted, defaults to Buffer
|
||||
*
|
||||
* `WType` is the type of data to be written, if RType is buffer or string,
|
||||
* then any {@link Minipass.ContiguousData} is allowed.
|
||||
*
|
||||
* `Events` is the set of event handler signatures that this object
|
||||
* will emit, see {@link Minipass.Events}
|
||||
*/
|
||||
export declare class Minipass<RType extends unknown = Buffer, WType extends unknown = RType extends Minipass.BufferOrString ? Minipass.ContiguousData : RType, Events extends Minipass.Events<RType> = Minipass.Events<RType>> extends EventEmitter implements Minipass.DualIterable<RType> {
|
||||
[FLOWING]: boolean;
|
||||
[PAUSED]: boolean;
|
||||
[PIPES]: Pipe<RType>[];
|
||||
[BUFFER]: RType[];
|
||||
[OBJECTMODE]: boolean;
|
||||
[ENCODING]: BufferEncoding | null;
|
||||
[ASYNC]: boolean;
|
||||
[DECODER]: SD | null;
|
||||
[EOF]: boolean;
|
||||
[EMITTED_END]: boolean;
|
||||
[EMITTING_END]: boolean;
|
||||
[CLOSED]: boolean;
|
||||
[EMITTED_ERROR]: unknown;
|
||||
[BUFFERLENGTH]: number;
|
||||
[DESTROYED]: boolean;
|
||||
[SIGNAL]?: AbortSignal;
|
||||
[ABORTED]: boolean;
|
||||
[DATALISTENERS]: number;
|
||||
[DISCARDED]: boolean;
|
||||
/**
|
||||
* true if the stream can be written
|
||||
*/
|
||||
writable: boolean;
|
||||
/**
|
||||
* true if the stream can be read
|
||||
*/
|
||||
readable: boolean;
|
||||
/**
|
||||
* If `RType` is Buffer, then options do not need to be provided.
|
||||
* Otherwise, an options object must be provided to specify either
|
||||
* {@link Minipass.SharedOptions.objectMode} or
|
||||
* {@link Minipass.SharedOptions.encoding}, as appropriate.
|
||||
*/
|
||||
constructor(...args: [Minipass.ObjectModeOptions] | (RType extends Buffer ? [] | [Minipass.Options<RType>] : [Minipass.Options<RType>]));
|
||||
/**
|
||||
* The amount of data stored in the buffer waiting to be read.
|
||||
*
|
||||
* For Buffer strings, this will be the total byte length.
|
||||
* For string encoding streams, this will be the string character length,
|
||||
* according to JavaScript's `string.length` logic.
|
||||
* For objectMode streams, this is a count of the items waiting to be
|
||||
* emitted.
|
||||
*/
|
||||
get bufferLength(): number;
|
||||
/**
|
||||
* The `BufferEncoding` currently in use, or `null`
|
||||
*/
|
||||
get encoding(): BufferEncoding | null;
|
||||
/**
|
||||
* @deprecated - This is a read only property
|
||||
*/
|
||||
set encoding(_enc: BufferEncoding | null);
|
||||
/**
|
||||
* @deprecated - Encoding may only be set at instantiation time
|
||||
*/
|
||||
setEncoding(_enc: Minipass.Encoding): void;
|
||||
/**
|
||||
* True if this is an objectMode stream
|
||||
*/
|
||||
get objectMode(): boolean;
|
||||
/**
|
||||
* @deprecated - This is a read-only property
|
||||
*/
|
||||
set objectMode(_om: boolean);
|
||||
/**
|
||||
* true if this is an async stream
|
||||
*/
|
||||
get ['async'](): boolean;
|
||||
/**
|
||||
* Set to true to make this stream async.
|
||||
*
|
||||
* Once set, it cannot be unset, as this would potentially cause incorrect
|
||||
* behavior. Ie, a sync stream can be made async, but an async stream
|
||||
* cannot be safely made sync.
|
||||
*/
|
||||
set ['async'](a: boolean);
|
||||
[ABORT](): void;
|
||||
/**
|
||||
* True if the stream has been aborted.
|
||||
*/
|
||||
get aborted(): boolean;
|
||||
/**
|
||||
* No-op setter. Stream aborted status is set via the AbortSignal provided
|
||||
* in the constructor options.
|
||||
*/
|
||||
set aborted(_: boolean);
|
||||
/**
|
||||
* Write data into the stream
|
||||
*
|
||||
* If the chunk written is a string, and encoding is not specified, then
|
||||
* `utf8` will be assumed. If the stream encoding matches the encoding of
|
||||
* a written string, and the state of the string decoder allows it, then
|
||||
* the string will be passed through to either the output or the internal
|
||||
* buffer without any processing. Otherwise, it will be turned into a
|
||||
* Buffer object for processing into the desired encoding.
|
||||
*
|
||||
* If provided, `cb` function is called immediately before return for
|
||||
* sync streams, or on next tick for async streams, because for this
|
||||
* base class, a chunk is considered "processed" once it is accepted
|
||||
* and either emitted or buffered. That is, the callback does not indicate
|
||||
* that the chunk has been eventually emitted, though of course child
|
||||
* classes can override this function to do whatever processing is required
|
||||
* and call `super.write(...)` only once processing is completed.
|
||||
*/
|
||||
write(chunk: WType, cb?: () => void): boolean;
|
||||
write(chunk: WType, encoding?: Minipass.Encoding, cb?: () => void): boolean;
|
||||
/**
|
||||
* Low-level explicit read method.
|
||||
*
|
||||
* In objectMode, the argument is ignored, and one item is returned if
|
||||
* available.
|
||||
*
|
||||
* `n` is the number of bytes (or in the case of encoding streams,
|
||||
* characters) to consume. If `n` is not provided, then the entire buffer
|
||||
* is returned, or `null` is returned if no data is available.
|
||||
*
|
||||
* If `n` is greater that the amount of data in the internal buffer,
|
||||
* then `null` is returned.
|
||||
*/
|
||||
read(n?: number | null): RType | null;
|
||||
[READ](n: number | null, chunk: RType): RType;
|
||||
/**
|
||||
* End the stream, optionally providing a final write.
|
||||
*
|
||||
* See {@link Minipass#write} for argument descriptions
|
||||
*/
|
||||
end(cb?: () => void): this;
|
||||
end(chunk: WType, cb?: () => void): this;
|
||||
end(chunk: WType, encoding?: Minipass.Encoding, cb?: () => void): this;
|
||||
[RESUME](): void;
|
||||
/**
|
||||
* Resume the stream if it is currently in a paused state
|
||||
*
|
||||
* If called when there are no pipe destinations or `data` event listeners,
|
||||
* this will place the stream in a "discarded" state, where all data will
|
||||
* be thrown away. The discarded state is removed if a pipe destination or
|
||||
* data handler is added, if pause() is called, or if any synchronous or
|
||||
* asynchronous iteration is started.
|
||||
*/
|
||||
resume(): void;
|
||||
/**
|
||||
* Pause the stream
|
||||
*/
|
||||
pause(): void;
|
||||
/**
|
||||
* true if the stream has been forcibly destroyed
|
||||
*/
|
||||
get destroyed(): boolean;
|
||||
/**
|
||||
* true if the stream is currently in a flowing state, meaning that
|
||||
* any writes will be immediately emitted.
|
||||
*/
|
||||
get flowing(): boolean;
|
||||
/**
|
||||
* true if the stream is currently in a paused state
|
||||
*/
|
||||
get paused(): boolean;
|
||||
[BUFFERPUSH](chunk: RType): void;
|
||||
[BUFFERSHIFT](): RType;
|
||||
[FLUSH](noDrain?: boolean): void;
|
||||
[FLUSHCHUNK](chunk: RType): boolean;
|
||||
/**
|
||||
* Pipe all data emitted by this stream into the destination provided.
|
||||
*
|
||||
* Triggers the flow of data.
|
||||
*/
|
||||
pipe<W extends Minipass.Writable>(dest: W, opts?: PipeOptions): W;
|
||||
/**
|
||||
* Fully unhook a piped destination stream.
|
||||
*
|
||||
* If the destination stream was the only consumer of this stream (ie,
|
||||
* there are no other piped destinations or `'data'` event listeners)
|
||||
* then the flow of data will stop until there is another consumer or
|
||||
* {@link Minipass#resume} is explicitly called.
|
||||
*/
|
||||
unpipe<W extends Minipass.Writable>(dest: W): void;
|
||||
/**
|
||||
* Alias for {@link Minipass#on}
|
||||
*/
|
||||
addListener<Event extends keyof Events>(ev: Event, handler: (...args: Events[Event]) => any): this;
|
||||
/**
|
||||
* Mostly identical to `EventEmitter.on`, with the following
|
||||
* behavior differences to prevent data loss and unnecessary hangs:
|
||||
*
|
||||
* - Adding a 'data' event handler will trigger the flow of data
|
||||
*
|
||||
* - Adding a 'readable' event handler when there is data waiting to be read
|
||||
* will cause 'readable' to be emitted immediately.
|
||||
*
|
||||
* - Adding an 'endish' event handler ('end', 'finish', etc.) which has
|
||||
* already passed will cause the event to be emitted immediately and all
|
||||
* handlers removed.
|
||||
*
|
||||
* - Adding an 'error' event handler after an error has been emitted will
|
||||
* cause the event to be re-emitted immediately with the error previously
|
||||
* raised.
|
||||
*/
|
||||
on<Event extends keyof Events>(ev: Event, handler: (...args: Events[Event]) => any): this;
|
||||
/**
|
||||
* Alias for {@link Minipass#off}
|
||||
*/
|
||||
removeListener<Event extends keyof Events>(ev: Event, handler: (...args: Events[Event]) => any): this;
|
||||
/**
|
||||
* Mostly identical to `EventEmitter.off`
|
||||
*
|
||||
* If a 'data' event handler is removed, and it was the last consumer
|
||||
* (ie, there are no pipe destinations or other 'data' event listeners),
|
||||
* then the flow of data will stop until there is another consumer or
|
||||
* {@link Minipass#resume} is explicitly called.
|
||||
*/
|
||||
off<Event extends keyof Events>(ev: Event, handler: (...args: Events[Event]) => any): this;
|
||||
/**
|
||||
* Mostly identical to `EventEmitter.removeAllListeners`
|
||||
*
|
||||
* If all 'data' event handlers are removed, and they were the last consumer
|
||||
* (ie, there are no pipe destinations), then the flow of data will stop
|
||||
* until there is another consumer or {@link Minipass#resume} is explicitly
|
||||
* called.
|
||||
*/
|
||||
removeAllListeners<Event extends keyof Events>(ev?: Event): this;
|
||||
/**
|
||||
* true if the 'end' event has been emitted
|
||||
*/
|
||||
get emittedEnd(): boolean;
|
||||
[MAYBE_EMIT_END](): void;
|
||||
/**
|
||||
* Mostly identical to `EventEmitter.emit`, with the following
|
||||
* behavior differences to prevent data loss and unnecessary hangs:
|
||||
*
|
||||
* If the stream has been destroyed, and the event is something other
|
||||
* than 'close' or 'error', then `false` is returned and no handlers
|
||||
* are called.
|
||||
*
|
||||
* If the event is 'end', and has already been emitted, then the event
|
||||
* is ignored. If the stream is in a paused or non-flowing state, then
|
||||
* the event will be deferred until data flow resumes. If the stream is
|
||||
* async, then handlers will be called on the next tick rather than
|
||||
* immediately.
|
||||
*
|
||||
* If the event is 'close', and 'end' has not yet been emitted, then
|
||||
* the event will be deferred until after 'end' is emitted.
|
||||
*
|
||||
* If the event is 'error', and an AbortSignal was provided for the stream,
|
||||
* and there are no listeners, then the event is ignored, matching the
|
||||
* behavior of node core streams in the presense of an AbortSignal.
|
||||
*
|
||||
* If the event is 'finish' or 'prefinish', then all listeners will be
|
||||
* removed after emitting the event, to prevent double-firing.
|
||||
*/
|
||||
emit<Event extends keyof Events>(ev: Event, ...args: Events[Event]): boolean;
|
||||
[EMITDATA](data: RType): boolean;
|
||||
[EMITEND](): boolean;
|
||||
[EMITEND2](): boolean;
|
||||
/**
|
||||
* Return a Promise that resolves to an array of all emitted data once
|
||||
* the stream ends.
|
||||
*/
|
||||
collect(): Promise<RType[] & {
|
||||
dataLength: number;
|
||||
}>;
|
||||
/**
|
||||
* Return a Promise that resolves to the concatenation of all emitted data
|
||||
* once the stream ends.
|
||||
*
|
||||
* Not allowed on objectMode streams.
|
||||
*/
|
||||
concat(): Promise<RType>;
|
||||
/**
|
||||
* Return a void Promise that resolves once the stream ends.
|
||||
*/
|
||||
promise(): Promise<void>;
|
||||
/**
|
||||
* Asynchronous `for await of` iteration.
|
||||
*
|
||||
* This will continue emitting all chunks until the stream terminates.
|
||||
*/
|
||||
[Symbol.asyncIterator](): AsyncGenerator<RType, void, void>;
|
||||
/**
|
||||
* Synchronous `for of` iteration.
|
||||
*
|
||||
* The iteration will terminate when the internal buffer runs out, even
|
||||
* if the stream has not yet terminated.
|
||||
*/
|
||||
[Symbol.iterator](): Generator<RType, void, void>;
|
||||
/**
|
||||
* Destroy a stream, preventing it from being used for any further purpose.
|
||||
*
|
||||
* If the stream has a `close()` method, then it will be called on
|
||||
* destruction.
|
||||
*
|
||||
* After destruction, any attempt to write data, read data, or emit most
|
||||
* events will be ignored.
|
||||
*
|
||||
* If an error argument is provided, then it will be emitted in an
|
||||
* 'error' event.
|
||||
*/
|
||||
destroy(er?: unknown): this;
|
||||
/**
|
||||
* Alias for {@link isStream}
|
||||
*
|
||||
* Former export location, maintained for backwards compatibility.
|
||||
*
|
||||
* @deprecated
|
||||
*/
|
||||
static get isStream(): (s: any) => s is NodeJS.WriteStream | NodeJS.ReadStream | Minipass<any, any, any> | (NodeJS.ReadStream & {
|
||||
fd: number;
|
||||
}) | (EventEmitter & {
|
||||
pause(): any;
|
||||
resume(): any;
|
||||
pipe(...destArgs: any[]): any;
|
||||
}) | (NodeJS.WriteStream & {
|
||||
fd: number;
|
||||
}) | (EventEmitter & {
|
||||
end(): any;
|
||||
write(chunk: any, ...args: any[]): any;
|
||||
});
|
||||
}
|
||||
//# sourceMappingURL=index.d.ts.map
|
1
node_modules/npm-registry-fetch/node_modules/minipass/dist/esm/index.d.ts.map
generated
vendored
Normal file
1
node_modules/npm-registry-fetch/node_modules/minipass/dist/esm/index.d.ts.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
1018
node_modules/npm-registry-fetch/node_modules/minipass/dist/esm/index.js
generated
vendored
Normal file
1018
node_modules/npm-registry-fetch/node_modules/minipass/dist/esm/index.js
generated
vendored
Normal file
File diff suppressed because it is too large
Load diff
1
node_modules/npm-registry-fetch/node_modules/minipass/dist/esm/index.js.map
generated
vendored
Normal file
1
node_modules/npm-registry-fetch/node_modules/minipass/dist/esm/index.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
3
node_modules/npm-registry-fetch/node_modules/minipass/dist/esm/package.json
generated
vendored
Normal file
3
node_modules/npm-registry-fetch/node_modules/minipass/dist/esm/package.json
generated
vendored
Normal file
|
@ -0,0 +1,3 @@
|
|||
{
|
||||
"type": "module"
|
||||
}
|
82
node_modules/npm-registry-fetch/node_modules/minipass/package.json
generated
vendored
Normal file
82
node_modules/npm-registry-fetch/node_modules/minipass/package.json
generated
vendored
Normal file
|
@ -0,0 +1,82 @@
|
|||
{
|
||||
"name": "minipass",
|
||||
"version": "7.1.2",
|
||||
"description": "minimal implementation of a PassThrough stream",
|
||||
"main": "./dist/commonjs/index.js",
|
||||
"types": "./dist/commonjs/index.d.ts",
|
||||
"type": "module",
|
||||
"tshy": {
|
||||
"selfLink": false,
|
||||
"main": true,
|
||||
"exports": {
|
||||
"./package.json": "./package.json",
|
||||
".": "./src/index.ts"
|
||||
}
|
||||
},
|
||||
"exports": {
|
||||
"./package.json": "./package.json",
|
||||
".": {
|
||||
"import": {
|
||||
"types": "./dist/esm/index.d.ts",
|
||||
"default": "./dist/esm/index.js"
|
||||
},
|
||||
"require": {
|
||||
"types": "./dist/commonjs/index.d.ts",
|
||||
"default": "./dist/commonjs/index.js"
|
||||
}
|
||||
}
|
||||
},
|
||||
"files": [
|
||||
"dist"
|
||||
],
|
||||
"scripts": {
|
||||
"preversion": "npm test",
|
||||
"postversion": "npm publish",
|
||||
"prepublishOnly": "git push origin --follow-tags",
|
||||
"prepare": "tshy",
|
||||
"pretest": "npm run prepare",
|
||||
"presnap": "npm run prepare",
|
||||
"test": "tap",
|
||||
"snap": "tap",
|
||||
"format": "prettier --write . --loglevel warn",
|
||||
"typedoc": "typedoc --tsconfig .tshy/esm.json ./src/*.ts"
|
||||
},
|
||||
"prettier": {
|
||||
"semi": false,
|
||||
"printWidth": 75,
|
||||
"tabWidth": 2,
|
||||
"useTabs": false,
|
||||
"singleQuote": true,
|
||||
"jsxSingleQuote": false,
|
||||
"bracketSameLine": true,
|
||||
"arrowParens": "avoid",
|
||||
"endOfLine": "lf"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/end-of-stream": "^1.4.2",
|
||||
"@types/node": "^20.1.2",
|
||||
"end-of-stream": "^1.4.0",
|
||||
"node-abort-controller": "^3.1.1",
|
||||
"prettier": "^2.6.2",
|
||||
"tap": "^19.0.0",
|
||||
"through2": "^2.0.3",
|
||||
"tshy": "^1.14.0",
|
||||
"typedoc": "^0.25.1"
|
||||
},
|
||||
"repository": "https://github.com/isaacs/minipass",
|
||||
"keywords": [
|
||||
"passthrough",
|
||||
"stream"
|
||||
],
|
||||
"author": "Isaac Z. Schlueter <i@izs.me> (http://blog.izs.me/)",
|
||||
"license": "ISC",
|
||||
"engines": {
|
||||
"node": ">=16 || 14 >=14.17"
|
||||
},
|
||||
"tap": {
|
||||
"typecheck": true,
|
||||
"include": [
|
||||
"test/*.ts"
|
||||
]
|
||||
}
|
||||
}
|
68
node_modules/npm-registry-fetch/package.json
generated
vendored
Normal file
68
node_modules/npm-registry-fetch/package.json
generated
vendored
Normal file
|
@ -0,0 +1,68 @@
|
|||
{
|
||||
"name": "npm-registry-fetch",
|
||||
"version": "17.1.0",
|
||||
"description": "Fetch-based http client for use with npm registry APIs",
|
||||
"main": "lib",
|
||||
"files": [
|
||||
"bin/",
|
||||
"lib/"
|
||||
],
|
||||
"scripts": {
|
||||
"eslint": "eslint",
|
||||
"lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"",
|
||||
"lintfix": "npm run lint -- --fix",
|
||||
"test": "tap",
|
||||
"posttest": "npm run lint",
|
||||
"npmclilint": "npmcli-lint",
|
||||
"postsnap": "npm run lintfix --",
|
||||
"postlint": "template-oss-check",
|
||||
"snap": "tap",
|
||||
"template-oss-apply": "template-oss-apply --force"
|
||||
},
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "git+https://github.com/npm/npm-registry-fetch.git"
|
||||
},
|
||||
"keywords": [
|
||||
"npm",
|
||||
"registry",
|
||||
"fetch"
|
||||
],
|
||||
"author": "GitHub Inc.",
|
||||
"license": "ISC",
|
||||
"dependencies": {
|
||||
"@npmcli/redact": "^2.0.0",
|
||||
"jsonparse": "^1.3.1",
|
||||
"make-fetch-happen": "^13.0.0",
|
||||
"minipass": "^7.0.2",
|
||||
"minipass-fetch": "^3.0.0",
|
||||
"minizlib": "^2.1.2",
|
||||
"npm-package-arg": "^11.0.0",
|
||||
"proc-log": "^4.0.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@npmcli/eslint-config": "^4.0.0",
|
||||
"@npmcli/template-oss": "4.22.0",
|
||||
"cacache": "^18.0.0",
|
||||
"nock": "^13.2.4",
|
||||
"require-inject": "^1.4.4",
|
||||
"ssri": "^10.0.0",
|
||||
"tap": "^16.0.1"
|
||||
},
|
||||
"tap": {
|
||||
"check-coverage": true,
|
||||
"test-ignore": "test[\\\\/](util|cache)[\\\\/]",
|
||||
"nyc-arg": [
|
||||
"--exclude",
|
||||
"tap-snapshots/**"
|
||||
]
|
||||
},
|
||||
"engines": {
|
||||
"node": "^16.14.0 || >=18.0.0"
|
||||
},
|
||||
"templateOSS": {
|
||||
"//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.",
|
||||
"version": "4.22.0",
|
||||
"publish": "true"
|
||||
}
|
||||
}
|
Loading…
Add table
Add a link
Reference in a new issue