Updated the files.
This commit is contained in:
parent
1553e6b971
commit
753967d4f5
23418 changed files with 3784666 additions and 0 deletions
40
my-app/node_modules/@npmcli/agent/README.md
generated
vendored
Executable file
40
my-app/node_modules/@npmcli/agent/README.md
generated
vendored
Executable file
|
@ -0,0 +1,40 @@
|
|||
## @npmcli/agent
|
||||
|
||||
A pair of Agent implementations for nodejs that provide consistent keep-alives, granular timeouts, dns caching, and proxy support.
|
||||
|
||||
### Usage
|
||||
|
||||
```js
|
||||
const { getAgent, HttpAgent } = require('@npmcli/agent')
|
||||
const fetch = require('minipass-fetch')
|
||||
|
||||
const main = async () => {
|
||||
// if you know what agent you need, you can create one directly
|
||||
const agent = new HttpAgent(agentOptions)
|
||||
// or you can use the getAgent helper, it will determine and create an Agent
|
||||
// instance for you as well as reuse that agent for new requests as appropriate
|
||||
const agent = getAgent('https://registry.npmjs.org/npm', agentOptions)
|
||||
// minipass-fetch is just an example, this will work for any http client that
|
||||
// supports node's Agents
|
||||
const res = await fetch('https://registry.npmjs.org/npm', { agent })
|
||||
}
|
||||
|
||||
main()
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
All options supported by the node Agent implementations are supported here, see [the docs](https://nodejs.org/api/http.html#new-agentoptions) for those.
|
||||
|
||||
Options that have been added by this module include:
|
||||
|
||||
- `family`: what tcp family to use, can be `4` for IPv4, `6` for IPv6 or `0` for both.
|
||||
- `proxy`: a URL to a supported proxy, currently supports `HTTP CONNECT` based http/https proxies as well as socks4 and 5.
|
||||
- `dns`: configuration for the built-in dns cache
|
||||
  - `ttl`: how long (in milliseconds) to keep cached dns entries, defaults to `5 * 60 * 1000` (5 minutes)
|
||||
- `lookup`: optional function to override how dns lookups are performed, defaults to `require('dns').lookup`
|
||||
- `timeouts`: a set of granular timeouts, all default to `0`
|
||||
- `connection`: time between initiating connection and actually connecting
|
||||
- `idle`: time between data packets (if a top level `timeout` is provided, it will be copied here)
|
||||
- `response`: time between sending a request and receiving a response
|
||||
- `transfer`: time between starting to receive a request and consuming the response fully
|
203
my-app/node_modules/@npmcli/agent/lib/agents.js
generated
vendored
Executable file
203
my-app/node_modules/@npmcli/agent/lib/agents.js
generated
vendored
Executable file
|
@ -0,0 +1,203 @@
|
|||
'use strict'
|
||||
|
||||
const net = require('net')
|
||||
const tls = require('tls')
|
||||
const { once } = require('events')
|
||||
const timers = require('timers/promises')
|
||||
const { normalizeOptions, cacheOptions } = require('./options')
|
||||
const { getProxy, getProxyAgent, proxyCache } = require('./proxy.js')
|
||||
const Errors = require('./errors.js')
|
||||
const { Agent: AgentBase } = require('agent-base')
|
||||
|
||||
module.exports = class Agent extends AgentBase {
|
||||
#options
|
||||
#timeouts
|
||||
#proxy
|
||||
#noProxy
|
||||
#ProxyAgent
|
||||
|
||||
constructor (options = {}) {
|
||||
const { timeouts, proxy, noProxy, ...normalizedOptions } = normalizeOptions(options)
|
||||
|
||||
super(normalizedOptions)
|
||||
|
||||
this.#options = normalizedOptions
|
||||
this.#timeouts = timeouts
|
||||
|
||||
if (proxy) {
|
||||
this.#proxy = new URL(proxy)
|
||||
this.#noProxy = noProxy
|
||||
this.#ProxyAgent = getProxyAgent(proxy)
|
||||
}
|
||||
}
|
||||
|
||||
get proxy () {
|
||||
return this.#proxy ? { url: this.#proxy } : {}
|
||||
}
|
||||
|
||||
#getProxy (options) {
|
||||
if (!this.#proxy) {
|
||||
return
|
||||
}
|
||||
|
||||
const proxy = getProxy(`${options.protocol}//${options.host}:${options.port}`, {
|
||||
proxy: this.#proxy,
|
||||
noProxy: this.#noProxy,
|
||||
})
|
||||
|
||||
if (!proxy) {
|
||||
return
|
||||
}
|
||||
|
||||
const cacheKey = cacheOptions({
|
||||
...options,
|
||||
...this.#options,
|
||||
timeouts: this.#timeouts,
|
||||
proxy,
|
||||
})
|
||||
|
||||
if (proxyCache.has(cacheKey)) {
|
||||
return proxyCache.get(cacheKey)
|
||||
}
|
||||
|
||||
let ProxyAgent = this.#ProxyAgent
|
||||
if (Array.isArray(ProxyAgent)) {
|
||||
ProxyAgent = this.isSecureEndpoint(options) ? ProxyAgent[1] : ProxyAgent[0]
|
||||
}
|
||||
|
||||
const proxyAgent = new ProxyAgent(proxy, this.#options)
|
||||
proxyCache.set(cacheKey, proxyAgent)
|
||||
|
||||
return proxyAgent
|
||||
}
|
||||
|
||||
// takes an array of promises and races them against the connection timeout
|
||||
// which will throw the necessary error if it is hit. This will return the
|
||||
// result of the promise race.
|
||||
async #timeoutConnection ({ promises, options, timeout }, ac = new AbortController()) {
|
||||
if (timeout) {
|
||||
const connectionTimeout = timers.setTimeout(timeout, null, { signal: ac.signal })
|
||||
.then(() => {
|
||||
throw new Errors.ConnectionTimeoutError(`${options.host}:${options.port}`)
|
||||
}).catch((err) => {
|
||||
if (err.name === 'AbortError') {
|
||||
return
|
||||
}
|
||||
throw err
|
||||
})
|
||||
promises.push(connectionTimeout)
|
||||
}
|
||||
|
||||
let result
|
||||
try {
|
||||
result = await Promise.race(promises)
|
||||
ac.abort()
|
||||
} catch (err) {
|
||||
ac.abort()
|
||||
throw err
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
async connect (request, options) {
|
||||
// if the connection does not have its own lookup function
|
||||
// set, then use the one from our options
|
||||
options.lookup ??= this.#options.lookup
|
||||
|
||||
let socket
|
||||
let timeout = this.#timeouts.connection
|
||||
const isSecureEndpoint = this.isSecureEndpoint(options)
|
||||
|
||||
const proxy = this.#getProxy(options)
|
||||
if (proxy) {
|
||||
// some of the proxies will wait for the socket to fully connect before
|
||||
// returning so we have to await this while also racing it against the
|
||||
// connection timeout.
|
||||
const start = Date.now()
|
||||
socket = await this.#timeoutConnection({
|
||||
options,
|
||||
timeout,
|
||||
promises: [proxy.connect(request, options)],
|
||||
})
|
||||
// see how much time proxy.connect took and subtract it from
|
||||
// the timeout
|
||||
if (timeout) {
|
||||
timeout = timeout - (Date.now() - start)
|
||||
}
|
||||
} else {
|
||||
socket = (isSecureEndpoint ? tls : net).connect(options)
|
||||
}
|
||||
|
||||
socket.setKeepAlive(this.keepAlive, this.keepAliveMsecs)
|
||||
socket.setNoDelay(this.keepAlive)
|
||||
|
||||
const abortController = new AbortController()
|
||||
const { signal } = abortController
|
||||
|
||||
const connectPromise = socket[isSecureEndpoint ? 'secureConnecting' : 'connecting']
|
||||
? once(socket, isSecureEndpoint ? 'secureConnect' : 'connect', { signal })
|
||||
: Promise.resolve()
|
||||
|
||||
await this.#timeoutConnection({
|
||||
options,
|
||||
timeout,
|
||||
promises: [
|
||||
connectPromise,
|
||||
once(socket, 'error', { signal }).then((err) => {
|
||||
throw err[0]
|
||||
}),
|
||||
],
|
||||
}, abortController)
|
||||
|
||||
if (this.#timeouts.idle) {
|
||||
socket.setTimeout(this.#timeouts.idle, () => {
|
||||
socket.destroy(new Errors.IdleTimeoutError(`${options.host}:${options.port}`))
|
||||
})
|
||||
}
|
||||
|
||||
return socket
|
||||
}
|
||||
|
||||
addRequest (request, options) {
|
||||
const proxy = this.#getProxy(options)
|
||||
// it would be better to call proxy.addRequest here but this causes the
|
||||
// http-proxy-agent to call its super.addRequest which causes the request
|
||||
// to be added to the agent twice. since we only support 3 agents
|
||||
// currently (see the required agents in proxy.js) we have manually
|
||||
// checked that the only public methods we need to call are called in the
|
||||
// next block. this could change in the future and presumably we would get
|
||||
// failing tests until we have properly called the necessary methods on
|
||||
// each of our proxy agents
|
||||
if (proxy?.setRequestProps) {
|
||||
proxy.setRequestProps(request, options)
|
||||
}
|
||||
|
||||
request.setHeader('connection', this.keepAlive ? 'keep-alive' : 'close')
|
||||
|
||||
if (this.#timeouts.response) {
|
||||
let responseTimeout
|
||||
request.once('finish', () => {
|
||||
setTimeout(() => {
|
||||
request.destroy(new Errors.ResponseTimeoutError(request, this.#proxy))
|
||||
}, this.#timeouts.response)
|
||||
})
|
||||
request.once('response', () => {
|
||||
clearTimeout(responseTimeout)
|
||||
})
|
||||
}
|
||||
|
||||
if (this.#timeouts.transfer) {
|
||||
let transferTimeout
|
||||
request.once('response', (res) => {
|
||||
setTimeout(() => {
|
||||
res.destroy(new Errors.TransferTimeoutError(request, this.#proxy))
|
||||
}, this.#timeouts.transfer)
|
||||
res.once('close', () => {
|
||||
clearTimeout(transferTimeout)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
return super.addRequest(request, options)
|
||||
}
|
||||
}
|
53
my-app/node_modules/@npmcli/agent/lib/dns.js
generated
vendored
Executable file
53
my-app/node_modules/@npmcli/agent/lib/dns.js
generated
vendored
Executable file
|
@ -0,0 +1,53 @@
|
|||
'use strict'
|
||||
|
||||
const { LRUCache } = require('lru-cache')
|
||||
const dns = require('dns')
|
||||
|
||||
// this is a factory so that each request can have its own opts (i.e. ttl)
// while still sharing the cache across all requests
const cache = new LRUCache({ max: 50 })

// Returns `{ hints, lookup }` suitable for passing directly to
// (net|tls).connect. The returned lookup wraps the provided one (default
// dns.lookup) with a shared LRU cache keyed by hostname + effective options.
const getOptions = ({
  family = 0,
  hints = dns.ADDRCONFIG,
  all = false,
  verbatim = undefined,
  ttl = 5 * 60 * 1000,
  lookup = dns.lookup,
}) => ({
  // hints and lookup are returned since both are top level properties to (net|tls).connect
  hints,
  lookup: (hostname, ...args) => {
    const callback = args.pop() // callback is always last arg
    const lookupOptions = args[0] ?? {}

    const options = {
      family,
      hints,
      all,
      verbatim,
      // per dns.lookup's signature, a bare number as options means "family"
      ...(typeof lookupOptions === 'number' ? { family: lookupOptions } : lookupOptions),
    }

    // NOTE(review): key depends on JSON.stringify property order, which is
    // insertion order here — stable because `options` is always built the
    // same way above
    const key = JSON.stringify({ hostname, ...options })

    if (cache.has(key)) {
      const cached = cache.get(key)
      // defer via nextTick so the callback is always invoked asynchronously,
      // matching the uncached path
      return process.nextTick(callback, null, ...cached)
    }

    lookup(hostname, options, (err, ...result) => {
      if (err) {
        return callback(err)
      }

      // only successful results are cached, each with its own ttl
      cache.set(key, result, { ttl })
      return callback(null, ...result)
    })
  },
})

module.exports = {
  cache,
  getOptions,
}
|
61
my-app/node_modules/@npmcli/agent/lib/errors.js
generated
vendored
Executable file
61
my-app/node_modules/@npmcli/agent/lib/errors.js
generated
vendored
Executable file
|
@ -0,0 +1,61 @@
|
|||
'use strict'
|
||||
|
||||
// thrown when a proxy url uses a protocol that none of the supported
// proxy agents can handle
class InvalidProxyProtocolError extends Error {
  constructor (url) {
    const message = `Invalid protocol \`${url.protocol}\` connecting to proxy \`${url.host}\``
    super(message)
    Object.assign(this, { code: 'EINVALIDPROXY', proxy: url })
  }
}
|
||||
|
||||
// thrown when the connection timeout elapses before the socket connects;
// `host` is the `host:port` string of the destination
class ConnectionTimeoutError extends Error {
  constructor (host) {
    const message = `Timeout connecting to host \`${host}\``
    super(message)
    Object.assign(this, { code: 'ECONNECTIONTIMEOUT', host })
  }
}
|
||||
|
||||
// thrown when no data flows on an established socket for the configured
// idle timeout; `host` is the `host:port` string of the destination
class IdleTimeoutError extends Error {
  constructor (host) {
    const message = `Idle timeout reached for host \`${host}\``
    super(message)
    Object.assign(this, { code: 'EIDLETIMEOUT', host })
  }
}
|
||||
|
||||
// thrown when a response does not start arriving within the response
// timeout after the request finished sending; mentions the proxy when one
// was in use
class ResponseTimeoutError extends Error {
  constructor (request, proxy) {
    const via = proxy ? `from proxy \`${proxy.host}\` ` : ''
    super(`Response timeout ${via}connecting to host \`${request.host}\``)
    Object.assign(this, {
      code: 'ERESPONSETIMEOUT',
      proxy,
      request,
    })
  }
}
|
||||
|
||||
// thrown when a response body is not fully consumed within the transfer
// timeout after it started arriving; mentions the proxy when one was in use
class TransferTimeoutError extends Error {
  constructor (request, proxy) {
    const via = proxy ? `from proxy \`${proxy.host}\` ` : ''
    super(`Transfer timeout ${via}for \`${request.host}\``)
    Object.assign(this, {
      code: 'ETRANSFERTIMEOUT',
      proxy,
      request,
    })
  }
}
|
||||
|
||||
// all error classes carry a stable `code` property so callers can branch
// without instanceof checks across module copies
module.exports = {
  InvalidProxyProtocolError,
  ConnectionTimeoutError,
  IdleTimeoutError,
  ResponseTimeoutError,
  TransferTimeoutError,
}
|
56
my-app/node_modules/@npmcli/agent/lib/index.js
generated
vendored
Executable file
56
my-app/node_modules/@npmcli/agent/lib/index.js
generated
vendored
Executable file
|
@ -0,0 +1,56 @@
|
|||
'use strict'
|
||||
|
||||
const { LRUCache } = require('lru-cache')
|
||||
const { normalizeOptions, cacheOptions } = require('./options')
|
||||
const { getProxy, proxyCache } = require('./proxy.js')
|
||||
const dns = require('./dns.js')
|
||||
const Agent = require('./agents.js')
|
||||
|
||||
const agentCache = new LRUCache({ max: 20 })

// Returns an Agent appropriate for the given url, creating and caching one
// as needed. When `agent` is supplied in the options it is returned as-is —
// including `agent: false`, which is meaningful to http clients.
const getAgent = (url, { agent, proxy, noProxy, ...options } = {}) => {
  // false has meaning so this can't be a simple truthiness check
  if (agent != null) {
    return agent
  }

  const target = new URL(url)

  const agentOptions = {
    ...normalizeOptions(options),
    proxy: getProxy(target, { proxy, noProxy }),
  }

  const cacheKey = cacheOptions({
    ...agentOptions,
    secureEndpoint: target.protocol === 'https:',
  })

  if (agentCache.has(cacheKey)) {
    return agentCache.get(cacheKey)
  }

  const created = new Agent(agentOptions)
  agentCache.set(cacheKey, created)

  return created
}
|
||||
|
||||
module.exports = {
  getAgent,
  Agent,
  // these are exported for backwards compatibility; the single Agent class
  // now handles both http and https endpoints
  HttpAgent: Agent,
  HttpsAgent: Agent,
  // the internal caches are exposed so consumers (and tests) can inspect
  // or reset them
  cache: {
    proxy: proxyCache,
    agent: agentCache,
    dns: dns.cache,
    clear: () => {
      proxyCache.clear()
      agentCache.clear()
      dns.cache.clear()
    },
  },
}
|
86
my-app/node_modules/@npmcli/agent/lib/options.js
generated
vendored
Executable file
86
my-app/node_modules/@npmcli/agent/lib/options.js
generated
vendored
Executable file
|
@ -0,0 +1,86 @@
|
|||
'use strict'
|
||||
|
||||
const dns = require('./dns')
|
||||
|
||||
// Flattens user supplied options into a canonical shape: explicit node
// Agent defaults, a consolidated `timeouts` object, and the dns cache's
// `hints`/`lookup` hoisted to the top level. The spread order below is
// deliberate — user options override our defaults, but family/keepAlive/
// timeouts/dns always win over a raw spread of `opts`.
const normalizeOptions = (opts) => {
  const family = parseInt(opts.family ?? '0', 10)
  const keepAlive = opts.keepAlive ?? true

  const normalized = {
    // nodejs http agent options. these are all the defaults
    // but kept here to increase the likelihood of cache hits
    // https://nodejs.org/api/http.html#new-agentoptions
    keepAliveMsecs: keepAlive ? 1000 : undefined,
    maxSockets: opts.maxSockets ?? 15,
    maxTotalSockets: Infinity,
    maxFreeSockets: keepAlive ? 256 : undefined,
    scheduling: 'fifo',
    // then spread the rest of the options
    ...opts,
    // we already set these to their defaults that we want
    family,
    keepAlive,
    // our custom timeout options
    timeouts: {
      // the standard timeout option is mapped to our idle timeout
      // and then deleted below
      idle: opts.timeout ?? 0,
      connection: 0,
      response: 0,
      transfer: 0,
      ...opts.timeouts,
    },
    // get the dns options that go at the top level of socket connection
    ...dns.getOptions({ family, ...opts.dns }),
  }

  // remove timeout since we already used it to set our own idle timeout
  delete normalized.timeout

  return normalized
}
|
||||
|
||||
// Serializes an options object into a deterministic string cache key.
// Entries are sorted by key name so two objects with the same contents but
// different insertion order produce the same key. Values stringify as:
// null/undefined -> 'null', URL -> href, nested objects -> recursive sub-key.
const createKey = (obj) => {
  let key = ''
  // BUGFIX: the previous comparator was `a[0] - b[0]`, which evaluates to
  // NaN for string keys and therefore never sorted anything, defeating the
  // purpose of sorting; compare the key strings explicitly instead
  const sorted = Object.entries(obj).sort((a, b) => a[0].localeCompare(b[0], 'en'))
  for (let [k, v] of sorted) {
    if (v == null) {
      v = 'null'
    } else if (v instanceof URL) {
      v = v.toString()
    } else if (typeof v === 'object') {
      v = createKey(v)
    }
    key += `${k}:${v}:`
  }
  return key
}
|
||||
|
||||
// Projects an options object down to only the fields that affect socket /
// agent behavior and turns them into a string cache key. TLS-only fields
// are zeroed out for insecure endpoints so http and https requests with
// irrelevant tls options can still share an agent.
const cacheOptions = ({ secureEndpoint, ...options }) => createKey({
  secureEndpoint: !!secureEndpoint,
  // socket connect options
  family: options.family,
  hints: options.hints,
  localAddress: options.localAddress,
  // tls specific connect options
  strictSsl: secureEndpoint ? !!options.rejectUnauthorized : false,
  ca: secureEndpoint ? options.ca : null,
  cert: secureEndpoint ? options.cert : null,
  key: secureEndpoint ? options.key : null,
  // http agent options
  keepAlive: options.keepAlive,
  keepAliveMsecs: options.keepAliveMsecs,
  maxSockets: options.maxSockets,
  maxTotalSockets: options.maxTotalSockets,
  maxFreeSockets: options.maxFreeSockets,
  scheduling: options.scheduling,
  // timeout options
  timeouts: options.timeouts,
  // proxy
  proxy: options.proxy,
})

module.exports = {
  normalizeOptions,
  cacheOptions,
}
|
88
my-app/node_modules/@npmcli/agent/lib/proxy.js
generated
vendored
Executable file
88
my-app/node_modules/@npmcli/agent/lib/proxy.js
generated
vendored
Executable file
|
@ -0,0 +1,88 @@
|
|||
'use strict'
|
||||
|
||||
const { HttpProxyAgent } = require('http-proxy-agent')
|
||||
const { HttpsProxyAgent } = require('https-proxy-agent')
|
||||
const { SocksProxyAgent } = require('socks-proxy-agent')
|
||||
const { LRUCache } = require('lru-cache')
|
||||
const { InvalidProxyProtocolError } = require('./errors.js')
|
||||
|
||||
const PROXY_CACHE = new LRUCache({ max: 20 })

const SOCKS_PROTOCOLS = new Set(SocksProxyAgent.protocols)

const PROXY_ENV_KEYS = new Set(['https_proxy', 'http_proxy', 'proxy', 'no_proxy'])

// snapshot of the proxy related environment variables, lower-cased so that
// e.g. HTTPS_PROXY and https_proxy are treated the same. NOTE: captured once
// at module load time — later changes to process.env are not observed
const PROXY_ENV = Object.entries(process.env).reduce((acc, [key, value]) => {
  key = key.toLowerCase()
  if (PROXY_ENV_KEYS.has(key)) {
    acc[key] = value
  }
  return acc
}, {})

// Returns the proxy agent class(es) for a proxy url:
// - socks protocols -> SocksProxyAgent
// - http/https -> an [HttpProxyAgent, HttpsProxyAgent] pair, from which the
//   caller picks based on whether the destination endpoint is secure
// Throws InvalidProxyProtocolError for anything else.
const getProxyAgent = (url) => {
  url = new URL(url)

  // strip the trailing ':' from URL.protocol to match agent protocol lists
  const protocol = url.protocol.slice(0, -1)
  if (SOCKS_PROTOCOLS.has(protocol)) {
    return SocksProxyAgent
  }
  if (protocol === 'https' || protocol === 'http') {
    return [HttpProxyAgent, HttpsProxyAgent]
  }

  throw new InvalidProxyProtocolError(url)
}
|
||||
|
||||
// Returns true when `url`'s hostname matches an entry in `noProxy` (either
// a comma separated string or an array of domain suffixes). An entry
// matches the domain itself and any of its subdomains.
const isNoProxy = (url, noProxy) => {
  const entries = typeof noProxy === 'string'
    ? noProxy.split(',').map((entry) => entry.trim()).filter(Boolean)
    : noProxy

  if (!entries || !entries.length) {
    return false
  }

  // compare domain segments from the TLD inward so 'example.com' matches
  // both 'example.com' and 'foo.example.com'
  const hostParts = url.hostname.split('.').reverse()

  return entries.some((entry) => {
    const entryParts = entry.split('.').filter(Boolean).reverse()
    if (!entryParts.length) {
      return false
    }

    return entryParts.every((part, i) => hostParts[i] === part)
  })
}
|
||||
|
||||
// Determines the proxy URL (if any) to use for a destination url. Explicit
// `proxy`/`noProxy` arguments take precedence; otherwise the environment
// snapshot is consulted (https_proxy for secure destinations, then
// https_proxy/http_proxy/proxy for insecure ones). Returns a URL instance,
// or null when no proxy should be used.
const getProxy = (url, { proxy, noProxy }) => {
  url = new URL(url)

  if (!proxy) {
    proxy = url.protocol === 'https:'
      ? PROXY_ENV.https_proxy
      : PROXY_ENV.https_proxy || PROXY_ENV.http_proxy || PROXY_ENV.proxy
  }

  if (!noProxy) {
    noProxy = PROXY_ENV.no_proxy
  }

  if (!proxy || isNoProxy(url, noProxy)) {
    return null
  }

  return new URL(proxy)
}

module.exports = {
  getProxyAgent,
  getProxy,
  proxyCache: PROXY_CACHE,
}
|
15
my-app/node_modules/@npmcli/agent/node_modules/lru-cache/LICENSE
generated
vendored
Executable file
15
my-app/node_modules/@npmcli/agent/node_modules/lru-cache/LICENSE
generated
vendored
Executable file
|
@ -0,0 +1,15 @@
|
|||
The ISC License
|
||||
|
||||
Copyright (c) 2010-2023 Isaac Z. Schlueter and Contributors
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR
|
||||
IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
1204
my-app/node_modules/@npmcli/agent/node_modules/lru-cache/README.md
generated
vendored
Executable file
1204
my-app/node_modules/@npmcli/agent/node_modules/lru-cache/README.md
generated
vendored
Executable file
File diff suppressed because it is too large
Load diff
856
my-app/node_modules/@npmcli/agent/node_modules/lru-cache/dist/commonjs/index.d.ts
generated
vendored
Executable file
856
my-app/node_modules/@npmcli/agent/node_modules/lru-cache/dist/commonjs/index.d.ts
generated
vendored
Executable file
|
@ -0,0 +1,856 @@
|
|||
/**
 * @module LRUCache
 */
// brand symbol used to make the nominal number types below non-interchangeable
declare const TYPE: unique symbol;
export type PosInt = number & {
    [TYPE]: 'Positive Integer';
};
export type Index = number & {
    [TYPE]: 'LRUCache Index';
};
export type UintArray = Uint8Array | Uint16Array | Uint32Array;
export type NumberArray = UintArray | number[];
// an Array<number> subclass constructed at a fixed size
declare class ZeroArray extends Array<number> {
    constructor(size: number);
}
export type { ZeroArray };
export type { Stack };
export type StackLike = Stack | Index[];
// stack of cache indexes backed by a numeric heap array
declare class Stack {
    #private;
    heap: NumberArray;
    length: number;
    static create(max: number): StackLike;
    constructor(max: number, HeapCls: {
        new (n: number): NumberArray;
    });
    push(n: Index): void;
    pop(): Index;
}
/**
 * Promise representing an in-progress {@link LRUCache#fetch} call
 */
export type BackgroundFetch<V> = Promise<V | undefined> & {
    __returned: BackgroundFetch<V> | undefined;
    __abortController: AbortController;
    __staleWhileFetching: V | undefined;
};
// tuple of arguments for a deferred dispose callback invocation
export type DisposeTask<K, V> = [
    value: V,
    key: K,
    reason: LRUCache.DisposeReason
];
|
||||
export declare namespace LRUCache {
|
||||
/**
|
||||
* An integer greater than 0, reflecting the calculated size of items
|
||||
*/
|
||||
type Size = number;
|
||||
/**
|
||||
* Integer greater than 0, representing some number of milliseconds, or the
|
||||
* time at which a TTL started counting from.
|
||||
*/
|
||||
type Milliseconds = number;
|
||||
/**
|
||||
* An integer greater than 0, reflecting a number of items
|
||||
*/
|
||||
type Count = number;
|
||||
/**
|
||||
* The reason why an item was removed from the cache, passed
|
||||
* to the {@link Disposer} methods.
|
||||
*/
|
||||
type DisposeReason = 'evict' | 'set' | 'delete';
|
||||
/**
|
||||
* A method called upon item removal, passed as the
|
||||
* {@link OptionsBase.dispose} and/or
|
||||
* {@link OptionsBase.disposeAfter} options.
|
||||
*/
|
||||
type Disposer<K, V> = (value: V, key: K, reason: DisposeReason) => void;
|
||||
/**
|
||||
* A function that returns the effective calculated size
|
||||
* of an entry in the cache.
|
||||
*/
|
||||
type SizeCalculator<K, V> = (value: V, key: K) => Size;
|
||||
/**
|
||||
* Options provided to the
|
||||
* {@link OptionsBase.fetchMethod} function.
|
||||
*/
|
||||
interface FetcherOptions<K, V, FC = unknown> {
|
||||
signal: AbortSignal;
|
||||
options: FetcherFetchOptions<K, V, FC>;
|
||||
/**
|
||||
* Object provided in the {@link FetchOptions.context} option to
|
||||
* {@link LRUCache#fetch}
|
||||
*/
|
||||
context: FC;
|
||||
}
|
||||
/**
|
||||
* Status object that may be passed to {@link LRUCache#fetch},
|
||||
* {@link LRUCache#get}, {@link LRUCache#set}, and {@link LRUCache#has}.
|
||||
*/
|
||||
interface Status<V> {
|
||||
/**
|
||||
* The status of a set() operation.
|
||||
*
|
||||
* - add: the item was not found in the cache, and was added
|
||||
* - update: the item was in the cache, with the same value provided
|
||||
* - replace: the item was in the cache, and replaced
|
||||
* - miss: the item was not added to the cache for some reason
|
||||
*/
|
||||
set?: 'add' | 'update' | 'replace' | 'miss';
|
||||
/**
|
||||
* the ttl stored for the item, or undefined if ttls are not used.
|
||||
*/
|
||||
ttl?: Milliseconds;
|
||||
/**
|
||||
* the start time for the item, or undefined if ttls are not used.
|
||||
*/
|
||||
start?: Milliseconds;
|
||||
/**
|
||||
* The timestamp used for TTL calculation
|
||||
*/
|
||||
now?: Milliseconds;
|
||||
/**
|
||||
* the remaining ttl for the item, or undefined if ttls are not used.
|
||||
*/
|
||||
remainingTTL?: Milliseconds;
|
||||
/**
|
||||
* The calculated size for the item, if sizes are used.
|
||||
*/
|
||||
entrySize?: Size;
|
||||
/**
|
||||
* The total calculated size of the cache, if sizes are used.
|
||||
*/
|
||||
totalCalculatedSize?: Size;
|
||||
/**
|
||||
* A flag indicating that the item was not stored, due to exceeding the
|
||||
* {@link OptionsBase.maxEntrySize}
|
||||
*/
|
||||
maxEntrySizeExceeded?: true;
|
||||
/**
|
||||
* The old value, specified in the case of `set:'update'` or
|
||||
* `set:'replace'`
|
||||
*/
|
||||
oldValue?: V;
|
||||
/**
|
||||
* The results of a {@link LRUCache#has} operation
|
||||
*
|
||||
* - hit: the item was found in the cache
|
||||
* - stale: the item was found in the cache, but is stale
|
||||
* - miss: the item was not found in the cache
|
||||
*/
|
||||
has?: 'hit' | 'stale' | 'miss';
|
||||
/**
|
||||
* The status of a {@link LRUCache#fetch} operation.
|
||||
* Note that this can change as the underlying fetch() moves through
|
||||
* various states.
|
||||
*
|
||||
* - inflight: there is another fetch() for this key which is in process
|
||||
* - get: there is no fetchMethod, so {@link LRUCache#get} was called.
|
||||
* - miss: the item is not in cache, and will be fetched.
|
||||
* - hit: the item is in the cache, and was resolved immediately.
|
||||
* - stale: the item is in the cache, but stale.
|
||||
* - refresh: the item is in the cache, and not stale, but
|
||||
* {@link FetchOptions.forceRefresh} was specified.
|
||||
*/
|
||||
fetch?: 'get' | 'inflight' | 'miss' | 'hit' | 'stale' | 'refresh';
|
||||
/**
|
||||
* The {@link OptionsBase.fetchMethod} was called
|
||||
*/
|
||||
fetchDispatched?: true;
|
||||
/**
|
||||
* The cached value was updated after a successful call to
|
||||
* {@link OptionsBase.fetchMethod}
|
||||
*/
|
||||
fetchUpdated?: true;
|
||||
/**
|
||||
* The reason for a fetch() rejection. Either the error raised by the
|
||||
* {@link OptionsBase.fetchMethod}, or the reason for an
|
||||
* AbortSignal.
|
||||
*/
|
||||
fetchError?: Error;
|
||||
/**
|
||||
* The fetch received an abort signal
|
||||
*/
|
||||
fetchAborted?: true;
|
||||
/**
|
||||
* The abort signal received was ignored, and the fetch was allowed to
|
||||
* continue.
|
||||
*/
|
||||
fetchAbortIgnored?: true;
|
||||
/**
|
||||
* The fetchMethod promise resolved successfully
|
||||
*/
|
||||
fetchResolved?: true;
|
||||
/**
|
||||
* The fetchMethod promise was rejected
|
||||
*/
|
||||
fetchRejected?: true;
|
||||
/**
|
||||
* The status of a {@link LRUCache#get} operation.
|
||||
*
|
||||
* - fetching: The item is currently being fetched. If a previous value
|
||||
* is present and allowed, that will be returned.
|
||||
* - stale: The item is in the cache, and is stale.
|
||||
* - hit: the item is in the cache
|
||||
* - miss: the item is not in the cache
|
||||
*/
|
||||
get?: 'stale' | 'hit' | 'miss';
|
||||
/**
|
||||
* A fetch or get operation returned a stale value.
|
||||
*/
|
||||
returnedStale?: true;
|
||||
}
|
||||
/**
|
||||
* options which override the options set in the LRUCache constructor
|
||||
* when calling {@link LRUCache#fetch}.
|
||||
*
|
||||
* This is the union of {@link GetOptions} and {@link SetOptions}, plus
|
||||
* {@link OptionsBase.noDeleteOnFetchRejection},
|
||||
* {@link OptionsBase.allowStaleOnFetchRejection},
|
||||
* {@link FetchOptions.forceRefresh}, and
|
||||
* {@link FetcherOptions.context}
|
||||
*
|
||||
* Any of these may be modified in the {@link OptionsBase.fetchMethod}
|
||||
* function, but the {@link GetOptions} fields will of course have no
|
||||
* effect, as the {@link LRUCache#get} call already happened by the time
|
||||
* the fetchMethod is called.
|
||||
*/
|
||||
interface FetcherFetchOptions<K, V, FC = unknown> extends Pick<OptionsBase<K, V, FC>, 'allowStale' | 'updateAgeOnGet' | 'noDeleteOnStaleGet' | 'sizeCalculation' | 'ttl' | 'noDisposeOnSet' | 'noUpdateTTL' | 'noDeleteOnFetchRejection' | 'allowStaleOnFetchRejection' | 'ignoreFetchAbort' | 'allowStaleOnFetchAbort'> {
|
||||
status?: Status<V>;
|
||||
size?: Size;
|
||||
}
|
||||
/**
|
||||
* Options that may be passed to the {@link LRUCache#fetch} method.
|
||||
*/
|
||||
interface FetchOptions<K, V, FC> extends FetcherFetchOptions<K, V, FC> {
|
||||
/**
|
||||
* Set to true to force a re-load of the existing data, even if it
|
||||
* is not yet stale.
|
||||
*/
|
||||
forceRefresh?: boolean;
|
||||
/**
|
||||
* Context provided to the {@link OptionsBase.fetchMethod} as
|
||||
* the {@link FetcherOptions.context} param.
|
||||
*
|
||||
* If the FC type is specified as unknown (the default),
|
||||
* undefined or void, then this is optional. Otherwise, it will
|
||||
* be required.
|
||||
*/
|
||||
context?: FC;
|
||||
signal?: AbortSignal;
|
||||
status?: Status<V>;
|
||||
}
|
||||
/**
|
||||
* Options provided to {@link LRUCache#fetch} when the FC type is something
|
||||
* other than `unknown`, `undefined`, or `void`
|
||||
*/
|
||||
interface FetchOptionsWithContext<K, V, FC> extends FetchOptions<K, V, FC> {
|
||||
context: FC;
|
||||
}
|
||||
/**
|
||||
* Options provided to {@link LRUCache#fetch} when the FC type is
|
||||
* `undefined` or `void`
|
||||
*/
|
||||
interface FetchOptionsNoContext<K, V> extends FetchOptions<K, V, undefined> {
|
||||
context?: undefined;
|
||||
}
|
||||
/**
|
||||
* Options that may be passed to the {@link LRUCache#has} method.
|
||||
*/
|
||||
interface HasOptions<K, V, FC> extends Pick<OptionsBase<K, V, FC>, 'updateAgeOnHas'> {
|
||||
status?: Status<V>;
|
||||
}
|
||||
/**
|
||||
* Options that may be passed to the {@link LRUCache#get} method.
|
||||
*/
|
||||
interface GetOptions<K, V, FC> extends Pick<OptionsBase<K, V, FC>, 'allowStale' | 'updateAgeOnGet' | 'noDeleteOnStaleGet'> {
|
||||
status?: Status<V>;
|
||||
}
|
||||
/**
|
||||
* Options that may be passed to the {@link LRUCache#peek} method.
|
||||
*/
|
||||
interface PeekOptions<K, V, FC> extends Pick<OptionsBase<K, V, FC>, 'allowStale'> {
|
||||
}
|
||||
/**
|
||||
* Options that may be passed to the {@link LRUCache#set} method.
|
||||
*/
|
||||
interface SetOptions<K, V, FC> extends Pick<OptionsBase<K, V, FC>, 'sizeCalculation' | 'ttl' | 'noDisposeOnSet' | 'noUpdateTTL'> {
|
||||
/**
|
||||
* If size tracking is enabled, then setting an explicit size
|
||||
* in the {@link LRUCache#set} call will prevent calling the
|
||||
* {@link OptionsBase.sizeCalculation} function.
|
||||
*/
|
||||
size?: Size;
|
||||
/**
|
||||
* If TTL tracking is enabled, then setting an explicit start
|
||||
* time in the {@link LRUCache#set} call will override the
|
||||
* default time from `performance.now()` or `Date.now()`.
|
||||
*
|
||||
* Note that it must be a valid value for whichever time-tracking
|
||||
* method is in use.
|
||||
*/
|
||||
start?: Milliseconds;
|
||||
status?: Status<V>;
|
||||
}
|
||||
/**
|
||||
* The type signature for the {@link OptionsBase.fetchMethod} option.
|
||||
*/
|
||||
type Fetcher<K, V, FC = unknown> = (key: K, staleValue: V | undefined, options: FetcherOptions<K, V, FC>) => Promise<V | undefined | void> | V | undefined | void;
|
||||
/**
|
||||
* Options which may be passed to the {@link LRUCache} constructor.
|
||||
*
|
||||
* Most of these may be overridden in the various options that use
|
||||
* them.
|
||||
*
|
||||
* Despite all being technically optional, the constructor requires that
|
||||
* a cache is at minimum limited by one or more of {@link OptionsBase.max},
|
||||
* {@link OptionsBase.ttl}, or {@link OptionsBase.maxSize}.
|
||||
*
|
||||
* If {@link OptionsBase.ttl} is used alone, then it is strongly advised
|
||||
* (and in fact required by the type definitions here) that the cache
|
||||
* also set {@link OptionsBase.ttlAutopurge}, to prevent potentially
|
||||
* unbounded storage.
|
||||
*/
|
||||
interface OptionsBase<K, V, FC> {
|
||||
/**
|
||||
* The maximum number of items to store in the cache before evicting
|
||||
* old entries. This is read-only on the {@link LRUCache} instance,
|
||||
* and may not be overridden.
|
||||
*
|
||||
* If set, then storage space will be pre-allocated at construction
|
||||
* time, and the cache will perform significantly faster.
|
||||
*
|
||||
* Note that significantly fewer items may be stored, if
|
||||
* {@link OptionsBase.maxSize} and/or {@link OptionsBase.ttl} are also
|
||||
* set.
|
||||
*/
|
||||
max?: Count;
|
||||
/**
|
||||
* Max time in milliseconds for items to live in cache before they are
|
||||
* considered stale. Note that stale items are NOT preemptively removed
|
||||
* by default, and MAY live in the cache long after they have expired.
|
||||
*
|
||||
* Also, as this cache is optimized for LRU/MRU operations, some of
|
||||
* the staleness/TTL checks will reduce performance, as they will incur
|
||||
* overhead by deleting items.
|
||||
*
|
||||
* Must be an integer number of ms. If set to 0, this indicates "no TTL"
|
||||
*
|
||||
* @default 0
|
||||
*/
|
||||
ttl?: Milliseconds;
|
||||
/**
|
||||
* Minimum amount of time in ms in which to check for staleness.
|
||||
* Defaults to 1, which means that the current time is checked
|
||||
* at most once per millisecond.
|
||||
*
|
||||
* Set to 0 to check the current time every time staleness is tested.
|
||||
* (This reduces performance, and is theoretically unnecessary.)
|
||||
*
|
||||
* Setting this to a higher value will improve performance somewhat
|
||||
* while using ttl tracking, albeit at the expense of keeping stale
|
||||
* items around a bit longer than their TTLs would indicate.
|
||||
*
|
||||
* @default 1
|
||||
*/
|
||||
ttlResolution?: Milliseconds;
|
||||
/**
|
||||
* Preemptively remove stale items from the cache.
|
||||
* Note that this may significantly degrade performance,
|
||||
* especially if the cache is storing a large number of items.
|
||||
* It is almost always best to just leave the stale items in
|
||||
* the cache, and let them fall out as new items are added.
|
||||
*
|
||||
* Note that this means that {@link OptionsBase.allowStale} is a bit
|
||||
* pointless, as stale items will be deleted almost as soon as they
|
||||
* expire.
|
||||
*
|
||||
* @default false
|
||||
*/
|
||||
ttlAutopurge?: boolean;
|
||||
/**
|
||||
* Update the age of items on {@link LRUCache#get}, renewing their TTL
|
||||
*
|
||||
* Has no effect if {@link OptionsBase.ttl} is not set.
|
||||
*
|
||||
* @default false
|
||||
*/
|
||||
updateAgeOnGet?: boolean;
|
||||
/**
|
||||
* Update the age of items on {@link LRUCache#has}, renewing their TTL
|
||||
*
|
||||
* Has no effect if {@link OptionsBase.ttl} is not set.
|
||||
*
|
||||
* @default false
|
||||
*/
|
||||
updateAgeOnHas?: boolean;
|
||||
/**
|
||||
* Allow {@link LRUCache#get} and {@link LRUCache#fetch} calls to return
|
||||
* stale data, if available.
|
||||
*/
|
||||
allowStale?: boolean;
|
||||
/**
|
||||
* Function that is called on items when they are dropped from the cache.
|
||||
* This can be handy if you want to close file descriptors or do other
|
||||
* cleanup tasks when items are no longer accessible. Called with `key,
|
||||
* value`. It's called before actually removing the item from the
|
||||
* internal cache, so it is *NOT* safe to re-add them.
|
||||
*
|
||||
* Use {@link OptionsBase.disposeAfter} if you wish to dispose items after
|
||||
* they have been full removed, when it is safe to add them back to the
|
||||
* cache.
|
||||
*/
|
||||
dispose?: Disposer<K, V>;
|
||||
/**
|
||||
* The same as {@link OptionsBase.dispose}, but called *after* the entry
|
||||
* is completely removed and the cache is once again in a clean state.
|
||||
* It is safe to add an item right back into the cache at this point.
|
||||
* However, note that it is *very* easy to inadvertently create infinite
|
||||
* recursion this way.
|
||||
*/
|
||||
disposeAfter?: Disposer<K, V>;
|
||||
/**
|
||||
* Set to true to suppress calling the
|
||||
* {@link OptionsBase.dispose} function if the entry key is
|
||||
* still accessible within the cache.
|
||||
* This may be overridden by passing an options object to
|
||||
* {@link LRUCache#set}.
|
||||
*/
|
||||
noDisposeOnSet?: boolean;
|
||||
/**
|
||||
* Boolean flag to tell the cache to not update the TTL when
|
||||
* setting a new value for an existing key (ie, when updating a value
|
||||
* rather than inserting a new value). Note that the TTL value is
|
||||
* _always_ set (if provided) when adding a new entry into the cache.
|
||||
*
|
||||
* Has no effect if a {@link OptionsBase.ttl} is not set.
|
||||
*/
|
||||
noUpdateTTL?: boolean;
|
||||
/**
|
||||
* If you wish to track item size, you must provide a maxSize
|
||||
* note that we still will only keep up to max *actual items*,
|
||||
* if max is set, so size tracking may cause fewer than max items
|
||||
* to be stored. At the extreme, a single item of maxSize size
|
||||
* will cause everything else in the cache to be dropped when it
|
||||
* is added. Use with caution!
|
||||
*
|
||||
* Note also that size tracking can negatively impact performance,
|
||||
* though for most cases, only minimally.
|
||||
*/
|
||||
maxSize?: Size;
|
||||
/**
|
||||
* The maximum allowed size for any single item in the cache.
|
||||
*
|
||||
* If a larger item is passed to {@link LRUCache#set} or returned by a
|
||||
* {@link OptionsBase.fetchMethod}, then it will not be stored in the
|
||||
* cache.
|
||||
*/
|
||||
maxEntrySize?: Size;
|
||||
/**
|
||||
* A function that returns a number indicating the item's size.
|
||||
*
|
||||
* If not provided, and {@link OptionsBase.maxSize} or
|
||||
* {@link OptionsBase.maxEntrySize} are set, then all
|
||||
* {@link LRUCache#set} calls **must** provide an explicit
|
||||
* {@link SetOptions.size} or sizeCalculation param.
|
||||
*/
|
||||
sizeCalculation?: SizeCalculator<K, V>;
|
||||
/**
|
||||
* Method that provides the implementation for {@link LRUCache#fetch}
|
||||
*/
|
||||
fetchMethod?: Fetcher<K, V, FC>;
|
||||
/**
|
||||
* Set to true to suppress the deletion of stale data when a
|
||||
* {@link OptionsBase.fetchMethod} returns a rejected promise.
|
||||
*/
|
||||
noDeleteOnFetchRejection?: boolean;
|
||||
/**
|
||||
* Do not delete stale items when they are retrieved with
|
||||
* {@link LRUCache#get}.
|
||||
*
|
||||
* Note that the `get` return value will still be `undefined`
|
||||
* unless {@link OptionsBase.allowStale} is true.
|
||||
*/
|
||||
noDeleteOnStaleGet?: boolean;
|
||||
/**
|
||||
* Set to true to allow returning stale data when a
|
||||
* {@link OptionsBase.fetchMethod} throws an error or returns a rejected
|
||||
* promise.
|
||||
*
|
||||
* This differs from using {@link OptionsBase.allowStale} in that stale
|
||||
* data will ONLY be returned in the case that the
|
||||
* {@link LRUCache#fetch} fails, not any other times.
|
||||
*/
|
||||
allowStaleOnFetchRejection?: boolean;
|
||||
/**
|
||||
* Set to true to return a stale value from the cache when the
|
||||
* `AbortSignal` passed to the {@link OptionsBase.fetchMethod} dispatches an `'abort'`
|
||||
* event, whether user-triggered, or due to internal cache behavior.
|
||||
*
|
||||
* Unless {@link OptionsBase.ignoreFetchAbort} is also set, the underlying
|
||||
* {@link OptionsBase.fetchMethod} will still be considered canceled, and
|
||||
* any value it returns will be ignored and not cached.
|
||||
*
|
||||
* Caveat: since fetches are aborted when a new value is explicitly
|
||||
* set in the cache, this can lead to fetch returning a stale value,
|
||||
* since that was the fallback value _at the moment the `fetch()` was
|
||||
* initiated_, even though the new updated value is now present in
|
||||
* the cache.
|
||||
*
|
||||
* For example:
|
||||
*
|
||||
* ```ts
|
||||
* const cache = new LRUCache<string, any>({
|
||||
* ttl: 100,
|
||||
* fetchMethod: async (url, oldValue, { signal }) => {
|
||||
* const res = await fetch(url, { signal })
|
||||
* return await res.json()
|
||||
* }
|
||||
* })
|
||||
* cache.set('https://example.com/', { some: 'data' })
|
||||
* // 100ms go by...
|
||||
* const result = cache.fetch('https://example.com/')
|
||||
* cache.set('https://example.com/', { other: 'thing' })
|
||||
* console.log(await result) // { some: 'data' }
|
||||
* console.log(cache.get('https://example.com/')) // { other: 'thing' }
|
||||
* ```
|
||||
*/
|
||||
allowStaleOnFetchAbort?: boolean;
|
||||
/**
|
||||
* Set to true to ignore the `abort` event emitted by the `AbortSignal`
|
||||
* object passed to {@link OptionsBase.fetchMethod}, and still cache the
|
||||
* resulting resolution value, as long as it is not `undefined`.
|
||||
*
|
||||
* When used on its own, this means aborted {@link LRUCache#fetch} calls are not
|
||||
* immediately resolved or rejected when they are aborted, and instead
|
||||
* take the full time to await.
|
||||
*
|
||||
* When used with {@link OptionsBase.allowStaleOnFetchAbort}, aborted
|
||||
* {@link LRUCache#fetch} calls will resolve immediately to their stale
|
||||
* cached value or `undefined`, and will continue to process and eventually
|
||||
* update the cache when they resolve, as long as the resulting value is
|
||||
* not `undefined`, thus supporting a "return stale on timeout while
|
||||
* refreshing" mechanism by passing `AbortSignal.timeout(n)` as the signal.
|
||||
*
|
||||
* **Note**: regardless of this setting, an `abort` event _is still
|
||||
* emitted on the `AbortSignal` object_, so may result in invalid results
|
||||
* when passed to other underlying APIs that use AbortSignals.
|
||||
*
|
||||
* This may be overridden in the {@link OptionsBase.fetchMethod} or the
|
||||
* call to {@link LRUCache#fetch}.
|
||||
*/
|
||||
ignoreFetchAbort?: boolean;
|
||||
}
|
||||
interface OptionsMaxLimit<K, V, FC> extends OptionsBase<K, V, FC> {
|
||||
max: Count;
|
||||
}
|
||||
interface OptionsTTLLimit<K, V, FC> extends OptionsBase<K, V, FC> {
|
||||
ttl: Milliseconds;
|
||||
ttlAutopurge: boolean;
|
||||
}
|
||||
interface OptionsSizeLimit<K, V, FC> extends OptionsBase<K, V, FC> {
|
||||
maxSize: Size;
|
||||
}
|
||||
/**
|
||||
* The valid safe options for the {@link LRUCache} constructor
|
||||
*/
|
||||
type Options<K, V, FC> = OptionsMaxLimit<K, V, FC> | OptionsSizeLimit<K, V, FC> | OptionsTTLLimit<K, V, FC>;
|
||||
/**
|
||||
* Entry objects used by {@link LRUCache#load} and {@link LRUCache#dump},
|
||||
* and returned by {@link LRUCache#info}.
|
||||
*/
|
||||
interface Entry<V> {
|
||||
value: V;
|
||||
ttl?: Milliseconds;
|
||||
size?: Size;
|
||||
start?: Milliseconds;
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Default export, the thing you're using this module to get.
|
||||
*
|
||||
* All properties from the options object (with the exception of
|
||||
* {@link OptionsBase.max} and {@link OptionsBase.maxSize}) are added as
|
||||
* normal public members. (`max` and `maxBase` are read-only getters.)
|
||||
* Changing any of these will alter the defaults for subsequent method calls,
|
||||
* but is otherwise safe.
|
||||
*/
|
||||
export declare class LRUCache<K extends {}, V extends {}, FC = unknown> implements Map<K, V> {
|
||||
#private;
|
||||
/**
|
||||
* {@link LRUCache.OptionsBase.ttl}
|
||||
*/
|
||||
ttl: LRUCache.Milliseconds;
|
||||
/**
|
||||
* {@link LRUCache.OptionsBase.ttlResolution}
|
||||
*/
|
||||
ttlResolution: LRUCache.Milliseconds;
|
||||
/**
|
||||
* {@link LRUCache.OptionsBase.ttlAutopurge}
|
||||
*/
|
||||
ttlAutopurge: boolean;
|
||||
/**
|
||||
* {@link LRUCache.OptionsBase.updateAgeOnGet}
|
||||
*/
|
||||
updateAgeOnGet: boolean;
|
||||
/**
|
||||
* {@link LRUCache.OptionsBase.updateAgeOnHas}
|
||||
*/
|
||||
updateAgeOnHas: boolean;
|
||||
/**
|
||||
* {@link LRUCache.OptionsBase.allowStale}
|
||||
*/
|
||||
allowStale: boolean;
|
||||
/**
|
||||
* {@link LRUCache.OptionsBase.noDisposeOnSet}
|
||||
*/
|
||||
noDisposeOnSet: boolean;
|
||||
/**
|
||||
* {@link LRUCache.OptionsBase.noUpdateTTL}
|
||||
*/
|
||||
noUpdateTTL: boolean;
|
||||
/**
|
||||
* {@link LRUCache.OptionsBase.maxEntrySize}
|
||||
*/
|
||||
maxEntrySize: LRUCache.Size;
|
||||
/**
|
||||
* {@link LRUCache.OptionsBase.sizeCalculation}
|
||||
*/
|
||||
sizeCalculation?: LRUCache.SizeCalculator<K, V>;
|
||||
/**
|
||||
* {@link LRUCache.OptionsBase.noDeleteOnFetchRejection}
|
||||
*/
|
||||
noDeleteOnFetchRejection: boolean;
|
||||
/**
|
||||
* {@link LRUCache.OptionsBase.noDeleteOnStaleGet}
|
||||
*/
|
||||
noDeleteOnStaleGet: boolean;
|
||||
/**
|
||||
* {@link LRUCache.OptionsBase.allowStaleOnFetchAbort}
|
||||
*/
|
||||
allowStaleOnFetchAbort: boolean;
|
||||
/**
|
||||
* {@link LRUCache.OptionsBase.allowStaleOnFetchRejection}
|
||||
*/
|
||||
allowStaleOnFetchRejection: boolean;
|
||||
/**
|
||||
* {@link LRUCache.OptionsBase.ignoreFetchAbort}
|
||||
*/
|
||||
ignoreFetchAbort: boolean;
|
||||
/**
|
||||
* Do not call this method unless you need to inspect the
|
||||
* inner workings of the cache. If anything returned by this
|
||||
* object is modified in any way, strange breakage may occur.
|
||||
*
|
||||
* These fields are private for a reason!
|
||||
*
|
||||
* @internal
|
||||
*/
|
||||
static unsafeExposeInternals<K extends {}, V extends {}, FC extends unknown = unknown>(c: LRUCache<K, V, FC>): {
|
||||
starts: ZeroArray | undefined;
|
||||
ttls: ZeroArray | undefined;
|
||||
sizes: ZeroArray | undefined;
|
||||
keyMap: Map<K, number>;
|
||||
keyList: (K | undefined)[];
|
||||
valList: (V | BackgroundFetch<V> | undefined)[];
|
||||
next: NumberArray;
|
||||
prev: NumberArray;
|
||||
readonly head: Index;
|
||||
readonly tail: Index;
|
||||
free: StackLike;
|
||||
isBackgroundFetch: (p: any) => boolean;
|
||||
backgroundFetch: (k: K, index: number | undefined, options: LRUCache.FetchOptions<K, V, FC>, context: any) => BackgroundFetch<V>;
|
||||
moveToTail: (index: number) => void;
|
||||
indexes: (options?: {
|
||||
allowStale: boolean;
|
||||
}) => Generator<Index, void, unknown>;
|
||||
rindexes: (options?: {
|
||||
allowStale: boolean;
|
||||
}) => Generator<Index, void, unknown>;
|
||||
isStale: (index: number | undefined) => boolean;
|
||||
};
|
||||
/**
|
||||
* {@link LRUCache.OptionsBase.max} (read-only)
|
||||
*/
|
||||
get max(): LRUCache.Count;
|
||||
/**
|
||||
* {@link LRUCache.OptionsBase.maxSize} (read-only)
|
||||
*/
|
||||
get maxSize(): LRUCache.Count;
|
||||
/**
|
||||
* The total computed size of items in the cache (read-only)
|
||||
*/
|
||||
get calculatedSize(): LRUCache.Size;
|
||||
/**
|
||||
* The number of items stored in the cache (read-only)
|
||||
*/
|
||||
get size(): LRUCache.Count;
|
||||
/**
|
||||
* {@link LRUCache.OptionsBase.fetchMethod} (read-only)
|
||||
*/
|
||||
get fetchMethod(): LRUCache.Fetcher<K, V, FC> | undefined;
|
||||
/**
|
||||
* {@link LRUCache.OptionsBase.dispose} (read-only)
|
||||
*/
|
||||
get dispose(): LRUCache.Disposer<K, V> | undefined;
|
||||
/**
|
||||
* {@link LRUCache.OptionsBase.disposeAfter} (read-only)
|
||||
*/
|
||||
get disposeAfter(): LRUCache.Disposer<K, V> | undefined;
|
||||
constructor(options: LRUCache.Options<K, V, FC> | LRUCache<K, V, FC>);
|
||||
/**
|
||||
* Return the remaining TTL time for a given entry key
|
||||
*/
|
||||
getRemainingTTL(key: K): number;
|
||||
/**
|
||||
* Return a generator yielding `[key, value]` pairs,
|
||||
* in order from most recently used to least recently used.
|
||||
*/
|
||||
entries(): Generator<[K, V], void, unknown>;
|
||||
/**
|
||||
* Inverse order version of {@link LRUCache.entries}
|
||||
*
|
||||
* Return a generator yielding `[key, value]` pairs,
|
||||
* in order from least recently used to most recently used.
|
||||
*/
|
||||
rentries(): Generator<(K | V | BackgroundFetch<V> | undefined)[], void, unknown>;
|
||||
/**
|
||||
* Return a generator yielding the keys in the cache,
|
||||
* in order from most recently used to least recently used.
|
||||
*/
|
||||
keys(): Generator<K, void, unknown>;
|
||||
/**
|
||||
* Inverse order version of {@link LRUCache.keys}
|
||||
*
|
||||
* Return a generator yielding the keys in the cache,
|
||||
* in order from least recently used to most recently used.
|
||||
*/
|
||||
rkeys(): Generator<K, void, unknown>;
|
||||
/**
|
||||
* Return a generator yielding the values in the cache,
|
||||
* in order from most recently used to least recently used.
|
||||
*/
|
||||
values(): Generator<V, void, unknown>;
|
||||
/**
|
||||
* Inverse order version of {@link LRUCache.values}
|
||||
*
|
||||
* Return a generator yielding the values in the cache,
|
||||
* in order from least recently used to most recently used.
|
||||
*/
|
||||
rvalues(): Generator<V | BackgroundFetch<V> | undefined, void, unknown>;
|
||||
/**
|
||||
* Iterating over the cache itself yields the same results as
|
||||
* {@link LRUCache.entries}
|
||||
*/
|
||||
[Symbol.iterator](): Generator<[K, V], void, unknown>;
|
||||
/**
|
||||
* A String value that is used in the creation of the default string description of an object.
|
||||
* Called by the built-in method Object.prototype.toString.
|
||||
*/
|
||||
[Symbol.toStringTag]: string;
|
||||
/**
|
||||
* Find a value for which the supplied fn method returns a truthy value,
|
||||
* similar to Array.find(). fn is called as fn(value, key, cache).
|
||||
*/
|
||||
find(fn: (v: V, k: K, self: LRUCache<K, V, FC>) => boolean, getOptions?: LRUCache.GetOptions<K, V, FC>): V | undefined;
|
||||
/**
|
||||
* Call the supplied function on each item in the cache, in order from
|
||||
* most recently used to least recently used. fn is called as
|
||||
* fn(value, key, cache). Does not update age or recenty of use.
|
||||
* Does not iterate over stale values.
|
||||
*/
|
||||
forEach(fn: (v: V, k: K, self: LRUCache<K, V, FC>) => any, thisp?: any): void;
|
||||
/**
|
||||
* The same as {@link LRUCache.forEach} but items are iterated over in
|
||||
* reverse order. (ie, less recently used items are iterated over first.)
|
||||
*/
|
||||
rforEach(fn: (v: V, k: K, self: LRUCache<K, V, FC>) => any, thisp?: any): void;
|
||||
/**
|
||||
* Delete any stale entries. Returns true if anything was removed,
|
||||
* false otherwise.
|
||||
*/
|
||||
purgeStale(): boolean;
|
||||
/**
|
||||
* Get the extended info about a given entry, to get its value, size, and
|
||||
* TTL info simultaneously. Like {@link LRUCache#dump}, but just for a
|
||||
* single key. Always returns stale values, if their info is found in the
|
||||
* cache, so be sure to check for expired TTLs if relevant.
|
||||
*/
|
||||
info(key: K): LRUCache.Entry<V> | undefined;
|
||||
/**
|
||||
* Return an array of [key, {@link LRUCache.Entry}] tuples which can be
|
||||
* passed to cache.load()
|
||||
*/
|
||||
dump(): [K, LRUCache.Entry<V>][];
|
||||
/**
|
||||
* Reset the cache and load in the items in entries in the order listed.
|
||||
* Note that the shape of the resulting cache may be different if the
|
||||
* same options are not used in both caches.
|
||||
*/
|
||||
load(arr: [K, LRUCache.Entry<V>][]): void;
|
||||
/**
|
||||
* Add a value to the cache.
|
||||
*
|
||||
* Note: if `undefined` is specified as a value, this is an alias for
|
||||
* {@link LRUCache#delete}
|
||||
*/
|
||||
set(k: K, v: V | BackgroundFetch<V> | undefined, setOptions?: LRUCache.SetOptions<K, V, FC>): this;
|
||||
/**
|
||||
* Evict the least recently used item, returning its value or
|
||||
* `undefined` if cache is empty.
|
||||
*/
|
||||
pop(): V | undefined;
|
||||
/**
|
||||
* Check if a key is in the cache, without updating the recency of use.
|
||||
* Will return false if the item is stale, even though it is technically
|
||||
* in the cache.
|
||||
*
|
||||
* Will not update item age unless
|
||||
* {@link LRUCache.OptionsBase.updateAgeOnHas} is set.
|
||||
*/
|
||||
has(k: K, hasOptions?: LRUCache.HasOptions<K, V, FC>): boolean;
|
||||
/**
|
||||
* Like {@link LRUCache#get} but doesn't update recency or delete stale
|
||||
* items.
|
||||
*
|
||||
* Returns `undefined` if the item is stale, unless
|
||||
* {@link LRUCache.OptionsBase.allowStale} is set.
|
||||
*/
|
||||
peek(k: K, peekOptions?: LRUCache.PeekOptions<K, V, FC>): V | undefined;
|
||||
/**
|
||||
* Make an asynchronous cached fetch using the
|
||||
* {@link LRUCache.OptionsBase.fetchMethod} function.
|
||||
*
|
||||
* If multiple fetches for the same key are issued, then they will all be
|
||||
* coalesced into a single call to fetchMethod.
|
||||
*
|
||||
* Note that this means that handling options such as
|
||||
* {@link LRUCache.OptionsBase.allowStaleOnFetchAbort},
|
||||
* {@link LRUCache.FetchOptions.signal},
|
||||
* and {@link LRUCache.OptionsBase.allowStaleOnFetchRejection} will be
|
||||
* determined by the FIRST fetch() call for a given key.
|
||||
*
|
||||
* This is a known (fixable) shortcoming which will be addresed on when
|
||||
* someone complains about it, as the fix would involve added complexity and
|
||||
* may not be worth the costs for this edge case.
|
||||
*/
|
||||
fetch(k: K, fetchOptions: unknown extends FC ? LRUCache.FetchOptions<K, V, FC> : FC extends undefined | void ? LRUCache.FetchOptionsNoContext<K, V> : LRUCache.FetchOptionsWithContext<K, V, FC>): Promise<undefined | V>;
|
||||
fetch(k: unknown extends FC ? K : FC extends undefined | void ? K : never, fetchOptions?: unknown extends FC ? LRUCache.FetchOptions<K, V, FC> : FC extends undefined | void ? LRUCache.FetchOptionsNoContext<K, V> : never): Promise<undefined | V>;
|
||||
/**
|
||||
* Return a value from the cache. Will update the recency of the cache
|
||||
* entry found.
|
||||
*
|
||||
* If the key is not found, get() will return `undefined`.
|
||||
*/
|
||||
get(k: K, getOptions?: LRUCache.GetOptions<K, V, FC>): V | undefined;
|
||||
/**
|
||||
* Deletes a key out of the cache.
|
||||
* Returns true if the key was deleted, false otherwise.
|
||||
*/
|
||||
delete(k: K): boolean;
|
||||
/**
|
||||
* Clear the cache entirely, throwing away all values.
|
||||
*/
|
||||
clear(): void;
|
||||
}
|
||||
//# sourceMappingURL=index.d.ts.map
|
1
my-app/node_modules/@npmcli/agent/node_modules/lru-cache/dist/commonjs/index.d.ts.map
generated
vendored
Executable file
1
my-app/node_modules/@npmcli/agent/node_modules/lru-cache/dist/commonjs/index.d.ts.map
generated
vendored
Executable file
File diff suppressed because one or more lines are too long
1446
my-app/node_modules/@npmcli/agent/node_modules/lru-cache/dist/commonjs/index.js
generated
vendored
Executable file
1446
my-app/node_modules/@npmcli/agent/node_modules/lru-cache/dist/commonjs/index.js
generated
vendored
Executable file
File diff suppressed because it is too large
Load diff
1
my-app/node_modules/@npmcli/agent/node_modules/lru-cache/dist/commonjs/index.js.map
generated
vendored
Executable file
1
my-app/node_modules/@npmcli/agent/node_modules/lru-cache/dist/commonjs/index.js.map
generated
vendored
Executable file
File diff suppressed because one or more lines are too long
3
my-app/node_modules/@npmcli/agent/node_modules/lru-cache/dist/commonjs/package.json
generated
vendored
Executable file
3
my-app/node_modules/@npmcli/agent/node_modules/lru-cache/dist/commonjs/package.json
generated
vendored
Executable file
|
@ -0,0 +1,3 @@
|
|||
{
|
||||
"type": "commonjs"
|
||||
}
|
856
my-app/node_modules/@npmcli/agent/node_modules/lru-cache/dist/esm/index.d.ts
generated
vendored
Executable file
856
my-app/node_modules/@npmcli/agent/node_modules/lru-cache/dist/esm/index.d.ts
generated
vendored
Executable file
|
@ -0,0 +1,856 @@
|
|||
/**
|
||||
* @module LRUCache
|
||||
*/
|
||||
declare const TYPE: unique symbol;
|
||||
export type PosInt = number & {
|
||||
[TYPE]: 'Positive Integer';
|
||||
};
|
||||
export type Index = number & {
|
||||
[TYPE]: 'LRUCache Index';
|
||||
};
|
||||
export type UintArray = Uint8Array | Uint16Array | Uint32Array;
|
||||
export type NumberArray = UintArray | number[];
|
||||
declare class ZeroArray extends Array<number> {
|
||||
constructor(size: number);
|
||||
}
|
||||
export type { ZeroArray };
|
||||
export type { Stack };
|
||||
export type StackLike = Stack | Index[];
|
||||
declare class Stack {
|
||||
#private;
|
||||
heap: NumberArray;
|
||||
length: number;
|
||||
static create(max: number): StackLike;
|
||||
constructor(max: number, HeapCls: {
|
||||
new (n: number): NumberArray;
|
||||
});
|
||||
push(n: Index): void;
|
||||
pop(): Index;
|
||||
}
|
||||
/**
|
||||
* Promise representing an in-progress {@link LRUCache#fetch} call
|
||||
*/
|
||||
export type BackgroundFetch<V> = Promise<V | undefined> & {
|
||||
__returned: BackgroundFetch<V> | undefined;
|
||||
__abortController: AbortController;
|
||||
__staleWhileFetching: V | undefined;
|
||||
};
|
||||
export type DisposeTask<K, V> = [
|
||||
value: V,
|
||||
key: K,
|
||||
reason: LRUCache.DisposeReason
|
||||
];
|
||||
export declare namespace LRUCache {
|
||||
/**
|
||||
* An integer greater than 0, reflecting the calculated size of items
|
||||
*/
|
||||
type Size = number;
|
||||
/**
|
||||
* Integer greater than 0, representing some number of milliseconds, or the
|
||||
* time at which a TTL started counting from.
|
||||
*/
|
||||
type Milliseconds = number;
|
||||
/**
|
||||
* An integer greater than 0, reflecting a number of items
|
||||
*/
|
||||
type Count = number;
|
||||
/**
|
||||
* The reason why an item was removed from the cache, passed
|
||||
* to the {@link Disposer} methods.
|
||||
*/
|
||||
type DisposeReason = 'evict' | 'set' | 'delete';
|
||||
/**
|
||||
* A method called upon item removal, passed as the
|
||||
* {@link OptionsBase.dispose} and/or
|
||||
* {@link OptionsBase.disposeAfter} options.
|
||||
*/
|
||||
type Disposer<K, V> = (value: V, key: K, reason: DisposeReason) => void;
|
||||
/**
|
||||
* A function that returns the effective calculated size
|
||||
* of an entry in the cache.
|
||||
*/
|
||||
type SizeCalculator<K, V> = (value: V, key: K) => Size;
|
||||
/**
|
||||
* Options provided to the
|
||||
* {@link OptionsBase.fetchMethod} function.
|
||||
*/
|
||||
interface FetcherOptions<K, V, FC = unknown> {
|
||||
signal: AbortSignal;
|
||||
options: FetcherFetchOptions<K, V, FC>;
|
||||
/**
|
||||
* Object provided in the {@link FetchOptions.context} option to
|
||||
* {@link LRUCache#fetch}
|
||||
*/
|
||||
context: FC;
|
||||
}
|
||||
/**
|
||||
* Status object that may be passed to {@link LRUCache#fetch},
|
||||
* {@link LRUCache#get}, {@link LRUCache#set}, and {@link LRUCache#has}.
|
||||
*/
|
||||
interface Status<V> {
|
||||
/**
|
||||
* The status of a set() operation.
|
||||
*
|
||||
* - add: the item was not found in the cache, and was added
|
||||
* - update: the item was in the cache, with the same value provided
|
||||
* - replace: the item was in the cache, and replaced
|
||||
* - miss: the item was not added to the cache for some reason
|
||||
*/
|
||||
set?: 'add' | 'update' | 'replace' | 'miss';
|
||||
/**
|
||||
* the ttl stored for the item, or undefined if ttls are not used.
|
||||
*/
|
||||
ttl?: Milliseconds;
|
||||
/**
|
||||
* the start time for the item, or undefined if ttls are not used.
|
||||
*/
|
||||
start?: Milliseconds;
|
||||
/**
|
||||
* The timestamp used for TTL calculation
|
||||
*/
|
||||
now?: Milliseconds;
|
||||
/**
|
||||
* the remaining ttl for the item, or undefined if ttls are not used.
|
||||
*/
|
||||
remainingTTL?: Milliseconds;
|
||||
/**
|
||||
* The calculated size for the item, if sizes are used.
|
||||
*/
|
||||
entrySize?: Size;
|
||||
/**
|
||||
* The total calculated size of the cache, if sizes are used.
|
||||
*/
|
||||
totalCalculatedSize?: Size;
|
||||
/**
|
||||
* A flag indicating that the item was not stored, due to exceeding the
|
||||
* {@link OptionsBase.maxEntrySize}
|
||||
*/
|
||||
maxEntrySizeExceeded?: true;
|
||||
/**
|
||||
* The old value, specified in the case of `set:'update'` or
|
||||
* `set:'replace'`
|
||||
*/
|
||||
oldValue?: V;
|
||||
/**
|
||||
* The results of a {@link LRUCache#has} operation
|
||||
*
|
||||
* - hit: the item was found in the cache
|
||||
* - stale: the item was found in the cache, but is stale
|
||||
* - miss: the item was not found in the cache
|
||||
*/
|
||||
has?: 'hit' | 'stale' | 'miss';
|
||||
/**
|
||||
* The status of a {@link LRUCache#fetch} operation.
|
||||
* Note that this can change as the underlying fetch() moves through
|
||||
* various states.
|
||||
*
|
||||
* - inflight: there is another fetch() for this key which is in process
|
||||
* - get: there is no fetchMethod, so {@link LRUCache#get} was called.
|
||||
* - miss: the item is not in cache, and will be fetched.
|
||||
* - hit: the item is in the cache, and was resolved immediately.
|
||||
* - stale: the item is in the cache, but stale.
|
||||
* - refresh: the item is in the cache, and not stale, but
|
||||
* {@link FetchOptions.forceRefresh} was specified.
|
||||
*/
|
||||
fetch?: 'get' | 'inflight' | 'miss' | 'hit' | 'stale' | 'refresh';
|
||||
/**
|
||||
* The {@link OptionsBase.fetchMethod} was called
|
||||
*/
|
||||
fetchDispatched?: true;
|
||||
/**
|
||||
* The cached value was updated after a successful call to
|
||||
* {@link OptionsBase.fetchMethod}
|
||||
*/
|
||||
fetchUpdated?: true;
|
||||
/**
|
||||
* The reason for a fetch() rejection. Either the error raised by the
|
||||
* {@link OptionsBase.fetchMethod}, or the reason for an
|
||||
* AbortSignal.
|
||||
*/
|
||||
fetchError?: Error;
|
||||
/**
|
||||
* The fetch received an abort signal
|
||||
*/
|
||||
fetchAborted?: true;
|
||||
/**
|
||||
* The abort signal received was ignored, and the fetch was allowed to
|
||||
* continue.
|
||||
*/
|
||||
fetchAbortIgnored?: true;
|
||||
/**
|
||||
* The fetchMethod promise resolved successfully
|
||||
*/
|
||||
fetchResolved?: true;
|
||||
/**
|
||||
* The fetchMethod promise was rejected
|
||||
*/
|
||||
fetchRejected?: true;
|
||||
/**
|
||||
* The status of a {@link LRUCache#get} operation.
|
||||
*
|
||||
* - fetching: The item is currently being fetched. If a previous value
|
||||
* is present and allowed, that will be returned.
|
||||
* - stale: The item is in the cache, and is stale.
|
||||
* - hit: the item is in the cache
|
||||
* - miss: the item is not in the cache
|
||||
*/
|
||||
get?: 'stale' | 'hit' | 'miss';
|
||||
/**
|
||||
* A fetch or get operation returned a stale value.
|
||||
*/
|
||||
returnedStale?: true;
|
||||
}
|
||||
/**
|
||||
* options which override the options set in the LRUCache constructor
|
||||
* when calling {@link LRUCache#fetch}.
|
||||
*
|
||||
* This is the union of {@link GetOptions} and {@link SetOptions}, plus
|
||||
* {@link OptionsBase.noDeleteOnFetchRejection},
|
||||
* {@link OptionsBase.allowStaleOnFetchRejection},
|
||||
* {@link FetchOptions.forceRefresh}, and
|
||||
* {@link FetcherOptions.context}
|
||||
*
|
||||
* Any of these may be modified in the {@link OptionsBase.fetchMethod}
|
||||
* function, but the {@link GetOptions} fields will of course have no
|
||||
* effect, as the {@link LRUCache#get} call already happened by the time
|
||||
* the fetchMethod is called.
|
||||
*/
|
||||
interface FetcherFetchOptions<K, V, FC = unknown> extends Pick<OptionsBase<K, V, FC>, 'allowStale' | 'updateAgeOnGet' | 'noDeleteOnStaleGet' | 'sizeCalculation' | 'ttl' | 'noDisposeOnSet' | 'noUpdateTTL' | 'noDeleteOnFetchRejection' | 'allowStaleOnFetchRejection' | 'ignoreFetchAbort' | 'allowStaleOnFetchAbort'> {
|
||||
status?: Status<V>;
|
||||
size?: Size;
|
||||
}
|
||||
/**
|
||||
* Options that may be passed to the {@link LRUCache#fetch} method.
|
||||
*/
|
||||
interface FetchOptions<K, V, FC> extends FetcherFetchOptions<K, V, FC> {
|
||||
/**
|
||||
* Set to true to force a re-load of the existing data, even if it
|
||||
* is not yet stale.
|
||||
*/
|
||||
forceRefresh?: boolean;
|
||||
/**
|
||||
* Context provided to the {@link OptionsBase.fetchMethod} as
|
||||
* the {@link FetcherOptions.context} param.
|
||||
*
|
||||
* If the FC type is specified as unknown (the default),
|
||||
* undefined or void, then this is optional. Otherwise, it will
|
||||
* be required.
|
||||
*/
|
||||
context?: FC;
|
||||
signal?: AbortSignal;
|
||||
status?: Status<V>;
|
||||
}
|
||||
/**
|
||||
* Options provided to {@link LRUCache#fetch} when the FC type is something
|
||||
* other than `unknown`, `undefined`, or `void`
|
||||
*/
|
||||
interface FetchOptionsWithContext<K, V, FC> extends FetchOptions<K, V, FC> {
|
||||
context: FC;
|
||||
}
|
||||
/**
|
||||
* Options provided to {@link LRUCache#fetch} when the FC type is
|
||||
* `undefined` or `void`
|
||||
*/
|
||||
interface FetchOptionsNoContext<K, V> extends FetchOptions<K, V, undefined> {
|
||||
context?: undefined;
|
||||
}
|
||||
/**
|
||||
* Options that may be passed to the {@link LRUCache#has} method.
|
||||
*/
|
||||
interface HasOptions<K, V, FC> extends Pick<OptionsBase<K, V, FC>, 'updateAgeOnHas'> {
|
||||
status?: Status<V>;
|
||||
}
|
||||
/**
|
||||
* Options that may be passed to the {@link LRUCache#get} method.
|
||||
*/
|
||||
interface GetOptions<K, V, FC> extends Pick<OptionsBase<K, V, FC>, 'allowStale' | 'updateAgeOnGet' | 'noDeleteOnStaleGet'> {
|
||||
status?: Status<V>;
|
||||
}
|
||||
/**
|
||||
* Options that may be passed to the {@link LRUCache#peek} method.
|
||||
*/
|
||||
interface PeekOptions<K, V, FC> extends Pick<OptionsBase<K, V, FC>, 'allowStale'> {
|
||||
}
|
||||
/**
|
||||
* Options that may be passed to the {@link LRUCache#set} method.
|
||||
*/
|
||||
interface SetOptions<K, V, FC> extends Pick<OptionsBase<K, V, FC>, 'sizeCalculation' | 'ttl' | 'noDisposeOnSet' | 'noUpdateTTL'> {
|
||||
/**
|
||||
* If size tracking is enabled, then setting an explicit size
|
||||
* in the {@link LRUCache#set} call will prevent calling the
|
||||
* {@link OptionsBase.sizeCalculation} function.
|
||||
*/
|
||||
size?: Size;
|
||||
/**
|
||||
* If TTL tracking is enabled, then setting an explicit start
|
||||
* time in the {@link LRUCache#set} call will override the
|
||||
* default time from `performance.now()` or `Date.now()`.
|
||||
*
|
||||
* Note that it must be a valid value for whichever time-tracking
|
||||
* method is in use.
|
||||
*/
|
||||
start?: Milliseconds;
|
||||
status?: Status<V>;
|
||||
}
|
||||
/**
|
||||
* The type signature for the {@link OptionsBase.fetchMethod} option.
|
||||
*/
|
||||
type Fetcher<K, V, FC = unknown> = (key: K, staleValue: V | undefined, options: FetcherOptions<K, V, FC>) => Promise<V | undefined | void> | V | undefined | void;
|
||||
/**
|
||||
* Options which may be passed to the {@link LRUCache} constructor.
|
||||
*
|
||||
* Most of these may be overridden in the various options that use
|
||||
* them.
|
||||
*
|
||||
* Despite all being technically optional, the constructor requires that
|
||||
* a cache is at minimum limited by one or more of {@link OptionsBase.max},
|
||||
* {@link OptionsBase.ttl}, or {@link OptionsBase.maxSize}.
|
||||
*
|
||||
* If {@link OptionsBase.ttl} is used alone, then it is strongly advised
|
||||
* (and in fact required by the type definitions here) that the cache
|
||||
* also set {@link OptionsBase.ttlAutopurge}, to prevent potentially
|
||||
* unbounded storage.
|
||||
*/
|
||||
interface OptionsBase<K, V, FC> {
|
||||
/**
|
||||
* The maximum number of items to store in the cache before evicting
|
||||
* old entries. This is read-only on the {@link LRUCache} instance,
|
||||
* and may not be overridden.
|
||||
*
|
||||
* If set, then storage space will be pre-allocated at construction
|
||||
* time, and the cache will perform significantly faster.
|
||||
*
|
||||
* Note that significantly fewer items may be stored, if
|
||||
* {@link OptionsBase.maxSize} and/or {@link OptionsBase.ttl} are also
|
||||
* set.
|
||||
*/
|
||||
max?: Count;
|
||||
/**
|
||||
* Max time in milliseconds for items to live in cache before they are
|
||||
* considered stale. Note that stale items are NOT preemptively removed
|
||||
* by default, and MAY live in the cache long after they have expired.
|
||||
*
|
||||
* Also, as this cache is optimized for LRU/MRU operations, some of
|
||||
* the staleness/TTL checks will reduce performance, as they will incur
|
||||
* overhead by deleting items.
|
||||
*
|
||||
* Must be an integer number of ms. If set to 0, this indicates "no TTL"
|
||||
*
|
||||
* @default 0
|
||||
*/
|
||||
ttl?: Milliseconds;
|
||||
/**
|
||||
* Minimum amount of time in ms in which to check for staleness.
|
||||
* Defaults to 1, which means that the current time is checked
|
||||
* at most once per millisecond.
|
||||
*
|
||||
* Set to 0 to check the current time every time staleness is tested.
|
||||
* (This reduces performance, and is theoretically unnecessary.)
|
||||
*
|
||||
* Setting this to a higher value will improve performance somewhat
|
||||
* while using ttl tracking, albeit at the expense of keeping stale
|
||||
* items around a bit longer than their TTLs would indicate.
|
||||
*
|
||||
* @default 1
|
||||
*/
|
||||
ttlResolution?: Milliseconds;
|
||||
/**
|
||||
* Preemptively remove stale items from the cache.
|
||||
* Note that this may significantly degrade performance,
|
||||
* especially if the cache is storing a large number of items.
|
||||
* It is almost always best to just leave the stale items in
|
||||
* the cache, and let them fall out as new items are added.
|
||||
*
|
||||
* Note that this means that {@link OptionsBase.allowStale} is a bit
|
||||
* pointless, as stale items will be deleted almost as soon as they
|
||||
* expire.
|
||||
*
|
||||
* @default false
|
||||
*/
|
||||
ttlAutopurge?: boolean;
|
||||
/**
|
||||
* Update the age of items on {@link LRUCache#get}, renewing their TTL
|
||||
*
|
||||
* Has no effect if {@link OptionsBase.ttl} is not set.
|
||||
*
|
||||
* @default false
|
||||
*/
|
||||
updateAgeOnGet?: boolean;
|
||||
/**
|
||||
* Update the age of items on {@link LRUCache#has}, renewing their TTL
|
||||
*
|
||||
* Has no effect if {@link OptionsBase.ttl} is not set.
|
||||
*
|
||||
* @default false
|
||||
*/
|
||||
updateAgeOnHas?: boolean;
|
||||
/**
|
||||
* Allow {@link LRUCache#get} and {@link LRUCache#fetch} calls to return
|
||||
* stale data, if available.
|
||||
*/
|
||||
allowStale?: boolean;
|
||||
/**
|
||||
* Function that is called on items when they are dropped from the cache.
|
||||
* This can be handy if you want to close file descriptors or do other
|
||||
* cleanup tasks when items are no longer accessible. Called with `key,
|
||||
* value`. It's called before actually removing the item from the
|
||||
* internal cache, so it is *NOT* safe to re-add them.
|
||||
*
|
||||
* Use {@link OptionsBase.disposeAfter} if you wish to dispose items after
|
||||
* they have been full removed, when it is safe to add them back to the
|
||||
* cache.
|
||||
*/
|
||||
dispose?: Disposer<K, V>;
|
||||
/**
|
||||
* The same as {@link OptionsBase.dispose}, but called *after* the entry
|
||||
* is completely removed and the cache is once again in a clean state.
|
||||
* It is safe to add an item right back into the cache at this point.
|
||||
* However, note that it is *very* easy to inadvertently create infinite
|
||||
* recursion this way.
|
||||
*/
|
||||
disposeAfter?: Disposer<K, V>;
|
||||
/**
|
||||
* Set to true to suppress calling the
|
||||
* {@link OptionsBase.dispose} function if the entry key is
|
||||
* still accessible within the cache.
|
||||
* This may be overridden by passing an options object to
|
||||
* {@link LRUCache#set}.
|
||||
*/
|
||||
noDisposeOnSet?: boolean;
|
||||
/**
|
||||
* Boolean flag to tell the cache to not update the TTL when
|
||||
* setting a new value for an existing key (ie, when updating a value
|
||||
* rather than inserting a new value). Note that the TTL value is
|
||||
* _always_ set (if provided) when adding a new entry into the cache.
|
||||
*
|
||||
* Has no effect if a {@link OptionsBase.ttl} is not set.
|
||||
*/
|
||||
noUpdateTTL?: boolean;
|
||||
/**
|
||||
* If you wish to track item size, you must provide a maxSize
|
||||
* note that we still will only keep up to max *actual items*,
|
||||
* if max is set, so size tracking may cause fewer than max items
|
||||
* to be stored. At the extreme, a single item of maxSize size
|
||||
* will cause everything else in the cache to be dropped when it
|
||||
* is added. Use with caution!
|
||||
*
|
||||
* Note also that size tracking can negatively impact performance,
|
||||
* though for most cases, only minimally.
|
||||
*/
|
||||
maxSize?: Size;
|
||||
/**
|
||||
* The maximum allowed size for any single item in the cache.
|
||||
*
|
||||
* If a larger item is passed to {@link LRUCache#set} or returned by a
|
||||
* {@link OptionsBase.fetchMethod}, then it will not be stored in the
|
||||
* cache.
|
||||
*/
|
||||
maxEntrySize?: Size;
|
||||
/**
|
||||
* A function that returns a number indicating the item's size.
|
||||
*
|
||||
* If not provided, and {@link OptionsBase.maxSize} or
|
||||
* {@link OptionsBase.maxEntrySize} are set, then all
|
||||
* {@link LRUCache#set} calls **must** provide an explicit
|
||||
* {@link SetOptions.size} or sizeCalculation param.
|
||||
*/
|
||||
sizeCalculation?: SizeCalculator<K, V>;
|
||||
/**
|
||||
* Method that provides the implementation for {@link LRUCache#fetch}
|
||||
*/
|
||||
fetchMethod?: Fetcher<K, V, FC>;
|
||||
/**
|
||||
* Set to true to suppress the deletion of stale data when a
|
||||
* {@link OptionsBase.fetchMethod} returns a rejected promise.
|
||||
*/
|
||||
noDeleteOnFetchRejection?: boolean;
|
||||
/**
|
||||
* Do not delete stale items when they are retrieved with
|
||||
* {@link LRUCache#get}.
|
||||
*
|
||||
* Note that the `get` return value will still be `undefined`
|
||||
* unless {@link OptionsBase.allowStale} is true.
|
||||
*/
|
||||
noDeleteOnStaleGet?: boolean;
|
||||
/**
|
||||
* Set to true to allow returning stale data when a
|
||||
* {@link OptionsBase.fetchMethod} throws an error or returns a rejected
|
||||
* promise.
|
||||
*
|
||||
* This differs from using {@link OptionsBase.allowStale} in that stale
|
||||
* data will ONLY be returned in the case that the
|
||||
* {@link LRUCache#fetch} fails, not any other times.
|
||||
*/
|
||||
allowStaleOnFetchRejection?: boolean;
|
||||
/**
|
||||
* Set to true to return a stale value from the cache when the
|
||||
* `AbortSignal` passed to the {@link OptionsBase.fetchMethod} dispatches an `'abort'`
|
||||
* event, whether user-triggered, or due to internal cache behavior.
|
||||
*
|
||||
* Unless {@link OptionsBase.ignoreFetchAbort} is also set, the underlying
|
||||
* {@link OptionsBase.fetchMethod} will still be considered canceled, and
|
||||
* any value it returns will be ignored and not cached.
|
||||
*
|
||||
* Caveat: since fetches are aborted when a new value is explicitly
|
||||
* set in the cache, this can lead to fetch returning a stale value,
|
||||
* since that was the fallback value _at the moment the `fetch()` was
|
||||
* initiated_, even though the new updated value is now present in
|
||||
* the cache.
|
||||
*
|
||||
* For example:
|
||||
*
|
||||
* ```ts
|
||||
* const cache = new LRUCache<string, any>({
|
||||
* ttl: 100,
|
||||
* fetchMethod: async (url, oldValue, { signal }) => {
|
||||
* const res = await fetch(url, { signal })
|
||||
* return await res.json()
|
||||
* }
|
||||
* })
|
||||
* cache.set('https://example.com/', { some: 'data' })
|
||||
* // 100ms go by...
|
||||
* const result = cache.fetch('https://example.com/')
|
||||
* cache.set('https://example.com/', { other: 'thing' })
|
||||
* console.log(await result) // { some: 'data' }
|
||||
* console.log(cache.get('https://example.com/')) // { other: 'thing' }
|
||||
* ```
|
||||
*/
|
||||
allowStaleOnFetchAbort?: boolean;
|
||||
/**
|
||||
* Set to true to ignore the `abort` event emitted by the `AbortSignal`
|
||||
* object passed to {@link OptionsBase.fetchMethod}, and still cache the
|
||||
* resulting resolution value, as long as it is not `undefined`.
|
||||
*
|
||||
* When used on its own, this means aborted {@link LRUCache#fetch} calls are not
|
||||
* immediately resolved or rejected when they are aborted, and instead
|
||||
* take the full time to await.
|
||||
*
|
||||
* When used with {@link OptionsBase.allowStaleOnFetchAbort}, aborted
|
||||
* {@link LRUCache#fetch} calls will resolve immediately to their stale
|
||||
* cached value or `undefined`, and will continue to process and eventually
|
||||
* update the cache when they resolve, as long as the resulting value is
|
||||
* not `undefined`, thus supporting a "return stale on timeout while
|
||||
* refreshing" mechanism by passing `AbortSignal.timeout(n)` as the signal.
|
||||
*
|
||||
* **Note**: regardless of this setting, an `abort` event _is still
|
||||
* emitted on the `AbortSignal` object_, so may result in invalid results
|
||||
* when passed to other underlying APIs that use AbortSignals.
|
||||
*
|
||||
* This may be overridden in the {@link OptionsBase.fetchMethod} or the
|
||||
* call to {@link LRUCache#fetch}.
|
||||
*/
|
||||
ignoreFetchAbort?: boolean;
|
||||
}
|
||||
interface OptionsMaxLimit<K, V, FC> extends OptionsBase<K, V, FC> {
|
||||
max: Count;
|
||||
}
|
||||
interface OptionsTTLLimit<K, V, FC> extends OptionsBase<K, V, FC> {
|
||||
ttl: Milliseconds;
|
||||
ttlAutopurge: boolean;
|
||||
}
|
||||
interface OptionsSizeLimit<K, V, FC> extends OptionsBase<K, V, FC> {
|
||||
maxSize: Size;
|
||||
}
|
||||
/**
|
||||
* The valid safe options for the {@link LRUCache} constructor
|
||||
*/
|
||||
type Options<K, V, FC> = OptionsMaxLimit<K, V, FC> | OptionsSizeLimit<K, V, FC> | OptionsTTLLimit<K, V, FC>;
|
||||
/**
|
||||
* Entry objects used by {@link LRUCache#load} and {@link LRUCache#dump},
|
||||
* and returned by {@link LRUCache#info}.
|
||||
*/
|
||||
interface Entry<V> {
|
||||
value: V;
|
||||
ttl?: Milliseconds;
|
||||
size?: Size;
|
||||
start?: Milliseconds;
|
||||
}
|
||||
}
|
||||
/**
|
||||
* Default export, the thing you're using this module to get.
|
||||
*
|
||||
* All properties from the options object (with the exception of
|
||||
* {@link OptionsBase.max} and {@link OptionsBase.maxSize}) are added as
|
||||
* normal public members. (`max` and `maxBase` are read-only getters.)
|
||||
* Changing any of these will alter the defaults for subsequent method calls,
|
||||
* but is otherwise safe.
|
||||
*/
|
||||
export declare class LRUCache<K extends {}, V extends {}, FC = unknown> implements Map<K, V> {
|
||||
#private;
|
||||
/**
|
||||
* {@link LRUCache.OptionsBase.ttl}
|
||||
*/
|
||||
ttl: LRUCache.Milliseconds;
|
||||
/**
|
||||
* {@link LRUCache.OptionsBase.ttlResolution}
|
||||
*/
|
||||
ttlResolution: LRUCache.Milliseconds;
|
||||
/**
|
||||
* {@link LRUCache.OptionsBase.ttlAutopurge}
|
||||
*/
|
||||
ttlAutopurge: boolean;
|
||||
/**
|
||||
* {@link LRUCache.OptionsBase.updateAgeOnGet}
|
||||
*/
|
||||
updateAgeOnGet: boolean;
|
||||
/**
|
||||
* {@link LRUCache.OptionsBase.updateAgeOnHas}
|
||||
*/
|
||||
updateAgeOnHas: boolean;
|
||||
/**
|
||||
* {@link LRUCache.OptionsBase.allowStale}
|
||||
*/
|
||||
allowStale: boolean;
|
||||
/**
|
||||
* {@link LRUCache.OptionsBase.noDisposeOnSet}
|
||||
*/
|
||||
noDisposeOnSet: boolean;
|
||||
/**
|
||||
* {@link LRUCache.OptionsBase.noUpdateTTL}
|
||||
*/
|
||||
noUpdateTTL: boolean;
|
||||
/**
|
||||
* {@link LRUCache.OptionsBase.maxEntrySize}
|
||||
*/
|
||||
maxEntrySize: LRUCache.Size;
|
||||
/**
|
||||
* {@link LRUCache.OptionsBase.sizeCalculation}
|
||||
*/
|
||||
sizeCalculation?: LRUCache.SizeCalculator<K, V>;
|
||||
/**
|
||||
* {@link LRUCache.OptionsBase.noDeleteOnFetchRejection}
|
||||
*/
|
||||
noDeleteOnFetchRejection: boolean;
|
||||
/**
|
||||
* {@link LRUCache.OptionsBase.noDeleteOnStaleGet}
|
||||
*/
|
||||
noDeleteOnStaleGet: boolean;
|
||||
/**
|
||||
* {@link LRUCache.OptionsBase.allowStaleOnFetchAbort}
|
||||
*/
|
||||
allowStaleOnFetchAbort: boolean;
|
||||
/**
|
||||
* {@link LRUCache.OptionsBase.allowStaleOnFetchRejection}
|
||||
*/
|
||||
allowStaleOnFetchRejection: boolean;
|
||||
/**
|
||||
* {@link LRUCache.OptionsBase.ignoreFetchAbort}
|
||||
*/
|
||||
ignoreFetchAbort: boolean;
|
||||
/**
|
||||
* Do not call this method unless you need to inspect the
|
||||
* inner workings of the cache. If anything returned by this
|
||||
* object is modified in any way, strange breakage may occur.
|
||||
*
|
||||
* These fields are private for a reason!
|
||||
*
|
||||
* @internal
|
||||
*/
|
||||
static unsafeExposeInternals<K extends {}, V extends {}, FC extends unknown = unknown>(c: LRUCache<K, V, FC>): {
|
||||
starts: ZeroArray | undefined;
|
||||
ttls: ZeroArray | undefined;
|
||||
sizes: ZeroArray | undefined;
|
||||
keyMap: Map<K, number>;
|
||||
keyList: (K | undefined)[];
|
||||
valList: (V | BackgroundFetch<V> | undefined)[];
|
||||
next: NumberArray;
|
||||
prev: NumberArray;
|
||||
readonly head: Index;
|
||||
readonly tail: Index;
|
||||
free: StackLike;
|
||||
isBackgroundFetch: (p: any) => boolean;
|
||||
backgroundFetch: (k: K, index: number | undefined, options: LRUCache.FetchOptions<K, V, FC>, context: any) => BackgroundFetch<V>;
|
||||
moveToTail: (index: number) => void;
|
||||
indexes: (options?: {
|
||||
allowStale: boolean;
|
||||
}) => Generator<Index, void, unknown>;
|
||||
rindexes: (options?: {
|
||||
allowStale: boolean;
|
||||
}) => Generator<Index, void, unknown>;
|
||||
isStale: (index: number | undefined) => boolean;
|
||||
};
|
||||
/**
|
||||
* {@link LRUCache.OptionsBase.max} (read-only)
|
||||
*/
|
||||
get max(): LRUCache.Count;
|
||||
/**
|
||||
* {@link LRUCache.OptionsBase.maxSize} (read-only)
|
||||
*/
|
||||
get maxSize(): LRUCache.Count;
|
||||
/**
|
||||
* The total computed size of items in the cache (read-only)
|
||||
*/
|
||||
get calculatedSize(): LRUCache.Size;
|
||||
/**
|
||||
* The number of items stored in the cache (read-only)
|
||||
*/
|
||||
get size(): LRUCache.Count;
|
||||
/**
|
||||
* {@link LRUCache.OptionsBase.fetchMethod} (read-only)
|
||||
*/
|
||||
get fetchMethod(): LRUCache.Fetcher<K, V, FC> | undefined;
|
||||
/**
|
||||
* {@link LRUCache.OptionsBase.dispose} (read-only)
|
||||
*/
|
||||
get dispose(): LRUCache.Disposer<K, V> | undefined;
|
||||
/**
|
||||
* {@link LRUCache.OptionsBase.disposeAfter} (read-only)
|
||||
*/
|
||||
get disposeAfter(): LRUCache.Disposer<K, V> | undefined;
|
||||
constructor(options: LRUCache.Options<K, V, FC> | LRUCache<K, V, FC>);
|
||||
/**
|
||||
* Return the remaining TTL time for a given entry key
|
||||
*/
|
||||
getRemainingTTL(key: K): number;
|
||||
/**
|
||||
* Return a generator yielding `[key, value]` pairs,
|
||||
* in order from most recently used to least recently used.
|
||||
*/
|
||||
entries(): Generator<[K, V], void, unknown>;
|
||||
/**
|
||||
* Inverse order version of {@link LRUCache.entries}
|
||||
*
|
||||
* Return a generator yielding `[key, value]` pairs,
|
||||
* in order from least recently used to most recently used.
|
||||
*/
|
||||
rentries(): Generator<(K | V | BackgroundFetch<V> | undefined)[], void, unknown>;
|
||||
/**
|
||||
* Return a generator yielding the keys in the cache,
|
||||
* in order from most recently used to least recently used.
|
||||
*/
|
||||
keys(): Generator<K, void, unknown>;
|
||||
/**
|
||||
* Inverse order version of {@link LRUCache.keys}
|
||||
*
|
||||
* Return a generator yielding the keys in the cache,
|
||||
* in order from least recently used to most recently used.
|
||||
*/
|
||||
rkeys(): Generator<K, void, unknown>;
|
||||
/**
|
||||
* Return a generator yielding the values in the cache,
|
||||
* in order from most recently used to least recently used.
|
||||
*/
|
||||
values(): Generator<V, void, unknown>;
|
||||
/**
|
||||
* Inverse order version of {@link LRUCache.values}
|
||||
*
|
||||
* Return a generator yielding the values in the cache,
|
||||
* in order from least recently used to most recently used.
|
||||
*/
|
||||
rvalues(): Generator<V | BackgroundFetch<V> | undefined, void, unknown>;
|
||||
/**
|
||||
* Iterating over the cache itself yields the same results as
|
||||
* {@link LRUCache.entries}
|
||||
*/
|
||||
[Symbol.iterator](): Generator<[K, V], void, unknown>;
|
||||
/**
|
||||
* A String value that is used in the creation of the default string description of an object.
|
||||
* Called by the built-in method Object.prototype.toString.
|
||||
*/
|
||||
[Symbol.toStringTag]: string;
|
||||
/**
|
||||
* Find a value for which the supplied fn method returns a truthy value,
|
||||
* similar to Array.find(). fn is called as fn(value, key, cache).
|
||||
*/
|
||||
find(fn: (v: V, k: K, self: LRUCache<K, V, FC>) => boolean, getOptions?: LRUCache.GetOptions<K, V, FC>): V | undefined;
|
||||
/**
|
||||
* Call the supplied function on each item in the cache, in order from
|
||||
* most recently used to least recently used. fn is called as
|
||||
* fn(value, key, cache). Does not update age or recenty of use.
|
||||
* Does not iterate over stale values.
|
||||
*/
|
||||
forEach(fn: (v: V, k: K, self: LRUCache<K, V, FC>) => any, thisp?: any): void;
|
||||
/**
|
||||
* The same as {@link LRUCache.forEach} but items are iterated over in
|
||||
* reverse order. (ie, less recently used items are iterated over first.)
|
||||
*/
|
||||
rforEach(fn: (v: V, k: K, self: LRUCache<K, V, FC>) => any, thisp?: any): void;
|
||||
/**
|
||||
* Delete any stale entries. Returns true if anything was removed,
|
||||
* false otherwise.
|
||||
*/
|
||||
purgeStale(): boolean;
|
||||
/**
|
||||
* Get the extended info about a given entry, to get its value, size, and
|
||||
* TTL info simultaneously. Like {@link LRUCache#dump}, but just for a
|
||||
* single key. Always returns stale values, if their info is found in the
|
||||
* cache, so be sure to check for expired TTLs if relevant.
|
||||
*/
|
||||
info(key: K): LRUCache.Entry<V> | undefined;
|
||||
/**
|
||||
* Return an array of [key, {@link LRUCache.Entry}] tuples which can be
|
||||
* passed to cache.load()
|
||||
*/
|
||||
dump(): [K, LRUCache.Entry<V>][];
|
||||
/**
|
||||
* Reset the cache and load in the items in entries in the order listed.
|
||||
* Note that the shape of the resulting cache may be different if the
|
||||
* same options are not used in both caches.
|
||||
*/
|
||||
load(arr: [K, LRUCache.Entry<V>][]): void;
|
||||
/**
|
||||
* Add a value to the cache.
|
||||
*
|
||||
* Note: if `undefined` is specified as a value, this is an alias for
|
||||
* {@link LRUCache#delete}
|
||||
*/
|
||||
set(k: K, v: V | BackgroundFetch<V> | undefined, setOptions?: LRUCache.SetOptions<K, V, FC>): this;
|
||||
/**
|
||||
* Evict the least recently used item, returning its value or
|
||||
* `undefined` if cache is empty.
|
||||
*/
|
||||
pop(): V | undefined;
|
||||
/**
|
||||
* Check if a key is in the cache, without updating the recency of use.
|
||||
* Will return false if the item is stale, even though it is technically
|
||||
* in the cache.
|
||||
*
|
||||
* Will not update item age unless
|
||||
* {@link LRUCache.OptionsBase.updateAgeOnHas} is set.
|
||||
*/
|
||||
has(k: K, hasOptions?: LRUCache.HasOptions<K, V, FC>): boolean;
|
||||
/**
|
||||
* Like {@link LRUCache#get} but doesn't update recency or delete stale
|
||||
* items.
|
||||
*
|
||||
* Returns `undefined` if the item is stale, unless
|
||||
* {@link LRUCache.OptionsBase.allowStale} is set.
|
||||
*/
|
||||
peek(k: K, peekOptions?: LRUCache.PeekOptions<K, V, FC>): V | undefined;
|
||||
/**
|
||||
* Make an asynchronous cached fetch using the
|
||||
* {@link LRUCache.OptionsBase.fetchMethod} function.
|
||||
*
|
||||
* If multiple fetches for the same key are issued, then they will all be
|
||||
* coalesced into a single call to fetchMethod.
|
||||
*
|
||||
* Note that this means that handling options such as
|
||||
* {@link LRUCache.OptionsBase.allowStaleOnFetchAbort},
|
||||
* {@link LRUCache.FetchOptions.signal},
|
||||
* and {@link LRUCache.OptionsBase.allowStaleOnFetchRejection} will be
|
||||
* determined by the FIRST fetch() call for a given key.
|
||||
*
|
||||
* This is a known (fixable) shortcoming which will be addresed on when
|
||||
* someone complains about it, as the fix would involve added complexity and
|
||||
* may not be worth the costs for this edge case.
|
||||
*/
|
||||
fetch(k: K, fetchOptions: unknown extends FC ? LRUCache.FetchOptions<K, V, FC> : FC extends undefined | void ? LRUCache.FetchOptionsNoContext<K, V> : LRUCache.FetchOptionsWithContext<K, V, FC>): Promise<undefined | V>;
|
||||
fetch(k: unknown extends FC ? K : FC extends undefined | void ? K : never, fetchOptions?: unknown extends FC ? LRUCache.FetchOptions<K, V, FC> : FC extends undefined | void ? LRUCache.FetchOptionsNoContext<K, V> : never): Promise<undefined | V>;
|
||||
/**
|
||||
* Return a value from the cache. Will update the recency of the cache
|
||||
* entry found.
|
||||
*
|
||||
* If the key is not found, get() will return `undefined`.
|
||||
*/
|
||||
get(k: K, getOptions?: LRUCache.GetOptions<K, V, FC>): V | undefined;
|
||||
/**
|
||||
* Deletes a key out of the cache.
|
||||
* Returns true if the key was deleted, false otherwise.
|
||||
*/
|
||||
delete(k: K): boolean;
|
||||
/**
|
||||
* Clear the cache entirely, throwing away all values.
|
||||
*/
|
||||
clear(): void;
|
||||
}
|
||||
//# sourceMappingURL=index.d.ts.map
|
1
my-app/node_modules/@npmcli/agent/node_modules/lru-cache/dist/esm/index.d.ts.map
generated
vendored
Executable file
1
my-app/node_modules/@npmcli/agent/node_modules/lru-cache/dist/esm/index.d.ts.map
generated
vendored
Executable file
File diff suppressed because one or more lines are too long
1442
my-app/node_modules/@npmcli/agent/node_modules/lru-cache/dist/esm/index.js
generated
vendored
Executable file
1442
my-app/node_modules/@npmcli/agent/node_modules/lru-cache/dist/esm/index.js
generated
vendored
Executable file
File diff suppressed because it is too large
Load diff
1
my-app/node_modules/@npmcli/agent/node_modules/lru-cache/dist/esm/index.js.map
generated
vendored
Executable file
1
my-app/node_modules/@npmcli/agent/node_modules/lru-cache/dist/esm/index.js.map
generated
vendored
Executable file
File diff suppressed because one or more lines are too long
3
my-app/node_modules/@npmcli/agent/node_modules/lru-cache/dist/esm/package.json
generated
vendored
Executable file
3
my-app/node_modules/@npmcli/agent/node_modules/lru-cache/dist/esm/package.json
generated
vendored
Executable file
|
@ -0,0 +1,3 @@
|
|||
{
|
||||
"type": "module"
|
||||
}
|
118
my-app/node_modules/@npmcli/agent/node_modules/lru-cache/package.json
generated
vendored
Executable file
118
my-app/node_modules/@npmcli/agent/node_modules/lru-cache/package.json
generated
vendored
Executable file
|
@ -0,0 +1,118 @@
|
|||
{
|
||||
"name": "lru-cache",
|
||||
"description": "A cache object that deletes the least-recently-used items.",
|
||||
"version": "10.2.0",
|
||||
"author": "Isaac Z. Schlueter <i@izs.me>",
|
||||
"keywords": [
|
||||
"mru",
|
||||
"lru",
|
||||
"cache"
|
||||
],
|
||||
"sideEffects": false,
|
||||
"scripts": {
|
||||
"build": "npm run prepare",
|
||||
"prepare": "tshy",
|
||||
"postprepare": "bash fixup.sh",
|
||||
"pretest": "npm run prepare",
|
||||
"presnap": "npm run prepare",
|
||||
"test": "tap",
|
||||
"snap": "tap",
|
||||
"preversion": "npm test",
|
||||
"postversion": "npm publish",
|
||||
"prepublishOnly": "git push origin --follow-tags",
|
||||
"format": "prettier --write .",
|
||||
"typedoc": "typedoc --tsconfig ./.tshy/esm.json ./src/*.ts",
|
||||
"benchmark-results-typedoc": "bash scripts/benchmark-results-typedoc.sh",
|
||||
"prebenchmark": "npm run prepare",
|
||||
"benchmark": "make -C benchmark",
|
||||
"preprofile": "npm run prepare",
|
||||
"profile": "make -C benchmark profile"
|
||||
},
|
||||
"main": "./dist/commonjs/index.js",
|
||||
"types": "./dist/commonjs/index.d.ts",
|
||||
"tshy": {
|
||||
"exports": {
|
||||
".": "./src/index.ts",
|
||||
"./min": {
|
||||
"import": {
|
||||
"types": "./dist/mjs/index.d.ts",
|
||||
"default": "./dist/mjs/index.min.js"
|
||||
},
|
||||
"require": {
|
||||
"types": "./dist/commonjs/index.d.ts",
|
||||
"default": "./dist/commonjs/index.min.js"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "git://github.com/isaacs/node-lru-cache.git"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@tapjs/clock": "^1.1.16",
|
||||
"@types/node": "^20.2.5",
|
||||
"@types/tap": "^15.0.6",
|
||||
"benchmark": "^2.1.4",
|
||||
"clock-mock": "^2.0.2",
|
||||
"esbuild": "^0.17.11",
|
||||
"eslint-config-prettier": "^8.5.0",
|
||||
"marked": "^4.2.12",
|
||||
"mkdirp": "^2.1.5",
|
||||
"prettier": "^2.6.2",
|
||||
"tap": "^18.5.7",
|
||||
"tshy": "^1.8.0",
|
||||
"tslib": "^2.4.0",
|
||||
"typedoc": "^0.25.3",
|
||||
"typescript": "^5.2.2"
|
||||
},
|
||||
"license": "ISC",
|
||||
"files": [
|
||||
"dist"
|
||||
],
|
||||
"engines": {
|
||||
"node": "14 || >=16.14"
|
||||
},
|
||||
"prettier": {
|
||||
"semi": false,
|
||||
"printWidth": 70,
|
||||
"tabWidth": 2,
|
||||
"useTabs": false,
|
||||
"singleQuote": true,
|
||||
"jsxSingleQuote": false,
|
||||
"bracketSameLine": true,
|
||||
"arrowParens": "avoid",
|
||||
"endOfLine": "lf"
|
||||
},
|
||||
"tap": {
|
||||
"node-arg": [
|
||||
"--expose-gc"
|
||||
],
|
||||
"plugin": [
|
||||
"@tapjs/clock"
|
||||
]
|
||||
},
|
||||
"exports": {
|
||||
".": {
|
||||
"import": {
|
||||
"types": "./dist/esm/index.d.ts",
|
||||
"default": "./dist/esm/index.js"
|
||||
},
|
||||
"require": {
|
||||
"types": "./dist/commonjs/index.d.ts",
|
||||
"default": "./dist/commonjs/index.js"
|
||||
}
|
||||
},
|
||||
"./min": {
|
||||
"import": {
|
||||
"types": "./dist/mjs/index.d.ts",
|
||||
"default": "./dist/mjs/index.min.js"
|
||||
},
|
||||
"require": {
|
||||
"types": "./dist/commonjs/index.d.ts",
|
||||
"default": "./dist/commonjs/index.min.js"
|
||||
}
|
||||
}
|
||||
},
|
||||
"type": "module"
|
||||
}
|
60
my-app/node_modules/@npmcli/agent/package.json
generated
vendored
Executable file
60
my-app/node_modules/@npmcli/agent/package.json
generated
vendored
Executable file
|
@ -0,0 +1,60 @@
|
|||
{
|
||||
"name": "@npmcli/agent",
|
||||
"version": "2.2.1",
|
||||
"description": "the http/https agent used by the npm cli",
|
||||
"main": "lib/index.js",
|
||||
"scripts": {
|
||||
"gencerts": "bash scripts/create-cert.sh",
|
||||
"test": "tap",
|
||||
"lint": "eslint \"**/*.{js,cjs,ts,mjs,jsx,tsx}\"",
|
||||
"postlint": "template-oss-check",
|
||||
"template-oss-apply": "template-oss-apply --force",
|
||||
"lintfix": "npm run lint -- --fix",
|
||||
"snap": "tap",
|
||||
"posttest": "npm run lint"
|
||||
},
|
||||
"author": "GitHub Inc.",
|
||||
"license": "ISC",
|
||||
"bugs": {
|
||||
"url": "https://github.com/npm/agent/issues"
|
||||
},
|
||||
"homepage": "https://github.com/npm/agent#readme",
|
||||
"files": [
|
||||
"bin/",
|
||||
"lib/"
|
||||
],
|
||||
"engines": {
|
||||
"node": "^16.14.0 || >=18.0.0"
|
||||
},
|
||||
"templateOSS": {
|
||||
"//@npmcli/template-oss": "This file is partially managed by @npmcli/template-oss. Edits may be overwritten.",
|
||||
"version": "4.21.3",
|
||||
"publish": "true"
|
||||
},
|
||||
"dependencies": {
|
||||
"agent-base": "^7.1.0",
|
||||
"http-proxy-agent": "^7.0.0",
|
||||
"https-proxy-agent": "^7.0.1",
|
||||
"lru-cache": "^10.0.1",
|
||||
"socks-proxy-agent": "^8.0.1"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@npmcli/eslint-config": "^4.0.0",
|
||||
"@npmcli/template-oss": "4.21.3",
|
||||
"minipass-fetch": "^3.0.3",
|
||||
"nock": "^13.2.7",
|
||||
"semver": "^7.5.4",
|
||||
"simple-socks": "^3.1.0",
|
||||
"tap": "^16.3.0"
|
||||
},
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "https://github.com/npm/agent.git"
|
||||
},
|
||||
"tap": {
|
||||
"nyc-arg": [
|
||||
"--exclude",
|
||||
"tap-snapshots/**"
|
||||
]
|
||||
}
|
||||
}
|
Loading…
Add table
Add a link
Reference in a new issue