chore: update deps

Rim
2025-03-29 16:05:52 -04:00
parent a177b9bb4c
commit 505a26c84d
1489 changed files with 27814 additions and 146817 deletions

node_modules/undici/lib/dispatcher/agent.js generated vendored Normal file

@@ -0,0 +1,115 @@
'use strict'
const { InvalidArgumentError } = require('../core/errors')
const { kClients, kRunning, kClose, kDestroy, kDispatch } = require('../core/symbols')
const DispatcherBase = require('./dispatcher-base')
const Pool = require('./pool')
const Client = require('./client')
const util = require('../core/util')
const kOnConnect = Symbol('onConnect')
const kOnDisconnect = Symbol('onDisconnect')
const kOnConnectionError = Symbol('onConnectionError')
const kOnDrain = Symbol('onDrain')
const kFactory = Symbol('factory')
const kOptions = Symbol('options')
function defaultFactory (origin, opts) {
return opts && opts.connections === 1
? new Client(origin, opts)
: new Pool(origin, opts)
}
class Agent extends DispatcherBase {
constructor ({ factory = defaultFactory, connect, ...options } = {}) {
if (typeof factory !== 'function') {
throw new InvalidArgumentError('factory must be a function.')
}
if (connect != null && typeof connect !== 'function' && typeof connect !== 'object') {
throw new InvalidArgumentError('connect must be a function or an object')
}
super()
if (connect && typeof connect !== 'function') {
connect = { ...connect }
}
this[kOptions] = { ...util.deepClone(options), connect }
this[kFactory] = factory
this[kClients] = new Map()
this[kOnDrain] = (origin, targets) => {
this.emit('drain', origin, [this, ...targets])
}
this[kOnConnect] = (origin, targets) => {
this.emit('connect', origin, [this, ...targets])
}
this[kOnDisconnect] = (origin, targets, err) => {
this.emit('disconnect', origin, [this, ...targets], err)
}
this[kOnConnectionError] = (origin, targets, err) => {
this.emit('connectionError', origin, [this, ...targets], err)
}
}
get [kRunning] () {
let ret = 0
for (const client of this[kClients].values()) {
ret += client[kRunning]
}
return ret
}
[kDispatch] (opts, handler) {
let key
if (opts.origin && (typeof opts.origin === 'string' || opts.origin instanceof URL)) {
key = String(opts.origin)
} else {
throw new InvalidArgumentError('opts.origin must be a non-empty string or URL.')
}
let dispatcher = this[kClients].get(key)
if (!dispatcher) {
dispatcher = this[kFactory](opts.origin, this[kOptions])
.on('drain', this[kOnDrain])
.on('connect', this[kOnConnect])
.on('disconnect', this[kOnDisconnect])
.on('connectionError', this[kOnConnectionError])
// This introduces a tiny memory leak, as dispatchers are never removed from the map.
// TODO(mcollina): remove the timer when the client/pool does not have any more
// active connections.
this[kClients].set(key, dispatcher)
}
return dispatcher.dispatch(opts, handler)
}
async [kClose] () {
const closePromises = []
for (const client of this[kClients].values()) {
closePromises.push(client.close())
}
this[kClients].clear()
await Promise.all(closePromises)
}
async [kDestroy] (err) {
const destroyPromises = []
for (const client of this[kClients].values()) {
destroyPromises.push(client.destroy(err))
}
this[kClients].clear()
await Promise.all(destroyPromises)
}
}
module.exports = Agent
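
A minimal usage sketch for the Agent above, assuming undici's standard top-level exports; the origin is a placeholder. The Agent keys one Pool (or a single Client when connections === 1) per origin inside [kDispatch].
'use strict'
const { Agent, request, setGlobalDispatcher } = require('undici')

// One pooled dispatcher per origin; options are deep-cloned and handed to the factory.
const agent = new Agent({ connections: 10 })

async function main () {
  // Use it globally, or pass it per request via the `dispatcher` option.
  setGlobalDispatcher(agent)
  const { statusCode, body } = await request('https://example.com/', { dispatcher: agent })
  console.log(statusCode)
  await body.text()
  await agent.close() // closes every client/pool created by [kDispatch]
}

main().catch(console.error)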

node_modules/undici/lib/dispatcher/balanced-pool.js generated vendored Normal file

@@ -0,0 +1,206 @@
'use strict'
const {
BalancedPoolMissingUpstreamError,
InvalidArgumentError
} = require('../core/errors')
const {
PoolBase,
kClients,
kNeedDrain,
kAddClient,
kRemoveClient,
kGetDispatcher
} = require('./pool-base')
const Pool = require('./pool')
const { kUrl } = require('../core/symbols')
const { parseOrigin } = require('../core/util')
const kFactory = Symbol('factory')
const kOptions = Symbol('options')
const kGreatestCommonDivisor = Symbol('kGreatestCommonDivisor')
const kCurrentWeight = Symbol('kCurrentWeight')
const kIndex = Symbol('kIndex')
const kWeight = Symbol('kWeight')
const kMaxWeightPerServer = Symbol('kMaxWeightPerServer')
const kErrorPenalty = Symbol('kErrorPenalty')
/**
* Calculate the greatest common divisor of two numbers by
* using the Euclidean algorithm.
*
* @param {number} a
* @param {number} b
* @returns {number}
*/
function getGreatestCommonDivisor (a, b) {
if (a === 0) return b
while (b !== 0) {
const t = b
b = a % b
a = t
}
return a
}
function defaultFactory (origin, opts) {
return new Pool(origin, opts)
}
class BalancedPool extends PoolBase {
constructor (upstreams = [], { factory = defaultFactory, ...opts } = {}) {
if (typeof factory !== 'function') {
throw new InvalidArgumentError('factory must be a function.')
}
super()
this[kOptions] = opts
this[kIndex] = -1
this[kCurrentWeight] = 0
this[kMaxWeightPerServer] = this[kOptions].maxWeightPerServer || 100
this[kErrorPenalty] = this[kOptions].errorPenalty || 15
if (!Array.isArray(upstreams)) {
upstreams = [upstreams]
}
this[kFactory] = factory
for (const upstream of upstreams) {
this.addUpstream(upstream)
}
this._updateBalancedPoolStats()
}
addUpstream (upstream) {
const upstreamOrigin = parseOrigin(upstream).origin
if (this[kClients].find((pool) => (
pool[kUrl].origin === upstreamOrigin &&
pool.closed !== true &&
pool.destroyed !== true
))) {
return this
}
const pool = this[kFactory](upstreamOrigin, Object.assign({}, this[kOptions]))
this[kAddClient](pool)
pool.on('connect', () => {
pool[kWeight] = Math.min(this[kMaxWeightPerServer], pool[kWeight] + this[kErrorPenalty])
})
pool.on('connectionError', () => {
pool[kWeight] = Math.max(1, pool[kWeight] - this[kErrorPenalty])
this._updateBalancedPoolStats()
})
pool.on('disconnect', (...args) => {
const err = args[2]
if (err && err.code === 'UND_ERR_SOCKET') {
// decrease the weight of the pool.
pool[kWeight] = Math.max(1, pool[kWeight] - this[kErrorPenalty])
this._updateBalancedPoolStats()
}
})
for (const client of this[kClients]) {
client[kWeight] = this[kMaxWeightPerServer]
}
this._updateBalancedPoolStats()
return this
}
_updateBalancedPoolStats () {
let result = 0
for (let i = 0; i < this[kClients].length; i++) {
result = getGreatestCommonDivisor(this[kClients][i][kWeight], result)
}
this[kGreatestCommonDivisor] = result
}
removeUpstream (upstream) {
const upstreamOrigin = parseOrigin(upstream).origin
const pool = this[kClients].find((pool) => (
pool[kUrl].origin === upstreamOrigin &&
pool.closed !== true &&
pool.destroyed !== true
))
if (pool) {
this[kRemoveClient](pool)
}
return this
}
get upstreams () {
return this[kClients]
.filter(dispatcher => dispatcher.closed !== true && dispatcher.destroyed !== true)
.map((p) => p[kUrl].origin)
}
[kGetDispatcher] () {
// We validate that the number of pools is greater than 0,
// otherwise we would have to wait until an upstream
// is added, which might never happen.
if (this[kClients].length === 0) {
throw new BalancedPoolMissingUpstreamError()
}
const dispatcher = this[kClients].find(dispatcher => (
!dispatcher[kNeedDrain] &&
dispatcher.closed !== true &&
dispatcher.destroyed !== true
))
if (!dispatcher) {
return
}
const allClientsBusy = this[kClients].map(pool => pool[kNeedDrain]).reduce((a, b) => a && b, true)
if (allClientsBusy) {
return
}
let counter = 0
let maxWeightIndex = this[kClients].findIndex(pool => !pool[kNeedDrain])
while (counter++ < this[kClients].length) {
this[kIndex] = (this[kIndex] + 1) % this[kClients].length
const pool = this[kClients][this[kIndex]]
// find pool index with the largest weight
if (pool[kWeight] > this[kClients][maxWeightIndex][kWeight] && !pool[kNeedDrain]) {
maxWeightIndex = this[kIndex]
}
// decrease the current weight every `this[kClients].length` iterations.
if (this[kIndex] === 0) {
// Set the current weight to the next lower weight.
this[kCurrentWeight] = this[kCurrentWeight] - this[kGreatestCommonDivisor]
if (this[kCurrentWeight] <= 0) {
this[kCurrentWeight] = this[kMaxWeightPerServer]
}
}
if (pool[kWeight] >= this[kCurrentWeight] && (!pool[kNeedDrain])) {
return pool
}
}
this[kCurrentWeight] = this[kClients][maxWeightIndex][kWeight]
this[kIndex] = maxWeightIndex
return this[kClients][maxWeightIndex]
}
}
module.exports = BalancedPool
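
A hedged usage sketch for the BalancedPool above; the upstream addresses are placeholders. Weights start at maxWeightPerServer and drop by errorPenalty on connection errors, which skews the interleaved round-robin in [kGetDispatcher].
'use strict'
const { BalancedPool, request } = require('undici')

const pool = new BalancedPool(
  ['http://10.0.0.1:3000', 'http://10.0.0.2:3000'],
  { maxWeightPerServer: 100, errorPenalty: 15 }
)

// Upstreams can be added or removed at runtime; the weighted schedule is recomputed.
pool.addUpstream('http://10.0.0.3:3000')
console.log(pool.upstreams)

request('http://10.0.0.1:3000/health', { dispatcher: pool })
  .then(async ({ statusCode, body }) => {
    console.log(statusCode)
    await body.dump()
  })
  .catch(console.error)
  .finally(() => pool.close())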

node_modules/undici/lib/dispatcher/client-h1.js generated vendored Normal file

File diff suppressed because it is too large

node_modules/undici/lib/dispatcher/client-h2.js generated vendored Normal file

@@ -0,0 +1,795 @@
'use strict'
const assert = require('node:assert')
const { pipeline } = require('node:stream')
const util = require('../core/util.js')
const {
RequestContentLengthMismatchError,
RequestAbortedError,
SocketError,
InformationalError
} = require('../core/errors.js')
const {
kUrl,
kReset,
kClient,
kRunning,
kPending,
kQueue,
kPendingIdx,
kRunningIdx,
kError,
kSocket,
kStrictContentLength,
kOnError,
kMaxConcurrentStreams,
kHTTP2Session,
kResume,
kSize,
kHTTPContext,
kClosed,
kBodyTimeout
} = require('../core/symbols.js')
const { channels } = require('../core/diagnostics.js')
const kOpenStreams = Symbol('open streams')
let extractBody
/** @type {import('http2')} */
let http2
try {
http2 = require('node:http2')
} catch {
// @ts-ignore
http2 = { constants: {} }
}
const {
constants: {
HTTP2_HEADER_AUTHORITY,
HTTP2_HEADER_METHOD,
HTTP2_HEADER_PATH,
HTTP2_HEADER_SCHEME,
HTTP2_HEADER_CONTENT_LENGTH,
HTTP2_HEADER_EXPECT,
HTTP2_HEADER_STATUS
}
} = http2
function parseH2Headers (headers) {
const result = []
for (const [name, value] of Object.entries(headers)) {
// h2 may concatenate header values into an array,
// e.g. Set-Cookie
if (Array.isArray(value)) {
for (const subvalue of value) {
// we need to provide each value of the header name
// because the headers handler expects name-value pairs
result.push(Buffer.from(name), Buffer.from(subvalue))
}
} else {
result.push(Buffer.from(name), Buffer.from(value))
}
}
return result
}
async function connectH2 (client, socket) {
client[kSocket] = socket
const session = http2.connect(client[kUrl], {
createConnection: () => socket,
peerMaxConcurrentStreams: client[kMaxConcurrentStreams],
settings: {
// TODO(metcoder95): add support for PUSH
enablePush: false
}
})
session[kOpenStreams] = 0
session[kClient] = client
session[kSocket] = socket
session[kHTTP2Session] = null
util.addListener(session, 'error', onHttp2SessionError)
util.addListener(session, 'frameError', onHttp2FrameError)
util.addListener(session, 'end', onHttp2SessionEnd)
util.addListener(session, 'goaway', onHttp2SessionGoAway)
util.addListener(session, 'close', onHttp2SessionClose)
session.unref()
client[kHTTP2Session] = session
socket[kHTTP2Session] = session
util.addListener(socket, 'error', onHttp2SocketError)
util.addListener(socket, 'end', onHttp2SocketEnd)
util.addListener(socket, 'close', onHttp2SocketClose)
socket[kClosed] = false
socket.on('close', onSocketClose)
return {
version: 'h2',
defaultPipelining: Infinity,
write (request) {
return writeH2(client, request)
},
resume () {
resumeH2(client)
},
destroy (err, callback) {
if (socket[kClosed]) {
queueMicrotask(callback)
} else {
socket.destroy(err).on('close', callback)
}
},
get destroyed () {
return socket.destroyed
},
busy () {
return false
}
}
}
function resumeH2 (client) {
const socket = client[kSocket]
if (socket?.destroyed === false) {
if (client[kSize] === 0 || client[kMaxConcurrentStreams] === 0) {
socket.unref()
client[kHTTP2Session].unref()
} else {
socket.ref()
client[kHTTP2Session].ref()
}
}
}
function onHttp2SessionError (err) {
assert(err.code !== 'ERR_TLS_CERT_ALTNAME_INVALID')
this[kSocket][kError] = err
this[kClient][kOnError](err)
}
function onHttp2FrameError (type, code, id) {
if (id === 0) {
const err = new InformationalError(`HTTP/2: "frameError" received - type ${type}, code ${code}`)
this[kSocket][kError] = err
this[kClient][kOnError](err)
}
}
function onHttp2SessionEnd () {
const err = new SocketError('other side closed', util.getSocketInfo(this[kSocket]))
this.destroy(err)
util.destroy(this[kSocket], err)
}
/**
* This is the root cause of #3011
* We need to handle GOAWAY frames properly, and trigger the session close
* along with the socket right away
*
* @this {import('http2').ClientHttp2Session}
* @param {number} errorCode
*/
function onHttp2SessionGoAway (errorCode) {
// TODO(mcollina): Verify if GOAWAY implements the spec correctly:
// https://datatracker.ietf.org/doc/html/rfc7540#section-6.8
// Specifically, we do not verify the "valid" stream id.
const err = this[kError] || new SocketError(`HTTP/2: "GOAWAY" frame received with code ${errorCode}`, util.getSocketInfo(this[kSocket]))
const client = this[kClient]
client[kSocket] = null
client[kHTTPContext] = null
// this is an HTTP2 session
this.close()
this[kHTTP2Session] = null
util.destroy(this[kSocket], err)
// Fail head of pipeline.
if (client[kRunningIdx] < client[kQueue].length) {
const request = client[kQueue][client[kRunningIdx]]
client[kQueue][client[kRunningIdx]++] = null
util.errorRequest(client, request, err)
client[kPendingIdx] = client[kRunningIdx]
}
assert(client[kRunning] === 0)
client.emit('disconnect', client[kUrl], [client], err)
client[kResume]()
}
function onHttp2SessionClose () {
const { [kClient]: client } = this
const { [kSocket]: socket } = client
const err = this[kSocket][kError] || this[kError] || new SocketError('closed', util.getSocketInfo(socket))
client[kSocket] = null
client[kHTTPContext] = null
if (client.destroyed) {
assert(client[kPending] === 0)
// Fail entire queue.
const requests = client[kQueue].splice(client[kRunningIdx])
for (let i = 0; i < requests.length; i++) {
const request = requests[i]
util.errorRequest(client, request, err)
}
}
}
function onHttp2SocketClose () {
const err = this[kError] || new SocketError('closed', util.getSocketInfo(this))
const client = this[kHTTP2Session][kClient]
client[kSocket] = null
client[kHTTPContext] = null
if (this[kHTTP2Session] !== null) {
this[kHTTP2Session].destroy(err)
}
client[kPendingIdx] = client[kRunningIdx]
assert(client[kRunning] === 0)
client.emit('disconnect', client[kUrl], [client], err)
client[kResume]()
}
function onHttp2SocketError (err) {
assert(err.code !== 'ERR_TLS_CERT_ALTNAME_INVALID')
this[kError] = err
this[kClient][kOnError](err)
}
function onHttp2SocketEnd () {
util.destroy(this, new SocketError('other side closed', util.getSocketInfo(this)))
}
function onSocketClose () {
this[kClosed] = true
}
// https://www.rfc-editor.org/rfc/rfc7230#section-3.3.2
function shouldSendContentLength (method) {
return method !== 'GET' && method !== 'HEAD' && method !== 'OPTIONS' && method !== 'TRACE' && method !== 'CONNECT'
}
function writeH2 (client, request) {
const requestTimeout = request.bodyTimeout ?? client[kBodyTimeout]
const session = client[kHTTP2Session]
const { method, path, host, upgrade, expectContinue, signal, headers: reqHeaders } = request
let { body } = request
if (upgrade) {
util.errorRequest(client, request, new Error('Upgrade not supported for H2'))
return false
}
const headers = {}
for (let n = 0; n < reqHeaders.length; n += 2) {
const key = reqHeaders[n + 0]
const val = reqHeaders[n + 1]
if (Array.isArray(val)) {
for (let i = 0; i < val.length; i++) {
if (headers[key]) {
headers[key] += `,${val[i]}`
} else {
headers[key] = val[i]
}
}
} else {
headers[key] = val
}
}
/** @type {import('node:http2').ClientHttp2Stream} */
let stream = null
const { hostname, port } = client[kUrl]
headers[HTTP2_HEADER_AUTHORITY] = host || `${hostname}${port ? `:${port}` : ''}`
headers[HTTP2_HEADER_METHOD] = method
const abort = (err) => {
if (request.aborted || request.completed) {
return
}
err = err || new RequestAbortedError()
util.errorRequest(client, request, err)
if (stream != null) {
// Some chunks might still come after abort,
// let's ignore them
stream.removeAllListeners('data')
// On Abort, we close the stream to send RST_STREAM frame
stream.close()
// We move the running index to the next request
client[kOnError](err)
client[kResume]()
}
// We do not destroy the socket as we can continue using the session
// the stream gets destroyed and the session remains to create new streams
util.destroy(body, err)
}
try {
// We are already connected, streams are pending.
// We can call on connect, and wait for abort
request.onConnect(abort)
} catch (err) {
util.errorRequest(client, request, err)
}
if (request.aborted) {
return false
}
if (method === 'CONNECT') {
session.ref()
// We are already connected, streams are pending, first request
// will create a new stream. We trigger a request to create the stream and wait until
// `ready` event is triggered
// We disabled endStream to allow the user to write to the stream
stream = session.request(headers, { endStream: false, signal })
if (!stream.pending) {
request.onUpgrade(null, null, stream)
++session[kOpenStreams]
client[kQueue][client[kRunningIdx]++] = null
} else {
stream.once('ready', () => {
request.onUpgrade(null, null, stream)
++session[kOpenStreams]
client[kQueue][client[kRunningIdx]++] = null
})
}
stream.once('close', () => {
session[kOpenStreams] -= 1
if (session[kOpenStreams] === 0) session.unref()
})
stream.setTimeout(requestTimeout)
return true
}
// https://tools.ietf.org/html/rfc7540#section-8.3
// :path and :scheme headers must be omitted when sending CONNECT
headers[HTTP2_HEADER_PATH] = path
headers[HTTP2_HEADER_SCHEME] = 'https'
// https://tools.ietf.org/html/rfc7231#section-4.3.1
// https://tools.ietf.org/html/rfc7231#section-4.3.2
// https://tools.ietf.org/html/rfc7231#section-4.3.5
// Sending a payload body on a request that does not
// expect it can cause undefined behavior on some
// servers and corrupt connection state. Do not
// re-use the connection for further requests.
const expectsPayload = (
method === 'PUT' ||
method === 'POST' ||
method === 'PATCH'
)
if (body && typeof body.read === 'function') {
// Try to read EOF in order to get length.
body.read(0)
}
let contentLength = util.bodyLength(body)
if (util.isFormDataLike(body)) {
extractBody ??= require('../web/fetch/body.js').extractBody
const [bodyStream, contentType] = extractBody(body)
headers['content-type'] = contentType
body = bodyStream.stream
contentLength = bodyStream.length
}
if (contentLength == null) {
contentLength = request.contentLength
}
if (contentLength === 0 || !expectsPayload) {
// https://tools.ietf.org/html/rfc7230#section-3.3.2
// A user agent SHOULD NOT send a Content-Length header field when
// the request message does not contain a payload body and the method
// semantics do not anticipate such a body.
contentLength = null
}
// https://github.com/nodejs/undici/issues/2046
// A user agent may send a Content-Length header with 0 value, this should be allowed.
if (shouldSendContentLength(method) && contentLength > 0 && request.contentLength != null && request.contentLength !== contentLength) {
if (client[kStrictContentLength]) {
util.errorRequest(client, request, new RequestContentLengthMismatchError())
return false
}
process.emitWarning(new RequestContentLengthMismatchError())
}
if (contentLength != null) {
assert(body, 'no body must not have content length')
headers[HTTP2_HEADER_CONTENT_LENGTH] = `${contentLength}`
}
session.ref()
if (channels.sendHeaders.hasSubscribers) {
let header = ''
for (const key in headers) {
header += `${key}: ${headers[key]}\r\n`
}
channels.sendHeaders.publish({ request, headers: header, socket: session[kSocket] })
}
// TODO(metcoder95): add support for sending trailers
const shouldEndStream = method === 'GET' || method === 'HEAD' || body === null
if (expectContinue) {
headers[HTTP2_HEADER_EXPECT] = '100-continue'
stream = session.request(headers, { endStream: shouldEndStream, signal })
stream.once('continue', writeBodyH2)
} else {
stream = session.request(headers, {
endStream: shouldEndStream,
signal
})
writeBodyH2()
}
// Increment counter as we have new streams open
++session[kOpenStreams]
stream.setTimeout(requestTimeout)
stream.once('response', headers => {
const { [HTTP2_HEADER_STATUS]: statusCode, ...realHeaders } = headers
request.onResponseStarted()
// Due to the stream nature, it is possible we face a race condition
// where the stream has been assigned, but the request has been aborted;
// the request remains in-flight and headers have not been received yet.
// For those scenarios, the best effort is to destroy the stream immediately,
// as there is no value in keeping it open.
if (request.aborted) {
stream.removeAllListeners('data')
return
}
if (request.onHeaders(Number(statusCode), parseH2Headers(realHeaders), stream.resume.bind(stream), '') === false) {
stream.pause()
}
})
stream.on('data', (chunk) => {
if (request.onData(chunk) === false) {
stream.pause()
}
})
stream.once('end', (err) => {
stream.removeAllListeners('data')
// When state is null, it means we haven't consumed the body and the stream still does not
// have a state.
// This happens especially when using pipeline or stream.
if (stream.state?.state == null || stream.state.state < 6) {
// Do not complete the request if it was aborted.
// Unlikely to happen, but acts as a safety net to avoid race conditions with 'trailers'.
if (!request.aborted && !request.completed) {
request.onComplete({})
}
client[kQueue][client[kRunningIdx]++] = null
client[kResume]()
} else {
// Stream is closed or half-closed-remote (6), decrement counter and clean up.
// It does not make sense to continue working with the stream as we do not
// yet have RST_STREAM support on the client side.
--session[kOpenStreams]
if (session[kOpenStreams] === 0) {
session.unref()
}
abort(err ?? new InformationalError('HTTP/2: stream half-closed (remote)'))
client[kQueue][client[kRunningIdx]++] = null
client[kPendingIdx] = client[kRunningIdx]
client[kResume]()
}
})
stream.once('close', () => {
stream.removeAllListeners('data')
session[kOpenStreams] -= 1
if (session[kOpenStreams] === 0) {
session.unref()
}
})
stream.once('error', function (err) {
stream.removeAllListeners('data')
abort(err)
})
stream.once('frameError', (type, code) => {
stream.removeAllListeners('data')
abort(new InformationalError(`HTTP/2: "frameError" received - type ${type}, code ${code}`))
})
stream.on('aborted', () => {
stream.removeAllListeners('data')
})
stream.on('timeout', () => {
const err = new InformationalError(`HTTP/2: "stream timeout after ${requestTimeout}"`)
stream.removeAllListeners('data')
session[kOpenStreams] -= 1
if (session[kOpenStreams] === 0) {
session.unref()
}
abort(err)
})
stream.once('trailers', trailers => {
if (request.aborted || request.completed) {
return
}
request.onComplete(trailers)
})
return true
function writeBodyH2 () {
/* istanbul ignore else: assertion */
if (!body || contentLength === 0) {
writeBuffer(
abort,
stream,
null,
client,
request,
client[kSocket],
contentLength,
expectsPayload
)
} else if (util.isBuffer(body)) {
writeBuffer(
abort,
stream,
body,
client,
request,
client[kSocket],
contentLength,
expectsPayload
)
} else if (util.isBlobLike(body)) {
if (typeof body.stream === 'function') {
writeIterable(
abort,
stream,
body.stream(),
client,
request,
client[kSocket],
contentLength,
expectsPayload
)
} else {
writeBlob(
abort,
stream,
body,
client,
request,
client[kSocket],
contentLength,
expectsPayload
)
}
} else if (util.isStream(body)) {
writeStream(
abort,
client[kSocket],
expectsPayload,
stream,
body,
client,
request,
contentLength
)
} else if (util.isIterable(body)) {
writeIterable(
abort,
stream,
body,
client,
request,
client[kSocket],
contentLength,
expectsPayload
)
} else {
assert(false)
}
}
}
function writeBuffer (abort, h2stream, body, client, request, socket, contentLength, expectsPayload) {
try {
if (body != null && util.isBuffer(body)) {
assert(contentLength === body.byteLength, 'buffer body must have content length')
h2stream.cork()
h2stream.write(body)
h2stream.uncork()
h2stream.end()
request.onBodySent(body)
}
if (!expectsPayload) {
socket[kReset] = true
}
request.onRequestSent()
client[kResume]()
} catch (error) {
abort(error)
}
}
function writeStream (abort, socket, expectsPayload, h2stream, body, client, request, contentLength) {
assert(contentLength !== 0 || client[kRunning] === 0, 'stream body cannot be pipelined')
// For HTTP/2, it is enough to pipe the stream
const pipe = pipeline(
body,
h2stream,
(err) => {
if (err) {
util.destroy(pipe, err)
abort(err)
} else {
util.removeAllListeners(pipe)
request.onRequestSent()
if (!expectsPayload) {
socket[kReset] = true
}
client[kResume]()
}
}
)
util.addListener(pipe, 'data', onPipeData)
function onPipeData (chunk) {
request.onBodySent(chunk)
}
}
async function writeBlob (abort, h2stream, body, client, request, socket, contentLength, expectsPayload) {
assert(contentLength === body.size, 'blob body must have content length')
try {
if (contentLength != null && contentLength !== body.size) {
throw new RequestContentLengthMismatchError()
}
const buffer = Buffer.from(await body.arrayBuffer())
h2stream.cork()
h2stream.write(buffer)
h2stream.uncork()
h2stream.end()
request.onBodySent(buffer)
request.onRequestSent()
if (!expectsPayload) {
socket[kReset] = true
}
client[kResume]()
} catch (err) {
abort(err)
}
}
async function writeIterable (abort, h2stream, body, client, request, socket, contentLength, expectsPayload) {
assert(contentLength !== 0 || client[kRunning] === 0, 'iterator body cannot be pipelined')
let callback = null
function onDrain () {
if (callback) {
const cb = callback
callback = null
cb()
}
}
const waitForDrain = () => new Promise((resolve, reject) => {
assert(callback === null)
if (socket[kError]) {
reject(socket[kError])
} else {
callback = resolve
}
})
h2stream
.on('close', onDrain)
.on('drain', onDrain)
try {
// It's up to the user to somehow abort the async iterable.
for await (const chunk of body) {
if (socket[kError]) {
throw socket[kError]
}
const res = h2stream.write(chunk)
request.onBodySent(chunk)
if (!res) {
await waitForDrain()
}
}
h2stream.end()
request.onRequestSent()
if (!expectsPayload) {
socket[kReset] = true
}
client[kResume]()
} catch (err) {
abort(err)
} finally {
h2stream
.off('close', onDrain)
.off('drain', onDrain)
}
}
module.exports = connectH2
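
The connectH2 handler above is only selected when the connector negotiates ALPN "h2"; a brief sketch of how a caller opts in, assuming the Client options added later in this commit and a placeholder URL.
'use strict'
const { Client } = require('undici')

const client = new Client('https://example.com', {
  allowH2: true, // lets the connector offer "h2" via ALPN
  maxConcurrentStreams: 100 // forwarded to the session as peerMaxConcurrentStreams
})

client.request({ path: '/', method: 'GET' })
  .then(async ({ statusCode, body }) => {
    console.log(statusCode)
    await body.dump()
    await client.close()
  })
  .catch(console.error)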

node_modules/undici/lib/dispatcher/client.js generated vendored Normal file

@@ -0,0 +1,609 @@
'use strict'
const assert = require('node:assert')
const net = require('node:net')
const http = require('node:http')
const util = require('../core/util.js')
const { channels } = require('../core/diagnostics.js')
const Request = require('../core/request.js')
const DispatcherBase = require('./dispatcher-base')
const {
InvalidArgumentError,
InformationalError,
ClientDestroyedError
} = require('../core/errors.js')
const buildConnector = require('../core/connect.js')
const {
kUrl,
kServerName,
kClient,
kBusy,
kConnect,
kResuming,
kRunning,
kPending,
kSize,
kQueue,
kConnected,
kConnecting,
kNeedDrain,
kKeepAliveDefaultTimeout,
kHostHeader,
kPendingIdx,
kRunningIdx,
kError,
kPipelining,
kKeepAliveTimeoutValue,
kMaxHeadersSize,
kKeepAliveMaxTimeout,
kKeepAliveTimeoutThreshold,
kHeadersTimeout,
kBodyTimeout,
kStrictContentLength,
kConnector,
kMaxRequests,
kCounter,
kClose,
kDestroy,
kDispatch,
kLocalAddress,
kMaxResponseSize,
kOnError,
kHTTPContext,
kMaxConcurrentStreams,
kResume
} = require('../core/symbols.js')
const connectH1 = require('./client-h1.js')
const connectH2 = require('./client-h2.js')
const kClosedResolve = Symbol('kClosedResolve')
const getDefaultNodeMaxHeaderSize = http &&
http.maxHeaderSize &&
Number.isInteger(http.maxHeaderSize) &&
http.maxHeaderSize > 0
? () => http.maxHeaderSize
: () => { throw new InvalidArgumentError('http module not available or http.maxHeaderSize invalid') }
const noop = () => {}
function getPipelining (client) {
return client[kPipelining] ?? client[kHTTPContext]?.defaultPipelining ?? 1
}
/**
* @type {import('../../types/client.js').default}
*/
class Client extends DispatcherBase {
/**
*
* @param {string|URL} url
* @param {import('../../types/client.js').Client.Options} options
*/
constructor (url, {
maxHeaderSize,
headersTimeout,
socketTimeout,
requestTimeout,
connectTimeout,
bodyTimeout,
idleTimeout,
keepAlive,
keepAliveTimeout,
maxKeepAliveTimeout,
keepAliveMaxTimeout,
keepAliveTimeoutThreshold,
socketPath,
pipelining,
tls,
strictContentLength,
maxCachedSessions,
connect,
maxRequestsPerClient,
localAddress,
maxResponseSize,
autoSelectFamily,
autoSelectFamilyAttemptTimeout,
// h2
maxConcurrentStreams,
allowH2
} = {}) {
if (keepAlive !== undefined) {
throw new InvalidArgumentError('unsupported keepAlive, use pipelining=0 instead')
}
if (socketTimeout !== undefined) {
throw new InvalidArgumentError('unsupported socketTimeout, use headersTimeout & bodyTimeout instead')
}
if (requestTimeout !== undefined) {
throw new InvalidArgumentError('unsupported requestTimeout, use headersTimeout & bodyTimeout instead')
}
if (idleTimeout !== undefined) {
throw new InvalidArgumentError('unsupported idleTimeout, use keepAliveTimeout instead')
}
if (maxKeepAliveTimeout !== undefined) {
throw new InvalidArgumentError('unsupported maxKeepAliveTimeout, use keepAliveMaxTimeout instead')
}
if (maxHeaderSize != null) {
if (!Number.isInteger(maxHeaderSize) || maxHeaderSize < 1) {
throw new InvalidArgumentError('invalid maxHeaderSize')
}
} else {
// If maxHeaderSize is not provided, use the default value from the http module
// or if that is not available, throw an error.
maxHeaderSize = getDefaultNodeMaxHeaderSize()
}
if (socketPath != null && typeof socketPath !== 'string') {
throw new InvalidArgumentError('invalid socketPath')
}
if (connectTimeout != null && (!Number.isFinite(connectTimeout) || connectTimeout < 0)) {
throw new InvalidArgumentError('invalid connectTimeout')
}
if (keepAliveTimeout != null && (!Number.isFinite(keepAliveTimeout) || keepAliveTimeout <= 0)) {
throw new InvalidArgumentError('invalid keepAliveTimeout')
}
if (keepAliveMaxTimeout != null && (!Number.isFinite(keepAliveMaxTimeout) || keepAliveMaxTimeout <= 0)) {
throw new InvalidArgumentError('invalid keepAliveMaxTimeout')
}
if (keepAliveTimeoutThreshold != null && !Number.isFinite(keepAliveTimeoutThreshold)) {
throw new InvalidArgumentError('invalid keepAliveTimeoutThreshold')
}
if (headersTimeout != null && (!Number.isInteger(headersTimeout) || headersTimeout < 0)) {
throw new InvalidArgumentError('headersTimeout must be a positive integer or zero')
}
if (bodyTimeout != null && (!Number.isInteger(bodyTimeout) || bodyTimeout < 0)) {
throw new InvalidArgumentError('bodyTimeout must be a positive integer or zero')
}
if (connect != null && typeof connect !== 'function' && typeof connect !== 'object') {
throw new InvalidArgumentError('connect must be a function or an object')
}
if (maxRequestsPerClient != null && (!Number.isInteger(maxRequestsPerClient) || maxRequestsPerClient < 0)) {
throw new InvalidArgumentError('maxRequestsPerClient must be a positive number')
}
if (localAddress != null && (typeof localAddress !== 'string' || net.isIP(localAddress) === 0)) {
throw new InvalidArgumentError('localAddress must be valid string IP address')
}
if (maxResponseSize != null && (!Number.isInteger(maxResponseSize) || maxResponseSize < -1)) {
throw new InvalidArgumentError('maxResponseSize must be a positive number')
}
if (
autoSelectFamilyAttemptTimeout != null &&
(!Number.isInteger(autoSelectFamilyAttemptTimeout) || autoSelectFamilyAttemptTimeout < -1)
) {
throw new InvalidArgumentError('autoSelectFamilyAttemptTimeout must be a positive number')
}
// h2
if (allowH2 != null && typeof allowH2 !== 'boolean') {
throw new InvalidArgumentError('allowH2 must be a valid boolean value')
}
if (maxConcurrentStreams != null && (typeof maxConcurrentStreams !== 'number' || maxConcurrentStreams < 1)) {
throw new InvalidArgumentError('maxConcurrentStreams must be a positive integer, greater than 0')
}
super()
if (typeof connect !== 'function') {
connect = buildConnector({
...tls,
maxCachedSessions,
allowH2,
socketPath,
timeout: connectTimeout,
...(typeof autoSelectFamily === 'boolean' ? { autoSelectFamily, autoSelectFamilyAttemptTimeout } : undefined),
...connect
})
}
this[kUrl] = util.parseOrigin(url)
this[kConnector] = connect
this[kPipelining] = pipelining != null ? pipelining : 1
this[kMaxHeadersSize] = maxHeaderSize
this[kKeepAliveDefaultTimeout] = keepAliveTimeout == null ? 4e3 : keepAliveTimeout
this[kKeepAliveMaxTimeout] = keepAliveMaxTimeout == null ? 600e3 : keepAliveMaxTimeout
this[kKeepAliveTimeoutThreshold] = keepAliveTimeoutThreshold == null ? 2e3 : keepAliveTimeoutThreshold
this[kKeepAliveTimeoutValue] = this[kKeepAliveDefaultTimeout]
this[kServerName] = null
this[kLocalAddress] = localAddress != null ? localAddress : null
this[kResuming] = 0 // 0, idle, 1, scheduled, 2 resuming
this[kNeedDrain] = 0 // 0, idle, 1, scheduled, 2 resuming
this[kHostHeader] = `host: ${this[kUrl].hostname}${this[kUrl].port ? `:${this[kUrl].port}` : ''}\r\n`
this[kBodyTimeout] = bodyTimeout != null ? bodyTimeout : 300e3
this[kHeadersTimeout] = headersTimeout != null ? headersTimeout : 300e3
this[kStrictContentLength] = strictContentLength == null ? true : strictContentLength
this[kMaxRequests] = maxRequestsPerClient
this[kClosedResolve] = null
this[kMaxResponseSize] = maxResponseSize > -1 ? maxResponseSize : -1
this[kMaxConcurrentStreams] = maxConcurrentStreams != null ? maxConcurrentStreams : 100 // Max peerConcurrentStreams for a Node h2 server
this[kHTTPContext] = null
// kQueue is built up of 3 sections separated by
// the kRunningIdx and kPendingIdx indices.
// | complete | running | pending |
// ^ kRunningIdx ^ kPendingIdx ^ kQueue.length
// kRunningIdx points to the first running element.
// kPendingIdx points to the first pending element.
// This implements a fast queue with an amortized
// time of O(1).
this[kQueue] = []
this[kRunningIdx] = 0
this[kPendingIdx] = 0
this[kResume] = (sync) => resume(this, sync)
this[kOnError] = (err) => onError(this, err)
}
get pipelining () {
return this[kPipelining]
}
set pipelining (value) {
this[kPipelining] = value
this[kResume](true)
}
get [kPending] () {
return this[kQueue].length - this[kPendingIdx]
}
get [kRunning] () {
return this[kPendingIdx] - this[kRunningIdx]
}
get [kSize] () {
return this[kQueue].length - this[kRunningIdx]
}
get [kConnected] () {
return !!this[kHTTPContext] && !this[kConnecting] && !this[kHTTPContext].destroyed
}
get [kBusy] () {
return Boolean(
this[kHTTPContext]?.busy(null) ||
(this[kSize] >= (getPipelining(this) || 1)) ||
this[kPending] > 0
)
}
/* istanbul ignore: only used for test */
[kConnect] (cb) {
connect(this)
this.once('connect', cb)
}
[kDispatch] (opts, handler) {
const origin = opts.origin || this[kUrl].origin
const request = new Request(origin, opts, handler)
this[kQueue].push(request)
if (this[kResuming]) {
// Do nothing.
} else if (util.bodyLength(request.body) == null && util.isIterable(request.body)) {
// Wait a tick in case stream/iterator is ended in the same tick.
this[kResuming] = 1
queueMicrotask(() => resume(this))
} else {
this[kResume](true)
}
if (this[kResuming] && this[kNeedDrain] !== 2 && this[kBusy]) {
this[kNeedDrain] = 2
}
return this[kNeedDrain] < 2
}
async [kClose] () {
// TODO: for H2 we need to gracefully flush the remaining enqueued
// requests and close each stream.
return new Promise((resolve) => {
if (this[kSize]) {
this[kClosedResolve] = resolve
} else {
resolve(null)
}
})
}
async [kDestroy] (err) {
return new Promise((resolve) => {
const requests = this[kQueue].splice(this[kPendingIdx])
for (let i = 0; i < requests.length; i++) {
const request = requests[i]
util.errorRequest(this, request, err)
}
const callback = () => {
if (this[kClosedResolve]) {
// TODO (fix): Should we error here with ClientDestroyedError?
this[kClosedResolve]()
this[kClosedResolve] = null
}
resolve(null)
}
if (this[kHTTPContext]) {
this[kHTTPContext].destroy(err, callback)
this[kHTTPContext] = null
} else {
queueMicrotask(callback)
}
this[kResume]()
})
}
}
function onError (client, err) {
if (
client[kRunning] === 0 &&
err.code !== 'UND_ERR_INFO' &&
err.code !== 'UND_ERR_SOCKET'
) {
// Error is not caused by running request and not a recoverable
// socket error.
assert(client[kPendingIdx] === client[kRunningIdx])
const requests = client[kQueue].splice(client[kRunningIdx])
for (let i = 0; i < requests.length; i++) {
const request = requests[i]
util.errorRequest(client, request, err)
}
assert(client[kSize] === 0)
}
}
/**
* @param {Client} client
* @returns
*/
async function connect (client) {
assert(!client[kConnecting])
assert(!client[kHTTPContext])
let { host, hostname, protocol, port } = client[kUrl]
// Resolve ipv6
if (hostname[0] === '[') {
const idx = hostname.indexOf(']')
assert(idx !== -1)
const ip = hostname.substring(1, idx)
assert(net.isIPv6(ip))
hostname = ip
}
client[kConnecting] = true
if (channels.beforeConnect.hasSubscribers) {
channels.beforeConnect.publish({
connectParams: {
host,
hostname,
protocol,
port,
version: client[kHTTPContext]?.version,
servername: client[kServerName],
localAddress: client[kLocalAddress]
},
connector: client[kConnector]
})
}
try {
const socket = await new Promise((resolve, reject) => {
client[kConnector]({
host,
hostname,
protocol,
port,
servername: client[kServerName],
localAddress: client[kLocalAddress]
}, (err, socket) => {
if (err) {
reject(err)
} else {
resolve(socket)
}
})
})
if (client.destroyed) {
util.destroy(socket.on('error', noop), new ClientDestroyedError())
return
}
assert(socket)
try {
client[kHTTPContext] = socket.alpnProtocol === 'h2'
? await connectH2(client, socket)
: await connectH1(client, socket)
} catch (err) {
socket.destroy().on('error', noop)
throw err
}
client[kConnecting] = false
socket[kCounter] = 0
socket[kMaxRequests] = client[kMaxRequests]
socket[kClient] = client
socket[kError] = null
if (channels.connected.hasSubscribers) {
channels.connected.publish({
connectParams: {
host,
hostname,
protocol,
port,
version: client[kHTTPContext]?.version,
servername: client[kServerName],
localAddress: client[kLocalAddress]
},
connector: client[kConnector],
socket
})
}
client.emit('connect', client[kUrl], [client])
} catch (err) {
if (client.destroyed) {
return
}
client[kConnecting] = false
if (channels.connectError.hasSubscribers) {
channels.connectError.publish({
connectParams: {
host,
hostname,
protocol,
port,
version: client[kHTTPContext]?.version,
servername: client[kServerName],
localAddress: client[kLocalAddress]
},
connector: client[kConnector],
error: err
})
}
if (err.code === 'ERR_TLS_CERT_ALTNAME_INVALID') {
assert(client[kRunning] === 0)
while (client[kPending] > 0 && client[kQueue][client[kPendingIdx]].servername === client[kServerName]) {
const request = client[kQueue][client[kPendingIdx]++]
util.errorRequest(client, request, err)
}
} else {
onError(client, err)
}
client.emit('connectionError', client[kUrl], [client], err)
}
client[kResume]()
}
function emitDrain (client) {
client[kNeedDrain] = 0
client.emit('drain', client[kUrl], [client])
}
function resume (client, sync) {
if (client[kResuming] === 2) {
return
}
client[kResuming] = 2
_resume(client, sync)
client[kResuming] = 0
if (client[kRunningIdx] > 256) {
client[kQueue].splice(0, client[kRunningIdx])
client[kPendingIdx] -= client[kRunningIdx]
client[kRunningIdx] = 0
}
}
function _resume (client, sync) {
while (true) {
if (client.destroyed) {
assert(client[kPending] === 0)
return
}
if (client[kClosedResolve] && !client[kSize]) {
client[kClosedResolve]()
client[kClosedResolve] = null
return
}
if (client[kHTTPContext]) {
client[kHTTPContext].resume()
}
if (client[kBusy]) {
client[kNeedDrain] = 2
} else if (client[kNeedDrain] === 2) {
if (sync) {
client[kNeedDrain] = 1
queueMicrotask(() => emitDrain(client))
} else {
emitDrain(client)
}
continue
}
if (client[kPending] === 0) {
return
}
if (client[kRunning] >= (getPipelining(client) || 1)) {
return
}
const request = client[kQueue][client[kPendingIdx]]
if (client[kUrl].protocol === 'https:' && client[kServerName] !== request.servername) {
if (client[kRunning] > 0) {
return
}
client[kServerName] = request.servername
client[kHTTPContext]?.destroy(new InformationalError('servername changed'), () => {
client[kHTTPContext] = null
resume(client)
})
}
if (client[kConnecting]) {
return
}
if (!client[kHTTPContext]) {
connect(client)
return
}
if (client[kHTTPContext].destroyed) {
return
}
if (client[kHTTPContext].busy(request)) {
return
}
if (!request.aborted && client[kHTTPContext].write(request)) {
client[kPendingIdx]++
} else {
client[kQueue].splice(client[kPendingIdx], 1)
}
}
}
module.exports = Client
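
A short sketch of the Client above; the origin is a placeholder. pipelining bounds how many requests may run concurrently before [kDispatch] signals back-pressure through the drain mechanism.
'use strict'
const { Client } = require('undici')

const client = new Client('http://localhost:3000', {
  pipelining: 2, // up to two pipelined HTTP/1.1 requests on the single connection
  keepAliveTimeout: 4000 // same value the constructor defaults to
})

async function main () {
  // Both requests share one socket; the second is pipelined behind the first.
  const [a, b] = await Promise.all([
    client.request({ path: '/a', method: 'GET' }),
    client.request({ path: '/b', method: 'GET' })
  ])
  console.log(a.statusCode, b.statusCode)
  await Promise.all([a.body.dump(), b.body.dump()])
  await client.close()
}

main().catch(console.error)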

node_modules/undici/lib/dispatcher/dispatcher-base.js generated vendored Normal file

@@ -0,0 +1,161 @@
'use strict'
const Dispatcher = require('./dispatcher')
const UnwrapHandler = require('../handler/unwrap-handler')
const {
ClientDestroyedError,
ClientClosedError,
InvalidArgumentError
} = require('../core/errors')
const { kDestroy, kClose, kClosed, kDestroyed, kDispatch } = require('../core/symbols')
const kOnDestroyed = Symbol('onDestroyed')
const kOnClosed = Symbol('onClosed')
class DispatcherBase extends Dispatcher {
constructor () {
super()
this[kDestroyed] = false
this[kOnDestroyed] = null
this[kClosed] = false
this[kOnClosed] = []
}
get destroyed () {
return this[kDestroyed]
}
get closed () {
return this[kClosed]
}
close (callback) {
if (callback === undefined) {
return new Promise((resolve, reject) => {
this.close((err, data) => {
return err ? reject(err) : resolve(data)
})
})
}
if (typeof callback !== 'function') {
throw new InvalidArgumentError('invalid callback')
}
if (this[kDestroyed]) {
queueMicrotask(() => callback(new ClientDestroyedError(), null))
return
}
if (this[kClosed]) {
if (this[kOnClosed]) {
this[kOnClosed].push(callback)
} else {
queueMicrotask(() => callback(null, null))
}
return
}
this[kClosed] = true
this[kOnClosed].push(callback)
const onClosed = () => {
const callbacks = this[kOnClosed]
this[kOnClosed] = null
for (let i = 0; i < callbacks.length; i++) {
callbacks[i](null, null)
}
}
// Should not error.
this[kClose]()
.then(() => this.destroy())
.then(() => {
queueMicrotask(onClosed)
})
}
destroy (err, callback) {
if (typeof err === 'function') {
callback = err
err = null
}
if (callback === undefined) {
return new Promise((resolve, reject) => {
this.destroy(err, (err, data) => {
return err ? /* istanbul ignore next: should never error */ reject(err) : resolve(data)
})
})
}
if (typeof callback !== 'function') {
throw new InvalidArgumentError('invalid callback')
}
if (this[kDestroyed]) {
if (this[kOnDestroyed]) {
this[kOnDestroyed].push(callback)
} else {
queueMicrotask(() => callback(null, null))
}
return
}
if (!err) {
err = new ClientDestroyedError()
}
this[kDestroyed] = true
this[kOnDestroyed] = this[kOnDestroyed] || []
this[kOnDestroyed].push(callback)
const onDestroyed = () => {
const callbacks = this[kOnDestroyed]
this[kOnDestroyed] = null
for (let i = 0; i < callbacks.length; i++) {
callbacks[i](null, null)
}
}
// Should not error.
this[kDestroy](err).then(() => {
queueMicrotask(onDestroyed)
})
}
dispatch (opts, handler) {
if (!handler || typeof handler !== 'object') {
throw new InvalidArgumentError('handler must be an object')
}
handler = UnwrapHandler.unwrap(handler)
try {
if (!opts || typeof opts !== 'object') {
throw new InvalidArgumentError('opts must be an object.')
}
if (this[kDestroyed] || this[kOnDestroyed]) {
throw new ClientDestroyedError()
}
if (this[kClosed]) {
throw new ClientClosedError()
}
return this[kDispatch](opts, handler)
} catch (err) {
if (typeof handler.onError !== 'function') {
throw err
}
handler.onError(err)
return false
}
}
}
module.exports = DispatcherBase

node_modules/undici/lib/dispatcher/dispatcher.js generated vendored Normal file

@@ -0,0 +1,48 @@
'use strict'
const EventEmitter = require('node:events')
const WrapHandler = require('../handler/wrap-handler')
const wrapInterceptor = (dispatch) => (opts, handler) => dispatch(opts, WrapHandler.wrap(handler))
class Dispatcher extends EventEmitter {
dispatch () {
throw new Error('not implemented')
}
close () {
throw new Error('not implemented')
}
destroy () {
throw new Error('not implemented')
}
compose (...args) {
// So we handle [interceptor1, interceptor2] or interceptor1, interceptor2, ...
const interceptors = Array.isArray(args[0]) ? args[0] : args
let dispatch = this.dispatch.bind(this)
for (const interceptor of interceptors) {
if (interceptor == null) {
continue
}
if (typeof interceptor !== 'function') {
throw new TypeError(`invalid interceptor, expected function received ${typeof interceptor}`)
}
dispatch = interceptor(dispatch)
dispatch = wrapInterceptor(dispatch)
if (dispatch == null || typeof dispatch !== 'function' || dispatch.length !== 2) {
throw new TypeError('invalid interceptor')
}
}
return new Proxy(this, {
get: (target, key) => key === 'dispatch' ? dispatch : target[key]
})
}
}
module.exports = Dispatcher
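
A sketch of the interceptor contract that compose() enforces above: an interceptor receives a dispatch function and must return a new two-argument (opts, handler) dispatch. The header name is a hypothetical example and object-form headers are assumed.
'use strict'
const { Agent, request } = require('undici')

// Hypothetical interceptor that stamps a header on every dispatched request
// (object-form headers are assumed for brevity).
function addHeaderInterceptor (dispatch) {
  return function interceptedDispatch (opts, handler) {
    opts = { ...opts, headers: { ...opts.headers, 'x-example-trace': '1' } }
    return dispatch(opts, handler)
  }
}

// compose() returns a Proxy whose `dispatch` is the wrapped chain; the Agent itself is untouched.
const agent = new Agent().compose(addHeaderInterceptor)

request('https://example.com/', { dispatcher: agent })
  .then(async ({ statusCode, body }) => {
    console.log(statusCode)
    await body.dump()
  })
  .catch(console.error)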

node_modules/undici/lib/dispatcher/env-http-proxy-agent.js generated vendored Normal file

@@ -0,0 +1,151 @@
'use strict'
const DispatcherBase = require('./dispatcher-base')
const { kClose, kDestroy, kClosed, kDestroyed, kDispatch, kNoProxyAgent, kHttpProxyAgent, kHttpsProxyAgent } = require('../core/symbols')
const ProxyAgent = require('./proxy-agent')
const Agent = require('./agent')
const DEFAULT_PORTS = {
'http:': 80,
'https:': 443
}
class EnvHttpProxyAgent extends DispatcherBase {
#noProxyValue = null
#noProxyEntries = null
#opts = null
constructor (opts = {}) {
super()
this.#opts = opts
const { httpProxy, httpsProxy, noProxy, ...agentOpts } = opts
this[kNoProxyAgent] = new Agent(agentOpts)
const HTTP_PROXY = httpProxy ?? process.env.http_proxy ?? process.env.HTTP_PROXY
if (HTTP_PROXY) {
this[kHttpProxyAgent] = new ProxyAgent({ ...agentOpts, uri: HTTP_PROXY })
} else {
this[kHttpProxyAgent] = this[kNoProxyAgent]
}
const HTTPS_PROXY = httpsProxy ?? process.env.https_proxy ?? process.env.HTTPS_PROXY
if (HTTPS_PROXY) {
this[kHttpsProxyAgent] = new ProxyAgent({ ...agentOpts, uri: HTTPS_PROXY })
} else {
this[kHttpsProxyAgent] = this[kHttpProxyAgent]
}
this.#parseNoProxy()
}
[kDispatch] (opts, handler) {
const url = new URL(opts.origin)
const agent = this.#getProxyAgentForUrl(url)
return agent.dispatch(opts, handler)
}
async [kClose] () {
await this[kNoProxyAgent].close()
if (!this[kHttpProxyAgent][kClosed]) {
await this[kHttpProxyAgent].close()
}
if (!this[kHttpsProxyAgent][kClosed]) {
await this[kHttpsProxyAgent].close()
}
}
async [kDestroy] (err) {
await this[kNoProxyAgent].destroy(err)
if (!this[kHttpProxyAgent][kDestroyed]) {
await this[kHttpProxyAgent].destroy(err)
}
if (!this[kHttpsProxyAgent][kDestroyed]) {
await this[kHttpsProxyAgent].destroy(err)
}
}
#getProxyAgentForUrl (url) {
let { protocol, host: hostname, port } = url
// Stripping ports in this way instead of using parsedUrl.hostname to make
// sure that the brackets around IPv6 addresses are kept.
hostname = hostname.replace(/:\d*$/, '').toLowerCase()
port = Number.parseInt(port, 10) || DEFAULT_PORTS[protocol] || 0
if (!this.#shouldProxy(hostname, port)) {
return this[kNoProxyAgent]
}
if (protocol === 'https:') {
return this[kHttpsProxyAgent]
}
return this[kHttpProxyAgent]
}
#shouldProxy (hostname, port) {
if (this.#noProxyChanged) {
this.#parseNoProxy()
}
if (this.#noProxyEntries.length === 0) {
return true // Always proxy if NO_PROXY is not set or empty.
}
if (this.#noProxyValue === '*') {
return false // Never proxy if wildcard is set.
}
for (let i = 0; i < this.#noProxyEntries.length; i++) {
const entry = this.#noProxyEntries[i]
if (entry.port && entry.port !== port) {
continue // Skip if ports don't match.
}
if (!/^[.*]/.test(entry.hostname)) {
// No wildcard prefix, so only an exact hostname match disables the proxy.
if (hostname === entry.hostname) {
return false
}
} else {
// Don't proxy if the hostname ends with the no_proxy host.
if (hostname.endsWith(entry.hostname.replace(/^\*/, ''))) {
return false
}
}
}
return true
}
#parseNoProxy () {
const noProxyValue = this.#opts.noProxy ?? this.#noProxyEnv
const noProxySplit = noProxyValue.split(/[,\s]/)
const noProxyEntries = []
for (let i = 0; i < noProxySplit.length; i++) {
const entry = noProxySplit[i]
if (!entry) {
continue
}
const parsed = entry.match(/^(.+):(\d+)$/)
noProxyEntries.push({
hostname: (parsed ? parsed[1] : entry).toLowerCase(),
port: parsed ? Number.parseInt(parsed[2], 10) : 0
})
}
this.#noProxyValue = noProxyValue
this.#noProxyEntries = noProxyEntries
}
get #noProxyChanged () {
if (this.#opts.noProxy !== undefined) {
return false
}
return this.#noProxyValue !== this.#noProxyEnv
}
get #noProxyEnv () {
return process.env.no_proxy ?? process.env.NO_PROXY ?? ''
}
}
module.exports = EnvHttpProxyAgent
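
A usage sketch for the EnvHttpProxyAgent above; proxy URLs and hosts are placeholders. Explicit options take precedence over the HTTP_PROXY / HTTPS_PROXY / NO_PROXY environment variables read in the constructor.
'use strict'
const { EnvHttpProxyAgent, setGlobalDispatcher } = require('undici')

const agent = new EnvHttpProxyAgent({
  httpProxy: 'http://proxy.internal:8080',
  httpsProxy: 'http://proxy.internal:8080',
  noProxy: 'localhost,.internal.example.com:8443' // per-entry ports are honoured by #shouldProxy
})

// Each request now picks the no-proxy, http-proxy or https-proxy agent per origin.
setGlobalDispatcher(agent)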

node_modules/undici/lib/dispatcher/fixed-queue.js generated vendored Normal file

@@ -0,0 +1,159 @@
'use strict'
// Extracted from node/lib/internal/fixed_queue.js
// Currently optimal queue size, tested on V8 6.0 - 6.6. Must be power of two.
const kSize = 2048
const kMask = kSize - 1
// The FixedQueue is implemented as a singly-linked list of fixed-size
// circular buffers. It looks something like this:
//
// head tail
// | |
// v v
// +-----------+ <-----\ +-----------+ <------\ +-----------+
// | [null] | \----- | next | \------- | next |
// +-----------+ +-----------+ +-----------+
// | item | <-- bottom | item | <-- bottom | undefined |
// | item | | item | | undefined |
// | item | | item | | undefined |
// | item | | item | | undefined |
// | item | | item | bottom --> | item |
// | item | | item | | item |
// | ... | | ... | | ... |
// | item | | item | | item |
// | item | | item | | item |
// | undefined | <-- top | item | | item |
// | undefined | | item | | item |
// | undefined | | undefined | <-- top top --> | undefined |
// +-----------+ +-----------+ +-----------+
//
// Or, if there is only one circular buffer, it looks something
// like either of these:
//
// head tail head tail
// | | | |
// v v v v
// +-----------+ +-----------+
// | [null] | | [null] |
// +-----------+ +-----------+
// | undefined | | item |
// | undefined | | item |
// | item | <-- bottom top --> | undefined |
// | item | | undefined |
// | undefined | <-- top bottom --> | item |
// | undefined | | item |
// +-----------+ +-----------+
//
// Adding a value means moving `top` forward by one, removing means
// moving `bottom` forward by one. After reaching the end, the queue
// wraps around.
//
// When `top === bottom` the current queue is empty and when
// `top + 1 === bottom` it's full. This wastes a single space of storage
// but allows much quicker checks.
/**
* @type {FixedCircularBuffer}
* @template T
*/
class FixedCircularBuffer {
constructor () {
/**
* @type {number}
*/
this.bottom = 0
/**
* @type {number}
*/
this.top = 0
/**
* @type {Array<T|undefined>}
*/
this.list = new Array(kSize).fill(undefined)
/**
* @type {T|null}
*/
this.next = null
}
/**
* @returns {boolean}
*/
isEmpty () {
return this.top === this.bottom
}
/**
* @returns {boolean}
*/
isFull () {
return ((this.top + 1) & kMask) === this.bottom
}
/**
* @param {T} data
* @returns {void}
*/
push (data) {
this.list[this.top] = data
this.top = (this.top + 1) & kMask
}
/**
* @returns {T|null}
*/
shift () {
const nextItem = this.list[this.bottom]
if (nextItem === undefined) { return null }
this.list[this.bottom] = undefined
this.bottom = (this.bottom + 1) & kMask
return nextItem
}
}
/**
* @template T
*/
module.exports = class FixedQueue {
constructor () {
/**
* @type {FixedCircularBuffer<T>}
*/
this.head = this.tail = new FixedCircularBuffer()
}
/**
* @returns {boolean}
*/
isEmpty () {
return this.head.isEmpty()
}
/**
* @param {T} data
*/
push (data) {
if (this.head.isFull()) {
// Head is full: create a new buffer, set the old head's `.next` to it,
// and make it the new head.
this.head = this.head.next = new FixedCircularBuffer()
}
this.head.push(data)
}
/**
* @returns {T|null}
*/
shift () {
const tail = this.tail
const next = tail.shift()
if (tail.isEmpty() && tail.next !== null) {
// If there is another queue, it forms the new tail.
this.tail = tail.next
tail.next = null
}
return next
}
}
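
FixedQueue is the internal FIFO consumed by pool-base.js below; a tiny sketch of its push/shift behaviour, queueing the same { opts, handler }-like items used there.
'use strict'
const FixedQueue = require('./fixed-queue') // resolves when run from lib/dispatcher/

const queue = new FixedQueue()
queue.push({ opts: { path: '/a' } })
queue.push({ opts: { path: '/b' } })

console.log(queue.shift().opts.path) // '/a'
console.log(queue.shift().opts.path) // '/b'
console.log(queue.isEmpty()) // true; a new 2048-slot buffer is chained only when the head fills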

node_modules/undici/lib/dispatcher/pool-base.js generated vendored Normal file

@@ -0,0 +1,194 @@
'use strict'
const DispatcherBase = require('./dispatcher-base')
const FixedQueue = require('./fixed-queue')
const { kConnected, kSize, kRunning, kPending, kQueued, kBusy, kFree, kUrl, kClose, kDestroy, kDispatch } = require('../core/symbols')
const PoolStats = require('./pool-stats')
const kClients = Symbol('clients')
const kNeedDrain = Symbol('needDrain')
const kQueue = Symbol('queue')
const kClosedResolve = Symbol('closed resolve')
const kOnDrain = Symbol('onDrain')
const kOnConnect = Symbol('onConnect')
const kOnDisconnect = Symbol('onDisconnect')
const kOnConnectionError = Symbol('onConnectionError')
const kGetDispatcher = Symbol('get dispatcher')
const kAddClient = Symbol('add client')
const kRemoveClient = Symbol('remove client')
const kStats = Symbol('stats')
class PoolBase extends DispatcherBase {
constructor () {
super()
this[kQueue] = new FixedQueue()
this[kClients] = []
this[kQueued] = 0
const pool = this
this[kOnDrain] = function onDrain (origin, targets) {
const queue = pool[kQueue]
let needDrain = false
while (!needDrain) {
const item = queue.shift()
if (!item) {
break
}
pool[kQueued]--
needDrain = !this.dispatch(item.opts, item.handler)
}
this[kNeedDrain] = needDrain
if (!this[kNeedDrain] && pool[kNeedDrain]) {
pool[kNeedDrain] = false
pool.emit('drain', origin, [pool, ...targets])
}
if (pool[kClosedResolve] && queue.isEmpty()) {
Promise
.all(pool[kClients].map(c => c.close()))
.then(pool[kClosedResolve])
}
}
this[kOnConnect] = (origin, targets) => {
pool.emit('connect', origin, [pool, ...targets])
}
this[kOnDisconnect] = (origin, targets, err) => {
pool.emit('disconnect', origin, [pool, ...targets], err)
}
this[kOnConnectionError] = (origin, targets, err) => {
pool.emit('connectionError', origin, [pool, ...targets], err)
}
this[kStats] = new PoolStats(this)
}
get [kBusy] () {
return this[kNeedDrain]
}
get [kConnected] () {
return this[kClients].filter(client => client[kConnected]).length
}
get [kFree] () {
return this[kClients].filter(client => client[kConnected] && !client[kNeedDrain]).length
}
get [kPending] () {
let ret = this[kQueued]
for (const { [kPending]: pending } of this[kClients]) {
ret += pending
}
return ret
}
get [kRunning] () {
let ret = 0
for (const { [kRunning]: running } of this[kClients]) {
ret += running
}
return ret
}
get [kSize] () {
let ret = this[kQueued]
for (const { [kSize]: size } of this[kClients]) {
ret += size
}
return ret
}
get stats () {
return this[kStats]
}
async [kClose] () {
if (this[kQueue].isEmpty()) {
await Promise.all(this[kClients].map(c => c.close()))
} else {
await new Promise((resolve) => {
this[kClosedResolve] = resolve
})
}
}
async [kDestroy] (err) {
while (true) {
const item = this[kQueue].shift()
if (!item) {
break
}
item.handler.onError(err)
}
await Promise.all(this[kClients].map(c => c.destroy(err)))
}
[kDispatch] (opts, handler) {
const dispatcher = this[kGetDispatcher]()
if (!dispatcher) {
this[kNeedDrain] = true
this[kQueue].push({ opts, handler })
this[kQueued]++
} else if (!dispatcher.dispatch(opts, handler)) {
dispatcher[kNeedDrain] = true
this[kNeedDrain] = !this[kGetDispatcher]()
}
return !this[kNeedDrain]
}
[kAddClient] (client) {
client
.on('drain', this[kOnDrain])
.on('connect', this[kOnConnect])
.on('disconnect', this[kOnDisconnect])
.on('connectionError', this[kOnConnectionError])
this[kClients].push(client)
if (this[kNeedDrain]) {
queueMicrotask(() => {
if (this[kNeedDrain]) {
this[kOnDrain](client[kUrl], [this, client])
}
})
}
return this
}
[kRemoveClient] (client) {
client.close(() => {
const idx = this[kClients].indexOf(client)
if (idx !== -1) {
this[kClients].splice(idx, 1)
}
})
this[kNeedDrain] = this[kClients].some(dispatcher => (
!dispatcher[kNeedDrain] &&
dispatcher.closed !== true &&
dispatcher.destroyed !== true
))
}
}
module.exports = {
PoolBase,
kClients,
kNeedDrain,
kAddClient,
kRemoveClient,
kGetDispatcher
}

node_modules/undici/lib/dispatcher/pool-stats.js generated vendored Normal file

@@ -0,0 +1,36 @@
'use strict'
const { kFree, kConnected, kPending, kQueued, kRunning, kSize } = require('../core/symbols')
const kPool = Symbol('pool')
class PoolStats {
constructor (pool) {
this[kPool] = pool
}
get connected () {
return this[kPool][kConnected]
}
get free () {
return this[kPool][kFree]
}
get pending () {
return this[kPool][kPending]
}
get queued () {
return this[kPool][kQueued]
}
get running () {
return this[kPool][kRunning]
}
get size () {
return this[kPool][kSize]
}
}
module.exports = PoolStats

node_modules/undici/lib/dispatcher/pool.js generated vendored Normal file

@@ -0,0 +1,104 @@
'use strict'
const {
PoolBase,
kClients,
kNeedDrain,
kAddClient,
kGetDispatcher
} = require('./pool-base')
const Client = require('./client')
const {
InvalidArgumentError
} = require('../core/errors')
const util = require('../core/util')
const { kUrl } = require('../core/symbols')
const buildConnector = require('../core/connect')
const kOptions = Symbol('options')
const kConnections = Symbol('connections')
const kFactory = Symbol('factory')
function defaultFactory (origin, opts) {
return new Client(origin, opts)
}
class Pool extends PoolBase {
constructor (origin, {
connections,
factory = defaultFactory,
connect,
connectTimeout,
tls,
maxCachedSessions,
socketPath,
autoSelectFamily,
autoSelectFamilyAttemptTimeout,
allowH2,
...options
} = {}) {
if (connections != null && (!Number.isFinite(connections) || connections < 0)) {
throw new InvalidArgumentError('invalid connections')
}
if (typeof factory !== 'function') {
throw new InvalidArgumentError('factory must be a function.')
}
if (connect != null && typeof connect !== 'function' && typeof connect !== 'object') {
throw new InvalidArgumentError('connect must be a function or an object')
}
super()
if (typeof connect !== 'function') {
connect = buildConnector({
...tls,
maxCachedSessions,
allowH2,
socketPath,
timeout: connectTimeout,
...(typeof autoSelectFamily === 'boolean' ? { autoSelectFamily, autoSelectFamilyAttemptTimeout } : undefined),
...connect
})
}
this[kConnections] = connections || null
this[kUrl] = util.parseOrigin(origin)
this[kOptions] = { ...util.deepClone(options), connect, allowH2 }
this[kOptions].interceptors = options.interceptors
? { ...options.interceptors }
: undefined
this[kFactory] = factory
this.on('connectionError', (origin, targets, error) => {
// If a connection error occurs, we remove the client from the pool,
// and emit a connectionError event. They will not be re-used.
// Fixes https://github.com/nodejs/undici/issues/3895
for (const target of targets) {
// Do not use kRemoveClient here, as it will close the client,
// but the client cannot be closed in this state.
const idx = this[kClients].indexOf(target)
if (idx !== -1) {
this[kClients].splice(idx, 1)
}
}
})
}
[kGetDispatcher] () {
for (const client of this[kClients]) {
if (!client[kNeedDrain]) {
return client
}
}
if (!this[kConnections] || this[kClients].length < this[kConnections]) {
const dispatcher = this[kFactory](this[kUrl], this[kOptions])
this[kAddClient](dispatcher)
return dispatcher
}
}
}
module.exports = Pool
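
Pool lazily creates up to the configured number of Client instances per origin and hands each request to the first client that is not saturated. A short usage sketch against the package root export (origin, path and connection count are placeholders):

'use strict'
const { Pool } = require('undici')

const pool = new Pool('http://localhost:3000', { connections: 10 })

async function main () {
  // request() comes from the dispatcher API; the pool picks a free client via
  // kGetDispatcher, or spawns a new one while under the connection limit.
  const { statusCode, body } = await pool.request({ path: '/', method: 'GET' })
  console.log(statusCode, await body.text())
  await pool.close()
}

main()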

189
node_modules/undici/lib/dispatcher/proxy-agent.js generated vendored Normal file

@ -0,0 +1,189 @@
'use strict'
const { kProxy, kClose, kDestroy } = require('../core/symbols')
const { URL } = require('node:url')
const Agent = require('./agent')
const Pool = require('./pool')
const DispatcherBase = require('./dispatcher-base')
const { InvalidArgumentError, RequestAbortedError, SecureProxyConnectionError } = require('../core/errors')
const buildConnector = require('../core/connect')
const kAgent = Symbol('proxy agent')
const kClient = Symbol('proxy client')
const kProxyHeaders = Symbol('proxy headers')
const kRequestTls = Symbol('request tls settings')
const kProxyTls = Symbol('proxy tls settings')
const kConnectEndpoint = Symbol('connect endpoint function')
function defaultProtocolPort (protocol) {
return protocol === 'https:' ? 443 : 80
}
function defaultFactory (origin, opts) {
return new Pool(origin, opts)
}
const noop = () => {}
class ProxyAgent extends DispatcherBase {
constructor (opts) {
if (!opts || (typeof opts === 'object' && !(opts instanceof URL) && !opts.uri)) {
throw new InvalidArgumentError('Proxy uri is mandatory')
}
const { clientFactory = defaultFactory } = opts
if (typeof clientFactory !== 'function') {
throw new InvalidArgumentError('Proxy opts.clientFactory must be a function.')
}
super()
const url = this.#getUrl(opts)
const { href, origin, port, protocol, username, password, hostname: proxyHostname } = url
this[kProxy] = { uri: href, protocol }
this[kRequestTls] = opts.requestTls
this[kProxyTls] = opts.proxyTls
this[kProxyHeaders] = opts.headers || {}
if (opts.auth && opts.token) {
throw new InvalidArgumentError('opts.auth cannot be used in combination with opts.token')
} else if (opts.auth) {
/* @deprecated in favour of opts.token */
this[kProxyHeaders]['proxy-authorization'] = `Basic ${opts.auth}`
} else if (opts.token) {
this[kProxyHeaders]['proxy-authorization'] = opts.token
} else if (username && password) {
this[kProxyHeaders]['proxy-authorization'] = `Basic ${Buffer.from(`${decodeURIComponent(username)}:${decodeURIComponent(password)}`).toString('base64')}`
}
const connect = buildConnector({ ...opts.proxyTls })
this[kConnectEndpoint] = buildConnector({ ...opts.requestTls })
this[kClient] = clientFactory(url, { connect })
this[kAgent] = new Agent({
...opts,
connect: async (opts, callback) => {
let requestedPath = opts.host
if (!opts.port) {
requestedPath += `:${defaultProtocolPort(opts.protocol)}`
}
try {
const { socket, statusCode } = await this[kClient].connect({
origin,
port,
path: requestedPath,
signal: opts.signal,
headers: {
...this[kProxyHeaders],
host: opts.host
},
servername: this[kProxyTls]?.servername || proxyHostname
})
if (statusCode !== 200) {
socket.on('error', noop).destroy()
callback(new RequestAbortedError(`Proxy response (${statusCode}) !== 200 when HTTP Tunneling`))
}
if (opts.protocol !== 'https:') {
callback(null, socket)
return
}
let servername
if (this[kRequestTls]) {
servername = this[kRequestTls].servername
} else {
servername = opts.servername
}
this[kConnectEndpoint]({ ...opts, servername, httpSocket: socket }, callback)
} catch (err) {
if (err.code === 'ERR_TLS_CERT_ALTNAME_INVALID') {
// Throw a custom error to avoid loop in client.js#connect
callback(new SecureProxyConnectionError(err))
} else {
callback(err)
}
}
}
})
}
dispatch (opts, handler) {
const headers = buildHeaders(opts.headers)
throwIfProxyAuthIsSent(headers)
if (headers && !('host' in headers) && !('Host' in headers)) {
const { host } = new URL(opts.origin)
headers.host = host
}
return this[kAgent].dispatch(
{
...opts,
headers
},
handler
)
}
/**
* @param {import('../types/proxy-agent').ProxyAgent.Options | string | URL} opts
* @returns {URL}
*/
#getUrl (opts) {
if (typeof opts === 'string') {
return new URL(opts)
} else if (opts instanceof URL) {
return opts
} else {
return new URL(opts.uri)
}
}
async [kClose] () {
await this[kAgent].close()
await this[kClient].close()
}
async [kDestroy] () {
await this[kAgent].destroy()
await this[kClient].destroy()
}
}
/**
* @param {string[] | Record<string, string>} headers
* @returns {Record<string, string>}
*/
function buildHeaders (headers) {
// When using undici.fetch, the headers list is stored
// as an array.
if (Array.isArray(headers)) {
/** @type {Record<string, string>} */
const headersPair = {}
for (let i = 0; i < headers.length; i += 2) {
headersPair[headers[i]] = headers[i + 1]
}
return headersPair
}
return headers
}
/**
* @param {Record<string, string>} headers
*
 * Previous versions of ProxyAgent allowed the Proxy-Authorization header to be set
 * on individual requests. That was changed, and this check was added to avoid a
 * security vulnerability for end users.
 * It should be removed in the next major version for performance reasons.
*/
function throwIfProxyAuthIsSent (headers) {
const existProxyAuth = headers && Object.keys(headers)
.find((key) => key.toLowerCase() === 'proxy-authorization')
if (existProxyAuth) {
throw new InvalidArgumentError('Proxy-Authorization should be sent in ProxyAgent constructor')
}
}
module.exports = ProxyAgent
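
ProxyAgent establishes an HTTP CONNECT tunnel through the configured proxy and dispatches the original request over that socket via an inner Agent. A usage sketch with a placeholder proxy URL and token; credentials must go through the constructor, since throwIfProxyAuthIsSent rejects a proxy-authorization request header:

'use strict'
const { ProxyAgent, setGlobalDispatcher, request } = require('undici')

const proxyAgent = new ProxyAgent({
  uri: 'http://proxy.example.com:8080',
  token: 'Bearer <placeholder>' // forwarded as the proxy-authorization header
})

async function main () {
  // Per-request dispatcher...
  const { statusCode } = await request('https://example.com', { dispatcher: proxyAgent })
  console.log(statusCode)

  // ...or install it globally so request()/fetch() use the proxy by default.
  setGlobalDispatcher(proxyAgent)
}

main()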

35
node_modules/undici/lib/dispatcher/retry-agent.js generated vendored Normal file

@ -0,0 +1,35 @@
'use strict'
const Dispatcher = require('./dispatcher')
const RetryHandler = require('../handler/retry-handler')
class RetryAgent extends Dispatcher {
#agent = null
#options = null
constructor (agent, options = {}) {
super(options)
this.#agent = agent
this.#options = options
}
dispatch (opts, handler) {
const retry = new RetryHandler({
...opts,
retryOptions: this.#options
}, {
dispatch: this.#agent.dispatch.bind(this.#agent),
handler
})
return this.#agent.dispatch(opts, retry)
}
close () {
return this.#agent.close()
}
destroy () {
return this.#agent.destroy()
}
}
module.exports = RetryAgent
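
RetryAgent wraps any existing dispatcher and re-dispatches failed requests through RetryHandler, so retry policy lives outside the underlying Agent. A usage sketch with placeholder retry option values (the option names follow RetryHandler's retryOptions):

'use strict'
const { Agent, RetryAgent, request } = require('undici')

// Every dispatch through this agent is retried by RetryHandler on retryable
// errors and status codes, up to maxRetries attempts.
const agent = new RetryAgent(new Agent(), {
  maxRetries: 3,
  minTimeout: 500 // ms before the first retry
})

async function main () {
  const { statusCode, body } = await request('https://example.com', { dispatcher: agent })
  console.log(statusCode)
  await body.text()
}

main()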