/*
 * Coverage summary for rateLimits.js (Istanbul/nyc report):
 *   Statements: 89.74% (35/39)
 *   Branches:   80%    (16/20)
 *   Functions:  100%   (6/6)
 *   Lines:      89.74% (35/39)
 */
'use strict';
 
const NodeCache = require( 'node-cache' );
const { makeWrappedResultEnvelope } = require( './utils.js' );
const { error, makeErrorInNormalForm } = require( '../function-schemata/javascript/src/error.js' );
const { wrapInZ6 } = require( '../function-schemata/javascript/src/utils.js' );
 
/**
 * Builds a Z22 result envelope whose error indicates that the orchestrator
 * rate limit has been exceeded.
 *
 * @param {number} limit the configured per-request operation limit
 * @return {Object} wrapped result envelope carrying the rate-limit error
 */
function rateLimitErrorFor( limit ) {
	const limitAsZ6 = wrapInZ6( limit.toString() );
	const normalError = makeErrorInNormalForm(
		error.orchestrator_rate_limit, [ limitAsZ6 ] );
	return makeWrappedResultEnvelope( null, normalError );
}
 
// Default TTL for rate-limit counters: fifteen minutes, in seconds.
const DEFAULT_TTL_ = 15 * 60;

/**
 * Determines the rate-limit cache TTL (in seconds) from the environment.
 *
 * Reads FUNCTION_ORCHESTRATOR_RATE_LIMIT_TTL_SECONDS and falls back to
 * DEFAULT_TTL_ when the variable is unset or not parseable as an integer.
 *
 * @param {Object} environment map of environment variables (e.g. process.env)
 * @return {number} TTL in seconds
 */
function getTTLForEnvironment( environment ) {
	const ttlString = environment.FUNCTION_ORCHESTRATOR_RATE_LIMIT_TTL_SECONDS;
	// Always pass the radix: without it, strings like '0x10' would be
	// interpreted as hexadecimal.
	let ttl = Number.parseInt( ttlString, 10 );
	if ( Number.isNaN( ttl ) ) {
		ttl = DEFAULT_TTL_;
	}
	return ttl;
}
 
/**
 * Implements a simple cache to count how many operations are currently attached to
 * a single request ID.
 */
/**
 * Simple TTL-backed counter tracking how many operations are currently
 * attached to a single request ID, so runaway requests can be rate-limited.
 */
class RateLimitCache {

	constructor( limit = 300, environment = null ) {
		const env = environment || process.env;
		this.limit_ = limit;
		// TODO (T340561): Parameterize the TTL; ensure it's much longer than the request timeout.
		this.idCounter_ = new NodeCache( { stdTTL: getTTLForEnvironment( env ) } );
	}

	/**
	 * Remove a request ID from the cache upon orchestration completion.
	 *
	 * @param {string} requestId ID for a request
	 */
	evict( requestId ) {
		if ( !requestId ) {
			return;
		}
		this.idCounter_.del( requestId );
	}

	/**
	 * Increase by one the count for a given request ID.
	 *
	 * @param {string} requestId ID for a request
	 * @return {Object|null} Z22 containing error if the request should be
	 *  rate-limited; null otherwise
	 */
	increment( requestId ) {
		if ( !requestId ) {
			return null;
		}
		// Seed the counter (and start its TTL) the first time this ID appears.
		if ( !this.idCounter_.has( requestId ) ) {
			this.idCounter_.set( requestId, 0 );
		}
		const updated = this.idCounter_.get( requestId ) + 1;
		this.idCounter_.set( requestId, updated );
		return ( updated > this.limit_ ) ? rateLimitErrorFor( this.limit_ ) : null;
	}

	/**
	 * Decrease by one the count for a given request ID.
	 *
	 * @param {string} requestId ID for a request
	 * @return {Object|null} Z22 containing error if the request should be
	 *  rate-limited; null otherwise
	 */
	decrement( requestId ) {
		if ( !requestId ) {
			return null;
		}
		if ( !this.idCounter_.has( requestId ) ) {
			this.idCounter_.set( requestId, 0 );
		}
		const updated = this.idCounter_.get( requestId ) - 1;
		this.idCounter_.set( requestId, updated );
		// Mirrors increment() for symmetry; a decrement cannot normally push
		// the count above the limit (coverage shows this branch never fires).
		return ( updated > this.limit_ ) ? rateLimitErrorFor( this.limit_ ) : null;
	}

}
 
// Shared singleton used by the orchestrator; the class itself is also
// exported so tests can construct instances with custom limits/environments.
const rateLimits = new RateLimitCache();

module.exports = { rateLimits, RateLimitCache };