$logger = LoggerFactory::getInstance( 'runJobs' );
global $wgJobClasses, $wgTrxProfilerLimits;
$response = [ 'jobs' => [], 'reached' => 'none-ready' ];
$type = isset( $options['type'] ) ? $options['type'] : false;
$maxJobs = isset( $options['maxJobs'] ) ? $options['maxJobs'] : false;
$maxTime = isset( $options['maxTime'] ) ? $options['maxTime'] : false;
$noThrottle = isset( $options['throttle'] ) && !$options['throttle'];
if ( $type !== false && !isset( $wgJobClasses[$type] ) ) {
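For orientation, a minimal caller sketch (hypothetical option values; the constructor and option keys match this listing):

$runner = new JobRunner( LoggerFactory::getInstance( 'runJobs' ) );
// Run up to 100 jobs of one type, stopping after 30 seconds of wall-clock time.
$response = $runner->run( [ 'type' => 'refreshLinks', 'maxJobs' => 100, 'maxTime' => 30 ] );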
$lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
if ( $maxLag >= self::MAX_ALLOWED_LAG ) {
$response['reached'] = 'replica-lag-limit';
$trxProfiler->setLogger( LoggerFactory::getInstance( 'DBPerformance' ) );
$trxProfiler->setExpectations( $wgTrxProfilerLimits['JobRunner'], __METHOD__ );
$stats = MediaWikiServices::getInstance()->getStatsdDataFactory();
$startTime = microtime( true );
$blacklist = $noThrottle ? [] : array_keys( $backoffs );
if ( $type === false ) {
} elseif ( in_array( $type, $blacklist ) ) {
$job = $group->pop( $type );
$lbFactory->commitMasterChanges( __METHOD__ );
$jType = $job->getType();
$backoffDeltas[$jType] = isset( $backoffDeltas[$jType] )
    ? $backoffDeltas[$jType] + $ttw
    : $ttw;
if ( $info['status'] !== false || !$job->allowRetries() ) {
$lbFactory->commitMasterChanges( __METHOD__ );
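// The mt_rand( 0, 49 ) == 0 guard below samples roughly 1 in 50 failed jobs,
// applying an error backoff of at least ERROR_BACKOFF_TTL to the job type.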
if ( $info['status'] === false && mt_rand( 0, 49 ) == 0 ) {
$ttw = max( $ttw, self::ERROR_BACKOFF_TTL );
$backoffDeltas[$jType] = isset( $backoffDeltas[$jType] )
    ? $backoffDeltas[$jType] + $ttw
    : $ttw;
'status' => ( $info['status'] === false ) ? 'failed' : 'ok',
'error' => $info['error'],
'time' => $info['timeMs']
$timeMsTotal += $info['timeMs'];
if ( $maxJobs && $jobsPopped >= $maxJobs ) {
} elseif ( $maxTime && ( microtime( true ) - $startTime ) > $maxTime ) {
$timePassed = microtime( true ) - $lastCheckTime;
if ( $timePassed >= self::LAG_CHECK_PERIOD || $timePassed < 0 ) {
'ifWritesSince' => $lastCheckTime,
'timeout' => self::MAX_ALLOWED_LAG
$response['reached'] = 'replica-lag-limit';
$lastCheckTime = microtime( true );
if ( $jobsPopped > 0 && ( $jobsPopped % 100 ) == 0 ) {
$group->waitForBackups();
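// Every 100th popped job, the runner waits for any backup job queue servers
// to catch up, so a sustained run does not outpace queue replication.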
if ( $backoffDeltas ) {
$msg = $job->toString() . " STARTING";
$this->logger->debug( $msg );
$jobStartTime = microtime( true );
$fnameTrxOwner = get_class( $job ) . '::run';
$error = get_class( $e ) . ': ' . $e->getMessage();
MediaWikiServices::getInstance()->getLinkCache()->clear();
$timeMs = intval( ( microtime( true ) - $jobStartTime ) * 1000 );
$pickupDelay = max( 0, $popTime - $readyTs );
$stats->timing( 'jobqueue.pickup_delay.all', 1000 * $pickupDelay );
$stats->timing( "jobqueue.pickup_delay.$jType", 1000 * $pickupDelay );
if ( $rootTimestamp ) {
$stats->timing( "jobqueue.pickup_root_age.$jType", 1000 * $age );
$stats->timing( "jobqueue.run.$jType", $timeMs );
if ( $rssStart && $rssEnd ) {
$stats->updateCount( "jobqueue.rss_delta.$jType", $rssEnd - $rssStart );
$msg = $job->toString() . " t=$timeMs error={$error}";
$this->logger->error( $msg );
$msg = $job->toString() . " t=$timeMs good";
$this->logger->info( $msg );
return [ 'status' => $status, 'error' => $error, 'timeMs' => $timeMs ];
return isset( $info['ru_maxrss'] ) ? (int)$info['ru_maxrss'] : null;
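// On Linux, getrusage() reports ru_maxrss in kilobytes (units differ by
// platform, e.g. bytes on macOS), so the value returned here is a KB figure
// on typical servers.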
global $wgJobBackoffThrottling;
if ( !isset( $wgJobBackoffThrottling[$job->getType()] ) ||
$itemsPerSecond = $wgJobBackoffThrottling[$job->getType()];
if ( $itemsPerSecond <= 0 ) {
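// Stochastic rounding: the fractional part of the exact delay becomes the
// probability of adding one extra second, so over many jobs the average
// backoff equals the exact value.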
$seconds = floor( $exactSeconds );
$remainder = $exactSeconds - $seconds;
$seconds += ( mt_rand() / mt_getrandmax() < $remainder ) ? 1 : 0;
return (int)$seconds;
$file = wfTempDir() . '/mw-runJobs-backoffs.json';
if ( is_file( $file ) ) {
$noblock = ( $mode === 'nowait' ) ? LOCK_NB : 0;
$handle = fopen( $file, 'rb' );
if ( !flock( $handle, LOCK_SH | $noblock ) ) {
$content = stream_get_contents( $handle );
flock( $handle, LOCK_UN );
$ctime = microtime( true );
$cBackoffs = json_decode( $content, true ) ?: [];
unset( $cBackoffs[$type] );
$noblock = ( $mode === 'nowait' ) ? LOCK_NB : 0;
$file = wfTempDir() . '/mw-runJobs-backoffs.json';
$handle = fopen( $file, 'wb+' );
if ( !flock( $handle, LOCK_EX | $noblock ) ) {
$ctime = microtime( true );
$content = stream_get_contents( $handle );
$cBackoffs = json_decode( $content, true ) ?: [];
foreach ( $deltas as $type => $seconds ) {
$cBackoffs[$type] = isset( $cBackoffs[$type] ) && $cBackoffs[$type] >= $ctime
    ? $cBackoffs[$type] + $seconds
    : $ctime + $seconds;
unset( $cBackoffs[$type] );
ftruncate( $handle, 0 );
fwrite( $handle, json_encode( $cBackoffs ) );
flock( $handle, LOCK_UN );
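For illustration, the file written above holds a JSON map of job type to UNIX expiry timestamp; example content (hypothetical types and times):

{ "refreshLinks": 1467816300.25, "htmlCacheUpdate": 1467816420.5 }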
static $maxBytes = null;
if ( $maxBytes === null ) {
if ( preg_match( '!^(\d+)(k|m|g|)$!i', ini_get( 'memory_limit' ), $m ) ) {
list( , $num, $unit ) = $m;
$conv = [ 'g' => 1073741824, 'm' => 1048576, 'k' => 1024, '' => 1 ];
$maxBytes = $num * $conv[strtolower( $unit )];
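// This parses php.ini shorthand such as "128M" into bytes (128 * 1048576);
// an unlimited memory_limit of "-1" does not match the pattern.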
$usedBytes = memory_get_usage();
if ( $maxBytes && $usedBytes >= 0.95 * $maxBytes ) {
$msg = "Detected excessive memory usage ($usedBytes/$maxBytes).";
$this->logger->error( $msg );
if ( $this->debug ) {
global $wgJobSerialCommitThreshold;
if ( $wgJobSerialCommitThreshold !== false && $lb->getServerCount() > 1 ) {
$dbwSerial = $lb->getAnyOpenConnection( $lb->getWriterIndex() );
if ( $dbwSerial && $dbwSerial->namedLocksEnqueue() ) {
$time = $dbwSerial->pendingWriteQueryDuration( $dbwSerial::ESTIMATE_DB_APPLY );
if ( $time < $wgJobSerialCommitThreshold ) {
$ms = intval( 1000 * $time );
$msg = $job->toString() . " COMMIT ENQUEUED [{$ms}ms of writes]";
$this->logger->info( $msg );
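// Large write transactions funnel through a named lock below so that, one at
// a time, each waits for replicas to reach the master position before committing.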
if ( !$dbwSerial->lock( 'jobrunner-serial-commit', __METHOD__, 30 ) ) {
throw new DBError( $dbwSerial, "Timed out waiting on commit queue." );
$unlocker = new ScopedCallback( function () use ( $dbwSerial ) {
    $dbwSerial->unlock( 'jobrunner-serial-commit', __METHOD__ );
} );
$pos = $lb->getMasterPos();
$lb->waitForAll( $pos );
ScopedCallback::consume( $unlocker );
beginMasterChanges($fname=__METHOD__)
Flush any master transaction snapshots and set DBO_TRX (if DBO_DEFAULT is set).
Database error base class.
callable|null $debug
Debug output handler.
setLogger(LoggerInterface $logger)
static instance()
Singleton.
static doUpdates($mode='run', $stage=self::ALL)
Do any deferred updates and clear the list.
wfGetRusage()
Get system resource usage of current request context.
executeJob(Job $job, LBFactory $lbFactory, $stats, $popTime)
Class to both describe a background job and handle jobs.
No-op job that does nothing.
commitMasterChanges($fname=__METHOD__, array $options=[])
Commit changes on all master connections.
loadBackoffs(array $backoffs, $mode='wait')
Get the previous backoff expiries from persistent storage. On I/O or lock acquisition failure this ret...
const TS_UNIX
Unix time - the number of seconds since 1970-01-01 00:00:00 UTC.
wfTimestamp($outputtype=TS_UNIX, $ts=0)
Get a timestamp string in one of various formats.
Exception class for replica DB wait timeouts.
wfReadOnly()
Check whether the wiki is in read-only mode.
wfTempDir()
Tries to get the system directory for temporary files.
run(array $options)
Run jobs of the specified number/type for the specified time.
static overrideRequestId($id)
Override the unique request ID.
wfWikiID()
Get an ASCII string identifying this wiki. This is used as a prefix in memcached keys.
static rollbackMasterChangesAndLog($e)
If there are any open database transactions, roll them back and log the stack trace of the exception ...
static singleton($wiki=false)
commitMasterChanges(LBFactory $lbFactory, Job $job, $fnameTrxOwner)
Issue a commit on all masters who are currently in a transaction and have made changes to the databas...
Job queue runner utility methods.
getBackoffTimeToWait(Job $job)
MediaWiki\Logger\LoggerFactory implements a PSR-3 compatible message logging system. Named Psr\Log\LoggerInterface instances can be obtained from the MediaWiki\Logger\LoggerFactory::getInstance() static method. LoggerFactory expects a class implementing the MediaWiki\Logger\Spi interface to act as a factory for new Psr\Log\LoggerInterface instances. The "Spi" in MediaWiki\Logger\Spi stands for "service provider interface": an API intended to be implemented or extended by a third party. This software design pattern is intended to enable framework extension and replaceable components; it is specifically used in the MediaWiki\Logger\LoggerFactory service to allow alternate PSR-3 logging implementations to be easily integrated with MediaWiki. The $wgMWLoggerDefaultSpi global provides the class name of the default MediaWiki\Logger\Spi implementation to be loaded at runtime. This can be either the name of a class implementing MediaWiki\Logger\Spi with a zero-argument constructor, or a callable that will return a MediaWiki\Logger\Spi instance.
checkMemoryOK()
Make sure that this script is not too close to the memory usage limit.
static logException($e)
Log an exception to the exception log (if enabled).
flushReplicaSnapshots($fname=__METHOD__)
Commit all replica DB transactions so as to flush any REPEATABLE-READ or SSI snapshot.
debugCallback($msg)
Log the job message.
const TS_DB
MySQL DATETIME (YYYY-MM-DD HH:MM:SS)
syncBackoffDeltas(array $backoffs, array &$deltas, $mode='wait')
Merge the current backoff expiries from persistent storage.
__construct(LoggerInterface $logger=null)
teardown($status)
Do any final cleanup after run(), deferred updates, and all DB commits happen.