MediaWiki  1.28.1
JobRunner.php
Go to the documentation of this file.
1 <?php
30 
// Job queue runner utility: pops jobs off the JobQueueGroup queues and
// executes them, handling per-type backoff/throttling, replica DB lag,
// and memory limits.
// NOTE(review): this listing is a doxygen text dump; the leading numerals
// on each line are extraction artifacts, not part of the code.
37 class JobRunner implements LoggerAwareInterface {
39  protected $debug; // callable|null: debug output handler, set via setDebugHandler()
40 
44  protected $logger; // LoggerInterface: PSR-3 logger, set via setLogger()
45 
46  const MAX_ALLOWED_LAG = 3; // abort if more than this much DB lag is present
47  const LAG_CHECK_PERIOD = 1.0; // check replica DB lag this many seconds
48  const ERROR_BACKOFF_TTL = 1; // seconds to back off a queue due to errors
49 
/**
 * Set the handler used to emit debug output lines.
 *
 * @param callable|null $debug Callback that receives formatted debug strings
 */
public function setDebugHandler( $debug ) {
	$this->debug = $debug;
}
56 
/**
 * Set the PSR-3 logger used for job run/error reporting.
 *
 * @param LoggerInterface $logger
 */
public function setLogger( LoggerInterface $logger ) {
	$this->logger = $logger;
}
64 
/**
 * @param LoggerInterface|null $logger Logger to use; defaults to the
 *  'runJobs' logging channel when omitted
 */
public function __construct( LoggerInterface $logger = null ) {
	// A LoggerInterface instance is always truthy, so ?: only falls
	// through for the null default.
	$this->setLogger( $logger ?: LoggerFactory::getInstance( 'runJobs' ) );
}
74 
/**
 * Run jobs of the specified number/type for the specified time.
 *
 * Pops jobs from the JobQueueGroup queues and executes them until no more
 * are ready or a limit is hit. $options keys:
 *   - type: job type to run, or false for any type (default false)
 *   - maxJobs: maximum number of jobs to run, or false for no cap
 *   - maxTime: maximum wall-clock seconds to run, or false for no cap
 *   - throttle: whether to respect per-type backoff throttling
 *
 * @param array $options
 * @return array Map with 'jobs' (per-job result rows), 'reached' (why the
 *  loop stopped), 'backoffs' (type => expiry), and 'elapsed' (total job ms)
 *
 * NOTE(review): doxygen extraction artifact - each line below carries its
 * original file line number, and some original lines are missing (see the
 * pop() call). Verify against the upstream 1.28 source before editing.
 */
99  public function run( array $options ) {
100  global $wgJobClasses, $wgTrxProfilerLimits;
101 
102  $response = [ 'jobs' => [], 'reached' => 'none-ready' ];
103 
104  $type = isset( $options['type'] ) ? $options['type'] : false;
105  $maxJobs = isset( $options['maxJobs'] ) ? $options['maxJobs'] : false;
106  $maxTime = isset( $options['maxTime'] ) ? $options['maxTime'] : false;
107  $noThrottle = isset( $options['throttle'] ) && !$options['throttle'];
108 
109  // Bail if job type is invalid
110  if ( $type !== false && !isset( $wgJobClasses[$type] ) ) {
111  $response['reached'] = 'none-possible';
112  return $response;
113  }
114  // Bail out if DB is in read-only mode
115  if ( wfReadOnly() ) {
116  $response['reached'] = 'read-only';
117  return $response;
118  }
119 
120  $lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
121  // Bail out if there is too much DB lag.
122  // This check should not block as we want to try other wiki queues.
123  list( , $maxLag ) = $lbFactory->getMainLB( wfWikiID() )->getMaxLag();
124  if ( $maxLag >= self::MAX_ALLOWED_LAG ) {
125  $response['reached'] = 'replica-lag-limit';
126  return $response;
127  }
128 
129  // Flush any pending DB writes for sanity
130  $lbFactory->commitAll( __METHOD__ );
131 
132  // Catch huge single updates that lead to replica DB lag
133  $trxProfiler = Profiler::instance()->getTransactionProfiler();
134  $trxProfiler->setLogger( LoggerFactory::getInstance( 'DBPerformance' ) );
135  $trxProfiler->setExpectations( $wgTrxProfilerLimits['JobRunner'], __METHOD__ );
136 
137  // Some jobs types should not run until a certain timestamp
138  $backoffs = []; // map of (type => UNIX expiry)
139  $backoffDeltas = []; // map of (type => seconds)
140  $wait = 'wait'; // block to read backoffs the first time
141 
142  $group = JobQueueGroup::singleton();
143  $stats = MediaWikiServices::getInstance()->getStatsdDataFactory();
144  $jobsPopped = 0;
145  $timeMsTotal = 0;
146  $startTime = microtime( true ); // time since jobs started running
147  // Initialized to 1 (just past the epoch) so the first iteration's
148  // "$timePassed >= LAG_CHECK_PERIOD" test is true and lag is checked.
147  $lastCheckTime = 1; // timestamp of last replica DB check
148  do {
149  // Sync the persistent backoffs with concurrent runners
150  $backoffs = $this->syncBackoffDeltas( $backoffs, $backoffDeltas, $wait );
151  $blacklist = $noThrottle ? [] : array_keys( $backoffs );
152  $wait = 'nowait'; // less important now
153 
154  if ( $type === false ) {
155  $job = $group->pop(
// NOTE(review): extraction dropped the first pop() arguments here
// (original lines 156-157) - upstream passes the default type and
// cache flags ahead of $blacklist. Confirm against the 1.28 source.
158  $blacklist
159  );
160  } elseif ( in_array( $type, $blacklist ) ) {
161  $job = false; // requested queue in backoff state
162  } else {
163  $job = $group->pop( $type ); // job from a single queue
164  }
165  $lbFactory->commitMasterChanges( __METHOD__ ); // flush any JobQueueDB writes
166 
167  if ( $job ) { // found a job
168  ++$jobsPopped;
169  $popTime = time();
170  $jType = $job->getType();
171 
172  WebRequest::overrideRequestId( $job->getRequestId() );
173 
174  // Back off of certain jobs for a while (for throttling and for errors)
175  $ttw = $this->getBackoffTimeToWait( $job );
176  if ( $ttw > 0 ) {
177  // Always add the delta for other runners in case the time running the
178  // job negated the backoff for each individually but not collectively.
179  $backoffDeltas[$jType] = isset( $backoffDeltas[$jType] )
180  ? $backoffDeltas[$jType] + $ttw
181  : $ttw;
182  $backoffs = $this->syncBackoffDeltas( $backoffs, $backoffDeltas, $wait );
183  }
184 
185  $info = $this->executeJob( $job, $lbFactory, $stats, $popTime );
186  if ( $info['status'] !== false || !$job->allowRetries() ) {
187  $group->ack( $job ); // succeeded or job cannot be retried
188  $lbFactory->commitMasterChanges( __METHOD__ ); // flush any JobQueueDB writes
189  }
190 
191  // Back off of certain jobs for a while (for throttling and for errors)
// Only 1-in-50 failures extend the backoff, to avoid all runners
// piling error TTLs onto the same queue at once.
192  if ( $info['status'] === false && mt_rand( 0, 49 ) == 0 ) {
193  $ttw = max( $ttw, self::ERROR_BACKOFF_TTL ); // too many errors
194  $backoffDeltas[$jType] = isset( $backoffDeltas[$jType] )
195  ? $backoffDeltas[$jType] + $ttw
196  : $ttw;
197  }
198 
199  $response['jobs'][] = [
200  'type' => $jType,
201  'status' => ( $info['status'] === false ) ? 'failed' : 'ok',
202  'error' => $info['error'],
203  'time' => $info['timeMs']
204  ];
205  $timeMsTotal += $info['timeMs'];
206 
207  // Break out if we hit the job count or wall time limits...
208  if ( $maxJobs && $jobsPopped >= $maxJobs ) {
209  $response['reached'] = 'job-limit';
210  break;
211  } elseif ( $maxTime && ( microtime( true ) - $startTime ) > $maxTime ) {
212  $response['reached'] = 'time-limit';
213  break;
214  }
215 
216  // Don't let any of the main DB replica DBs get backed up.
217  // This only waits for so long before exiting and letting
218  // other wikis in the farm (on different masters) get a chance.
219  $timePassed = microtime( true ) - $lastCheckTime;
220  if ( $timePassed >= self::LAG_CHECK_PERIOD || $timePassed < 0 ) {
221  try {
222  $lbFactory->waitForReplication( [
223  'ifWritesSince' => $lastCheckTime,
224  'timeout' => self::MAX_ALLOWED_LAG
225  ] );
226  } catch ( DBReplicationWaitError $e ) {
227  $response['reached'] = 'replica-lag-limit';
228  break;
229  }
230  $lastCheckTime = microtime( true );
231  }
232  // Don't let any queue replica DBs/backups fall behind
233  if ( $jobsPopped > 0 && ( $jobsPopped % 100 ) == 0 ) {
234  $group->waitForBackups();
235  }
236 
237  // Bail if near-OOM instead of in a job
238  if ( !$this->checkMemoryOK() ) {
239  $response['reached'] = 'memory-limit';
240  break;
241  }
242  }
243  } while ( $job ); // stop when there are no jobs
244 
245  // Sync the persistent backoffs for the next runJobs.php pass
246  if ( $backoffDeltas ) {
247  $this->syncBackoffDeltas( $backoffs, $backoffDeltas, 'wait' );
248  }
249 
250  $response['backoffs'] = $backoffs;
251  $response['elapsed'] = $timeMsTotal;
252 
253  return $response;
254  }
255 
/**
 * Run a single job, commit its DB writes, and record timing/RSS metrics.
 *
 * @param Job $job The popped job to execute
 * @param LBFactory $lbFactory Load balancer factory (for transaction scope)
 * @param mixed $stats Statsd data factory used for the timing metrics below
 * @param int $popTime UNIX timestamp when the job was popped
 * @return array Map with 'status' (false on failure), 'error', 'timeMs'
 *
 * NOTE(review): doxygen extraction artifact - leading numerals are original
 * line numbers, and lines 279, 281 and 289 are missing from this dump.
 */
263  private function executeJob( Job $job, LBFactory $lbFactory, $stats, $popTime ) {
264  $jType = $job->getType();
265  $msg = $job->toString() . " STARTING";
266  $this->logger->debug( $msg );
267  $this->debugCallback( $msg );
268 
269  // Run the job...
270  $rssStart = $this->getMaxRssKb();
271  $jobStartTime = microtime( true );
272  try {
273  $fnameTrxOwner = get_class( $job ) . '::run'; // give run() outer scope
274  $lbFactory->beginMasterChanges( $fnameTrxOwner );
275  $status = $job->run();
276  $error = $job->getLastError();
277  $this->commitMasterChanges( $lbFactory, $job, $fnameTrxOwner );
278  // Run any deferred update tasks; doUpdates() manages transactions itself
// NOTE(review): original line 279 is missing here; per the comment above
// and this dump's index, it invoked DeferredUpdates::doUpdates(). Confirm
// against upstream.
280  } catch ( Exception $e ) {
// NOTE(review): original line 281 is missing here; this dump's index
// references MWExceptionHandler::rollbackMasterChangesAndLog( $e ),
// which would roll back the open transaction round. Confirm upstream.
282  $status = false;
283  $error = get_class( $e ) . ': ' . $e->getMessage();
284  }
285  // Always attempt to call teardown() even if Job throws exception.
286  try {
287  $job->teardown( $status );
288  } catch ( Exception $e ) {
// NOTE(review): original line 289 is missing here; this dump's index
// references MWExceptionHandler::logException( $e ). Teardown failures
// are deliberately not re-thrown.
290  }
291 
292  // Commit all outstanding connections that are in a transaction
293  // to get a fresh repeatable read snapshot on every connection.
294  // Note that jobs are still responsible for handling replica DB lag.
295  $lbFactory->flushReplicaSnapshots( __METHOD__ );
296  // Clear out title cache data from prior snapshots
297  MediaWikiServices::getInstance()->getLinkCache()->clear();
298  $timeMs = intval( ( microtime( true ) - $jobStartTime ) * 1000 );
299  $rssEnd = $this->getMaxRssKb();
300 
301  // Record how long jobs wait before getting popped
302  $readyTs = $job->getReadyTimestamp();
303  if ( $readyTs ) {
304  $pickupDelay = max( 0, $popTime - $readyTs );
305  $stats->timing( 'jobqueue.pickup_delay.all', 1000 * $pickupDelay );
306  $stats->timing( "jobqueue.pickup_delay.$jType", 1000 * $pickupDelay );
307  }
308  // Record root job age for jobs being run
309  $rootTimestamp = $job->getRootJobParams()['rootJobTimestamp'];
310  if ( $rootTimestamp ) {
311  $age = max( 0, $popTime - wfTimestamp( TS_UNIX, $rootTimestamp ) );
312  $stats->timing( "jobqueue.pickup_root_age.$jType", 1000 * $age );
313  }
314  // Track the execution time for jobs
315  $stats->timing( "jobqueue.run.$jType", $timeMs );
316  // Track RSS increases for jobs (in case of memory leaks)
317  if ( $rssStart && $rssEnd ) {
318  $stats->updateCount( "jobqueue.rss_delta.$jType", $rssEnd - $rssStart );
319  }
320 
321  if ( $status === false ) {
322  $msg = $job->toString() . " t=$timeMs error={$error}";
323  $this->logger->error( $msg );
324  $this->debugCallback( $msg );
325  } else {
326  $msg = $job->toString() . " t=$timeMs good";
327  $this->logger->info( $msg );
328  $this->debugCallback( $msg );
329  }
330 
331  return [ 'status' => $status, 'error' => $error, 'timeMs' => $timeMs ];
332  }
333 
/**
 * Peak resident set size of this process, when the platform exposes it.
 *
 * @return int|null Peak RSS in kilobytes, or null if getrusage() data
 *  is unavailable
 */
private function getMaxRssKb() {
	// see http://linux.die.net/man/2/getrusage
	$usage = wfGetRusage() ?: [];
	if ( isset( $usage['ru_maxrss'] ) ) {
		return (int)$usage['ru_maxrss'];
	}

	return null;
}
342 
/**
 * Decide how many seconds the queue for this job's type should back off,
 * based on the configured throughput ($wgJobBackoffThrottling, in work
 * items per second) and the job's work item count.
 *
 * @param Job $job
 * @return int Seconds to back off (0 means not throttled)
 */
private function getBackoffTimeToWait( Job $job ) {
	global $wgJobBackoffThrottling;

	$jType = $job->getType();
	// DuplicateJob instances did no actual work, so they never throttle
	if ( $job instanceof DuplicateJob || !isset( $wgJobBackoffThrottling[$jType] ) ) {
		return 0; // not throttled
	}

	$itemsPerSecond = $wgJobBackoffThrottling[$jType];
	$items = $job->workItemCount();
	if ( $itemsPerSecond <= 0 || $items <= 0 ) {
		return 0; // not throttled
	}

	// Randomized rounding: round the fractional part up with probability
	// equal to the remainder, so the expected backoff matches the quotient.
	$exactSeconds = $items / $itemsPerSecond;
	$seconds = (int)floor( $exactSeconds );
	if ( mt_rand() / mt_getrandmax() < ( $exactSeconds - $seconds ) ) {
		$seconds++;
	}

	return $seconds;
}
373 
/**
 * Get the previous backoff expiries from persistent storage.
 *
 * On I/O or lock acquisition failure this returns the given $backoffs
 * unchanged rather than waiting or failing.
 *
 * @param array $backoffs Map of (job type => UNIX timestamp)
 * @param string $mode Lock wait mode - "wait" blocks on the shared lock,
 *  "nowait" gives up immediately if it is contended
 * @return array Map of (job type => backoff expiry timestamp)
 */
private function loadBackoffs( array $backoffs, $mode = 'wait' ) {
	$file = wfTempDir() . '/mw-runJobs-backoffs.json';
	if ( is_file( $file ) ) {
		$noblock = ( $mode === 'nowait' ) ? LOCK_NB : 0;
		$handle = fopen( $file, 'rb' );
		// The file may have been removed or made unreadable between the
		// is_file() check and the open; don't call flock() on false.
		if ( !$handle ) {
			return $backoffs;
		}
		if ( !flock( $handle, LOCK_SH | $noblock ) ) {
			fclose( $handle );
			return $backoffs; // don't wait on lock
		}
		$content = stream_get_contents( $handle );
		flock( $handle, LOCK_UN );
		fclose( $handle );
		$ctime = microtime( true );
		$cBackoffs = json_decode( $content, true ) ?: [];
		// Prune entries whose expiry has already passed
		foreach ( $cBackoffs as $type => $timestamp ) {
			if ( $timestamp < $ctime ) {
				unset( $cBackoffs[$type] );
			}
		}
	} else {
		$cBackoffs = [];
	}

	return $cBackoffs;
}
407 
/**
 * Merge the current backoff expiries from persistent storage.
 *
 * The shared file is read under an exclusive lock, the in-process $deltas
 * are folded into it, expired entries are pruned, and the merged map is
 * written back. On open/lock failure the original $backoffs are returned
 * and $deltas are left intact so they can be synced on a later attempt.
 *
 * @param array $backoffs Map of (job type => UNIX timestamp)
 * @param array &$deltas Map of (job type => seconds); emptied on success
 * @param string $mode Lock wait mode - "wait" or "nowait"
 * @return array Map of (job type => backoff expiry timestamp)
 */
private function syncBackoffDeltas( array $backoffs, array &$deltas, $mode = 'wait' ) {
	if ( !$deltas ) {
		return $this->loadBackoffs( $backoffs, $mode );
	}

	$noblock = ( $mode === 'nowait' ) ? LOCK_NB : 0;
	$file = wfTempDir() . '/mw-runJobs-backoffs.json';
	// Mode "c" creates the file if missing but does NOT truncate it on
	// open; the previous "w" mode truncated immediately, so the
	// stream_get_contents() below always read an empty file and the
	// backoffs written by concurrent runners were silently discarded.
	$handle = fopen( $file, 'cb+' );
	if ( !$handle ) {
		return $backoffs; // could not open the backoff file
	}
	if ( !flock( $handle, LOCK_EX | $noblock ) ) {
		fclose( $handle );
		return $backoffs; // don't wait on lock
	}
	$ctime = microtime( true );
	$content = stream_get_contents( $handle );
	$cBackoffs = json_decode( $content, true ) ?: [];
	// Fold our pending deltas into the persisted expiries; restart the
	// window from now for entries that already expired.
	foreach ( $deltas as $type => $seconds ) {
		$cBackoffs[$type] = isset( $cBackoffs[$type] ) && $cBackoffs[$type] >= $ctime
			? $cBackoffs[$type] + $seconds
			: $ctime + $seconds;
	}
	// Prune entries whose expiry has already passed
	foreach ( $cBackoffs as $type => $timestamp ) {
		if ( $timestamp < $ctime ) {
			unset( $cBackoffs[$type] );
		}
	}
	// Rewrite the whole file from offset 0 with the merged map
	ftruncate( $handle, 0 );
	rewind( $handle );
	fwrite( $handle, json_encode( $cBackoffs ) );
	flock( $handle, LOCK_UN );
	fclose( $handle );

	$deltas = [];

	return $cBackoffs;
}
453 
/**
 * Make sure that this script is not too close to the memory usage limit.
 *
 * @return bool False when usage has reached 95% of php's memory_limit
 */
private function checkMemoryOK() {
	static $maxBytes = null;
	if ( $maxBytes === null ) {
		$maxBytes = 0; // assume no limit unless one parses below
		$m = [];
		if ( preg_match( '!^(\d+)(k|m|g|)$!i', ini_get( 'memory_limit' ), $m ) ) {
			$conv = [ 'g' => 1073741824, 'm' => 1048576, 'k' => 1024, '' => 1 ];
			$maxBytes = $m[1] * $conv[strtolower( $m[2] )];
		}
	}

	$usedBytes = memory_get_usage();
	// No parsed limit (e.g. "-1") or comfortably under the 95% threshold
	if ( !$maxBytes || $usedBytes < 0.95 * $maxBytes ) {
		return true;
	}

	$msg = "Detected excessive memory usage ($usedBytes/$maxBytes).";
	$this->debugCallback( $msg );
	$this->logger->error( $msg );

	return false;
}
482 
/**
 * Send a timestamped message to the debug handler, if one was set.
 *
 * @param string $msg The message to log
 */
private function debugCallback( $msg ) {
	if ( !$this->debug ) {
		return;
	}
	$line = wfTimestamp( TS_DB ) . " $msg\n";
	call_user_func( $this->debug, $line );
}
492 
/**
 * Issue a commit on all masters that are currently in a transaction and
 * have made changes, serializing large write batches behind a named DB
 * lock so replicas can catch up before the commit lands.
 *
 * @param LBFactory $lbFactory
 * @param Job $job Job being run (used only for log messages here)
 * @param string $fnameTrxOwner Transaction owner name used when the round
 *  was opened via beginMasterChanges()
 * @throws DBError On timeout waiting for the serial-commit lock
 *
 * NOTE(review): doxygen extraction artifact - the leading numerals on each
 * line are original file line numbers, not code.
 */
504  private function commitMasterChanges( LBFactory $lbFactory, Job $job, $fnameTrxOwner ) {
505  global $wgJobSerialCommitThreshold;
506 
507  $time = false;
508  $lb = $lbFactory->getMainLB( wfWikiID() );
509  if ( $wgJobSerialCommitThreshold !== false && $lb->getServerCount() > 1 ) {
510  // Generally, there is one master connection to the local DB
511  $dbwSerial = $lb->getAnyOpenConnection( $lb->getWriterIndex() );
512  // We need natively blocking fast locks
513  if ( $dbwSerial && $dbwSerial->namedLocksEnqueue() ) {
514  $time = $dbwSerial->pendingWriteQueryDuration( $dbwSerial::ESTIMATE_DB_APPLY );
// Small write batches commit immediately; only batches at or above
// the threshold go through the serialized path below.
515  if ( $time < $wgJobSerialCommitThreshold ) {
516  $dbwSerial = false;
517  }
518  } else {
519  $dbwSerial = false;
520  }
521  } else {
522  // There are no replica DBs or writes are all to foreign DB (we don't handle that)
523  $dbwSerial = false;
524  }
525 
526  if ( !$dbwSerial ) {
527  $lbFactory->commitMasterChanges( $fnameTrxOwner );
528  return;
529  }
530 
531  $ms = intval( 1000 * $time );
532  $msg = $job->toString() . " COMMIT ENQUEUED [{$ms}ms of writes]";
533  $this->logger->info( $msg );
534  $this->debugCallback( $msg );
535 
536  // Wait for an exclusive lock to commit
537  if ( !$dbwSerial->lock( 'jobrunner-serial-commit', __METHOD__, 30 ) ) {
538  // This will trigger a rollback in the main loop
539  throw new DBError( $dbwSerial, "Timed out waiting on commit queue." );
540  }
// The lock is released via ScopedCallback even if waitForAll() or the
// commit below throws.
541  $unlocker = new ScopedCallback( function () use ( $dbwSerial ) {
542  $dbwSerial->unlock( 'jobrunner-serial-commit', __METHOD__ );
543  } );
544 
545  // Wait for the replica DBs to catch up
546  $pos = $lb->getMasterPos();
547  if ( $pos ) {
548  $lb->waitForAll( $pos );
549  }
550 
551  // Actually commit the DB master changes
552  $lbFactory->commitMasterChanges( $fnameTrxOwner );
553  ScopedCallback::consume( $unlocker );
554  }
555 }
beginMasterChanges($fname=__METHOD__)
Flush any master transaction snapshots and set DBO_TRX (if DBO_DEFAULT is set)
Definition: LBFactory.php:191
deferred txt A few of the database updates required by various functions here can be deferred until after the result page is displayed to the user For updating the view updating the linked to tables after a etc PHP does not yet have any way to tell the server to actually return and disconnect while still running these but it might have such a feature in the future We handle these by creating a deferred update object and putting those objects on a global list
Definition: deferred.txt:11
getType()
Definition: Job.php:121
Database error base class.
Definition: DBError.php:26
the array() calling protocol came about after MediaWiki 1.4rc1.
processing should stop and the error should be shown to the user * false
Definition: hooks.txt:189
callable null $debug
Debug output handler.
Definition: JobRunner.php:39
Apache License January AND DISTRIBUTION Definitions License shall mean the terms and conditions for use
div flags Integer display flags(NO_ACTION_LINK, NO_EXTRA_USER_LINKS) 'LogException'returning false will NOT prevent logging $e
Definition: hooks.txt:2102
const LAG_CHECK_PERIOD
Definition: JobRunner.php:47
setLogger(LoggerInterface $logger)
Definition: JobRunner.php:61
static instance()
Singleton.
Definition: Profiler.php:61
static doUpdates($mode= 'run', $stage=self::ALL)
Do any deferred updates and clear the list.
wfGetRusage()
Get system resource usage of current request context.
executeJob(Job $job, LBFactory $lbFactory, $stats, $popTime)
Definition: JobRunner.php:263
Class to both describe a background job and handle jobs.
Definition: Job.php:31
setDebugHandler($debug)
Definition: JobRunner.php:53
getReadyTimestamp()
Definition: Job.php:175
const MAX_ALLOWED_LAG
Definition: JobRunner.php:46
injection txt This is an overview of how MediaWiki makes use of dependency injection The design described here grew from the discussion of RFC T384 The term dependency this means that anything an object needs to operate should be injected from the the object itself should only know narrow no concrete implementation of the logic it relies on The requirement to inject everything typically results in an architecture that based on two main types of and essentially stateless service objects that use other service objects to operate on the value objects As of the beginning MediaWiki is only starting to use the DI approach Much of the code still relies on global state or direct resulting in a highly cyclical dependency MediaWikiServices
Definition: injection.txt:23
this hook is for auditing only $response
Definition: hooks.txt:802
getMainLB($domain=false)
when a variable name is used in a it is silently declared as a new local masking the global
Definition: design.txt:93
No-op job that does nothing.
commitMasterChanges($fname=__METHOD__, array $options=[])
Commit changes on all master connections.
Definition: LBFactory.php:203
loadBackoffs(array $backoffs, $mode= 'wait')
Get the previous backoff expiries from persistent storage On I/O or lock acquisition failure this ret...
Definition: JobRunner.php:382
const TS_UNIX
Unix time - the number of seconds since 1970-01-01 00:00:00 UTC.
Definition: defines.php:6
wfTimestamp($outputtype=TS_UNIX, $ts=0)
Get a timestamp string in one of various formats.
Exception class for replica DB wait timeouts.
wfReadOnly()
Check whether the wiki is in read-only mode.
wfTempDir()
Tries to get the system directory for temporary files.
run()
Run the job.
run(array $options)
Run jobs of the specified number/type for the specified time.
Definition: JobRunner.php:99
$wait
if($limit) $timestamp
this hook is for auditing only RecentChangesLinked and Watchlist RecentChangesLinked and Watchlist e g Watchlist removed from all revisions and log entries to which it was applied This gives extensions a chance to take it off their books as the deletion has already been partly carried out by this point or something similar the user will be unable to create the tag set and then return false from the hook function Ensure you consume the ChangeTagAfterDelete hook to carry out custom deletion actions as context called by AbstractContent::getParserOutput May be used to override the normal model specific rendering of page content as context as context $options
Definition: hooks.txt:1046
getRootJobParams()
Definition: Job.php:274
toString()
Definition: Job.php:337
static overrideRequestId($id)
Override the unique request ID.
Definition: WebRequest.php:288
wfWikiID()
Get an ASCII string identifying this wiki This is used as a prefix in memcached keys.
This document is intended to provide useful advice for parties seeking to redistribute MediaWiki to end users It s targeted particularly at maintainers for Linux since it s been observed that distribution packages of MediaWiki often break We ve consistently had to recommend that users seeking support use official tarballs instead of their distribution s and this often solves whatever problem the user is having It would be nice if this could such as
Definition: distributors.txt:9
static rollbackMasterChangesAndLog($e)
If there are any open database transactions, roll them back and log the stack trace of the exception ...
static singleton($wiki=false)
commitMasterChanges(LBFactory $lbFactory, Job $job, $fnameTrxOwner)
Issue a commit on all masters who are currently in a transaction and have made changes to the databas...
Definition: JobRunner.php:504
injection txt This is an overview of how MediaWiki makes use of dependency injection The design described here grew from the discussion of RFC T384 The term dependency this means that anything an object needs to operate should be injected from the the object itself should only know narrow no concrete implementation of the logic it relies on The requirement to inject everything typically results in an architecture that based on two main types of and essentially stateless service objects that use other service objects to operate on the value objects As of the beginning MediaWiki is only starting to use the DI approach Much of the code still relies on global state or direct resulting in a highly cyclical dependency which acts as the top level factory for services in MediaWiki which can be used to gain access to default instances of various services MediaWikiServices however also allows new services to be defined and default services to be redefined Services are defined or redefined by providing a callback the instantiator that will return a new instance of the service When it will create an instance of MediaWikiServices and populate it with the services defined in the files listed by thereby bootstrapping the DI framework Per $wgServiceWiringFiles lists includes ServiceWiring php
Definition: injection.txt:35
$lbFactory
Job queue runner utility methods.
Definition: JobRunner.php:37
this hook is for auditing only RecentChangesLinked and Watchlist RecentChangesLinked and Watchlist e g Watchlist removed from all revisions and log entries to which it was applied This gives extensions a chance to take it off their books as the deletion has already been partly carried out by this point or something similar the user will be unable to create the tag set and then return false from the hook function Ensure you consume the ChangeTagAfterDelete hook to carry out custom deletion actions as context called by AbstractContent::getParserOutput May be used to override the normal model specific rendering of page content $content
Definition: hooks.txt:1046
const ERROR_BACKOFF_TTL
Definition: JobRunner.php:48
if(count($args)< 1) $job
getBackoffTimeToWait(Job $job)
Definition: JobRunner.php:348
this hook is for auditing only RecentChangesLinked and Watchlist RecentChangesLinked and Watchlist e g Watchlist removed from all revisions and log entries to which it was applied This gives extensions a chance to take it off their books as the deletion has already been partly carried out by this point or something similar the user will be unable to create the tag set $status
Definition: hooks.txt:1046
MediaWiki Logger LoggerFactory implements a PSR[0] compatible message logging system Named Psr Log LoggerInterface instances can be obtained from the MediaWiki Logger LoggerFactory::getInstance() static method.MediaWiki\Logger\LoggerFactory expects a class implementing the MediaWiki\Logger\Spi interface to act as a factory for new Psr\Log\LoggerInterface instances.The"Spi"in MediaWiki\Logger\Spi stands for"service provider interface".An SPI is an API intended to be implemented or extended by a third party.This software design pattern is intended to enable framework extension and replaceable components.It is specifically used in the MediaWiki\Logger\LoggerFactory service to allow alternate PSR-3 logging implementations to be easily integrated with MediaWiki.The service provider interface allows the backend logging library to be implemented in multiple ways.The $wgMWLoggerDefaultSpi global provides the classname of the default MediaWiki\Logger\Spi implementation to be loaded at runtime.This can either be the name of a class implementing the MediaWiki\Logger\Spi with a zero argument const ructor or a callable that will return an MediaWiki\Logger\Spi instance.Alternately the MediaWiki\Logger\LoggerFactory MediaWiki Logger LoggerFactory
Definition: logger.txt:5
checkMemoryOK()
Make sure that this script is not too close to the memory usage limit.
Definition: JobRunner.php:459
getLastError()
Definition: Job.php:397
static logException($e)
Log an exception to the exception log (if enabled).
flushReplicaSnapshots($fname=__METHOD__)
Commit all replica DB transactions so as to flush any REPEATABLE-READ or SSI snapshot.
Definition: LBFactory.php:182
workItemCount()
Definition: Job.php:207
debugCallback($msg)
Log the job message.
Definition: JobRunner.php:487
do that in ParserLimitReportFormat instead use this to modify the parameters of the image and a DIV can begin in one section and end in another Make sure your code can handle that case gracefully See the EditSectionClearerLink extension for an example zero but section is usually empty its values are the globals values before the output is cached one of or reset my talk my contributions etc etc otherwise the built in rate limiting checks are if enabled allows for interception of redirect as a string mapping parameter names to values & $type
Definition: hooks.txt:2491
see documentation in includes Linker php for Linker::makeImageLink & $time
Definition: hooks.txt:1749
const TS_DB
MySQL DATETIME (YYYY-MM-DD HH:MM:SS)
Definition: defines.php:16
syncBackoffDeltas(array $backoffs, array &$deltas, $mode= 'wait')
Merge the current backoff expiries from persistent storage.
Definition: JobRunner.php:419
__construct(LoggerInterface $logger=null)
Definition: JobRunner.php:68
teardown($status)
Do any final cleanup after run(), deferred updates, and all DB commits happen.
Definition: Job.php:318