MediaWiki  1.27.1
JobRunner.php
<?php

use MediaWiki\Logger\LoggerFactory;
use Psr\Log\LoggerAwareInterface;
use Psr\Log\LoggerInterface;

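/**
 * Job queue runner utility methods
 */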
class JobRunner implements LoggerAwareInterface {
	protected $debug;

	protected $logger;

	const MAX_ALLOWED_LAG = 3; // abort if more than this much DB lag is present
	const LAG_CHECK_PERIOD = 1.0; // check slave lag this often, in seconds
	const ERROR_BACKOFF_TTL = 1; // seconds to back off a queue due to errors

	public function setDebugHandler( $debug ) {
		$this->debug = $debug;
	}

	public function setLogger( LoggerInterface $logger ) {
		$this->logger = $logger;
	}

	public function __construct( LoggerInterface $logger = null ) {
		if ( $logger === null ) {
			$logger = LoggerFactory::getInstance( 'runJobs' );
		}
		$this->setLogger( $logger );
	}

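	/**
	 * Run jobs of the specified number/type for the specified time.
	 *
	 * The response map contains:
	 *   - jobs     : list of (type, status, error, time) entries, one per executed job
	 *   - reached  : why the runner stopped; one of none-ready, none-possible, read-only,
	 *                slave-lag-limit, job-limit, time-limit, memory-limit
	 *   - backoffs : map of (job type => UNIX expiry) backoff times in effect
	 *   - elapsed  : total time spent executing jobs, in milliseconds
	 *
	 * @param array $options Map of parameters:
	 *   - type     : only run jobs of this type (false for any default type)
	 *   - maxJobs  : maximum number of jobs to run before stopping
	 *   - maxTime  : maximum wall-clock time in seconds before stopping
	 *   - throttle : whether to respect the per-type backoff throttling
	 * @return array Summary response that can easily be JSON serialized
	 */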
	public function run( array $options ) {
		global $wgJobClasses, $wgTrxProfilerLimits;

		$response = [ 'jobs' => [], 'reached' => 'none-ready' ];

		$type = isset( $options['type'] ) ? $options['type'] : false;
		$maxJobs = isset( $options['maxJobs'] ) ? $options['maxJobs'] : false;
		$maxTime = isset( $options['maxTime'] ) ? $options['maxTime'] : false;
		$noThrottle = isset( $options['throttle'] ) && !$options['throttle'];

		// Bail if job type is invalid
		if ( $type !== false && !isset( $wgJobClasses[$type] ) ) {
			$response['reached'] = 'none-possible';
			return $response;
		}
		// Bail out if DB is in read-only mode
		if ( wfReadOnly() ) {
			$response['reached'] = 'read-only';
			return $response;
		}
		// Bail out if there is too much DB lag.
		// This check should not block as we want to try other wiki queues.
		list( , $maxLag ) = wfGetLB( wfWikiID() )->getMaxLag();
		if ( $maxLag >= self::MAX_ALLOWED_LAG ) {
			$response['reached'] = 'slave-lag-limit';
			return $response;
		}

		// Flush any pending DB writes for sanity
		wfGetLBFactory()->commitAll( __METHOD__ );

		// Catch huge single updates that lead to slave lag
		$trxProfiler = Profiler::instance()->getTransactionProfiler();
		$trxProfiler->setLogger( LoggerFactory::getInstance( 'DBPerformance' ) );
		$trxProfiler->setExpectations( $wgTrxProfilerLimits['JobRunner'], __METHOD__ );

		// Some job types should not run until a certain timestamp
		$backoffs = []; // map of (type => UNIX expiry)
		$backoffDeltas = []; // map of (type => seconds)
		$wait = 'wait'; // block to read backoffs the first time

		$group = JobQueueGroup::singleton();
		$stats = RequestContext::getMain()->getStats();
		$jobsPopped = 0;
		$timeMsTotal = 0;
		$startTime = microtime( true ); // time since jobs started running
		$lastCheckTime = 1; // timestamp of last slave check
		do {
			// Sync the persistent backoffs with concurrent runners
			$backoffs = $this->syncBackoffDeltas( $backoffs, $backoffDeltas, $wait );
			$blacklist = $noThrottle ? [] : array_keys( $backoffs );
			$wait = 'nowait'; // less important now

			if ( $type === false ) {
				$job = $group->pop(
					JobQueueGroup::TYPE_DEFAULT,
					JobQueueGroup::USE_CACHE,
					$blacklist
				);
			} elseif ( in_array( $type, $blacklist ) ) {
				$job = false; // requested queue in backoff state
			} else {
				$job = $group->pop( $type ); // job from a single queue
			}

			if ( $job ) { // found a job
				++$jobsPopped;
				$popTime = time();
				$jType = $job->getType();

				WebRequest::overrideRequestId( $job->getRequestId() );

				// Back off of certain jobs for a while (for throttling and for errors)
				$ttw = $this->getBackoffTimeToWait( $job );
				if ( $ttw > 0 ) {
					// Always add the delta for other runners in case the time running the
					// job negated the backoff for each individually but not collectively.
					$backoffDeltas[$jType] = isset( $backoffDeltas[$jType] )
						? $backoffDeltas[$jType] + $ttw
						: $ttw;
					$backoffs = $this->syncBackoffDeltas( $backoffs, $backoffDeltas, $wait );
				}

				$info = $this->executeJob( $job, $stats, $popTime );
				if ( $info['status'] !== false || !$job->allowRetries() ) {
					$group->ack( $job ); // succeeded or job cannot be retried
				}

				// Back off of certain jobs for a while (for throttling and for errors)
				if ( $info['status'] === false && mt_rand( 0, 49 ) == 0 ) {
					$ttw = max( $ttw, self::ERROR_BACKOFF_TTL ); // too many errors
					$backoffDeltas[$jType] = isset( $backoffDeltas[$jType] )
						? $backoffDeltas[$jType] + $ttw
						: $ttw;
				}

				$response['jobs'][] = [
					'type' => $jType,
					'status' => ( $info['status'] === false ) ? 'failed' : 'ok',
					'error' => $info['error'],
					'time' => $info['timeMs']
				];
				$timeMsTotal += $info['timeMs'];

				// Break out if we hit the job count or wall time limits...
				if ( $maxJobs && $jobsPopped >= $maxJobs ) {
					$response['reached'] = 'job-limit';
					break;
				} elseif ( $maxTime && ( microtime( true ) - $startTime ) > $maxTime ) {
					$response['reached'] = 'time-limit';
					break;
				}

				// Don't let any of the main DB slaves get backed up.
				// This only waits for so long before exiting and letting
				// other wikis in the farm (on different masters) get a chance.
				$timePassed = microtime( true ) - $lastCheckTime;
				if ( $timePassed >= self::LAG_CHECK_PERIOD || $timePassed < 0 ) {
					try {
						wfGetLBFactory()->waitForReplication( [
							'ifWritesSince' => $lastCheckTime,
							'timeout' => self::MAX_ALLOWED_LAG
						] );
					} catch ( DBReplicationWaitError $e ) {
						$response['reached'] = 'slave-lag-limit';
						break;
					}
					$lastCheckTime = microtime( true );
				}
				// Don't let any queue slaves/backups fall behind
				if ( $jobsPopped > 0 && ( $jobsPopped % 100 ) == 0 ) {
					$group->waitForBackups();
				}

				// Bail if near-OOM instead of in a job
				if ( !$this->checkMemoryOK() ) {
					$response['reached'] = 'memory-limit';
					break;
				}
			}
		} while ( $job ); // stop when there are no jobs

		// Sync the persistent backoffs for the next runJobs.php pass
		if ( $backoffDeltas ) {
			$this->syncBackoffDeltas( $backoffs, $backoffDeltas, 'wait' );
		}

		$response['backoffs'] = $backoffs;
		$response['elapsed'] = $timeMsTotal;

		return $response;
	}

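	/**
	 * Execute a single job, commit any DB master changes it made, and record metrics.
	 *
	 * @param Job $job
	 * @param mixed $stats Metrics aggregator from RequestContext::getMain()->getStats()
	 * @param float $popTime UNIX timestamp when the job was popped from its queue
	 * @return array Map of (status => bool, error => string|null, timeMs => int)
	 */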
	private function executeJob( Job $job, $stats, $popTime ) {
		$jType = $job->getType();
		$msg = $job->toString() . " STARTING";
		$this->logger->debug( $msg );
		$this->debugCallback( $msg );

		// Run the job...
		$rssStart = $this->getMaxRssKb();
		$jobStartTime = microtime( true );
		try {
			$status = $job->run();
			$error = $job->getLastError();
			$this->commitMasterChanges( $job );

			DeferredUpdates::doUpdates();
			$this->commitMasterChanges( $job );
			$job->teardown();
		} catch ( Exception $e ) {
			MWExceptionHandler::rollbackMasterChangesAndLog( $e );
			$status = false;
			$error = get_class( $e ) . ': ' . $e->getMessage();
			MWExceptionHandler::logException( $e );
		}
		// Commit all outstanding connections that are in a transaction
		// to get a fresh repeatable read snapshot on every connection.
		// Note that jobs are still responsible for handling slave lag.
		wfGetLBFactory()->commitAll( __METHOD__ );
		// Clear out title cache data from prior snapshots
		LinkCache::singleton()->clear();
		$timeMs = intval( ( microtime( true ) - $jobStartTime ) * 1000 );
		$rssEnd = $this->getMaxRssKb();

		// Record how long jobs wait before getting popped
		$readyTs = $job->getReadyTimestamp();
		if ( $readyTs ) {
			$pickupDelay = max( 0, $popTime - $readyTs );
			$stats->timing( 'jobqueue.pickup_delay.all', 1000 * $pickupDelay );
			$stats->timing( "jobqueue.pickup_delay.$jType", 1000 * $pickupDelay );
		}
		// Record root job age for jobs being run
		$rootTimestamp = $job->getRootJobParams()['rootJobTimestamp'];
		if ( $rootTimestamp ) {
			$age = max( 0, $popTime - wfTimestamp( TS_UNIX, $rootTimestamp ) );
			$stats->timing( "jobqueue.pickup_root_age.$jType", 1000 * $age );
		}
		// Track the execution time for jobs
		$stats->timing( "jobqueue.run.$jType", $timeMs );
		// Track RSS increases for jobs (in case of memory leaks)
		if ( $rssStart && $rssEnd ) {
			$stats->increment( "jobqueue.rss_delta.$jType", $rssEnd - $rssStart );
		}

		if ( $status === false ) {
			$msg = $job->toString() . " t=$timeMs error={$error}";
			$this->logger->error( $msg );
			$this->debugCallback( $msg );
		} else {
			$msg = $job->toString() . " t=$timeMs good";
			$this->logger->info( $msg );
			$this->debugCallback( $msg );
		}

		return [ 'status' => $status, 'error' => $error, 'timeMs' => $timeMs ];
	}

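	/**
	 * @return int|null Peak RSS of the current process in KB per getrusage(), or null
	 *  if resource usage information is unavailable
	 */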
	private function getMaxRssKb() {
		$info = wfGetRusage() ?: [];
		// see http://linux.die.net/man/2/getrusage
		return isset( $info['ru_maxrss'] ) ? (int)$info['ru_maxrss'] : null;
	}

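	/**
	 * Compute how long to back off this job's queue based on $wgJobBackoffThrottling
	 * and the job's work item count, using randomized rounding for fractional seconds.
	 *
	 * @param Job $job
	 * @return int Seconds for this runner to avoid doing more jobs of this type
	 */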
	private function getBackoffTimeToWait( Job $job ) {
		global $wgJobBackoffThrottling;

		if ( !isset( $wgJobBackoffThrottling[$job->getType()] ) ||
			$job instanceof DuplicateJob // no work was done
		) {
			return 0; // not throttled
		}

		$itemsPerSecond = $wgJobBackoffThrottling[$job->getType()];
		if ( $itemsPerSecond <= 0 ) {
			return 0; // not throttled
		}

		$seconds = 0;
		if ( $job->workItemCount() > 0 ) {
			$exactSeconds = $job->workItemCount() / $itemsPerSecond;
			// use randomized rounding
			$seconds = floor( $exactSeconds );
			$remainder = $exactSeconds - $seconds;
			$seconds += ( mt_rand() / mt_getrandmax() < $remainder ) ? 1 : 0;
		}

		return (int)$seconds;
	}

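	/**
	 * Get the previous backoff expiries from persistent storage.
	 * On I/O or lock acquisition failure this returns the original $backoffs.
	 * Expired entries are pruned from the result.
	 *
	 * @param array $backoffs Map of (job type => UNIX expiry timestamp)
	 * @param string $mode Lock wait mode, either "wait" or "nowait"
	 * @return array Map of (job type => UNIX expiry timestamp)
	 */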
	private function loadBackoffs( array $backoffs, $mode = 'wait' ) {
		$file = wfTempDir() . '/mw-runJobs-backoffs.json';
		if ( is_file( $file ) ) {
			$noblock = ( $mode === 'nowait' ) ? LOCK_NB : 0;
			$handle = fopen( $file, 'rb' );
			if ( !flock( $handle, LOCK_SH | $noblock ) ) {
				fclose( $handle );
				return $backoffs; // don't wait on lock
			}
			$content = stream_get_contents( $handle );
			flock( $handle, LOCK_UN );
			fclose( $handle );
			$ctime = microtime( true );
			$cBackoffs = json_decode( $content, true ) ?: [];
			foreach ( $cBackoffs as $type => $timestamp ) {
				if ( $timestamp < $ctime ) {
					unset( $cBackoffs[$type] );
				}
			}
		} else {
			$cBackoffs = [];
		}

		return $cBackoffs;
	}

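	/**
	 * Merge the pending backoff deltas into the backoff expiries in persistent storage.
	 * The $deltas map is emptied on success. On I/O or lock acquisition failure this
	 * returns the original $backoffs and leaves $deltas untouched.
	 *
	 * @param array $backoffs Map of (job type => UNIX expiry timestamp)
	 * @param array &$deltas Map of (job type => seconds)
	 * @param string $mode Lock wait mode, either "wait" or "nowait"
	 * @return array Updated map of (job type => UNIX expiry timestamp)
	 */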
	private function syncBackoffDeltas( array $backoffs, array &$deltas, $mode = 'wait' ) {
		if ( !$deltas ) {
			return $this->loadBackoffs( $backoffs, $mode );
		}

		$noblock = ( $mode === 'nowait' ) ? LOCK_NB : 0;
		$file = wfTempDir() . '/mw-runJobs-backoffs.json';
		$handle = fopen( $file, 'wb+' );
		if ( !flock( $handle, LOCK_EX | $noblock ) ) {
			fclose( $handle );
			return $backoffs; // don't wait on lock
		}
		$ctime = microtime( true );
		$content = stream_get_contents( $handle );
		$cBackoffs = json_decode( $content, true ) ?: [];
		foreach ( $deltas as $type => $seconds ) {
			$cBackoffs[$type] = isset( $cBackoffs[$type] ) && $cBackoffs[$type] >= $ctime
				? $cBackoffs[$type] + $seconds
				: $ctime + $seconds;
		}
		foreach ( $cBackoffs as $type => $timestamp ) {
			if ( $timestamp < $ctime ) {
				unset( $cBackoffs[$type] );
			}
		}
		ftruncate( $handle, 0 );
		fwrite( $handle, json_encode( $cBackoffs ) );
		flock( $handle, LOCK_UN );
		fclose( $handle );

		$deltas = [];

		return $cBackoffs;
	}

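	/**
	 * Make sure that this script is not too close to the memory usage limit.
	 * This is checked between jobs so the runner can bail out before hitting an OOM
	 * in the middle of a job.
	 *
	 * @return bool False if usage is at or above 95% of the PHP memory_limit
	 */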
	private function checkMemoryOK() {
		static $maxBytes = null;
		if ( $maxBytes === null ) {
			$m = [];
			if ( preg_match( '!^(\d+)(k|m|g|)$!i', ini_get( 'memory_limit' ), $m ) ) {
				list( , $num, $unit ) = $m;
				$conv = [ 'g' => 1073741824, 'm' => 1048576, 'k' => 1024, '' => 1 ];
				$maxBytes = $num * $conv[strtolower( $unit )];
			} else {
				$maxBytes = 0;
			}
		}
		$usedBytes = memory_get_usage();
		if ( $maxBytes && $usedBytes >= 0.95 * $maxBytes ) {
			$msg = "Detected excessive memory usage ($usedBytes/$maxBytes).";
			$this->debugCallback( $msg );
			$this->logger->error( $msg );

			return false;
		}

		return true;
	}

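	/**
	 * Pass a timestamped log message to the debug handler, if one was set.
	 *
	 * @param string $msg The message to log
	 */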
	private function debugCallback( $msg ) {
		if ( $this->debug ) {
			call_user_func_array( $this->debug, [ wfTimestamp( TS_DB ) . " $msg\n" ] );
		}
	}

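	/**
	 * Issue a commit on all masters that are currently in a transaction and have
	 * made changes to the database. When $wgJobSerialCommitThreshold is reached,
	 * commits are serialized via a named lock and the generic slave is waited on
	 * so that large write bursts do not pile up replication lag.
	 *
	 * @param Job $job
	 * @throws DBError
	 */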
	private function commitMasterChanges( Job $job ) {
		global $wgJobSerialCommitThreshold;

		$lb = wfGetLB( wfWikiID() );
		if ( $wgJobSerialCommitThreshold !== false && $lb->getServerCount() > 1 ) {
			// Generally, there is one master connection to the local DB
			$dbwSerial = $lb->getAnyOpenConnection( $lb->getWriterIndex() );
		} else {
			$dbwSerial = false;
		}

		if ( !$dbwSerial
			|| !$dbwSerial->namedLocksEnqueue()
			|| $dbwSerial->pendingWriteQueryDuration() < $wgJobSerialCommitThreshold
		) {
			// Writes are all to foreign DBs, named locks don't form queues,
			// or $wgJobSerialCommitThreshold is not reached; commit changes now
			wfGetLBFactory()->commitMasterChanges( __METHOD__ );
			return;
		}

		$ms = intval( 1000 * $dbwSerial->pendingWriteQueryDuration() );
		$msg = $job->toString() . " COMMIT ENQUEUED [{$ms}ms of writes]";
		$this->logger->info( $msg );
		$this->debugCallback( $msg );

		// Wait for an exclusive lock to commit
		if ( !$dbwSerial->lock( 'jobrunner-serial-commit', __METHOD__, 30 ) ) {
			// This will trigger a rollback in the main loop
			throw new DBError( $dbwSerial, "Timed out waiting on commit queue." );
		}
		// Wait for the generic slave to catch up
		$pos = $lb->getMasterPos();
		if ( $pos ) {
			$lb->waitForOne( $pos );
		}

		$fname = __METHOD__;
		// Re-ping all masters with transactions. This throws DBError if some
		// connection died while waiting on locks/slaves, triggering a rollback.
		wfGetLBFactory()->forEachLB( function ( LoadBalancer $lb ) use ( $fname ) {
			$lb->forEachOpenConnection( function ( IDatabase $conn ) use ( $fname ) {
				if ( $conn->writesOrCallbacksPending() ) {
					$conn->query( "SELECT 1", $fname );
				}
			} );
		} );

		// Actually commit the DB master changes
		wfGetLBFactory()->commitMasterChanges( __METHOD__ );

		// Release the lock
		$dbwSerial->unlock( 'jobrunner-serial-commit', __METHOD__ );
	}
}
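For reference, a minimal sketch of driving this class directly; in practice the maintenance/runJobs.php script and MediaWiki's post-request job execution wrap JobRunner for you. The 'refreshLinks' job type, the limits, and the output formatting below are illustrative assumptions, not part of this file.

$runner = new JobRunner();
$response = $runner->run( [
	'type' => 'refreshLinks', // or false to pop from any default queue
	'maxJobs' => 100,         // stop after this many jobs
	'maxTime' => 30,          // or after this many seconds of wall-clock time
	'throttle' => true        // respect $wgJobBackoffThrottling backoffs
] );
foreach ( $response['jobs'] as $info ) {
	print "{$info['type']}: {$info['status']} ({$info['time']}ms)\n";
}
print "Stopped because: {$response['reached']}\n";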