MediaWiki  1.29.2
JobRunner.php
Go to the documentation of this file.
1 <?php
26 use Liuggio\StatsdClient\Factory\StatsdDataFactory;
27 use Psr\Log\LoggerAwareInterface;
28 use Psr\Log\LoggerInterface;
29 use Wikimedia\ScopedCallback;
33 
/**
 * Job queue runner utility methods.
 */
class JobRunner implements LoggerAwareInterface {
	/** @var callable|null Debug output handler; receives formatted debug lines */
	protected $debug;

	/** @var LoggerInterface Logger for job status/error messages */
	protected $logger;

	const MAX_ALLOWED_LAG = 3; // abort if more than this much DB lag is present
	const LAG_CHECK_PERIOD = 1.0; // check replica DB lag this many seconds
	const ERROR_BACKOFF_TTL = 1; // seconds to back off a queue due to errors
	const READONLY_BACKOFF_TTL = 30; // seconds to back off a queue due to read-only errors
53 
	/**
	 * Set the handler used by debugCallback() to emit debug output.
	 *
	 * @param callable $debug Output handler; receives a single message string
	 */
	public function setDebugHandler( $debug ) {
		$this->debug = $debug;
	}
60 
	/**
	 * Set the logger instance to send job status/error messages to.
	 *
	 * @param LoggerInterface $logger
	 */
	public function setLogger( LoggerInterface $logger ) {
		$this->logger = $logger;
	}
68 
	/**
	 * @param LoggerInterface|null $logger Defaults to the 'runJobs' channel logger
	 */
	public function __construct( LoggerInterface $logger = null ) {
		if ( $logger === null ) {
			$logger = LoggerFactory::getInstance( 'runJobs' );
		}
		$this->setLogger( $logger );
	}
78 
103  public function run( array $options ) {
104  global $wgJobClasses, $wgTrxProfilerLimits;
105 
106  $response = [ 'jobs' => [], 'reached' => 'none-ready' ];
107 
108  $type = isset( $options['type'] ) ? $options['type'] : false;
109  $maxJobs = isset( $options['maxJobs'] ) ? $options['maxJobs'] : false;
110  $maxTime = isset( $options['maxTime'] ) ? $options['maxTime'] : false;
111  $noThrottle = isset( $options['throttle'] ) && !$options['throttle'];
112 
113  // Bail if job type is invalid
114  if ( $type !== false && !isset( $wgJobClasses[$type] ) ) {
115  $response['reached'] = 'none-possible';
116  return $response;
117  }
118  // Bail out if DB is in read-only mode
119  if ( wfReadOnly() ) {
120  $response['reached'] = 'read-only';
121  return $response;
122  }
123 
124  $lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
125  // Bail out if there is too much DB lag.
126  // This check should not block as we want to try other wiki queues.
127  list( , $maxLag ) = $lbFactory->getMainLB( wfWikiID() )->getMaxLag();
128  if ( $maxLag >= self::MAX_ALLOWED_LAG ) {
129  $response['reached'] = 'replica-lag-limit';
130  return $response;
131  }
132 
133  // Flush any pending DB writes for sanity
134  $lbFactory->commitAll( __METHOD__ );
135 
136  // Catch huge single updates that lead to replica DB lag
137  $trxProfiler = Profiler::instance()->getTransactionProfiler();
138  $trxProfiler->setLogger( LoggerFactory::getInstance( 'DBPerformance' ) );
139  $trxProfiler->setExpectations( $wgTrxProfilerLimits['JobRunner'], __METHOD__ );
140 
141  // Some jobs types should not run until a certain timestamp
142  $backoffs = []; // map of (type => UNIX expiry)
143  $backoffDeltas = []; // map of (type => seconds)
144  $wait = 'wait'; // block to read backoffs the first time
145 
146  $group = JobQueueGroup::singleton();
147  $stats = MediaWikiServices::getInstance()->getStatsdDataFactory();
148  $jobsPopped = 0;
149  $timeMsTotal = 0;
150  $startTime = microtime( true ); // time since jobs started running
151  $lastCheckTime = 1; // timestamp of last replica DB check
152  do {
153  // Sync the persistent backoffs with concurrent runners
154  $backoffs = $this->syncBackoffDeltas( $backoffs, $backoffDeltas, $wait );
155  $blacklist = $noThrottle ? [] : array_keys( $backoffs );
156  $wait = 'nowait'; // less important now
157 
158  if ( $type === false ) {
159  $job = $group->pop(
162  $blacklist
163  );
164  } elseif ( in_array( $type, $blacklist ) ) {
165  $job = false; // requested queue in backoff state
166  } else {
167  $job = $group->pop( $type ); // job from a single queue
168  }
169  $lbFactory->commitMasterChanges( __METHOD__ ); // flush any JobQueueDB writes
170 
171  if ( $job ) { // found a job
172  ++$jobsPopped;
173  $popTime = time();
174  $jType = $job->getType();
175 
176  WebRequest::overrideRequestId( $job->getRequestId() );
177 
178  // Back off of certain jobs for a while (for throttling and for errors)
179  $ttw = $this->getBackoffTimeToWait( $job );
180  if ( $ttw > 0 ) {
181  // Always add the delta for other runners in case the time running the
182  // job negated the backoff for each individually but not collectively.
183  $backoffDeltas[$jType] = isset( $backoffDeltas[$jType] )
184  ? $backoffDeltas[$jType] + $ttw
185  : $ttw;
186  $backoffs = $this->syncBackoffDeltas( $backoffs, $backoffDeltas, $wait );
187  }
188 
189  $info = $this->executeJob( $job, $lbFactory, $stats, $popTime );
190  if ( $info['status'] !== false || !$job->allowRetries() ) {
191  $group->ack( $job ); // succeeded or job cannot be retried
192  $lbFactory->commitMasterChanges( __METHOD__ ); // flush any JobQueueDB writes
193  }
194 
195  // Back off of certain jobs for a while (for throttling and for errors)
196  if ( $info['status'] === false && mt_rand( 0, 49 ) == 0 ) {
197  $ttw = max( $ttw, $this->getErrorBackoffTTL( $info['error'] ) );
198  $backoffDeltas[$jType] = isset( $backoffDeltas[$jType] )
199  ? $backoffDeltas[$jType] + $ttw
200  : $ttw;
201  }
202 
203  $response['jobs'][] = [
204  'type' => $jType,
205  'status' => ( $info['status'] === false ) ? 'failed' : 'ok',
206  'error' => $info['error'],
207  'time' => $info['timeMs']
208  ];
209  $timeMsTotal += $info['timeMs'];
210 
211  // Break out if we hit the job count or wall time limits...
212  if ( $maxJobs && $jobsPopped >= $maxJobs ) {
213  $response['reached'] = 'job-limit';
214  break;
215  } elseif ( $maxTime && ( microtime( true ) - $startTime ) > $maxTime ) {
216  $response['reached'] = 'time-limit';
217  break;
218  }
219 
220  // Don't let any of the main DB replica DBs get backed up.
221  // This only waits for so long before exiting and letting
222  // other wikis in the farm (on different masters) get a chance.
223  $timePassed = microtime( true ) - $lastCheckTime;
224  if ( $timePassed >= self::LAG_CHECK_PERIOD || $timePassed < 0 ) {
225  try {
226  $lbFactory->waitForReplication( [
227  'ifWritesSince' => $lastCheckTime,
228  'timeout' => self::MAX_ALLOWED_LAG
229  ] );
230  } catch ( DBReplicationWaitError $e ) {
231  $response['reached'] = 'replica-lag-limit';
232  break;
233  }
234  $lastCheckTime = microtime( true );
235  }
236  // Don't let any queue replica DBs/backups fall behind
237  if ( $jobsPopped > 0 && ( $jobsPopped % 100 ) == 0 ) {
238  $group->waitForBackups();
239  }
240 
241  // Bail if near-OOM instead of in a job
242  if ( !$this->checkMemoryOK() ) {
243  $response['reached'] = 'memory-limit';
244  break;
245  }
246  }
247  } while ( $job ); // stop when there are no jobs
248 
249  // Sync the persistent backoffs for the next runJobs.php pass
250  if ( $backoffDeltas ) {
251  $this->syncBackoffDeltas( $backoffs, $backoffDeltas, 'wait' );
252  }
253 
254  $response['backoffs'] = $backoffs;
255  $response['elapsed'] = $timeMsTotal;
256 
257  return $response;
258  }
259 
264  private function getErrorBackoffTTL( $error ) {
265  return strpos( $error, 'DBReadOnlyError' ) !== false
268  }
269 
277  private function executeJob( Job $job, LBFactory $lbFactory, $stats, $popTime ) {
278  $jType = $job->getType();
279  $msg = $job->toString() . " STARTING";
280  $this->logger->debug( $msg );
281  $this->debugCallback( $msg );
282 
283  // Run the job...
284  $rssStart = $this->getMaxRssKb();
285  $jobStartTime = microtime( true );
286  try {
287  $fnameTrxOwner = get_class( $job ) . '::run'; // give run() outer scope
288  $lbFactory->beginMasterChanges( $fnameTrxOwner );
289  $status = $job->run();
290  $error = $job->getLastError();
291  $this->commitMasterChanges( $lbFactory, $job, $fnameTrxOwner );
292  // Important: this must be the last deferred update added (T100085, T154425)
294  // Run any deferred update tasks; doUpdates() manages transactions itself
296  } catch ( Exception $e ) {
298  $status = false;
299  $error = get_class( $e ) . ': ' . $e->getMessage();
300  }
301  // Always attempt to call teardown() even if Job throws exception.
302  try {
303  $job->teardown( $status );
304  } catch ( Exception $e ) {
306  }
307 
308  // Commit all outstanding connections that are in a transaction
309  // to get a fresh repeatable read snapshot on every connection.
310  // Note that jobs are still responsible for handling replica DB lag.
311  $lbFactory->flushReplicaSnapshots( __METHOD__ );
312  // Clear out title cache data from prior snapshots
313  MediaWikiServices::getInstance()->getLinkCache()->clear();
314  $timeMs = intval( ( microtime( true ) - $jobStartTime ) * 1000 );
315  $rssEnd = $this->getMaxRssKb();
316 
317  // Record how long jobs wait before getting popped
318  $readyTs = $job->getReadyTimestamp();
319  if ( $readyTs ) {
320  $pickupDelay = max( 0, $popTime - $readyTs );
321  $stats->timing( 'jobqueue.pickup_delay.all', 1000 * $pickupDelay );
322  $stats->timing( "jobqueue.pickup_delay.$jType", 1000 * $pickupDelay );
323  }
324  // Record root job age for jobs being run
325  $rootTimestamp = $job->getRootJobParams()['rootJobTimestamp'];
326  if ( $rootTimestamp ) {
327  $age = max( 0, $popTime - wfTimestamp( TS_UNIX, $rootTimestamp ) );
328  $stats->timing( "jobqueue.pickup_root_age.$jType", 1000 * $age );
329  }
330  // Track the execution time for jobs
331  $stats->timing( "jobqueue.run.$jType", $timeMs );
332  // Track RSS increases for jobs (in case of memory leaks)
333  if ( $rssStart && $rssEnd ) {
334  $stats->updateCount( "jobqueue.rss_delta.$jType", $rssEnd - $rssStart );
335  }
336 
337  if ( $status === false ) {
338  $msg = $job->toString() . " t=$timeMs error={$error}";
339  $this->logger->error( $msg );
340  $this->debugCallback( $msg );
341  } else {
342  $msg = $job->toString() . " t=$timeMs good";
343  $this->logger->info( $msg );
344  $this->debugCallback( $msg );
345  }
346 
347  return [ 'status' => $status, 'error' => $error, 'timeMs' => $timeMs ];
348  }
349 
353  private function getMaxRssKb() {
354  $info = wfGetRusage() ?: [];
355  // see https://linux.die.net/man/2/getrusage
356  return isset( $info['ru_maxrss'] ) ? (int)$info['ru_maxrss'] : null;
357  }
358 
364  private function getBackoffTimeToWait( Job $job ) {
365  global $wgJobBackoffThrottling;
366 
367  if ( !isset( $wgJobBackoffThrottling[$job->getType()] ) ||
368  $job instanceof DuplicateJob // no work was done
369  ) {
370  return 0; // not throttled
371  }
372 
373  $itemsPerSecond = $wgJobBackoffThrottling[$job->getType()];
374  if ( $itemsPerSecond <= 0 ) {
375  return 0; // not throttled
376  }
377 
378  $seconds = 0;
379  if ( $job->workItemCount() > 0 ) {
380  $exactSeconds = $job->workItemCount() / $itemsPerSecond;
381  // use randomized rounding
382  $seconds = floor( $exactSeconds );
383  $remainder = $exactSeconds - $seconds;
384  $seconds += ( mt_rand() / mt_getrandmax() < $remainder ) ? 1 : 0;
385  }
386 
387  return (int)$seconds;
388  }
389 
398  private function loadBackoffs( array $backoffs, $mode = 'wait' ) {
399  $file = wfTempDir() . '/mw-runJobs-backoffs.json';
400  if ( is_file( $file ) ) {
401  $noblock = ( $mode === 'nowait' ) ? LOCK_NB : 0;
402  $handle = fopen( $file, 'rb' );
403  if ( !flock( $handle, LOCK_SH | $noblock ) ) {
404  fclose( $handle );
405  return $backoffs; // don't wait on lock
406  }
407  $content = stream_get_contents( $handle );
408  flock( $handle, LOCK_UN );
409  fclose( $handle );
410  $ctime = microtime( true );
411  $cBackoffs = json_decode( $content, true ) ?: [];
412  foreach ( $cBackoffs as $type => $timestamp ) {
413  if ( $timestamp < $ctime ) {
414  unset( $cBackoffs[$type] );
415  }
416  }
417  } else {
418  $cBackoffs = [];
419  }
420 
421  return $cBackoffs;
422  }
423 
435  private function syncBackoffDeltas( array $backoffs, array &$deltas, $mode = 'wait' ) {
436  if ( !$deltas ) {
437  return $this->loadBackoffs( $backoffs, $mode );
438  }
439 
440  $noblock = ( $mode === 'nowait' ) ? LOCK_NB : 0;
441  $file = wfTempDir() . '/mw-runJobs-backoffs.json';
442  $handle = fopen( $file, 'wb+' );
443  if ( !flock( $handle, LOCK_EX | $noblock ) ) {
444  fclose( $handle );
445  return $backoffs; // don't wait on lock
446  }
447  $ctime = microtime( true );
448  $content = stream_get_contents( $handle );
449  $cBackoffs = json_decode( $content, true ) ?: [];
450  foreach ( $deltas as $type => $seconds ) {
451  $cBackoffs[$type] = isset( $cBackoffs[$type] ) && $cBackoffs[$type] >= $ctime
452  ? $cBackoffs[$type] + $seconds
453  : $ctime + $seconds;
454  }
455  foreach ( $cBackoffs as $type => $timestamp ) {
456  if ( $timestamp < $ctime ) {
457  unset( $cBackoffs[$type] );
458  }
459  }
460  ftruncate( $handle, 0 );
461  fwrite( $handle, json_encode( $cBackoffs ) );
462  flock( $handle, LOCK_UN );
463  fclose( $handle );
464 
465  $deltas = [];
466 
467  return $cBackoffs;
468  }
469 
475  private function checkMemoryOK() {
476  static $maxBytes = null;
477  if ( $maxBytes === null ) {
478  $m = [];
479  if ( preg_match( '!^(\d+)(k|m|g|)$!i', ini_get( 'memory_limit' ), $m ) ) {
480  list( , $num, $unit ) = $m;
481  $conv = [ 'g' => 1073741824, 'm' => 1048576, 'k' => 1024, '' => 1 ];
482  $maxBytes = $num * $conv[strtolower( $unit )];
483  } else {
484  $maxBytes = 0;
485  }
486  }
487  $usedBytes = memory_get_usage();
488  if ( $maxBytes && $usedBytes >= 0.95 * $maxBytes ) {
489  $msg = "Detected excessive memory usage ($usedBytes/$maxBytes).";
490  $this->debugCallback( $msg );
491  $this->logger->error( $msg );
492 
493  return false;
494  }
495 
496  return true;
497  }
498 
503  private function debugCallback( $msg ) {
504  if ( $this->debug ) {
505  call_user_func_array( $this->debug, [ wfTimestamp( TS_DB ) . " $msg\n" ] );
506  }
507  }
508 
	/**
	 * Issue a commit on all masters that are currently in a transaction and
	 * have made changes to the database. For slow commits on a replicated
	 * setup, serialize the commit against other job runners via a named DB
	 * lock and wait for replicas to catch up first, per
	 * $wgJobSerialCommitThreshold.
	 *
	 * @param LBFactory $lbFactory
	 * @param Job $job
	 * @param string $fnameTrxOwner Caller name owning the transaction round
	 * @throws DBError If the serial-commit lock cannot be acquired in time
	 */
	private function commitMasterChanges( LBFactory $lbFactory, Job $job, $fnameTrxOwner ) {
		global $wgJobSerialCommitThreshold;

		$time = false;
		$lb = $lbFactory->getMainLB( wfWikiID() );
		if ( $wgJobSerialCommitThreshold !== false && $lb->getServerCount() > 1 ) {
			// Generally, there is one master connection to the local DB
			$dbwSerial = $lb->getAnyOpenConnection( $lb->getWriterIndex() );
			// We need natively blocking fast locks
			if ( $dbwSerial && $dbwSerial->namedLocksEnqueue() ) {
				$time = $dbwSerial->pendingWriteQueryDuration( $dbwSerial::ESTIMATE_DB_APPLY );
				if ( $time < $wgJobSerialCommitThreshold ) {
					// Writes were fast enough; no need to serialize the commit
					$dbwSerial = false;
				}
			} else {
				$dbwSerial = false;
			}
		} else {
			// There are no replica DBs or writes are all to foreign DB (we don't handle that)
			$dbwSerial = false;
		}

		if ( !$dbwSerial ) {
			// Plain commit; no serialization against other runners needed
			$lbFactory->commitMasterChanges( $fnameTrxOwner );
			return;
		}

		$ms = intval( 1000 * $time );
		$msg = $job->toString() . " COMMIT ENQUEUED [{$ms}ms of writes]";
		$this->logger->info( $msg );
		$this->debugCallback( $msg );

		// Wait for an exclusive lock to commit
		if ( !$dbwSerial->lock( 'jobrunner-serial-commit', __METHOD__, 30 ) ) {
			// This will trigger a rollback in the main loop
			throw new DBError( $dbwSerial, "Timed out waiting on commit queue." );
		}
		// Release the named lock automatically once consumed or on unwind
		$unlocker = new ScopedCallback( function () use ( $dbwSerial ) {
			$dbwSerial->unlock( 'jobrunner-serial-commit', __METHOD__ );
		} );

		// Wait for the replica DBs to catch up
		$pos = $lb->getMasterPos();
		if ( $pos ) {
			$lb->waitForAll( $pos );
		}

		// Actually commit the DB master changes
		$lbFactory->commitMasterChanges( $fnameTrxOwner );
		ScopedCallback::consume( $unlocker );
	}
571 }
JobQueueGroup\USE_CACHE
const USE_CACHE
Definition: JobQueueGroup.php:51
false
processing should stop and the error should be shown to the user * false
Definition: hooks.txt:189
JobRunner\ERROR_BACKOFF_TTL
const ERROR_BACKOFF_TTL
Definition: JobRunner.php:51
JobRunner\$logger
$logger
Definition: JobRunner.php:47
Profiler\instance
static instance()
Singleton.
Definition: Profiler.php:62
JobRunner\__construct
__construct(LoggerInterface $logger=null)
Definition: JobRunner.php:72
JobQueueGroup\TYPE_DEFAULT
const TYPE_DEFAULT
Definition: JobQueueGroup.php:48
wfTimestamp
wfTimestamp( $outputtype=TS_UNIX, $ts=0)
Get a timestamp string in one of various formats.
Definition: GlobalFunctions.php:1994
Wikimedia\Rdbms\DBReplicationWaitError
Exception class for replica DB wait timeouts.
Definition: DBReplicationWaitError.php:28
$status
this hook is for auditing only RecentChangesLinked and Watchlist RecentChangesLinked and Watchlist Do not use this to implement individual filters if they are compatible with the ChangesListFilter and ChangesListFilterGroup structure use sub classes of those in conjunction with the ChangesListSpecialPageStructuredFilters hook This hook can be used to implement filters that do not implement that or custom behavior that is not an individual filter e g Watchlist and Watchlist you will want to construct new ChangesListBooleanFilter or ChangesListStringOptionsFilter objects When constructing you specify which group they belong to You can reuse existing or create your you must register them with $special registerFilterGroup removed from all revisions and log entries to which it was applied This gives extensions a chance to take it off their books as the deletion has already been partly carried out by this point or something similar the user will be unable to create the tag set $status
Definition: hooks.txt:1049
JobRunner\getBackoffTimeToWait
getBackoffTimeToWait(Job $job)
Definition: JobRunner.php:364
use
as see the revision history and available at free of to any person obtaining a copy of this software and associated documentation to deal in the Software without including without limitation the rights to use
Definition: MIT-LICENSE.txt:10
JobRunner\executeJob
executeJob(Job $job, LBFactory $lbFactory, $stats, $popTime)
Definition: JobRunner.php:277
wfReadOnly
wfReadOnly()
Check whether the wiki is in read-only mode.
Definition: GlobalFunctions.php:1277
$type
do that in ParserLimitReportFormat instead use this to modify the parameters of the image and a DIV can begin in one section and end in another Make sure your code can handle that case gracefully See the EditSectionClearerLink extension for an example zero but section is usually empty its values are the globals values before the output is cached my talk my contributions etc etc otherwise the built in rate limiting checks are if enabled allows for interception of redirect as a string mapping parameter names to values & $type
Definition: hooks.txt:2536
$lbFactory
$lbFactory
Definition: doMaintenance.php:117
Wikimedia\Rdbms\DBError
Database error base class.
Definition: DBError.php:30
php
injection txt This is an overview of how MediaWiki makes use of dependency injection The design described here grew from the discussion of RFC T384 The term dependency this means that anything an object needs to operate should be injected from the the object itself should only know narrow no concrete implementation of the logic it relies on The requirement to inject everything typically results in an architecture that based on two main types of and essentially stateless service objects that use other service objects to operate on the value objects As of the beginning MediaWiki is only starting to use the DI approach Much of the code still relies on global state or direct resulting in a highly cyclical dependency which acts as the top level factory for services in MediaWiki which can be used to gain access to default instances of various services MediaWikiServices however also allows new services to be defined and default services to be redefined Services are defined or redefined by providing a callback the instantiator that will return a new instance of the service When it will create an instance of MediaWikiServices and populate it with the services defined in the files listed by thereby bootstrapping the DI framework Per $wgServiceWiringFiles lists includes ServiceWiring php
Definition: injection.txt:35
JobRunner\syncBackoffDeltas
syncBackoffDeltas(array $backoffs, array &$deltas, $mode='wait')
Merge the current backoff expiries from persistent storage.
Definition: JobRunner.php:435
Job
Class to both describe a background job and handle jobs.
Definition: Job.php:31
DeferredUpdates\addCallableUpdate
static addCallableUpdate( $callable, $stage=self::POSTSEND, IDatabase $dbw=null)
Add a callable update.
Definition: DeferredUpdates.php:111
JobRunner\setLogger
setLogger(LoggerInterface $logger)
Definition: JobRunner.php:65
JobRunner\READONLY_BACKOFF_TTL
const READONLY_BACKOFF_TTL
Definition: JobRunner.php:52
$content
this hook is for auditing only RecentChangesLinked and Watchlist RecentChangesLinked and Watchlist Do not use this to implement individual filters if they are compatible with the ChangesListFilter and ChangesListFilterGroup structure use sub classes of those in conjunction with the ChangesListSpecialPageStructuredFilters hook This hook can be used to implement filters that do not implement that or custom behavior that is not an individual filter e g Watchlist and Watchlist you will want to construct new ChangesListBooleanFilter or ChangesListStringOptionsFilter objects When constructing you specify which group they belong to You can reuse existing or create your you must register them with $special registerFilterGroup removed from all revisions and log entries to which it was applied This gives extensions a chance to take it off their books as the deletion has already been partly carried out by this point or something similar the user will be unable to create the tag set and then return false from the hook function Ensure you consume the ChangeTagAfterDelete hook to carry out custom deletion actions as context called by AbstractContent::getParserOutput May be used to override the normal model specific rendering of page content $content
Definition: hooks.txt:1049
JobRunner\debugCallback
debugCallback( $msg)
Log the job message.
Definition: JobRunner.php:503
MWExceptionHandler\rollbackMasterChangesAndLog
static rollbackMasterChangesAndLog( $e)
Roll back any open database transactions and log the stack trace of the exception.
Definition: MWExceptionHandler.php:93
JobRunner\run
run(array $options)
Run jobs of the specified number/type for the specified time.
Definition: JobRunner.php:103
JobRunner\LAG_CHECK_PERIOD
const LAG_CHECK_PERIOD
Definition: JobRunner.php:50
$time
see documentation in includes Linker php for Linker::makeImageLink & $time
Definition: hooks.txt:1769
global
when a variable name is used in a it is silently declared as a new masking the global
Definition: design.txt:93
JobRunner\loadBackoffs
loadBackoffs(array $backoffs, $mode='wait')
Get the previous backoff expiries from persistent storage On I/O or lock acquisition failure this ret...
Definition: JobRunner.php:398
DuplicateJob
No-op job that does nothing.
Definition: DuplicateJob.php:29
list
deferred txt A few of the database updates required by various functions here can be deferred until after the result page is displayed to the user For updating the view updating the linked to tables after a etc PHP does not yet have any way to tell the server to actually return and disconnect while still running these but it might have such a feature in the future We handle these by creating a deferred update object and putting those objects on a global list
Definition: deferred.txt:11
JobRunner\setDebugHandler
setDebugHandler( $debug)
Definition: JobRunner.php:57
wfWikiID
wfWikiID()
Get an ASCII string identifying this wiki This is used as a prefix in memcached keys.
Definition: GlobalFunctions.php:3011
$e
div flags Integer display flags(NO_ACTION_LINK, NO_EXTRA_USER_LINKS) 'LogException' returning false will NOT prevent logging $e
Definition: hooks.txt:2122
JobRunner\$debug
callable null $debug
Debug output handler.
Definition: JobRunner.php:42
$response
this hook is for auditing only $response
Definition: hooks.txt:783
DeferredUpdates\doUpdates
static doUpdates( $mode='run', $stage=self::ALL)
Do any deferred updates and clear the list.
Definition: DeferredUpdates.php:123
JobRunner\getMaxRssKb
getMaxRssKb()
Definition: JobRunner.php:353
wfTempDir
wfTempDir()
Tries to get the system directory for temporary files.
Definition: GlobalFunctions.php:2061
JobRunner\commitMasterChanges
commitMasterChanges(LBFactory $lbFactory, Job $job, $fnameTrxOwner)
Issue a commit on all masters who are currently in a transaction and have made changes to the databas...
Definition: JobRunner.php:520
$job
if(count( $args)< 1) $job
Definition: recompressTracked.php:47
Wikimedia\Rdbms\LBFactory
An interface for generating database load balancers.
Definition: LBFactory.php:38
JobQueueGroup\singleton
static singleton( $wiki=false)
Definition: JobQueueGroup.php:71
as
This document is intended to provide useful advice for parties seeking to redistribute MediaWiki to end users It s targeted particularly at maintainers for Linux since it s been observed that distribution packages of MediaWiki often break We ve consistently had to recommend that users seeking support use official tarballs instead of their distribution s and this often solves whatever problem the user is having It would be nice if this could such as
Definition: distributors.txt:9
LoggerFactory
MediaWiki Logger LoggerFactory implements a PSR[0] compatible message logging system Named Psr Log LoggerInterface instances can be obtained from the MediaWiki Logger LoggerFactory::getInstance() static method. MediaWiki\Logger\LoggerFactory expects a class implementing the MediaWiki\Logger\Spi interface to act as a factory for new Psr\Log\LoggerInterface instances. The "Spi" in MediaWiki\Logger\Spi stands for "service provider interface". An SPI is an API intended to be implemented or extended by a third party. This software design pattern is intended to enable framework extension and replaceable components. It is specifically used in the MediaWiki\Logger\LoggerFactory service to allow alternate PSR-3 logging implementations to be easily integrated with MediaWiki. The service provider interface allows the backend logging library to be implemented in multiple ways. The $wgMWLoggerDefaultSpi global provides the classname of the default MediaWiki\Logger\Spi implementation to be loaded at runtime. This can either be the name of a class implementing the MediaWiki\Logger\Spi with a zero argument const ructor or a callable that will return an MediaWiki\Logger\Spi instance. Alternately the MediaWiki\Logger\LoggerFactory MediaWiki Logger LoggerFactory
Definition: logger.txt:5
$wait
$wait
Definition: styleTest.css.php:46
class
you have access to all of the normal MediaWiki so you can get a DB use the etc For full docs on the Maintenance class
Definition: maintenance.txt:52
JobRunner\MAX_ALLOWED_LAG
const MAX_ALLOWED_LAG
Definition: JobRunner.php:49
MediaWikiServices
injection txt This is an overview of how MediaWiki makes use of dependency injection The design described here grew from the discussion of RFC T384 The term dependency this means that anything an object needs to operate should be injected from the the object itself should only know narrow no concrete implementation of the logic it relies on The requirement to inject everything typically results in an architecture that based on two main types of and essentially stateless service objects that use other service objects to operate on the value objects As of the beginning MediaWiki is only starting to use the DI approach Much of the code still relies on global state or direct resulting in a highly cyclical dependency MediaWikiServices
Definition: injection.txt:23
JobRunner
Job queue runner utility methods.
Definition: JobRunner.php:40
WebRequest\overrideRequestId
static overrideRequestId( $id)
Override the unique request ID.
Definition: WebRequest.php:288
JobRunner\getErrorBackoffTTL
getErrorBackoffTTL( $error)
Definition: JobRunner.php:264
wfGetRusage
wfGetRusage()
Get system resource usage of current request context.
Definition: ProfilerFunctions.php:32
$options
this hook is for auditing only RecentChangesLinked and Watchlist RecentChangesLinked and Watchlist Do not use this to implement individual filters if they are compatible with the ChangesListFilter and ChangesListFilterGroup structure use sub classes of those in conjunction with the ChangesListSpecialPageStructuredFilters hook This hook can be used to implement filters that do not implement that or custom behavior that is not an individual filter e g Watchlist and Watchlist you will want to construct new ChangesListBooleanFilter or ChangesListStringOptionsFilter objects When constructing you specify which group they belong to You can reuse existing or create your you must register them with $special registerFilterGroup removed from all revisions and log entries to which it was applied This gives extensions a chance to take it off their books as the deletion has already been partly carried out by this point or something similar the user will be unable to create the tag set and then return false from the hook function Ensure you consume the ChangeTagAfterDelete hook to carry out custom deletion actions as context called by AbstractContent::getParserOutput May be used to override the normal model specific rendering of page content as context as context $options
Definition: hooks.txt:1049
array
the array() calling protocol came about after MediaWiki 1.4rc1.
MWExceptionHandler\logException
static logException( $e, $catcher=self::CAUGHT_BY_OTHER)
Log an exception to the exception log (if enabled).
Definition: MWExceptionHandler.php:596
JobRunner\checkMemoryOK
checkMemoryOK()
Make sure that this script is not too close to the memory usage limit.
Definition: JobRunner.php:475