use Liuggio\StatsdClient\Factory\StatsdDataFactory;
use Psr\Log\LoggerAwareInterface;
use Psr\Log\LoggerInterface;
use Wikimedia\ScopedCallback;
$logger = LoggerFactory::getInstance( 'runJobs' );
// ...
$response = [ 'jobs' => [], 'reached' => 'none-ready' ];
$lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
// ...
if ( $maxLag >= self::MAX_ALLOWED_LAG ) {
	$response['reached'] = 'replica-lag-limit';
	// ...
}
// ...
$trxProfiler->setLogger( LoggerFactory::getInstance( 'DBPerformance' ) );
$stats = MediaWikiServices::getInstance()->getStatsdDataFactory();
// ...
$startTime = microtime( true );
// ...
$blacklist = $noThrottle ? [] : array_keys( $backoffs );
// ...
if ( $type === false ) {
	// ...
} elseif ( in_array( $type, $blacklist ) ) {
	// ...
}
// ...
$lbFactory->commitMasterChanges( __METHOD__ );
// ...
$jType = $job->getType();
// ...
WebRequest::overrideRequestId( $job->getRequestId() );
$backoffDeltas[$jType] = isset( $backoffDeltas[$jType] )
	? $backoffDeltas[$jType] + $ttw
	: $ttw;
// ...
if ( $info['status'] !== false || !$job->allowRetries() ) {
	// ...
	$lbFactory->commitMasterChanges( __METHOD__ );
}
// ...
if ( $info['status'] === false && mt_rand( 0, 49 ) == 0 ) {
	$ttw = max( $ttw, self::ERROR_BACKOFF_TTL );
	$backoffDeltas[$jType] = isset( $backoffDeltas[$jType] )
		? $backoffDeltas[$jType] + $ttw
		: $ttw;
}
// ...
$response['jobs'][] = [
	// ...
	'status' => ( $info['status'] === false ) ? 'failed' : 'ok',
	'error' => $info['error'],
	'time' => $info['timeMs']
];
$timeMsTotal += $info['timeMs'];
if ( $maxJobs && $jobsPopped >= $maxJobs ) {
	// ...
} elseif ( $maxTime && ( microtime( true ) - $startTime ) > $maxTime ) {
	// ...
}
// ...
$timePassed = microtime( true ) - $lastCheckTime;
if ( $timePassed >= self::LAG_CHECK_PERIOD || $timePassed < 0 ) {
	try {
		$lbFactory->waitForReplication( [
			'ifWritesSince' => $lastCheckTime,
			'timeout' => self::MAX_ALLOWED_LAG
		] );
	} catch ( DBReplicationWaitError $e ) {
		$response['reached'] = 'replica-lag-limit';
		// ...
	}
	$lastCheckTime = microtime( true );
}
// ...
if ( $jobsPopped > 0 && ( $jobsPopped % 100 ) == 0 ) {
	$group->waitForBackups();
}
if ( $backoffDeltas ) {
	// ...
}
$jType = $job->getType();
$msg = $job->toString() . " STARTING";
$this->logger->debug( $msg );
// ...
$jobStartTime = microtime( true );
try {
	$fnameTrxOwner = get_class( $job ) . '::run';
	$lbFactory->beginMasterChanges( $fnameTrxOwner );
	// ...
	$error = $job->getLastError();
	// ...
	DeferredUpdates::addCallableUpdate( [ JobQueueGroup::class, 'pushLazyJobs' ] );
	// ...
	DeferredUpdates::doUpdates();
} catch ( Exception $e ) {
	MWExceptionHandler::rollbackMasterChangesAndLog( $e );
	// ...
	$error = get_class( $e ) . ': ' . $e->getMessage();
}
// ...
try {
	// ...
} catch ( Exception $e ) {
	MWExceptionHandler::logException( $e );
}
$lbFactory->flushReplicaSnapshots( __METHOD__ );
// ...
MediaWikiServices::getInstance()->getLinkCache()->clear();
$timeMs = intval( ( microtime( true ) - $jobStartTime ) * 1000 );
// ...
$readyTs = $job->getReadyTimestamp();
if ( $readyTs ) {
	$pickupDelay = max( 0, $popTime - $readyTs );
	$stats->timing( 'jobqueue.pickup_delay.all', 1000 * $pickupDelay );
	$stats->timing( "jobqueue.pickup_delay.$jType", 1000 * $pickupDelay );
}
// ...
$rootTimestamp = $job->getRootJobParams()['rootJobTimestamp'];
if ( $rootTimestamp ) {
	// ...
	$stats->timing( "jobqueue.pickup_root_age.$jType", 1000 * $age );
}
// ...
$stats->timing( "jobqueue.run.$jType", $timeMs );
// ...
if ( $rssStart && $rssEnd ) {
	$stats->updateCount( "jobqueue.rss_delta.$jType", $rssEnd - $rssStart );
}
// ...
if ( $status === false ) {
	$msg = $job->toString() . " t=$timeMs error={$error}";
	$this->logger->error( $msg );
	// ...
} else {
	$msg = $job->toString() . " t=$timeMs good";
	$this->logger->info( $msg );
	// ...
}
// ...
return [ 'status' => $status, 'error' => $error, 'timeMs' => $timeMs ];
return isset( $info['ru_maxrss'] ) ? (int)$info['ru_maxrss'] : null;
if ( $itemsPerSecond <= 0 ) {
	// ...
}
// ...
if ( $job->workItemCount() > 0 ) {
	$exactSeconds = $job->workItemCount() / $itemsPerSecond;
	// ...
	$seconds = floor( $exactSeconds );
	$remainder = $exactSeconds - $seconds;
	$seconds += ( mt_rand() / mt_getrandmax() < $remainder ) ? 1 : 0;
}
// ...
return (int)$seconds;
$file = wfTempDir() . '/mw-runJobs-backoffs.json';
if ( is_file( $file ) ) {
	$noblock = ( $mode === 'nowait' ) ? LOCK_NB : 0;
	$handle = fopen( $file, 'rb' );
	if ( !flock( $handle, LOCK_SH | $noblock ) ) {
		// ...
	}
	$content = stream_get_contents( $handle );
	flock( $handle, LOCK_UN );
	// ...
	$ctime = microtime( true );
	$cBackoffs = json_decode( $content, true ) ?: [];
	foreach ( $cBackoffs as $type => $timestamp ) {
		if ( $timestamp < $ctime ) {
			unset( $cBackoffs[$type] );
		}
	}
}
$noblock = ( $mode === 'nowait' ) ? LOCK_NB : 0;
$file = wfTempDir() . '/mw-runJobs-backoffs.json';
$handle = fopen( $file, 'wb+' );
if ( !flock( $handle, LOCK_EX | $noblock ) ) {
	// ...
}
$ctime = microtime( true );
$content = stream_get_contents( $handle );
$cBackoffs = json_decode( $content, true ) ?: [];
foreach ( $deltas as $type => $seconds ) {
	$cBackoffs[$type] = isset( $cBackoffs[$type] ) && $cBackoffs[$type] >= $ctime
		? $cBackoffs[$type] + $seconds
		: $ctime + $seconds;
}
foreach ( $cBackoffs as $type => $timestamp ) {
	if ( $timestamp < $ctime ) {
		unset( $cBackoffs[$type] );
	}
}
ftruncate( $handle, 0 );
fwrite( $handle, json_encode( $cBackoffs ) );
flock( $handle, LOCK_UN );
static $maxBytes = null;
if ( $maxBytes === null ) {
	if ( preg_match( '!^(\d+)(k|m|g|)$!i', ini_get( 'memory_limit' ), $m ) ) {
		list( , $num, $unit ) = $m;
		$conv = [ 'g' => 1073741824, 'm' => 1048576, 'k' => 1024, '' => 1 ];
		$maxBytes = $num * $conv[strtolower( $unit )];
	}
	// ...
}
$usedBytes = memory_get_usage();
if ( $maxBytes && $usedBytes >= 0.95 * $maxBytes ) {
	$msg = "Detected excessive memory usage ($usedBytes/$maxBytes).";
	// ...
	$this->logger->error( $msg );
	// ...
}
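As a worked example of the conversion above (the '128M' limit is only an illustration, not a recommended value):

// If ini_get( 'memory_limit' ) returned '128M', the pattern captures
// $num = '128' and $unit = 'M', so:
$conv = [ 'g' => 1073741824, 'm' => 1048576, 'k' => 1024, '' => 1 ];
$maxBytes = 128 * $conv['m']; // 134217728 bytes; checkMemoryOK() warns once usage reaches 95% of this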
if ( $this->debug ) {
	// ...
}
$dbwSerial = $lb->getAnyOpenConnection( $lb->getWriterIndex() );
// ...
if ( $dbwSerial && $dbwSerial->namedLocksEnqueue() ) {
	$time = $dbwSerial->pendingWriteQueryDuration( $dbwSerial::ESTIMATE_DB_APPLY );
	// ...
}
// ...
if ( !$dbwSerial ) {
	$lbFactory->commitMasterChanges( $fnameTrxOwner );
	return;
}
// ...
$ms = intval( 1000 * $time );
$msg = $job->toString() . " COMMIT ENQUEUED [{$ms}ms of writes]";
$this->logger->info( $msg );
// ...
if ( !$dbwSerial->lock( 'jobrunner-serial-commit', __METHOD__, 30 ) ) {
	// ...
	throw new DBError( $dbwSerial, "Timed out waiting on commit queue." );
}
$unlocker = new ScopedCallback( function () use ( $dbwSerial ) {
	$dbwSerial->unlock( 'jobrunner-serial-commit', __METHOD__ );
} );
// ...
$pos = $lb->getMasterPos();
// ...
$lb->waitForAll( $pos );
// ...
$lbFactory->commitMasterChanges( $fnameTrxOwner );
ScopedCallback::consume( $unlocker );
$wgTrxProfilerLimits
Performance expectations for DB usage.
float[] $wgJobBackoffThrottling
Map of job types to how many job "work items" should be run per second on each job runner process.
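A hedged example of how this map might look in LocalSettings.php (the job type and rate are illustrative, not defaults):

// Illustrative only: cap each runner at 20 'htmlCacheUpdate' work items per second;
// getBackoffTimeToWait() turns any overshoot into a per-type backoff delay.
$wgJobBackoffThrottling = [
	'htmlCacheUpdate' => 20,
];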
$wgJobClasses
Maps jobs to their handling classes; extensions can add to this to provide custom jobs.
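For instance, an extension could register its own job type; the queue name and class below are made-up placeholders:

// Hypothetical registration by an extension: jobs popped from the 'exampleCleanup'
// queue are handled by the run() method of ExampleCleanupJob (a Job subclass).
$wgJobClasses['exampleCleanup'] = ExampleCleanupJob::class;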
float|bool $wgJobSerialCommitThreshold
Make job runners commit changes for replica DB-lag prone jobs one job at a time.
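A sketch of how this might be tuned (the 0.5 second threshold is an arbitrary example): commitMasterChanges() only funnels a job through the 'jobrunner-serial-commit' named lock when its pending write time meets the threshold.

// Hypothetical setting: jobs with at least 0.5s of pending master write time
// commit one at a time (serialized via the named lock); false disables this.
$wgJobSerialCommitThreshold = 0.5;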
wfTempDir()
Tries to get the system directory for temporary files.
wfReadOnly()
Check whether the wiki is in read-only mode.
wfTimestamp( $outputtype=TS_UNIX, $ts=0)
Get a timestamp string in one of various formats.
wfWikiID()
Get an ASCII string identifying this wiki. This is used as a prefix in memcached keys.
wfGetRusage()
Get system resource usage of current request context.
Database error base class.
Exception class for replica DB wait timeouts.
No-op job that does nothing.
static singleton( $wiki=false)
Job queue runner utility methods.
run(array $options)
Run jobs of the specified number/type for the specified time.
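A minimal usage sketch, assuming the option keys implied by the variables in the source ($type, $maxJobs, $maxTime) and the 'jobs'/'reached' keys of the response array; the job type and limits are illustrative:

$runner = new JobRunner( LoggerFactory::getInstance( 'runJobs' ) );
$response = $runner->run( [
	'type'    => 'refreshLinks', // one registered job type, or false for any ready type
	'maxJobs' => 100,            // stop after popping this many jobs
	'maxTime' => 30              // or after roughly this many seconds
] );
// $response['reached'] records why the loop stopped (e.g. 'none-ready' or
// 'replica-lag-limit'); $response['jobs'] lists per-job status/error/time entries.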
__construct(LoggerInterface $logger=null)
callable|null $debug
Debug output handler.
setLogger(LoggerInterface $logger)
syncBackoffDeltas(array $backoffs, array &$deltas, $mode='wait')
Merge the current backoff expiries from persistent storage.
executeJob(Job $job, LBFactory $lbFactory, $stats, $popTime)
debugCallback( $msg)
Log the job message.
commitMasterChanges(LBFactory $lbFactory, Job $job, $fnameTrxOwner)
Issue a commit on all masters who are currently in a transaction and have made changes to the database.
getBackoffTimeToWait(Job $job)
checkMemoryOK()
Make sure that this script is not too close to the memory usage limit.
loadBackoffs(array $backoffs, $mode='wait')
Get the previous backoff expiries from persistent storage. On I/O or lock acquisition failure this returns the original $backoffs.
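Given the json_encode()/json_decode() calls in loadBackoffs() and syncBackoffDeltas(), mw-runJobs-backoffs.json holds a flat map of job type to backoff expiry time; a sketch with invented values:

// Decoded contents of mw-runJobs-backoffs.json (hypothetical values):
// job type => UNIX timestamp (microtime) at which the backoff expires.
$cBackoffs = [
	'htmlCacheUpdate' => 1504012345.2,
	'refreshLinks'    => 1504012350.7,
];
// Entries whose expiry is already in the past are pruned on load/sync.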
Class to both describe a background job and handle jobs.
An interface for generating database load balancers.
static instance()
Singleton.
const TS_DB
MySQL DATETIME (YYYY-MM-DD HH:MM:SS)
const TS_UNIX
Unix time - the number of seconds since 1970-01-01 00:00:00 UTC.
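A small illustration of the two timestamp formats above (the instant chosen is arbitrary, and the rendered string assumes UTC output):

$unix = 1504012345;                 // TS_UNIX: seconds since 1970-01-01 00:00:00 UTC
$db = wfTimestamp( TS_DB, $unix );  // TS_DB: '2017-08-29 13:12:25' (MySQL DATETIME)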