use Wikimedia\ScopedCallback;
$this->cluster = $params['cluster'] ?? false;
$this->cache = ObjectCache::getMainWANInstance();
return [ 'random', 'timestamp', 'fifo' ];
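Which of the orders above a queue actually uses comes from site configuration. A minimal sketch, assuming the standard $wgJobTypeConf setting in LocalSettings.php:

// Route all job types to the DB-backed queue and pick one of the
// orders accepted above ('random', 'timestamp' or 'fifo').
$wgJobTypeConf['default'] = [
	'class' => 'JobQueueDB',
	'order' => 'random',
	'claimTTL' => 3600 // seconds before an acquired job may be recycled
];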
$found = $dbr->selectField( // unclaimed job
	'job', '1', [ 'job_cmd' => $this->type, 'job_token' => '' ], __METHOD__
);
$size = $this->cache->get( $key );
if ( is_int( $size ) ) {
	return $size;
}

$size = (int)$dbr->selectField( 'job', 'COUNT(*)',
	[ 'job_cmd' => $this->type, 'job_token' => '' ], // unclaimed
	__METHOD__
);
$this->cache->set( $key, $size, self::CACHE_TTL_SHORT );
if ( $this->claimTTL <= 0 ) {
	return 0; // no acknowledgements
}

$count = $this->cache->get( $key );
if ( is_int( $count ) ) {
	return $count;
}

$count = (int)$dbr->selectField( 'job', 'COUNT(*)',
	[ 'job_cmd' => $this->type, "job_token != {$dbr->addQuotes( '' )}" ], // claimed
	__METHOD__
);
$this->cache->set( $key, $count, self::CACHE_TTL_SHORT );
if ( $this->claimTTL <= 0 ) {
	return 0; // no acknowledgements
}

$count = $this->cache->get( $key );
if ( is_int( $count ) ) {
	return $count;
}

$count = (int)$dbr->selectField( 'job', 'COUNT(*)',
	[
		'job_cmd' => $this->type,
		"job_token != {$dbr->addQuotes( '' )}", // claimed
		"job_attempts >= " . $dbr->addQuotes( $this->maxTries ) // no retries left
	],
	__METHOD__
);
$this->cache->set( $key, $count, self::CACHE_TTL_SHORT );
$dbw->onTransactionPreCommitOrIdle(
	function () use ( $dbw, $jobs, $flags, $method ) {
		$this->doBatchPushInternal( $dbw, $jobs, $flags, $method );
	},
	$method
);

if ( !count( $jobs ) ) {
	return true;
}

foreach ( $jobs as $job ) {
	$row = $this->insertFields( $job, $dbw );
	if ( $job->ignoreDuplicates() ) {
		$rowSet[$row['job_sha1']] = $row; // de-duplicate by content hash
	} else {
		$rowList[] = $row;
	}
}

if ( $flags & self::QOS_ATOMIC ) {
	$dbw->startAtomic( $method ); // wrap all the job additions in one transaction
}

// Strip out any jobs whose SHA1 is already enqueued and unclaimed
if ( count( $rowSet ) ) {
	$res = $dbw->select( 'job', 'job_sha1',
		[ 'job_sha1' => array_keys( $rowSet ), 'job_token' => '' ],
		$method
	);
	foreach ( $res as $row ) {
		wfDebug( "Job with hash '{$row->job_sha1}' is a duplicate.\n" );
		unset( $rowSet[$row->job_sha1] ); // already enqueued
	}
}

// Insert the remaining job rows in manageable batches
$rows = array_merge( $rowList, array_values( $rowSet ) );
foreach ( array_chunk( $rows, 50 ) as $rowBatch ) {
	$dbw->insert( 'job', $rowBatch, $method );
}
JobQueue::incrStats( 'dupe_inserts', $this->type,
	count( $rowSet ) + count( $rowList ) - count( $rows )
);

if ( $flags & self::QOS_ATOMIC ) {
	$dbw->endAtomic( $method );
}
$autoTrx = $dbw->getFlag( DBO_TRX ); // get current setting
$dbw->clearFlag( DBO_TRX ); // make each query its own transaction round
$scopedReset = new ScopedCallback( function () use ( $dbw, $autoTrx ) {
	$dbw->setFlag( $autoTrx ? DBO_TRX : 0 ); // restore old setting
} );
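ScopedCallback runs its closure when the object is destroyed, so the DBO_TRX reset above happens on every exit path, including exceptions. If the cleanup should fire earlier, it can be consumed explicitly; a small illustrative sketch:

$section = new ScopedCallback( function () {
	wfDebug( "left the critical section\n" );
} );
// ... do some work ...
ScopedCallback::consume( $section ); // run the callback now rather than at scope exit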
if ( in_array( $this->order, [ 'fifo', 'timestamp' ] ) ) {
	$row = $this->claimOldest( $uuid );
} else { // random first
	$rand = mt_rand( 0, self::MAX_JOB_RANDOM ); // encourage concurrent UPDATEs
	$gte = (bool)mt_rand( 0, 1 ); // find rows with rand before/after $rand
	$row = $this->claimRandom( $uuid, $rand, $gte );
}

$title = Title::makeTitle( $row->job_namespace, $row->job_title );
$job = Job::factory( $row->job_cmd, $title,
	self::extractBlob( $row->job_params ), $row->job_id );
$job->metadata['id'] = $row->job_id;
$job->metadata['timestamp'] = $row->job_timestamp;

// Occasionally run recycling, even if no job was popped
if ( !$job || mt_rand( 0, 9 ) == 0 ) {
	$this->recycleAndDeleteStaleJobs();
}
$invertedDirection = false;

// Pick an unclaimed job near a random job_random value, scanning in
// the chosen direction
$ineq = $gte ? '>=' : '<=';
$dir = $gte ? 'ASC' : 'DESC';
$row = $dbw->selectRow( 'job', self::selectFields(),
	[
		'job_cmd' => $this->type,
		'job_token' => '', // unclaimed
		"job_random {$ineq} {$dbw->addQuotes( $rand )}" ],
	__METHOD__,
	[ 'ORDER BY' => "job_random {$dir}" ]
);
if ( !$row && !$invertedDirection ) {
	$gte = !$gte;
	$invertedDirection = true; // only try the other direction once
}

// Alternatively, scan for any unclaimed row from a small random OFFSET
$row = $dbw->selectRow( 'job', self::selectFields(),
	[
		'job_cmd' => $this->type,
		'job_token' => '' // unclaimed
	],
	__METHOD__,
	[ 'OFFSET' => mt_rand( 0, self::MAX_OFFSET ) ]
);

// Claim the chosen row, but only if it is still unclaimed
$dbw->update( 'job',
	[
		'job_token' => $uuid,
		'job_token_timestamp' => $dbw->timestamp(),
		'job_attempts = job_attempts+1' ],
	[
		'job_cmd' => $this->type,
		'job_id' => $row->job_id,
		'job_token' => '' ],
	__METHOD__
);
if ( !$dbw->affectedRows() ) {
	$row = false; // raced out by another runner
}
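The point of job_random is contention avoidance: each runner starts its scan at an independent random point and direction, so concurrent UPDATEs tend to hit different rows instead of all racing for the head of the queue; a zero affectedRows() just means another runner won that particular row. A toy, runnable sketch of the selection rule with hypothetical values:

$maxJobRandom = 2147483647; // mirrors JobQueueDB::MAX_JOB_RANDOM (2^31 - 1)
$rand = mt_rand( 0, $maxJobRandom ); // random starting point
$gte = (bool)mt_rand( 0, 1 ); // random scan direction
$ineq = $gte ? '>=' : '<=';
$dir = $gte ? 'ASC' : 'DESC';
echo "... WHERE job_random {$ineq} {$rand} ORDER BY job_random {$dir} ...\n";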
if ( $dbw->getType() === 'mysql' ) {
	// MySQL disallows subqueries on the table being updated, but does
	// support ORDER BY and LIMIT directly in single-table UPDATEs
	$dbw->query( "UPDATE {$dbw->tableName( 'job' )} " .
		"SET " .
			"job_token = {$dbw->addQuotes( $uuid )}, " .
			"job_token_timestamp = {$dbw->addQuotes( $dbw->timestamp() )}, " .
			"job_attempts = job_attempts+1 " .
		"WHERE ( " .
			"job_cmd = {$dbw->addQuotes( $this->type )} " .
			"AND job_token = {$dbw->addQuotes( '' )} " .
		") ORDER BY job_id ASC LIMIT 1",
		__METHOD__
	);
} else {
	// Other engines: find the oldest unclaimed job ID with a subquery
	$dbw->update( 'job',
		[
			'job_token' => $uuid,
			'job_token_timestamp' => $dbw->timestamp(),
			'job_attempts = job_attempts+1' ],
		[ 'job_id = (' .
			$dbw->selectSQLText( 'job', 'job_id',
				[ 'job_cmd' => $this->type, 'job_token' => '' ],
				__METHOD__,
				[ 'ORDER BY' => 'job_id ASC', 'LIMIT' => 1 ] ) .
			')'
		],
		__METHOD__
	);
}

if ( $dbw->affectedRows() ) { // claimed a row
	// Fetch the row that was just reserved with our token
	$row = $dbw->selectRow( 'job', self::selectFields(),
		[ 'job_cmd' => $this->type, 'job_token' => $uuid ], __METHOD__
	);
	if ( !$row ) { // raced out by duplicate job removal
		wfDebug( "Row deleted as duplicate by another process.\n" );
	}
}
if ( !isset( $job->metadata['id'] ) ) {
	throw new MWException( "Job of type '{$job->getType()}' has no ID." );
}

$autoTrx = $dbw->getFlag( DBO_TRX ); // get current setting
$dbw->clearFlag( DBO_TRX ); // make each query its own transaction round
$scopedReset = new ScopedCallback( function () use ( $dbw, $autoTrx ) {
	$dbw->setFlag( $autoTrx ? DBO_TRX : 0 ); // restore old setting
} );

// Delete a row with a single DELETE without holding row locks over RTTs
$dbw->delete( 'job',
	[ 'job_cmd' => $this->type, 'job_id' => $job->metadata['id'] ], __METHOD__ );
if ( !isset( $params['rootJobSignature'] ) ) {
	throw new MWException( "Cannot register root job; missing 'rootJobSignature'." );
} elseif ( !isset( $params['rootJobTimestamp'] ) ) {
	throw new MWException( "Cannot register root job; missing 'rootJobTimestamp'." );
}

$dbw->onTransactionCommitOrIdle(
	function () use ( $cache, $params, $key ) {
		$timestamp = $cache->get( $key ); // last known root job timestamp
		if ( $timestamp && $timestamp >= $params['rootJobTimestamp'] ) {
			return true; // a newer version of this root job was enqueued
		}
		// Update the timestamp of the last root job started at this location
		$cache->set( $key, $params['rootJobTimestamp'], self::ROOTJOB_TTL );
	},
	__METHOD__
);
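Callers opt into this de-duplication by enqueueing jobs with the standard root-job parameters; a batch re-enqueued later with an older rootJobTimestamp becomes a no-op. A hedged sketch of attaching them via the Job::newRootJobParams() helper (the key string here is illustrative):

$title = Title::newFromText( 'Some Page' );
$rootParams = Job::newRootJobParams( // rootJobSignature + rootJobTimestamp
	"refreshlinks:{$title->getPrefixedDBkey()}"
);
$job = Job::factory( 'refreshLinks', $title, $rootParams );
JobQueueGroup::singleton()->push( $job );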
$dbw->delete( 'job', [ 'job_cmd' => $this->type ] );

$lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
$lbFactory->waitForReplication(
	[ 'wiki' => $this->wiki, 'cluster' => $this->cluster ] );
foreach ( [ 'size', 'acquiredcount' ] as $type ) {
	$this->cache->delete( $this->getCacheKey( $type ) );
}

return new MappedIterator(
	$dbr->select( 'job', self::selectFields(), $conds ),
	function ( $row ) {
		$job = Job::factory(
			$row->job_cmd,
			Title::makeTitle( $row->job_namespace, $row->job_title ),
			strlen( $row->job_params ) ? unserialize( $row->job_params ) : []
		);
		$job->metadata['id'] = $row->job_id;
		$job->metadata['timestamp'] = $row->job_timestamp;

		return $job;
	}
);
return $this->cluster
	? "DBCluster:{$this->cluster}:{$this->wiki}"
	: "LBFactory:{$this->wiki}";
$res = $dbr->select( 'job', 'DISTINCT job_cmd',
	[ 'job_cmd' => $types ], __METHOD__ );

$types = [];
foreach ( $res as $row ) {
	$types[] = $row->job_cmd;
}

return $types;

$res = $dbr->select( 'job', [ 'job_cmd', 'COUNT(*) AS count' ],
	[ 'job_cmd' => $types ], __METHOD__, [ 'GROUP BY' => 'job_cmd' ] );

$sizes = [];
foreach ( $res as $row ) {
	$sizes[$row->job_cmd] = (int)$row->count;
}

return $sizes;
if ( !$dbw->lock( "jobqueue-recycle-{$this->type}", __METHOD__, 1 ) ) {
	return $count; // already in progress
}

// Remove claims on jobs acquired for too long, if claims are enabled
if ( $this->claimTTL > 0 ) {
	$claimCutoff = $dbw->timestamp( $now - $this->claimTTL );
	// Get the IDs of jobs that were claimed but not finished in time.
	// Selecting the IDs first lets the UPDATE run by primary key.
	$res = $dbw->select( 'job', 'job_id',
		[
			'job_cmd' => $this->type,
			"job_token != {$dbw->addQuotes( '' )}", // was acquired
			"job_token_timestamp < {$dbw->addQuotes( $claimCutoff )}", // stale
			"job_attempts < {$dbw->addQuotes( $this->maxTries )}" ], // retries left
		__METHOD__
	);
	$ids = array_map( function ( $o ) {
		return $o->job_id;
	}, iterator_to_array( $res ) );
	if ( count( $ids ) ) {
		// Reset job_token for these jobs so other runners will pick them up
		$dbw->update( 'job',
			[
				'job_token' => '', // unclaim
				'job_token_timestamp' => $dbw->timestamp( $now ) ],
			[ 'job_id' => $ids ],
			__METHOD__
		);
		$affected = $dbw->affectedRows();
		$this->aggr->notifyQueueNonEmpty( $this->wiki, $this->type );
	}
}

// Destroy any stale jobs that are too old or out of retries
$pruneCutoff = $dbw->timestamp( $now - self::MAX_AGE_PRUNE );
$conds = [
	"job_token != {$dbw->addQuotes( '' )}", // was acquired
	"job_token_timestamp < {$dbw->addQuotes( $pruneCutoff )}" // stale
];
if ( $this->claimTTL > 0 ) {
	$conds[] = "job_attempts >= {$dbw->addQuotes( $this->maxTries )}";
}
$res = $dbw->select( 'job', 'job_id', $conds, __METHOD__ );
$ids = array_map( function ( $o ) {
	return $o->job_id;
}, iterator_to_array( $res ) );
if ( count( $ids ) ) {
	$dbw->delete( 'job', [ 'job_id' => $ids ], __METHOD__ );
	$affected = $dbw->affectedRows();
}

$dbw->unlock( "jobqueue-recycle-{$this->type}", __METHOD__ );
'job_cmd' => $job->getType(),
'job_namespace' => $job->getTitle()->getNamespace(),
'job_title' => $job->getTitle()->getDBkey(),

'job_sha1' => Wikimedia\base_convert(
	sha1( serialize( $job->getDeduplicationInfo() ) ),
	16, 36, 31
),
'job_random' => mt_rand( 0, self::MAX_JOB_RANDOM )
protected function getDB( $index ) {
	$lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
	$lb = ( $this->cluster !== false )
		? $lbFactory->getExternalLB( $this->cluster )
		: $lbFactory->getMainLB( $this->wiki );

	return ( $lb->getServerType( $lb->getWriterIndex() ) !== 'sqlite' )
		// Keep a separate connection to avoid contention and deadlocks;
		// SQLite has the opposite behavior due to DB-level locking
		? $lb->getConnectionRef( $index, [], $this->wiki, $lb::CONN_TRX_AUTOCOMMIT )
		: $lb->getConnectionRef( $index, [], $this->wiki );
}
$cluster = is_string( $this->cluster ) ? $this->cluster : 'main';

return $this->cache->makeGlobalKey(
	'jobqueue', $this->wiki, $cluster, $this->type, $property );
if ( (string)$blob !== '' ) {
	return unserialize( $blob );
} else {
	return false;
}

	'job_token_timestamp', // among the fields returned by selectFields()
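For illustration, the blob round trip in plain PHP (the real helpers are protected statics on JobQueueDB; the function names below are stand-ins):

function makeBlobSketch( $params ) {
	return ( $params !== false ) ? serialize( $params ) : '';
}
function extractBlobSketch( $blob ) {
	return ( (string)$blob !== '' ) ? unserialize( $blob ) : false;
}

$blob = makeBlobSketch( [ 'table' => 'templatelinks' ] );
var_dump( extractBlobSketch( $blob ) ); // the original array
var_dump( extractBlobSketch( '' ) ); // false, not an unserialize() warning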
unserialize( $serialized )
wfDebug( $text, $dest = 'all', array $context = [] )
Sends a line to the debug log if enabled or, optionally, to a comment in output.
wfRandomString( $length = 32 )
Get a random string containing a number of pseudo-random hex characters.
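wfRandomString() is what the pop path uses to mint the per-claim token; for example:

$uuid = wfRandomString( 32 ); // 32 pseudo-random hex chars, used as job_token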
Class to handle job queues stored in the DB.
claimOldest( $uuid )
Reserve a row with a single UPDATE without holding row locks over RTTs...
supportedOrders()
Get the allowed queue orders for configuration validation.
doGetSiblingQueueSizes( array $types )
insertFields( IJobSpecification $job, IDatabase $db )
bool|string $cluster
Name of an external DB cluster, or false for the wiki's main cluster.
__construct( array $params )
Additional parameters include:
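The parameter list is truncated above; for illustration, a hedged example of building this queue through the JobQueue::factory() entry point (normally JobQueueGroup does this from $wgJobTypeConf):

$queue = JobQueue::factory( [
	'class' => 'JobQueueDB',
	'wiki' => wfWikiID(), // which wiki's job table to use
	'type' => 'refreshLinks', // the job_cmd value
	'order' => 'random',
	'claimTTL' => 3600, // seconds before claimed jobs may be recycled
	'cluster' => false // or the name of an external DB cluster
] );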
doBatchPush( array $jobs, $flags )
throwDBException( DBError $e )
static makeBlob( $params )
claimRandom( $uuid, $rand, $gte )
Reserve a row with a single UPDATE without holding row locks over RTTs...
doGetSiblingQueuesWithJobs( array $types )
recycleAndDeleteStaleJobs()
Recycle or destroy any jobs that have been claimed for too long.
doBatchPushInternal( IDatabase $dbw, array $jobs, $flags, $method )
This function should not be called outside of JobQueueDB.
optimalOrder()
Get the default queue order to use if configuration does not specify one.
getCoalesceLocationInternal()
Do not use this function outside of JobQueue/JobQueueGroup.
static extractBlob( $blob )
doDeduplicateRootJob( IJobSpecification $job )
static selectFields()
Return the list of job fields that should be selected.
getJobIterator( array $conds )
Class to handle enqueueing and running of background jobs.
static incrStats( $key, $type, $delta = 1 )
Call wfIncrStats() for the queue overall and for the queue type.
getRootJobCacheKey( $signature )
Class to both describe a background job and handle jobs.
static factory( $command, Title $title, $params = [] )
Create the appropriate object to handle a specific job.
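A typical enqueue path, for illustration ('null' is the built-in no-op job type):

$job = Job::factory( 'null', Title::newMainPage(), [ 'lives' => 1 ] );
JobQueueGroup::singleton()->push( $job ); // lands in the job table via doBatchPush()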
Convenience class for generating iterators from iterators.
Multi-datacenter aware caching interface.
get( $key, &$curTTL = null, array $checkKeys = [], &$asOf = null )
Fetch the value of a key from cache.
set( $key, $value, $ttl = 0, array $opts = [] )
Set the value of a key in cache.
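The size and count getters above use exactly this get()/set() pair as a short-TTL cache in front of COUNT(*) queries; a minimal standalone sketch (the key components are illustrative):

$cache = ObjectCache::getMainWANInstance();
$key = $cache->makeGlobalKey( 'jobqueue', wfWikiID(), 'main', 'refreshLinks', 'size' );
$size = $cache->get( $key );
if ( !is_int( $size ) ) {
	$size = 42; // recompute here, e.g. via the COUNT(*) query shown earlier
	$cache->set( $key, $size, 30 ); // short TTL keeps the counter roughly fresh
}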
Job queue task description interface.