MediaWiki REL1_39
JobQueueDB.php
<?php

use MediaWiki\MediaWikiServices;
use Wikimedia\Rdbms\Database;
use Wikimedia\Rdbms\DBConnectionError;
use Wikimedia\Rdbms\DBError;
use Wikimedia\Rdbms\IDatabase;
use Wikimedia\Rdbms\IMaintainableDatabase;
use Wikimedia\ScopedCallback;

/**
 * Class to handle job queues stored in the DB.
 */
class JobQueueDB extends JobQueue {
	private const CACHE_TTL_SHORT = 30; // integer; seconds to cache info without re-validating
	private const MAX_AGE_PRUNE = 604800; // integer; seconds a job can live once claimed
	private const MAX_JOB_RANDOM = 2147483647; // integer; 2^31 - 1, used for job_random
	private const MAX_OFFSET = 255; // integer; maximum number of rows to skip

	/** @var IMaintainableDatabase|DBError|null */
	protected $conn;

	/** @var array|null Server configuration array */
	protected $server;
	/** @var string|null Name of an external DB cluster or null for the local DB cluster */
	protected $cluster;

	/**
	 * Additional parameters include:
	 *   - server  : Server configuration array (a standalone connection is made via Database::factory)
	 *   - cluster : Name of an external DB cluster to use via LBFactory::getExternalLB()
	 * @param array $params
	 */
	protected function __construct( array $params ) {
		parent::__construct( $params );

		if ( isset( $params['server'] ) ) {
			$this->server = $params['server'];
		} elseif ( isset( $params['cluster'] ) && is_string( $params['cluster'] ) ) {
			$this->cluster = $params['cluster'];
		}
	}

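	// Illustrative configuration sketch (values are hypothetical): passing 'cluster'
	// routes connections through LBFactory::getExternalLB(), while 'server' makes the
	// queue open its own standalone connection via Database::factory().
	//
	//   $queue = JobQueue::factory( [
	//       'class' => JobQueueDB::class,
	//       'domain' => 'wikidb',         // wiki DB domain
	//       'type' => 'refreshLinks',     // job type handled by this queue
	//       'cluster' => 'extension1',    // optional external cluster name
	//   ] );
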
	/**
	 * Get the allowed queue orders for configuration validation.
	 */
	protected function supportedOrders() {
		return [ 'random', 'timestamp', 'fifo' ];
	}

	/**
	 * Get the default queue order to use if configuration does not specify one.
	 */
	protected function optimalOrder() {
		return 'random';
	}

	protected function doIsEmpty() {
		$dbr = $this->getReplicaDB();
		$scope = $this->getScopedNoTrxFlag( $dbr );
		try {
			// unclaimed job
			$found = (bool)$dbr->selectField( 'job', '1',
				[ 'job_cmd' => $this->type, 'job_token' => '' ],
				__METHOD__
			);
		} catch ( DBError $e ) {
			throw $this->getDBException( $e );
		}

		return !$found;
	}

	protected function doGetSize() {
		$key = $this->getCacheKey( 'size' );

		$size = $this->wanCache->get( $key );
		if ( is_int( $size ) ) {
			return $size;
		}

		$dbr = $this->getReplicaDB();
		$scope = $this->getScopedNoTrxFlag( $dbr );
		try {
			$size = (int)$dbr->selectField( 'job', 'COUNT(*)',
				[ 'job_cmd' => $this->type, 'job_token' => '' ],
				__METHOD__
			);
		} catch ( DBError $e ) {
			throw $this->getDBException( $e );
		}
		$this->wanCache->set( $key, $size, self::CACHE_TTL_SHORT );

		return $size;
	}

	protected function doGetAcquiredCount() {
		if ( $this->claimTTL <= 0 ) {
			return 0; // no acknowledgements
		}

		$key = $this->getCacheKey( 'acquiredcount' );

		$count = $this->wanCache->get( $key );
		if ( is_int( $count ) ) {
			return $count;
		}

		$dbr = $this->getReplicaDB();
		$scope = $this->getScopedNoTrxFlag( $dbr );
		try {
			$count = (int)$dbr->selectField( 'job', 'COUNT(*)',
				[ 'job_cmd' => $this->type, "job_token != {$dbr->addQuotes( '' )}" ],
				__METHOD__
			);
		} catch ( DBError $e ) {
			throw $this->getDBException( $e );
		}
		$this->wanCache->set( $key, $count, self::CACHE_TTL_SHORT );

		return $count;
	}

	protected function doGetAbandonedCount() {
		if ( $this->claimTTL <= 0 ) {
			return 0; // no acknowledgements
		}

		$key = $this->getCacheKey( 'abandonedcount' );

		$count = $this->wanCache->get( $key );
		if ( is_int( $count ) ) {
			return $count;
		}

		$dbr = $this->getReplicaDB();
		$scope = $this->getScopedNoTrxFlag( $dbr );
		try {
			$count = (int)$dbr->selectField( 'job', 'COUNT(*)',
				[
					'job_cmd' => $this->type,
					"job_token != {$dbr->addQuotes( '' )}",
					"job_attempts >= " . $dbr->addQuotes( $this->maxTries )
				],
				__METHOD__
			);
		} catch ( DBError $e ) {
			throw $this->getDBException( $e );
		}

		$this->wanCache->set( $key, $count, self::CACHE_TTL_SHORT );

		return $count;
	}

	protected function doBatchPush( array $jobs, $flags ) {
		$dbw = $this->getPrimaryDB();
		$scope = $this->getScopedNoTrxFlag( $dbw );
		// In general, there will be two cases here:
		// a) sqlite; DB connection is probably a regular round-aware handle.
		// If the connection is busy with a transaction, then defer the job writes
		// until right before the main round commit step. Any errors that bubble
		// up will rollback the main commit round.
		// b) mysql/postgres; DB connection is generally a separate CONN_TRX_AUTOCOMMIT handle.
		// No transaction is active nor will be started by writes, so enqueue the jobs
		// now so that any errors will show up immediately as the interface expects. Any
		// errors that bubble up will rollback the main commit round.
		$fname = __METHOD__;
		$dbw->onTransactionPreCommitOrIdle(
			function ( IDatabase $dbw ) use ( $jobs, $flags, $fname ) {
				$this->doBatchPushInternal( $dbw, $jobs, $flags, $fname );
			},
			$fname
		);
	}

	/**
	 * This function should not be called outside of JobQueueDB.
	 *
	 * @param IDatabase $dbw
	 * @param IJobSpecification[] $jobs
	 * @param int $flags
	 * @param string $method
	 */
	public function doBatchPushInternal( IDatabase $dbw, array $jobs, $flags, $method ) {
		if ( $jobs === [] ) {
			return;
		}

		$rowSet = []; // (sha1 => job) map for jobs that are de-duplicated
		$rowList = []; // list of jobs for jobs that are not de-duplicated
		foreach ( $jobs as $job ) {
			$row = $this->insertFields( $job, $dbw );
			if ( $job->ignoreDuplicates() ) {
				$rowSet[$row['job_sha1']] = $row;
			} else {
				$rowList[] = $row;
			}
		}

		if ( $flags & self::QOS_ATOMIC ) {
			$dbw->startAtomic( $method ); // wrap all the job additions in one transaction
		}
		try {
			// Strip out any duplicate jobs that are already in the queue...
			if ( count( $rowSet ) ) {
				$res = $dbw->select( 'job', 'job_sha1',
					[
						// No job_type condition since it's part of the job_sha1 hash
						'job_sha1' => array_map( 'strval', array_keys( $rowSet ) ),
						'job_token' => '' // unclaimed
					],
					$method
				);
				foreach ( $res as $row ) {
					wfDebug( "Job with hash '{$row->job_sha1}' is a duplicate." );
					unset( $rowSet[$row->job_sha1] ); // already enqueued
				}
			}
			// Build the full list of job rows to insert
			$rows = array_merge( $rowList, array_values( $rowSet ) );
			// Insert the job rows in chunks to avoid replica DB lag...
			foreach ( array_chunk( $rows, 50 ) as $rowBatch ) {
				$dbw->insert( 'job', $rowBatch, $method );
			}
			$this->incrStats( 'inserts', $this->type, count( $rows ) );
			$this->incrStats( 'dupe_inserts', $this->type,
				count( $rowSet ) + count( $rowList ) - count( $rows )
			);
		} catch ( DBError $e ) {
			throw $this->getDBException( $e );
		}
		if ( $flags & self::QOS_ATOMIC ) {
			$dbw->endAtomic( $method );
		}
	}

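	// Illustrative usage sketch (hypothetical job spec): pushes go through the queue's
	// public API, which ends up in doBatchPush(); QOS_ATOMIC wraps the chunked inserts
	// of one batch in a single atomic section.
	//
	//   $queue->push( new JobSpecification( 'refreshLinks', [ 'table' => 'templatelinks' ] ) );
	//
	// Jobs whose ignoreDuplicates() returns true are keyed by job_sha1 above, so a batch
	// containing an already-enqueued, unclaimed duplicate inserts no new row for it.
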
	protected function doPop() {
		$dbw = $this->getPrimaryDB();
		$scope = $this->getScopedNoTrxFlag( $dbw );

		$job = false; // job popped off
		try {
			$uuid = wfRandomString( 32 ); // pop attempt
			do { // retry when our row is invalid or deleted as a duplicate
				// Try to reserve a row in the DB...
				if ( in_array( $this->order, [ 'fifo', 'timestamp' ] ) ) {
					$row = $this->claimOldest( $uuid );
				} else { // random first
					$rand = mt_rand( 0, self::MAX_JOB_RANDOM ); // encourage concurrent UPDATEs
					$gte = (bool)mt_rand( 0, 1 ); // find rows with rand before/after $rand
					$row = $this->claimRandom( $uuid, $rand, $gte );
				}
				// Check if we found a row to reserve...
				if ( !$row ) {
					break; // nothing to do
				}
				$this->incrStats( 'pops', $this->type );

				// Get the job object from the row...
				$job = $this->jobFromRow( $row );
				break; // done
			} while ( true );

			if ( !$job || mt_rand( 0, 9 ) == 0 ) {
				// Handle jobs that need to be recycled/deleted;
				// any recycled jobs will be picked up next attempt
				$this->recycleAndDeleteStaleJobs();
			}
		} catch ( DBError $e ) {
			throw $this->getDBException( $e );
		}

		return $job;
	}

	/**
	 * Reserve a row with a single UPDATE without holding row locks over RTTs...
	 *
	 * @param string $uuid 32 char hex string
	 * @param int $rand Random unsigned integer (31 bits)
	 * @param bool $gte Search for job_random >= $rand (otherwise <=)
	 * @return stdClass|false Row|false
	 */
	protected function claimRandom( $uuid, $rand, $gte ) {
		$dbw = $this->getPrimaryDB();
		$scope = $this->getScopedNoTrxFlag( $dbw );
		// Check cache to see if the queue has <= OFFSET items
		$tinyQueue = $this->wanCache->get( $this->getCacheKey( 'small' ) );

		$invertedDirection = false; // whether one job_random direction was already scanned
		// This uses a replication safe method for acquiring jobs. One could use UPDATE+LIMIT
		// instead, but that either uses ORDER BY (in which case it deadlocks in MySQL) or is
		// not replication safe. Due to https://bugs.mysql.com/bug.php?id=6980, subqueries cannot
		// be used here with MySQL.
		do {
			if ( $tinyQueue ) { // queue has <= MAX_OFFSET rows
				// For small queues, using OFFSET will overshoot and return no rows more often.
				// Instead, this uses job_random to pick a row (possibly checking both directions).
				$ineq = $gte ? '>=' : '<=';
				$dir = $gte ? 'ASC' : 'DESC';
				$row = $dbw->selectRow( 'job', self::selectFields(), // find a random job
					[
						'job_cmd' => $this->type,
						'job_token' => '', // unclaimed
						"job_random {$ineq} {$dbw->addQuotes( $rand )}" ],
					__METHOD__,
					[ 'ORDER BY' => "job_random {$dir}" ]
				);
				if ( !$row && !$invertedDirection ) {
					$gte = !$gte;
					$invertedDirection = true;
					continue; // try the other direction
				}
			} else { // table *may* have >= MAX_OFFSET rows
				// T44614: "ORDER BY job_random" with a job_random inequality causes high CPU
				// in MySQL if there are many rows for some reason. This uses a small OFFSET
				// instead of job_random for reducing excess claim retries.
				$row = $dbw->selectRow( 'job', self::selectFields(), // find a random job
					[
						'job_cmd' => $this->type,
						'job_token' => '', // unclaimed
					],
					__METHOD__,
					[ 'OFFSET' => mt_rand( 0, self::MAX_OFFSET ) ]
				);
				if ( !$row ) {
					$tinyQueue = true; // we know the queue must have <= MAX_OFFSET rows
					$this->wanCache->set( $this->getCacheKey( 'small' ), 1, 30 );
					continue; // use job_random
				}
			}

			if ( !$row ) {
				break;
			}

			$dbw->update( 'job', // update by PK
				[
					'job_token' => $uuid,
					'job_token_timestamp' => $dbw->timestamp(),
					'job_attempts = job_attempts+1' ],
				[ 'job_cmd' => $this->type, 'job_id' => $row->job_id, 'job_token' => '' ],
				__METHOD__
			);
			// This might get raced out by another runner when claiming the previously
			// selected row. The use of job_random should minimize this problem, however.
			if ( !$dbw->affectedRows() ) {
				$row = false; // raced out
			}
		} while ( !$row );

		return $row;
	}

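	// Roughly, the two claim strategies above issue SELECTs of this shape (simplified;
	// the real quoting and field list come from the query builder):
	//
	//   -- "tiny queue" path, driven by job_random:
	//   SELECT ... FROM job WHERE job_cmd = 'X' AND job_token = '' AND job_random >= N
	//     ORDER BY job_random ASC LIMIT 1;
	//
	//   -- larger queue path, driven by a small random OFFSET:
	//   SELECT ... FROM job WHERE job_cmd = 'X' AND job_token = '' LIMIT 1 OFFSET K;
	//
	// The selected row is then claimed with an UPDATE by primary key, and losing a race
	// (affectedRows() == 0) simply triggers another iteration.
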
	/**
	 * Reserve a row with a single UPDATE without holding row locks over RTTs...
	 *
	 * @param string $uuid 32 char hex string
	 * @return stdClass|false Row|false
	 */
	protected function claimOldest( $uuid ) {
		$dbw = $this->getPrimaryDB();
		$scope = $this->getScopedNoTrxFlag( $dbw );

		$row = false; // the row acquired
		do {
			if ( $dbw->getType() === 'mysql' ) {
				// Per https://bugs.mysql.com/bug.php?id=6980, we can't use subqueries on the
				// same table being changed in an UPDATE query in MySQL (gives Error: 1093).
				// Postgres has no such limitation. However, MySQL offers an
				// alternative here by supporting ORDER BY + LIMIT for UPDATE queries.
				$dbw->query( "UPDATE {$dbw->tableName( 'job' )} " .
					"SET " .
						"job_token = {$dbw->addQuotes( $uuid )}, " .
						"job_token_timestamp = {$dbw->addQuotes( $dbw->timestamp() )}, " .
						"job_attempts = job_attempts+1 " .
					"WHERE ( " .
						"job_cmd = {$dbw->addQuotes( $this->type )} " .
						"AND job_token = {$dbw->addQuotes( '' )} " .
					") ORDER BY job_id ASC LIMIT 1",
					__METHOD__
				);
			} else {
				// Use a subquery to find the job, within an UPDATE to claim it.
				// This uses as many of the DB wrapper functions as possible.
				$dbw->update( 'job',
					[
						'job_token' => $uuid,
						'job_token_timestamp' => $dbw->timestamp(),
						'job_attempts = job_attempts+1' ],
					[ 'job_id = (' .
						$dbw->selectSQLText( 'job', 'job_id',
							[ 'job_cmd' => $this->type, 'job_token' => '' ],
							__METHOD__,
							[ 'ORDER BY' => 'job_id ASC', 'LIMIT' => 1 ] ) .
						')'
					],
					__METHOD__
				);
			}

			if ( !$dbw->affectedRows() ) {
				break;
			}

			// Fetch any row that we just reserved...
			$row = $dbw->selectRow( 'job', self::selectFields(),
				[ 'job_cmd' => $this->type, 'job_token' => $uuid ], __METHOD__
			);
			if ( !$row ) { // raced out by duplicate job removal
				wfDebug( "Row deleted as duplicate by another process." );
			}
		} while ( !$row );

		return $row;
	}

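	// Roughly, the non-MySQL branch above produces an UPDATE with a subquery (simplified):
	//
	//   UPDATE job SET job_token = 'UUID', job_token_timestamp = NOW, job_attempts = job_attempts+1
	//   WHERE job_id = (SELECT job_id FROM job
	//                   WHERE job_cmd = 'X' AND job_token = '' ORDER BY job_id ASC LIMIT 1);
	//
	// whereas the MySQL branch sidesteps the self-referencing-subquery restriction by
	// putting ORDER BY job_id ASC LIMIT 1 directly on the UPDATE itself.
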
	protected function doAck( RunnableJob $job ) {
		$id = $job->getMetadata( 'id' );
		if ( $id === null ) {
			throw new MWException( "Job of type '{$job->getType()}' has no ID." );
		}

		$dbw = $this->getPrimaryDB();
		$scope = $this->getScopedNoTrxFlag( $dbw );
		try {
			// Delete a row with a single DELETE without holding row locks over RTTs...
			$dbw->delete(
				'job',
				[ 'job_cmd' => $this->type, 'job_id' => $id ],
				__METHOD__
			);

			$this->incrStats( 'acks', $this->type );
		} catch ( DBError $e ) {
			throw $this->getDBException( $e );
		}
	}

	/**
	 * @param IJobSpecification $job
	 * @return bool
	 */
	protected function doDeduplicateRootJob( IJobSpecification $job ) {
		// Callers should call JobQueueGroup::push() before this method so that if the
		// insert fails, the de-duplication registration will be aborted. Since the insert
		// is deferred till "transaction idle", do the same here, so that the ordering is
		// maintained. Having only the de-duplication registration succeed would cause
		// jobs to become no-ops without any actual jobs that made them redundant.
		$dbw = $this->getPrimaryDB();
		$scope = $this->getScopedNoTrxFlag( $dbw );
		$dbw->onTransactionCommitOrIdle(
			function () use ( $job ) {
				parent::doDeduplicateRootJob( $job );
			},
			__METHOD__
		);

		return true;
	}

	protected function doDelete() {
		$dbw = $this->getPrimaryDB();
		$scope = $this->getScopedNoTrxFlag( $dbw );
		try {
			$dbw->delete( 'job', [ 'job_cmd' => $this->type ], __METHOD__ );
		} catch ( DBError $e ) {
			throw $this->getDBException( $e );
		}

		return true;
	}

	protected function doWaitForBackups() {
		if ( $this->server ) {
			return; // not using LBFactory instance
		}

		$lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
		$lbFactory->waitForReplication( [
			'domain' => $this->domain,
			'cluster' => is_string( $this->cluster ) ? $this->cluster : false
		] );
	}

	protected function doFlushCaches() {
		foreach ( [ 'size', 'acquiredcount' ] as $type ) {
			$this->wanCache->delete( $this->getCacheKey( $type ) );
		}
	}

	public function getAllQueuedJobs() {
		return $this->getJobIterator( [ 'job_cmd' => $this->getType(), 'job_token' => '' ] );
	}

	public function getAllAcquiredJobs() {
		return $this->getJobIterator( [ 'job_cmd' => $this->getType(), "job_token > ''" ] );
	}

	public function getAllAbandonedJobs() {
		return $this->getJobIterator( [
			'job_cmd' => $this->getType(),
			"job_token > ''",
			"job_attempts >= " . intval( $this->maxTries )
		] );
	}

	protected function getJobIterator( array $conds ) {
		$dbr = $this->getReplicaDB();
		$scope = $this->getScopedNoTrxFlag( $dbr );
		try {
			return new MappedIterator(
				$dbr->select( 'job', self::selectFields(), $conds, __METHOD__ ),
				function ( $row ) {
					return $this->jobFromRow( $row );
				}
			);
		} catch ( DBError $e ) {
			throw $this->getDBException( $e );
		}
	}

	/**
	 * Do not use this function outside of JobQueue/JobQueueGroup.
	 *
	 * @return string|null
	 */
	public function getCoalesceLocationInternal() {
		if ( $this->server ) {
			return null; // not using the LBFactory instance
		}

		return is_string( $this->cluster )
			? "DBCluster:{$this->cluster}:{$this->domain}"
			: "LBFactory:{$this->domain}";
	}

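	// For example (hypothetical values), a queue on external cluster "extension1" for
	// domain "wikidb" coalesces as "DBCluster:extension1:wikidb", while a queue on the
	// main cluster coalesces as "LBFactory:wikidb"; queues that share a location can be
	// checked together in batches by JobQueueGroup.
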
	protected function doGetSiblingQueuesWithJobs( array $types ) {
		$dbr = $this->getReplicaDB();
		$scope = $this->getScopedNoTrxFlag( $dbr );
		// @note: this does not check whether the jobs are claimed or not.
		// This is useful so JobQueueGroup::pop() also sees queues that only
		// have stale jobs. This lets recycleAndDeleteStaleJobs() re-enqueue
		// failed jobs so that they can be popped again for that edge case.
		$res = $dbr->select( 'job', 'DISTINCT job_cmd',
			[ 'job_cmd' => $types ], __METHOD__ );

		$types = [];
		foreach ( $res as $row ) {
			$types[] = $row->job_cmd;
		}

		return $types;
	}

	protected function doGetSiblingQueueSizes( array $types ) {
		$dbr = $this->getReplicaDB();
		$scope = $this->getScopedNoTrxFlag( $dbr );

		$res = $dbr->select( 'job', [ 'job_cmd', 'count' => 'COUNT(*)' ],
			[ 'job_cmd' => $types ], __METHOD__, [ 'GROUP BY' => 'job_cmd' ] );

		$sizes = [];
		foreach ( $res as $row ) {
			$sizes[$row->job_cmd] = (int)$row->count;
		}

		return $sizes;
	}

	/**
	 * Recycle or destroy any jobs that have been claimed for too long.
	 *
	 * @return int Number of jobs recycled/deleted
	 */
	public function recycleAndDeleteStaleJobs() {
		$now = time();
		$count = 0; // affected rows
		$dbw = $this->getPrimaryDB();
		$scope = $this->getScopedNoTrxFlag( $dbw );

		try {
			if ( !$dbw->lock( "jobqueue-recycle-{$this->type}", __METHOD__, 1 ) ) {
				return $count; // already in progress
			}

			// Remove claims on jobs acquired for too long if enabled...
			if ( $this->claimTTL > 0 ) {
				$claimCutoff = $dbw->timestamp( $now - $this->claimTTL );
				// Get the IDs of jobs that have been claimed but not finished after too long.
				// These jobs can be recycled into the queue by expiring the claim. Selecting
				// the IDs first means that the UPDATE can be done by primary key (less deadlocks).
				$res = $dbw->select( 'job', 'job_id',
					[
						'job_cmd' => $this->type,
						"job_token != {$dbw->addQuotes( '' )}", // was acquired
						"job_token_timestamp < {$dbw->addQuotes( $claimCutoff )}", // stale
						"job_attempts < {$dbw->addQuotes( $this->maxTries )}" ], // retries left
					__METHOD__
				);
				$ids = array_map(
					static function ( $o ) {
						return $o->job_id;
					}, iterator_to_array( $res )
				);
				if ( count( $ids ) ) {
					// Reset job_token for these jobs so that other runners will pick them up.
					// Set the timestamp to the current time, as it is useful to know that the job
					// was already tried before (the timestamp becomes the "released" time).
					$dbw->update( 'job',
						[
							'job_token' => '',
							'job_token_timestamp' => $dbw->timestamp( $now ) // time of release
						],
						[ 'job_id' => $ids, "job_token != {$dbw->addQuotes( '' )}" ],
						__METHOD__
					);
					$affected = $dbw->affectedRows();
					$count += $affected;
					$this->incrStats( 'recycles', $this->type, $affected );
				}
			}

			// Just destroy any stale jobs...
			$pruneCutoff = $dbw->timestamp( $now - self::MAX_AGE_PRUNE );
			$conds = [
				'job_cmd' => $this->type,
				"job_token != {$dbw->addQuotes( '' )}", // was acquired
				"job_token_timestamp < {$dbw->addQuotes( $pruneCutoff )}" // stale
			];
			if ( $this->claimTTL > 0 ) { // only prune jobs attempted too many times...
				$conds[] = "job_attempts >= {$dbw->addQuotes( $this->maxTries )}";
			}
			// Get the IDs of jobs that are considered stale and should be removed. Selecting
			// the IDs first means that the DELETE can be done by primary key (less deadlocks).
			$res = $dbw->select( 'job', 'job_id', $conds, __METHOD__ );
			$ids = array_map(
				static function ( $o ) {
					return $o->job_id;
				}, iterator_to_array( $res )
			);
			if ( count( $ids ) ) {
				$dbw->delete( 'job', [ 'job_id' => $ids ], __METHOD__ );
				$affected = $dbw->affectedRows();
				$count += $affected;
				$this->incrStats( 'abandons', $this->type, $affected );
			}

			$dbw->unlock( "jobqueue-recycle-{$this->type}", __METHOD__ );
		} catch ( DBError $e ) {
			throw $this->getDBException( $e );
		}

		return $count;
	}

	protected function insertFields( IJobSpecification $job, IDatabase $db ) {
		return [
			// Fields that describe the nature of the job
			'job_cmd' => $job->getType(),
			'job_namespace' => $job->getParams()['namespace'] ?? NS_SPECIAL,
			'job_title' => $job->getParams()['title'] ?? '',
			'job_params' => self::makeBlob( $job->getParams() ),
			// Additional job metadata
			'job_timestamp' => $db->timestamp(),
			'job_sha1' => Wikimedia\base_convert(
				sha1( serialize( $job->getDeduplicationInfo() ) ),
				16, 36, 31
			),
			'job_random' => mt_rand( 0, self::MAX_JOB_RANDOM )
		];
	}

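	// In other words, job_sha1 is the SHA-1 of the job's de-duplication info, re-encoded
	// from base 16 to base 36 (31 chars), so two jobs with identical de-duplication specs
	// collide on job_sha1 and only one unclaimed copy is kept, while job_random spreads
	// rows uniformly over [0, MAX_JOB_RANDOM] for the random claim strategy in claimRandom().
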
	protected function getReplicaDB() {
		try {
			return $this->getDB( DB_REPLICA );
		} catch ( DBConnectionError $e ) {
			throw new JobQueueConnectionError( "DBConnectionError:" . $e->getMessage() );
		}
	}

	protected function getPrimaryDB() {
		try {
			return $this->getDB( DB_PRIMARY );
		} catch ( DBConnectionError $e ) {
			throw new JobQueueConnectionError( "DBConnectionError:" . $e->getMessage() );
		}
	}

	/**
	 * @deprecated since 1.37
	 */
	public function getMasterDB() {
		wfDeprecated( __METHOD__, '1.37' );
		return $this->getPrimaryDB();
	}

	protected function getDB( $index ) {
		if ( $this->server ) {
			if ( $this->conn instanceof IDatabase ) {
				return $this->conn;
			} elseif ( $this->conn instanceof DBError ) {
				throw $this->conn;
			}

			try {
				$this->conn = Database::factory( $this->server['type'], $this->server );
			} catch ( DBError $e ) {
				$this->conn = $e;
				throw $e;
			}

			return $this->conn;
		} else {
			$lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
			$lb = is_string( $this->cluster )
				? $lbFactory->getExternalLB( $this->cluster )
				: $lbFactory->getMainLB( $this->domain );

			if ( $lb->getServerType( $lb->getWriterIndex() ) !== 'sqlite' ) {
				// Keep a separate connection to avoid contention and deadlocks;
				// however, SQLite has the opposite behavior due to DB-level locking.
				$flags = $lb::CONN_TRX_AUTOCOMMIT;
			} else {
				// Job insertion will be deferred until the PRESEND stage to reduce contention.
				$flags = 0;
			}

			return $lb->getMaintenanceConnectionRef( $index, [], $this->domain, $flags );
		}
	}

	private function getScopedNoTrxFlag( IDatabase $db ) {
		$autoTrx = $db->getFlag( DBO_TRX ); // get current setting
		$db->clearFlag( DBO_TRX ); // make each query its own transaction

		return new ScopedCallback( static function () use ( $db, $autoTrx ) {
			if ( $autoTrx ) {
				$db->setFlag( DBO_TRX ); // restore old setting
			}
		} );
	}

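	// Callers keep the returned ScopedCallback in a local variable (the unused-looking
	// "$scope = $this->getScopedNoTrxFlag( $db );" lines throughout this class); when that
	// variable goes out of scope, the callback fires and DBO_TRX is restored if it had been set.
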
	private function getCacheKey( $property ) {
		$cluster = is_string( $this->cluster ) ? $this->cluster : 'main';

		return $this->wanCache->makeGlobalKey(
			'jobqueue',
			$this->domain,
			$cluster,
			$this->type,
			$property
		);
	}

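	// For a hypothetical wiki domain "wikidb" on the main cluster, the "size" key for
	// refreshLinks jobs is built from the components
	// ( 'jobqueue', 'wikidb', 'main', 'refreshLinks', 'size' ), so cached queue stats
	// are namespaced per domain, cluster, and job type.
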
	protected static function makeBlob( $params ) {
		if ( $params !== false ) {
			return serialize( $params );
		} else {
			return '';
		}
	}

	protected function jobFromRow( $row ) {
		$params = ( (string)$row->job_params !== '' ) ? unserialize( $row->job_params ) : [];
		if ( !is_array( $params ) ) { // this shouldn't happen
			throw new UnexpectedValueException(
				"Could not unserialize job with ID '{$row->job_id}'." );
		}

		$params += [ 'namespace' => $row->job_namespace, 'title' => $row->job_title ];
		$job = $this->factoryJob( $row->job_cmd, $params );
		$job->setMetadata( 'id', $row->job_id );
		$job->setMetadata( 'timestamp', $row->job_timestamp );

		return $job;
	}

	protected function getDBException( DBError $e ) {
		return new JobQueueError( get_class( $e ) . ": " . $e->getMessage() );
	}

	/**
	 * Return the list of job fields that should be selected.
	 *
	 * @return string[]
	 */
	public static function selectFields() {
		return [
			'job_id',
			'job_cmd',
			'job_namespace',
			'job_title',
			'job_timestamp',
			'job_params',
			'job_random',
			'job_attempts',
			'job_token',
			'job_token_timestamp',
			'job_sha1',
		];
	}
}