JobQueueDB.php
<?php

use Wikimedia\ScopedCallback;

/**
 * Database-backed job queue storage.
 */
class JobQueueDB extends JobQueue {
    /* seconds to cache info without re-validating */
    private const CACHE_TTL_SHORT = 30;
    /* seconds a job can live once claimed */
    private const MAX_AGE_PRUNE = 7 * 24 * 3600;
    /* highest allowed job_random value (2^31 - 1) */
    private const MAX_JOB_RANDOM = 2_147_483_647;
    /* maximum number of rows to skip */
    private const MAX_OFFSET = 255;

    /** @var IMaintainableDatabase|DBError|null */
    protected $conn;

    /** @var array|null Server configuration array */
    protected $server;
    /** @var string|null Name of an external DB cluster or null for the local DB cluster */
    protected $cluster;

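    /**
     * Additional parameters include:
     *   - server  : Server configuration array for a standalone DB connection
     *   - cluster : Name of an external DB cluster, as registered with LBFactory
     * @param array $params
     */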
    protected function __construct( array $params ) {
        parent::__construct( $params );

        if ( isset( $params['server'] ) ) {
            $this->server = $params['server'];
        } elseif ( isset( $params['cluster'] ) && is_string( $params['cluster'] ) ) {
            $this->cluster = $params['cluster'];
        }
    }

    protected function supportedOrders() {
        return [ 'random', 'timestamp', 'fifo' ];
    }

    protected function optimalOrder() {
        return 'random';
    }

    protected function doIsEmpty() {
        $dbr = $this->getReplicaDB();
        $scope = $this->getScopedNoTrxFlag( $dbr );
        try {
            // unclaimed job
            $found = (bool)$dbr->newSelectQueryBuilder()
                ->select( '1' )
                ->from( 'job' )
                ->where( [ 'job_cmd' => $this->type, 'job_token' => '' ] )
                ->caller( __METHOD__ )->fetchField();
        } catch ( DBError $e ) {
            throw $this->getDBException( $e );
        }

        return !$found;
    }

    protected function doGetSize() {
        $key = $this->getCacheKey( 'size' );

        $size = $this->wanCache->get( $key );
        if ( is_int( $size ) ) {
            return $size;
        }

        $dbr = $this->getReplicaDB();
        $scope = $this->getScopedNoTrxFlag( $dbr );
        try {
            $size = $dbr->newSelectQueryBuilder()
                ->from( 'job' )
                ->where( [ 'job_cmd' => $this->type, 'job_token' => '' ] )
                ->caller( __METHOD__ )->fetchRowCount();
        } catch ( DBError $e ) {
            throw $this->getDBException( $e );
        }
        $this->wanCache->set( $key, $size, self::CACHE_TTL_SHORT );

        return $size;
    }

    protected function doGetAcquiredCount() {
        if ( $this->claimTTL <= 0 ) {
            return 0; // no acknowledgements
        }

        $key = $this->getCacheKey( 'acquiredcount' );

        $count = $this->wanCache->get( $key );
        if ( is_int( $count ) ) {
            return $count;
        }

        $dbr = $this->getReplicaDB();
        $scope = $this->getScopedNoTrxFlag( $dbr );
        try {
            $count = $dbr->newSelectQueryBuilder()
                ->from( 'job' )
                ->where( [
                    'job_cmd' => $this->type,
                    $dbr->expr( 'job_token', '!=', '' ),
                ] )
                ->caller( __METHOD__ )->fetchRowCount();
        } catch ( DBError $e ) {
            throw $this->getDBException( $e );
        }
        $this->wanCache->set( $key, $count, self::CACHE_TTL_SHORT );

        return $count;
    }

    protected function doGetAbandonedCount() {
        if ( $this->claimTTL <= 0 ) {
            return 0; // no acknowledgements
        }

        $key = $this->getCacheKey( 'abandonedcount' );

        $count = $this->wanCache->get( $key );
        if ( is_int( $count ) ) {
            return $count;
        }

        $dbr = $this->getReplicaDB();
        $scope = $this->getScopedNoTrxFlag( $dbr );
        try {
            $count = $dbr->newSelectQueryBuilder()
                ->from( 'job' )
                ->where(
                    [
                        'job_cmd' => $this->type,
                        $dbr->expr( 'job_token', '!=', '' ),
                        $dbr->expr( 'job_attempts', '>=', $this->maxTries ),
                    ]
                )
                ->caller( __METHOD__ )->fetchRowCount();
        } catch ( DBError $e ) {
            throw $this->getDBException( $e );
        }

        $this->wanCache->set( $key, $count, self::CACHE_TTL_SHORT );

        return $count;
    }

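    /**
     * @see JobQueue::doBatchPush()
     * @param IJobSpecification[] $jobs
     * @param int $flags
     */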
    protected function doBatchPush( array $jobs, $flags ) {
        $dbw = $this->getPrimaryDB();
        $scope = $this->getScopedNoTrxFlag( $dbw );
        // In general, there will be two cases here:
        // a) sqlite; DB connection is probably a regular round-aware handle.
        //    If the connection is busy with a transaction, then defer the job writes
        //    until right before the main round commit step. Any errors that bubble
        //    up will rollback the main commit round.
        // b) mysql/postgres; DB connection is generally a separate CONN_TRX_AUTOCOMMIT handle.
        //    No transaction is active nor will be started by writes, so enqueue the jobs
        //    now so that any errors will show up immediately as the interface expects. Any
        //    errors that bubble up will rollback the main commit round.
        $fname = __METHOD__;
        $dbw->onTransactionPreCommitOrIdle(
            function ( IDatabase $dbw ) use ( $jobs, $flags, $fname ) {
                $this->doBatchPushInternal( $dbw, $jobs, $flags, $fname );
            },
            $fname
        );
    }

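    /**
     * This function should not be called outside of JobQueueDB.
     *
     * @param IDatabase $dbw
     * @param IJobSpecification[] $jobs
     * @param int $flags
     * @param string $method
     * @throws JobQueueError
     */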
    public function doBatchPushInternal( IDatabase $dbw, array $jobs, $flags, $method ) {
        if ( $jobs === [] ) {
            return;
        }

        $rowSet = []; // (sha1 => job) map for jobs that are de-duplicated
        $rowList = []; // list of jobs for jobs that are not de-duplicated
        foreach ( $jobs as $job ) {
            $row = $this->insertFields( $job, $dbw );
            if ( $job->ignoreDuplicates() ) {
                $rowSet[$row['job_sha1']] = $row;
            } else {
                $rowList[] = $row;
            }
        }

        if ( $flags & self::QOS_ATOMIC ) {
            $dbw->startAtomic( $method ); // wrap all the job additions in one transaction
        }
        try {
            // Strip out any duplicate jobs that are already in the queue...
            if ( count( $rowSet ) ) {
                $res = $dbw->newSelectQueryBuilder()
                    ->select( 'job_sha1' )
                    ->from( 'job' )
                    ->where(
                        [
                            // No job_type condition since it's part of the job_sha1 hash
                            'job_sha1' => array_map( 'strval', array_keys( $rowSet ) ),
                            'job_token' => '' // unclaimed
                        ]
                    )
                    ->caller( $method )->fetchResultSet();
                foreach ( $res as $row ) {
                    wfDebug( "Job with hash '{$row->job_sha1}' is a duplicate." );
                    unset( $rowSet[$row->job_sha1] ); // already enqueued
                }
            }
            // Build the full list of job rows to insert
            $rows = array_merge( $rowList, array_values( $rowSet ) );
            // Insert the job rows in chunks to avoid replica DB lag...
            foreach ( array_chunk( $rows, 50 ) as $rowBatch ) {
                $dbw->newInsertQueryBuilder()
                    ->insertInto( 'job' )
                    ->rows( $rowBatch )
                    ->caller( $method )->execute();
            }
            $this->incrStats( 'inserts', $this->type, count( $rows ) );
            $this->incrStats( 'dupe_inserts', $this->type,
                count( $rowSet ) + count( $rowList ) - count( $rows )
            );
        } catch ( DBError $e ) {
            throw $this->getDBException( $e );
        }
        if ( $flags & self::QOS_ATOMIC ) {
            $dbw->endAtomic( $method );
        }
    }

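    /**
     * @see JobQueue::doPop()
     * @return RunnableJob|false
     */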
    protected function doPop() {
        $dbw = $this->getPrimaryDB();
        $scope = $this->getScopedNoTrxFlag( $dbw );

        $job = false; // job popped off
        try {
            $uuid = wfRandomString( 32 ); // pop attempt
            do { // retry when our row is invalid or deleted as a duplicate
                // Try to reserve a row in the DB...
                if ( in_array( $this->order, [ 'fifo', 'timestamp' ] ) ) {
                    $row = $this->claimOldest( $uuid );
                } else { // random first
                    $rand = mt_rand( 0, self::MAX_JOB_RANDOM ); // encourage concurrent UPDATEs
                    $gte = (bool)mt_rand( 0, 1 ); // find rows with rand before/after $rand
                    $row = $this->claimRandom( $uuid, $rand, $gte );
                }
                // Check if we found a row to reserve...
                if ( !$row ) {
                    break; // nothing to do
                }
                $this->incrStats( 'pops', $this->type );

                // Get the job object from the row...
                $job = $this->jobFromRow( $row );
                break; // done
            } while ( true );

            if ( !$job || mt_rand( 0, 9 ) == 0 ) {
                // Handle jobs that need to be recycled/deleted;
                // any recycled jobs will be picked up next attempt
                $this->recycleAndDeleteStaleJobs();
            }
        } catch ( DBError $e ) {
            throw $this->getDBException( $e );
        }

        return $job;
    }

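    /**
     * Reserve a row with a single UPDATE without holding row locks over RTTs...
     *
     * @param string $uuid 32 char hex string
     * @param int $rand Random unsigned integer (31 bits)
     * @param bool $gte Search for job_random >= $rand (otherwise <=)
     * @return stdClass|false Row|false
     */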
    protected function claimRandom( $uuid, $rand, $gte ) {
        $dbw = $this->getPrimaryDB();
        $scope = $this->getScopedNoTrxFlag( $dbw );
        // Check cache to see if the queue has <= MAX_OFFSET items
        $tinyQueue = $this->wanCache->get( $this->getCacheKey( 'small' ) );

        $invertedDirection = false; // whether one job_random direction was already scanned
        // This uses a replication safe method for acquiring jobs. One could use UPDATE+LIMIT
        // instead, but that either uses ORDER BY (in which case it deadlocks in MySQL) or is
        // not replication safe. Due to https://bugs.mysql.com/bug.php?id=6980, subqueries cannot
        // be used here with MySQL.
        do {
            if ( $tinyQueue ) { // queue has <= MAX_OFFSET rows
                // For small queues, using OFFSET will overshoot and return no rows more often.
                // Instead, this uses job_random to pick a row (possibly checking both directions).
                $row = $dbw->newSelectQueryBuilder()
                    ->select( self::selectFields() )
                    ->from( 'job' )
                    ->where(
                        [
                            'job_cmd' => $this->type,
                            'job_token' => '', // unclaimed
                            $dbw->expr( 'job_random', $gte ? '>=' : '<=', $rand )
                        ]
                    )
                    ->orderBy(
                        'job_random',
                        $gte ? SelectQueryBuilder::SORT_ASC : SelectQueryBuilder::SORT_DESC
                    )
                    ->caller( __METHOD__ )->fetchRow();
                if ( !$row && !$invertedDirection ) {
                    $gte = !$gte;
                    $invertedDirection = true;
                    continue; // try the other direction
                }
            } else { // table *may* have >= MAX_OFFSET rows
                // T44614: "ORDER BY job_random" with a job_random inequality causes high CPU
                // in MySQL if there are many rows for some reason. This uses a small OFFSET
                // instead of job_random for reducing excess claim retries.
                $row = $dbw->newSelectQueryBuilder()
                    ->select( self::selectFields() )
                    ->from( 'job' )
                    ->where(
                        [
                            'job_cmd' => $this->type,
                            'job_token' => '', // unclaimed
                        ]
                    )
                    ->offset( mt_rand( 0, self::MAX_OFFSET ) )
                    ->caller( __METHOD__ )->fetchRow();
                if ( !$row ) {
                    $tinyQueue = true; // we know the queue must have <= MAX_OFFSET rows
                    $this->wanCache->set( $this->getCacheKey( 'small' ), 1, 30 );
                    continue; // use job_random
                }
            }

            if ( !$row ) {
                break;
            }

            $dbw->newUpdateQueryBuilder()
                ->update( 'job' ) // update by PK
                ->set( [
                    'job_token' => $uuid,
                    'job_token_timestamp' => $dbw->timestamp(),
                    'job_attempts' => new RawSQLValue( 'job_attempts+1' ),
                ] )
                ->where( [
                    'job_cmd' => $this->type,
                    'job_id' => $row->job_id,
                    'job_token' => ''
                ] )
                ->caller( __METHOD__ )->execute();

            // This might get raced out by another runner when claiming the previously
            // selected row. The use of job_random should minimize this problem, however.
            if ( !$dbw->affectedRows() ) {
                $row = false; // raced out
            }
        } while ( !$row );

        return $row;
    }

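    /**
     * Reserve a row with a single UPDATE without holding row locks over RTTs...
     *
     * @param string $uuid 32 char hex string
     * @return stdClass|false Row|false
     */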
    protected function claimOldest( $uuid ) {
        $dbw = $this->getPrimaryDB();
        $scope = $this->getScopedNoTrxFlag( $dbw );

        $row = false; // the row acquired
        do {
            if ( $dbw->getType() === 'mysql' ) {
                // Per https://bugs.mysql.com/bug.php?id=6980, we can't use subqueries on the
                // same table being changed in an UPDATE query in MySQL (gives Error: 1093).
                // Postgres has no such limitation. However, MySQL offers an
                // alternative here by supporting ORDER BY + LIMIT for UPDATE queries.
                $dbw->query( "UPDATE {$dbw->tableName( 'job' )} " .
                    "SET " .
                        "job_token = {$dbw->addQuotes( $uuid )}, " .
                        "job_token_timestamp = {$dbw->addQuotes( $dbw->timestamp() )}, " .
                        "job_attempts = job_attempts+1 " .
                    "WHERE ( " .
                        "job_cmd = {$dbw->addQuotes( $this->type )} " .
                        "AND job_token = {$dbw->addQuotes( '' )} " .
                    ") ORDER BY job_id ASC LIMIT 1",
                    __METHOD__
                );
            } else {
                // Use a subquery to find the job, within an UPDATE to claim it.
                // This uses as many of the DB wrapper functions as possible.
                $qb = $dbw->newSelectQueryBuilder()
                    ->select( 'job_id' )
                    ->from( 'job' )
                    ->where( [ 'job_cmd' => $this->type, 'job_token' => '' ] )
                    ->orderBy( 'job_id', SelectQueryBuilder::SORT_ASC )
                    ->limit( 1 );

                $dbw->newUpdateQueryBuilder()
                    ->update( 'job' )
                    ->set( [
                        'job_token' => $uuid,
                        'job_token_timestamp' => $dbw->timestamp(),
                        'job_attempts' => new RawSQLValue( 'job_attempts+1' ),
                    ] )
                    ->where( [ 'job_id' => new RawSQLValue( '(' . $qb->getSQL() . ')' ) ] )
                    ->caller( __METHOD__ )->execute();
            }

            if ( !$dbw->affectedRows() ) {
                break;
            }

            // Fetch any row that we just reserved...
            $row = $dbw->newSelectQueryBuilder()
                ->select( self::selectFields() )
                ->from( 'job' )
                ->where( [ 'job_cmd' => $this->type, 'job_token' => $uuid ] )
                ->caller( __METHOD__ )->fetchRow();
            if ( !$row ) { // raced out by duplicate job removal
                wfDebug( "Row deleted as duplicate by another process." );
            }
        } while ( !$row );

        return $row;
    }

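    /**
     * @see JobQueue::doAck()
     * @param RunnableJob $job
     */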
    protected function doAck( RunnableJob $job ) {
        $id = $job->getMetadata( 'id' );
        if ( $id === null ) {
            throw new UnexpectedValueException( "Job of type '{$job->getType()}' has no ID." );
        }

        $dbw = $this->getPrimaryDB();
        $scope = $this->getScopedNoTrxFlag( $dbw );
        try {
            // Delete a row with a single DELETE without holding row locks over RTTs...
            $dbw->newDeleteQueryBuilder()
                ->deleteFrom( 'job' )
                ->where( [ 'job_cmd' => $this->type, 'job_id' => $id ] )
                ->caller( __METHOD__ )->execute();

            $this->incrStats( 'acks', $this->type );
        } catch ( DBError $e ) {
            throw $this->getDBException( $e );
        }
    }

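    /**
     * @see JobQueue::doDeduplicateRootJob()
     * @param IJobSpecification $job
     * @return bool
     */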
    protected function doDeduplicateRootJob( IJobSpecification $job ) {
        // Callers should call JobQueueGroup::push() before this method so that if the
        // insert fails, the de-duplication registration will be aborted. Since the insert
        // is deferred till "transaction idle", do the same here, so that the ordering is
        // maintained. Having only the de-duplication registration succeed would cause
        // jobs to become no-ops without any actual jobs that made them redundant.
        $dbw = $this->getPrimaryDB();
        $scope = $this->getScopedNoTrxFlag( $dbw );
        $dbw->onTransactionCommitOrIdle(
            function () use ( $job ) {
                parent::doDeduplicateRootJob( $job );
            },
            __METHOD__
        );

        return true;
    }

    protected function doDelete() {
        $dbw = $this->getPrimaryDB();
        $scope = $this->getScopedNoTrxFlag( $dbw );
        try {
            $dbw->newDeleteQueryBuilder()
                ->deleteFrom( 'job' )
                ->where( [ 'job_cmd' => $this->type ] )
                ->caller( __METHOD__ )->execute();
        } catch ( DBError $e ) {
            throw $this->getDBException( $e );
        }

        return true;
    }

    protected function doWaitForBackups() {
        if ( $this->server ) {
            return; // not using LBFactory instance
        }

        $lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
        $lbFactory->waitForReplication();
    }

    protected function doFlushCaches() {
        foreach ( [ 'size', 'acquiredcount' ] as $type ) {
            $this->wanCache->delete( $this->getCacheKey( $type ) );
        }
    }

    public function getAllQueuedJobs() {
        return $this->getJobIterator( [ 'job_cmd' => $this->getType(), 'job_token' => '' ] );
    }

    public function getAllAcquiredJobs() {
        $dbr = $this->getReplicaDB();
        return $this->getJobIterator( [ 'job_cmd' => $this->getType(), $dbr->expr( 'job_token', '>', '' ) ] );
    }

    public function getAllAbandonedJobs() {
        $dbr = $this->getReplicaDB();
        return $this->getJobIterator( [
            'job_cmd' => $this->getType(),
            $dbr->expr( 'job_token', '>', '' ),
            $dbr->expr( 'job_attempts', '>=', intval( $this->maxTries ) ),
        ] );
    }

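    /**
     * @param array $conds Query conditions
     * @return Iterator<RunnableJob>
     */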
    protected function getJobIterator( array $conds ) {
        $dbr = $this->getReplicaDB();
        $scope = $this->getScopedNoTrxFlag( $dbr );
        $qb = $dbr->newSelectQueryBuilder()
            ->select( self::selectFields() )
            ->from( 'job' )
            ->where( $conds );
        try {
            return new MappedIterator(
                $qb->caller( __METHOD__ )->fetchResultSet(),
                function ( $row ) {
                    return $this->jobFromRow( $row );
                }
            );
        } catch ( DBError $e ) {
            throw $this->getDBException( $e );
        }
    }

    public function getCoalesceLocationInternal() {
        if ( $this->server ) {
            return null; // not using the LBFactory instance
        }

        return is_string( $this->cluster )
            ? "DBCluster:{$this->cluster}:{$this->domain}"
            : "LBFactory:{$this->domain}";
    }

    protected function doGetSiblingQueuesWithJobs( array $types ) {
        $dbr = $this->getReplicaDB();
        $scope = $this->getScopedNoTrxFlag( $dbr );
        // @note: this does not check whether the jobs are claimed or not.
        // This is useful so JobQueueGroup::pop() also sees queues that only
        // have stale jobs. This lets recycleAndDeleteStaleJobs() re-enqueue
        // failed jobs so that they can be popped again for that edge case.
        $res = $dbr->newSelectQueryBuilder()
            ->select( 'job_cmd' )
            ->distinct()
            ->from( 'job' )
            ->where( [ 'job_cmd' => $types ] )
            ->caller( __METHOD__ )->fetchResultSet();

        $types = [];
        foreach ( $res as $row ) {
            $types[] = $row->job_cmd;
        }

        return $types;
    }

    protected function doGetSiblingQueueSizes( array $types ) {
        $dbr = $this->getReplicaDB();
        $scope = $this->getScopedNoTrxFlag( $dbr );

        $res = $dbr->newSelectQueryBuilder()
            ->select( [ 'job_cmd', 'count' => 'COUNT(*)' ] )
            ->from( 'job' )
            ->where( [ 'job_cmd' => $types ] )
            ->groupBy( 'job_cmd' )
            ->caller( __METHOD__ )->fetchResultSet();

        $sizes = [];
        foreach ( $res as $row ) {
            $sizes[$row->job_cmd] = (int)$row->count;
        }

        return $sizes;
    }

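    /**
     * Recycle or destroy any jobs that have been claimed for too long.
     *
     * @return int Number of jobs recycled/deleted
     */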
    public function recycleAndDeleteStaleJobs() {
        $now = time();
        $count = 0; // affected rows
        $dbw = $this->getPrimaryDB();
        $scope = $this->getScopedNoTrxFlag( $dbw );

        try {
            if ( !$dbw->lock( "jobqueue-recycle-{$this->type}", __METHOD__, 1 ) ) {
                return $count; // already in progress
            }

            // Remove claims on jobs acquired for too long if enabled...
            if ( $this->claimTTL > 0 ) {
                $claimCutoff = $dbw->timestamp( $now - $this->claimTTL );
                // Get the IDs of jobs that have been claimed but not finished after too long.
                // These jobs can be recycled into the queue by expiring the claim. Selecting
                // the IDs first means that the UPDATE can be done by primary key (less deadlocks).
                $res = $dbw->newSelectQueryBuilder()
                    ->select( 'job_id' )
                    ->from( 'job' )
                    ->where(
                        [
                            'job_cmd' => $this->type,
                            $dbw->expr( 'job_token', '!=', '' ), // was acquired
                            $dbw->expr( 'job_token_timestamp', '<', $claimCutoff ), // stale
                            $dbw->expr( 'job_attempts', '<', $this->maxTries ), // retries left
                        ]
                    )
                    ->caller( __METHOD__ )->fetchResultSet();
                $ids = array_map(
                    static function ( $o ) {
                        return $o->job_id;
                    }, iterator_to_array( $res )
                );
                if ( count( $ids ) ) {
                    // Reset job_token for these jobs so that other runners will pick them up.
                    // Set the timestamp to the current time, as it is useful to know that the job
                    // was already tried before (the timestamp becomes the "released" time).
                    $dbw->newUpdateQueryBuilder()
                        ->update( 'job' )
                        ->set( [
                            'job_token' => '',
                            'job_token_timestamp' => $dbw->timestamp( $now ) // time of release
                        ] )
                        ->where( [
                            'job_id' => $ids,
                            $dbw->expr( 'job_token', '!=', '' ),
                        ] )
                        ->caller( __METHOD__ )->execute();

                    $affected = $dbw->affectedRows();
                    $count += $affected;
                    $this->incrStats( 'recycles', $this->type, $affected );
                }
            }

            // Just destroy any stale jobs...
            $pruneCutoff = $dbw->timestamp( $now - self::MAX_AGE_PRUNE );
            $qb = $dbw->newSelectQueryBuilder()
                ->select( 'job_id' )
                ->from( 'job' )
                ->where(
                    [
                        'job_cmd' => $this->type,
                        $dbw->expr( 'job_token', '!=', '' ), // was acquired
                        $dbw->expr( 'job_token_timestamp', '<', $pruneCutoff ) // stale
                    ]
                );
            if ( $this->claimTTL > 0 ) { // only prune jobs attempted too many times...
                $qb->andWhere( $dbw->expr( 'job_attempts', '>=', $this->maxTries ) );
            }
            // Get the IDs of jobs that are considered stale and should be removed. Selecting
            // the IDs first means that the DELETE can be done by primary key (less deadlocks).
            $res = $qb->caller( __METHOD__ )->fetchResultSet();
            $ids = array_map(
                static function ( $o ) {
                    return $o->job_id;
                }, iterator_to_array( $res )
            );
            if ( count( $ids ) ) {
                $dbw->newDeleteQueryBuilder()
                    ->deleteFrom( 'job' )
                    ->where( [ 'job_id' => $ids ] )
                    ->caller( __METHOD__ )->execute();
                $affected = $dbw->affectedRows();
                $count += $affected;
                $this->incrStats( 'abandons', $this->type, $affected );
            }

            $dbw->unlock( "jobqueue-recycle-{$this->type}", __METHOD__ );
        } catch ( DBError $e ) {
            throw $this->getDBException( $e );
        }

        return $count;
    }

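    /**
     * Build the row of fields to insert into the job table for a job.
     *
     * @param IJobSpecification $job
     * @param IReadableDatabase $db
     * @return array
     */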
    protected function insertFields( IJobSpecification $job, IReadableDatabase $db ) {
        return [
            // Fields that describe the nature of the job
            'job_cmd' => $job->getType(),
            'job_namespace' => $job->getParams()['namespace'] ?? NS_SPECIAL,
            'job_title' => $job->getParams()['title'] ?? '',
            'job_params' => self::makeBlob( $job->getParams() ),
            // Additional job metadata
            'job_timestamp' => $db->timestamp(),
            'job_sha1' => Wikimedia\base_convert(
                sha1( serialize( $job->getDeduplicationInfo() ) ),
                16, 36, 31
            ),
            'job_random' => mt_rand( 0, self::MAX_JOB_RANDOM )
        ];
    }

    protected function getReplicaDB() {
        try {
            return $this->getDB( DB_REPLICA );
        } catch ( DBConnectionError $e ) {
            throw new JobQueueConnectionError( "DBConnectionError:" . $e->getMessage() );
        }
    }

    protected function getPrimaryDB() {
        try {
            return $this->getDB( DB_PRIMARY );
        } catch ( DBConnectionError $e ) {
            throw new JobQueueConnectionError( "DBConnectionError:" . $e->getMessage() );
        }
    }

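    /**
     * @param int $index (DB_REPLICA/DB_PRIMARY)
     * @return IMaintainableDatabase
     */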
    protected function getDB( $index ) {
        if ( $this->server ) {
            if ( $this->conn instanceof IDatabase ) {
                return $this->conn;
            } elseif ( $this->conn instanceof DBError ) {
                throw $this->conn;
            }

            try {
                $this->conn = MediaWikiServices::getInstance()->getDatabaseFactory()->create(
                    $this->server['type'],
                    $this->server
                );
            } catch ( DBError $e ) {
                $this->conn = $e;
                throw $e;
            }

            return $this->conn;
        } else {
            $lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
            $lb = is_string( $this->cluster )
                ? $lbFactory->getExternalLB( $this->cluster )
                : $lbFactory->getMainLB( $this->domain );

            if ( $lb->getServerType( ServerInfo::WRITER_INDEX ) !== 'sqlite' ) {
                // Keep a separate connection to avoid contention and deadlocks;
                // however, SQLite has the opposite behavior due to DB-level locking.
                $flags = $lb::CONN_TRX_AUTOCOMMIT;
            } else {
                // Job insertion will be deferred until the PRESEND stage to reduce contention.
                $flags = 0;
            }

            return $lb->getMaintenanceConnectionRef( $index, [], $this->domain, $flags );
        }
    }

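    /**
     * Temporarily disable DBO_TRX so that each query runs in its own transaction;
     * the original flag is restored when the returned ScopedCallback is consumed.
     *
     * @param IReadableDatabase $db
     * @return ScopedCallback
     */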
    private function getScopedNoTrxFlag( IReadableDatabase $db ) {
        $autoTrx = $db->getFlag( DBO_TRX ); // get current setting
        $db->clearFlag( DBO_TRX ); // make each query its own transaction

        return new ScopedCallback( static function () use ( $db, $autoTrx ) {
            if ( $autoTrx ) {
                $db->setFlag( DBO_TRX ); // restore old setting
            }
        } );
    }

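    /**
     * Get the cache key used to store queue status info.
     *
     * @param string $property
     * @return string
     */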
    private function getCacheKey( $property ) {
        $cluster = is_string( $this->cluster ) ? $this->cluster : 'main';

        return $this->wanCache->makeGlobalKey(
            'jobqueue',
            $this->domain,
            $cluster,
            $this->type,
            $property
        );
    }

    protected static function makeBlob( $params ) {
        if ( $params !== false ) {
            return serialize( $params );
        } else {
            return '';
        }
    }

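    /**
     * @param stdClass $row
     * @return RunnableJob
     */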
    protected function jobFromRow( $row ) {
        $params = ( (string)$row->job_params !== '' ) ? unserialize( $row->job_params ) : [];
        if ( !is_array( $params ) ) { // this shouldn't happen
            throw new UnexpectedValueException(
                "Could not unserialize job with ID '{$row->job_id}'." );
        }

        $params += [ 'namespace' => $row->job_namespace, 'title' => $row->job_title ];
        $job = $this->factoryJob( $row->job_cmd, $params );
        $job->setMetadata( 'id', $row->job_id );
        $job->setMetadata( 'timestamp', $row->job_timestamp );

        return $job;
    }

    protected function getDBException( DBError $e ) {
        return new JobQueueError( get_class( $e ) . ": " . $e->getMessage() );
    }

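    /**
     * Return the list of job fields that should be selected.
     *
     * @return string[]
     */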
    public static function selectFields() {
        return [
            'job_id',
            'job_cmd',
            'job_namespace',
            'job_title',
            'job_timestamp',
            'job_params',
            'job_random',
            'job_attempts',
            'job_token',
            'job_token_timestamp',
            'job_sha1',
        ];
    }
}