MediaWiki master
JobQueueDB.php
Go to the documentation of this file.
1<?php
29use Wikimedia\ScopedCallback;
30
/**
 * Database-backed job queue storage.
 *
 * Jobs are stored as rows of the `job` table; claiming is done by setting
 * job_token (see claimRandom()/claimOldest()).
 */
class JobQueueDB extends JobQueue {
	/* seconds to cache info without re-validating */
	private const CACHE_TTL_SHORT = 30;
	/* seconds a job can live once claimed */
	private const MAX_AGE_PRUNE = 7 * 24 * 3600;
	/**
	 * Highest job_random value: upper bound both for the random pivot used by
	 * claimRandom() and for the job_random value assigned in insertFields().
	 */
	private const MAX_JOB_RANDOM = 2_147_483_647;
	/* maximum number of rows to skip */
	private const MAX_OFFSET = 255;

	/** @var IMaintainableDatabase|DBError|null Cached handle (or cached failure) when 'server' is set; see getDB() */
	protected $conn;

	/** @var array|null Server configuration array; DBO_TRX/DBO_DEFAULT flags are stripped in the constructor */
	protected $server;
	/** @var string|null Name of an external DB cluster or null for the local DB cluster */
	protected $cluster;
	/**
	 * Additional parameters include:
	 *   - server  : Server configuration array for a dedicated job-queue DB handle.
	 *   - cluster : Name of an external DB cluster to resolve via LBFactory.
	 * If neither is given, the local DB cluster for the queue domain is used
	 * (see getDB()).
	 *
	 * @param array $params
	 */
	protected function __construct( array $params ) {
		parent::__construct( $params );

		if ( isset( $params['server'] ) ) {
			$this->server = $params['server'];
			// Always use autocommit mode, even if DBO_TRX is configured
			$this->server['flags'] ??= 0;
			$this->server['flags'] &= ~( IDatabase::DBO_TRX | IDatabase::DBO_DEFAULT );
		} elseif ( isset( $params['cluster'] ) && is_string( $params['cluster'] ) ) {
			$this->cluster = $params['cluster'];
		}
	}
78
79 protected function supportedOrders() {
80 return [ 'random', 'timestamp', 'fifo' ];
81 }
82
83 protected function optimalOrder() {
84 return 'random';
85 }
86
91 protected function doIsEmpty() {
92 $dbr = $this->getReplicaDB();
93 try {
94 // unclaimed job
95 $found = (bool)$dbr->newSelectQueryBuilder()
96 ->select( '1' )
97 ->from( 'job' )
98 ->where( [ 'job_cmd' => $this->type, 'job_token' => '' ] )
99 ->caller( __METHOD__ )->fetchField();
100 } catch ( DBError $e ) {
101 throw $this->getDBException( $e );
102 }
103
104 return !$found;
105 }
106
111 protected function doGetSize() {
112 $key = $this->getCacheKey( 'size' );
113
114 $size = $this->wanCache->get( $key );
115 if ( is_int( $size ) ) {
116 return $size;
117 }
118
119 $dbr = $this->getReplicaDB();
120 try {
121 $size = $dbr->newSelectQueryBuilder()
122 ->from( 'job' )
123 ->where( [ 'job_cmd' => $this->type, 'job_token' => '' ] )
124 ->caller( __METHOD__ )->fetchRowCount();
125 } catch ( DBError $e ) {
126 throw $this->getDBException( $e );
127 }
128 $this->wanCache->set( $key, $size, self::CACHE_TTL_SHORT );
129
130 return $size;
131 }
132
137 protected function doGetAcquiredCount() {
138 if ( $this->claimTTL <= 0 ) {
139 return 0; // no acknowledgements
140 }
141
142 $key = $this->getCacheKey( 'acquiredcount' );
143
144 $count = $this->wanCache->get( $key );
145 if ( is_int( $count ) ) {
146 return $count;
147 }
148
149 $dbr = $this->getReplicaDB();
150 try {
151 $count = $dbr->newSelectQueryBuilder()
152 ->from( 'job' )
153 ->where( [
154 'job_cmd' => $this->type,
155 $dbr->expr( 'job_token', '!=', '' ),
156 ] )
157 ->caller( __METHOD__ )->fetchRowCount();
158 } catch ( DBError $e ) {
159 throw $this->getDBException( $e );
160 }
161 $this->wanCache->set( $key, $count, self::CACHE_TTL_SHORT );
162
163 return $count;
164 }
165
172 protected function doGetAbandonedCount() {
173 if ( $this->claimTTL <= 0 ) {
174 return 0; // no acknowledgements
175 }
176
177 $key = $this->getCacheKey( 'abandonedcount' );
178
179 $count = $this->wanCache->get( $key );
180 if ( is_int( $count ) ) {
181 return $count;
182 }
183
184 $dbr = $this->getReplicaDB();
185 try {
186 $count = $dbr->newSelectQueryBuilder()
187 ->from( 'job' )
188 ->where(
189 [
190 'job_cmd' => $this->type,
191 $dbr->expr( 'job_token', '!=', '' ),
192 $dbr->expr( 'job_attempts', '>=', $this->maxTries ),
193 ]
194 )
195 ->caller( __METHOD__ )->fetchRowCount();
196 } catch ( DBError $e ) {
197 throw $this->getDBException( $e );
198 }
199
200 $this->wanCache->set( $key, $count, self::CACHE_TTL_SHORT );
201
202 return $count;
203 }
204
	/**
	 * @see JobQueue::doBatchPush()
	 *
	 * Defers the actual inserts to doBatchPushInternal() via an
	 * onTransactionPreCommitOrIdle() callback on the primary handle.
	 *
	 * @param IJobSpecification[] $jobs
	 * @param int $flags Bitfield (supports JobQueue::QOS_ATOMIC)
	 */
	protected function doBatchPush( array $jobs, $flags ) {
		// Silence expectations related to getting a primary DB, as we have to get a primary DB to insert the job.
		$transactionProfiler = Profiler::instance()->getTransactionProfiler();
		$scope = $transactionProfiler->silenceForScope();
		$dbw = $this->getPrimaryDB();
		ScopedCallback::consume( $scope );
		// In general, there will be two cases here:
		// a) sqlite; DB connection is probably a regular round-aware handle.
		// If the connection is busy with a transaction, then defer the job writes
		// until right before the main round commit step. Any errors that bubble
		// up will rollback the main commit round.
		// b) mysql/postgres; DB connection is generally a separate CONN_TRX_AUTOCOMMIT handle.
		// No transaction is active nor will be started by writes, so enqueue the jobs
		// now so that any errors will show up immediately as the interface expects. Any
		// errors that bubble up will rollback the main commit round.
		$fname = __METHOD__;
		$dbw->onTransactionPreCommitOrIdle(
			function ( IDatabase $dbw ) use ( $jobs, $flags, $fname ) {
				$this->doBatchPushInternal( $dbw, $jobs, $flags, $fname );
			},
			$fname
		);
	}
235
247 public function doBatchPushInternal( IDatabase $dbw, array $jobs, $flags, $method ) {
248 if ( $jobs === [] ) {
249 return;
250 }
251
252 $rowSet = []; // (sha1 => job) map for jobs that are de-duplicated
253 $rowList = []; // list of jobs for jobs that are not de-duplicated
254 foreach ( $jobs as $job ) {
255 $row = $this->insertFields( $job, $dbw );
256 if ( $job->ignoreDuplicates() ) {
257 $rowSet[$row['job_sha1']] = $row;
258 } else {
259 $rowList[] = $row;
260 }
261 }
262
263 if ( $flags & self::QOS_ATOMIC ) {
264 $dbw->startAtomic( $method ); // wrap all the job additions in one transaction
265 }
266 try {
267 // Strip out any duplicate jobs that are already in the queue...
268 if ( count( $rowSet ) ) {
269 $res = $dbw->newSelectQueryBuilder()
270 ->select( 'job_sha1' )
271 ->from( 'job' )
272 ->where(
273 [
274 // No job_type condition since it's part of the job_sha1 hash
275 'job_sha1' => array_map( 'strval', array_keys( $rowSet ) ),
276 'job_token' => '' // unclaimed
277 ]
278 )
279 ->caller( $method )->fetchResultSet();
280 foreach ( $res as $row ) {
281 wfDebug( "Job with hash '{$row->job_sha1}' is a duplicate." );
282 unset( $rowSet[$row->job_sha1] ); // already enqueued
283 }
284 }
285 // Build the full list of job rows to insert
286 $rows = array_merge( $rowList, array_values( $rowSet ) );
287 // Silence expectations related to inserting to the job table, because we have to perform the inserts to
288 // track the job.
289 $transactionProfiler = Profiler::instance()->getTransactionProfiler();
290 $scope = $transactionProfiler->silenceForScope();
291 // Insert the job rows in chunks to avoid replica DB lag...
292 foreach ( array_chunk( $rows, 50 ) as $rowBatch ) {
294 ->insertInto( 'job' )
295 ->rows( $rowBatch )
296 ->caller( $method )->execute();
297 }
298 ScopedCallback::consume( $scope );
299 $this->incrStats( 'inserts', $this->type, count( $rows ) );
300 $this->incrStats( 'dupe_inserts', $this->type,
301 count( $rowSet ) + count( $rowList ) - count( $rows )
302 );
303 } catch ( DBError $e ) {
304 throw $this->getDBException( $e );
305 }
306 if ( $flags & self::QOS_ATOMIC ) {
307 $dbw->endAtomic( $method );
308 }
309 }
310
315 protected function doPop() {
316 $job = false; // job popped off
317 try {
318 $uuid = wfRandomString( 32 ); // pop attempt
319 do { // retry when our row is invalid or deleted as a duplicate
320 // Try to reserve a row in the DB...
321 if ( in_array( $this->order, [ 'fifo', 'timestamp' ] ) ) {
322 $row = $this->claimOldest( $uuid );
323 } else { // random first
324 $rand = mt_rand( 0, self::MAX_JOB_RANDOM ); // encourage concurrent UPDATEs
325 $gte = (bool)mt_rand( 0, 1 ); // find rows with rand before/after $rand
326 $row = $this->claimRandom( $uuid, $rand, $gte );
327 }
328 // Check if we found a row to reserve...
329 if ( !$row ) {
330 break; // nothing to do
331 }
332 $this->incrStats( 'pops', $this->type );
333
334 // Get the job object from the row...
335 $job = $this->jobFromRow( $row );
336 break; // done
337 } while ( true );
338
339 if ( !$job || mt_rand( 0, 9 ) == 0 ) {
340 // Handled jobs that need to be recycled/deleted;
341 // any recycled jobs will be picked up next attempt
343 }
344 } catch ( DBError $e ) {
345 throw $this->getDBException( $e );
346 }
347
348 return $job;
349 }
350
	/**
	 * Reserve a row with a single UPDATE without holding row locks over RTTs...
	 *
	 * Picks a candidate row near a random job_random pivot (or by small OFFSET
	 * for larger queues), then claims it by primary key; loops if raced out.
	 *
	 * @param string $uuid 32 char hex string used as the claim token
	 * @param int $rand Random unsigned integer pivot for job_random
	 * @param bool $gte Whether to search job_random >= $rand (else <=)
	 * @return stdClass|false Claimed row, or false if none could be claimed
	 */
	protected function claimRandom( $uuid, $rand, $gte ) {
		$dbw = $this->getPrimaryDB();
		// Check cache to see if the queue has <= OFFSET items
		$tinyQueue = $this->wanCache->get( $this->getCacheKey( 'small' ) );

		$invertedDirection = false; // whether one job_random direction was already scanned
		// This uses a replication safe method for acquiring jobs. One could use UPDATE+LIMIT
		// instead, but that either uses ORDER BY (in which case it deadlocks in MySQL) or is
		// not replication safe. Due to https://bugs.mysql.com/bug.php?id=6980, subqueries cannot
		// be used here with MySQL.
		do {
			if ( $tinyQueue ) { // queue has <= MAX_OFFSET rows
				// For small queues, using OFFSET will overshoot and return no rows more often.
				// Instead, this uses job_random to pick a row (possibly checking both directions).
				$row = $dbw->newSelectQueryBuilder()
					->select( self::selectFields() )
					->from( 'job' )
					->where(
						[
							'job_cmd' => $this->type,
							'job_token' => '', // unclaimed
							$dbw->expr( 'job_random', $gte ? '>=' : '<=', $rand )
						]
					)
					->orderBy(
						'job_random',
						$gte ? SelectQueryBuilder::SORT_ASC : SelectQueryBuilder::SORT_DESC
					)
					->caller( __METHOD__ )->fetchRow();
				if ( !$row && !$invertedDirection ) {
					$gte = !$gte;
					$invertedDirection = true;
					continue; // try the other direction
				}
			} else { // table *may* have >= MAX_OFFSET rows
				// T44614: "ORDER BY job_random" with a job_random inequality causes high CPU
				// in MySQL if there are many rows for some reason. This uses a small OFFSET
				// instead of job_random for reducing excess claim retries.
				$row = $dbw->newSelectQueryBuilder()
					->select( self::selectFields() )
					->from( 'job' )
					->where(
						[
							'job_cmd' => $this->type,
							'job_token' => '', // unclaimed
						]
					)
					->offset( mt_rand( 0, self::MAX_OFFSET ) )
					->caller( __METHOD__ )->fetchRow();
				if ( !$row ) {
					$tinyQueue = true; // we know the queue must have <= MAX_OFFSET rows
					$this->wanCache->set( $this->getCacheKey( 'small' ), 1, 30 );
					continue; // use job_random
				}
			}

			if ( !$row ) {
				break;
			}

			// Claim the selected row by PK; job_token = '' guards against claiming
			// a row some other runner just took
			$dbw->newUpdateQueryBuilder()
				->update( 'job' ) // update by PK
				->set( [
					'job_token' => $uuid,
					'job_token_timestamp' => $dbw->timestamp(),
					'job_attempts' => new RawSQLValue( 'job_attempts+1' ),
				] )
				->where( [
					'job_cmd' => $this->type,
					'job_id' => $row->job_id,
					'job_token' => ''
				] )
				->caller( __METHOD__ )->execute();

			// This might get raced out by another runner when claiming the previously
			// selected row. The use of job_random should minimize this problem, however.
			if ( !$dbw->affectedRows() ) {
				$row = false; // raced out
			}
		} while ( !$row );

		return $row;
	}
442
	/**
	 * Reserve a row with a single UPDATE without holding row locks over RTTs...
	 *
	 * Claims the lowest-job_id unclaimed row: on MySQL via UPDATE ... ORDER BY
	 * ... LIMIT 1 (subqueries on the updated table are not allowed there), and
	 * elsewhere via an UPDATE with a job_id subquery. Loops if the claimed row
	 * was deleted as a duplicate before it could be fetched back.
	 *
	 * @param string $uuid 32 char hex string used as the claim token
	 * @return stdClass|false Claimed row, or false if none could be claimed
	 */
	protected function claimOldest( $uuid ) {
		$dbw = $this->getPrimaryDB();

		$row = false; // the row acquired
		do {
			if ( $dbw->getType() === 'mysql' ) {
				// Per https://bugs.mysql.com/bug.php?id=6980, we can't use subqueries on the
				// same table being changed in an UPDATE query in MySQL (gives Error: 1093).
				// Postgres has no such limitation. However, MySQL offers an
				// alternative here by supporting ORDER BY + LIMIT for UPDATE queries.
				$dbw->query( "UPDATE {$dbw->tableName( 'job' )} " .
					"SET " .
					"job_token = {$dbw->addQuotes( $uuid ) }, " .
					"job_token_timestamp = {$dbw->addQuotes( $dbw->timestamp() )}, " .
					"job_attempts = job_attempts+1 " .
					"WHERE ( " .
					"job_cmd = {$dbw->addQuotes( $this->type )} " .
					"AND job_token = {$dbw->addQuotes( '' )} " .
					") ORDER BY job_id ASC LIMIT 1",
					__METHOD__
				);
			} else {
				// Use a subquery to find the job, within an UPDATE to claim it.
				// This uses as much of the DB wrapper functions as possible.
				$qb = $dbw->newSelectQueryBuilder()
					->select( 'job_id' )
					->from( 'job' )
					->where( [ 'job_cmd' => $this->type, 'job_token' => '' ] )
					->orderBy( 'job_id', SelectQueryBuilder::SORT_ASC )
					->limit( 1 );

				$dbw->newUpdateQueryBuilder()
					->update( 'job' )
					->set( [
						'job_token' => $uuid,
						'job_token_timestamp' => $dbw->timestamp(),
						'job_attempts' => new RawSQLValue( 'job_attempts+1' ),
					] )
					->where( [ 'job_id' => new RawSQLValue( '(' . $qb->getSQL() . ')' ) ] )
					->caller( __METHOD__ )->execute();
			}

			// No unclaimed rows were left to take
			if ( !$dbw->affectedRows() ) {
				break;
			}

			// Fetch any row that we just reserved...
			$row = $dbw->newSelectQueryBuilder()
				->select( self::selectFields() )
				->from( 'job' )
				->where( [ 'job_cmd' => $this->type, 'job_token' => $uuid ] )
				->caller( __METHOD__ )->fetchRow();
			if ( !$row ) { // raced out by duplicate job removal
				wfDebug( "Row deleted as duplicate by another process." );
			}
		} while ( !$row );

		return $row;
	}
508
515 protected function doAck( RunnableJob $job ) {
516 $id = $job->getMetadata( 'id' );
517 if ( $id === null ) {
518 throw new UnexpectedValueException( "Job of type '{$job->getType()}' has no ID." );
519 }
520
521 $dbw = $this->getPrimaryDB();
522 try {
523 // Delete a row with a single DELETE without holding row locks over RTTs...
524 $dbw->newDeleteQueryBuilder()
525 ->deleteFrom( 'job' )
526 ->where( [ 'job_cmd' => $this->type, 'job_id' => $id ] )
527 ->caller( __METHOD__ )->execute();
528
529 $this->incrStats( 'acks', $this->type );
530 } catch ( DBError $e ) {
531 throw $this->getDBException( $e );
532 }
533 }
534
542 // Callers should call JobQueueGroup::push() before this method so that if the
543 // insert fails, the de-duplication registration will be aborted. Since the insert
544 // is deferred till "transaction idle", do the same here, so that the ordering is
545 // maintained. Having only the de-duplication registration succeed would cause
546 // jobs to become no-ops without any actual jobs that made them redundant.
547 $dbw = $this->getPrimaryDB();
548 $dbw->onTransactionCommitOrIdle(
549 function () use ( $job ) {
550 parent::doDeduplicateRootJob( $job );
551 },
552 __METHOD__
553 );
554
555 return true;
556 }
557
562 protected function doDelete() {
563 $dbw = $this->getPrimaryDB();
564 try {
565 $dbw->newDeleteQueryBuilder()
566 ->deleteFrom( 'job' )
567 ->where( [ 'job_cmd' => $this->type ] )
568 ->caller( __METHOD__ )->execute();
569 } catch ( DBError $e ) {
570 throw $this->getDBException( $e );
571 }
572
573 return true;
574 }
575
580 protected function doWaitForBackups() {
581 if ( $this->server ) {
582 return; // not using LBFactory instance
583 }
584
585 $lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
586 $lbFactory->waitForReplication();
587 }
588
592 protected function doFlushCaches() {
593 foreach ( [ 'size', 'acquiredcount' ] as $type ) {
594 $this->wanCache->delete( $this->getCacheKey( $type ) );
595 }
596 }
597
602 public function getAllQueuedJobs() {
603 return $this->getJobIterator( [ 'job_cmd' => $this->getType(), 'job_token' => '' ] );
604 }
605
610 public function getAllAcquiredJobs() {
611 $dbr = $this->getReplicaDB();
612 return $this->getJobIterator( [ 'job_cmd' => $this->getType(), $dbr->expr( 'job_token', '>', '' ) ] );
613 }
614
619 public function getAllAbandonedJobs() {
620 $dbr = $this->getReplicaDB();
621 return $this->getJobIterator( [
622 'job_cmd' => $this->getType(),
623 $dbr->expr( 'job_token', '>', '' ),
624 $dbr->expr( 'job_attempts', '>=', intval( $this->maxTries ) ),
625 ] );
626 }
627
632 protected function getJobIterator( array $conds ) {
633 $dbr = $this->getReplicaDB();
634 $qb = $dbr->newSelectQueryBuilder()
635 ->select( self::selectFields() )
636 ->from( 'job' )
637 ->where( $conds );
638 try {
639 return new MappedIterator(
640 $qb->caller( __METHOD__ )->fetchResultSet(),
641 function ( $row ) {
642 return $this->jobFromRow( $row );
643 }
644 );
645 } catch ( DBError $e ) {
646 throw $this->getDBException( $e );
647 }
648 }
649
650 public function getCoalesceLocationInternal() {
651 if ( $this->server ) {
652 return null; // not using the LBFactory instance
653 }
654
655 return is_string( $this->cluster )
656 ? "DBCluster:{$this->cluster}:{$this->domain}"
657 : "LBFactory:{$this->domain}";
658 }
659
660 protected function doGetSiblingQueuesWithJobs( array $types ) {
661 $dbr = $this->getReplicaDB();
662 // @note: this does not check whether the jobs are claimed or not.
663 // This is useful so JobQueueGroup::pop() also sees queues that only
664 // have stale jobs. This lets recycleAndDeleteStaleJobs() re-enqueue
665 // failed jobs so that they can be popped again for that edge case.
666 $res = $dbr->newSelectQueryBuilder()
667 ->select( 'job_cmd' )
668 ->distinct()
669 ->from( 'job' )
670 ->where( [ 'job_cmd' => $types ] )
671 ->caller( __METHOD__ )->fetchResultSet();
672
673 $types = [];
674 foreach ( $res as $row ) {
675 $types[] = $row->job_cmd;
676 }
677
678 return $types;
679 }
680
681 protected function doGetSiblingQueueSizes( array $types ) {
682 $dbr = $this->getReplicaDB();
683
684 $res = $dbr->newSelectQueryBuilder()
685 ->select( [ 'job_cmd', 'count' => 'COUNT(*)' ] )
686 ->from( 'job' )
687 ->where( [ 'job_cmd' => $types ] )
688 ->groupBy( 'job_cmd' )
689 ->caller( __METHOD__ )->fetchResultSet();
690
691 $sizes = [];
692 foreach ( $res as $row ) {
693 $sizes[$row->job_cmd] = (int)$row->count;
694 }
695
696 return $sizes;
697 }
698
	/**
	 * Recycle or destroy any jobs that have been claimed for too long.
	 *
	 * Runs under an advisory DB lock so only one runner does maintenance at a
	 * time. First, claims older than claimTTL with retries left are released
	 * (job_token reset) so other runners can pick them up; then rows past
	 * MAX_AGE_PRUNE (and, when claimTTL is enabled, past maxTries) are deleted.
	 *
	 * @return int Number of jobs recycled/deleted
	 * @throws JobQueueError On database failure
	 */
	public function recycleAndDeleteStaleJobs() {
		$now = time();
		$count = 0; // affected rows
		$dbw = $this->getPrimaryDB();

		try {
			// Advisory lock (1s timeout): bail out if another runner holds it
			if ( !$dbw->lock( "jobqueue-recycle-{$this->type}", __METHOD__, 1 ) ) {
				return $count; // already in progress
			}

			// Remove claims on jobs acquired for too long if enabled...
			if ( $this->claimTTL > 0 ) {
				$claimCutoff = $dbw->timestamp( $now - $this->claimTTL );
				// Get the IDs of jobs that have be claimed but not finished after too long.
				// These jobs can be recycled into the queue by expiring the claim. Selecting
				// the IDs first means that the UPDATE can be done by primary key (less deadlocks).
				$res = $dbw->newSelectQueryBuilder()
					->select( 'job_id' )
					->from( 'job' )
					->where(
						[
							'job_cmd' => $this->type,
							$dbw->expr( 'job_token', '!=', '' ), // was acquired
							$dbw->expr( 'job_token_timestamp', '<', $claimCutoff ), // stale
							$dbw->expr( 'job_attempts', '<', $this->maxTries ), // retries left
						]
					)
					->caller( __METHOD__ )->fetchResultSet();
				$ids = array_map(
					static function ( $o ) {
						return $o->job_id;
					}, iterator_to_array( $res )
				);
				if ( count( $ids ) ) {
					// Reset job_token for these jobs so that other runners will pick them up.
					// Set the timestamp to the current time, as it is useful to now that the job
					// was already tried before (the timestamp becomes the "released" time).
					$dbw->newUpdateQueryBuilder()
						->update( 'job' )
						->set( [
							'job_token' => '',
							'job_token_timestamp' => $dbw->timestamp( $now ) // time of release
						] )
						->where( [
							'job_id' => $ids,
							$dbw->expr( 'job_token', '!=', '' ),
						] )
						->caller( __METHOD__ )->execute();

					$affected = $dbw->affectedRows();
					$count += $affected;
					$this->incrStats( 'recycles', $this->type, $affected );
				}
			}

			// Just destroy any stale jobs...
			$pruneCutoff = $dbw->timestamp( $now - self::MAX_AGE_PRUNE );
			$qb = $dbw->newSelectQueryBuilder()
				->select( 'job_id' )
				->from( 'job' )
				->where(
					[
						'job_cmd' => $this->type,
						$dbw->expr( 'job_token', '!=', '' ), // was acquired
						$dbw->expr( 'job_token_timestamp', '<', $pruneCutoff ) // stale
					]
				);
			if ( $this->claimTTL > 0 ) { // only prune jobs attempted too many times...
				$qb->andWhere( $dbw->expr( 'job_attempts', '>=', $this->maxTries ) );
			}
			// Get the IDs of jobs that are considered stale and should be removed. Selecting
			// the IDs first means that the UPDATE can be done by primary key (less deadlocks).
			$res = $qb->caller( __METHOD__ )->fetchResultSet();
			$ids = array_map(
				static function ( $o ) {
					return $o->job_id;
				}, iterator_to_array( $res )
			);
			if ( count( $ids ) ) {
				$dbw->newDeleteQueryBuilder()
					->deleteFrom( 'job' )
					->where( [ 'job_id' => $ids ] )
					->caller( __METHOD__ )->execute();
				$affected = $dbw->affectedRows();
				$count += $affected;
				$this->incrStats( 'abandons', $this->type, $affected );
			}

			$dbw->unlock( "jobqueue-recycle-{$this->type}", __METHOD__ );
		} catch ( DBError $e ) {
			throw $this->getDBException( $e );
		}

		return $count;
	}
799
806 return [
807 // Fields that describe the nature of the job
808 'job_cmd' => $job->getType(),
809 'job_namespace' => $job->getParams()['namespace'] ?? NS_SPECIAL,
810 'job_title' => $job->getParams()['title'] ?? '',
811 'job_params' => self::makeBlob( $job->getParams() ),
812 // Additional job metadata
813 'job_timestamp' => $db->timestamp(),
814 'job_sha1' => Wikimedia\base_convert(
815 sha1( serialize( $job->getDeduplicationInfo() ) ),
816 16, 36, 31
817 ),
818 'job_random' => mt_rand( 0, self::MAX_JOB_RANDOM )
819 ];
820 }
821
826 protected function getReplicaDB() {
827 try {
828 return $this->getDB( DB_REPLICA );
829 } catch ( DBConnectionError $e ) {
830 throw new JobQueueConnectionError( "DBConnectionError:" . $e->getMessage() );
831 }
832 }
833
839 protected function getPrimaryDB() {
840 try {
841 return $this->getDB( DB_PRIMARY );
842 } catch ( DBConnectionError $e ) {
843 throw new JobQueueConnectionError( "DBConnectionError:" . $e->getMessage() );
844 }
845 }
846
	/**
	 * Get a DB handle for the queue's storage.
	 *
	 * With a 'server' config, a dedicated connection is created once and
	 * cached on $this->conn (a connection failure is cached too and re-thrown
	 * on later calls). Otherwise a handle is taken from the external cluster
	 * or main load balancer for $this->domain.
	 *
	 * @param int $index DB_REPLICA or DB_PRIMARY
	 * @return IDatabase
	 * @throws DBError On connection failure when 'server' is configured
	 */
	protected function getDB( $index ) {
		if ( $this->server ) {
			if ( $this->conn instanceof IDatabase ) {
				return $this->conn;
			} elseif ( $this->conn instanceof DBError ) {
				// Re-throw the cached connection failure instead of retrying
				throw $this->conn;
			}

			try {
				$this->conn = MediaWikiServices::getInstance()->getDatabaseFactory()->create(
					$this->server['type'],
					$this->server
				);
			} catch ( DBError $e ) {
				// Cache the failure so later calls fail fast
				$this->conn = $e;
				throw $e;
			}

			return $this->conn;
		} else {
			$lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
			$lb = is_string( $this->cluster )
				? $lbFactory->getExternalLB( $this->cluster )
				: $lbFactory->getMainLB( $this->domain );

			if ( $lb->getServerType( ServerInfo::WRITER_INDEX ) !== 'sqlite' ) {
				// Keep a separate connection to avoid contention and deadlocks;
				// However, SQLite has the opposite behavior due to DB-level locking.
				$flags = $lb::CONN_TRX_AUTOCOMMIT;
			} else {
				// Jobs insertion will be deferred until the PRESEND stage to reduce contention.
				$flags = 0;
			}

			return $lb->getMaintenanceConnectionRef( $index, [], $this->domain, $flags );
		}
	}
888
893 private function getCacheKey( $property ) {
894 $cluster = is_string( $this->cluster ) ? $this->cluster : 'main';
895
896 return $this->wanCache->makeGlobalKey(
897 'jobqueue',
898 $this->domain,
899 $cluster,
900 $this->type,
901 $property
902 );
903 }
904
909 protected static function makeBlob( $params ) {
910 if ( $params !== false ) {
911 return serialize( $params );
912 } else {
913 return '';
914 }
915 }
916
921 protected function jobFromRow( $row ) {
922 $params = ( (string)$row->job_params !== '' ) ? unserialize( $row->job_params ) : [];
923 if ( !is_array( $params ) ) { // this shouldn't happen
924 throw new UnexpectedValueException(
925 "Could not unserialize job with ID '{$row->job_id}'." );
926 }
927
928 $params += [ 'namespace' => $row->job_namespace, 'title' => $row->job_title ];
929 $job = $this->factoryJob( $row->job_cmd, $params );
930 $job->setMetadata( 'id', $row->job_id );
931 $job->setMetadata( 'timestamp', $row->job_timestamp );
932
933 return $job;
934 }
935
940 protected function getDBException( DBError $e ) {
941 return new JobQueueError( get_class( $e ) . ": " . $e->getMessage() );
942 }
943
949 public static function selectFields() {
950 return [
951 'job_id',
952 'job_cmd',
953 'job_namespace',
954 'job_title',
955 'job_timestamp',
956 'job_params',
957 'job_random',
958 'job_attempts',
959 'job_token',
960 'job_token_timestamp',
961 'job_sha1',
962 ];
963 }
964}
const NS_SPECIAL
Definition Defines.php:54
wfDebug( $text, $dest='all', array $context=[])
Sends a line to the debug log if enabled or, optionally, to a comment in output.
wfRandomString( $length=32)
Get a random string containing a number of pseudo-random hex characters.
array $params
The job parameters.
getCacheKey()
Get the cache key used to store status.
Database-backed job queue storage.
claimOldest( $uuid)
Reserve a row with a single UPDATE without holding row locks over RTTs...
doAck(RunnableJob $job)
supportedOrders()
Get the allowed queue orders for configuration validation.
doGetSiblingQueueSizes(array $types)
__construct(array $params)
Additional parameters include:
getDBException(DBError $e)
getAllAbandonedJobs()
jobFromRow( $row)
doBatchPush(array $jobs, $flags)
insertFields(IJobSpecification $job, IReadableDatabase $db)
static makeBlob( $params)
claimRandom( $uuid, $rand, $gte)
Reserve a row with a single UPDATE without holding row locks over RTTs...
doGetSiblingQueuesWithJobs(array $types)
recycleAndDeleteStaleJobs()
Recycle or destroy any jobs that have been claimed for too long.
doGetAbandonedCount()
doBatchPushInternal(IDatabase $dbw, array $jobs, $flags, $method)
This function should not be called outside of JobQueueDB.
optimalOrder()
Get the default queue order to use if configuration does not specify one.
getDB( $index)
string null $cluster
Name of an external DB cluster or null for the local DB cluster.
IMaintainableDatabase DBError null $conn
getCoalesceLocationInternal()
Do not use this function outside of JobQueue/JobQueueGroup.
doDeduplicateRootJob(IJobSpecification $job)
static selectFields()
Return the list of job fields that should be selected.
getJobIterator(array $conds)
array null $server
Server configuration array.
Base class for queueing and running background jobs from a storage backend.
Definition JobQueue.php:45
incrStats( $key, $type, $delta=1)
Call StatsdDataFactoryInterface::updateCount() for the queue overall and for the queue type.
Definition JobQueue.php:779
string $type
Job type.
Definition JobQueue.php:49
factoryJob( $command, $params)
Definition JobQueue.php:745
Convenience class for generating iterators from iterators.
Service locator for MediaWiki core services.
static instance()
Definition Profiler.php:105
Database error base class.
Definition DBError.php:36
Raw SQL value to be used in query builders.
Build SELECT queries with a fluent interface.
Container for accessing information about the database servers in a database cluster.
Interface for serializable objects that describe a job queue task.
Job that has a run() method and metadata accessors for JobQueue::pop() and JobQueue::ack().
Interface to a relational database.
Definition IDatabase.php:45
endAtomic( $fname=__METHOD__)
Ends an atomic section of SQL statements.
startAtomic( $fname=__METHOD__, $cancelable=self::ATOMIC_NOT_CANCELABLE)
Begin an atomic section of SQL statements.
newInsertQueryBuilder()
Get an InsertQueryBuilder bound to this connection.
Advanced database interface for IDatabase handles that include maintenance methods.
A database connection without write operations.
newSelectQueryBuilder()
Create an empty SelectQueryBuilder which can be used to run queries against this connection.
timestamp( $ts=0)
Convert a timestamp in one of the formats accepted by ConvertibleTimestamp to the format used for ins...
const DB_REPLICA
Definition defines.php:26
const DB_PRIMARY
Definition defines.php:28
if(count( $args)< 1) $job