MediaWiki master
JobQueueDB.php
<?php

use MediaWiki\MediaWikiServices;
use Wikimedia\Rdbms\Database;
use Wikimedia\Rdbms\DBConnectionError;
use Wikimedia\Rdbms\DBError;
use Wikimedia\Rdbms\IDatabase;
use Wikimedia\Rdbms\IMaintainableDatabase;
use Wikimedia\ScopedCallback;

/**
 * Class to handle job queues stored in the DB
 */
class JobQueueDB extends JobQueue {
	private const CACHE_TTL_SHORT = 30; // integer; seconds to cache info without re-validating
	private const MAX_AGE_PRUNE = 604800; // integer; seconds a job can live once claimed
	private const MAX_JOB_RANDOM = 2147483647; // integer; 2^31 - 1, used for job_random
	private const MAX_OFFSET = 255; // integer; maximum number of rows to skip

	/** @var IMaintainableDatabase|DBError|null */
	protected $conn;

	/** @var array|null Server configuration array */
	protected $server;
	/** @var string|null Name of an external DB cluster or null for the local DB cluster */
	protected $cluster;

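	/**
	 * Additional parameters include:
	 *   - server : Server configuration array for a standalone DB handle (takes
	 *              precedence over "cluster")
	 *   - cluster : Name of an external DB cluster registered via LBFactory
	 *
	 * Illustrative example of selecting this class for a job type via $wgJobTypeConf
	 * (an assumption for documentation purposes, not part of this file):
	 * @code
	 * $wgJobTypeConf['default'] = [ 'class' => JobQueueDB::class, 'order' => 'random' ];
	 * @endcode
	 *
	 * @param array $params
	 */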
	protected function __construct( array $params ) {
		parent::__construct( $params );

		if ( isset( $params['server'] ) ) {
			$this->server = $params['server'];
		} elseif ( isset( $params['cluster'] ) && is_string( $params['cluster'] ) ) {
			$this->cluster = $params['cluster'];
		}
	}

	protected function supportedOrders() {
		return [ 'random', 'timestamp', 'fifo' ];
	}

	protected function optimalOrder() {
		return 'random';
	}

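	/**
	 * @see JobQueue::isEmpty()
	 * @return bool
	 */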
	protected function doIsEmpty() {
		$dbr = $this->getReplicaDB();
		$scope = $this->getScopedNoTrxFlag( $dbr );
		try {
			$found = $dbr->selectField( // unclaimed job
				'job', '1', [ 'job_cmd' => $this->type, 'job_token' => '' ], __METHOD__
			);
		} catch ( DBError $e ) {
			throw $this->getDBException( $e );
		}

		return !$found;
	}

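	/**
	 * @see JobQueue::getSize()
	 * @return int
	 */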
	protected function doGetSize() {
		$key = $this->getCacheKey( 'size' );

		$size = $this->wanCache->get( $key );
		if ( is_int( $size ) ) {
			return $size;
		}

		$dbr = $this->getReplicaDB();
		$scope = $this->getScopedNoTrxFlag( $dbr );
		try {
			$size = (int)$dbr->selectField( 'job', 'COUNT(*)',
				[ 'job_cmd' => $this->type, 'job_token' => '' ],
				__METHOD__
			);
		} catch ( DBError $e ) {
			throw $this->getDBException( $e );
		}
		$this->wanCache->set( $key, $size, self::CACHE_TTL_SHORT );

		return $size;
	}

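	/**
	 * @see JobQueue::getAcquiredCount()
	 * @return int
	 */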
	protected function doGetAcquiredCount() {
		if ( $this->claimTTL <= 0 ) {
			return 0; // no acknowledgements
		}

		$key = $this->getCacheKey( 'acquiredcount' );

		$count = $this->wanCache->get( $key );
		if ( is_int( $count ) ) {
			return $count;
		}

		$dbr = $this->getReplicaDB();
		$scope = $this->getScopedNoTrxFlag( $dbr );
		try {
			$count = (int)$dbr->selectField( 'job', 'COUNT(*)',
				[ 'job_cmd' => $this->type, "job_token != {$dbr->addQuotes( '' )}" ],
				__METHOD__
			);
		} catch ( DBError $e ) {
			throw $this->getDBException( $e );
		}
		$this->wanCache->set( $key, $count, self::CACHE_TTL_SHORT );

		return $count;
	}

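	/**
	 * @see JobQueue::getAbandonedCount()
	 * @return int
	 */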
	protected function doGetAbandonedCount() {
		if ( $this->claimTTL <= 0 ) {
			return 0; // no acknowledgements
		}

		$key = $this->getCacheKey( 'abandonedcount' );

		$count = $this->wanCache->get( $key );
		if ( is_int( $count ) ) {
			return $count;
		}

		$dbr = $this->getReplicaDB();
		$scope = $this->getScopedNoTrxFlag( $dbr );
		try {
			$count = (int)$dbr->selectField( 'job', 'COUNT(*)',
				[
					'job_cmd' => $this->type,
					"job_token != {$dbr->addQuotes( '' )}",
					"job_attempts >= " . $dbr->addQuotes( $this->maxTries )
				],
				__METHOD__
			);
		} catch ( DBError $e ) {
			throw $this->getDBException( $e );
		}

		$this->wanCache->set( $key, $count, self::CACHE_TTL_SHORT );

		return $count;
	}

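	/**
	 * @see JobQueue::batchPush()
	 * @param IJobSpecification[] $jobs
	 * @param int $flags
	 */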
	protected function doBatchPush( array $jobs, $flags ) {
		$dbw = $this->getMasterDB();
		$scope = $this->getScopedNoTrxFlag( $dbw );
		// In general, there will be two cases here:
		// a) sqlite; DB connection is probably a regular round-aware handle.
		//    If the connection is busy with a transaction, then defer the job writes
		//    until right before the main round commit step. Any errors that bubble
		//    up will rollback the main commit round.
		// b) mysql/postgres; DB connection is generally a separate CONN_TRX_AUTOCOMMIT handle.
		//    No transaction is active nor will be started by writes, so enqueue the jobs
		//    now so that any errors will show up immediately as the interface expects. Any
		//    errors that bubble up will rollback the main commit round.
		$fname = __METHOD__;
		$dbw->onTransactionPreCommitOrIdle(
			function ( IDatabase $dbw ) use ( $jobs, $flags, $fname ) {
				$this->doBatchPushInternal( $dbw, $jobs, $flags, $fname );
			},
			$fname
		);
	}

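	/**
	 * This function should not be called outside of JobQueueDB
	 *
	 * @param IDatabase $dbw
	 * @param IJobSpecification[] $jobs
	 * @param int $flags
	 * @param string $method
	 * @throws JobQueueError
	 */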
	public function doBatchPushInternal( IDatabase $dbw, array $jobs, $flags, $method ) {
		if ( $jobs === [] ) {
			return;
		}

		$rowSet = []; // (sha1 => job) map for jobs that are de-duplicated
		$rowList = []; // list of jobs for jobs that are not de-duplicated
		foreach ( $jobs as $job ) {
			$row = $this->insertFields( $job, $dbw );
			if ( $job->ignoreDuplicates() ) {
				$rowSet[$row['job_sha1']] = $row;
			} else {
				$rowList[] = $row;
			}
		}

		if ( $flags & self::QOS_ATOMIC ) {
			$dbw->startAtomic( $method ); // wrap all the job additions in one transaction
		}
		try {
			// Strip out any duplicate jobs that are already in the queue...
			if ( count( $rowSet ) ) {
				$res = $dbw->select( 'job', 'job_sha1',
					[
						// No job_type condition since it's part of the job_sha1 hash
						'job_sha1' => array_map( 'strval', array_keys( $rowSet ) ),
						'job_token' => '' // unclaimed
					],
					$method
				);
				foreach ( $res as $row ) {
					wfDebug( "Job with hash '{$row->job_sha1}' is a duplicate." );
					unset( $rowSet[$row->job_sha1] ); // already enqueued
				}
			}
			// Build the full list of job rows to insert
			$rows = array_merge( $rowList, array_values( $rowSet ) );
			// Insert the job rows in chunks to avoid replica DB lag...
			foreach ( array_chunk( $rows, 50 ) as $rowBatch ) {
				$dbw->insert( 'job', $rowBatch, $method );
			}
			$this->incrStats( 'inserts', $this->type, count( $rows ) );
			$this->incrStats( 'dupe_inserts', $this->type,
				count( $rowSet ) + count( $rowList ) - count( $rows )
			);
		} catch ( DBError $e ) {
			throw $this->getDBException( $e );
		}
		if ( $flags & self::QOS_ATOMIC ) {
			$dbw->endAtomic( $method );
		}
	}

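	/**
	 * @see JobQueue::pop()
	 * @return RunnableJob|false
	 */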
	protected function doPop() {
		$dbw = $this->getMasterDB();
		$scope = $this->getScopedNoTrxFlag( $dbw );

		$job = false; // job popped off
		try {
			$uuid = wfRandomString( 32 ); // pop attempt
			do { // retry when our row is invalid or deleted as a duplicate
				// Try to reserve a row in the DB...
				if ( in_array( $this->order, [ 'fifo', 'timestamp' ] ) ) {
					$row = $this->claimOldest( $uuid );
				} else { // random first
					$rand = mt_rand( 0, self::MAX_JOB_RANDOM ); // encourage concurrent UPDATEs
					$gte = (bool)mt_rand( 0, 1 ); // find rows with rand before/after $rand
					$row = $this->claimRandom( $uuid, $rand, $gte );
				}
				// Check if we found a row to reserve...
				if ( !$row ) {
					break; // nothing to do
				}
				$this->incrStats( 'pops', $this->type );

				// Get the job object from the row...
				$job = $this->jobFromRow( $row );
				break; // done
			} while ( true );

			if ( !$job || mt_rand( 0, 9 ) == 0 ) {
				// Handle jobs that need to be recycled/deleted;
				// any recycled jobs will be picked up next attempt
				$this->recycleAndDeleteStaleJobs();
			}
		} catch ( DBError $e ) {
			throw $this->getDBException( $e );
		}

		return $job;
	}

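	/**
	 * Reserve a row with a single UPDATE without holding row locks over RTTs...
	 *
	 * @param string $uuid 32 char hex string
	 * @param int $rand Random unsigned integer (31 bits)
	 * @param bool $gte Search for job_random >= $rand (otherwise job_random <= $rand)
	 * @return stdClass|false Row|false
	 */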
	protected function claimRandom( $uuid, $rand, $gte ) {
		$dbw = $this->getMasterDB();
		$scope = $this->getScopedNoTrxFlag( $dbw );
		// Check cache to see if the queue has <= OFFSET items
		$tinyQueue = $this->wanCache->get( $this->getCacheKey( 'small' ) );

		$invertedDirection = false; // whether one job_random direction was already scanned
		// This uses a replication safe method for acquiring jobs. One could use UPDATE+LIMIT
		// instead, but that either uses ORDER BY (in which case it deadlocks in MySQL) or is
		// not replication safe. Due to https://bugs.mysql.com/bug.php?id=6980, subqueries cannot
		// be used here with MySQL.
		do {
			if ( $tinyQueue ) { // queue has <= MAX_OFFSET rows
				// For small queues, using OFFSET will overshoot and return no rows more often.
				// Instead, this uses job_random to pick a row (possibly checking both directions).
				$ineq = $gte ? '>=' : '<=';
				$dir = $gte ? 'ASC' : 'DESC';
				$row = $dbw->selectRow( 'job', self::selectFields(), // find a random job
					[
						'job_cmd' => $this->type,
						'job_token' => '', // unclaimed
						"job_random {$ineq} {$dbw->addQuotes( $rand )}" ],
					__METHOD__,
					[ 'ORDER BY' => "job_random {$dir}" ]
				);
				if ( !$row && !$invertedDirection ) {
					$gte = !$gte;
					$invertedDirection = true;
					continue; // try the other direction
				}
			} else { // table *may* have >= MAX_OFFSET rows
				// T44614: "ORDER BY job_random" with a job_random inequality causes high CPU
				// in MySQL if there are many rows for some reason. This uses a small OFFSET
				// instead of job_random for reducing excess claim retries.
				$row = $dbw->selectRow( 'job', self::selectFields(), // find a random job
					[
						'job_cmd' => $this->type,
						'job_token' => '', // unclaimed
					],
					__METHOD__,
					[ 'OFFSET' => mt_rand( 0, self::MAX_OFFSET ) ]
				);
				if ( !$row ) {
					$tinyQueue = true; // we know the queue must have <= MAX_OFFSET rows
					$this->wanCache->set( $this->getCacheKey( 'small' ), 1, 30 );
					continue; // use job_random
				}
			}

			if ( !$row ) {
				break;
			}

			$dbw->update( 'job', // update by PK
				[
					'job_token' => $uuid,
					'job_token_timestamp' => $dbw->timestamp(),
					'job_attempts = job_attempts+1' ],
				[ 'job_cmd' => $this->type, 'job_id' => $row->job_id, 'job_token' => '' ],
				__METHOD__
			);
			// This might get raced out by another runner when claiming the previously
			// selected row. The use of job_random should minimize this problem, however.
			if ( !$dbw->affectedRows() ) {
				$row = false; // raced out
			}
		} while ( !$row );

		return $row;
	}

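	/**
	 * Reserve a row with a single UPDATE without holding row locks over RTTs...
	 *
	 * @param string $uuid 32 char hex string
	 * @return stdClass|false Row|false
	 */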
	protected function claimOldest( $uuid ) {
		$dbw = $this->getMasterDB();
		$scope = $this->getScopedNoTrxFlag( $dbw );

		$row = false; // the row acquired
		do {
			if ( $dbw->getType() === 'mysql' ) {
				// Per https://bugs.mysql.com/bug.php?id=6980, we can't use subqueries on the
				// same table being changed in an UPDATE query in MySQL (gives Error: 1093).
				// Postgres has no such limitation. However, MySQL offers an
				// alternative here by supporting ORDER BY + LIMIT for UPDATE queries.
				$dbw->query( "UPDATE {$dbw->tableName( 'job' )} " .
					"SET " .
						"job_token = {$dbw->addQuotes( $uuid )}, " .
						"job_token_timestamp = {$dbw->addQuotes( $dbw->timestamp() )}, " .
						"job_attempts = job_attempts+1 " .
					"WHERE ( " .
						"job_cmd = {$dbw->addQuotes( $this->type )} " .
						"AND job_token = {$dbw->addQuotes( '' )} " .
					") ORDER BY job_id ASC LIMIT 1",
					__METHOD__
				);
			} else {
				// Use a subquery to find the job, within an UPDATE to claim it.
				// This uses as many of the DB wrapper functions as possible.
				$dbw->update( 'job',
					[
						'job_token' => $uuid,
						'job_token_timestamp' => $dbw->timestamp(),
						'job_attempts = job_attempts+1' ],
					[ 'job_id = (' .
						$dbw->selectSQLText( 'job', 'job_id',
							[ 'job_cmd' => $this->type, 'job_token' => '' ],
							__METHOD__,
							[ 'ORDER BY' => 'job_id ASC', 'LIMIT' => 1 ] ) .
						')'
					],
					__METHOD__
				);
			}

			if ( !$dbw->affectedRows() ) {
				break;
			}

			// Fetch any row that we just reserved...
			$row = $dbw->selectRow( 'job', self::selectFields(),
				[ 'job_cmd' => $this->type, 'job_token' => $uuid ], __METHOD__
			);
			if ( !$row ) { // raced out by duplicate job removal
				wfDebug( "Row deleted as duplicate by another process." );
			}
		} while ( !$row );

		return $row;
	}

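	/**
	 * @see JobQueue::ack()
	 * @param RunnableJob $job
	 */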
	protected function doAck( RunnableJob $job ) {
		$id = $job->getMetadata( 'id' );
		if ( $id === null ) {
			throw new MWException( "Job of type '{$job->getType()}' has no ID." );
		}

		$dbw = $this->getMasterDB();
		$scope = $this->getScopedNoTrxFlag( $dbw );
		try {
			// Delete a row with a single DELETE without holding row locks over RTTs...
			$dbw->delete(
				'job',
				[ 'job_cmd' => $this->type, 'job_id' => $id ],
				__METHOD__
			);

			$this->incrStats( 'acks', $this->type );
		} catch ( DBError $e ) {
			throw $this->getDBException( $e );
		}
	}

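	/**
	 * @see JobQueue::deduplicateRootJob()
	 * @param IJobSpecification $job
	 * @return bool
	 */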
	protected function doDeduplicateRootJob( IJobSpecification $job ) {
		// Callers should call JobQueueGroup::push() before this method so that if the
		// insert fails, the de-duplication registration will be aborted. Since the insert
		// is deferred till "transaction idle", do the same here, so that the ordering is
		// maintained. Having only the de-duplication registration succeed would cause
		// jobs to become no-ops without any actual jobs that made them redundant.
		$dbw = $this->getMasterDB();
		$scope = $this->getScopedNoTrxFlag( $dbw );
		$dbw->onTransactionCommitOrIdle(
			function () use ( $job ) {
				parent::doDeduplicateRootJob( $job );
			},
			__METHOD__
		);

		return true;
	}

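	/**
	 * @see JobQueue::delete()
	 * @return bool
	 */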
	protected function doDelete() {
		$dbw = $this->getMasterDB();
		$scope = $this->getScopedNoTrxFlag( $dbw );
		try {
			$dbw->delete( 'job', [ 'job_cmd' => $this->type ], __METHOD__ );
		} catch ( DBError $e ) {
			throw $this->getDBException( $e );
		}

		return true;
	}

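	/**
	 * @see JobQueue::waitForBackups()
	 */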
	protected function doWaitForBackups() {
		if ( $this->server ) {
			return; // not using LBFactory instance
		}

		$lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
		$lbFactory->waitForReplication( [
			'domain' => $this->domain,
			'cluster' => is_string( $this->cluster ) ? $this->cluster : false
		] );
	}

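	/**
	 * @see JobQueue::flushCaches()
	 */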
	protected function doFlushCaches() {
		foreach ( [ 'size', 'acquiredcount' ] as $type ) {
			$this->wanCache->delete( $this->getCacheKey( $type ) );
		}
	}

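	/**
	 * @see JobQueue::getAllQueuedJobs()
	 * @return Iterator
	 */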
	public function getAllQueuedJobs() {
		return $this->getJobIterator( [ 'job_cmd' => $this->getType(), 'job_token' => '' ] );
	}

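	/**
	 * @see JobQueue::getAllAcquiredJobs()
	 * @return Iterator
	 */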
	public function getAllAcquiredJobs() {
		return $this->getJobIterator( [ 'job_cmd' => $this->getType(), "job_token > ''" ] );
	}

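	/**
	 * @param array $conds Query conditions
	 * @return Iterator
	 */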
	protected function getJobIterator( array $conds ) {
		$dbr = $this->getReplicaDB();
		$scope = $this->getScopedNoTrxFlag( $dbr );
		try {
			return new MappedIterator(
				$dbr->select( 'job', self::selectFields(), $conds, __METHOD__ ),
				function ( $row ) {
					return $this->jobFromRow( $row );
				}
			);
		} catch ( DBError $e ) {
			throw $this->getDBException( $e );
		}
	}

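	/**
	 * Do not use this function outside of JobQueue/JobQueueGroup
	 *
	 * @return string|null
	 */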
	public function getCoalesceLocationInternal() {
		if ( $this->server ) {
			return null; // not using the LBFactory instance
		}

		return is_string( $this->cluster )
			? "DBCluster:{$this->cluster}:{$this->domain}"
			: "LBFactory:{$this->domain}";
	}

	protected function doGetSiblingQueuesWithJobs( array $types ) {
		$dbr = $this->getReplicaDB();
		$scope = $this->getScopedNoTrxFlag( $dbr );
		// @note: this does not check whether the jobs are claimed or not.
		// This is useful so JobQueueGroup::pop() also sees queues that only
		// have stale jobs. This lets recycleAndDeleteStaleJobs() re-enqueue
		// failed jobs so that they can be popped again for that edge case.
		$res = $dbr->select( 'job', 'DISTINCT job_cmd',
			[ 'job_cmd' => $types ], __METHOD__ );

		$types = [];
		foreach ( $res as $row ) {
			$types[] = $row->job_cmd;
		}

		return $types;
	}

	protected function doGetSiblingQueueSizes( array $types ) {
		$dbr = $this->getReplicaDB();
		$scope = $this->getScopedNoTrxFlag( $dbr );

		$res = $dbr->select( 'job', [ 'job_cmd', 'COUNT(*) AS count' ],
			[ 'job_cmd' => $types ], __METHOD__, [ 'GROUP BY' => 'job_cmd' ] );

		$sizes = [];
		foreach ( $res as $row ) {
			$sizes[$row->job_cmd] = (int)$row->count;
		}

		return $sizes;
	}

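	/**
	 * Recycle or destroy any jobs that have been claimed for too long
	 *
	 * @return int Number of jobs recycled/deleted
	 */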
	public function recycleAndDeleteStaleJobs() {
		$now = time();
		$count = 0; // affected rows
		$dbw = $this->getMasterDB();
		$scope = $this->getScopedNoTrxFlag( $dbw );

		try {
			if ( !$dbw->lock( "jobqueue-recycle-{$this->type}", __METHOD__, 1 ) ) {
				return $count; // already in progress
			}

			// Remove claims on jobs acquired for too long if enabled...
			if ( $this->claimTTL > 0 ) {
				$claimCutoff = $dbw->timestamp( $now - $this->claimTTL );
				// Get the IDs of jobs that have been claimed but not finished after too long.
				// These jobs can be recycled into the queue by expiring the claim. Selecting
				// the IDs first means that the UPDATE can be done by primary key (less deadlocks).
				$res = $dbw->select( 'job', 'job_id',
					[
						'job_cmd' => $this->type,
						"job_token != {$dbw->addQuotes( '' )}", // was acquired
						"job_token_timestamp < {$dbw->addQuotes( $claimCutoff )}", // stale
						"job_attempts < {$dbw->addQuotes( $this->maxTries )}" ], // retries left
					__METHOD__
				);
				$ids = array_map(
					function ( $o ) {
						return $o->job_id;
					}, iterator_to_array( $res )
				);
				if ( count( $ids ) ) {
					// Reset job_token for these jobs so that other runners will pick them up.
					// Set the timestamp to the current time, as it is useful to know that the job
					// was already tried before (the timestamp becomes the "released" time).
					$dbw->update( 'job',
						[
							'job_token' => '',
							'job_token_timestamp' => $dbw->timestamp( $now ) // time of release
						],
						[ 'job_id' => $ids, "job_token != ''" ],
						__METHOD__
					);
					$affected = $dbw->affectedRows();
					$count += $affected;
					$this->incrStats( 'recycles', $this->type, $affected );
				}
			}

			// Just destroy any stale jobs...
			$pruneCutoff = $dbw->timestamp( $now - self::MAX_AGE_PRUNE );
			$conds = [
				'job_cmd' => $this->type,
				"job_token != {$dbw->addQuotes( '' )}", // was acquired
				"job_token_timestamp < {$dbw->addQuotes( $pruneCutoff )}" // stale
			];
			if ( $this->claimTTL > 0 ) { // only prune jobs attempted too many times...
				$conds[] = "job_attempts >= {$dbw->addQuotes( $this->maxTries )}";
			}
			// Get the IDs of jobs that are considered stale and should be removed. Selecting
			// the IDs first means that the UPDATE can be done by primary key (less deadlocks).
			$res = $dbw->select( 'job', 'job_id', $conds, __METHOD__ );
			$ids = array_map(
				function ( $o ) {
					return $o->job_id;
				}, iterator_to_array( $res )
			);
			if ( count( $ids ) ) {
				$dbw->delete( 'job', [ 'job_id' => $ids ], __METHOD__ );
				$affected = $dbw->affectedRows();
				$count += $affected;
				$this->incrStats( 'abandons', $this->type, $affected );
			}

			$dbw->unlock( "jobqueue-recycle-{$this->type}", __METHOD__ );
		} catch ( DBError $e ) {
			throw $this->getDBException( $e );
		}

		return $count;
	}

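	/**
	 * @param IJobSpecification $job
	 * @param IDatabase $db
	 * @return array Map of field names to values for a row in the job table
	 */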
	protected function insertFields( IJobSpecification $job, IDatabase $db ) {
		return [
			// Fields that describe the nature of the job
			'job_cmd' => $job->getType(),
			'job_namespace' => $job->getParams()['namespace'] ?? NS_SPECIAL,
			'job_title' => $job->getParams()['title'] ?? '',
			'job_params' => self::makeBlob( $job->getParams() ),
			// Additional job metadata
			'job_timestamp' => $db->timestamp(),
			'job_sha1' => Wikimedia\base_convert(
				sha1( serialize( $job->getDeduplicationInfo() ) ),
				16, 36, 31
			),
			'job_random' => mt_rand( 0, self::MAX_JOB_RANDOM )
		];
	}

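	/**
	 * @throws JobQueueConnectionError
	 * @return IMaintainableDatabase
	 */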
	protected function getReplicaDB() {
		try {
			return $this->getDB( DB_REPLICA );
		} catch ( DBConnectionError $e ) {
			throw new JobQueueConnectionError( "DBConnectionError:" . $e->getMessage() );
		}
	}

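	/**
	 * @throws JobQueueConnectionError
	 * @return IMaintainableDatabase
	 */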
	protected function getMasterDB() {
		try {
			return $this->getDB( DB_MASTER );
		} catch ( DBConnectionError $e ) {
			throw new JobQueueConnectionError( "DBConnectionError:" . $e->getMessage() );
		}
	}

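	/**
	 * @param int $index DB_REPLICA or DB_MASTER
	 * @return IMaintainableDatabase
	 */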
	protected function getDB( $index ) {
		if ( $this->server ) {
			if ( $this->conn instanceof IDatabase ) {
				return $this->conn;
			} elseif ( $this->conn instanceof DBError ) {
				throw $this->conn;
			}

			try {
				$this->conn = Database::factory( $this->server['type'], $this->server );
			} catch ( DBError $e ) {
				$this->conn = $e;
				throw $e;
			}

			return $this->conn;
		} else {
			$lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
			$lb = is_string( $this->cluster )
				? $lbFactory->getExternalLB( $this->cluster )
				: $lbFactory->getMainLB( $this->domain );

			if ( $lb->getServerType( $lb->getWriterIndex() ) !== 'sqlite' ) {
				// Keep a separate connection to avoid contention and deadlocks;
				// however, SQLite has the opposite behavior due to DB-level locking.
				$flags = $lb::CONN_TRX_AUTOCOMMIT;
			} else {
				// Job insertion will be deferred until the PRESEND stage to reduce contention.
				$flags = 0;
			}

			return $lb->getMaintenanceConnectionRef( $index, [], $this->domain, $flags );
		}
	}

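	/**
	 * @param IDatabase $db
	 * @return ScopedCallback Restores the DBO_TRX flag (if it was set) when it goes out of scope
	 */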
	private function getScopedNoTrxFlag( IDatabase $db ) {
		$autoTrx = $db->getFlag( DBO_TRX ); // get current setting
		$db->clearFlag( DBO_TRX ); // make each query its own transaction

		return new ScopedCallback( function () use ( $db, $autoTrx ) {
			if ( $autoTrx ) {
				$db->setFlag( DBO_TRX ); // restore old setting
			}
		} );
	}

	private function getCacheKey( $property ) {
		$cluster = is_string( $this->cluster ) ? $this->cluster : 'main';

		return $this->wanCache->makeGlobalKey(
			'jobqueue',
			$this->domain,
			$cluster,
			$this->type,
			$property
		);
	}

	protected static function makeBlob( $params ) {
		if ( $params !== false ) {
			return serialize( $params );
		} else {
			return '';
		}
	}

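	/**
	 * @param stdClass $row
	 * @return RunnableJob
	 * @throws UnexpectedValueException
	 */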
	protected function jobFromRow( $row ) {
		$params = ( (string)$row->job_params !== '' ) ? unserialize( $row->job_params ) : [];
		if ( !is_array( $params ) ) { // this shouldn't happen
			throw new UnexpectedValueException(
				"Could not unserialize job with ID '{$row->job_id}'." );
		}

		$params += [ 'namespace' => $row->job_namespace, 'title' => $row->job_title ];
		$job = $this->factoryJob( $row->job_cmd, $params );
		$job->setMetadata( 'id', $row->job_id );
		$job->setMetadata( 'timestamp', $row->job_timestamp );

		return $job;
	}

	protected function getDBException( DBError $e ) {
		return new JobQueueError( get_class( $e ) . ": " . $e->getMessage() );
	}

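	/**
	 * Return the list of job fields that should be selected
	 *
	 * @return array
	 */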
	public static function selectFields() {
		return [
			'job_id',
			'job_cmd',
			'job_namespace',
			'job_title',
			'job_timestamp',
			'job_params',
			'job_random',
			'job_attempts',
			'job_token',
			'job_token_timestamp',
			'job_sha1',
		];
	}
}