MediaWiki REL1_28
recompressTracked.php
<?php

use MediaWiki\Logger\LegacyLogger;
use MediaWiki\MediaWikiServices;

$optionsWithArgs = RecompressTracked::getOptionsWithArgs();
require __DIR__ . '/../commandLine.inc';

if ( count( $args ) < 1 ) {
	echo "Usage: php recompressTracked.php [options] <cluster> [... <cluster>...]
Moves blobs indexed by trackBlobs.php to a specified list of destination clusters,
and recompresses them in the process. Restartable.

Options:
	--procs <procs>       Set the number of child processes (default 1)
	--copy-only           Copy only, do not update the text table. Restart
	                      without this option to complete.
	--debug-log <file>    Log debugging data to the specified file
	--info-log <file>     Log progress messages to the specified file
	--critical-log <file> Log error messages to the specified file
";
	exit( 1 );
}

$job = RecompressTracked::newFromCommandLine( $args, $options );
$job->execute();

/**
 * Maintenance script that moves blobs indexed by trackBlobs.php to a
 * specified list of destination clusters, and recompresses them in the process.
 */
class RecompressTracked {
	public $destClusters;
	public $batchSize = 1000;
	public $orphanBatchSize = 1000;
	public $reportingInterval = 10;
	public $numProcs = 1;
	public $numBatches = 0;
	public $pageBlobClass, $orphanBlobClass;
	public $copyOnly = false;
	public $isChild = false;
	public $replicaId = false;
	public $noCount = false;
	public $debugLog, $infoLog, $criticalLog;
	public $store;

	private static $optionsWithArgs = [
		'procs',
		'replica-id',
		'debug-log',
		'info-log',
		'critical-log'
	];

	private static $cmdLineOptionMap = [
		'no-count' => 'noCount',
		'procs' => 'numProcs',
		'copy-only' => 'copyOnly',
		'child' => 'isChild',
		'replica-id' => 'replicaId',
		'debug-log' => 'debugLog',
		'info-log' => 'infoLog',
		'critical-log' => 'criticalLog',
	];

	static function getOptionsWithArgs() {
		return self::$optionsWithArgs;
	}

	static function newFromCommandLine( $args, $options ) {
		$jobOptions = [ 'destClusters' => $args ];
		foreach ( self::$cmdLineOptionMap as $cmdOption => $classOption ) {
			if ( isset( $options[$cmdOption] ) ) {
				$jobOptions[$classOption] = $options[$cmdOption];
			}
		}

		return new self( $jobOptions );
	}

	function __construct( $options ) {
		foreach ( $options as $name => $value ) {
			$this->$name = $value;
		}
		$this->store = new ExternalStoreDB;
		if ( !$this->isChild ) {
			$GLOBALS['wgDebugLogPrefix'] = "RCT M: ";
		} elseif ( $this->replicaId !== false ) {
			$GLOBALS['wgDebugLogPrefix'] = "RCT {$this->replicaId}: ";
		}
		$this->pageBlobClass = function_exists( 'xdiff_string_bdiff' ) ?
			'DiffHistoryBlob' : 'ConcatenatedGzipHistoryBlob';
		$this->orphanBlobClass = 'ConcatenatedGzipHistoryBlob';
	}

	function debug( $msg ) {
		wfDebug( "$msg\n" );
		if ( $this->debugLog ) {
			$this->logToFile( $msg, $this->debugLog );
		}
	}

	function info( $msg ) {
		echo "$msg\n";
		if ( $this->infoLog ) {
			$this->logToFile( $msg, $this->infoLog );
		}
	}

	function critical( $msg ) {
		echo "$msg\n";
		if ( $this->criticalLog ) {
			$this->logToFile( $msg, $this->criticalLog );
		}
	}

	function logToFile( $msg, $file ) {
		$header = '[' . date( 'd\TH:i:s' ) . '] ' . wfHostname() . ' ' . posix_getpid();
		if ( $this->replicaId !== false ) {
			$header .= "({$this->replicaId})";
		}
		$header .= ' ' . wfWikiID();
		LegacyLogger::emit( sprintf( "%-50s %s\n", $header, $msg ), $file );
	}

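	/**
	 * Wait until the selected replica DB has caught up to the master
	 */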
	function syncDBs() {
		$dbw = wfGetDB( DB_MASTER );
		$dbr = wfGetDB( DB_REPLICA );
		$pos = $dbw->getMasterPos();
		$dbr->masterPosWait( $pos, 100000 );
	}

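	/**
	 * Execute parent or child depending on the isChild option
	 */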
	function execute() {
		if ( $this->isChild ) {
			$this->executeChild();
		} else {
			$this->executeParent();
		}
	}

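	/**
	 * Execute the parent process
	 */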
	function executeParent() {
		if ( !$this->checkTrackingTable() ) {
			return;
		}

		$this->syncDBs();
		$this->startReplicaProcs();
		$this->doAllPages();
		$this->doAllOrphans();
		$this->killReplicaProcs();
	}

	/**
	 * Make sure the tracking table exists and isn't empty
	 */
	function checkTrackingTable() {
		$dbr = wfGetDB( DB_REPLICA );
		if ( !$dbr->tableExists( 'blob_tracking' ) ) {
			$this->critical( "Error: blob_tracking table does not exist" );

			return false;
		}
		$row = $dbr->selectRow( 'blob_tracking', '*', '', __METHOD__ );
		if ( !$row ) {
			$this->info( "Warning: blob_tracking table contains no rows, skipping this wiki." );

			return false;
		}

		return true;
	}

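	/**
	 * Start the worker processes.
	 * These processes will listen on stdin for commands.
	 */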
	function startReplicaProcs() {
		$cmd = 'php ' . wfEscapeShellArg( __FILE__ );
		foreach ( self::$cmdLineOptionMap as $cmdOption => $classOption ) {
			if ( $cmdOption == 'replica-id' ) {
				continue;
			} elseif ( in_array( $cmdOption, self::$optionsWithArgs ) && isset( $this->$classOption ) ) {
				$cmd .= " --$cmdOption " . wfEscapeShellArg( $this->$classOption );
			} elseif ( $this->$classOption ) {
				$cmd .= " --$cmdOption";
			}
		}
		$cmd .= ' --child' .
			' --wiki ' . wfEscapeShellArg( wfWikiID() ) .
			' ' . call_user_func_array( 'wfEscapeShellArg', $this->destClusters );

		$this->replicaPipes = $this->replicaProcs = [];
		for ( $i = 0; $i < $this->numProcs; $i++ ) {
			$pipes = [];
			$spec = [
				[ 'pipe', 'r' ],
				[ 'file', 'php://stdout', 'w' ],
				[ 'file', 'php://stderr', 'w' ]
			];
			MediaWiki\suppressWarnings();
			$proc = proc_open( "$cmd --replica-id $i", $spec, $pipes );
			MediaWiki\restoreWarnings();
			if ( !$proc ) {
				$this->critical( "Error opening replica DB process: $cmd" );
				exit( 1 );
			}
			$this->replicaProcs[$i] = $proc;
			$this->replicaPipes[$i] = $pipes[0];
		}
		$this->prevReplicaId = -1;
	}

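	/**
	 * Gracefully terminate the child processes
	 */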
	function killReplicaProcs() {
		$this->info( "Waiting for replica DB processes to finish..." );
		for ( $i = 0; $i < $this->numProcs; $i++ ) {
			$this->dispatchToReplica( $i, 'quit' );
		}
		for ( $i = 0; $i < $this->numProcs; $i++ ) {
			$status = proc_close( $this->replicaProcs[$i] );
			if ( $status ) {
				$this->critical( "Warning: child #$i exited with status $status" );
			}
		}
		$this->info( "Done." );
	}

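	/**
	 * Dispatch a command to the next available replica DB.
	 * This may block until a replica DB finishes its work and becomes available.
	 */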
	function dispatch( /*...*/ ) {
		$args = func_get_args();
		$pipes = $this->replicaPipes;
		$numPipes = stream_select( $x = [], $pipes, $y = [], 3600 );
		if ( !$numPipes ) {
			$this->critical( "Error waiting to write to replica DBs. Aborting" );
			exit( 1 );
		}
		for ( $i = 0; $i < $this->numProcs; $i++ ) {
			$replicaId = ( $i + $this->prevReplicaId + 1 ) % $this->numProcs;
			if ( isset( $pipes[$replicaId] ) ) {
				$this->prevReplicaId = $replicaId;
				$this->dispatchToReplica( $replicaId, $args );

				return;
			}
		}
		$this->critical( "Unreachable" );
		exit( 1 );
	}

	/**
	 * Dispatch a command to a specified replica DB
	 */
	function dispatchToReplica( $replicaId, $args ) {
		$args = (array)$args;
		$cmd = implode( ' ', $args );
		fwrite( $this->replicaPipes[$replicaId], "$cmd\n" );
	}

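	/**
	 * Move all tracked pages to the new clusters
	 */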
	function doAllPages() {
		$dbr = wfGetDB( DB_REPLICA );
		$i = 0;
		$startId = 0;
		if ( $this->noCount ) {
			$numPages = '[unknown]';
		} else {
			$numPages = $dbr->selectField( 'blob_tracking',
				'COUNT(DISTINCT bt_page)',
				# A condition is required so that this query uses the index
				[ 'bt_moved' => 0 ],
				__METHOD__
			);
		}
		if ( $this->copyOnly ) {
			$this->info( "Copying pages..." );
		} else {
			$this->info( "Moving pages..." );
		}
		while ( true ) {
			$res = $dbr->select( 'blob_tracking',
				[ 'bt_page' ],
				[
					'bt_moved' => 0,
					'bt_page > ' . $dbr->addQuotes( $startId )
				],
				__METHOD__,
				[
					'DISTINCT',
					'ORDER BY' => 'bt_page',
					'LIMIT' => $this->batchSize,
				]
			);
			if ( !$res->numRows() ) {
				break;
			}
			foreach ( $res as $row ) {
				$startId = $row->bt_page;
				$this->dispatch( 'doPage', $row->bt_page );
				$i++;
			}
			$this->report( 'pages', $i, $numPages );
		}
		$this->report( 'pages', $i, $numPages );
		if ( $this->copyOnly ) {
			$this->info( "All page copies queued." );
		} else {
			$this->info( "All page moves queued." );
		}
	}

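	/**
	 * Display a progress report
	 */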
	function report( $label, $current, $end ) {
		$this->numBatches++;
		if ( $current == $end || $this->numBatches >= $this->reportingInterval ) {
			$this->numBatches = 0;
			$this->info( "$label: $current / $end" );
			MediaWikiServices::getInstance()->getDBLoadBalancerFactory()->waitForReplication();
		}
	}

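	/**
	 * Move all orphan text to the new clusters
	 */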
	function doAllOrphans() {
		$dbr = wfGetDB( DB_REPLICA );
		$startId = 0;
		$i = 0;
		if ( $this->noCount ) {
			$numOrphans = '[unknown]';
		} else {
			$numOrphans = $dbr->selectField( 'blob_tracking',
				'COUNT(DISTINCT bt_text_id)',
				[ 'bt_moved' => 0, 'bt_page' => 0 ],
				__METHOD__ );
			if ( !$numOrphans ) {
				return;
			}
		}
		if ( $this->copyOnly ) {
			$this->info( "Copying orphans..." );
		} else {
			$this->info( "Moving orphans..." );
		}

		while ( true ) {
			$res = $dbr->select( 'blob_tracking',
				[ 'bt_text_id' ],
				[
					'bt_moved' => 0,
					'bt_page' => 0,
					'bt_text_id > ' . $dbr->addQuotes( $startId )
				],
				__METHOD__,
				[
					'DISTINCT',
					'ORDER BY' => 'bt_text_id',
					'LIMIT' => $this->batchSize
				]
			);
			if ( !$res->numRows() ) {
				break;
			}
			$ids = [];
			foreach ( $res as $row ) {
				$startId = $row->bt_text_id;
				$ids[] = $row->bt_text_id;
				$i++;
			}
			// Need to send enough orphan IDs to the child at a time to fill a blob,
			// so orphanBatchSize needs to be at least ~100.
			// batchSize can be smaller or larger.
			while ( count( $ids ) > $this->orphanBatchSize ) {
				$args = array_slice( $ids, 0, $this->orphanBatchSize );
				$ids = array_slice( $ids, $this->orphanBatchSize );
				array_unshift( $args, 'doOrphanList' );
				call_user_func_array( [ $this, 'dispatch' ], $args );
			}
			if ( count( $ids ) ) {
				$args = $ids;
				array_unshift( $args, 'doOrphanList' );
				call_user_func_array( [ $this, 'dispatch' ], $args );
			}

			$this->report( 'orphans', $i, $numOrphans );
		}
		$this->report( 'orphans', $i, $numOrphans );
		$this->info( "All orphans queued." );
	}

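	/**
	 * Main entry point for worker processes
	 */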
	function executeChild() {
		$this->debug( 'starting' );
		$this->syncDBs();

		while ( !feof( STDIN ) ) {
			$line = rtrim( fgets( STDIN ) );
			if ( $line == '' ) {
				continue;
			}
			$this->debug( $line );
			$args = explode( ' ', $line );
			$cmd = array_shift( $args );
			switch ( $cmd ) {
				case 'doPage':
					$this->doPage( intval( $args[0] ) );
					break;
				case 'doOrphanList':
					$this->doOrphanList( array_map( 'intval', $args ) );
					break;
				case 'quit':
					return;
			}
			MediaWikiServices::getInstance()->getDBLoadBalancerFactory()->waitForReplication();
		}
	}

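	/**
	 * Move tracked text in a given page
	 */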
	function doPage( $pageId ) {
		$title = Title::newFromID( $pageId );
		if ( $title ) {
			$titleText = $title->getPrefixedText();
		} else {
			$titleText = '[deleted]';
		}
		$dbr = wfGetDB( DB_REPLICA );

		// Finish any incomplete transactions
		if ( !$this->copyOnly ) {
			$this->finishIncompleteMoves( [ 'bt_page' => $pageId ] );
			$this->syncDBs();
		}

491
492 $startId = 0;
493 $trx = new CgzCopyTransaction( $this, $this->pageBlobClass );
494
495 $lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
496 while ( true ) {
497 $res = $dbr->select(
498 [ 'blob_tracking', 'text' ],
499 '*',
500 [
501 'bt_page' => $pageId,
502 'bt_text_id > ' . $dbr->addQuotes( $startId ),
503 'bt_moved' => 0,
504 'bt_new_url IS NULL',
505 'bt_text_id=old_id',
506 ],
507 __METHOD__,
508 [
509 'ORDER BY' => 'bt_text_id',
510 'LIMIT' => $this->batchSize
511 ]
512 );
513 if ( !$res->numRows() ) {
514 break;
515 }
516
517 $lastTextId = 0;
518 foreach ( $res as $row ) {
519 $startId = $row->bt_text_id;
520 if ( $lastTextId == $row->bt_text_id ) {
521 // Duplicate (null edit)
522 continue;
523 }
524 $lastTextId = $row->bt_text_id;
525 // Load the text
526 $text = Revision::getRevisionText( $row );
527 if ( $text === false ) {
528 $this->critical( "Error loading {$row->bt_rev_id}/{$row->bt_text_id}" );
529 continue;
530 }
531
532 // Queue it
533 if ( !$trx->addItem( $text, $row->bt_text_id ) ) {
534 $this->debug( "$titleText: committing blob with " . $trx->getSize() . " items" );
535 $trx->commit();
536 $trx = new CgzCopyTransaction( $this, $this->pageBlobClass );
537 $lbFactory->waitForReplication();
538 }
539 }
540 }
541
542 $this->debug( "$titleText: committing blob with " . $trx->getSize() . " items" );
543 $trx->commit();
544 }
545
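	/**
	 * Atomic move operation.
	 *
	 * Write the new URL to the text table and set the bt_moved flag.
	 *
	 * This is done in a single transaction to provide restartable behavior
	 * without data loss.
	 */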
	function moveTextRow( $textId, $url ) {
		if ( $this->copyOnly ) {
			$this->critical( "Internal error: can't call moveTextRow() in --copy-only mode" );
			exit( 1 );
		}
		$dbw = wfGetDB( DB_MASTER );
		$dbw->begin( __METHOD__ );
		$dbw->update( 'text',
			[ // set
				'old_text' => $url,
				'old_flags' => 'external,utf-8',
			],
			[ // where
				'old_id' => $textId
			],
			__METHOD__
		);
		$dbw->update( 'blob_tracking',
			[ 'bt_moved' => 1 ],
			[ 'bt_text_id' => $textId ],
			__METHOD__
		);
		$dbw->commit( __METHOD__ );
	}

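	/**
	 * Moves are done in two phases: bt_new_url and then bt_moved.
	 *  - bt_new_url indicates that the text has been copied to the new cluster.
	 *  - bt_moved indicates that the text table has been updated.
	 *
	 * This function completes any moves that only have done bt_new_url. This
	 * can happen when the script is interrupted, or when --copy-only is used.
	 */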
	function finishIncompleteMoves( $conds ) {
		$dbr = wfGetDB( DB_REPLICA );
		$lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();

		$startId = 0;
		$conds = array_merge( $conds, [
			'bt_moved' => 0,
			'bt_new_url IS NOT NULL'
		] );
		while ( true ) {
			$res = $dbr->select( 'blob_tracking',
				'*',
				array_merge( $conds, [ 'bt_text_id > ' . $dbr->addQuotes( $startId ) ] ),
				__METHOD__,
				[
					'ORDER BY' => 'bt_text_id',
					'LIMIT' => $this->batchSize,
				]
			);
			if ( !$res->numRows() ) {
				break;
			}
			$this->debug( 'Incomplete: ' . $res->numRows() . ' rows' );
			foreach ( $res as $row ) {
				$startId = $row->bt_text_id;
				$this->moveTextRow( $row->bt_text_id, $row->bt_new_url );
				if ( $row->bt_text_id % 10 == 0 ) {
					$lbFactory->waitForReplication();
				}
			}
		}
	}

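	/**
	 * Returns the name of the next target cluster
	 */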
	function getTargetCluster() {
		$cluster = next( $this->destClusters );
		if ( $cluster === false ) {
			$cluster = reset( $this->destClusters );
		}

		return $cluster;
	}

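	/**
	 * Gets a DB master connection for the given external cluster name
	 */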
	function getExtDB( $cluster ) {
		$lb = wfGetLBFactory()->getExternalLB( $cluster );

		return $lb->getConnection( DB_MASTER );
	}

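	/**
	 * Move an orphan text_id to the new cluster
	 */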
	function doOrphanList( $textIds ) {
		// Finish incomplete moves
		if ( !$this->copyOnly ) {
			$this->finishIncompleteMoves( [ 'bt_text_id' => $textIds ] );
			$this->syncDBs();
		}

		$trx = new CgzCopyTransaction( $this, $this->orphanBlobClass );

		$lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
		$res = wfGetDB( DB_REPLICA )->select(
			[ 'text', 'blob_tracking' ],
			[ 'old_id', 'old_text', 'old_flags' ],
			[
				'old_id' => $textIds,
				'bt_text_id=old_id',
				'bt_moved' => 0,
			],
			__METHOD__,
			[ 'DISTINCT' ]
		);

		foreach ( $res as $row ) {
			$text = Revision::getRevisionText( $row );
			if ( $text === false ) {
				$this->critical( "Error: cannot load revision text for old_id={$row->old_id}" );
				continue;
			}

			if ( !$trx->addItem( $text, $row->old_id ) ) {
				$this->debug( "[orphan]: committing blob with " . $trx->getSize() . " rows" );
				$trx->commit();
				$trx = new CgzCopyTransaction( $this, $this->orphanBlobClass );
				$lbFactory->waitForReplication();
			}
		}
		$this->debug( "[orphan]: committing blob with " . $trx->getSize() . " rows" );
		$trx->commit();
	}
}

/**
 * Class to represent a recompression operation for a single CGZ blob
 */
class CgzCopyTransaction {
	public $parent;
	public $blobClass;
	public $cgz;
	public $referrers;
	public $texts;

	/**
	 * Create a transaction from a RecompressTracked object
	 */
	function __construct( $parent, $blobClass ) {
		$this->blobClass = $blobClass;
		$this->cgz = false;
		$this->texts = [];
		$this->parent = $parent;
	}

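	/**
	 * Add text.
	 * Returns false if it's ready to commit.
	 */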
	function addItem( $text, $textId ) {
		if ( !$this->cgz ) {
			$class = $this->blobClass;
			$this->cgz = new $class;
		}
		$hash = $this->cgz->addItem( $text );
		$this->referrers[$textId] = $hash;
		$this->texts[$textId] = $text;

		return $this->cgz->isHappy();
	}

	function getSize() {
		return count( $this->texts );
	}

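	/**
	 * Recompress text after some aberrant modification
	 */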
	function recompress() {
		$class = $this->blobClass;
		$this->cgz = new $class;
		$this->referrers = [];
		foreach ( $this->texts as $textId => $text ) {
			$hash = $this->cgz->addItem( $text );
			$this->referrers[$textId] = $hash;
		}
	}

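	/**
	 * Commit the blob.
	 * Does nothing if no text items have been added.
	 * May be called multiple times.
	 */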
	function commit() {
		$originalCount = count( $this->texts );
		if ( !$originalCount ) {
			return;
		}

		/* Check to see if the target text_ids have been moved already.
		 *
		 * We originally read from the replica DB, so this can happen when a single
		 * text_id is shared between multiple pages. It's rare, but possible
		 * if a delete/move/undelete cycle splits up a null edit.
		 *
		 * We do a locking read to prevent closer-run race conditions.
		 */
		$dbw = wfGetDB( DB_MASTER );
		$dbw->begin( __METHOD__ );
		$res = $dbw->select( 'blob_tracking',
			[ 'bt_text_id', 'bt_moved' ],
			[ 'bt_text_id' => array_keys( $this->referrers ) ],
			__METHOD__, [ 'FOR UPDATE' ] );
		$dirty = false;
		foreach ( $res as $row ) {
			if ( $row->bt_moved ) {
				# This row has already been moved, remove it
				$this->parent->debug( "TRX: conflict detected in old_id={$row->bt_text_id}" );
				unset( $this->texts[$row->bt_text_id] );
				$dirty = true;
			}
		}

		// Recompress the blob if necessary
		if ( $dirty ) {
			if ( !count( $this->texts ) ) {
				// All have been moved already
				if ( $originalCount > 1 ) {
					// This is suspicious, make noise
					$this->parent->critical(
						"Warning: concurrent operation detected, are there two conflicting " .
						"processes running, doing the same job?" );
				}

				return;
			}
			$this->recompress();
		}

		// Insert the data into the destination cluster
		$targetCluster = $this->parent->getTargetCluster();
		$store = $this->parent->store;
		$targetDB = $store->getMaster( $targetCluster );
		$targetDB->clearFlag( DBO_TRX ); // we manage the transactions
		$targetDB->begin( __METHOD__ );
		$baseUrl = $this->parent->store->store( $targetCluster, serialize( $this->cgz ) );

		// Write the new URLs to the blob_tracking table
		foreach ( $this->referrers as $textId => $hash ) {
			$url = $baseUrl . '/' . $hash;
			$dbw->update( 'blob_tracking',
				[ 'bt_new_url' => $url ],
				[
					'bt_text_id' => $textId,
					'bt_moved' => 0, # Check for concurrent conflicting update
				],
				__METHOD__
			);
		}

		$targetDB->commit( __METHOD__ );
		// Critical section here: interruption at this point causes blob duplication
		// Reversing the order of the commits would cause data loss instead
		$dbw->commit( __METHOD__ );

		// Write the new URLs to the text table and set the moved flag
		if ( !$this->parent->copyOnly ) {
			foreach ( $this->referrers as $textId => $hash ) {
				$url = $baseUrl . '/' . $hash;
				$this->parent->moveTextRow( $textId, $url );
			}
		}
	}
}