MediaWiki master
compressOld.php
<?php

use MediaWiki\MediaWikiServices;
use MediaWiki\Revision\SlotRecord;

require_once __DIR__ . '/../Maintenance.php';

/**
 * Maintenance script that compresses the text of a wiki.
 */
class CompressOld extends Maintenance {
	public function __construct() {
		parent::__construct();
		$this->addDescription( 'Compress the text of a wiki' );
		$this->addOption( 'type', 'Set compression type to either: gzip|concat', false, true, 't' );
		$this->addOption(
			'chunksize',
			'Maximum number of revisions in a concat chunk',
			false,
			true,
			'c'
		);
		$this->addOption(
			'begin-date',
			'Earliest date to check for uncompressed revisions',
			false,
			true,
			'b'
		);
		$this->addOption( 'end-date', 'Latest revision date to compress', false, true, 'e' );
		$this->addOption(
			'startid',
			'The id to start from (gzip -> text table, concat -> page table)',
			false,
			true,
			's'
		);
		$this->addOption(
			'extdb',
			'Store specified revisions in an external cluster (untested)',
			false,
			true
		);
		$this->addOption(
			'endid',
			'The page_id to stop at (only when using concat compression type)',
			false,
			true,
			'n'
		);
	}

	public function execute() {
		global $wgDBname;
		if ( !function_exists( "gzdeflate" ) ) {
			$this->fatalError( "You must enable zlib support in PHP to compress old revisions!\n" .
				"Please see https://www.php.net/manual/en/ref.zlib.php\n" );
		}

		$type = $this->getOption( 'type', 'concat' );
		$chunkSize = $this->getOption( 'chunksize', 20 );
		$startId = $this->getOption( 'startid', 0 );
		$beginDate = $this->getOption( 'begin-date', '' );
		$endDate = $this->getOption( 'end-date', '' );
		$extDB = $this->getOption( 'extdb', '' );
		$endId = $this->getOption( 'endid', false );

		if ( $type != 'concat' && $type != 'gzip' ) {
			$this->error( "Type \"{$type}\" not supported" );
		}

		if ( $extDB != '' ) {
			$this->output( "Compressing database {$wgDBname} to external cluster {$extDB}\n"
				. str_repeat( '-', 76 ) . "\n\n" );
		} else {
			$this->output( "Compressing database {$wgDBname}\n"
				. str_repeat( '-', 76 ) . "\n\n" );
		}

		$success = true;
		if ( $type == 'concat' ) {
			$success = $this->compressWithConcat( $startId, $chunkSize, $beginDate,
				$endDate, $extDB, $endId );
		} else {
			$this->compressOldPages( $startId, $extDB );
		}

		if ( $success ) {
			$this->output( "Done.\n" );
		}
	}

	/**
	 * Fetch text rows in chunks and pass each one to compressPage() for compression.
	 *
	 * @param int $start
	 * @param string $extdb
	 */
	private function compressOldPages( $start = 0, $extdb = '' ) {
		$chunksize = 50;
		$this->output( "Starting from old_id $start...\n" );
		$dbw = $this->getDB( DB_MASTER );
		do {
			$res = $dbw->select(
				'text',
				[ 'old_id', 'old_flags', 'old_text' ],
				"old_id>=$start",
				__METHOD__,
				[ 'ORDER BY' => 'old_id', 'LIMIT' => $chunksize, 'FOR UPDATE' ]
			);

			if ( $res->numRows() == 0 ) {
				break;
			}

			$last = $start;

			foreach ( $res as $row ) {
				# print " {$row->old_id} - {$row->old_namespace}:{$row->old_title}\n";
				$this->compressPage( $row, $extdb );
				$last = $row->old_id;
			}

			$start = $last + 1; # Deletion may leave long empty stretches
			$this->output( "$start...\n" );
		} while ( true );
	}

	/**
	 * Compress the text in gzip format.
	 *
	 * @param stdClass $row
	 * @param string $extdb
	 * @return bool
	 */
	private function compressPage( $row, $extdb ) {
		if ( strpos( $row->old_flags, 'gzip' ) !== false
			|| strpos( $row->old_flags, 'object' ) !== false
		) {
			# print "Already compressed row {$row->old_id}\n";
			return false;
		}
		$dbw = $this->getDB( DB_MASTER );
		$flags = $row->old_flags ? "{$row->old_flags},gzip" : "gzip";
		$compress = gzdeflate( $row->old_text );

		# Store in external storage if required
		if ( $extdb !== '' ) {
			$storeObj = new ExternalStoreDB;
			$compress = $storeObj->store( $extdb, $compress );
			if ( $compress === false ) {
				$this->error( "Unable to store object" );

				return false;
			}
		}

		# Update text row
		$dbw->update( 'text',
			[ /* SET */
				'old_flags' => $flags,
				'old_text' => $compress
			], [ /* WHERE */
				'old_id' => $row->old_id
			], __METHOD__,
			[ 'LIMIT' => 1 ]
		);

		return true;
	}
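
	// A minimal sketch of the read path (for orientation; gzinflate() is the inverse of
	// the gzdeflate() call above): a row written by compressPage() carries the 'gzip'
	// flag, so a reader must do roughly
	//
	//     $text = gzinflate( $row->old_text );
	//
	// before using old_text. Revision::getRevisionText() applies this automatically
	// when old_flags contains 'gzip'.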

	/**
	 * Compress the text in chunks after concatenating the revisions.
	 *
	 * @param int $startId
	 * @param int $maxChunkSize
	 * @param string $beginDate
	 * @param string $endDate
	 * @param string $extdb
	 * @param bool|int $maxPageId
	 * @return bool
	 */
	private function compressWithConcat( $startId, $maxChunkSize, $beginDate,
		$endDate, $extdb = "", $maxPageId = false
	) {
		global $wgMultiContentRevisionSchemaMigrationStage;

		$dbr = $this->getDB( DB_REPLICA );
		$dbw = $this->getDB( DB_MASTER );

		# Set up external storage
		if ( $extdb != '' ) {
			$storeObj = new ExternalStoreDB;
		}

		# Get all articles by page_id
		if ( !$maxPageId ) {
			$maxPageId = $dbr->selectField( 'page', 'max(page_id)', '', __METHOD__ );
		}
		$this->output( "Starting from $startId of $maxPageId\n" );
		$pageConds = [];

		/*
		if ( $exclude_ns0 ) {
			print "Excluding main namespace\n";
			$pageConds[] = 'page_namespace<>0';
		}
		if ( $queryExtra ) {
			$pageConds[] = $queryExtra;
		}
		*/

		# For each article, get a list of revisions which fit the criteria

		# No recompression, use a condition on old_flags
		# Don't compress object type entities, because that might produce data loss when
		# overwriting bulk storage concat rows. Don't compress external references, because
		# the script doesn't yet delete rows from external storage.
		$conds = [
			'old_flags NOT ' . $dbr->buildLike( $dbr->anyString(), 'object', $dbr->anyString() )
			. ' AND old_flags NOT '
			. $dbr->buildLike( $dbr->anyString(), 'external', $dbr->anyString() )
		];

		if ( $beginDate ) {
			if ( !preg_match( '/^\d{14}$/', $beginDate ) ) {
				$this->error( "Invalid begin date \"$beginDate\"\n" );

				return false;
			}
			$conds[] = "rev_timestamp>'" . $beginDate . "'";
		}
		if ( $endDate ) {
			if ( !preg_match( '/^\d{14}$/', $endDate ) ) {
				$this->error( "Invalid end date \"$endDate\"\n" );

				return false;
			}
			$conds[] = "rev_timestamp<'" . $endDate . "'";
		}

		if ( $wgMultiContentRevisionSchemaMigrationStage & SCHEMA_COMPAT_READ_OLD ) {
			$tables = [ 'revision', 'text' ];
			$conds[] = 'rev_text_id=old_id';
		} else {
			$slotRoleStore = MediaWikiServices::getInstance()->getSlotRoleStore();
			$tables = [ 'revision', 'slots', 'content', 'text' ];
			$conds = array_merge( [
				'rev_id=slot_revision_id',
				'slot_role_id=' . $slotRoleStore->getId( SlotRecord::MAIN ),
				'content_id=slot_content_id',
				'SUBSTRING(content_address, 1, 3)=' . $dbr->addQuotes( 'tt:' ),
				'SUBSTRING(content_address, 4)=old_id',
			], $conds );
		}
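		// For clarity: under the MCR schema, content_address values for locally stored
		// text look like "tt:12345", where 12345 is the text.old_id. The SUBSTRING
		// conditions above strip the "tt:" prefix and join the remainder against old_id.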

		$fields = [ 'rev_id', 'old_id', 'old_flags', 'old_text' ];
		$revLoadOptions = 'FOR UPDATE';

		# Don't work with current revisions
		# Don't lock the page table for update either -- TS 2006-04-04
		# $tables[] = 'page';
		# $conds[] = 'page_id=rev_page AND rev_id != page_latest';

		for ( $pageId = $startId; $pageId <= $maxPageId; $pageId++ ) {
			wfWaitForSlaves();

			# Wake up
			$dbr->ping();

			# Get the page row
			$pageRes = $dbr->select( 'page',
				[ 'page_id', 'page_namespace', 'page_title', 'page_latest' ],
				$pageConds + [ 'page_id' => $pageId ], __METHOD__ );
			if ( $pageRes->numRows() == 0 ) {
				continue;
			}
			$pageRow = $dbr->fetchObject( $pageRes );

			# Display progress
			$titleObj = Title::makeTitle( $pageRow->page_namespace, $pageRow->page_title );
			$this->output( "$pageId\t" . $titleObj->getPrefixedDBkey() . " " );

			# Load revisions
			$revRes = $dbw->select( $tables, $fields,
				array_merge( [
					'rev_page' => $pageRow->page_id,
					# Don't operate on the current revision
					# Use < instead of <> in case the current revision has changed
					# since the page select, which wasn't locking
					'rev_id < ' . $pageRow->page_latest
				], $conds ),
				__METHOD__,
				$revLoadOptions
			);
			$revs = [];
			foreach ( $revRes as $revRow ) {
				$revs[] = $revRow;
			}

			if ( count( $revs ) < 2 ) {
				# No revisions matching, no further processing
				$this->output( "\n" );
				continue;
			}

			# For each chunk
			$i = 0;
			while ( $i < count( $revs ) ) {
				if ( $i < count( $revs ) - $maxChunkSize ) {
					$thisChunkSize = $maxChunkSize;
				} else {
					$thisChunkSize = count( $revs ) - $i;
				}

				$chunk = new ConcatenatedGzipHistoryBlob();
				$stubs = [];
				$this->beginTransaction( $dbw, __METHOD__ );
				$usedChunk = false;
				$primaryOldid = $revs[$i]->old_id;

				# Get the text of each revision and add it to the object
				for ( $j = 0; $j < $thisChunkSize && $chunk->isHappy(); $j++ ) {
					$oldid = $revs[$i + $j]->old_id;

					# Get text
					$text = Revision::getRevisionText( $revs[$i + $j] );

					if ( $text === false ) {
						$this->error( "\nError, unable to get text in old_id $oldid" );
						# $dbw->delete( 'old', [ 'old_id' => $oldid ] );
					}

					if ( $extdb == "" && $j == 0 ) {
						$chunk->setText( $text );
						$this->output( '.' );
					} else {
						# Don't make a stub if it's going to be longer than the article
						# Stubs are typically about 100 bytes
						if ( strlen( $text ) < 120 ) {
							$stub = false;
							$this->output( 'x' );
						} else {
							$stub = new HistoryBlobStub( $chunk->addItem( $text ) );
							$stub->setLocation( $primaryOldid );
							$stub->setReferrer( $oldid );
							$this->output( '.' );
							$usedChunk = true;
						}
						$stubs[$j] = $stub;
					}
				}
				$thisChunkSize = $j;

				# If we couldn't actually use any stubs because the pages were too small, do nothing
				if ( $usedChunk ) {
					if ( $extdb != "" ) {
						# Move blob objects to External Storage
						$stored = $storeObj->store( $extdb, serialize( $chunk ) );
						if ( $stored === false ) {
							$this->error( "Unable to store object" );

							return false;
						}
						# Store External Storage URLs instead of Stub placeholders
						foreach ( $stubs as $stub ) {
							if ( $stub === false ) {
								continue;
							}
							# $stored should provide base path to a BLOB
							$url = $stored . "/" . $stub->getHash();
							$dbw->update( 'text',
								[ /* SET */
									'old_text' => $url,
									'old_flags' => 'external,utf-8',
								], [ /* WHERE */
									'old_id' => $stub->getReferrer(),
								]
							);
						}
					} else {
						# Store the main object locally
						$dbw->update( 'text',
							[ /* SET */
								'old_text' => serialize( $chunk ),
								'old_flags' => 'object,utf-8',
							], [ /* WHERE */
								'old_id' => $primaryOldid
							]
						);

						# Store the stub objects
						for ( $j = 1; $j < $thisChunkSize; $j++ ) {
							# Skip if not compressing and don't overwrite the first revision
							if ( $stubs[$j] !== false && $revs[$i + $j]->old_id != $primaryOldid ) {
								$dbw->update( 'text',
									[ /* SET */
										'old_text' => serialize( $stubs[$j] ),
										'old_flags' => 'object,utf-8',
									], [ /* WHERE */
										'old_id' => $revs[$i + $j]->old_id
									]
								);
							}
						}
					}
				}
				# Done, next
				$this->output( "/" );
				$this->commitTransaction( $dbw, __METHOD__ );
				$i += $thisChunkSize;
			}
			$this->output( "\n" );
		}

		return true;
	}
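
	// For orientation, a rough sketch of how rows written by compressWithConcat() are
	// read back (Revision::getRevisionText() does this when old_flags contains 'object'):
	//
	//     $stub = unserialize( $stubRow->old_text ); // HistoryBlobStub row
	//     $text = $stub->getText();                  // loads the primary CGZ row recorded
	//                                                // via setLocation() and extracts this
	//                                                // revision by its hash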
}

$maintClass = CompressOld::class;
require_once RUN_MAINTENANCE_IF_MAIN;
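
A typical invocation, using the options defined in the constructor above (the flag
values shown are illustrative; dates are 14-digit TS_MW timestamps):

    php maintenance/storage/compressOld.php --type=concat --chunksize=20 \
        --startid=1 --begin-date=20100101000000 --end-date=20110101000000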