MediaWiki REL1_34
compressOld.php
<?php
/**
 * Compress the text of a wiki.
 */

use MediaWiki\MediaWikiServices;
use MediaWiki\Revision\SlotRecord;

require_once __DIR__ . '/../Maintenance.php';
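
/*
 * Example invocation (illustrative; the option names match those registered in
 * CompressOld::__construct() below, the values are placeholders):
 *
 *     php maintenance/storage/compressOld.php --type concat --chunksize 20 \
 *         --begin-date 20180101000000 --end-date 20190101000000 --startid 1
 */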

/**
 * Maintenance script that compresses the text of a wiki.
 */
class CompressOld extends Maintenance {
	public function __construct() {
		parent::__construct();
		$this->addDescription( 'Compress the text of a wiki' );
		$this->addOption( 'type', 'Set compression type to either: gzip|concat', false, true, 't' );
		$this->addOption(
			'chunksize',
			'Maximum number of revisions in a concat chunk',
			false,
			true,
			'c'
		);
		$this->addOption(
			'begin-date',
			'Earliest date to check for uncompressed revisions',
			false,
			true,
			'b'
		);
		$this->addOption( 'end-date', 'Latest revision date to compress', false, true, 'e' );
		$this->addOption(
			'startid',
			'The id to start from (gzip -> text table, concat -> page table)',
			false,
			true,
			's'
		);
		$this->addOption(
			'extdb',
			'Store specified revisions in an external cluster (untested)',
			false,
			true
		);
		$this->addOption(
			'endid',
			'The page_id to stop at (only when using concat compression type)',
			false,
			true,
			'n'
		);
	}

	public function execute() {
		global $wgDBname;
		if ( !function_exists( "gzdeflate" ) ) {
			$this->fatalError( "You must enable zlib support in PHP to compress old revisions!\n" .
				"Please see https://www.php.net/manual/en/ref.zlib.php\n" );
		}

		$type = $this->getOption( 'type', 'concat' );
		$chunkSize = $this->getOption( 'chunksize', 20 );
		$startId = $this->getOption( 'startid', 0 );
		$beginDate = $this->getOption( 'begin-date', '' );
		$endDate = $this->getOption( 'end-date', '' );
		$extDB = $this->getOption( 'extdb', '' );
		$endId = $this->getOption( 'endid', false );

		if ( $type != 'concat' && $type != 'gzip' ) {
			$this->error( "Type \"{$type}\" not supported" );
		}

		if ( $extDB != '' ) {
			$this->output( "Compressing database {$wgDBname} to external cluster {$extDB}\n"
				. str_repeat( '-', 76 ) . "\n\n" );
		} else {
			$this->output( "Compressing database {$wgDBname}\n"
				. str_repeat( '-', 76 ) . "\n\n" );
		}

		$success = true;
		if ( $type == 'concat' ) {
			$success = $this->compressWithConcat( $startId, $chunkSize, $beginDate,
				$endDate, $extDB, $endId );
		} else {
			$this->compressOldPages( $startId, $extDB );
		}

		if ( $success ) {
			$this->output( "Done.\n" );
		}
	}

	/**
	 * Fetch the text row-by-row to 'compressPage' function for compression.
	 *
	 * @param int $start
	 * @param string $extdb
	 */
	private function compressOldPages( $start = 0, $extdb = '' ) {
		$chunksize = 50;
		$this->output( "Starting from old_id $start...\n" );
		$dbw = $this->getDB( DB_MASTER );
		do {
			$res = $dbw->select(
				'text',
				[ 'old_id', 'old_flags', 'old_text' ],
				"old_id>=$start",
				__METHOD__,
				[ 'ORDER BY' => 'old_id', 'LIMIT' => $chunksize, 'FOR UPDATE' ]
			);

			if ( $res->numRows() == 0 ) {
				break;
			}

			$last = $start;

			foreach ( $res as $row ) {
				# print " {$row->old_id} - {$row->old_namespace}:{$row->old_title}\n";
				$this->compressPage( $row, $extdb );
				$last = $row->old_id;
			}

			$start = $last + 1; # Deletion may leave long empty stretches
			$this->output( "$start...\n" );
		} while ( true );
	}

	/**
	 * Compress the text in gzip format.
	 *
	 * @param stdClass $row
	 * @param string $extdb
	 * @return bool
	 */
	private function compressPage( $row, $extdb ) {
		if ( strpos( $row->old_flags, 'gzip' ) !== false
			|| strpos( $row->old_flags, 'object' ) !== false
		) {
			# print "Already compressed row {$row->old_id}\n";
			return false;
		}
		$dbw = $this->getDB( DB_MASTER );
		$flags = $row->old_flags ? "{$row->old_flags},gzip" : "gzip";
		$compress = gzdeflate( $row->old_text );

		# Store in external storage if required
		if ( $extdb !== '' ) {
			$esFactory = MediaWikiServices::getInstance()->getExternalStoreFactory();
			/** @var ExternalStoreDB $storeObj */
			$storeObj = $esFactory->getStore( 'DB' );
			$compress = $storeObj->store( $extdb, $compress );
			if ( $compress === false ) {
				$this->error( "Unable to store object" );

				return false;
			}
		}

		# Update text row
		$dbw->update( 'text',
			[ /* SET */
				'old_flags' => $flags,
				'old_text' => $compress
			], [ /* WHERE */
				'old_id' => $row->old_id
			], __METHOD__,
			[ 'LIMIT' => 1 ]
		);

		return true;
	}
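
	// Illustrative note: a text row rewritten above gains the "gzip" flag, so its
	// old_text can later be read back with PHP's gzinflate(), e.g.
	// $text = gzinflate( $row->old_text );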

	/**
	 * Compress the text in chunks after concatenating the revisions.
	 *
	 * @param int $startId
	 * @param int $maxChunkSize
	 * @param string $beginDate
	 * @param string $endDate
	 * @param string $extdb
	 * @param bool|int $maxPageId
	 * @return bool
	 */
	private function compressWithConcat( $startId, $maxChunkSize, $beginDate,
		$endDate, $extdb = "", $maxPageId = false
	) {
		global $wgMultiContentRevisionSchemaMigrationStage;

		$dbr = $this->getDB( DB_REPLICA );
		$dbw = $this->getDB( DB_MASTER );

		# Set up external storage
		if ( $extdb != '' ) {
			$esFactory = MediaWikiServices::getInstance()->getExternalStoreFactory();
			/** @var ExternalStoreDB $storeObj */
			$storeObj = $esFactory->getStore( 'DB' );
		}
		// @phan-suppress-next-line PhanAccessMethodInternal
		$blobStore = MediaWikiServices::getInstance()
			->getBlobStoreFactory()
			->newSqlBlobStore();

		# Get all articles by page_id
		if ( !$maxPageId ) {
			$maxPageId = $dbr->selectField( 'page', 'max(page_id)', '', __METHOD__ );
		}
		$this->output( "Starting from $startId of $maxPageId\n" );
		$pageConds = [];

		/*
		if ( $exclude_ns0 ) {
			print "Excluding main namespace\n";
			$pageConds[] = 'page_namespace<>0';
		}
		if ( $queryExtra ) {
			$pageConds[] = $queryExtra;
		}
		*/

		# For each article, get a list of revisions which fit the criteria

		# No recompression, use a condition on old_flags
		# Don't compress object type entities, because that might produce data loss when
		# overwriting bulk storage concat rows. Don't compress external references, because
		# the script doesn't yet delete rows from external storage.
		$conds = [
			'old_flags NOT ' . $dbr->buildLike( $dbr->anyString(), 'object', $dbr->anyString() )
				. ' AND old_flags NOT '
				. $dbr->buildLike( $dbr->anyString(), 'external', $dbr->anyString() )
		];

		if ( $beginDate ) {
			if ( !preg_match( '/^\d{14}$/', $beginDate ) ) {
				$this->error( "Invalid begin date \"$beginDate\"\n" );

				return false;
			}
			$conds[] = "rev_timestamp>'" . $beginDate . "'";
		}
		if ( $endDate ) {
			if ( !preg_match( '/^\d{14}$/', $endDate ) ) {
				$this->error( "Invalid end date \"$endDate\"\n" );

				return false;
			}
			$conds[] = "rev_timestamp<'" . $endDate . "'";
		}

		if ( $wgMultiContentRevisionSchemaMigrationStage & SCHEMA_COMPAT_READ_OLD ) {
			$tables = [ 'revision', 'text' ];
			$conds[] = 'rev_text_id=old_id';
		} else {
			$slotRoleStore = MediaWikiServices::getInstance()->getSlotRoleStore();
			$tables = [ 'revision', 'slots', 'content', 'text' ];
			$conds = array_merge( [
				'rev_id=slot_revision_id',
				'slot_role_id=' . $slotRoleStore->getId( SlotRecord::MAIN ),
				'content_id=slot_content_id',
				'SUBSTRING(content_address, 1, 3)=' . $dbr->addQuotes( 'tt:' ),
				'SUBSTRING(content_address, 4)=old_id',
			], $conds );
		}

		$fields = [ 'rev_id', 'old_id', 'old_flags', 'old_text' ];
		$revLoadOptions = 'FOR UPDATE';

		# Don't work with current revisions
		# Don't lock the page table for update either -- TS 2006-04-04
		# $tables[] = 'page';
		# $conds[] = 'page_id=rev_page AND rev_id != page_latest';

		for ( $pageId = $startId; $pageId <= $maxPageId; $pageId++ ) {
			wfWaitForSlaves();

			# Wake up
			$dbr->ping();

			# Get the page row
			$pageRes = $dbr->select( 'page',
				[ 'page_id', 'page_namespace', 'page_title', 'page_latest' ],
				$pageConds + [ 'page_id' => $pageId ], __METHOD__ );
			if ( $pageRes->numRows() == 0 ) {
				continue;
			}
			$pageRow = $dbr->fetchObject( $pageRes );

			# Display progress
			$titleObj = Title::makeTitle( $pageRow->page_namespace, $pageRow->page_title );
			$this->output( "$pageId\t" . $titleObj->getPrefixedDBkey() . " " );

			# Load revisions
			$revRes = $dbw->select( $tables, $fields,
				array_merge( [
					'rev_page' => $pageRow->page_id,
					# Don't operate on the current revision
					# Use < instead of <> in case the current revision has changed
					# since the page select, which wasn't locking
					'rev_id < ' . $pageRow->page_latest
				], $conds ),
				__METHOD__,
				$revLoadOptions
			);
			$revs = [];
			foreach ( $revRes as $revRow ) {
				$revs[] = $revRow;
			}

			if ( count( $revs ) < 2 ) {
				# No revisions matching, no further processing
				$this->output( "\n" );
				continue;
			}

			# For each chunk
			$i = 0;
			while ( $i < count( $revs ) ) {
				if ( $i < count( $revs ) - $maxChunkSize ) {
					$thisChunkSize = $maxChunkSize;
				} else {
					$thisChunkSize = count( $revs ) - $i;
				}

				$chunk = new ConcatenatedGzipHistoryBlob();
				$stubs = [];
				$this->beginTransaction( $dbw, __METHOD__ );
				$usedChunk = false;
				$primaryOldid = $revs[$i]->old_id;

				# Get the text of each revision and add it to the object
				for ( $j = 0; $j < $thisChunkSize && $chunk->isHappy(); $j++ ) {
					$oldid = $revs[$i + $j]->old_id;

					# Get text. We do not need the full `extractBlob` since the query is built
					# to fetch non-externalstore blobs.
					$text = $blobStore->decompressData(
						$revs[$i + $j]->old_text,
						explode( ',', $revs[$i + $j]->old_flags )
					);

					if ( $text === false ) {
						$this->error( "\nError, unable to get text in old_id $oldid" );
						# $dbw->delete( 'old', [ 'old_id' => $oldid ] );
					}

					if ( $extdb == "" && $j == 0 ) {
						$chunk->setText( $text );
						$this->output( '.' );
					} else {
						# Don't make a stub if it's going to be longer than the article
						# Stubs are typically about 100 bytes
						if ( strlen( $text ) < 120 ) {
							$stub = false;
							$this->output( 'x' );
						} else {
							$stub = new HistoryBlobStub( $chunk->addItem( $text ) );
							$stub->setLocation( $primaryOldid );
							$stub->setReferrer( $oldid );
							$this->output( '.' );
							$usedChunk = true;
						}
						$stubs[$j] = $stub;
					}
				}
				$thisChunkSize = $j;

				# If we couldn't actually use any stubs because the pages were too small, do nothing
				if ( $usedChunk ) {
					if ( $extdb != "" ) {
						# Move blob objects to External Storage
						$stored = $storeObj->store( $extdb, serialize( $chunk ) );
						if ( $stored === false ) {
							$this->error( "Unable to store object" );

							return false;
						}
						# Store External Storage URLs instead of Stub placeholders
						foreach ( $stubs as $stub ) {
							if ( $stub === false ) {
								continue;
							}
							# $stored should provide base path to a BLOB
							$url = $stored . "/" . $stub->getHash();
							$dbw->update( 'text',
								[ /* SET */
									'old_text' => $url,
									'old_flags' => 'external,utf-8',
								], [ /* WHERE */
									'old_id' => $stub->getReferrer(),
								]
							);
						}
					} else {
						# Store the main object locally
						$dbw->update( 'text',
							[ /* SET */
								'old_text' => serialize( $chunk ),
								'old_flags' => 'object,utf-8',
							], [ /* WHERE */
								'old_id' => $primaryOldid
							]
						);

						# Store the stub objects
						for ( $j = 1; $j < $thisChunkSize; $j++ ) {
							# Skip if not compressing and don't overwrite the first revision
							if ( $stubs[$j] !== false && $revs[$i + $j]->old_id != $primaryOldid ) {
								$dbw->update( 'text',
									[ /* SET */
										'old_text' => serialize( $stubs[$j] ),
										'old_flags' => 'object,utf-8',
									], [ /* WHERE */
										'old_id' => $revs[$i + $j]->old_id
									]
								);
							}
						}
					}
				}
				# Done, next
				$this->output( "/" );
				$this->commitTransaction( $dbw, __METHOD__ );
				$i += $thisChunkSize;
			}
			$this->output( "\n" );
		}

		return true;
	}
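
	// Illustrative note: after a concat pass, the first matched revision of each chunk
	// holds a serialize()d ConcatenatedGzipHistoryBlob with flags 'object,utf-8'; the
	// other revisions hold serialize()d HistoryBlobStub pointers to it, or, when --extdb
	// is used, 'external,utf-8' URLs of the form "<stored base path>/<stub hash>".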
}

$maintClass = CompressOld::class;
require_once RUN_MAINTENANCE_IF_MAIN;