MediaWiki REL1_31
compressOld.php
<?php

require_once __DIR__ . '/../Maintenance.php';

/**
 * Maintenance script that compresses the text of a wiki.
 */
class CompressOld extends Maintenance {
	/** Option to load each revision individually. */
	const LS_INDIVIDUAL = 0;

	/** Option to load revisions in chunks. */
	const LS_CHUNKED = 1;

	public function __construct() {
		parent::__construct();
		$this->addDescription( 'Compress the text of a wiki' );
		$this->addOption( 'type', 'Set compression type to either: gzip|concat', false, true, 't' );
		$this->addOption(
			'chunksize',
			'Maximum number of revisions in a concat chunk',
			false,
			true,
			'c'
		);
		$this->addOption(
			'begin-date',
			'Earliest date to check for uncompressed revisions',
			false,
			true,
			'b'
		);
		$this->addOption( 'end-date', 'Latest revision date to compress', false, true, 'e' );
		$this->addOption(
			'startid',
			'The id to start from (gzip -> text table, concat -> page table)',
			false,
			true,
			's'
		);
		$this->addOption(
			'extdb',
			'Store specified revisions in an external cluster (untested)',
			false,
			true
		);
		$this->addOption(
			'endid',
			'The page_id to stop at (only when using concat compression type)',
			false,
			true,
			'n'
		);
	}

	public function execute() {
		global $wgDBname;
		if ( !function_exists( "gzdeflate" ) ) {
			$this->fatalError( "You must enable zlib support in PHP to compress old revisions!\n" .
				"Please see http://www.php.net/manual/en/ref.zlib.php\n" );
		}

		$type = $this->getOption( 'type', 'concat' );
		$chunkSize = $this->getOption( 'chunksize', 20 );
		$startId = $this->getOption( 'startid', 0 );
		$beginDate = $this->getOption( 'begin-date', '' );
		$endDate = $this->getOption( 'end-date', '' );
		$extDB = $this->getOption( 'extdb', '' );
		$endId = $this->getOption( 'endid', false );

		if ( $type != 'concat' && $type != 'gzip' ) {
			$this->error( "Type \"{$type}\" not supported" );
		}

		if ( $extDB != '' ) {
			$this->output( "Compressing database {$wgDBname} to external cluster {$extDB}\n"
				. str_repeat( '-', 76 ) . "\n\n" );
		} else {
			$this->output( "Compressing database {$wgDBname}\n"
				. str_repeat( '-', 76 ) . "\n\n" );
		}

		$success = true;
		if ( $type == 'concat' ) {
			$success = $this->compressWithConcat( $startId, $chunkSize, $beginDate,
				$endDate, $extDB, $endId );
		} else {
			$this->compressOldPages( $startId, $extDB );
		}

		if ( $success ) {
			$this->output( "Done.\n" );
		}
	}

	/**
	 * Fetch text rows and pass them one by one to compressPage() for compression.
	 */
	private function compressOldPages( $start = 0, $extdb = '' ) {
		$chunksize = 50;
		$this->output( "Starting from old_id $start...\n" );
		$dbw = $this->getDB( DB_MASTER );
		do {
			$res = $dbw->select(
				'text',
				[ 'old_id', 'old_flags', 'old_text' ],
				"old_id>=$start",
				__METHOD__,
				[ 'ORDER BY' => 'old_id', 'LIMIT' => $chunksize, 'FOR UPDATE' ]
			);

			if ( $res->numRows() == 0 ) {
				break;
			}

			$last = $start;

			foreach ( $res as $row ) {
				# print " {$row->old_id} - {$row->old_namespace}:{$row->old_title}\n";
				$this->compressPage( $row, $extdb );
				$last = $row->old_id;
			}

			$start = $last + 1; # Deletion may leave long empty stretches
			$this->output( "$start...\n" );
		} while ( true );
	}

	/**
	 * Compress the text in gzip format.
	 */
	private function compressPage( $row, $extdb ) {
		if ( false !== strpos( $row->old_flags, 'gzip' )
			|| false !== strpos( $row->old_flags, 'object' )
		) {
			# print "Already compressed row {$row->old_id}\n";
			return false;
		}
		$dbw = $this->getDB( DB_MASTER );
		$flags = $row->old_flags ? "{$row->old_flags},gzip" : "gzip";
		$compress = gzdeflate( $row->old_text );

		# Store in external storage if required
		if ( $extdb !== '' ) {
			$storeObj = new ExternalStoreDB;
			$compress = $storeObj->store( $extdb, $compress );
			if ( $compress === false ) {
				$this->error( "Unable to store object" );

				return false;
			}
		}

		# Update text row
		$dbw->update( 'text',
			[ /* SET */
				'old_flags' => $flags,
				'old_text' => $compress
			], [ /* WHERE */
				'old_id' => $row->old_id
			], __METHOD__,
			[ 'LIMIT' => 1 ]
		);

		return true;
	}

	/**
	 * Compress the text in chunks after concatenating the revisions.
	 */
	private function compressWithConcat( $startId, $maxChunkSize, $beginDate,
		$endDate, $extdb = "", $maxPageId = false
	) {
		$loadStyle = self::LS_CHUNKED;

		$dbr = $this->getDB( DB_REPLICA );
		$dbw = $this->getDB( DB_MASTER );

		# Set up external storage
		if ( $extdb != '' ) {
			$storeObj = new ExternalStoreDB;
		}

		# Get all articles by page_id
		if ( !$maxPageId ) {
			$maxPageId = $dbr->selectField( 'page', 'max(page_id)', '', __METHOD__ );
		}
		$this->output( "Starting from $startId of $maxPageId\n" );
		$pageConds = [];

		/*
		if ( $exclude_ns0 ) {
			print "Excluding main namespace\n";
			$pageConds[] = 'page_namespace<>0';
		}
		if ( $queryExtra ) {
			$pageConds[] = $queryExtra;
		}
		*/

		# For each article, get a list of revisions which fit the criteria

		# No recompression, use a condition on old_flags
		# Don't compress object type entities, because that might produce data loss when
		# overwriting bulk storage concat rows. Don't compress external references, because
		# the script doesn't yet delete rows from external storage.
		$conds = [
			'old_flags NOT ' . $dbr->buildLike( $dbr->anyString(), 'object', $dbr->anyString() )
			. ' AND old_flags NOT '
			. $dbr->buildLike( $dbr->anyString(), 'external', $dbr->anyString() )
		];

		if ( $beginDate ) {
			if ( !preg_match( '/^\d{14}$/', $beginDate ) ) {
				$this->error( "Invalid begin date \"$beginDate\"\n" );

				return false;
			}
			$conds[] = "rev_timestamp>'" . $beginDate . "'";
		}
		if ( $endDate ) {
			if ( !preg_match( '/^\d{14}$/', $endDate ) ) {
				$this->error( "Invalid end date \"$endDate\"\n" );

				return false;
			}
			$conds[] = "rev_timestamp<'" . $endDate . "'";
		}
		if ( $loadStyle == self::LS_CHUNKED ) {
			$tables = [ 'revision', 'text' ];
			$fields = [ 'rev_id', 'rev_text_id', 'old_flags', 'old_text' ];
			$conds[] = 'rev_text_id=old_id';
			$revLoadOptions = 'FOR UPDATE';
		} else {
			$tables = [ 'revision' ];
			$fields = [ 'rev_id', 'rev_text_id' ];
			$revLoadOptions = [];
		}

		# Don't work with current revisions
		# Don't lock the page table for update either -- TS 2006-04-04
		# $tables[] = 'page';
		# $conds[] = 'page_id=rev_page AND rev_id != page_latest';

		for ( $pageId = $startId; $pageId <= $maxPageId; $pageId++ ) {
			wfWaitForSlaves();

			# Wake up
			$dbr->ping();

			# Get the page row
			$pageRes = $dbr->select( 'page',
				[ 'page_id', 'page_namespace', 'page_title', 'page_latest' ],
				$pageConds + [ 'page_id' => $pageId ], __METHOD__ );
			if ( $pageRes->numRows() == 0 ) {
				continue;
			}
			$pageRow = $dbr->fetchObject( $pageRes );

			# Display progress
			$titleObj = Title::makeTitle( $pageRow->page_namespace, $pageRow->page_title );
			$this->output( "$pageId\t" . $titleObj->getPrefixedDBkey() . " " );

			# Load revisions
			$revRes = $dbw->select( $tables, $fields,
				array_merge( [
					'rev_page' => $pageRow->page_id,
					# Don't operate on the current revision
					# Use < instead of <> in case the current revision has changed
					# since the page select, which wasn't locking
					'rev_id < ' . $pageRow->page_latest
				], $conds ),
				__METHOD__,
				$revLoadOptions
			);
			$revs = [];
			foreach ( $revRes as $revRow ) {
				$revs[] = $revRow;
			}

			if ( count( $revs ) < 2 ) {
				# No revisions matching, no further processing
				$this->output( "\n" );
				continue;
			}

			# For each chunk
			$i = 0;
			while ( $i < count( $revs ) ) {
				if ( $i < count( $revs ) - $maxChunkSize ) {
					$thisChunkSize = $maxChunkSize;
				} else {
					$thisChunkSize = count( $revs ) - $i;
				}

				$chunk = new ConcatenatedGzipHistoryBlob();
				$stubs = [];
				$this->beginTransaction( $dbw, __METHOD__ );
				$usedChunk = false;
				$primaryOldid = $revs[$i]->rev_text_id;

				# Get the text of each revision and add it to the object
				// phpcs:ignore Generic.CodeAnalysis.ForLoopWithTestFunctionCall
				for ( $j = 0; $j < $thisChunkSize && $chunk->isHappy(); $j++ ) {
					$oldid = $revs[$i + $j]->rev_text_id;

					# Get text
					if ( $loadStyle == self::LS_INDIVIDUAL ) {
						$textRow = $dbw->selectRow( 'text',
							[ 'old_flags', 'old_text' ],
							[ 'old_id' => $oldid ],
							__METHOD__,
							'FOR UPDATE'
						);
						$text = Revision::getRevisionText( $textRow );
					} else {
						$text = Revision::getRevisionText( $revs[$i + $j] );
					}

					if ( $text === false ) {
						$this->error( "\nError, unable to get text in old_id $oldid" );
						# $dbw->delete( 'old', [ 'old_id' => $oldid ] );
					}

					if ( $extdb == "" && $j == 0 ) {
						$chunk->setText( $text );
						$this->output( '.' );
					} else {
						# Don't make a stub if it's going to be longer than the article
						# Stubs are typically about 100 bytes
						if ( strlen( $text ) < 120 ) {
							$stub = false;
							$this->output( 'x' );
						} else {
							$stub = new HistoryBlobStub( $chunk->addItem( $text ) );
							$stub->setLocation( $primaryOldid );
							$stub->setReferrer( $oldid );
							$this->output( '.' );
							$usedChunk = true;
						}
						$stubs[$j] = $stub;
					}
				}
				$thisChunkSize = $j;

				# If we couldn't actually use any stubs because the pages were too small, do nothing
				if ( $usedChunk ) {
					if ( $extdb != "" ) {
						# Move blob objects to External Storage
						$stored = $storeObj->store( $extdb, serialize( $chunk ) );
						if ( $stored === false ) {
							$this->error( "Unable to store object" );

							return false;
						}
						# Store External Storage URLs instead of Stub placeholders
						foreach ( $stubs as $stub ) {
							if ( $stub === false ) {
								continue;
							}
							# $stored should provide base path to a BLOB
							$url = $stored . "/" . $stub->getHash();
							$dbw->update( 'text',
								[ /* SET */
									'old_text' => $url,
									'old_flags' => 'external,utf-8',
								], [ /* WHERE */
									'old_id' => $stub->getReferrer(),
								]
							);
						}
					} else {
						# Store the main object locally
						$dbw->update( 'text',
							[ /* SET */
								'old_text' => serialize( $chunk ),
								'old_flags' => 'object,utf-8',
							], [ /* WHERE */
								'old_id' => $primaryOldid
							]
						);

						# Store the stub objects
						for ( $j = 1; $j < $thisChunkSize; $j++ ) {
							# Skip if not compressing and don't overwrite the first revision
							if ( $stubs[$j] !== false && $revs[$i + $j]->rev_text_id != $primaryOldid ) {
								$dbw->update( 'text',
									[ /* SET */
										'old_text' => serialize( $stubs[$j] ),
										'old_flags' => 'object,utf-8',
									], [ /* WHERE */
										'old_id' => $revs[$i + $j]->rev_text_id
									]
								);
							}
						}
					}
				}
				# Done, next
				$this->output( "/" );
				$this->commitTransaction( $dbw, __METHOD__ );
				$i += $thisChunkSize;
			}
			$this->output( "\n" );
		}

		return true;
	}
}

$maintClass = CompressOld::class;
require_once RUN_MAINTENANCE_IF_MAIN;
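
Example: how a text row written by compressPage() round-trips. This is a minimal sketch, not part of the script; it assumes a row object shaped like the text table columns (old_text, old_flags), and the helper names are hypothetical. In MediaWiki itself the read side goes through Revision::getRevisionText(), which checks old_flags before inflating.

<?php
// Illustrative sketch only (hypothetical helpers): mirrors the gzip path of
// compressPage() -- append the 'gzip' flag and deflate on write, check the
// flag and inflate on read.

function compressRow( $row ) {
	$row->old_flags = $row->old_flags ? "{$row->old_flags},gzip" : 'gzip';
	$row->old_text = gzdeflate( $row->old_text );
	return $row;
}

function readRowText( $row ) {
	$text = $row->old_text;
	if ( in_array( 'gzip', explode( ',', $row->old_flags ) ) ) {
		$text = gzinflate( $text );
	}
	return $text;
}

$row = (object)[ 'old_flags' => 'utf-8', 'old_text' => 'Some old revision text' ];
$stored = compressRow( clone $row );
var_dump( readRowText( $stored ) === 'Some old revision text' ); // bool(true)

Rows whose old_flags already contain 'gzip' or 'object' are skipped by the script, which is why a reader only needs to consult the flag list before touching old_text.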
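
For the concat mode, the chunking arithmetic in compressWithConcat() is easy to miss among the storage code. The following is an illustrative sketch (the planChunks() helper is hypothetical, not part of MediaWiki) of how the older revisions of one page are partitioned: full chunks of at most --chunksize revisions, with the remainder in the final chunk. In the local-storage case the first text row of each chunk keeps the serialized ConcatenatedGzipHistoryBlob and the later rows are rewritten as stubs pointing back at it.

<?php
// Illustrative sketch only: the chunk partitioning used by compressWithConcat().
// $revTextIds stands in for the rev_text_id values of the selected revisions.

function planChunks( array $revTextIds, $maxChunkSize ) {
	$plan = [];
	$i = 0;
	while ( $i < count( $revTextIds ) ) {
		// Same test as the script: full chunks until fewer than $maxChunkSize remain.
		if ( $i < count( $revTextIds ) - $maxChunkSize ) {
			$thisChunkSize = $maxChunkSize;
		} else {
			$thisChunkSize = count( $revTextIds ) - $i;
		}
		$plan[] = [
			// First row of the chunk holds the concatenated blob (the "primary" old_id) ...
			'primary' => $revTextIds[$i],
			// ... later rows become stubs that reference it.
			'stubs' => array_slice( $revTextIds, $i + 1, $thisChunkSize - 1 ),
		];
		$i += $thisChunkSize;
	}
	return $plan;
}

print_r( planChunks( range( 101, 107 ), 3 ) );
// Chunks of 3, 3 and 1: primaries 101, 104 and 107.

Note that in the actual script a revision shorter than about 120 bytes is left as-is rather than stubbed (the 'x' progress marker), so a real chunk may contain fewer stubs than this sketch suggests.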