MediaWiki REL1_32
upgradeLogging.php
<?php

require __DIR__ . '/../commandLine.inc';

/**
 * Maintenance script that upgrades the log_id/log_deleted fields in a
 * replication-safe way.
 */
class UpdateLogging {

	/**
	 * @var IMaintainableDatabase
	 */
	public $dbw;
	public $batchSize = 1000;
	public $minTs = false;

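	/**
	 * Build the new-format table as logging_1_10, copy the existing rows into
	 * it in batches, then swap it in with RENAME TABLE, keeping the original
	 * table around as logging_pre_1_10.
	 */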
	function execute() {
		$this->dbw = $this->getDB( DB_MASTER );
		$logging = $this->dbw->tableName( 'logging' );
		$logging_1_10 = $this->dbw->tableName( 'logging_1_10' );
		$logging_pre_1_10 = $this->dbw->tableName( 'logging_pre_1_10' );

		if ( $this->dbw->tableExists( 'logging_pre_1_10' ) && !$this->dbw->tableExists( 'logging' ) ) {
			# Fix previous aborted run
			echo "Cleaning up from previous aborted run\n";
			$this->dbw->query( "RENAME TABLE $logging_pre_1_10 TO $logging", __METHOD__ );
		}

		if ( $this->dbw->tableExists( 'logging_pre_1_10' ) ) {
			echo "This script has already been run to completion\n";

			return;
		}

		# Create the target table
		if ( !$this->dbw->tableExists( 'logging_1_10' ) ) {
			global $wgDBTableOptions;

			$sql = <<<EOT
CREATE TABLE $logging_1_10 (
  -- Log ID, for referring to this specific log entry, probably for deletion and such.
  log_id int unsigned NOT NULL auto_increment,

  -- Symbolic keys for the general log type and the action type
  -- within the log. The output format will be controlled by the
  -- action field, but only the type controls categorization.
  log_type varbinary(10) NOT NULL default '',
  log_action varbinary(10) NOT NULL default '',

  -- Timestamp. Duh.
  log_timestamp binary(14) NOT NULL default '19700101000000',

  -- The user who performed this action; key to user_id
  log_user int unsigned NOT NULL default 0,

  -- Key to the page affected. Where a user is the target,
  -- this will point to the user page.
  log_namespace int NOT NULL default 0,
  log_title varchar(255) binary NOT NULL default '',

  -- Freeform text. Interpreted as edit history comments.
  log_comment varchar(255) NOT NULL default '',

  -- LF separated list of miscellaneous parameters
  log_params blob NOT NULL,

  -- rev_deleted for logs
  log_deleted tinyint unsigned NOT NULL default '0',

  PRIMARY KEY log_id (log_id),
  KEY type_time (log_type, log_timestamp),
  KEY user_time (log_user, log_timestamp),
  KEY page_time (log_namespace, log_title, log_timestamp),
  KEY times (log_timestamp)

) $wgDBTableOptions
EOT;
			echo "Creating table logging_1_10\n";
			$this->dbw->query( $sql, __METHOD__ );
		}

		# Synchronise the tables
		echo "Doing initial sync...\n";
		$this->sync( 'logging', 'logging_1_10' );
		echo "Sync done\n\n";

		# Rename the old table away
		echo "Renaming the old table to $logging_pre_1_10\n";
		$this->dbw->query( "RENAME TABLE $logging TO $logging_pre_1_10", __METHOD__ );

		# Copy remaining old rows
		# Done before the new table is active so that $copyPos is accurate
		echo "Doing final sync...\n";
		$this->sync( 'logging_pre_1_10', 'logging_1_10' );

		# Move the new table in
		echo "Moving the new table in...\n";
		$this->dbw->query( "RENAME TABLE $logging_1_10 TO $logging", __METHOD__ );
		echo "Finished.\n";
	}

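	/**
	 * Copy all rows from $srcTable to $dstTable
	 *
	 * @param string $srcTable
	 * @param string $dstTable
	 */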
	function sync( $srcTable, $dstTable ) {
		$batchSize = 1000;
		$minTs = $this->dbw->selectField( $srcTable, 'MIN(log_timestamp)', '', __METHOD__ );
		$minTsUnix = wfTimestamp( TS_UNIX, $minTs );
		$numRowsCopied = 0;

		while ( true ) {
			$maxTs = $this->dbw->selectField( $srcTable, 'MAX(log_timestamp)', '', __METHOD__ );
			$copyPos = $this->dbw->selectField( $dstTable, 'MAX(log_timestamp)', '', __METHOD__ );
			$maxTsUnix = wfTimestamp( TS_UNIX, $maxTs );
			$copyPosUnix = wfTimestamp( TS_UNIX, $copyPos );

			if ( $copyPos === null ) {
				$percent = 0;
			} else {
				$percent = ( $copyPosUnix - $minTsUnix ) / ( $maxTsUnix - $minTsUnix ) * 100;
			}
			printf( "%s %.2f%%\n", $copyPos, $percent );

			# Handle all entries with timestamp equal to $copyPos
			if ( $copyPos !== null ) {
				$numRowsCopied += $this->copyExactMatch( $srcTable, $dstTable, $copyPos );
			}

			# Now copy a batch of rows
			if ( $copyPos === null ) {
				$conds = false;
			} else {
				$conds = [ 'log_timestamp > ' . $this->dbw->addQuotes( $copyPos ) ];
			}
			$srcRes = $this->dbw->select( $srcTable, '*', $conds, __METHOD__,
				[ 'LIMIT' => $batchSize, 'ORDER BY' => 'log_timestamp' ] );

			if ( !$srcRes->numRows() ) {
				# All done
				break;
			}

			$batch = [];
			foreach ( $srcRes as $srcRow ) {
				$batch[] = (array)$srcRow;
			}
			$this->dbw->insert( $dstTable, $batch, __METHOD__ );
			$numRowsCopied += count( $batch );

			wfWaitForSlaves();
		}
		echo "Copied $numRowsCopied rows\n";
	}

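	/**
	 * Copy the rows whose log_timestamp equals $copyPos from $srcTable to
	 * $dstTable, skipping any that already exist in the destination, and
	 * return the number of rows copied.
	 */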
	function copyExactMatch( $srcTable, $dstTable, $copyPos ) {
		$numRowsCopied = 0;
		$srcRes = $this->dbw->select( $srcTable, '*', [ 'log_timestamp' => $copyPos ], __METHOD__ );
		$dstRes = $this->dbw->select( $dstTable, '*', [ 'log_timestamp' => $copyPos ], __METHOD__ );

		if ( $srcRes->numRows() ) {
			$srcRow = $srcRes->fetchObject();
			$srcFields = array_keys( (array)$srcRow );
			$srcRes->seek( 0 );
			$dstRowsSeen = [];

			# Make a hashtable of rows that already exist in the destination
			foreach ( $dstRes as $dstRow ) {
				$reducedDstRow = [];
				foreach ( $srcFields as $field ) {
					$reducedDstRow[$field] = $dstRow->$field;
				}
				$hash = md5( serialize( $reducedDstRow ) );
				$dstRowsSeen[$hash] = true;
			}

			# Copy all the source rows that aren't already in the destination
			foreach ( $srcRes as $srcRow ) {
				$hash = md5( serialize( (array)$srcRow ) );
				if ( !isset( $dstRowsSeen[$hash] ) ) {
					$this->dbw->insert( $dstTable, (array)$srcRow, __METHOD__ );
					$numRowsCopied++;
				}
			}
		}

		return $numRowsCopied;
	}
}

$ul = new UpdateLogging;
$ul->execute();
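What makes this script replication-safe is that it copies rows in small, timestamp-ordered batches and calls wfWaitForSlaves() after each batch instead of issuing one huge INSERT ... SELECT. The sketch below isolates that pattern as a standalone loop; the table names ('example_src', 'example_dst'), the column name 'ts' and the batch size are placeholders rather than anything taken from the script above, and the boundary-timestamp deduplication handled by copyExactMatch() is omitted.

<?php
// Minimal sketch of the batched, replication-safe copy pattern used by
// UpdateLogging::sync(). The table names ('example_src', 'example_dst') and
// the timestamp column ('ts') are placeholders for illustration only.
require __DIR__ . '/../commandLine.inc'; // same bootstrap as the script above

$dbw = wfGetDB( DB_MASTER );
$batchSize = 1000;

// Resume from the highest timestamp already present in the destination.
$copyPos = $dbw->selectField( 'example_dst', 'MAX(ts)', '', __METHOD__ );

while ( true ) {
	// Fetch the next batch above the copy position, ordered by timestamp
	// so the position only ever moves forward.
	$conds = ( $copyPos === null )
		? []
		: [ 'ts > ' . $dbw->addQuotes( $copyPos ) ];
	$res = $dbw->select( 'example_src', '*', $conds, __METHOD__,
		[ 'ORDER BY' => 'ts', 'LIMIT' => $batchSize ] );
	if ( !$res->numRows() ) {
		break; // all rows copied
	}

	$batch = [];
	foreach ( $res as $row ) {
		$batch[] = (array)$row;
		$copyPos = $row->ts;
	}
	$dbw->insert( 'example_dst', $batch, __METHOD__ );

	// Let the replica DBs catch up before copying the next batch.
	wfWaitForSlaves();
}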
Referenced symbols:

UpdateLogging: Maintenance script that upgrades the log_id/log_deleted fields in a replication-safe way.
UpdateLogging::$dbw: IMaintainableDatabase handle used for all reads and writes.
UpdateLogging::sync( $srcTable, $dstTable ): Copy all rows from $srcTable to $dstTable.
UpdateLogging::copyExactMatch( $srcTable, $dstTable, $copyPos ): Copy the rows at exactly $copyPos that are missing from the destination.
$wgDBTableOptions: MySQL table options to use during installation or update.
wfWaitForSlaves( $ifWritesSince = null, $wiki = false, $cluster = false, $timeout = null ): Waits for the replica DBs to catch up to the master position.
wfTimestamp( $outputtype = TS_UNIX, $ts = 0 ): Get a timestamp string in one of various formats.
IMaintainableDatabase: Advanced database interface for IDatabase handles that include maintenance methods.
DB_MASTER: const, defined in defines.php:26.