	static function getMetadata( $filename ) {
		self::$pngSig = pack( "C8", 137, 80, 78, 71, 13, 10, 26, 10 );
		self::$crcSize = 4;
		self::$textChunks = [
			'xml:com.adobe.xmp' => 'xmp',
			# Artist is unofficial. Author is the recommended
			# keyword in the PNG spec. However some people output
			# Artist so support both.
			'artist' => 'Artist',
			'author' => 'Artist',
			'comment' => 'PNGFileComment',
			'description' => 'ImageDescription',
			'title' => 'ObjectName',
			'copyright' => 'Copyright',
			# Source as in original device used to make image,
			# not as in who gave you the image.
			'source' => 'Model',
			'software' => 'Software',
			'disclaimer' => 'Disclaimer',
			'warning' => 'ContentWarning',
			'url' => 'Identifier', # Not sure if this is best mapping. Maybe WebStatement.
			'creation time' => 'DateTimeDigitized',
		];
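		// The map above translates PNG textual-chunk keywords (lowercased
		// before lookup in the iTXt/tEXt/zTXt handling below) to the metadata
		// key names exposed to callers.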
		$frameCount = 0;
		$loopCount = 1;
		$text = [];
		$duration = 0.0;
		$bitDepth = 0;
		$colorType = 'unknown';
		if ( !$filename ) {
			throw new Exception( __METHOD__ . ": No file name specified" );
		} elseif ( !file_exists( $filename ) || is_dir( $filename ) ) {
			throw new Exception( __METHOD__ . ": File $filename does not exist" );
		}
		$fh = fopen( $filename, 'rb' );

		if ( !$fh ) {
			throw new Exception( __METHOD__ . ": Unable to open file $filename" );
		}
		$buf = fread( $fh, 8 );
		if ( $buf != self::$pngSig ) {
			throw new Exception( __METHOD__ . ": Not a valid PNG file; header: $buf" );
		}
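		// After the signature, a PNG is a sequence of chunks, each laid out
		// as: a 4-byte big-endian data length, a 4-byte chunk type, the data
		// itself, and a 4-byte CRC.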
		while ( !feof( $fh ) ) {
			$buf = fread( $fh, 4 );
			if ( !$buf || strlen( $buf ) < 4 ) {
				throw new Exception( __METHOD__ . ": Read error" );
			}
			$chunk_size = unpack( "N", $buf )[1];

			if ( $chunk_size < 0 ) {
				// "N" is unsigned, but PHP can overflow to a negative
				// integer for huge values on 32-bit platforms.
				throw new Exception( __METHOD__ . ": Chunk size too big for unpack" );
			}
			$chunk_type = fread( $fh, 4 );
			if ( !$chunk_type || strlen( $chunk_type ) < 4 ) {
				throw new Exception( __METHOD__ . ": Read error" );
			}
			if ( $chunk_type == "IHDR" ) {
				$buf = self::read( $fh, $chunk_size );
				if ( !$buf || strlen( $buf ) < $chunk_size ) {
					throw new Exception( __METHOD__ . ": Read error" );
				}
				$bitDepth = ord( substr( $buf, 8, 1 ) );
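				// IHDR data layout: width (4 bytes), height (4), bit depth
				// (1 byte, at offset 8), colour type (1 byte, at offset 9),
				// then the compression, filter and interlace method bytes.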
				switch ( ord( substr( $buf, 9, 1 ) ) ) {
					case 0:
						$colorType = 'greyscale';
						break;
					case 2:
						$colorType = 'truecolour';
						break;
					case 3:
						$colorType = 'index-coloured';
						break;
					case 4:
						$colorType = 'greyscale-alpha';
						break;
					case 6:
						$colorType = 'truecolour-alpha';
						break;
					default:
						$colorType = 'unknown';
						break;
				}
			} elseif ( $chunk_type == "acTL" ) {
				$buf = fread( $fh, $chunk_size );
				if ( !$buf || strlen( $buf ) < $chunk_size || $chunk_size < 4 ) {
					throw new Exception( __METHOD__ . ": Read error" );
				}

				$actl = unpack( "Nframes/Nplays", $buf );
				$frameCount = $actl['frames'];
				$loopCount = $actl['plays'];
			} elseif ( $chunk_type == "fcTL" ) {
				$buf = self::read( $fh, $chunk_size );
				if ( !$buf || strlen( $buf ) < $chunk_size ) {
					throw new Exception( __METHOD__ . ": Read error" );
				}
				$buf = substr( $buf, 20 );
				if ( strlen( $buf ) < 4 ) {
					throw new Exception( __METHOD__ . ": Read error" );
				}

				$fctldur = unpack( "ndelay_num/ndelay_den", $buf );
				if ( $fctldur['delay_den'] == 0 ) {
					$fctldur['delay_den'] = 100;
				}
				if ( $fctldur['delay_num'] ) {
					$duration += $fctldur['delay_num'] / $fctldur['delay_den'];
				}
			} elseif ( $chunk_type == "iTXt" ) {
				$buf = self::read( $fh, $chunk_size );
				$items = [];
				if ( preg_match(
					'/^([^\x00]{1,79})\x00(\x00|\x01)\x00([^\x00]*)(.)[^\x00]*\x00(.*)$/Ds',
					$buf, $items )
				) {
					/* $items[1] = keyword, $items[2] = compression flag,
					 * $items[3] = lang code (or ""), $items[4] = compression type,
					 * $items[5] = content.
					 */

					// Chunk keywords are matched case-insensitively in practice.
					$items[1] = strtolower( $items[1] );
					if ( !isset( self::$textChunks[$items[1]] ) ) {
						// Only extract textual chunks on our list.
						fseek( $fh, self::$crcSize, SEEK_CUR );
						continue;
					}

					$items[3] = strtolower( $items[3] );
					if ( $items[3] == '' ) {
						// If no lang is specified, use x-default as in XMP.
						$items[3] = 'x-default';
					}

					// If compressed:
					if ( $items[2] == "\x01" ) {
						if ( function_exists( 'gzuncompress' ) && $items[4] === "\x00" ) {
							MediaWiki\suppressWarnings();
							$items[5] = gzuncompress( $items[5] );
							MediaWiki\restoreWarnings();

							if ( $items[5] === false ) {
								// Decompression failed.
								wfDebug( __METHOD__ .
									' Error decompressing iTxt chunk - ' . $items[1] . "\n" );
								fseek( $fh, self::$crcSize, SEEK_CUR );
								continue;
							}
						} else {
							wfDebug( __METHOD__ .
								' Skipping compressed png iTXt chunk due to lack of zlib,'
								. " or potentially invalid compression method\n" );
							fseek( $fh, self::$crcSize, SEEK_CUR );
							continue;
						}
					}
					$finalKeyword = self::$textChunks[$items[1]];
					$text[$finalKeyword][$items[3]] = $items[5];
					$text[$finalKeyword]['_type'] = 'lang';
				} else {
					throw new Exception( __METHOD__ . ": Read error on iTXt chunk" );
				}
			} elseif ( $chunk_type == 'tEXt' ) {
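				// tEXt layout: keyword\0 followed by uncompressed Latin-1
				// text, hence the iconv() to UTF-8 below.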
				$buf = self::read( $fh, $chunk_size );

				// In case there is no \x00, which would make explode() fail.
				if ( strpos( $buf, "\x00" ) === false ) {
					throw new Exception( __METHOD__ . ": Read error on tEXt chunk" );
				}

				list( $keyword, $content ) = explode( "\x00", $buf, 2 );
				if ( $keyword === '' || $content === '' ) {
					throw new Exception( __METHOD__ . ": Read error on tEXt chunk" );
				}

				$keyword = strtolower( $keyword );
				if ( !isset( self::$textChunks[$keyword] ) ) {
					// Don't recognize the chunk, so skip it.
					fseek( $fh, self::$crcSize, SEEK_CUR );
					continue;
				}

				MediaWiki\suppressWarnings();
				$content = iconv( 'ISO-8859-1', 'UTF-8', $content );
				MediaWiki\restoreWarnings();

				if ( $content === false ) {
					throw new Exception( __METHOD__ . ": Read error (error with iconv)" );
				}

				$finalKeyword = self::$textChunks[$keyword];
				$text[$finalKeyword]['x-default'] = $content;
				$text[$finalKeyword]['_type'] = 'lang';
			} elseif ( $chunk_type == 'zTXt' ) {
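				// zTXt layout: keyword\0, a compression-method byte (only
				// \x00 = deflate is defined), then zlib-compressed Latin-1
				// text.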
				if ( function_exists( 'gzuncompress' ) ) {
					$buf = self::read( $fh, $chunk_size );

					// In case there is no \x00, which would make explode() fail.
					if ( strpos( $buf, "\x00" ) === false ) {
						throw new Exception( __METHOD__ . ": Read error on zTXt chunk" );
					}

					list( $keyword, $postKeyword ) = explode( "\x00", $buf, 2 );
					if ( $keyword === '' || $postKeyword === '' ) {
						throw new Exception( __METHOD__ . ": Read error on zTXt chunk" );
					}

					$keyword = strtolower( $keyword );
					if ( !isset( self::$textChunks[$keyword] ) ) {
						// Don't recognize the chunk, so skip it.
						fseek( $fh, self::$crcSize, SEEK_CUR );
						continue;
					}

					$compression = substr( $postKeyword, 0, 1 );
					$content = substr( $postKeyword, 1 );
					if ( $compression !== "\x00" ) {
						wfDebug( __METHOD__ .
							" Unrecognized compression method in zTXt ($keyword). Skipping.\n" );
						fseek( $fh, self::$crcSize, SEEK_CUR );
						continue;
					}

					MediaWiki\suppressWarnings();
					$content = gzuncompress( $content );
					MediaWiki\restoreWarnings();

					if ( $content === false ) {
						// Decompression failed.
						wfDebug( __METHOD__ . ' Error decompressing zTXt chunk - ' . $keyword . "\n" );
						fseek( $fh, self::$crcSize, SEEK_CUR );
						continue;
					}

					MediaWiki\suppressWarnings();
					$content = iconv( 'ISO-8859-1', 'UTF-8', $content );
					MediaWiki\restoreWarnings();

					if ( $content === false ) {
						throw new Exception( __METHOD__ . ": Read error (error with iconv)" );
					}

					$finalKeyword = self::$textChunks[$keyword];
					$text[$finalKeyword]['x-default'] = $content;
					$text[$finalKeyword]['_type'] = 'lang';
				} else {
					wfDebug( __METHOD__ . " Cannot decompress zTXt chunk due to lack of zlib. Skipping.\n" );
					fseek( $fh, $chunk_size, SEEK_CUR );
				}
			} elseif ( $chunk_type == 'tIME' ) {
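				// tIME stores the last-modification time in 7 bytes: a 2-byte
				// year followed by single bytes for month, day, hour, minute
				// and second (nominally UTC).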
				if ( $chunk_size !== 7 ) {
					throw new Exception( __METHOD__ . ": tIME wrong size" );
				}
				$buf = self::read( $fh, $chunk_size );
				if ( !$buf || strlen( $buf ) < $chunk_size ) {
					throw new Exception( __METHOD__ . ": Read error" );
				}

				$t = unpack( "ny/Cm/Cd/Ch/Cmin/Cs", $buf );
				$strTime = sprintf( "%04d%02d%02d%02d%02d%02d",
					$t['y'], $t['m'], $t['d'],
					$t['h'], $t['min'], $t['s'] );

				$exifTime = wfTimestamp( TS_EXIF, $strTime );

				if ( $exifTime ) {
					$text['DateTime'] = $exifTime;
				}
			} elseif ( $chunk_type == 'pHYs' ) {
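				// pHYs stores the intended pixel density: 4-byte pixels per
				// unit for the x axis, 4 bytes for the y axis, and a 1-byte
				// unit specifier (1 = metre, 0 = unit unknown).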
				if ( $chunk_size !== 9 ) {
					throw new Exception( __METHOD__ . ": pHYs wrong size" );
				}

				$buf = self::read( $fh, $chunk_size );
				if ( !$buf || strlen( $buf ) < $chunk_size ) {
					throw new Exception( __METHOD__ . ": Read error" );
				}

				$dim = unpack( "Nwidth/Nheight/Cunit", $buf );
				if ( $dim['unit'] == 1 ) {
					// Unit is the metre (as opposed to 0 = undefined).
					// The > 0 checks guard against unpack() overflowing to
					// negative values for huge unsigned 32-bit integers.
					if ( $dim['width'] > 0 && $dim['height'] > 0 ) {
						// Express dots per metre as dots per centimetre,
						// as a rational, to match Exif conventions.
						$text['XResolution'] = $dim['width'] . '/100';
						$text['YResolution'] = $dim['height'] . '/100';
						$text['ResolutionUnit'] = 3; // 3 = dots per cm (from Exif).
					}
				}
			} elseif ( $chunk_type == "IEND" ) {
				// End of the image; stop reading chunks.
				break;
			} else {
				// Skip chunk types we don't handle.
				fseek( $fh, $chunk_size, SEEK_CUR );
			}
			// Skip the CRC that trails every chunk's data.
			fseek( $fh, self::$crcSize, SEEK_CUR );
		}
		fclose( $fh );
		if ( $loopCount > 1 ) {
			$duration *= $loopCount;
		}

		if ( isset( $text['DateTimeDigitized'] ) ) {
			// Convert the creation date from the textual chunk into Exif
			// format where possible.
			foreach ( $text['DateTimeDigitized'] as $name => &$value ) {
				if ( $name === '_type' ) {
					continue;
				}
				$formatted = wfTimestamp( TS_EXIF, $value );
				if ( $formatted ) {
					// Only replace the value if the date could be converted.
					$value = $formatted;
				}
			}
		}
		return [
			'frameCount' => $frameCount,
			'loopCount' => $loopCount,
			'duration' => $duration,
			'text' => $text,
			'bitDepth' => $bitDepth,
			'colorType' => $colorType,
		];
	}
	/**
	 * Read a chunk, checking that it is not too big.
	 *
	 * @param resource $fh The file handle
	 * @param int $size Size in bytes
	 * @throws Exception If the chunk is too big
	 * @return string The chunk
	 */
	private static function read( $fh, $size ) {
		if ( $size > self::MAX_CHUNK_SIZE ) {
			throw new Exception( __METHOD__ . ': Chunk size of ' . $size .
				' too big. Max size is: ' . self::MAX_CHUNK_SIZE );
		}

		return fread( $fh, $size );
	}
}
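A minimal usage sketch (the file name is hypothetical, and the class assumes a
MediaWiki environment, since it calls wfDebug() and wfTimestamp()):

	$info = PNGMetadataExtractor::getMetadata( 'Example.png' );
	// Animation fields are populated for APNGs:
	//   $info['frameCount'], $info['loopCount'], $info['duration']
	// Always present:
	//   $info['bitDepth'], $info['colorType'], $info['text']
	echo $info['colorType']; // e.g. 'truecolour-alpha'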
deferred.txt

A few of the database updates required by various functions here can be
deferred until after the result page is displayed to the user. For example:
updating the view count, updating the linked-to tables after a save, etc. PHP
does not yet have any way to tell the server to actually return and disconnect
while still running these updates, but it might have such a feature in the
future. We handle these by creating a deferred update object and putting those
objects on a global list.
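As a sketch of the pattern (DeferredUpdates::addCallableUpdate() is the
standard entry point; the callback body here is a hypothetical example):

	DeferredUpdates::addCallableUpdate( function () {
		// Hypothetical update: bump a site statistic after the response
		// has been sent to the user.
		$dbw = wfGetDB( DB_MASTER );
		$dbw->update( 'site_stats',
			[ 'ss_total_views = ss_total_views + 1' ],
			[ 'ss_row_id' => 1 ],
			__METHOD__ );
	} );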
We use the convention $dbr for read and $dbw for write to help you keep track
of whether the database object is a slave (read-only) or a master
(read/write). If you write to a slave, the world will explode. Or to be
precise, a subsequent write query which succeeded on the master may fail when
replicated to the slave due to a unique key collision. Replication on the
slave will stop and it may take hours to repair the database and get it back
online. Setting read_only in my.cnf on the slave will avoid this but given the
dire consequences, we prefer to have as many checks as possible.

We provide a query() function for raw SQL, but the wrapper functions like
select() and insert() are usually more convenient, and take care of things
like table prefixes and escaping for you. If you really need to make your own
SQL, please read the documentation for tableName() and addQuotes().

Unindexed queries are generally not welcome in MediaWiki, except in special
pages derived from QueryPage. It's a common pitfall for new developers to
submit code containing SQL queries which examine huge numbers of rows.
Remember that COUNT(*) is O(N): counting rows in a table is like counting
beans in a bucket.

------------------------------------------------------------------------
Replication
------------------------------------------------------------------------

The largest installation of MediaWiki, Wikimedia, uses a large set of slave
MySQL servers replicating writes made to a master MySQL server. It is
important to understand the issues associated with this setup if you want to
write code destined for Wikipedia.

It's often the case that the best algorithm to use for a given task depends on
whether or not replication is in use. Due to our unabashed Wikipedia-centrism,
we often just use the replication-friendly version, but if you like, you can
use wfGetLB()->getServerCount() > 1 to check to see if replication is in use.

=== Lag ===

Lag primarily occurs when large write queries are sent to the master. Writes
on the master are executed in parallel, but they are executed in serial when
they are replicated to the slaves. The master writes the query to the binlog
when the transaction is committed. The slaves poll the binlog and start
executing the query as soon as it appears. They can service reads while they
are performing a write query, but will not read anything more from the binlog
and thus will perform no more writes. This means that if the write query runs
for a long time, the slaves will lag behind the master for the time it takes
for the write query to complete.

Lag can be exacerbated by high read load. MediaWiki's load balancer will stop
sending reads to a slave when it is lagged by more than 30 seconds. If the
load ratios are set incorrectly, or if there is too much load generally, this
may lead to a slave permanently hovering around 30 seconds lag.

If all slaves are lagged by more than 30 seconds, MediaWiki will stop writing
to the database. All edits and other write operations will be refused, with an
error returned to the user. This gives the slaves a chance to catch up. Before
we had this mechanism, the slaves would regularly lag by several minutes,
making review of recent edits difficult.

In addition to this, MediaWiki attempts to ensure that the user sees events
occurring on the wiki in chronological order. A few seconds of lag can be
tolerated, as long as the user sees a consistent picture from subsequent
requests. This is done by saving the master binlog position in the session,
and then at the start of each request, waiting for the slave to catch up to
that position before doing any reads from it. If this wait times out, reads
are allowed anyway, but the request is considered to be in "lagged slave
mode". Lagged slave mode can be checked by calling
wfGetLB()->getLaggedSlaveMode(). The only practical consequence at present is
a warning displayed in the page footer.

=== Lag avoidance ===

To avoid excessive lag, queries which write large numbers of rows should be
split up, generally to write one row at a time. Multi-row INSERT ... SELECT
queries are the worst offenders and should be avoided altogether. Instead, do
the select first and then the insert.

=== Working with lag ===

Despite our best efforts, it's not practical to guarantee a low-lag
environment. Lag will usually be less than one second, but may occasionally be
up to 30 seconds. For scalability, it's very important to keep load on the
master low, so simply sending all your queries to the master is not the
answer. So when you have a genuine need for up-to-date data, the following
approach is advised:

1) Do a quick query to the master for a sequence number or timestamp
2) Run the full query on the slave and check if it matches the data you got
   from the master
3) If it doesn't, run the full query on the master

To avoid swamping the master every time the slaves lag, use of this approach
should be kept to a minimum. In most cases you should just read from the slave
and let the user deal with the delay.

------------------------------------------------------------------------
Lock contention
------------------------------------------------------------------------

Due to the high write rate on Wikipedia (and some other wikis), MediaWiki
developers need to be very careful to structure their writes to avoid
long-lasting locks. By default, MediaWiki opens a transaction at the first
query, and commits it before the output is sent. Locks will be held from the
time when the query is done until the commit. So you can reduce lock time by
doing as much processing as possible before you do your write queries.

Often this approach is not good enough, and it becomes necessary to enclose
small groups of queries in their own transaction. Use the following syntax:

	$dbw = wfGetDB( DB_MASTER );
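A begin/commit pair around the grouped queries completes the pattern; a sketch
using the standard Database methods (the queries in between are whatever your
feature needs):

	$dbw->begin();
	# ...do the small group of write queries...
	$dbw->commit();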
wfDebug( $text, $dest = 'all', array $context = [] )
Sends a line to the debug log if enabled or, optionally, to a comment in
the output.
wfTimestamp( $outputtype = TS_UNIX, $ts = 0 )
Get a timestamp string in one of various formats.
const TS_EXIF
An Exif timestamp (YYYY:MM:DD HH:MM:SS)
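For example, the tIME handling in PNGMetadataExtractor above builds a 14-digit
timestamp and converts it with wfTimestamp(); a small illustration (the input
value is arbitrary):

	$exif = wfTimestamp( TS_EXIF, '20100401123045' );
	// $exif is now "2010:04:01 12:30:45"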
This document is intended to provide useful advice for parties seeking to
redistribute MediaWiki to end users. It's targeted particularly at maintainers
for Linux distributions, since it's been observed that distribution packages
of MediaWiki often break. We've consistently had to recommend that users
seeking support use official tarballs instead of their distribution's
packages, and this often solves whatever problem the user is having. It would
be nice if this could change.
injection.txt

This is an overview of how MediaWiki makes use of dependency injection. The
design described here grew from the discussion of RFC T384.

The term "dependency injection" means that anything an object needs to operate
should be injected from the outside: the object itself should only know narrow
interfaces, and no concrete implementation of the logic it relies on. The
requirement to inject everything typically results in an architecture based on
two main types of objects: simple value objects with no business logic, and
essentially stateless service objects that use other service objects to
operate on the value objects.

As of this writing, MediaWiki is only starting to use the DI approach. Much of
the code still relies on global state or direct instantiation, resulting in a
highly cyclical dependency graph. The MediaWikiServices class acts as the top
level factory for services in MediaWiki, which can be used to gain access to
default instances of various services. MediaWikiServices however also allows
new services to be defined and default services to be redefined. Services are
defined or redefined by providing a callback, the "instantiator", that will
return a new instance of the service. When MediaWikiServices::getInstance() is
first called, it will create an instance of MediaWikiServices and populate it
with the services defined in the files listed by $wgServiceWiringFiles,
thereby bootstrapping the DI framework. Per default, $wgServiceWiringFiles
lists includes/ServiceWiring.php.
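A sketch of how a service is defined in a wiring file and later retrieved
('MyExampleService' and its class are hypothetical; getInstance(),
getService() and getMainConfig() are real MediaWikiServices methods):

	// In a file listed in $wgServiceWiringFiles:
	return [
		'MyExampleService' => function ( MediaWikiServices $services ) {
			// The instantiator receives the service container, so it can
			// inject other services into the new instance.
			return new MyExampleService( $services->getMainConfig() );
		},
	];

	// Consumer code:
	$service = MediaWikiServices::getInstance()->getService( 'MyExampleService' );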