MediaWiki REL1_33
RecentChangesUpdateJob.php
Go to the documentation of this file.
1<?php
22
/**
 * @param Title $title Title the job is attributed to (Special:RecentChanges)
 * @param array $params Job parameters; must contain a 'type' key
 *   ('purge' or 'cacheUpdate')
 * @throws Exception When no 'type' parameter was supplied
 */
function __construct( Title $title, array $params ) {
	parent::__construct( 'recentChangesUpdate', $title, $params );

	if ( !isset( $params['type'] ) ) {
		throw new Exception( "Missing 'type' parameter." );
	}

	// This job manages its own transaction rounds (batched commits below),
	// and identical queued jobs should be collapsed into one.
	$this->executionFlags |= self::JOB_NO_EXPLICIT_TRX_ROUND;
	$this->removeDuplicates = true;
}
40
/**
 * Build a job that prunes recentchanges rows past the expiry cutoff.
 *
 * @return RecentChangesUpdateJob
 */
final public static function newPurgeJob() {
	$title = SpecialPage::getTitleFor( 'Recentchanges' );

	return new self( $title, [ 'type' => 'purge' ] );
}
49
/**
 * Build a job that refreshes the cached list of recently active users.
 *
 * @return RecentChangesUpdateJob
 */
final public static function newCacheUpdateJob() {
	$title = SpecialPage::getTitleFor( 'Recentchanges' );

	return new self( $title, [ 'type' => 'cacheUpdate' ] );
}
59
/**
 * Dispatch to the handler matching this job's 'type' parameter.
 *
 * @return bool Always true (failures surface as exceptions)
 * @throws InvalidArgumentException On an unrecognized 'type' value
 */
public function run() {
	$type = $this->params['type'];

	if ( $type === 'purge' ) {
		$this->purgeExpiredRows();

		return true;
	}

	if ( $type === 'cacheUpdate' ) {
		$this->updateActiveUsers();

		return true;
	}

	throw new InvalidArgumentException(
		"Invalid 'type' parameter '{$this->params['type']}'." );
}
72
/**
 * Delete recentchanges rows older than $wgRCMaxAge, in batches of
 * $wgUpdateRowsPerQuery, waiting for replication between batches.
 *
 * An advisory DB lock ensures only one purge runs per wiki at a time;
 * if the lock is held elsewhere, this is a no-op. The
 * 'RecentChangesPurgeRows' hook fires with each batch of deleted rows.
 */
protected function purgeExpiredRows() {
	// BUG FIX: these globals were read without being declared, so both
	// evaluated to null (cutoff of "now", LIMIT of null). Declare them.
	global $wgRCMaxAge, $wgUpdateRowsPerQuery;

	$dbw = wfGetDB( DB_MASTER );
	$lockKey = $dbw->getDomainID() . ':recentchanges-prune';
	if ( !$dbw->lock( $lockKey, __METHOD__, 0 ) ) {
		// Non-blocking lock attempt failed: a purge is already in progress
		return;
	}

	$factory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
	$ticket = $factory->getEmptyTransactionTicket( __METHOD__ );
	// Rows with rc_timestamp before this are expired
	$cutoff = $dbw->timestamp( time() - $wgRCMaxAge );
	$rcQuery = RecentChange::getQueryInfo();
	do {
		$rcIds = [];
		$rows = [];
		$res = $dbw->select(
			$rcQuery['tables'],
			$rcQuery['fields'],
			[ 'rc_timestamp < ' . $dbw->addQuotes( $cutoff ) ],
			__METHOD__,
			[ 'LIMIT' => $wgUpdateRowsPerQuery ],
			$rcQuery['joins']
		);
		foreach ( $res as $row ) {
			$rcIds[] = $row->rc_id;
			$rows[] = $row;
		}
		if ( $rcIds ) {
			$dbw->delete( 'recentchanges', [ 'rc_id' => $rcIds ], __METHOD__ );
			// Let extensions react to (e.g. archive) the purged rows
			Hooks::run( 'RecentChangesPurgeRows', [ $rows ] );
			// There might be more, so try waiting for replica DBs
			if ( !$factory->commitAndWaitForReplication(
				__METHOD__, $ticket, [ 'timeout' => 3 ]
			) ) {
				// Replication is lagging; another job will continue anyway
				break;
			}
		}
	} while ( $rcIds );

	$dbw->unlock( $lockKey, __METHOD__ );
}
117
/**
 * Refresh the cached "active users" list stored in the querycachetwo
 * table (qcc_type 'activeusers'), used by Special:ActiveUsers.
 *
 * Scans recentchanges for registered-user edits since the last cache
 * update (bounded by a one-window limit, so multiple runs may be needed
 * to catch up), inserts newly active users, bumps the freshness
 * timestamp in querycache_info, and finally expires stale entries.
 * Guarded by an advisory DB lock so only one update runs at a time.
 */
protected function updateActiveUsers() {
	global $wgActiveUserDays;

	// Users who edited within this many days count as "active"
	$days = $wgActiveUserDays;
	// Maximum width (seconds) of the timestamp range scanned per run
	$window = $wgActiveUserDays * 86400;

	$dbw = wfGetDB( DB_MASTER );
	$lbFactory = MediaWikiServices::getInstance()->getDBLoadBalancerFactory();
	$ticket = $lbFactory->getEmptyTransactionTicket( __METHOD__ );

	$lockKey = $dbw->getDomainID() . '-activeusers';
	if ( !$dbw->lock( $lockKey, __METHOD__, 0 ) ) {
		// Another update holds the lock; bailing out avoids duplicate
		// entries and is harmless since that run will do the work.
		return;
	}

	// The aggregation query below can be slow on large wikis
	$dbw->setSessionOptions( [ 'connTimeout' => 900 ] );

	$nowUnix = time();
	// When was the cache last brought up to date?
	$cacheTs = $dbw->selectField( 'querycache_info',
		'qci_timestamp',
		[ 'qci_type' => 'activeusers' ]
	);
	$cacheTsUnix = $cacheTs ? wfTimestamp( TS_UNIX, $cacheTs ) : 1;

	// Scan from the last update (never further back than $days days ago)
	// up to at most one $window later, capped at the present.
	$startUnix = max( $cacheTsUnix, $nowUnix - $days * 86400 );
	$endUnix = min( $startUnix + $window, $nowUnix );

	// Collect each user active in the range with their latest edit time
	$actorQuery = ActorMigration::newMigration()->getJoin( 'rc_user' );
	$activeRes = $dbw->select(
		[ 'recentchanges' ] + $actorQuery['tables'],
		[
			'rc_user_text' => $actorQuery['fields']['rc_user_text'],
			'lastedittime' => 'MAX(rc_timestamp)'
		],
		[
			$actorQuery['fields']['rc_user'] . ' > 0', // actual accounts
			'rc_type != ' . $dbw->addQuotes( RC_EXTERNAL ), // no wikidata
			'rc_log_type IS NULL OR rc_log_type != ' . $dbw->addQuotes( 'newusers' ),
			'rc_timestamp >= ' . $dbw->addQuotes( $dbw->timestamp( $startUnix ) ),
			'rc_timestamp <= ' . $dbw->addQuotes( $dbw->timestamp( $endUnix ) )
		],
		__METHOD__,
		[
			'GROUP BY' => [ $actorQuery['fields']['rc_user_text'] ],
			'ORDER BY' => 'NULL' // avoid filesort
		],
		$actorQuery['joins']
	);
	$lastEditTimes = [];
	foreach ( $activeRes as $row ) {
		$lastEditTimes[$row->rc_user_text] = $row->lastedittime;
	}

	// Drop users that already have a fresh-enough cache entry
	if ( count( $lastEditTimes ) ) {
		$knownRes = $dbw->select( 'querycachetwo',
			[ 'user_name' => 'qcc_title' ],
			[
				'qcc_type' => 'activeusers',
				'qcc_namespace' => NS_USER,
				'qcc_title' => array_keys( $lastEditTimes ),
				'qcc_value >= ' . $dbw->addQuotes( $nowUnix - $days * 86400 ), // TS_UNIX
			],
			__METHOD__
		);
		// NOTE: strict consistency would require bumping these existing
		// rows to the new lastedittime instead of skipping them.
		foreach ( $knownRes as $row ) {
			unset( $lastEditTimes[$row->user_name] );
		}
	}

	// Insert cache rows for the users not yet on the list, in batches
	if ( count( $lastEditTimes ) ) {
		$insertRows = [];
		foreach ( $lastEditTimes as $userName => $editTime ) {
			$insertRows[] = [
				'qcc_type' => 'activeusers',
				'qcc_namespace' => NS_USER,
				'qcc_title' => $userName,
				'qcc_value' => wfTimestamp( TS_UNIX, $editTime ),
				'qcc_namespacetwo' => 0, // unused
				'qcc_titletwo' => '' // unused
			];
		}
		foreach ( array_chunk( $insertRows, 500 ) as $batch ) {
			$dbw->insert( 'querycachetwo', $batch, __METHOD__ );
			$lbFactory->commitAndWaitForReplication( __METHOD__, $ticket );
		}
	}

	// If a transaction was already open, its snapshot may predate
	// $endUnix; record the older of the two as the freshness point.
	$asOf = min( $endUnix, (int)$dbw->trxTimestamp() );

	// Touch the data freshness timestamp
	$dbw->replace( 'querycache_info',
		[ 'qci_type' ],
		[ 'qci_type' => 'activeusers',
			'qci_timestamp' => $dbw->timestamp( $asOf ) ], // not always $now
		__METHOD__
	);

	$dbw->unlock( $lockKey, __METHOD__ );

	// Expire entries whose last edit fell outside the activity window
	$dbw->delete( 'querycachetwo',
		[
			'qcc_type' => 'activeusers',
			'qcc_value < ' . $dbw->addQuotes( $nowUnix - $days * 86400 ) // TS_UNIX
		],
		__METHOD__
	);
}
242}
Apache License, Version 2.0, January 2004 — TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION. 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution.
$wgActiveUserDays
How many days a user must be idle before they are considered inactive.
$wgRCMaxAge
Recentchanges items are periodically purged; entries older than this many seconds will go.
$wgUpdateRowsPerQuery
Number of rows to update per query.
wfGetDB( $db, $groups=[], $wiki=false)
Get a Database object.
wfTimestamp( $outputtype=TS_UNIX, $ts=0)
Get a timestamp string in one of various formats.
Class to both describe a background job and handle jobs.
Definition Job.php:30
MediaWikiServices is the service locator for the application scope of MediaWiki.
Job for pruning recent changes.
__construct(Title $title, array $params)
Represents a title within MediaWiki.
Definition Title.php:40
$res
Definition database.txt:21
This document is intended to provide useful advice for parties seeking to redistribute MediaWiki to end users It s targeted particularly at maintainers for Linux since it s been observed that distribution packages of MediaWiki often break We ve consistently had to recommend that users seeking support use official tarballs instead of their distribution s and this often solves whatever problem the user is having It would be nice if this could such as
const NS_USER
Definition Defines.php:75
const RC_EXTERNAL
Definition Defines.php:154
do that in ParserLimitReportFormat instead use this to modify the parameters of the image all existing parser cache entries will be invalid To avoid you ll need to handle that somehow(e.g. with the RejectParserCacheValue hook) because MediaWiki won 't do it for you. & $defaults also a ContextSource after deleting those rows but within the same transaction $rows
Definition hooks.txt:2818
namespace and then decline to actually register it file or subcat img or subcat $title
Definition hooks.txt:955
Allows to change the fields on the form that will be generated $name
Definition hooks.txt:271
injection txt This is an overview of how MediaWiki makes use of dependency injection The design described here grew from the discussion of RFC T384 The term dependency this means that anything an object needs to operate should be injected from the the object itself should only know narrow no concrete implementation of the logic it relies on The requirement to inject everything typically results in an architecture that based on two main types of and essentially stateless service objects that use other service objects to operate on the value objects As of the beginning MediaWiki is only starting to use the DI approach Much of the code still relies on global state or direct resulting in a highly cyclical dependency which acts as the top level factory for services in MediaWiki which can be used to gain access to default instances of various services MediaWikiServices however also allows new services to be defined and default services to be redefined Services are defined or redefined by providing a callback the instantiator that will return a new instance of the service When it will create an instance of MediaWikiServices and populate it with the services defined in the files listed by thereby bootstrapping the DI framework Per $wgServiceWiringFiles lists includes ServiceWiring php
Definition injection.txt:37
The wiki should then use memcached to cache various data To use multiple just add more items to the array To increase the weight of a make its entry a array("192.168.0.1:11211", 2))
const DB_MASTER
Definition defines.php:26
$params