Code Coverage |
||||||||||
Lines |
Functions and Methods |
Classes and Traits |
||||||||
Total | |
93.62% |
44 / 47 |
|
0.00% |
0 / 1 |
CRAP | |
0.00% |
0 / 1 |
BacklinkJobUtils | |
93.62% |
44 / 47 |
|
0.00% |
0 / 1 |
10.03 | |
0.00% |
0 / 1 |
partitionBacklinkJob | |
93.62% |
44 / 47 |
|
0.00% |
0 / 1 |
10.03 |
1 | <?php |
2 | /** |
3 | * This program is free software; you can redistribute it and/or modify |
4 | * it under the terms of the GNU General Public License as published by |
5 | * the Free Software Foundation; either version 2 of the License, or |
6 | * (at your option) any later version. |
7 | * |
8 | * This program is distributed in the hope that it will be useful, |
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
11 | * GNU General Public License for more details. |
12 | * |
13 | * You should have received a copy of the GNU General Public License along |
14 | * with this program; if not, write to the Free Software Foundation, Inc., |
15 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. |
16 | * http://www.gnu.org/copyleft/gpl.html |
17 | * |
18 | * @file |
19 | */ |
20 | |
21 | use MediaWiki\MediaWikiServices; |
22 | use MediaWiki\Page\PageIdentity; |
23 | |
24 | /** |
25 | * Helper for a Job that updates links to a given page title. |
26 | * |
27 | * When an asset changes, a base job can be inserted to update all assets that depend on it. |
28 | * The base job splits into per-title "leaf" jobs and a "remnant" job to handle the remaining |
29 | * range of backlinks. This recurs until the remnant job's backlink range is small enough that |
30 | * only leaf jobs are created from it. |
31 | * |
32 | * For example, if templates A and B are edited (at the same time) the queue will have: |
33 | * (A base, B base) |
34 | * When these jobs run, the queue will have per-title and remnant partition jobs: |
35 | * (titleX,titleY,titleZ,...,A remnant,titleM,titleN,titleO,...,B remnant) |
36 | * |
37 | * This works best when the queue is FIFO, for several reasons: |
38 | * - a) Since the remnant jobs are enqueued after the leaf jobs, the slower leaf jobs have to |
39 | * get popped prior to the fast remnant jobs. This avoids flooding the queue with leaf jobs |
40 | * for every single backlink of widely used assets (which can be millions). |
41 | * - b) Other jobs going in the queue still get a chance to run after a widely used asset changes. |
42 | * This is due to the large remnant job pushing to the end of the queue with each division. |
43 | * |
44 | * The size of the queues used in this manner depend on the number of assets changes and the |
45 | * number of workers. Also, with FIFO-per-partition queues, the queue size can be somewhat larger, |
46 | * depending on the number of queue partitions. |
47 | * |
48 | * @since 1.23 |
49 | * @ingroup JobQueue |
50 | */ |
class BacklinkJobUtils {
	/**
	 * Split $job into leaf jobs plus, when needed, one remnant partition job.
	 *
	 * Roughly ($bSize/$cSize) leaf jobs are produced, each carrying a 'pages'
	 * parameter mapping <page ID> => [<namespace>, <DB key>] for up to $cSize
	 * backlink titles, so that run() knows which pages to act on. Leaf jobs
	 * reuse the parent job's title. If the range covered by $job exceeded
	 * $bSize titles, a single job of the same class is appended with
	 * 'recursive' set to true, the parent's 'table', and a 'range' map of the
	 * format (start:<integer>, end:<integer>, batchSize:<integer>,
	 * subranges:((<start>,<end>),...)) describing the remaining backlink
	 * range; calling this method on that job repeats the division.
	 *
	 * The provided $job must have the 'recursive' parameter set to true and
	 * 'table' set to a backlink table; its title is the page whose backlinks
	 * are walked. Any 'range' parameter must follow the format above, which
	 * recursive calls to this method manage automatically.
	 *
	 * Leaf jobs always come first in the returned array. This lets the caller
	 * push() them directly onto the queue, and on a FIFO queue the leaf jobs
	 * then finish before the remnant job is resolved, keeping the queue small.
	 *
	 * $opts includes:
	 *   - params : extra job parameters to include in each job
	 *
	 * @param Job $job
	 * @param int $bSize BacklinkCache partition size; usually $wgUpdateRowsPerJob
	 * @param int $cSize Max titles per leaf job; usually 1 or a modest value
	 * @param array $opts Optional parameter map
	 * @return Job[]
	 */
	public static function partitionBacklinkJob( Job $job, $bSize, $cSize, $opts = [] ) {
		$jobClass = get_class( $job );
		$jobTitle = $job->getTitle();
		$jobParams = $job->getParams();

		$backlinkCache = MediaWikiServices::getInstance()->getBacklinkCacheFactory()
			->getBacklinkCache( $jobTitle );

		// Work out which backlink ranges this invocation has to cover.
		if ( isset( $jobParams['pages'] ) || empty( $jobParams['recursive'] ) ) {
			// Leaf node: partitioning it again would recurse without bound.
			wfWarn( __METHOD__ . " called on {$job->getType()} leaf job (explosive recursion)." );
			$ranges = [];
			$realBSize = 0;
		} elseif ( isset( $jobParams['range'] ) ) {
			// Range job: reuse the subranges computed by an earlier division.
			$ranges = $jobParams['range']['subranges'];
			$realBSize = $jobParams['range']['batchSize'];
		} else {
			// Base job: compute the initial partitioning of the backlink set.
			$ranges = $backlinkCache->partition( $jobParams['table'], $bSize );
			$realBSize = $bSize;
		}

		$extraParams = $opts['params'] ?? [];

		$jobs = [];
		// Turn the first range (of size $bSize) into collated leaf jobs.
		if ( $ranges ) {
			$start = $ranges[0][0];
			$end = isset( $ranges[1] ) ? $ranges[1][0] - 1 : false;

			$pageSources = iterator_to_array(
				$backlinkCache->getLinkPages( $jobParams['table'], $start, $end )
			);
			foreach ( array_chunk( $pageSources, $cSize ) as $pageBatch ) {
				/** @var PageIdentity[] $pageBatch */
				$pages = [];
				foreach ( $pageBatch as $page ) {
					$pages[$page->getId()] = [ $page->getNamespace(), $page->getDBkey() ];
				}
				$jobs[] = new $jobClass(
					$jobTitle, // maintain parent job title
					[ 'pages' => $pages ] + $extraParams
				);
			}
		}
		// Fold every remaining range into a single remnant partition job.
		if ( count( $ranges ) > 1 ) {
			$lastRange = $ranges[count( $ranges ) - 1];
			$jobs[] = new $jobClass(
				$jobTitle, // maintain parent job title
				[
					'recursive' => true,
					'table' => $jobParams['table'],
					'range' => [
						'start' => $ranges[1][0],
						'end' => $lastRange[1],
						'batchSize' => $realBSize,
						'subranges' => array_slice( $ranges, 1 )
					],
					// Track how many times the base job divided for debugging
					'division' => isset( $jobParams['division'] )
						? ( $jobParams['division'] + 1 )
						: 1
				] + $extraParams
			);
		}

		return $jobs;
	}
}