MediaWiki  1.27.2
FileOpBatch.php
Go to the documentation of this file.
1 <?php
/**
 * Helper class for representing batch file operations.
 * Operations are validated with precheck() first, optionally journaled,
 * and then attempted (in parallel sub-batches where possible).
 */
class FileOpBatch {
	/* Timeout related parameters */
	const MAX_BATCH_SIZE = 1000; // integer

	/**
	 * Attempt to perform a series of file operations.
	 *
	 * The $opts fields read here are:
	 *   - force        : treat precheck() failures as mere warnings and keep going
	 *   - nonJournaled : do not log this batch to the file journal
	 *   - concurrency  : maximum number of operations per concurrent sub-batch (default: 1)
	 *
	 * @param FileOp[] $performOps List of file operations to attempt
	 * @param array $opts Batch operation options (see above)
	 * @param FileJournal $journal Journal to log operations to
	 * @return Status
	 */
	public static function attempt( array $performOps, array $opts, FileJournal $journal ) {
		// FIX: $status was used below (fatal(), merge(), success[], ...) without
		// ever being initialized; start with an empty "good" result status.
		$status = Status::newGood();

		$n = count( $performOps );
		if ( $n > self::MAX_BATCH_SIZE ) {
			$status->fatal( 'backend-fail-batchsize', $n, self::MAX_BATCH_SIZE );

			return $status;
		}

		$batchId = $journal->getTimestampedUUID();
		$ignoreErrors = !empty( $opts['force'] );
		$journaled = empty( $opts['nonJournaled'] );
		$maxConcurrency = isset( $opts['concurrency'] ) ? $opts['concurrency'] : 1;

		$entries = []; // file journal entry list
		$predicates = FileOp::newPredicates(); // account for previous ops in prechecks
		$curBatch = []; // concurrent FileOp sub-batch accumulation
		$curBatchDeps = FileOp::newDependencies(); // paths used in FileOp sub-batch
		$pPerformOps = []; // ordered list of concurrent FileOp sub-batches
		$lastBackend = null; // last op backend name
		// Do pre-checks for each operation; abort on failure...
		foreach ( $performOps as $index => $fileOp ) {
			$backendName = $fileOp->getBackend()->getName();
			$fileOp->setBatchId( $batchId ); // transaction ID
			// Decide if this op can be done concurrently within this sub-batch
			// or if a new concurrent sub-batch must be started after this one...
			if ( $fileOp->dependsOn( $curBatchDeps )
				|| count( $curBatch ) >= $maxConcurrency
				|| ( $backendName !== $lastBackend && count( $curBatch ) )
			) {
				$pPerformOps[] = $curBatch; // push this batch
				$curBatch = []; // start a new sub-batch
				$curBatchDeps = FileOp::newDependencies();
			}
			$lastBackend = $backendName;
			$curBatch[$index] = $fileOp; // keep index
			// Update list of affected paths in this batch
			$curBatchDeps = $fileOp->applyDependencies( $curBatchDeps );
			// Simulate performing the operation...
			$oldPredicates = $predicates;
			$subStatus = $fileOp->precheck( $predicates ); // updates $predicates
			$status->merge( $subStatus );
			if ( $subStatus->isOK() ) {
				if ( $journaled ) { // journal log entries
					$entries = array_merge( $entries,
						$fileOp->getJournalEntries( $oldPredicates, $predicates ) );
				}
			} else { // operation failed?
				$status->success[$index] = false;
				++$status->failCount;
				if ( !$ignoreErrors ) {
					return $status; // abort
				}
			}
		}
		// Push the last sub-batch
		if ( count( $curBatch ) ) {
			$pPerformOps[] = $curBatch;
		}

		// Log the operations in the file journal...
		if ( count( $entries ) ) {
			$subStatus = $journal->logChangeBatch( $entries, $batchId );
			if ( !$subStatus->isOK() ) {
				return $subStatus; // abort
			}
		}

		if ( $ignoreErrors ) { // treat precheck() fatals as mere warnings
			$status->setResult( true, $status->value );
		}

		// Attempt each operation (in parallel if allowed and possible)...
		self::runParallelBatches( $pPerformOps, $status );

		return $status;
	}

	/**
	 * Attempt a list of file operation sub-batches in series.
	 *
	 * The operations *within* each sub-batch are attempted in parallel
	 * (via async op handles) when the sub-batch has more than one op.
	 * Per-op results are recorded in $status->success[] keyed by the ops'
	 * original indexes; on any unexpected failure the remaining sub-batches
	 * are logged as failed rather than attempted.
	 *
	 * @param array $pPerformOps Ordered list of FileOp sub-batches
	 *   (each sub-batch keyed by the ops' original indexes)
	 * @param Status $status Status to update with per-op results
	 */
	protected static function runParallelBatches( array $pPerformOps, Status $status ) {
		$aborted = false; // set to true on unexpected errors
		foreach ( $pPerformOps as $performOpsBatch ) {
			if ( $aborted ) { // check batch op abort flag...
				// We can't continue (even with $ignoreErrors) as $predicates is wrong.
				// Log the remaining ops as failed for recovery...
				foreach ( $performOpsBatch as $i => $fileOp ) {
					$status->success[$i] = false;
					++$status->failCount;
					$performOpsBatch[$i]->logFailure( 'attempt_aborted' );
				}
				continue;
			}
			$statuses = [];
			$opHandles = [];
			// Get the backend; all sub-batch ops belong to a single backend
			$backend = reset( $performOpsBatch )->getBackend();
			// Get the operation handles or actually do it if there is just one.
			// If attemptAsync() returns a Status, it was either due to an error
			// or the backend does not support async ops and did it synchronously.
			foreach ( $performOpsBatch as $i => $fileOp ) {
				if ( !isset( $status->success[$i] ) ) { // didn't already fail in precheck()
					// Parallel ops may be disabled in config due to missing dependencies,
					// (e.g. needing popen()). When they are, $performOpsBatch has size 1.
					$subStatus = ( count( $performOpsBatch ) > 1 )
						? $fileOp->attemptAsync()
						: $fileOp->attempt();
					if ( $subStatus->value instanceof FileBackendStoreOpHandle ) {
						$opHandles[$i] = $subStatus->value; // deferred
					} else {
						$statuses[$i] = $subStatus; // done already
					}
				}
			}
			// Try to do all the operations concurrently...
			$statuses = $statuses + $backend->executeOpHandlesInternal( $opHandles );
			// Marshall and merge all the responses (blocking)...
			foreach ( $performOpsBatch as $i => $fileOp ) {
				if ( !isset( $status->success[$i] ) ) { // didn't already fail in precheck()
					$subStatus = $statuses[$i];
					$status->merge( $subStatus );
					if ( $subStatus->isOK() ) {
						$status->success[$i] = true;
						++$status->successCount;
					} else {
						$status->success[$i] = false;
						++$status->failCount;
						$aborted = true; // set abort flag; we can't continue
					}
				}
			}
		}
	}
}
the array() calling protocol came about after MediaWiki 1.4rc1.
merge($other, $overwriteValue=false)
Merge another status object into this one.
Definition: Status.php:378
Helper class for representing batch file operations.
Definition: FileOpBatch.php:34
static newPredicates()
Get a new empty predicates array for precheck()
Definition: FileOp.php:151
This document is intended to provide useful advice for parties seeking to redistribute MediaWiki to end users. It's targeted particularly at maintainers for Linux distributions, since it's been observed that distribution packages of MediaWiki often break. We've consistently had to recommend that users seeking support use official tarballs instead of their distribution's packages, and this often solves whatever problem the user is having. It would be nice if this could change.
Definition: distributors.txt:9
logChangeBatch(array $entries, $batchId)
Log changes made by a batch file operation.
static runParallelBatches(array $pPerformOps, Status $status)
Attempt a list of file operations sub-batches in series.
injection.txt: This is an overview of how MediaWiki makes use of dependency injection. The design described here grew from the discussion of RFC T384. The term "dependency injection" means that anything an object needs to operate should be injected from the outside; the object itself should only know narrow interfaces, and no concrete implementation of the logic it relies on. The requirement to inject everything typically results in an architecture based on two main types of objects: value objects, and essentially stateless service objects that use other service objects to operate on the value objects. As of now, MediaWiki is only starting to use the DI approach; much of the code still relies on global state or direct instantiation, resulting in a highly cyclical dependency graph. MediaWikiServices acts as the top-level factory for services in MediaWiki and can be used to gain access to default instances of various services. MediaWikiServices, however, also allows new services to be defined and default services to be redefined. Services are defined or redefined by providing a callback (the "instantiator") that will return a new instance of the service. During setup, MediaWiki will create an instance of MediaWikiServices and populate it with the services defined in the files listed by $wgServiceWiringFiles, thereby bootstrapping the DI framework. Per default, $wgServiceWiringFiles lists includes/ServiceWiring.php.
Definition: injection.txt:35
static attempt(array $performOps, array $opts, FileJournal $journal)
Attempt to perform a series of file operations.
Definition: FileOpBatch.php:57
static newDependencies()
Get a new empty dependency tracking array for paths read/written to.
Definition: FileOp.php:160
this hook is for auditing only RecentChangesLinked and Watchlist RecentChangesLinked and Watchlist e g Watchlist removed from all revisions and log entries to which it was applied This gives extensions a chance to take it off their books as the deletion has already been partly carried out by this point or something similar the user will be unable to create the tag set $status
Definition: hooks.txt:1004
getTimestampedUUID()
Get a statistically unique ID string.
Definition: FileJournal.php:79
FileBackendStore helper class for performing asynchronous file operations.
const MAX_BATCH_SIZE
Definition: FileOpBatch.php:36
Class for handling file operation journaling.
Definition: FileJournal.php:38
static newGood($value=null)
Factory function for good results.
Definition: Status.php:101