Puppet Class: profile::mediawiki::maintenance::wikidata

Defined in:
modules/profile/manifests/mediawiki/maintenance/wikidata.pp

Overview

Parameters:

  • helmfile_defaults_dir (Stdlib::Unixpath) (defaults to: lookup('profile::kubernetes::deployment_server::global_config::general_dir', {default_value => '/etc/helmfile-defaults'}))


# File 'modules/profile/manifests/mediawiki/maintenance/wikidata.pp', lines 1-55

# Periodic Wikidata maintenance jobs, run as Kubernetes periodic jobs
# against wikidatawiki.
#
# @param helmfile_defaults_dir
#   Directory holding the helmfile default values used when rendering
#   the Kubernetes periodic-job manifests.
class profile::mediawiki::maintenance::wikidata(
    Stdlib::Unixpath $helmfile_defaults_dir = lookup('profile::kubernetes::deployment_server::global_config::general_dir', {default_value => '/etc/helmfile-defaults'}),
) {
    require profile::mediawiki::common
    require profile::lvs::configuration

    # Owning team recorded on every job declared here.
    $owning_team = 'wikidata'

    # Hourly job: re-queue wb_changes rows older than 6 hours (21600s)
    # that were never dispatched.
    profile::mediawiki::periodic_job { 'wikidata_resubmit_changes_for_dispatch':
        command               => '/usr/local/bin/mwscript extensions/Wikibase/repo/maintenance/ResubmitChanges.php --wiki wikidatawiki --minimum-age 21600',
        interval              => '*-*-* *:39:00',
        cron_schedule         => '39 * * * *',
        team                  => $owning_team,
        kubernetes            => true,
        description           => 'Resubmit changes in wb_changes that are older than 6 hours',
        script_label          => 'ResubmitChanges.php-wikidatawiki',
        helmfile_defaults_dir => $helmfile_defaults_dir,
    }

    # The maxlag updater is production-only; it is skipped in Cloud VPS.
    unless $::realm == 'labs' {
        # Refresh the cached query service maxlag value every minute.
        # No ensure present/absent handling is needed: the wrapper only
        # runs anything when we are in the master DC.
        # Logs land in
        # /var/log/mediawiki/mediawiki_job_wikidata-updateQueryServiceLag/syslog.log
        # and are rotated.
        # Only currently-pooled WDQS servers should feed the maxlag
        # calculation. See T238751.
        $wdqs_service = 'wdqs-main'
        $catalog_entry = wmflib::service::fetch(true)[$wdqs_service]
        $pool_label = "${wdqs_service}_${catalog_entry['port']}"

        # The LVS class tells us which load balancers to query for
        # per-backend pooling/traffic data.
        $lvs_class = $catalog_entry['lvs']['class']

        # Build one "--lb host:9090" argument per core site, pointing at
        # the virtual LVS instrumentation hostname for our class.
        $lb_args = ['eqiad', 'codfw'].map |$site| {
            $i13n_host = wmflib::service::get_i13n_for_lvs_class($lvs_class, $site)
            "--lb ${i13n_host}:9090"
        }.join(' ')

        # Kept deliberately low: wdqs@codfw receives far less traffic
        # than eqiad (see T360993#9669374).
        $min_query_rate = 0.2

        $extra_args = "--lb-pool ${pool_label} ${lb_args} --pooled-server-min-query-rate ${min_query_rate}"

        profile::mediawiki::periodic_job { 'wikidata-updateQueryServiceLag':
            command                    => "/usr/local/bin/mwscript extensions/Wikidata.org/maintenance/updateQueryServiceLag.php --wiki wikidatawiki --cluster wdqs --prometheus prometheus.svc.eqiad.wmnet ${extra_args}",
            interval                   => '*-*-* *:*:00',
            cron_schedule              => '* * * * *',
            team                       => $owning_team,
            kubernetes                 => true,
            description                => 'Update the cached query service maxlag value every minute',
            script_label               => 'updateQueryServiceLag.php-wikidatawiki',
            ttlsecondsafterfinished    => 600, # 10 minutes
            failedjobshistorylimit     => 10, # 10 failed jobs
            successfuljobshistorylimit => 10, # 10 successful jobs
            helmfile_defaults_dir      => $helmfile_defaults_dir,
        }
    }
}