#!/usr/bin/env python3
# -*- coding: utf-8; py-indent-offset: 4 -*-
#
# Author:  Linuxfabrik GmbH, Zurich, Switzerland
# Contact: info (at) linuxfabrik (dot) ch
#          https://www.linuxfabrik.ch/
# License: The Unlicense, see LICENSE file.

# https://github.com/Linuxfabrik/monitoring-plugins/blob/main/CONTRIBUTING.md

"""See the check's README for more details."""

import argparse
import json
import re
import sys
import urllib.parse

import lib.args
import lib.base
import lib.db_sqlite
import lib.human
import lib.lftest
import lib.time
import lib.url
from lib.globals import STATE_CRIT, STATE_OK, STATE_UNKNOWN, STATE_WARN

__author__ = 'Linuxfabrik GmbH, Zurich/Switzerland'
__version__ = '2026041407'

DESCRIPTION = """Monitors PHP-FPM pool performance via the pool status page. Reports pool
saturation (percentage of busy workers), new slow requests since the previous check, the
request rate, process counts, and per-process request details for every pool. Multiple
pools on the same host can be checked in one run by passing `--url` several times; each
URL is treated as an independent pool, and the overall plugin state is the worst of all
pools. Basic authentication can be embedded in the URL as `http://user:pass@host/path`.
Alerts when pool saturation exceeds the warning/critical thresholds or when new slow
requests appear since the last check. Alerts with a configurable severity if a pool is
unreachable. Supports extended reporting via --lengthy."""

DEFAULT_CRIT = 90  # %
DEFAULT_CRIT_SLOW_REQUESTS = 100
DEFAULT_INSECURE = False
DEFAULT_LENGTHY = False
DEFAULT_NO_PROXY = False
DEFAULT_SEVERITY = 'warn'
DEFAULT_TIMEOUT = 8
DEFAULT_URL = 'http://localhost/fpm-status'
DEFAULT_WARN = 80  # %
DEFAULT_WARN_SLOW_REQUESTS = 1
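
# Example usage (illustrative; the plugin filename may differ in your setup):
#
#     ./php-fpm-status --url 'http://user:password@web1/fpm-status' \
#         --url 'http://web1/api-status' --warning 70 --critical 85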


def parse_args():
    """Parse command line arguments using argparse."""
    parser = argparse.ArgumentParser(description=DESCRIPTION)

    parser.add_argument(
        '-V',
        '--version',
        action='version',
        version=f'%(prog)s: v{__version__} by {__author__}',
    )

    parser.add_argument(
        '--always-ok',
        help=lib.args.help('--always-ok'),
        dest='ALWAYS_OK',
        action='store_true',
        default=False,
    )

    parser.add_argument(
        '-c',
        '--critical',
        help='CRIT threshold for pool saturation '
        '(active processes / total processes), in percent. '
        'Default: >= %(default)s',
        dest='CRIT',
        type=int,
        default=DEFAULT_CRIT,
    )

    parser.add_argument(
        '--critical-slowreq',
        help='CRIT threshold for the number of NEW slow requests seen since the '
        'previous check run. '
        'Default: >= %(default)s',
        dest='CRIT_SLOW_REQUESTS',
        type=int,
        default=DEFAULT_CRIT_SLOW_REQUESTS,
    )

    parser.add_argument(
        '--insecure',
        help=lib.args.help('--insecure'),
        dest='INSECURE',
        action='store_true',
        default=DEFAULT_INSECURE,
    )

    parser.add_argument(
        '--lengthy',
        help=lib.args.help('--lengthy'),
        dest='LENGTHY',
        action='store_true',
        default=DEFAULT_LENGTHY,
    )

    parser.add_argument(
        '--no-proxy',
        help=lib.args.help('--no-proxy'),
        dest='NO_PROXY',
        action='store_true',
        default=DEFAULT_NO_PROXY,
    )

    parser.add_argument(
        '--severity',
        help=lib.args.help('--severity')
        + ' Applied to pools that are unreachable or whose status JSON cannot be '
        'parsed. Saturation and slow-request thresholds are unaffected. '
        'Default: %(default)s',
        dest='SEVERITY',
        default=DEFAULT_SEVERITY,
        choices=['warn', 'crit'],
    )

    parser.add_argument(
        '--test',
        help=lib.args.help('--test')
        + ' Can be specified multiple times; each entry maps to the URL at the '
        'same position.',
        dest='TEST',
        action='append',
        default=None,
    )

    parser.add_argument(
        '--timeout',
        help=lib.args.help('--timeout') + ' Default: %(default)s (seconds)',
        dest='TIMEOUT',
        type=int,
        default=DEFAULT_TIMEOUT,
    )

    parser.add_argument(
        '-u',
        '--url',
        help='PHP-FPM status page URL. '
        'Can be specified multiple times to check several pools on the same host; '
        'each URL is treated as an independent pool. '
        'Basic authentication may be embedded in the URL as '
        '`http://user:password@host/pool-status`; the credentials are stripped '
        'from the request URL and sent via the `Authorization` header. '
        f'Default: {DEFAULT_URL}',
        dest='URL',
        action='append',
        default=None,
    )

    parser.add_argument(
        '-w',
        '--warning',
        help='WARN threshold for pool saturation '
        '(active processes / total processes), in percent. '
        'Default: >= %(default)s',
        dest='WARN',
        type=int,
        default=DEFAULT_WARN,
    )

    parser.add_argument(
        '--warning-slowreq',
        help='WARN threshold for the number of NEW slow requests seen since the '
        'previous check run. '
        'Default: >= %(default)s',
        dest='WARN_SLOW_REQUESTS',
        type=int,
        default=DEFAULT_WARN_SLOW_REQUESTS,
    )

    args, _ = parser.parse_known_args()
    return args


def build_fpm_url(url):
    """Append ``json`` and ``full`` query flags to an FPM status URL safely.

    Uses ``urllib.parse`` so URLs that already carry query parameters (for
    example behind a reverse proxy that adds its own flags) don't get a
    syntactically broken ``?`` appended.
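
    Examples::

        >>> build_fpm_url('http://localhost/fpm-status')
        'http://localhost/fpm-status?json&full'
        >>> build_fpm_url('http://localhost/fpm-status?foo=1')
        'http://localhost/fpm-status?foo=1&json&full'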
    """
    parsed = urllib.parse.urlparse(url)
    query_parts = parsed.query.split('&') if parsed.query else []
    for flag in ('json', 'full'):
        if flag not in query_parts:
            query_parts.append(flag)
    return urllib.parse.urlunparse(parsed._replace(query='&'.join(query_parts)))


def fetch_pool_raw(url, args, test_spec):
    """Return ``(ok, raw_json_or_error)`` for one pool.

    Handles basic-auth extraction and the ``?json&full`` query. In test mode,
    reads the fixture files from ``test_spec`` instead of touching the
    network.
    """
    if test_spec is not None:
        stdout, _, _ = lib.lftest.test(lib.args.csv(test_spec))
        return True, stdout

    auth_url, headers = lib.url.split_basic_auth(url)
    full_url = build_fpm_url(auth_url)
    return lib.url.fetch(
        full_url,
        header=headers,
        insecure=args.INSECURE,
        no_proxy=args.NO_PROXY,
        timeout=args.TIMEOUT,
    )


def open_cache():
    """Open (and lazily create) the delta cache SQLite database."""
    conn = lib.base.coe(
        lib.db_sqlite.connect(
            filename='linuxfabrik-monitoring-plugins-php-fpm-status.db',
        )
    )
    definition = """
        url            TEXT PRIMARY KEY,
        fpm_start_time INT  NOT NULL,
        accepted_conn  INT  NOT NULL,
        slow_requests  INT  NOT NULL,
        ts             REAL NOT NULL
    """
    lib.base.coe(lib.db_sqlite.create_table(conn, definition, table='pool_snapshot'))
    return conn


def load_snapshot(conn, url):
    """Return the previous snapshot for ``url`` or ``None`` if this is the first run.

    ``lib.db_sqlite.select(fetchone=True)`` returns an empty ``list`` when no
    row matches; normalise that to ``None`` so callers only need one check.
    """
    row = lib.base.coe(
        lib.db_sqlite.select(
            conn,
            'SELECT fpm_start_time, accepted_conn, slow_requests, ts '
            'FROM pool_snapshot WHERE url = :url',
            {'url': url},
            fetchone=True,
        )
    )
    return row if row else None


def save_snapshot(conn, url, data, now):
    """Upsert the latest snapshot for ``url`` (DELETE + INSERT pattern)."""
    lib.base.coe(
        lib.db_sqlite.delete(
            conn,
            'DELETE FROM pool_snapshot WHERE url = :url',
            {'url': url},
        )
    )
    lib.base.coe(
        lib.db_sqlite.insert(
            conn,
            {
                'url': url,
                'fpm_start_time': int(data['start time']),
                'accepted_conn': int(data['accepted conn']),
                'slow_requests': int(data.get('slow requests', 0)),
                'ts': now,
            },
            table='pool_snapshot',
        )
    )


def compute_deltas(data, old, now):
    """Return ``(req_rate, slow_delta, baseline)`` for a pool.

    - ``req_rate`` is new accepted connections per second since the previous
      run, or ``None`` for the first run / after an FPM restart.
    - ``slow_delta`` is the number of new slow requests since the previous
      run, or ``None`` for the first run / after an FPM restart.
    - ``baseline`` is ``True`` iff this run is just establishing a baseline
      (no previous snapshot, or the FPM instance has been restarted since
      the last snapshot). In that case, delta-based thresholds must not
      alert.
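
    Example (a plain ``dict`` stands in for the sqlite snapshot row)::

        >>> compute_deltas(
        ...     {'start time': 100, 'accepted conn': 110, 'slow requests': 2},
        ...     {'fpm_start_time': 100, 'accepted_conn': 10,
        ...      'slow_requests': 0, 'ts': 0.0},
        ...     50.0,
        ... )
        (2.0, 2, False)
        >>> compute_deltas({'start time': 100, 'accepted conn': 110}, None, 50.0)
        (None, None, True)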
    """
    if not old:
        return None, None, True

    if int(old['fpm_start_time']) != int(data['start time']):
        # FPM was restarted since the last check; counters reset, so any
        # "delta" would be nonsense. Re-baseline and suppress delta alerts.
        return None, None, True

    dt = now - float(old['ts'])
    if dt <= 0:
        return None, None, True

    accepted_delta = int(data['accepted conn']) - int(old['accepted_conn'])
    slow_delta = int(data.get('slow requests', 0)) - int(old['slow_requests'])
    if accepted_delta < 0 or slow_delta < 0:
        # Counters somehow went backwards without a start-time change. Treat
        # like a restart and re-baseline, rather than emitting negative rates.
        return None, None, True

    return accepted_delta / dt, slow_delta, False


_URI_MAX = 50
_SCRIPT_MAX = 30


def _strip_query_string(uri):
    """Drop the query string from a request URI.

    PHP-FPM reports the full URI including query parameters. For a
    monitoring plugin the query string is noise at best (it is not
    actionable from a check state) and potentially sensitive at worst
    (session tokens, URL-embedded credentials). We keep the path and the
    ``?`` so the admin can still tell that the endpoint took parameters.
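
    Examples::

        >>> _strip_query_string('/index.php?token=secret')
        '/index.php?'
        >>> _strip_query_string('/index.php')
        '/index.php'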
    """
    if not uri:
        return uri
    q = uri.find('?')
    if q < 0:
        return uri
    return uri[: q + 1]


def _abbreviate_path(path):
    """Abbreviate a filesystem path by reducing every directory component
    except the last one to its first character.

    Example: ``/usr/share/icingaweb2/public/index.php`` →
    ``/u/s/i/p/index.php``. The basename is always kept in full because
    the admin needs it to identify the script.
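
    Doctest, including a passthrough case::

        >>> _abbreviate_path('/usr/share/icingaweb2/public/index.php')
        '/u/s/i/p/index.php'
        >>> _abbreviate_path('-')
        '-'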
    """
    if not path or path in ('-', '/') or '/' not in path:
        return path
    parts = path.split('/')
    head = parts[:-1]
    tail = parts[-1]
    abbrev = '/'.join(p[:1] if p else '' for p in head)
    return f'{abbrev}/{tail}' if abbrev else f'/{tail}'


def _format_request_duration(us):
    """Format a PHP-FPM `request duration` (microseconds) at millisecond
    precision.

    Microseconds are noise in a monitoring plugin; the admin cares whether
    a request was fast (single-digit ms) or slow (seconds), not whether it
    took 925 843 or 925 847 µs. This helper rounds to whole ms and
    produces the usual compact `Xs Yms`, `Xm Ys`, `Xh Ym` shapes.
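
    Examples::

        >>> _format_request_duration(925843)
        '926ms'
        >>> _format_request_duration(61500000)
        '1m 1s'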
    """
    ms_total = round(us / 1000)
    if ms_total < 1000:
        return f'{ms_total}ms'
    s_total, ms_rem = divmod(ms_total, 1000)
    if s_total < 60:
        return f'{s_total}s {ms_rem}ms' if ms_rem else f'{s_total}s'
    m_total, s_rem = divmod(s_total, 60)
    if m_total < 60:
        return f'{m_total}m {s_rem}s' if s_rem else f'{m_total}m'
    h_total, m_rem = divmod(m_total, 60)
    return f'{h_total}h {m_rem}m' if m_rem else f'{h_total}h'


def _shorten_left(s, limit):
    """Truncate a string from the left, keeping the tail.

    Used for paths where the basename is the interesting part. Example:
    ``/var/www/html/nextcloud/remote.php`` → ``…www/html/nextcloud/remote.php``.
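
    Doctest (with ``limit=30``)::

        >>> _shorten_left('/var/www/html/nextcloud/remote.php', 30)
        '…www/html/nextcloud/remote.php'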
    """
    if len(s) <= limit:
        return s
    return '…' + s[-(limit - 1) :]


def _shorten_right(s, limit):
    """Truncate a string from the right, appending an ellipsis."""
    if len(s) <= limit:
        return s
    return s[: limit - 1] + '…'


def build_pool_row(name, data, saturation, req_rate, slow_delta):
    """Return a dict representing one row of the pool summary table."""
    return {
        'pool': name,
        'proc_mgr': data.get('process manager', '-'),
        'req_rate': (f'{req_rate:.1f}/s' if req_rate is not None else '-'),
        'active': data.get('active processes', 0),
        'idle': data.get('idle processes', 0),
        'total': data.get('total processes', 0),
        'max_active': data.get('max active processes', 0),
        'listen_queue': data.get('listen queue', 0),
        'max_listen_queue': data.get('max listen queue', 0),
        'saturation': f'{saturation:.1f}%',
        'new_slow': slow_delta if slow_delta is not None else '-',
        'uptime': lib.human.seconds2human(data.get('start since', 0)),
    }


def build_process_rows(data, include_idle=False):
    """Return process rows for the per-pool detail table.

    By default only workers in the ``Running`` state are returned, matching
    the compact default output. ``include_idle=True`` (used by the
    ``--lengthy`` mode) also returns idle workers so the admin always sees
    a populated process table even on a quiet pool.
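
    A pool consisting only of idle workers yields no rows by default::

        >>> build_process_rows({'processes': [{'state': 'Idle'}]})
        []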
    """
    rows = []
    for value in data.get('processes', []) or []:
        if not value:
            continue
        state = value.get('state', '')
        if not include_idle and state.lower() != 'running':
            continue
        request_uri = value.get('request uri', '')
        # skip this check's own status request, recognizable by the `json`
        # and `full` query flags appended in build_fpm_url()
        if 'json' in request_uri and 'full' in request_uri:
            continue
        rows.append(
            {
                'pid': value.get('pid', '-'),
                'state': state or '-',
                'start time': (
                    f'{lib.time.epoch2iso(value.get("start time", 0))[:16]}'
                    f' ({lib.human.seconds2human(value.get("start since", 0))})'
                ),
                'requests': value.get('requests', 0),
                # request duration is in microseconds; show at ms precision
                'request duration': _format_request_duration(
                    int(value.get('request duration', 0))
                ),
                'request method': value.get('request method', '-'),
                'request uri': _shorten_right(
                    _strip_query_string(request_uri), _URI_MAX
                ),
                'content length': (
                    lib.human.bytes2human(value['content length'])
                    if value.get('content length')
                    else '-'
                ),
                'user': value.get('user', '-'),
                'script': _shorten_left(
                    _abbreviate_path(value.get('script', '-')), _SCRIPT_MAX
                ),
            }
        )
    return rows


def _perfdata_prefix(pool_name):
    """Return a perfdata-safe prefix for a pool name.

    Non-word characters are collapsed to underscores so pool names like
    ``my-app`` or ``web 01`` still yield clean snake_case labels
    (``my_app_saturation``, ``web_01_saturation``).
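
    Examples::

        >>> _perfdata_prefix('my-app')
        'my_app'
        >>> _perfdata_prefix('web 01')
        'web_01'
        >>> _perfdata_prefix('!!!')
        'pool'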
    """
    return re.sub(r'\W+', '_', pool_name).strip('_') or 'pool'


def build_pool_perfdata(pool_name, data, saturation, req_rate, slow_delta, args):
    """Return perfdata for one pool. All labels are prefixed with the pool name
    in ``<pool>_snake_case`` form, matching the ``procs`` and ``disk-io``
    convention."""
    prefix = _perfdata_prefix(pool_name)
    perfdata = ''

    perfdata += lib.base.get_perfdata(
        f'{prefix}_saturation',
        saturation,
        uom='%',
        warn=args.WARN,
        crit=args.CRIT,
        _min=0,
        _max=100,
    )
    perfdata += lib.base.get_perfdata(
        f'{prefix}_active_processes',
        data.get('active processes', 0),
        _min=0,
        _max=data.get('total processes', 0),
    )
    perfdata += lib.base.get_perfdata(
        f'{prefix}_idle_processes',
        data.get('idle processes', 0),
        _min=0,
        _max=data.get('total processes', 0),
    )
    perfdata += lib.base.get_perfdata(
        f'{prefix}_total_processes',
        data.get('total processes', 0),
        _min=0,
    )
    perfdata += lib.base.get_perfdata(
        f'{prefix}_max_active_processes',
        data.get('max active processes', 0),
        _min=0,
    )
    perfdata += lib.base.get_perfdata(
        f'{prefix}_listen_queue',
        data.get('listen queue', 0),
        _min=0,
    )
    perfdata += lib.base.get_perfdata(
        f'{prefix}_max_listen_queue',
        data.get('max listen queue', 0),
        _min=0,
    )
    perfdata += lib.base.get_perfdata(
        f'{prefix}_listen_queue_len',
        data.get('listen queue len', 0),
        _min=0,
    )
    perfdata += lib.base.get_perfdata(
        f'{prefix}_start_since',
        data.get('start since', 0),
        uom='s',
        _min=0,
    )
    if req_rate is not None:
        perfdata += lib.base.get_perfdata(
            f'{prefix}_accepted_conn_rate',
            round(req_rate, 2),
            _min=0,
        )
    if slow_delta is not None:
        perfdata += lib.base.get_perfdata(
            f'{prefix}_slow_requests_delta',
            slow_delta,
            warn=args.WARN_SLOW_REQUESTS,
            crit=args.CRIT_SLOW_REQUESTS,
            _min=0,
        )
    return perfdata


def analyze_pool(url, raw, old_snapshot, now, args):
    """Parse one pool's JSON response and return a pool-result dict.

    The dict contains the per-pool state, summary header line, pool-table row,
    per-process rows, perfdata, and the new snapshot values (or ``None`` if
    parsing failed).
    """
    try:
        data = json.loads(raw)
    except (ValueError, TypeError) as exc:
        return {
            'url': url,
            'name': url,
            'state': lib.base.str2state(args.SEVERITY),
            'header': f'{url}: cannot parse status JSON ({exc})',
            'pool_row': None,
            'process_rows': [],
            'perfdata': '',
            'ok': False,
            'data': None,
        }

    pool_name = data.get('pool', url)

    total = int(data.get('total processes', 0))
    active = int(data.get('active processes', 0))
    saturation = (active / total * 100) if total > 0 else 0.0
    saturation_state = lib.base.get_state(saturation, args.WARN, args.CRIT)

    req_rate, slow_delta, baseline = compute_deltas(data, old_snapshot, now)

    if baseline:
        slow_state = STATE_OK
    else:
        slow_state = lib.base.get_state(
            slow_delta,
            args.WARN_SLOW_REQUESTS,
            args.CRIT_SLOW_REQUESTS,
        )

    pool_state = lib.base.get_worst(saturation_state, slow_state)

    header = (
        f'Pool {pool_name} ({data.get("process manager", "-")})'
        f': Up {lib.human.seconds2human(data.get("start since", 0))}'
        f' (since {lib.time.epoch2iso(data.get("start time", 0))})'
    )
    if baseline:
        header += ' — baseline captured, waiting for more data'

    pool_row = build_pool_row(pool_name, data, saturation, req_rate, slow_delta)
    process_rows = build_process_rows(data, include_idle=args.LENGTHY)
    perfdata = build_pool_perfdata(
        pool_name, data, saturation, req_rate, slow_delta, args
    )

    return {
        'url': url,
        'name': pool_name,
        'state': pool_state,
        'header': header,
        'pool_row': pool_row,
        'process_rows': process_rows,
        'perfdata': perfdata,
        'ok': True,
        'data': data,
    }


POOL_TABLE_KEYS_DEFAULT = [
    'pool',
    'req_rate',
    'active',
    'total',
    'saturation',
    'new_slow',
]
POOL_TABLE_HEADERS_DEFAULT = [
    'Pool',
    'Req/s',
    'Act',
    'Tot',
    'Sat',
    'Slow+',
]

POOL_TABLE_KEYS_LENGTHY = [
    'pool',
    'proc_mgr',
    'req_rate',
    'active',
    'idle',
    'total',
    'max_active',
    'listen_queue',
    'max_listen_queue',
    'saturation',
    'new_slow',
    'uptime',
]
POOL_TABLE_HEADERS_LENGTHY = [
    'Pool',
    'Mgr',
    'Req/s',
    'Act',
    'Idle',
    'Tot',
    'Peak',
    'LQ',
    'LQmax',
    'Sat',
    'Slow+',
    'Up',
]

PROCESS_TABLE_KEYS_DEFAULT = [
    'pid',
    'requests',
    'request duration',
    'request method',
    'request uri',
    'user',
]
PROCESS_TABLE_HEADERS_DEFAULT = [
    'PID',
    'Reqs',
    'LastDur',
    'Mthd',
    'URI',
    'User',
]

PROCESS_TABLE_KEYS_LENGTHY = [
    'pid',
    'state',
    'start time',
    'requests',
    'request duration',
    'request method',
    'content length',
    'request uri',
    'script',
    'user',
]
PROCESS_TABLE_HEADERS_LENGTHY = [
    'PID',
    'State',
    'Start',
    'Reqs',
    'LastDur',
    'Mthd',
    'CLen',
    'URI',
    'Script',
    'User',
]


def format_single_pool(result, args):
    """Return the message body for the single-pool case."""
    msg = result['header'] + '\n\n'

    if result['pool_row']:
        keys = POOL_TABLE_KEYS_LENGTHY if args.LENGTHY else POOL_TABLE_KEYS_DEFAULT
        headers = (
            POOL_TABLE_HEADERS_LENGTHY if args.LENGTHY else POOL_TABLE_HEADERS_DEFAULT
        )
        # The default key set already omits the uptime column, since the
        # header line above shows uptime; --lengthy keeps it in the table.
        msg += lib.base.get_table([result['pool_row']], keys, header=headers)

    if result['process_rows']:
        keys = (
            PROCESS_TABLE_KEYS_LENGTHY if args.LENGTHY else PROCESS_TABLE_KEYS_DEFAULT
        )
        headers = (
            PROCESS_TABLE_HEADERS_LENGTHY
            if args.LENGTHY
            else PROCESS_TABLE_HEADERS_DEFAULT
        )
        msg += '\n'
        msg += lib.base.get_table(result['process_rows'], keys, header=headers)
        msg += '\nFor details, see https://www.php.net/manual/en/fpm.status.php'

    return msg


def format_multi_pool(results, args):
    """Return the message body for the multi-pool case (>= 2 URLs)."""
    total = len(results)
    ok_count = sum(1 for r in results if r['ok'] and r['state'] == STATE_OK)

    # Group the non-OK pools by outcome so the admin sees the bad news on the
    # first line (unreachable, then CRIT, then WARN, then UNKNOWN).
    non_ok_parts = []
    unreachable = [r for r in results if not r['ok']]
    if unreachable:
        names = ', '.join(
            f'{r["name"]} {lib.base.state2str(r["state"])}' for r in unreachable
        )
        non_ok_parts.append(f'{len(unreachable)} unreachable ({names})')

    for state, label in (
        (STATE_CRIT, 'CRIT'),
        (STATE_WARN, 'WARN'),
        (STATE_UNKNOWN, 'UNKNOWN'),
    ):
        bad = [r for r in results if r['ok'] and r['state'] == state]
        if not bad:
            continue
        names = ', '.join(r['name'] for r in bad)
        non_ok_parts.append(f'{len(bad)} {label} ({names})')

    summary = f'{total} pools checked, {ok_count} OK'
    if non_ok_parts:
        summary += ', ' + ', '.join(non_ok_parts)

    msg = summary + '\n\n'

    # pool-level overview table: one row per pool (ok or unreachable alike)
    keys = POOL_TABLE_KEYS_LENGTHY if args.LENGTHY else POOL_TABLE_KEYS_DEFAULT
    headers = POOL_TABLE_HEADERS_LENGTHY if args.LENGTHY else POOL_TABLE_HEADERS_DEFAULT
    pool_rows = []
    for r in results:
        if r['ok']:
            pool_rows.append(r['pool_row'])
        else:
            # placeholder row for unreachable pools so the admin sees them in
            # the overview table too
            pool_rows.append(
                dict.fromkeys(POOL_TABLE_KEYS_LENGTHY, '-')
                | {
                    'pool': r['name'],
                    'saturation': 'unreachable',
                }
            )
    msg += lib.base.get_table(pool_rows, keys, header=headers)

    # per-pool running-process tables
    for r in results:
        if not r['ok'] or not r['process_rows']:
            continue
        msg += f'\n\nPool {r["name"]} — processes:\n'
        proc_keys = (
            PROCESS_TABLE_KEYS_LENGTHY if args.LENGTHY else PROCESS_TABLE_KEYS_DEFAULT
        )
        proc_headers = (
            PROCESS_TABLE_HEADERS_LENGTHY
            if args.LENGTHY
            else PROCESS_TABLE_HEADERS_DEFAULT
        )
        msg += lib.base.get_table(r['process_rows'], proc_keys, header=proc_headers)

    msg += '\n\nFor details, see https://www.php.net/manual/en/fpm.status.php'
    return msg


def main():
    """The main function. This is where the magic happens."""

    # parse the command line
    try:
        args = parse_args()
    except SystemExit:
        sys.exit(STATE_UNKNOWN)

    # `action='append'` with a non-None default appends to it; we use `None`
    # and substitute the default list here so that user-supplied values
    # replace the default instead of being appended to it.
    if not args.URL:
        args.URL = [DEFAULT_URL]

    if args.TEST is not None and len(args.TEST) != len(args.URL):
        lib.base.cu(
            '`--test` must be given once per `--url` (got '
            f'{len(args.TEST)} test specs for {len(args.URL)} URLs)'
        )

    now = lib.time.now()

    conn = None
    if args.TEST is None:
        conn = open_cache()

    results = []
    overall_state = STATE_OK
    for index, url in enumerate(args.URL):
        test_spec = args.TEST[index] if args.TEST is not None else None

        ok, raw = fetch_pool_raw(url, args, test_spec)
        if not ok:
            results.append(
                {
                    'url': url,
                    'name': url,
                    'state': lib.base.str2state(args.SEVERITY),
                    'header': f'{url}: unreachable ({raw})',
                    'pool_row': None,
                    'process_rows': [],
                    'perfdata': '',
                    'ok': False,
                    'data': None,
                }
            )
            overall_state = lib.base.get_worst(
                lib.base.str2state(args.SEVERITY), overall_state
            )
            continue

        old_snapshot = load_snapshot(conn, url) if conn else None
        result = analyze_pool(url, raw, old_snapshot, now, args)
        results.append(result)
        overall_state = lib.base.get_worst(result['state'], overall_state)

        if conn is not None and result['data'] is not None:
            save_snapshot(conn, url, result['data'], now)

    if conn is not None:
        lib.base.coe(lib.db_sqlite.commit(conn))
        lib.db_sqlite.close(conn)

    if len(results) == 1:
        msg = format_single_pool(results[0], args)
    else:
        msg = format_multi_pool(results, args)

    perfdata = ''.join(r['perfdata'] for r in results)

    lib.base.oao(msg, overall_state, perfdata, always_ok=args.ALWAYS_OK)


if __name__ == '__main__':
    try:
        main()
    except Exception:
        lib.base.cu()
