#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2019
# This file is part of Shinken Enterprise, all rights reserved.

import collections
from inspect import getargspec

from shinkensolutions.lib_checks.common import *

API_CONNECTION_WARNING_LEVEL = 2

DEFAULT_SCHEDULER_LATENCY_THRESHOLD = 1.5  # s
DEFAULT_NB_CHECK_IN_TIMEOUT_TOLERATE = 0  # check


# Allow \n in the parser output
# Allow \n in the parser output
class MyParser(optparse.OptionParser):
    # OptionParser variant that prints the epilog verbatim: the default
    # formatter re-wraps the epilog text, which would destroy the
    # hand-built, tab-indented modes help we pass in.

    def format_epilog(self, formatter):
        # Return the raw epilog, bypassing the formatter's re-wrapping.
        return self.epilog

    def format_help(self, formatter=None):
        # Same assembly as OptionParser.format_help, but routed through
        # our format_epilog override so newlines survive.
        if formatter is None:
            formatter = self.formatter
        parts = []
        if self.usage:
            parts.append(self.get_usage() + "\n")
        if self.description:
            parts.append(self.format_description(formatter) + "\n")
        parts.append(self.format_option_help(formatter))
        parts.append(self.format_epilog(formatter))
        return "".join(parts)


# Call get_raw_stats and raise the exception if there is one.
# Call get_raw_stats and raise the exception if there is one.
def get_raw_stat_with_exp(param='', _uri=''):
    # Query /get_raw_stats (optionally scoped by ?param=...) on the daemon
    # at _uri (falls back to the module-level uri). HTTP errors propagate
    # to the caller because raise_exp=True.
    query = "?param=%s" % param if param else ''
    target = _uri or uri
    buf, connexion_time = Utils.request_get(result, target, '/get_raw_stats%s' % query, timeout=opts.timeout, raise_exp=True)
    stats = json.loads(buf)
    # Record how long the HTTP round-trip took alongside the daemon stats.
    stats['connexion_time'] = connexion_time
    return stats


def get_raw_stat():
    # Fetch /get_raw_stats from the daemon (optionally scoped by the global
    # last_check parameter) and decode the JSON payload.
    # Hard-exits WARNING when the payload cannot be decoded; otherwise returns
    # the stats dict with the HTTP round-trip time added as 'connexion_time'.
    sparam = ''
    if last_check:
        sparam = "?param=%s" % last_check
    # Now all others need raw stats, so get it
    buf, connexion_time = ShinkenUtils.request_get_daemon(result, daemon_type, uri, '/get_raw_stats%s' % sparam, timeout=opts.timeout)
    try:
        data = json.loads(buf)
        data['connexion_time'] = connexion_time
        return data
    # Was a bare "except:" — that also swallowed SystemExit/KeyboardInterrupt.
    # Exception still covers the decode failures seen here (bad JSON, or a
    # non-dict payload rejected by the item assignment above).
    except Exception:
        result.hard_exit(EXIT_STATUS.WARNING, 'Cannot load performance data from daemon: %s' % buf)


def get_modes_help():
    # Build the epilog text listing, per daemon type, every supported -m/--mode
    # value with its help line. Returns a tab-indented multi-line string.
    lines = ['Mode, depends of the -t/--daemon-type parameter:\n']
    # sorted(...items()) replaces the Python-2-only iteritems()/keys().sort()
    # pattern and also makes the daemon-type ordering deterministic.
    for daemon, modes in sorted(MODES.items()):
        lines.append('\t%s:\n' % daemon)
        for mode in sorted(modes):
            lines.append('\t\t%s %s\n' % (mode, modes[mode]['help']))
    return ''.join(lines)


def get_option_parser():
    # Build the command-line parser, with the per-daemon modes help attached
    # as the epilog (MyParser keeps its newlines intact).
    parser = MyParser(epilog=get_modes_help())
    # (short flag, long flag, add_option keyword arguments)
    option_specs = (
        ('-H', '--hostname', dict(dest='hostname', default='127.0.0.1')),
        ('-p', '--port', dict(dest='port', default=7770, type=int)),
        ('-t', '--daemon-type', dict(dest='daemon_type', default='arbiter')),
        ('-T', '--timeout', dict(dest='timeout', type=int, default=3)),
        ('-m', '--mode', dict(dest='mode', default='alive', help='')),
        ('-l', '--last_check', dict(dest='last_check', default='', help='')),
        ('', '--livedata_warning', dict(dest='livedata_warning', default=1, type=int)),
        ('', '--livedata_error_displayed_limit', dict(dest='livedata_error_limit', default=5, type=int)),
        ('', '--active_poller_latency', dict(dest='active_poller_latency', type=float, default=0.5, help='Threshold of latency between a scheduler and a active poller in second.')),
        ('', '--active_reactionner_latency', dict(dest='active_reactionner_latency', type=float, default=0.5, help='Threshold of latency between a scheduler and a active reactionner in second.')),
        ('', '--passive_poller_latency', dict(dest='passive_poller_latency', type=float, default=0.5, help='Threshold of latency between a scheduler and a passive poller in second.')),
        ('', '--check_tolerate', dict(type=int, dest='check_tolerate', default=0, help='Number of checks in timeout before the Poller Running Well change this state to WARNING.')),
        ('', '--shinkenversion', dict(dest='shinken_supervisor_version', default='', help='This shinken version number to compare with the monitored shinken.')),
    )
    for short_flag, long_flag, kwargs in option_specs:
        parser.add_option(short_flag, long_flag, **kwargs)
    return parser


def basic_check(daemon_type):
    # Fetch the daemon's raw stats and run the shared minimal sanity checks.
    # Hard-exits CRITICAL when the daemon is unreachable or the stats call
    # fails, WARNING when the daemon answers but cannot provide stats.
    # On success, returns the stats dict tagged with 'daemon_type'.
    data = None
    try:
        data = get_raw_stat_with_exp()
    except Exception as stats_err:
        # Stats failed: ping the daemon to distinguish "daemon down" from
        # "daemon up but stats endpoint broken", and report accordingly.
        try:
            ShinkenUtils.request_get_daemon(result, daemon_type, uri, '/ping', timeout=opts.timeout)
        except Exception as ping_err:
            result.hard_exit(EXIT_STATUS.CRITICAL,
                             "Impossible to contact the %s at %s." % (daemon_type, uri),
                             "%s" % ping_err)
        result.hard_exit(EXIT_STATUS.CRITICAL,
                         "Impossible to get stats the %s at %s." % (daemon_type, uri),
                         "%s" % stats_err)

    # A payload holding only our own 'connexion_time' means the daemon gave
    # no stats at all — most likely an outdated daemon.
    if len(data) == 1 and 'connexion_time' in data:
        result.hard_exit(EXIT_STATUS.WARNING,
                         "Connection established in %.3fs but failed to get stats from the %s at %s. The %s probably needs to be updated" % (
                             data['connexion_time'], daemon_type, uri, daemon_type))

    ShinkenUtils.minimal_check(result, data, daemon_type, shinken_supervisor_version)

    # add the daemon type as data
    data['daemon_type'] = daemon_type
    return data


def alive():
    # Generic liveness mode: run the basic checks, report connection time,
    # version and module state, then exit with the aggregated status.
    t0 = time.time()
    stats = basic_check(daemon_type)
    elapsed = time.time() - t0

    result.set_perf_data({'connexion_time': elapsed})
    result.set_spare_info(stats)
    ShinkenUtils.check_arbiter_connection(result, uri, daemon_type, timeout=opts.timeout)
    ShinkenUtils.add_http_error_count_message(result, stats)
    ShinkenUtils.add_warning_module_restart(result, stats)

    daemon_version = stats.get('daemon_version', None)
    if daemon_version:
        result.add_check(EXIT_STATUS.OK, 'Version [%s].' % daemon_version)
    result.add_check(EXIT_STATUS.OK, 'Connection established in %.3fs.' % elapsed, no_new_line=True)

    ShinkenUtils.add_module_info(result, stats)
    # Title line reflects whether any of the checks above degraded the status.
    if result.status == EXIT_STATUS.OK:
        result.add_check(EXIT_STATUS.OK, 'The daemon is running well.', title=True)
    else:
        result.add_check(EXIT_STATUS.WARNING, 'The daemon have some issues.', title=True)
    result.exit()


def api_connection():
    # Measure how long the daemon takes to answer /get_lock and warn when it
    # exceeds API_CONNECTION_WARNING_LEVEL (a slow answer suggests overload).
    t0 = time.time()
    ShinkenUtils.request_get_daemon(result, daemon_type, uri, '/get_lock', timeout=opts.timeout)
    get_lock_time = time.time() - t0
    result.set_perf_data({'get_lock_time': get_lock_time})

    if get_lock_time <= API_CONNECTION_WARNING_LEVEL:
        result.add_check(status=EXIT_STATUS.OK, output='API Connexion is in the good range (%.3f < %ds).' % (
            get_lock_time, API_CONNECTION_WARNING_LEVEL))
    else:
        result.add_check(status=EXIT_STATUS.WARNING,
                         output='API Connexion was too long (%.3f > %ds). It can be a sign that your daemon is overloaded.' % (
                             get_lock_time, API_CONNECTION_WARNING_LEVEL))
    result.exit()


def arbiter_alive():
    # Arbiter-specific liveness: same as alive() but without
    # check_arbiter_connection (the arbiter cannot check itself).
    t0 = time.time()
    stats = basic_check(daemon_type)
    elapsed = time.time() - t0
    result.set_perf_data({'connexion_time': elapsed})

    result.set_spare_info(stats)
    ShinkenUtils.add_warning_module_restart(result, stats)
    ShinkenUtils.add_http_error_count_message(result, stats)

    # Title first here (unlike alive()), then the detail lines.
    if result.status == EXIT_STATUS.OK:
        result.add_check(EXIT_STATUS.OK, 'Your arbiter is running well.', title=True)
    else:
        result.add_check(EXIT_STATUS.WARNING, 'Your arbiter has some issues.', title=True)

    daemon_version = stats.get('daemon_version', None)
    if daemon_version:
        result.add_check(EXIT_STATUS.OK, 'Version [%s].' % daemon_version)
    result.add_check(EXIT_STATUS.OK, 'Connection established in %.3fs.' % elapsed)
    ShinkenUtils.add_module_info(result, stats)
    result.exit()


def synchronizer_alive():
    # Synchronizer-specific liveness: same shape as arbiter_alive(), without
    # the arbiter-connection and spare checks.
    t0 = time.time()
    stats = basic_check(daemon_type)
    elapsed = time.time() - t0
    result.set_perf_data({'connexion_time': elapsed})

    result.set_spare_info(stats)
    ShinkenUtils.add_warning_module_restart(result, stats)
    ShinkenUtils.add_http_error_count_message(result, stats)

    if result.status == EXIT_STATUS.OK:
        result.add_check(EXIT_STATUS.OK, 'The synchronizer is running well.', title=True)
    else:
        result.add_check(EXIT_STATUS.WARNING, 'The synchronizer have some issues.', title=True)

    daemon_version = stats.get('daemon_version', None)
    if daemon_version:
        result.add_check(EXIT_STATUS.OK, 'Version [%s].' % daemon_version)
    result.add_check(EXIT_STATUS.OK, 'Connection established in %.3fs.' % elapsed)
    ShinkenUtils.add_module_info(result, stats)
    result.exit()


def arbiter_stats():
    # Report the arbiter's view of its satellite daemons: per-daemon version
    # mismatch, clock drift vs the arbiter, reachability and liveness.
    # Exits through result.exit() (or hard_exit on fatal conditions).
    raw_data = None
    connexion_time = 0
    arbiter_api_version = ''
    arbiter_version = ''
    
    try:
        buf, connexion_time = ShinkenUtils.request_get_daemon(result, daemon_type, uri, '/get_daemon_infos', timeout=opts.timeout)
        raw_data = json.loads(buf)
    except Exception as e:
        result.hard_exit(EXIT_STATUS.WARNING, 'Cannot load daemon information from arbiter [%s]' % e)
    
    try:
        # NOTE(review): the AttributeError handler below presumably covers an
        # old daemon answering with a non-dict payload (no .get) — confirm.
        arbiter = raw_data.get('arbiter', None)
        if arbiter:
            arbiter_version = arbiter.get('version', None)
            arbiter_api_version = arbiter.get('api_version', None)
            # Any non-empty output from _check_versions is a mismatch message.
            output = ShinkenUtils._check_versions(daemon_type, arbiter_api_version, arbiter_version, arbiter_version, shinken_supervisor_version)
            if output:
                result.hard_exit(EXIT_STATUS.WARNING, output)
        else:
            # No 'arbiter' section: payload from an outdated arbiter.
            result.hard_exit(EXIT_STATUS.WARNING, 'Your %s is alive but not up to date. Please update.' % daemon_type)
    except AttributeError:
        result.hard_exit(EXIT_STATUS.WARNING, 'Your %s is alive but not up to date. Please update.' % daemon_type)
    
    data = raw_data.get('satellites', [])
    
    result.add_check(output='Arbiter answered in %.3fs.' % connexion_time, title=True)
    if data:
        in_warning = False
        list_daemons = []
        # Build one colored status line per satellite; later conditions
        # deliberately override earlier ones (alive beats reachable beats
        # time-shift beats version mismatch).
        for e in data:
            state = HTMLTag.color_text('[OK]', COLOR.GREEN)
            state_text = ''
            show_version = True
            daemon_version = e.get('daemon_version', None)
            output_version = ShinkenUtils._check_versions(e.get('type', 'daemon'), arbiter_api_version, daemon_version, arbiter_version, shinken_supervisor_version)
            if output_version:
                state = HTMLTag.color_text('[WARNING]', COLOR.ORANGE)
                in_warning = True
                show_version = False
                state_text = output_version
            # More than 30s of clock drift between satellite and arbiter.
            if abs(e.get('diff_time_with_arbiter', 0)) > 30:
                state = HTMLTag.color_text('[WARNING]', COLOR.ORANGE)
                in_warning = True
                state_text += ' => connexion OK but a time shift of %s!' % HTMLTag.color_text(
                    '%ds' % e['diff_time_with_arbiter'], color=COLOR.ORANGE)
            if not e.get('reachable', True):
                state = HTMLTag.color_text('[WARNING]', COLOR.ORANGE)
                state_text = 'couldn\'t be reached during the last ping'
                in_warning = True
                show_version = False
            if not e['alive']:
                state = HTMLTag.color_text('[WARNING]', COLOR.ORANGE)
                state_text = 'connection %s' % HTMLTag.color_text('KO', color=COLOR.ORANGE)
                in_warning = True
                show_version = False
            
            if show_version:
                list_daemons.append(
                    '%s: %s [%s] Version : [%s] %s' % (state, e['type'], e['display_name'], daemon_version, state_text))
            else:
                list_daemons.append('%s: %s [%s] %s' % (state, e['type'], e['display_name'], state_text))
        
        output = HTMLList.header_list('Connected to daemons', list_daemons)
        status = EXIT_STATUS.WARNING if in_warning else EXIT_STATUS.OK
        result.add_check(status=status, output=output)
    else:
        # if the arbiter has no connection to any daemons that probably means that he is a spare
        # call the raw stats to be sure about that
        stats_data = get_raw_stat()
        result.set_spare_info(stats_data, daemon_type=daemon_type)
    
    result.add_check(output='Version [%s]' % arbiter_version)
    result.exit()


# Spec at SEF-1053
# Spec at SEF-1053
def poller_alive(opts):
    # Full health check for a poller daemon: scheduler connectivity and
    # latency, worker restarts, overload state and checks in timeout.
    # Aggregates everything into `result` and exits through result.exit().
    #
    # opts: parsed command-line options; reads active_poller_latency (s),
    # check_tolerate (count) and timeout.
    start_time = time.time()
    data = basic_check('poller')
    connexion_time = time.time() - start_time
    
    # Threshold of latency between a scheduler and a active poller
    active_poller_latency = opts.active_poller_latency
    # Number of checks in timeout before the Poller Running Well change this state to WARNING.
    check_tolerate = opts.check_tolerate
    
    # Inline CSS for the "check command" cells of the timeout table below.
    style = '<style type="text/css">' \
            '.skn-pahc {' \
            'font-style:italic!important;' \
            'color:#7F7F7F!important;' \
            '}' \
            '</style>'
    
    s_schedulers_ok = []
    as_print_scheduler_info = False
    version = data.get('daemon_version', None)
    nb_check_in_timeout = data.get('nb_check_in_timeout', 0)
    executor_in_overload = data.get('executor_in_overload', False)
    nb_action_done_per_sec = data.get('nb_action_done_per_sec', 0)
    poller_type = data.get('type', 'MISSING_TYPE')
    poller_tag = data.get('tags', ['MISSING TAGS INFO'])
    keep_timeout_time = data.get('keep_timeout_time', 1200)  # seconds the daemon keeps timeout history
    workers_restarts = data.get('workers_restarts', {})
    dead_workers = data.get('dead_worker_stat', {})
    
    stats = {'nb_check_in_timeout': nb_check_in_timeout}
    result.set_perf_data(stats)
    
    result.set_spare_info(data)
    
    # Prefer the 24h restart history; fall back to the since-start counters
    # (dead_worker_stat) for daemons that do not expose workers_restarts.
    # Python 2 only: iteritems().
    if workers_restarts:
        list_dead_workers = []
        for woker_type, restart_data in workers_restarts.iteritems():
            # 'fork' is the internal name of the default worker type.
            woker_type = 'default worker type' if woker_type == 'fork' else woker_type
            list_dead_workers.append("'%s' restarted %s times" % (woker_type, len(restart_data)))
        output = HTMLList.header_list('In the last 24 hours, some workers died but were restarted:', list_dead_workers)
        result.add_check(EXIT_STATUS.WARNING, output)
    
    elif dead_workers:
        list_dead_workers = []
        for woker_type, restart_count in dead_workers.iteritems():
            woker_type = 'default worker type' if woker_type == 'fork' else woker_type
            list_dead_workers.append("'%s' restarted %s times" % (woker_type, restart_count))
        output = HTMLList.header_list('Since last restart, some workers died but were restarted:', list_dead_workers)
        result.add_check(EXIT_STATUS.WARNING, output)
    
    ShinkenUtils.check_arbiter_connection(result, uri, daemon_type, timeout=opts.timeout)
    ShinkenUtils.add_warning_module_restart(result, data)
    ShinkenUtils.add_http_error_count_message(result, data)
    
    if executor_in_overload:
        result.add_check(EXIT_STATUS.WARNING,
                         'Your poller is overloaded, it cannot make more than %d check by sec.%sYou should add a new poller%s' % (
                             nb_action_done_per_sec, BREAK_LINE, BREAK_LINE))
    
    # Scheduler connectivity only makes sense for active pollers (passive
    # pollers are contacted by the schedulers, not the other way round).
    if not poller_type == 'PASSIVE' and data.get('schedulers', None):
        s_schedulers_ko = []
        s_schedulers_ko_info = []
        all_ko = True
        one_ko = False
        # First pass: split schedulers into reachable ('con') and not.
        for scheduler in data.get('schedulers', []):
            s_scheduler = "%s [%s]" % (scheduler.get('name', 'MISSING_NAME'), scheduler.get('addr', 'MISSING_ADRESSE'))
            
            if scheduler.get('con', False):
                all_ko = False
                s_schedulers_ok.append(HTMLTag.color_text(s_scheduler, COLOR.GREEN, bold=False))
            else:
                one_ko = True
                s_schedulers_ko.append(HTMLTag.color_text(s_scheduler, COLOR.RED, bold=False))
                s_scheduler_ko_info = '%s&nbsp;:&nbsp;<br/>%s<br/>-------<br/>' % (
                    scheduler.get('name', 'MISSING_NAME'), scheduler.get('info', 'MISSING_INFO'))
                s_schedulers_ko_info.append(s_scheduler_ko_info)
        
        if all_ko:
            output = HTMLList.header_list('The poller cannot join the schedulers', s_schedulers_ko)
            long_output = ''.join(s_schedulers_ko_info)
            result.add_check(EXIT_STATUS.CRITICAL, output=output, long_output=long_output)
            as_print_scheduler_info = True
        elif one_ko:
            output = ''.join((HTMLList.header_list('The poller cannot join the scheduler', s_schedulers_ko),
                              HTMLList.header_list('But it joins the schedulers', s_schedulers_ok)))
            result.add_check(EXIT_STATUS.WARNING, output=output, long_output=''.join(s_schedulers_ko_info))
            as_print_scheduler_info = True
        else:
            # All schedulers reachable: second pass checks their latency
            # against the active_poller_latency threshold.
            s_schedulers_ok = []
            s_schedulers_ko = []
            
            one_ko = False
            for scheduler in data.get('schedulers', []):
                latency = scheduler.get('latency', -1)
                s_scheduler = "%s [%s]&nbsp;:&nbsp;%sms" % (
                    scheduler.get('name', 'MISSING_NAME'), scheduler.get('addr', 'MISSING_ADRESSE'),
                    round(latency * 1000, 2))
                
                if latency > active_poller_latency:
                    s_scheduler += " (&nbsp;>&nbsp;%dms&nbsp;)" % (active_poller_latency * 1000)
                    s_schedulers_ko.append(HTMLTag.color_text(s_scheduler, COLOR.RED, bold=False))
                    one_ko = True
                else:
                    s_schedulers_ok.append(HTMLTag.color_text(s_scheduler, COLOR.GREEN, bold=False))
            
            if one_ko:
                output = HTMLList.header_list('The latency between the poller and schedulers is too high',
                                              s_schedulers_ko + s_schedulers_ok)
                result.add_check(EXIT_STATUS.WARNING, output)
                as_print_scheduler_info = True
    
    # Too many checks timed out: render the last offenders as an HTML table.
    if nb_check_in_timeout > check_tolerate:
        checks_in_timeout = []
        for check_in_timeout in data.get('checks_in_timeout', []):
            # Entry layout (from the daemon): [name, timeout, timestamp, count]
            # where name is 'host-//-check' or 'host-//-command' joined by -//-.
            commands = check_in_timeout[0].split('-//-')
            
            if len(commands) > 2:
                host_name = commands[0]
                check_name = commands[1]
            elif len(commands) > 1:
                host_name = commands[0]
                check_name = '<div class="skn-pahc"> check command( %s )</div>' % commands[1]
            else:
                host_name = commands[0]
                check_name = ''
            
            timeout = check_in_timeout[1]
            at = datetime.datetime.fromtimestamp(check_in_timeout[2]).strftime('%H:%M:%S')
            nb = check_in_timeout[3]
            
            checks_in_timeout.append((nb, at, host_name, check_name, timeout))
        
        output = ''.join((style,
                          'You have %d (&nbsp;>&nbsp;%d tolerate&nbsp;) check(s) that timed out in the last %s.<br/>The last actions that timed out&nbsp;:&nbsp;<br/>' % (
                              nb_check_in_timeout, check_tolerate, Utils.print_human_readable_period(keep_timeout_time)),
                          HTMLTable.table(('Nb', 'Last', 'Host', 'Check', 'Timeout'), checks_in_timeout) + BREAK_LINE))
        result.add_check(EXIT_STATUS.WARNING, output)
    
    executor_type_display = HTMLTag.color_text('[&nbsp;PASSIVE MODE&nbsp;]',
                                               COLOR.BLACK) if poller_type == 'PASSIVE' else ''
    result.add_check(output='Poller %s' % executor_type_display)
    result.add_check(output=HTMLList.simple_list(('tags&nbsp;:&nbsp;%s' % ', '.join(poller_tag),)), no_new_line=True)
    
    # Only show the per-scheduler latency list when it was not already
    # reported by one of the warning branches above.
    if not poller_type == 'PASSIVE' and data.get('schedulers', None) and not as_print_scheduler_info:
        output = HTMLList.header_list('The latency between the poller and the schedulers are', s_schedulers_ok)
        result.add_check(output=output, no_new_line=True)
    
    if result.status == EXIT_STATUS.OK:
        result.add_check(EXIT_STATUS.OK, 'Your poller is running well.', title=True)
    else:
        result.add_check(EXIT_STATUS.WARNING, 'Your poller have some issues.', title=True)
    
    if version:
        result.add_check(output='Version [%s]' % version)
    result.add_check(EXIT_STATUS.OK, 'Connection established in %.3fs.' % connexion_time)
    
    ShinkenUtils.add_module_info(result, data)
    result.exit()


# Spec at SEF-1053
def poller_stats():
    data = basic_check('poller')
    
    nb_action_done_per_sec = data.get('nb_action_done_per_sec', 0)
    executor_load = data.get('executor_load', 0)
    cpu_usage = data.get('cpu_usage', 0.) or 0.
    nb_cpus = data.get('nb_cpus', 0)
    ram_usage = int(round(data.get('ram_usage', 0.), 0))
    max_ram_usage = data.get('max_ram_usage', 0.)
    cpu_running_queue = int(data.get('cpu_running_queue', 0))
    max_cpu_queue_per_cpu = data.get('max_cpu_queue_par_cpu', 0.)
    platform = data.get('platform', '')
    
    checks_top_usage = data.get('checks_top_usage', [])
    exec_stats = data.get('exec_stats', {})
    
    cpu_usage_percent = 0 if nb_cpus == 0 else int(round(cpu_usage * 100 / nb_cpus, 0))
    poller_load_percent = 100 if executor_load > EXECUTOR_LOAD_LIMIT else 0
    stats = {
        'nb_action_done_per_sec': nb_action_done_per_sec,
        'poller_load_state'     : poller_load_percent,
        'cpu_usage_percent'     : cpu_usage_percent,
        'cpu_running_queue'     : cpu_running_queue,
        'used_ram_percent'      : ram_usage
    }
    
    style = '<style type="text/css">' \
            '.poller-stats-table, .poller-stats-table td, .poller-stats-table th {' \
            '   border:              1px solid #000000   !important;' \
            '   border-collapse:     collapse            !important;' \
            '   color:               #000000             !important;' \
            '}' \
            '.poller-stats-table {' \
            '    width:              90%                 !important;' \
            '}' \
            '.poller-stats-table-th {' \
            '    background-color :  #DDDDDD             !important;' \
            '    width:              auto                !important;' \
            '    max-width:          20%                 !important;' \
            '    padding:            2px                 !important;' \
            '    word-break:         break-word          !important;' \
            '    background-color:   #FFFFFF             !important;' \
            '}' \
            '.poller-stats-table-td {' \
            '    padding:            2px                 !important;' \
            '    width:              auto                !important;' \
            '    max-width:          20%                 !important;' \
            '    font-weight:        normal              !important;' \
            '    word-break:         break-word          !important;' \
            '    background-color:   #FFFFFF             !important;' \
            '}' \
            '.poller-stats-host-command {' \
            '    font-style:         italic              !important;' \
            '    color:              #7F7F7F             !important;' \
            '}' \
            '.poller-stats-table-center {' \
            '    text-align:         center;' \
            '}' \
            '</style>'
    
    poller_stats_infos = []
    result.set_spare_info(data)
    ShinkenUtils.check_arbiter_connection(result, uri, daemon_type, timeout=opts.timeout)
    
    if nb_action_done_per_sec:
        poller_stats_infos.append('[&nbsp;%s&nbsp;] Checks done per second.' % round(nb_action_done_per_sec, 2))
    else:
        poller_stats_infos.append('The poller has not launched any check in the last 60 seconds.')
    if platform == 'nt':
        poller_stats_infos.append('This poller is running on Windows, impossible to get CPU informations.')
    else:
        if executor_load:
            poller_stats_infos.append('[&nbsp;%s&nbsp;] Poller load.' % (HTMLTag.load_tag(executor_load)))
        else:
            poller_stats_infos.append('Poller load is unavailable, please wait for data.')
        if cpu_usage:
            poller_stats_infos.append(
                '[&nbsp;%s%%&nbsp;] Average CPU running of the checks on %d core.' % (cpu_usage_percent, nb_cpus))
        else:
            poller_stats_infos.append('Average CPU running time of the checks is unavailable, please wait for data.')
    if ram_usage:
        poller_stats_infos.append('[&nbsp;%-.0f%%&nbsp;&nbsp;%s] Average server RAM usage.' % (
            ram_usage, HTMLTag.ram_tag(ram_usage, max_ram_usage)))
    else:
        poller_stats_infos.append('Average RAM usage of the poller is unavailable, please wait for data.')
    if platform != 'nt':
        if cpu_running_queue:
            poller_stats_infos.append('[%i] Processes in the CPU Queue. %s' % (
                cpu_running_queue, HTMLTag.cpu_queue_tag(cpu_running_queue, max_cpu_queue_per_cpu, nb_cpus)))
        else:
            poller_stats_infos.append('CPU running queue of the poller can\'t be compute.')
    
    output = HTMLList.header_list('Poller statistics', poller_stats_infos)
    
    lines = []
    for check_top_usage in checks_top_usage:
        commands = check_top_usage[0].split('-//-')
        
        if len(commands) > 2:
            host_name = commands[0]
            check_name = commands[1]
        elif len(commands) > 1:
            host_name = commands[0]
            check_name = '<div class="poller-stats-host-command"> check command( %s )</div>' % commands[1]
        else:
            host_name = commands[0]
            check_name = ''
        
        cpu_time = check_top_usage[1] * 1000
        
        lines.append((host_name, check_name, "%dms" % cpu_time))
    
    long_output = style if len(lines) else ''
    
    if lines and nb_action_done_per_sec:
        long_output += HTMLTable.table(('Hosts', 'Check', 'Cpu time'), lines, 'Top 5 checks')
    
    if exec_stats and nb_action_done_per_sec:
        long_output += '<br>'
        lines = []
        headers = []
        
        start_range = 0
        exec_stat_ranges = exec_stats.keys()
        exec_stat_ranges = [int(k) for k in exec_stat_ranges]
        exec_stat_ranges.remove(-1)
        exec_stat_ranges.sort()
        for _range in exec_stat_ranges:
            exec_stat = exec_stats[str(_range)]
            headers.append('%s&nbsp;-&nbsp;%sms' % (start_range, _range))
            lines.append('%s<br/>(&nbsp;%s%%&nbsp;)' % (exec_stat[0], exec_stat[1]))
            stats['checks_per_cpu_running_time_%s_%s_ms' % (start_range, _range)] = exec_stat[0]
            start_range = _range
        
        exec_stat = exec_stats['-1']
        headers.append('+&nbsp;%sms' % start_range)
        lines.append('%s<br/>(&nbsp;%s%%&nbsp;)' % (exec_stat[0], exec_stat[1]))
        stats['checks_per_cpu_running_time_%s_ms_and_more' % start_range] = exec_stat[0]
        long_output += HTMLTable.table(headers, [lines], 'Nb Checks per CPU running time')
    
    result.set_perf_data(stats)
    result.add_check(output=output, long_output=long_output)
    result.exit()


def poller_api_connection():
    # Mode removed for pollers; kept only to emit an explicit deprecation notice.
    message = 'The mode "api_connection" is deprecated for this plugin version. Please upgrade shinken template.'
    result.hard_exit(EXIT_STATUS.WARNING, message)


def poller_cpu_load():
    # Mode removed for pollers; kept only to emit an explicit deprecation notice.
    message = 'The mode "cpu_load" is deprecated for this plugin version. Please upgrade shinken template.'
    result.hard_exit(EXIT_STATUS.WARNING, message)


def poller_overload_protection():
    # Mode removed for pollers; kept only to emit an explicit deprecation notice.
    message = 'The mode "overload_protection" is deprecated for this plugin version. Please upgrade shinken template.'
    result.hard_exit(EXIT_STATUS.WARNING, message)


# Spec at SEF-1053
def reactionner_alive(opts):
    start_time = time.time()
    data = basic_check('reactionner')
    connexion_time = time.time() - start_time
    
    # Threshold of latency between a scheduler and a active reactionner
    active_reactionner_latency = opts.active_reactionner_latency
    # Number of checks in timeout before the reactionner Running Well change this state to WARNING.
    check_tolerate = opts.check_tolerate
    
    s_schedulers_ok = []
    as_print_scheduler_info = False
    version = data.get('daemon_version', None)
    reactionner_type = data.get('type', 'MISSING_TYPE')
    reactionner_tag = data.get('tags', ['MISSING TAGS INFO'])
    keep_timeout_time = data.get('keep_timeout_time', 1200)
    nb_check_in_timeout = data.get('nb_check_in_timeout', 0)
    executor_in_overload = data.get('executor_in_overload', False)
    nb_action_done_per_sec = data.get('nb_action_done_per_sec', 0)
    
    stats = {'nb_check_in_timeout': nb_check_in_timeout}
    result.set_perf_data(stats)
    
    result.set_spare_info(data)
    ShinkenUtils.check_arbiter_connection(result, uri, daemon_type, timeout=opts.timeout)
    ShinkenUtils.add_warning_module_restart(result, data)
    ShinkenUtils.add_http_error_count_message(result, data)
    
    workers_restarts = data.get('workers_restarts', {})
    dead_workers = data.get('dead_worker_stat', {})
    
    if workers_restarts:
        list_dead_workers = []
        for woker_type, restart_data in workers_restarts.iteritems():
            woker_type = 'default worker type' if woker_type == 'fork' else woker_type
            list_dead_workers.append("'%s' restarted %s times" % (woker_type, len(restart_data)))
        output = HTMLList.header_list('In the last 24 hours, some workers died but were restarted:', list_dead_workers)
        result.add_check(EXIT_STATUS.WARNING, output)
    
    elif dead_workers:
        # Here the healthcheck ask a daemon who haven't the workers_restarts in get_raw_stats (deprecated)
        list_dead_workers = []
        for woker_type, restart_count in dead_workers.iteritems():
            woker_type = 'default worker type' if woker_type == 'fork' else woker_type
            list_dead_workers.append("'%s' restarted %s times" % (woker_type, restart_count))
        output = HTMLList.header_list('Since last restart, some workers died but were restarted:', list_dead_workers)
        result.add_check(EXIT_STATUS.WARNING, output)
    
    if executor_in_overload:
        result.add_check(EXIT_STATUS.WARNING,
                         'Your reactionner overload, it cannot make more than %d check by sec.%sYou should add a new reactionner%s' % (
                             nb_action_done_per_sec, BREAK_LINE, BREAK_LINE))
    
    if not reactionner_type == 'PASSIVE' and data.get('schedulers', None):
        s_schedulers_ko = []
        s_schedulers_ko_info = []
        all_ko = True
        one_ko = False
        for scheduler in data.get('schedulers', []):
            s_scheduler = "%s [%s]" % (scheduler.get('name', 'MISSING_NAME'), scheduler.get('addr', 'MISSING_ADRESSE'))
            
            if scheduler.get('con', False):
                all_ko = False
                s_schedulers_ok.append(HTMLTag.color_text(s_scheduler, COLOR.GREEN, bold=False))
            else:
                one_ko = True
                s_schedulers_ko.append(HTMLTag.color_text(s_scheduler, COLOR.RED, bold=False))
                s_scheduler_ko_info = '%s&nbsp;:&nbsp;<br/>%s<br/>-------<br/>' % (
                    scheduler.get('name', 'MISSING_NAME'), scheduler.get('info', 'MISSING_INFO'))
                s_schedulers_ko_info.append(s_scheduler_ko_info)
        if all_ko:
            output = HTMLList.header_list("The reactionner cannot join the schedulers)", s_schedulers_ko)
            long_output = "%s" % (''.join(s_schedulers_ko_info))
            result.add_check(EXIT_STATUS.CRITICAL, output=output, long_output=long_output)
            as_print_scheduler_info = True
        elif one_ko:
            output = ''.join((
                HTMLList.header_list('The reactionner cannot join the scheduler', s_schedulers_ko),
                HTMLList.header_list('But it joins the schedulers', s_schedulers_ok)))
            result.add_check(EXIT_STATUS.WARNING, output=output, long_output=''.join(s_schedulers_ko_info))
            as_print_scheduler_info = True
        else:
            s_schedulers_ok = []
            s_schedulers_ko = []
            
            one_ko = False
            for scheduler in data.get('schedulers', []):
                latency = scheduler.get('latency', -1)
                s_scheduler = "%s [%s]&nbsp;:&nbsp;%sms" % (
                    scheduler.get('name', 'MISSING_NAME'), scheduler.get('addr', 'MISSING_ADRESSE'),
                    round(latency * 1000, 2))
                
                if latency > active_reactionner_latency:
                    s_scheduler += " (&nbsp;>&nbsp;%dms&nbsp;)" % (active_reactionner_latency * 1000)
                    s_schedulers_ko.append(HTMLTag.color_text(s_scheduler, COLOR.RED, bold=False))
                    one_ko = True
                else:
                    s_schedulers_ok.append(HTMLTag.color_text(s_scheduler, COLOR.GREEN, bold=False))
            
            if one_ko:
                output = HTMLList.header_list('The latency between the reactionner and schedulers is too high',
                                              s_schedulers_ko + s_schedulers_ok)
                result.add_check(EXIT_STATUS.WARNING, output)
                as_print_scheduler_info = True
    
    if nb_check_in_timeout > check_tolerate:
        checks_in_timeout = []
        for check_in_timeout in data.get('checks_in_timeout', []):
            action_name = check_in_timeout[0]
            timeout = check_in_timeout[1]
            at = datetime.datetime.fromtimestamp(check_in_timeout[2]).strftime('%H:%M:%S')
            nb = check_in_timeout[3]
            checks_in_timeout.append((nb, at, action_name, timeout))
        
        output = ''.join((
            'You have %d (&nbsp;>&nbsp;%d tolerate&nbsp;) action(s) that timed out in the last %s.<br/>The last actions that timed out&nbsp;:&nbsp;<br/>' % (
                nb_check_in_timeout, check_tolerate, Utils.print_human_readable_period(keep_timeout_time)),
            HTMLTable.table(('Nb', 'At', 'Actions', 'Timeout'), checks_in_timeout) + BREAK_LINE))
        result.add_check(EXIT_STATUS.WARNING, output)
    
    executor_type_display = '[&nbsp;PASSIVE MODE&nbsp;]' if reactionner_type == 'PASSIVE' else ''
    result.add_check(output='Reactionner %s' % executor_type_display)
    result.add_check(output=HTMLList.simple_list(('tags&nbsp;:&nbsp;%s' % ', '.join(reactionner_tag),)),
                     no_new_line=True)
    
    if not reactionner_type == 'PASSIVE' and data.get('schedulers', None) and not as_print_scheduler_info:
        output = HTMLList.header_list('The latency between the reactionner and the schedulers are', s_schedulers_ok)
        result.add_check(output=output, no_new_line=True)
    
    if result.status == EXIT_STATUS.OK:
        result.add_check(EXIT_STATUS.OK, 'Your reactionner is running well.', title=True)
    else:
        result.add_check(EXIT_STATUS.WARNING, 'Your reactionner have some issues.', title=True)
    
    if version:
        result.add_check(output='Version [%s]' % version)
    result.add_check(EXIT_STATUS.OK, 'Connection established in %.3fs.' % connexion_time)
    
    ShinkenUtils.add_module_info(result, data)
    result.exit()


# Spec at SEF-1053
def reactionner_stats():
    """Performance check for a reactionner daemon (spec SEF-1053).

    Reports the number of actions done per second and the CPU percentage
    used for actions execution, exports both as perf data, and lists the
    top CPU-consuming actions in the long output. Never returns: ends with
    result.exit().
    """
    data = basic_check('reactionner')
    
    nb_action_done_per_sec = data.get('nb_action_done_per_sec', 0)
    cpu_usage = data.get('cpu_usage', 0)
    # -1 is the sentinel for "no action ever launched since daemon start"
    last_action_launch_time = data.get('last_action_launch_time', -1)
    checks_top_usage = data.get('checks_top_usage', [])
    
    stats = {'nb_action_done_per_sec': nb_action_done_per_sec, 'cpu_usage': cpu_usage}
    result.set_perf_data(stats)
    
    result.set_spare_info(data)
    ShinkenUtils.check_arbiter_connection(result, uri, daemon_type, timeout=opts.timeout)
    
    reactionner_stats_infos = []
    if nb_action_done_per_sec == 0:
        if last_action_launch_time == -1:
            reactionner_stats_infos.append('The reactionner has not launched any action since the daemon start.')
        else:
            reactionner_stats_infos.append(
                'The reactionner has not launched any action since %s.' % Utils.print_time(last_action_launch_time))
    else:
        nb_action_done_per_sec = round(nb_action_done_per_sec, 2)
        if nb_action_done_per_sec < 1:
            reactionner_stats_infos.append('Less than 1 action done per second')
        else:
            reactionner_stats_infos.append('[&nbsp;%s&nbsp;] Actions done per second.' % nb_action_done_per_sec)
        
        if cpu_usage == 0:
            reactionner_stats_infos.append(
                'CPU Percentage used among the server available resources for actions execution is unavailable, please wait for data.')
        else:
            cpu_usage = round(cpu_usage, 2)
            # BUGFIX: a reactionner executes actions, not checks. The two
            # messages below previously said "checks execution" (copy/paste
            # from the poller check), inconsistent with the message above.
            if cpu_usage < 1:
                reactionner_stats_infos.append(
                    'Less than 1% CPU used among the server available resources for actions execution.')
            else:
                reactionner_stats_infos.append(
                    '[&nbsp;%s%%&nbsp;] CPU Percentage used among the server available resources for actions execution.' % cpu_usage)
    output = HTMLList.header_list('Reactionner statistics', reactionner_stats_infos)
    
    result.add_check(output=output, no_new_line=True)
    
    # Long output: top CPU consumers, entries are (action_name, cpu_time_in_s)
    lines = []
    for check_top_usage in checks_top_usage:
        lines.append((check_top_usage[0], '%dms' % (check_top_usage[1] * 1000)))
    
    if lines and nb_action_done_per_sec != -1.0:
        long_output = HTMLTable.table(('Actions', 'Cpu time'), lines, 'Top 5 actions')
        result.add_check(long_output=long_output)
    
    result.exit()


def reactionner_api_connection():
    """Deprecated mode kept for old templates: always exits WARNING."""
    deprecation_message = ('The mode "api_connection" is deprecated for this plugin version.'
                           ' Please upgrade shinken template.')
    result.hard_exit(EXIT_STATUS.WARNING, deprecation_message)


def reactionner_cpu_load():
    """Deprecated mode kept for old templates: always exits WARNING."""
    deprecation_message = ('The mode "cpu_load" is deprecated for this plugin version.'
                           ' Please upgrade shinken template.')
    result.hard_exit(EXIT_STATUS.WARNING, deprecation_message)


def reactionner_overload_protection():
    """Deprecated mode kept for old templates: always exits WARNING."""
    deprecation_message = ('The mode "overload_protection" is deprecated for this plugin version.'
                           ' Please upgrade shinken template.')
    result.hard_exit(EXIT_STATUS.WARNING, deprecation_message)


# Spec at SEF-1143
def scheduler_alive(opts):
    """Liveness check for a scheduler daemon (spec SEF-1143).

    Checks for late checks (with a breakdown by poller tag), connectivity
    and latency toward passive pollers, then summarizes the number of
    scheduled elements. Exports perf data and never returns: ends with
    result.exit().

    :param opts: parsed command line options (uses opts.timeout and
                 opts.passive_poller_latency)
    """
    start_time = time.time()
    data = basic_check('scheduler')
    connexion_time = time.time() - start_time
    
    # Threshold of latency between a scheduler and a active poller
    passive_poller_latency = opts.passive_poller_latency
    
    version = data.get('daemon_version', None)
    hosts = data['nb_hosts']
    clusters = data['nb_clusters']
    checks = data['nb_checks']
    passive_pollers = data.get('passive_pollers', [])
    late_checks = data['late_checks']
    average_latency = data['average_latency']
    realm = data['realm']
    late_checks_by_tags = data.get('late_checks_by_tags', {})
    total = hosts + clusters + checks
    
    stats = {
        'nb_late'    : late_checks,
        'nb_checks'  : checks,
        'nb_clusters': clusters,
        'nb_hosts'   : hosts
    }
    result.set_perf_data(stats)
    
    result.set_spare_info(data)
    ShinkenUtils.check_arbiter_connection(result, uri, daemon_type, timeout=opts.timeout)
    ShinkenUtils.add_warning_module_restart(result, data)
    ShinkenUtils.add_http_error_count_message(result, data)
    
    # Any late check downgrades the result to WARNING, with a per-tag breakdown.
    if late_checks:
        # NOTE(review): Python 2 integer division — the percentage is truncated.
        percent_check = late_checks * 100 / total
        list_late_checks_by_tags = ['Tag %s&nbsp;:&nbsp;%s checks' % (tag, nb_lates) for tag, nb_lates in
                                    late_checks_by_tags.iteritems()]
        
        late_checks_info = ''.join((
            'There are %s ( %s%% ) checks late!' % (HTMLTag.color_text(late_checks, COLOR.ORANGE), percent_check),
            BREAK_LINE,
            HTMLList.header_list('Late checks grouped by poller tags&nbsp;', list_late_checks_by_tags),
            'Average time before a check is started in a poller %ss.' % round(average_latency, 2)
        
        ))
        result.add_check(EXIT_STATUS.WARNING, late_checks_info)
    
    # Classify each passive poller by connectivity, then by latency threshold.
    if passive_pollers:
        s_pollers_con_ok = []
        s_pollers_con_ko = []
        s_pollers_latency_ok = []
        s_pollers_latency_ko = []
        s_pollers_con_ko_info = []
        
        all_con_ko = True
        one_con_ko = False
        one_latency_ko = False
        for poller in passive_pollers:
            # NOTE(review): latency defaults to -1 when missing, which renders
            # as "-1000ms" in s_poller_latency below — confirm this is intended.
            latency = poller.get('latency', -1)
            con = poller.get('con', False)
            s_poller_con = "%s [%s]" % (poller.get('name', 'MISSING_NAME'), poller.get('addr', 'MISSING_ADRESSE'))
            s_poller_latency = "%s [%s]&nbsp;:&nbsp;%sms" % (
                poller.get('name', 'MISSING_NAME'), poller.get('addr', 'MISSING_ADRESSE'), round(latency * 1000, 2))
            
            if con:
                all_con_ko = False
                s_pollers_con_ok.append(HTMLTag.color_text(s_poller_con, COLOR.GREEN, bold=False))
            else:
                one_con_ko = True
                s_pollers_con_ko.append(HTMLTag.color_text(s_poller_con, COLOR.ORANGE, bold=False))
                s_poller_ko_info = '%s&nbsp;:&nbsp;<br/>%s<br/>-------<br/>' % (
                    poller.get('name', 'MISSING_NAME'), poller.get('info', 'MISSING_INFO'))
                s_pollers_con_ko_info.append(s_poller_ko_info)
            
            if latency > passive_poller_latency:
                s_poller_latency += " (&nbsp;>&nbsp;%dms&nbsp;)" % (passive_poller_latency * 1000)
                s_pollers_latency_ko.append(HTMLTag.color_text(s_poller_latency, COLOR.ORANGE))
                one_latency_ko = True
            else:
                s_pollers_latency_ok.append(HTMLTag.color_text(s_poller_latency, COLOR.GREEN, bold=False))
        
        # Severity ladder: all unreachable => CRITICAL, some unreachable or
        # high latency => WARNING, otherwise informational output only.
        if all_con_ko:
            text = HTMLList.header_list("The scheduler cannot join all passive pollers", s_pollers_con_ko)
            result.add_check(EXIT_STATUS.CRITICAL, output=text, long_output=''.join(s_pollers_con_ko_info))
        elif one_con_ko:
            text = HTMLList.header_list('The scheduler cannot join some passive pollers', s_pollers_con_ko)
            text += HTMLList.header_list('But it joins passive pollers', s_pollers_con_ok)
            result.add_check(EXIT_STATUS.WARNING, output=text, long_output=''.join(s_pollers_con_ko_info))
        elif one_latency_ko:
            text = HTMLList.header_list('The latency is too high between some passive pollers and the scheduler',
                                        s_pollers_latency_ko + s_pollers_latency_ok)
            result.add_check(EXIT_STATUS.WARNING, output=text, long_output=''.join(s_pollers_con_ko_info))
        else:
            latency_info = HTMLList.header_list('The latency between the scheduler and the passive pollers are',
                                                s_pollers_latency_ok)
            result.add_check(output=latency_info)
    
    # Title line depends on the status accumulated by the add_check calls above.
    if result.status == EXIT_STATUS.OK:
        result.add_check(EXIT_STATUS.OK, 'Your scheduler is running well.', title=True)
    else:
        result.add_check(EXIT_STATUS.WARNING, 'Your scheduler have some issues.', title=True)
    
    elements_info = HTMLTable.table(('Hosts', 'Clusters', 'Checks', 'Total'), [(hosts, clusters, checks, total)],
                                    'Element in the scheduler')
    realm_info = 'Scheduler on realm %s.' % realm
    
    result.add_check(output=elements_info)
    result.add_check(output=realm_info)
    if version:
        result.add_check(output='Version [%s]' % version)
    result.add_check(EXIT_STATUS.OK, 'Connection established in %.3fs.' % connexion_time)
    
    ShinkenUtils.add_module_info(result, data)
    result.exit()


# Spec at SEF-1143
def scheduler_stats():
    """Performance check for a scheduler daemon (spec SEF-1143).

    Reports the scheduler CPU usage and retention save status, lists
    CPU-greedy checks, warns about rogue satellites, builds a table per
    attached poller/reactionner, and summarizes scheduled checks per
    second classified by cause. Exports perf data and never returns:
    ends with result.exit().
    """
    data = basic_check('scheduler')
    
    color_orange = '<div style="color:#FF8F00;font-weight: bold;">%s</div>'
    style = '''<style>.scheduler-stats-heading {font-size: 1.2em !important;font-weight: bold !important;margin: 15px 0 10px 0 !important;} .scheduler-stats-table, .scheduler-stats-table td, .scheduler-stats-table th {border: 1px solid #000000 !important;border-collapse: collapse !important;word-break: break-word !important;color: #000000 !important;margin-top: 4px !important;} .scheduler-stats-table {width: 100% !important;} .scheduler-stats-table th {text-align: center} .scheduler-stats-table th, .scheduler-stats-table td {background-color: #FFFFFF !important;} .scheduler-stats-table.same-with-col th, .scheduler-stats-table.same-with-col td {width: 11% !important} .scheduler-stats-table th.back-grey-sub-info {background-color: #E8E7E7 !important;} .scheduler-stats-table td {background-color: #FFFFFF !important;font-weight: normal !important;padding-left: 5px !important;padding-right: 5px !important;} .scheduler-stats-table .back-grey, .scheduler-stats-table .back-grey td, .scheduler-stats-table .back-grey th {background-color: #DDDDDD !important;} .scheduler-stats-table .invert-color td {background-color: #000000 !important;color: #FFFFFF !important;}</style>'''
    
    result.set_spare_info(data)
    ShinkenUtils.check_arbiter_connection(result, uri, daemon_type, timeout=opts.timeout)
    
    result.add_check(output=style, no_new_line=True)
    
    arbiter_uri = data.get('arbiter_uri', '')
    checks_todo_by_sec = data.get('checks_todo_by_sec', 0)
    notifications_todo_by_sec = data.get('notifications_todo_by_sec', 0)
    average_latency = data.get('average_latency', 0)
    info_pollers = data.get('info_pollers', [])
    info_reactionners = data.get('info_reactionners', [])
    loop_turn_time_avg = data.get('loop_turn_time_avg', 0)
    rogue_pollers = data.get('rogue_pollers', {})
    rogue_reactionners = data.get('rogue_reactionners', {})
    checks_warning_threshold_cpu_usage = data.get('checks_warning_threshold_cpu_usage', {})
    checks_warning_threshold_cpu_usage_nb = data.get('checks_warning_threshold_cpu_usage_nb', {})
    save_retention_time = data.get('save_retention_time', 0)
    save_retention_error = data.get('save_retention_error', '')
    last_retention_save = data.get('last_retention_save', '')
    
    # BUGFIX: previously a plain dict literal was built and then wrapped in
    # collections.OrderedDict. Plain dicts are unordered in Python 2, so the
    # "by causes" table order was nondeterministic. Build from pairs instead.
    avg_checks_cause_by_sec = collections.OrderedDict((
        ('schedule', data.get('avg_checks_received_schedule_by_sec', 0)),
        ('force', data.get('avg_checks_received_force_by_sec', 0)),
        ('retry', data.get('avg_checks_received_retry_by_sec', 0)),
        ('dependency', data.get('avg_checks_received_dependency_by_sec', 0)),
    ))
    
    nb_poller = len(info_pollers)
    nb_reactionner = len(info_reactionners)
    nb_poller_in_overload = 0
    nb_reactionner_in_overload = 0
    checks_done_by_sec = 0
    notifications_done_by_sec = 0
    error_message = ''
    info_satellites = {}
    
    exit_status = EXIT_STATUS.OK
    msg_scheduler_cpu_usage = ['<div class="scheduler-stats-heading">Scheduler performance&nbsp;:&nbsp;</div>', BREAK_LINE]
    # Check for global time loop. If more than 1s, warn the user, but display all in percent
    if (loop_turn_time_avg * 100) > 100:
        exit_status = EXIT_STATUS.WARNING
        msg_scheduler_cpu_usage.extend((
            '- Average scheduler CPU usage: %d%%' % min(loop_turn_time_avg * 100, 100),
            color_orange % ' + %d%% estimated CPU usage overload.' % (loop_turn_time_avg * 100 - 100),
            BREAK_LINE
        ))
    else:
        msg_scheduler_cpu_usage.extend(
            ('- Average scheduler CPU usage: %d%%' % min(loop_turn_time_avg * 100, 100), BREAK_LINE))
    
    msg_scheduler_cpu_usage.extend(
        ('- Average time before a check is started in a poller %ss' % round(average_latency, 2), BREAK_LINE))
    
    if last_retention_save:
        msg_scheduler_cpu_usage.extend(
            ('- Date of the last retention save : %s' % last_retention_save, BREAK_LINE))
    else:
        msg_scheduler_cpu_usage.extend((HTMLTag.color_text('- Your retention is not yet saved', COLOR.ORANGE), BREAK_LINE))
    
    # Retention state: error => CRITICAL, -1 => save in progress,
    # > 0 => duration of the last save (warn when it took too long).
    if save_retention_error:
        exit_status = EXIT_STATUS.CRITICAL
        error_msg = '- Last retention save was in error : %s.' % save_retention_error
        msg_scheduler_cpu_usage.append(HTMLTag.color_text(error_msg, COLOR.RED))
    elif save_retention_time == -1:
        msg_scheduler_cpu_usage.append('- Retention save in progress')
    elif save_retention_time > 0:
        _msg_ret = Utils.print_human_readable_period(save_retention_time)
        _msg_ret = 'less than 1s' if _msg_ret == '0s' else _msg_ret
        if save_retention_time > 100:
            exit_status = EXIT_STATUS.WARNING
            _msg_ret = HTMLTag.color_text(_msg_ret, COLOR.ORANGE)
        msg_scheduler_cpu_usage.append('- Last retention save time : %s.' % _msg_ret)
        if save_retention_time > 100:
            msg_scheduler_cpu_usage.append(' Your retention save was long.')
        msg_scheduler_cpu_usage.append(BREAK_LINE)
    
    result.add_check(exit_status, output=''.join(msg_scheduler_cpu_usage))
    
    # Table of the last checks that exceeded their CPU-time threshold.
    if checks_warning_threshold_cpu_usage:
        msg_checks_warning_threshold_cpu_usage = '<div class="scheduler-stats-heading">There are %s checks that take too much CPU time. Last 5 checks in warning : </div><br/>' % checks_warning_threshold_cpu_usage_nb
        msg_checks_warning_threshold_cpu_usage += '<table class="scheduler-stats-table">' \
                                                  '<tr>' \
                                                  '<th class="back-grey">Command name</th>' \
                                                  '<th class="back-grey">CPU consumed (in sec)</th>' \
                                                  '<th class="back-grey">CPU threshold (in sec)</th>' \
                                                  '<th class="back-grey">At</th>' \
                                                  '</tr>'
        for entry in checks_warning_threshold_cpu_usage:
            msg_checks_warning_threshold_cpu_usage += '<tr>' \
                                                      ' <td>%s</td>' \
                                                      ' <td class="number">%s</td>' \
                                                      ' <td class="number">%s</td>' \
                                                      ' <td>%s</td>' \
                                                      '</tr>' % (entry[0], entry[1], entry[2], Utils.print_time(entry[3]))
        msg_checks_warning_threshold_cpu_usage += "</table>"
        result.add_check(EXIT_STATUS.WARNING, output=msg_checks_warning_threshold_cpu_usage)
    
    # Some unknown satellites talk to us, this shouldn't be a normal case
    if rogue_pollers or rogue_reactionners:
        message = ['Some unknown daemons are currently contacting this Scheduler.',
                   'This may be a temporary problem that can be fixed by restarting the listed daemons, or be an another unrelated issue (network problems for exemple)']
        if rogue_pollers:
            message.append('<table class="scheduler-stats-table" >%s</table>' % ''.join(
                _table_rogue_stats('pollers', rogue_pollers)))
        if rogue_reactionners:
            message.append('<table class="scheduler-stats-table" >%s</table>' % ''.join(
                _table_rogue_stats('reactionners', rogue_reactionners)))
        result.add_check(EXIT_STATUS.WARNING, BREAK_LINE.join(message))
    
    # Ask the arbiter for the satellites list so we can reach each executor.
    scheduler_satellite = ['<div class="scheduler-stats-heading">Scheduler Satellite&nbsp;:&nbsp;</div><br/>']
    if arbiter_uri:
        arbiter_uri = arbiter_uri.replace("127.0.0.1", address)
        arbiter_uri = arbiter_uri.replace("localhost", address)
        try:
            buf, _ = Utils.request_get(result, arbiter_uri, '/get_satellites', timeout=opts.timeout, raise_exp=True)
            info_satellites = json.loads(buf)
        except:
            error_message = 'Failed to connect to [%s]<br>Note : <div style="font-style: italic;">Address retrieved from arbiter configuration.</div>' % arbiter_uri
            scheduler_satellite.append(error_message)
    else:
        error_message = 'Missing arbiter uri. please upgrade your scheduler'
        scheduler_satellite.append(error_message)
    
    if error_message:
        result.add_check(EXIT_STATUS.WARNING, output=''.join(scheduler_satellite))
    else:
        if nb_poller == 0:
            scheduler_satellite.append('No pollers found for this scheduler! Please wait pollers will connect to the scheduler.<br/>')
        else:
            checks_done_by_sec = sum([info_poller['done_by_sec'] for info_poller in info_pollers])
            lines, executor_in_overload, unreachable_pollers = _table_executor_stats(checks_done_by_sec, checks_todo_by_sec, info_pollers, info_satellites, nb_poller, 'poller')
            scheduler_satellite.append('<table class="scheduler-stats-table" >%s</table><br/>' % ''.join(lines))
            if executor_in_overload:
                nb_poller_in_overload += 1
            if unreachable_pollers:
                result.add_check(EXIT_STATUS.WARNING, output=HTMLList.header_list('The check failed to connect to poller(s)', unreachable_pollers), no_new_line=True)
        
        if nb_reactionner == 0:
            scheduler_satellite.append('No reactionner found for this scheduler! Please wait reactionners will connect to the scheduler.<br/>')
        else:
            notifications_done_by_sec = sum([info_reactionner['done_by_sec'] for info_reactionner in info_reactionners])
            lines, executor_in_overload, unreachable_reactionners = _table_executor_stats(notifications_done_by_sec, notifications_todo_by_sec, info_reactionners, info_satellites, nb_reactionner, 'reactionner')
            scheduler_satellite.append('<table class="scheduler-stats-table">%s</table><br/>' % ''.join(lines))
            if executor_in_overload:
                nb_reactionner_in_overload += 1
            if unreachable_reactionners:
                result.add_check(EXIT_STATUS.WARNING, output=HTMLList.header_list('The check failed to connect to reactionner(s)', unreachable_reactionners), no_new_line=True)
        
        result.add_check(EXIT_STATUS.OK, output=''.join(scheduler_satellite))
    
    check_by_cause = ''
    # add the check cause by seconds
    check_by_cause += '<div class="scheduler-stats-heading">Scheduled checks per second classified by causes&nbsp;:&nbsp;</div>'
    check_by_cause += '<table class="scheduler-stats-table">' \
                      '<tr>' \
                      '<th class="back-grey">Causes</th>' \
                      '<th class="back-grey">Checks per second</th>' \
                      '</tr>'
    total = 0
    for check_cause, avg_per_sec in avg_checks_cause_by_sec.iteritems():
        total += avg_per_sec
        check_by_cause += '<tr>' \
                          ' <td>%s</td>' \
                          ' <td class="number">%0.2f/s</td>' \
                          '</tr>' % (check_cause, round(avg_per_sec, 2))
    check_by_cause += '<tr class="invert-color">' \
                      ' <td>Total</td>' \
                      ' <td>%0.2f/s</td>' \
                      '</tr>' % round(total, 2)
    check_by_cause += "</table>"
    result.add_check(output=check_by_cause)
    
    stats = {
        'checks_todo_by_sec'                      : checks_todo_by_sec,
        'checks_done_by_sec'                      : checks_done_by_sec,
        'notifications_todo_by_sec'               : notifications_todo_by_sec,
        'notifications_done_by_sec'               : notifications_done_by_sec,
        'average_scheduler_cpu_usage'             : min(100, loop_turn_time_avg * 100),
        'average_scheduler_cpu_estimated_overload': max(0, (loop_turn_time_avg * 100) - 100),
        'nb_poller'                               : nb_poller,
        'nb_poller_in_overload'                   : nb_poller_in_overload,
        'nb_reactionner'                          : nb_reactionner,
        'nb_reactionner_in_overload'              : nb_reactionner_in_overload,
    }
    if save_retention_time > 0:
        stats['save_retention_time'] = save_retention_time
    result.set_perf_data(stats)
    
    if result.status == EXIT_STATUS.OK:
        result.add_check(EXIT_STATUS.OK, 'The scheduler is running well.', title=True)
    else:
        result.add_check(EXIT_STATUS.WARNING, 'The scheduler have some issues.', title=True)
    
    result.exit()


def _table_rogue_stats(executor_type, info_rogue_satellites):
    """Build HTML table rows listing rogue (unknown) satellites.

    :param executor_type: satellite family name shown in the header
                          ('pollers' or 'reactionners')
    :param info_rogue_satellites: mapping of satellite name -> last contact
                                  timestamp
    :return: list of '<tr>...</tr>' strings, header row first
    """
    header_row = '<tr>' \
                 '<th class="back-grey">rogue %s name</th>' \
                 '<th class="back-grey">last check date</th>' \
                 '</tr>' % (executor_type,)
    rows = [header_row]
    for satellite_name, last_contact in info_rogue_satellites.iteritems():
        rows.append('<tr><td>%s</td><td>%s</td></tr>' % (satellite_name, Utils.print_time(last_contact)))
    return rows


def _table_executor_stats(actions_done_by_sec, actions_todo_by_sec, info_executors, info_satellites, nb_executor, executor_type):
    """Build the HTML table rows describing each poller/reactionner.

    For each executor, queries its /get_raw_stats endpoint (via the URI
    published by the arbiter) and renders a row with throughput, CPU, RAM
    and load information. Unreachable or version-mismatched executors get
    an error/warning row instead.

    :param actions_done_by_sec: total actions done per second (for the Total row)
    :param actions_todo_by_sec: total actions to do per second (rowspan cell)
    :param info_executors: executor entries known by the scheduler
    :param info_satellites: satellites list from the arbiter (name/type/uri)
    :param nb_executor: number of executors (rowspan of the "todo" cell)
    :param executor_type: 'poller' or 'reactionner'
    :return: (lines, executor_in_overload, unreachable_executor) where lines
             is the list of '<tr>...</tr>' strings.
             NOTE(review): executor_in_overload reflects only the LAST
             executor processed, not "any executor" — confirm with callers.
    """
    action_name = 'checks' if executor_type == 'poller' else 'notifications'
    lines = []
    executor_in_overload = False
    unreachable_executor = []
    first_line = '<tr>' \
                 '<th class="back-grey" rowspan="2" style="width: 8%% !important;">%s name</th>' \
                 '<th class="back-grey" rowspan="2" style="width: 8%% !important;">realm</th>' \
                 '<th class="back-grey" rowspan="2" style="width: 8%% !important;">tags</th>' \
                 '<th class="back-grey" rowspan="2" style="width: 8%% !important;">%s todo</th>' \
                 '<th class="back-grey" rowspan="2" style="width: 8%% !important;">%s done</th>' \
                 '<th class="back-grey" colspan="2" style="width: 45%% !important;">CPU</th>' \
                 '<th class="back-grey" rowspan="1" style="width: 15%% !important;">RAM</th>' \
                 '<th class="back-grey" rowspan="1" style="width: 15%% !important;">LOAD</th>' \
                 '</tr>' % (executor_type, action_name, action_name)
    lines.append(first_line)
    seconde_line = '<tr>' \
                   '<th class="back-grey-sub-info" style="width: 15%% !important;">available</th>' \
                   '<th class="back-grey-sub-info" style="width: 15%% !important;">used by the daemon %s</th>' \
                   '<th class="back-grey-sub-info" style="width: 15%% !important;">%% used on the server</th>' \
                   '<th class="back-grey-sub-info" style="width: 15%% !important;"> on the server</th>' \
                   '</tr>' % (executor_type)
    lines.append(seconde_line)
    
    for line_number, info_executor in enumerate(info_executors):
        # The first data row carries the rowspan'd "actions todo" cell;
        # subsequent rows omit it. Windows hosts get a merged CPU cell.
        if line_number == 0:
            line = '<tr class="%s">' \
                   '<td>%s</td>' \
                   '<td>%s</td>' \
                   '<td>%s</td>' \
                   '<td rowspan="' + str(nb_executor) + '" class="number">' + '%0.2f' % (round(actions_todo_by_sec, 2)) + \
                   '/s</td>' \
                   '<td class="number">%s/s</td>' \
                   '<td>%s</td>' \
                   '<td>%s</td>' \
                   '<td>%s</td>' \
                   '<td>%s</td>' \
                   '</tr>'
            # BUGFIX: the rowspan'd "todo" cell was left unclosed and missing
            # its '/s' unit in the Windows variant (the non-Windows template
            # above has the '/s</td>' piece) — producing malformed HTML.
            windows_line = '<tr class="%s">' \
                           '<td>%s</td>' \
                           '<td>%s</td>' \
                           '<td>%s</td>' \
                           '<td rowspan="' + str(nb_executor) + '" class="number">' + '%0.2f' % (round(actions_todo_by_sec, 2)) + \
                           '/s</td>' \
                           '<td class="number">%s/s</td>' \
                           '<td colspan="3" style="text-align: center;">%s</td>' \
                           '<td>%s</td>' \
                           '</tr>'
        else:
            line = '<tr class="%s">' \
                   '<td>%s</td>' \
                   '<td>%s</td>' \
                   '<td>%s</td>' \
                   '<td class="number">%s/s</td>' \
                   '<td>%s</td>' \
                   '<td>%s</td>' \
                   '<td>%s</td>' \
                   '<td>%s</td>' \
                   '</tr>'
            windows_line = '<tr class="%s">' \
                           '<td>%s</td>' \
                           '<td>%s</td>' \
                           '<td>%s</td>' \
                           '<td class="number">%s/s</td>' \
                           '<td colspan="3" style="text-align: center;">%s</td>' \
                           '<td>%s</td>' \
                           '</tr>'
        # Defaults used when the executor cannot be queried (-1 = unknown).
        nb_cpus = -1
        cpu_usage = -1
        executor_load = -1.0
        realm = info_executor.get('realm', '')
        tags = info_executor.get('tags', '')
        name = info_executor['name']
        cpu_running_queue = -1
        max_cpu_queue_per_cpu = 4
        ram_usage = -1.0
        max_ram_usage = 100.0
        error_message = ''
        executor_uri = next((x for x in info_satellites if x['name'] == info_executor['name'] and x['type'] == executor_type), {}).get('uri', '')
        if executor_uri:
            executor_uri = executor_uri.replace("127.0.0.1", address)
            executor_uri = executor_uri.replace("localhost", address)
            try:
                raw_info_from_executor = get_raw_stat_with_exp(_uri=executor_uri)
                daemon_api_version = raw_info_from_executor.get('api_version', None)
                arbiter_version = raw_info_from_executor.get('arbiter_version', None)
                daemon_version = raw_info_from_executor.get('daemon_version', None)
                nb_cpus = raw_info_from_executor.get('nb_cpus', -1)
                cpu_usage = raw_info_from_executor.get('cpu_usage', -1)
                cpu_running_queue = raw_info_from_executor.get('cpu_running_queue', -1)
                # NOTE(review): 'max_cpu_queue_par_cpu' ("par" not "per") looks
                # like a typo'd key, but it must match what the daemon sends —
                # confirm against the daemon side before renaming.
                max_cpu_queue_per_cpu = raw_info_from_executor.get('max_cpu_queue_par_cpu', -1.0)
                ram_usage = raw_info_from_executor.get('ram_usage', -1.0)
                max_ram_usage = raw_info_from_executor.get('max_ram_usage', -1.0)
                executor_load = raw_info_from_executor.get('executor_load', -1.0)
                realm = raw_info_from_executor.get('realm', '')
                tags = ','.join(raw_info_from_executor.get('tags', []))
                schedulers = raw_info_from_executor.get('schedulers', None)
                platform = raw_info_from_executor.get('platform', '')
                spare = raw_info_from_executor.get('spare', False)
                activated = raw_info_from_executor.get('activated', True)
                have_conf = raw_info_from_executor.get('have_conf', False)
                if spare:
                    name = "%s <b>(SPARE)" % name
                    if activated:
                        name += "(RUNNING)"
                    name += '</b>'
                if not have_conf:
                    error_message = "No configuration given by an Arbiter for now."
            except:
                error_message = 'UNREACHABLE : Failed to connect to [%s]<br>Note : <div style="font-style: italic;">Address retrieved from %s configuration.</div>' % (executor_uri, executor_type)
                unreachable_executor.append('%s at %s' % (name, executor_uri))
        else:
            error_message = 'UNREACHABLE : Failed to get URI of the %s [%s]' % (executor_type, info_executor['name'])
        # Render an error row (red) and skip all stats for this executor.
        if error_message:
            error_message = HTMLTag.tag_value(error_message, COLOR.RED)
            if line_number == 0:
                line = '<tr>' \
                       '<td>%s</td>' \
                       '<td>%s</td>' \
                       '<td>%s</td>' \
                       '<td rowspan="' + str(nb_executor) + '" class="number">' + '%0.2f' % round(actions_todo_by_sec, 2) + \
                       '/s</td>' \
                       '<td colspan="5" style="text-align: center;">' + error_message + \
                       '</td>' \
                       '</tr>'
            else:
                line = '<tr>' \
                       '<td>%s</td>' \
                       '<td>%s</td>' \
                       '<td>%s</td>' \
                       '<td colspan="5" style="text-align: center;">' + error_message + \
                       '</td>' \
                       '</tr>'
            realm = realm or '-'
            tags = tags or '-'
            line = line % (name, realm, tags)
            lines.append(line)
            continue
        
        message = ShinkenUtils._check_versions(executor_type, daemon_api_version, daemon_version, arbiter_version, shinken_supervisor_version)
        
        # Version mismatch: render a warning row (orange) and skip the stats.
        if message:
            warning_message = 'WARNING : ' + message
            warning_message = HTMLTag.tag_value(warning_message, COLOR.ORANGE)
            if line_number == 0:
                line = '<tr>' \
                       '<td>%s</td>' \
                       '<td>%s</td>' \
                       '<td>%s</td>' \
                       '<td rowspan="' + str(nb_executor) + '" class="number">' + '%0.2f' % (round(actions_todo_by_sec, 2)) + \
                       '/s</td>' \
                       '<td colspan="5" style="text-align: center;">' + warning_message + \
                       '</td>' \
                       '</tr>'
            else:
                line = '<tr>' \
                       '<td>%s</td>' \
                       '<td>%s</td>' \
                       '<td>%s</td>' \
                       '<td colspan="5" style="text-align: center;">' + warning_message + \
                       '</td>' \
                       '</tr>'
            realm = realm or '-'
            tags = tags or '-'
            line = line % (name, realm, tags)
            lines.append(line)
            continue
        
        # special case for windows for ex, cpu-usage is not available but give us 0.0
        if info_executor['done_by_sec'] and cpu_usage == 0.0:
            cpu_usage = None
            executor_load = -1
        
        executor_in_overload = executor_load > EXECUTOR_LOAD_LIMIT
        executor_in_overram = ram_usage > max_ram_usage
        # '&#216;' (slashed O) marks an unavailable metric.
        cpu_usage = '&#216;' if cpu_usage is None or cpu_usage == -1 else int(round(cpu_usage * 100 / nb_cpus, 0))
        cpu_usage = '%s%% maximal usable (of %d core)' % (
            cpu_usage, nb_cpus) if executor_in_overload else '%s%% (of %d core)' % (cpu_usage, nb_cpus)
        line_class = 'overload' if executor_in_overload or executor_in_overram else ''
        executor_load = -1 if executor_load == -1 else round(executor_load, 2)
        
        if cpu_running_queue and cpu_running_queue != -1:
            cpu_running_queue_text = HTMLTag.cpu_queue_tag(cpu_running_queue, max_cpu_queue_per_cpu,
                                                           nb_cpus) + '<br>' + '%s Processes in the queue (limit : %s)' % (
                                         cpu_running_queue, (max_cpu_queue_per_cpu * nb_cpus))
        else:
            cpu_running_queue_text = HTMLTag.cpu_queue_tag(cpu_running_queue, max_cpu_queue_per_cpu,
                                                           nb_cpus) + '<br>' + 'CPU running queue of the %s can\'t be compute.' % executor_type
        
        ram_usage_text = '&#216;' if ram_usage is None or ram_usage == -1.0 else '%.0f%%' % ram_usage
        if platform == 'nt':
            line = windows_line % (
                line_class,
                name,
                realm,
                tags,
                round(info_executor['done_by_sec'], 2),
                'This poller is running on Windows, impossible to get CPU informations.',
                HTMLTag.ram_tag(ram_usage, max_ram_usage) + '<br>' + ram_usage_text + ' (limit :%.0f%%)' % max_ram_usage,
            )
        else:
            line = line % (
                line_class,
                name,
                realm,
                tags,
                '%0.2f' % round(info_executor['done_by_sec'], 2),
                HTMLTag.load_tag(executor_load),
                cpu_usage,
                HTMLTag.ram_tag(ram_usage, max_ram_usage) + '<br>' + ram_usage_text + ' (limit :%.0f%%)' % max_ram_usage,
                cpu_running_queue_text,
            )
        lines.append(line)
    # Closing total row (inverted colors).
    last_line = '<tr class="invert-color">' \
                '<td>Total</td>' \
                '<td colspan="2"></td>' \
                '<td>%s/s</td>' \
                '<td>%s/s</td>' \
                '<td colspan=4></td>' \
                '</tr>' % \
                ('%0.2f' % round(actions_todo_by_sec, 2), '%0.2f' % round(actions_done_by_sec, 2))
    lines.append(last_line)
    return lines, executor_in_overload, unreachable_executor


def scheduler_latency():
    """Deprecated scheduler mode: unconditionally exits with a WARNING
    pointing the operator at the replacement check."""
    deprecation_message = ('The mode "latency" is deprecated for this plugin version. '
                           'Please upgrade shinken template. Info will be available in Scheduler Running Well check.')
    result.hard_exit(EXIT_STATUS.WARNING, deprecation_message)


def scheduler_late_checks():
    """Deprecated scheduler mode: unconditionally exits with a WARNING.

    Bug fix: the message previously referenced the "latency" mode (copy-paste
    from scheduler_latency); this function handles the "late_checks" mode.
    """
    result.hard_exit(EXIT_STATUS.WARNING, 'The mode "late_checks" is deprecated for this plugin version. Please upgrade shinken template. Info will be available in Scheduler Running Well check.')


def scheduler_top10_total():
    """Deprecated scheduler mode: unconditionally exits with a WARNING
    pointing the operator at the Poller Performance check."""
    deprecation_message = ('The mode "top10_total" is deprecated for this plugin version. '
                           'Please upgrade shinken template. You can see the check usage in Poller Performance check.')
    result.hard_exit(EXIT_STATUS.WARNING, deprecation_message)


def scheduler_top10_average():
    """Deprecated scheduler mode: unconditionally exits with a WARNING.

    Bug fix: the message previously referenced the "top10_total" mode
    (copy-paste from scheduler_top10_total); this function handles the
    "top10_average" mode.
    """
    result.hard_exit(EXIT_STATUS.WARNING, 'The mode "top10_average" is deprecated for this plugin version. Please upgrade shinken template. You can see the check usage in Poller Performance check.')


def broker_modules_queue():
    """Check mode 'modules_queue' for the broker daemon.

    Collects per-module queue sizes as perfdata, then inspects
    'module_stats' to report, per module that exposes workers, the number
    of workers and the hosts each one manages. A module with zero workers
    raises a CRITICAL check. Exits through result.exit().
    """
    perfs = {}
    data = basic_check('broker')
    # First pass: one perfdata entry per module with its raw queue size.
    modules = data.get('modules', [])
    for m_ in modules:
        mname = m_['module_name']
        l_ = m_['queue_size']
        perfs['module.%s.data_queue' % mname] = l_
    
    result.set_spare_info(data, daemon_type)
    
    # Baseline OK check; later worker checks can degrade the global status.
    result.add_check(EXIT_STATUS.OK, 'Modules performance is going well')
    
    # Second pass: per-module worker details (only modules reporting workers).
    module_stats = data.get('module_stats', {})
    for (module_type, module_type_entry) in module_stats.iteritems():
        for module_name, module_entry in module_type_entry.iteritems():
            if module_entry is None:
                continue
            workers = module_entry.get('workers', None)
            if workers is None:
                continue
            
            status = EXIT_STATUS.OK
            # Set a table with workers informations into the long output
            header = ('Worker', 'Managed hosts')
            total_number_of_managed_hosts = 0
            tab_lines = []
            for (worker_id, worker) in sorted(workers.iteritems()):
                number_of_managed_hosts = worker.get('number_of_managed_hosts', 0)
                queue_size = worker.get('main_process_to_worker_queue_size', 0)
                total_number_of_managed_hosts += number_of_managed_hosts
                # NOTE(review): these perf keys are not worker-specific, so each
                # iteration overwrites the previous worker's values — only the
                # last worker (in sorted order) is kept. This also clobbers the
                # 'module.<name>.data_queue' value set in the first pass above.
                # Confirm whether per-worker keys were intended.
                perfs['module.%s.data_queue' % module_name] = queue_size
                perfs['module.%s.managed_hosts' % module_name] = number_of_managed_hosts
                tab_lines.append(('%s Worker - %s' % (module_name, worker_id), number_of_managed_hosts))
            # NOTE(review): total_number_of_managed_hosts is accumulated but
            # never used afterwards.
            nb_workers = len(workers)
            output = '<ul><li>'
            if nb_workers == 0:
                output += '%s The module %s do not have any workers' % (HTMLTag.CRITICAL, module_name)
                long_output = ''
                status = EXIT_STATUS.CRITICAL
            else:
                output += '%s The module %s have <span style="font-style:italic;color: black;">%d workers</span>' % (HTMLTag.OK, module_name, nb_workers)
                long_output = HTMLTable.table(header, tab_lines, 'Module %s workers' % module_name, compact_title=True)  # We do not want a space between the title and the table
            output += '</li></ul>'
            result.add_check(status=status, output=output, long_output=long_output)
    
    result.set_perf_data(perfs)
    result.exit()


def broker_sla():
    """Deprecated broker mode: unconditionally exits with a WARNING
    listing the replacement SLA templates."""
    replacement_templates = HTMLList.simple_list([
        "shinken-broker-module-visualisation-ui-sla-reader",
        "shinken-broker-module-sla-writer",
    ])
    result.hard_exit(EXIT_STATUS.WARNING,
                     'The mode "sla" is deprecated for this Shinken version.',
                     'Please upgrade shinken template and use following template for sla monitoring: %s' % replacement_templates)


def broker_livedata():
    """Check mode 'livedata' for the broker daemon.

    Looks up the 'broker-module-livedata' entry in the daemon's module list,
    pings its HTTP(S) API, then reports request/error rates, response times
    and the last errors. Sets perfdata and exits through result.exit().
    """
    data = get_raw_stat()
    result.set_spare_info(data)
    info = {}
    perfs = {}
    # Find the livedata module entry among the daemon's modules.
    modules = data.get('modules_info')
    for module in modules:
        if module['name'] == "broker-module-livedata":
            info = module
            break
    
    # if no info, means no such module
    if not info:
        result.add_check(EXIT_STATUS.CRITICAL, 'Your daemon does not have any broker-module-livedata.')
    elif 'port' not in info:
        # Module present but not yet listening (no port published).
        result.add_check(EXIT_STATUS.UNKNOWN, 'Your module is not ready please retry in few seconds.')
    else:
        # The module listens on its own port, distinct from the daemon's.
        uri = '%s:%s' % (opts.hostname, info['port'])
        uri = uri.decode('utf-8')
        if info['https'] is True:
            result.add_check(EXIT_STATUS.OK, "The module is using <span style=\"color: black;\">HTTPS</span>")
            conn = Utils._http_get_conn(uri, timeout=3, use_ssl=(info['https'] is True), ssl_version=ssl.PROTOCOL_TLSv1_2)
        else:
            result.add_check(EXIT_STATUS.OK, "The module is using <span style=\"color: black;\">HTTP</span>")
            conn = Utils._http_get_conn(uri, timeout=3, use_ssl=(info['https'] is True))
        # Liveness probe: the API must answer "pong" on /api/v1/ping.
        conn.request("GET", "/api/v1/ping")
        r1 = conn.getresponse()
        buf = r1.read()
        
        if "Not found" in buf or "pong" not in buf:
            result.add_check(EXIT_STATUS.CRITICAL, "Your module is activated, but we can't communicate with the API.")
        
        # Per the messages below: token == 2 means no token configured,
        # token == 1 means the default token is still in use.
        if info['token'] == 2:
            result.add_check(EXIT_STATUS.WARNING, "There is no token in the configuration, it's not safe")
        
        if info['token'] == 1:
            result.add_check(EXIT_STATUS.WARNING, "You are using the default token, it's recommended to change it")
        
        info_for_list = []
        perfs["livedata_nb_error_last_hour"] = info['error_per_hour']
        result.add_check(EXIT_STATUS.OK, 'The module handled <span style="color: black;">%d</span> request(s) in the last hour' % info['request_per_hour'])
        
        # Errors above the (positive) warning threshold raise a WARNING;
        # below it they are only listed in the long output.
        if info['error_per_hour'] >= opts.livedata_warning > 0:
            result.add_check(EXIT_STATUS.WARNING, 'The module handled <span style="color: %s;">%d</span> request error(s) in the last hour' % (COLOR.ORANGE, info['error_per_hour']))
        elif info['error_per_hour'] > 0:
            info_for_list.append('Error(s) handled: <span style="font-style:italic;color: black;">%d</span>' % info['error_per_hour'])
        else:
            info_for_list.append('No error handled')
        perfs["livedata_nb_request_last_hour"] = info['request_per_hour']
        
        if info['best_in_one_second']['nb'] != 0:
            info_for_list.append('Connection peak : <span style="font-style:italic;color: black;">%d request(s) at %s</span>' % (info['best_in_one_second']['nb'], info['best_in_one_second']['time']))
        
        # Response-time stats only exist after at least one real API call.
        if 'average_response_time' in info.keys() and info['average_response_time']:
            info_for_list.append('Average response time: <span style="font-style:italic;color: black;">%.4fs</span>' % info['average_response_time'])
            perfs["livedata_average_response_time"] = info['average_response_time']
        else:
            info_for_list.append('You need to make at least one call to the api for informations')
        
        if 'worst_response_time' in info.keys() and info['worst_response_time']:
            info_for_list.append(
                'Max response time: <span style="font-style:italic;color: black;">%.4fs at %s</span>' % (info['worst_response_time']['response_time'], datetime.datetime.fromtimestamp(info['worst_response_time']['time']).strftime('%H:%M:%S')))
        
        result.add_check(EXIT_STATUS.OK, HTMLList.simple_list(info_for_list))
        if info['request_per_hour'] > 0:
            perfs["livedata_error_percent"] = (float(info['error_per_hour']) / float(info['request_per_hour'])) * 100
        else:
            perfs["livedata_error_percent"] = 0
        
        # Show the most recent errors (newest first), capped by the
        # --livedata-error-limit option.
        if len(info['errors']) > 0:
            info['errors'].reverse()
            number = 0
            tab_lines = []
            header = ('Date', 'Error')
            for index in range(0, opts.livedata_error_limit):
                if index >= len(info['errors']):
                    break
                number += 1
                hour = datetime.datetime.fromtimestamp(info['errors'][index]['time']).strftime('%H:%M:%S')
                tab_lines.append((hour, info['errors'][index]['error']))
            result.add_check(long_output=HTMLTable.table(header, tab_lines, 'Last %d errors' % number))
    result.set_perf_data(perfs)
    
    # Title line summarising the worst status accumulated above.
    if result.status == EXIT_STATUS.OK:
        result.add_check(EXIT_STATUS.OK, 'The livedata module is running well.', title=True)
    elif result.status == EXIT_STATUS.WARNING:
        result.add_check(EXIT_STATUS.WARNING, 'The livedata module has some issues.', title=True)
    elif result.status == EXIT_STATUS.CRITICAL:
        result.add_check(EXIT_STATUS.CRITICAL, 'The livedata module has critical issues.', title=True)
    
    result.exit()


# Dispatch table: available check modes per daemon type.
# Each entry maps a mode name (the -m/--mode value) to the function that
# implements it ('methode') and a help suffix shown by get_modes_help()
# ('[DEPRECATED]' marks modes that only print an upgrade warning).
MODES = {
    'arbiter'     : {
        'alive'         : {'methode': arbiter_alive, 'help': ''},
        'api_connection': {'methode': api_connection, 'help': ''},
        'stats'         : {'methode': arbiter_stats, 'help': ''},
    },
    'scheduler'   : {
        'alive'         : {'methode': scheduler_alive, 'help': ''},
        'api_connection': {'methode': api_connection, 'help': '[DEPRECATED]'},
        'latency'       : {'methode': scheduler_latency, 'help': '[DEPRECATED]'},
        'late_checks'   : {'methode': scheduler_late_checks, 'help': '[DEPRECATED]'},
        'top10_total'   : {'methode': scheduler_top10_total, 'help': '[DEPRECATED]'},
        'top10_average' : {'methode': scheduler_top10_average, 'help': '[DEPRECATED]'},
        'stats'         : {'methode': scheduler_stats, 'help': ''},
    },
    'poller'      : {
        'alive'              : {'methode': poller_alive, 'help': ''},
        'api_connection'     : {'methode': poller_api_connection, 'help': '[DEPRECATED]'},
        'cpu_load'           : {'methode': poller_cpu_load, 'help': '[DEPRECATED]'},
        'overload_protection': {'methode': poller_overload_protection, 'help': '[DEPRECATED]'},
        'stats'              : {'methode': poller_stats, 'help': ''},
    },
    'reactionner' : {
        'alive'              : {'methode': reactionner_alive, 'help': ''},
        'api_connection'     : {'methode': reactionner_api_connection, 'help': '[DEPRECATED]'},
        'cpu_load'           : {'methode': reactionner_cpu_load, 'help': '[DEPRECATED]'},
        'overload_protection': {'methode': reactionner_overload_protection, 'help': '[DEPRECATED]'},
        'stats'              : {'methode': reactionner_stats, 'help': ''},
    },
    'broker'      : {
        'alive'         : {'methode': alive, 'help': ''},
        'api_connection': {'methode': api_connection, 'help': ''},
        'modules_queue' : {'methode': broker_modules_queue, 'help': ''},
        'sla'           : {'methode': broker_sla, 'help': '[DEPRECATED]'},
        'livedata'      : {'methode': broker_livedata, 'help': ''},
    },
    'receiver'    : {
        'alive'         : {'methode': alive, 'help': ''},
        'api_connection': {'methode': api_connection, 'help': ''},
    },
    'synchronizer': {
        'alive'         : {'methode': synchronizer_alive, 'help': ''},
        'api_connection': {'methode': api_connection, 'help': ''},
    },
}

# Module-level state, populated by _main() from the command-line options.
daemon_type = ''  # -t/--daemon-type value (key into MODES)
mode = ''  # mode name (key into MODES[daemon_type])
uri = ''  # 'hostname:port' of the daemon to query
address = ''  # hostname only
shinken_supervisor_version = ''
last_check = None  # optional param forwarded to /get_raw_stats
to_print = ''  # use for testing the output of the check
result = Result()  # accumulates check statuses/outputs/perfdata until exit


def _main():
    """Parse command-line options, validate daemon type / mode / timeout,
    then dispatch to the check function registered in MODES."""
    global daemon_type, mode, uri, last_check, address, opts, shinken_supervisor_version
    
    option_parser = get_option_parser()
    opts, _args = option_parser.parse_args()
    
    # Publish the parsed options into the module-level state the check
    # functions read.
    daemon_type = opts.daemon_type
    mode = opts.mode
    last_check = opts.last_check
    address = opts.hostname
    shinken_supervisor_version = opts.shinken_supervisor_version
    uri = ('%s:%s' % (opts.hostname, opts.port)).decode('utf-8')
    
    # Validate everything before any network access.
    if daemon_type not in MODES:
        result.hard_exit(EXIT_STATUS.CRITICAL, 'The daemon type %s is unknown' % daemon_type)
    if mode not in MODES[daemon_type]:
        result.hard_exit(EXIT_STATUS.CRITICAL, 'The mode %s is unknown for this daemon type' % mode)
    if opts.timeout <= 0:
        result.hard_exit(EXIT_STATUS.CRITICAL, 'The --timeout option (%s) must be greater than 0' % opts.timeout)
    
    check_function = MODES[daemon_type][mode]['methode']
    if callable(check_function):
        # Some check functions take the parsed options as their only argument;
        # detect that from their signature.
        positional_args = getargspec(check_function)[0]
        if positional_args:
            check_function(opts)
        else:
            check_function()


# Script entry point: run the check only when executed directly.
if __name__ == '__main__':
    _main()
