#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (C) 2013:
#    Gabes Jean, j.gabes@shinken-solutions.com
#
# This file is part of Shinken Enterprise, all rights reserved.

import sys

try:
    import shinken
except ImportError:
    print('Cannot import shinken lib, please install it before launching this tool')
    sys.exit(2)

import json
import optparse
import os
import socket
import time
import urllib2
import re
import datetime
import itertools
import ssl
from subprocess import PIPE, Popen
from collections import OrderedDict, defaultdict
from pwd import getpwuid
from grp import getgrgid
from hashlib import sha256
from shinken.basemodule import ModuleState
from shinken.util import format_t_into_dhms_format
import copy
from shinken.log import logger
from shinken.objects.config import Config
# All about keys and such things
from shinkensolutions.crypto import are_keys_valid
# synchronizer check, like mongo
from shinken.synchronizer.synchronizerconfig import SynchronizerConfig
from shinken.daemons.brokerdaemon import GRAPHITE_API_VERSION
from shinken.misc.type_hint import List, Dict, Union
from shinkensolutions import localsystem
from shinkensolutions.localinstall import get_local_addons, ERROR_COLOR, OK_COLOR, POSSIBLE_DAEMONS, POSSIBLE_ADDONS, get_local_instances_for_type, get_local_daemon_configuration_file_path, get_instance_name, \
    get_installation_history, get_data_history, INSTALLATION_TYPES, POSSIBLE_DATA
from shinkensolutions.http_helper import http_get, HTTP_ERRORS
from shinkensolutions.lib_checks.graphite import GRAPHITE_API_VERSION, GRAPHITE_STATS_KEY, GRAPHITE_STATS_FILE_IS_TOO_OLD, NB_METRICS_COUNT_FILE
from shinken.property import BoolProp
from shinken.vmware_stats import vmware_stats_reader

try:
    from collections import Counter
except ImportError:
    from shinken.misc.counter import Counter

# For terminal size
import struct

try:
    from fcntl import ioctl
    from termios import TIOCGWINSZ
except ImportError:  # on windows: cannot have this
    ioctl = None
    TIOCGWINSZ = None

# Default umask (octal 022): new files come out rw-r--r--, dirs rwxr-xr-x.
# NOTE: written 0o22 so the literal is valid on Python 2.6+ AND Python 3
# (the legacy 0022 form is a syntax error on Python 3); value is unchanged.
DEFAULT_UMASK = 0o22
# Set umask to avoid problems when creating files
os.umask(DEFAULT_UMASK)

# Local installation context and daemon configuration entry points
CONTEXT_PATH = '/var/lib/shinken/context.json'
SYNCHRONIZER_CFG_PATH = '/etc/shinken/synchronizer.cfg'
ARBITER_CFG_PATH = '/etc/shinken/shinken.cfg'

# These infos will be updated from the file in CONTEXT_PATH if it exists
CURRENT_VERSION = '2.03.02'
CURRENT_VERSION_RAW = CURRENT_VERSION
ORIGINAL_VERSION = '2.03.02'

DASH_LINE_SIZE = 50
MAX_NESTED_REALMS = 15
CURRENT_ROLES = list()
DEFAULT_TIMEOUT = 2
DEFAULT_TIMEOUT_FORCE = False

# Default HTTP port of each daemon type
PORT_MAPPING = {
    'scheduler'   : 7768,
    'receiver'    : 7773,
    'broker'      : 7772,
    'poller'      : 7771,
    'reactionner' : 7769,
    'arbiter'     : 7770,
    'synchronizer': 7765,
    'provider'    : 7774}

if os.path.exists(CONTEXT_PATH):
    # Context manager so the descriptor is closed promptly
    # (the previous bare open(...).read() leaked the file handle).
    with open(CONTEXT_PATH, 'r') as context_file:
        context = json.loads(context_file.read())
    CURRENT_VERSION_RAW = context.get('current_version', CURRENT_VERSION)
    CURRENT_VERSION = CURRENT_VERSION_RAW.split('-')[0]
    ORIGINAL_VERSION = context.get('installed_version', ORIGINAL_VERSION)
    CURRENT_ROLES = context['daemons']

DEBUG = False

logger.setLevel('INFO')

# ANSI color codes used for terminal output
# NOTE: ERROR_COLOR / OK_COLOR deliberately shadow the values imported from
# shinkensolutions.localinstall above (same codes).
AT_RISK_COLOR = 35
ERROR_COLOR = 31
OK_COLOR = 32
SPARE_COLOR = 94
INFO_COLOR = 96

parts = set()  # list of all parts possible

licence_data = None
stdout = sys.stdout
devnull = open('/dev/null', 'w')  # kept open for the process lifetime (intentional)
# keep track of the scheduler and retention module by realm
# used to check the retention configuration in a distributed realm environment
ret_sched_by_realm = {}

# keep track of the arbiter sparing configuration errors
# _prepare_arbiters_spare_conf puts errors inside the dict and check_sarbiter_spare_conf displays errors if needed
arbiters_spare_conf = {}

_raw_stats_cache = {'synchronizer': {}}

# Keep the daemon raw stats so we can access them in various places
_daemon_raw_stats = {}
for daemon_type in PORT_MAPPING.keys():
    _daemon_raw_stats[daemon_type] = {}


#######################################################################################
#   / / / / /_(_) /____
#  / / / / __/ / / ___/
# / /_/ / /_/ / (__  )
# \____/\__/_/_/____/
#######################################################################################


class HealthPrinter(object):
    """
    Namespace class (never instantiated) gathering the console/file output
    helpers of the healthcheck tool: colored printing, the installation /
    data / encryption history reports and a small debug buffer.
    """
    # ANSI color codes used when colorizing output
    AT_RISK_COLOR = 35
    ERROR_COLOR = 31
    OK_COLOR = 32
    UNKNOWN_COLOR = 33
    INFO_COLOR = 96
    
    # Words automatically wrapped in ANSI colors by write() when auto_color is True
    unknown_words = ['unknown', 'Unknown', 'Not Found', 'invalid']
    at_risk_words = ['not_implemented:', 'cannot_connect:', 'to update:']
    ok_words = ['success:']
    error_words = ['error:', 'Error', 'Connection refused', 'failure:', '404', 'Name or service not known']
    
    """ This class is only used as namespace """
    __debug_str = []  # buffered debug lines, flushed by print_debug()
    out_file = None  # optional duplicate-output file set by set_out_file()
    
    # Label printed before the version number, per history entry type
    HISTORY_VERSION_PRINTING = {
        'INSTALLATION': 'version',
        'UPDATE'      : 'to version',
        'FIX'         : 'on version',
        'RESTORE'     : 'on version',
        'SANATIZE'    : 'on version',
        'PATCH'       : 'on version',
        'UNPATCH'     : 'on version',
        'ADDON'       : 'on version',
        'REMOVE ADDON': 'on version'
    }
    # We should have all INSTALLATION_TYPES entries list here, or the dev did fuck up
    assert (set(HISTORY_VERSION_PRINTING.keys()) == set(INSTALLATION_TYPES))
    
    
    @staticmethod
    def set_out_file(filename):
        """
        Open filename for writing and duplicate every write() call into it.
        Terminates the whole program (exit code 1) when the file cannot be opened.
        :param filename: A string representing the file name.
        :rtype: None
        """
        try:
            HealthPrinter.out_file = open(filename, 'w')
        except (OSError, IOError) as e:
            HealthPrinter.write("Unable to create open file %s for writing : %s" % (filename, e))
            exit(1)
    
    
    @staticmethod
    def get_file_name():
        """
        :return: The file name used to log output or None
        :rtype: str, None
        """
        if HealthPrinter.out_file is None:
            return None
        return HealthPrinter.out_file.name
    
    
    @staticmethod
    def write(message, auto_color=True):
        """
        Print message both to stdout and to support file if defined
        :param message: The message to show
        :param auto_color: when True, known status words are wrapped in ANSI colors
        :rtype: None
        """
        if auto_color:
            # Plain substring replacement, one word list after the other
            for w in HealthPrinter.ok_words:
                message = message.replace(w, '\033[%dm%s\033[0m' % (HealthPrinter.OK_COLOR, w))
            for w in HealthPrinter.error_words:
                message = message.replace(w, '\033[%dm%s\033[0m' % (HealthPrinter.ERROR_COLOR, w))
            for w in HealthPrinter.at_risk_words:
                message = message.replace(w, '\033[%dm%s\033[0m' % (HealthPrinter.AT_RISK_COLOR, w))
            for w in HealthPrinter.unknown_words:
                message = message.replace(w, '\033[%dm%s\033[0m' % (HealthPrinter.UNKNOWN_COLOR, w))
        
        # NOTE: encoded-bytes + str concatenation is Python 2 semantics
        sys.stdout.write(message.encode('utf8') + '\n')
        
        # Duplicate the (possibly colorized) message into the support file if any
        if HealthPrinter.out_file is not None:
            HealthPrinter.out_file.write(message + '\n')
    
    
    @staticmethod
    def print_installation_history():
        """Print every entry of the local installation history, one line per
        entry; patch/addon entries get a second line with the patch name."""
        histories = get_installation_history()
        HealthPrinter.write('\n\033[%dm%s\033[0m' % (HealthPrinter.OK_COLOR, 'INSTALLATION HISTORY:'))
        
        for history_entry in histories:
            entry_type = history_entry['type']
            displayed_date = HealthPrinter.format_date(history_entry)
            if entry_type in ('PATCH', 'UNPATCH', 'ADDON', 'REMOVE ADDON'):
                s = ' - %-25s :  %10s=%-25s date=%s' % (entry_type, HealthPrinter.HISTORY_VERSION_PRINTING.get(entry_type), history_entry['version'], displayed_date)
                HealthPrinter.write(s)
                s = '   %-25s    %s' % ('', history_entry['patch_name'])
                HealthPrinter.write(s)
                continue
            s = ' - %-25s :  %10s=%-25s date=%s' % (entry_type, HealthPrinter.HISTORY_VERSION_PRINTING.get(entry_type), history_entry['version'], displayed_date)
            HealthPrinter.write(s)
        HealthPrinter.write('')
    
    
    @staticmethod
    def print_version():
        """Print the originally installed version and the current (updated) one."""
        HealthPrinter.write("shinken-healthcheck versions:")
        HealthPrinter.write("  Original installed version : v%s" % ORIGINAL_VERSION)
        HealthPrinter.write("  Updated version            : v%s" % CURRENT_VERSION_RAW)
    
    
    @staticmethod
    def print_encryption_history():
        """Print the protected-fields (database encryption) migration history
        read from the synchronizer's mongo database; prints a plain message
        when encryption was never enabled, or an error when mongo is unreachable."""
        # Local import: pymongo is only required when this report actually runs
        from pymongo.errors import ConnectionFailure, ConfigurationError
        synchronizer_conf = ConfigUtils.get_synchronizer_config()
        HealthPrinter.write('\n\033[%dm%s\033[0m' % (HealthPrinter.OK_COLOR, 'PROTECTED FIELDS DATABASE MIGRATIONS HISTORY:'))
        HealthPrinter.write('')
        
        try:
            protected_fields_info = get_protected_fields_from_database(synchronizer_conf)
        except ConnectionFailure as exp:
            # NOTE: exp.message is Python 2 only
            HealthPrinter.write('\033[%dmCannot connect to mongodb server: %s   (%s)\n\033[0m' % (HealthPrinter.ERROR_COLOR, getattr(synchronizer_conf, 'mongodb_uri', None), exp.message), auto_color=False)
            return
        except ConfigurationError as exp:
            HealthPrinter.write('\033[%dmMongo configuration error: %s    (%s)\n\033[0m' % (HealthPrinter.ERROR_COLOR, getattr(synchronizer_conf, 'mongodb_uri', None), exp.message), auto_color=False)
            return
        except Exception as exp:
            # Anything else is unexpected: let it bubble up untouched
            raise
        
        if not protected_fields_info:
            HealthPrinter.write('        \033[33mEncryption was never enabled\033[0m')
            HealthPrinter.write('')
            return
        
        last_migrations = protected_fields_info.get('last_migrations', [])
        if not last_migrations:
            HealthPrinter.write('        \033[33mEncryption was never enabled\033[0m')
            HealthPrinter.write('')
            return
        
        # Display migrations in chronological order
        for migration in sorted(last_migrations, key=lambda mig: mig['date']):
            display_date = datetime.datetime.strptime(migration['date'], "%Y-%m-%d %H:%M:%S.%f").strftime("%Y-%m-%d  %H:%M:%S")
            
            # Make sure encoding is correct
            name_from = migration['from']['protect_fields__encryption_key_name'].strip()
            name_to = migration['to']['protect_fields__encryption_key_name'].strip()
            if isinstance(name_from, str):
                name_from = name_from.decode('utf8', 'ignore')
            if isinstance(name_to, str):
                name_to = name_to.decode('utf8', 'ignore')
            orig_encryption = migration['from'].get('protect_fields__activate_database_encryption', "")
            if orig_encryption == "":
                orig_encryption = '<Never enabled>'
            
            HealthPrinter.write('\033[%dmDate :\033[0m %s' % (HealthPrinter.OK_COLOR, display_date))
            HealthPrinter.write('\033[%dm                           From:                      To:\033[0m' % HealthPrinter.AT_RISK_COLOR)
            HealthPrinter.write('           Encrypted:      %-20s       %-5s' % (orig_encryption, migration['to']['protect_fields__activate_database_encryption']))
            HealthPrinter.write('           Key Name:       %-20s       %-20s' % (name_from or "<Never enabled>", name_to))
            HealthPrinter.write('           Backuped:       %-3s                       %-3s' % ("\033[31mFalse\033[0m" if not migration['from']['extracted_key'] else "True",
                                                                                                "\033[31mFalse\033[0m" if not migration['to']['extracted_key'] else "\033[32mTrue\033[0m"))
            HealthPrinter.write('              (note that the backup status may be different from the one displayed in the ')
            HealthPrinter.write('               "Encryption Status" section as this one is the status at migration time.)')
            HealthPrinter.write('')
            
            # Only show key-hash / substring lines in From/To form when they changed
            key_hash_from = migration['from']['protect_fields__encryption_keyfile_hash'] or "<Data protection never enabled>"
            key_hash_to = migration['to']['protect_fields__encryption_keyfile_hash']
            if key_hash_from != key_hash_to:
                HealthPrinter.write('              From key hash :      %-64s' % key_hash_from)
                HealthPrinter.write('              To   key hash :      %-64s' % key_hash_to)
            else:
                HealthPrinter.write('    Unchanged key hash :   %64s' % key_hash_to
                                    )
            substrings_from = "  ".join(sorted(migration['from']['protect_fields__substrings_matching_fields'])) or "<Data protection never enabled>"
            substrings_to = "  ".join(sorted(migration['to']['protect_fields__substrings_matching_fields']))
            if substrings_from != substrings_to:
                HealthPrinter.write('    From encrypted substrings :      %s' % substrings_from)
                HealthPrinter.write('    To   encrypted substrings :      %s' % substrings_to)
            else:
                HealthPrinter.write('    Unchanged substrings :   %s' % substrings_to)
            
            HealthPrinter.write('')
    
    
    @staticmethod
    def print_data_history():
        """Print the history of every data type (backup/restore aware), one
        section per entry of POSSIBLE_DATA."""
        for data_type in POSSIBLE_DATA:
            histories = get_data_history(data_type)
            HealthPrinter.write('\n\033[%dm%s\033[0m (can be different from installation history if you did backup/restore data from another server)' % (HealthPrinter.OK_COLOR, '%s HISTORY:' % data_type.upper()))
            for history_entry in histories:
                entry_type = history_entry['type']
                displayed_date = HealthPrinter.format_date(history_entry)
                # Sanatize are with a special display
                if entry_type == 'SANATIZE':
                    s = '    * %-9s : sanatize pass=%s' % (entry_type.lower(), history_entry['sanatize_name'])
                    HealthPrinter.write(s)
                    continue
                if entry_type in ('PATCH', 'UNPATCH', 'ADDON', 'REMOVE ADDON'):
                    s = ' - %-15s :  %10s=%-25s date=%s' % (entry_type, HealthPrinter.HISTORY_VERSION_PRINTING.get(entry_type), history_entry['version'], displayed_date)
                    HealthPrinter.write(s)
                    s = '   %-15s    %s' % (' ', history_entry['patch_name'])
                    HealthPrinter.write(s)
                    continue
                # For restore, we also print the backup path to know which backup was restored
                backup_path = ''
                if entry_type == 'RESTORE':
                    backup_path = 'restored from=%s' % history_entry['backup_path']
                comment = history_entry.get('comment', '')
                if comment:
                    comment_label = 'modules' if data_type == 'modules' else 'comment'
                    s = ' - %-12s :  %10s=%-25s date=%s   server=%-15s  %s   %s=%-15s' % (
                        entry_type, HealthPrinter.HISTORY_VERSION_PRINTING.get(entry_type), history_entry['version'], displayed_date, history_entry['server_name'], backup_path, comment_label, comment)
                else:
                    s = ' - %-12s :  %10s=%-25s date=%s   server=%-15s  %s' % (
                        entry_type, HealthPrinter.HISTORY_VERSION_PRINTING.get(entry_type), history_entry['version'], displayed_date, history_entry['server_name'], backup_path)
                HealthPrinter.write(s)
            HealthPrinter.write('')
    
    
    @staticmethod
    def format_date(history_entry):
        """Return history_entry['date'] formatted as 'YYYY-mm-dd  HH:MM:SS'
        (local time), or a colored 'Unknown' when the timestamp is 0."""
        if history_entry['date'] == 0:
            displayed_date = "\033[32mUnknown\033[0m   "
        else:
            displayed_date = time.strftime('%Y-%m-%d  %H:%M:%S', time.localtime(history_entry['date']))
        return displayed_date
    
    
    @staticmethod
    def debug(message):
        """Buffer a debug line; it is only shown when print_debug() is called."""
        HealthPrinter.__debug_str.append('%s\n' % str(message))
    
    
    @staticmethod
    def print_debug():
        """Flush the whole buffered debug output at once."""
        HealthPrinter.write(''.join(HealthPrinter.__debug_str))


class RealmUtils(object):
    """ This class is only used as namespace """
    # Realms configuration object, installed once via set_realm_config()
    config = None
    # Name of the default realm (filled by set_realm_config)
    default = ""
    
    # Safety bound against realm_members cycles when walking children
    GET_REALM_CHILDS_MAX_DEPTH = 20
    
    
    @staticmethod
    def set_realm_config(realms):
        """
        Store the realms configuration and remember the default realm name.
        :type realms: Realms
        :rtype: None
        """
        realms.pythonize()  # Necessary for getting default realm
        RealmUtils.config = realms
        default_realm = realms.get_default()
        if default_realm:
            RealmUtils.default = default_realm.get_name()
    
    
    @staticmethod
    def build_leaf(satellite):
        """
        Build the tree headers for a satellite: one 'h0>' line per realm on
        the path from the root, then a final 'h1>' line for its own realm.
        :param satellite: a satellite configuration object
        :return: A list of header strings starting from the root realm
        """
        realm_name = RealmUtils.get_realm_name(satellite)
        path_realms = RealmUtils.config.get_path_to_default(realm_name)
        headers = ['h0>| Realm /%s |' % RealmUtils.get_path_to_root(r).strip('/') for r in path_realms]
        headers.append('h1>| In %s/ |' % realm_name)
        return headers
    
    
    @staticmethod
    def get_realm_name(satellite_conf):
        """
        :param satellite_conf: a satellite configuration object
        :return: its realm name, or the default realm when the 'realm'
                 attribute is missing or empty
        """
        realm_name = getattr(satellite_conf, 'realm', '')
        if realm_name == "":
            realm_name = RealmUtils.default
        return realm_name
    
    
    @staticmethod
    def get_handle_realms(satellite_conf):
        """Return the list of realm names handled by this satellite: its own
        realm plus, when manage_sub_realms is set, every child realm.
        Brokers manage sub realms by default; other daemon types do not."""
        realm_name = RealmUtils.get_realm_name(satellite_conf)
        handled = set([realm_name])
        sub_realms_by_default = satellite_conf.get_my_type() == 'broker'
        manage_sub_realms = getattr(satellite_conf, 'manage_sub_realms', sub_realms_by_default)
        if manage_sub_realms in ('1', True):
            handled.update(RealmUtils.get_child_realms(realm_name))
        return list(handled)
    
    
    @staticmethod
    def get_all_realms():
        """Return the names of every known realm."""
        names = []
        for realm in RealmUtils.config:
            names.append(realm.realm_name)
        return names
    
    
    @staticmethod
    def get_child_realms(realm_name, depth=0):
        """Recursively collect every realm name below realm_name (duplicates
        removed). Recursion is capped at GET_REALM_CHILDS_MAX_DEPTH so a
        membership cycle cannot loop forever."""
        if depth > RealmUtils.GET_REALM_CHILDS_MAX_DEPTH:
            return []
        
        realm = None
        for candidate in RealmUtils.config:
            if candidate.realm_name == realm_name:
                realm = candidate
                break
        if realm is None:
            return []
        
        raw_members = getattr(realm, 'realm_members', None)
        if not raw_members:
            return []
        
        direct_children = [member.strip() for member in raw_members.split(',')]
        # A realm may (wrongly) list itself as its own member: drop it
        if realm_name in direct_children:
            direct_children.remove(realm_name)
        
        all_children = set(direct_children)
        for child in direct_children:
            all_children.update(RealmUtils.get_child_realms(child, depth=depth + 1))
        
        return list(all_children)
    
    
    @staticmethod
    def get_parent_realms(realm_name):
        """Return the realm names on the path from the root down to realm_name."""
        path = RealmUtils.config.get_path_to_default(realm_name)
        return list(path)
    
    
    @staticmethod
    def get_path_to_root(realm_name):
        """
        :param realm_name: Realm name as string
        :return: a "/" separated realm names from root to direct parent of realm_name
        """
        names = RealmUtils.config.get_path_to_default(realm_name)
        return ''.join('/%s' % name for name in names) + '/'


class HostUtils(object):
    """ This class is only used as namespace """
    
    # Cache of every IP of the local host, filled lazily by init_local_addresses()
    local_addresses = None
    
    
    @staticmethod
    def init_local_addresses():
        """Fill HostUtils.local_addresses with every IP of this host.
        Lazy: the `hostname -I` subprocess is only spawned on first call."""
        if HostUtils.local_addresses is None:
            proc = Popen(['hostname', '-I'], stdout=PIPE)
            HostUtils.local_addresses = proc.communicate()[0].split()
    
    
    @staticmethod
    def get_server_name(name):
        
        """ HostUtils.get_server_name() is used to regroup all host occurrence related to localhost under the same
        branch in the tree view output. If not already done, retrieve every ip address of the host
        :param name: a host name or IP address
        :return: a display string "name (ip)", with 'localhost' substituted
                 when the address belongs to the local host
        """
        
        HostUtils.init_local_addresses()
        
        # name may be an address ip... or not. gethostbyname(i) will return a valid ip
        # address.
        try:
            address_name = socket.gethostbyname(name)
        except IOError as exp:  # socket.error/gaierror are IOError subclasses since Python 2.6
            return "Unable to get IP address for %s : %s" % (name, exp)
        
        if address_name in HostUtils.local_addresses:
            # address found in local_addresses list, it is localhost
            if address_name == name:
                return "%s (%s)" % ('localhost', address_name)
            else:
                return "%s (%s)" % (name, address_name)
        elif name == 'localhost' or name == '127.0.0.1':
            # `hostname -I` does not list 127.0.0.1, so the loopback names
            # would not match local_addresses above: handle them explicitly
            return "%s (%s)" % ('localhost', address_name)
        else:
            # it is not about the local host
            return "%s (%s)" % (name, address_name)
    
    
    # Utility function for daemons
    @staticmethod
    def get_local_daemons():
        """Return the daemon types installed locally according to CONTEXT_PATH;
        when no context file exists yet, every possible type is returned."""
        if not os.path.exists(CONTEXT_PATH):
            return ['central', 'poller', 'synchronizer', 'scheduler', 'reactionner', 'receiver', 'broker', 'provider']
        with open(CONTEXT_PATH, 'r') as f:
            context_json = json.loads(f.read())
        # .items() instead of .iteritems(): same behavior on Python 2, and keeps
        # the code importable on Python 3
        local_daemons = [k for (k, installed) in context_json['daemons'].items() if installed]
        return local_daemons
    
    
    @staticmethod
    def is_daemon_node(daemon_type):
        """Return True when at least one enabled local instance of daemon_type exists."""
        local_instances = get_local_instances_for_type(daemon_type)
        which_enabled = [k for (k, enabled) in local_instances if enabled]
        return len(which_enabled) != 0
    
    
    @staticmethod
    def get_host_by_name(name):
        """Resolve name to an IP address; on any failure return name unchanged."""
        try:
            return socket.gethostbyname(name)
        except Exception:
            return name


class HealthCheckProgress(object):
    """ This class is only used as namespace """
    
    # Last percentage given to show_progress(), reused by show_progress_message()
    progress_value = 0
    
    
    @staticmethod
    def end_progress():
        """Clear the current message and force the bar to 100% (emits the final newline)."""
        HealthCheckProgress.show_progress_message("")
        HealthCheckProgress.show_progress(100)
    
    
    @staticmethod
    def show_progress_message(message):
        """Redraw the progress bar at its current percentage with a new message."""
        HealthCheckProgress.show_progress(HealthCheckProgress.progress_value, message)
    
    
    @staticmethod
    def get_terminal_size():
        """Return (height, width) of the controlling terminal, or (999, 999)
        when it cannot be queried (no tty, Windows where ioctl is None, ...)."""
        try:
            # Want to know why? ask Jean.
            # TIOCGWINSZ fills 4 shorts: rows, cols, xpixel, ypixel — keep the first two
            height, width = struct.unpack('hhhh', ioctl(sys.stdout, TIOCGWINSZ, '\000' * 8))[0:2]
        except Exception:  # oups, which terminal is it? (note: more than IOError because I'm not sure it's the only exception that can be raised)
            height = 999
            width = 999
        return height, width
    
    
    @staticmethod
    def show_progress(pct, message=""):
        """Redraw the single-line progress bar '[....    ] pct% message' in place.
        :param pct: progress percentage, expected to be an int in 0..100
        :param message: optional text appended after the percentage
        """
        HealthCheckProgress.progress_value = pct
        # NOTE: Python 2 integer division — the bar is 25 characters wide
        divided_pct = pct / 4
        whole_line = '[' + '.' * divided_pct + ' ' * (25 - divided_pct) + '] %d%% %s' % (pct, message.encode('utf8'))
        # We want that this line do not overlap over the terminal and print endless lines, so we are
        # limiting the size to the terminal, and to show that we are removing characters, we are printing
        # ... in yellow at the end
        # NOTE: tested in putty and screen
        terminal_height, terminal_width = HealthCheckProgress.get_terminal_size()
        if len(whole_line) > terminal_width:  # too long, must cut it
            whole_line = whole_line[:terminal_width - 4] + '\033[33m...\033[0m'  # 4 to allow ... to be printed with space
        printed_line = '\r%s\033[K' % whole_line  # do not len() this one because extra characters are not visible
        sys.stdout.write(printed_line)
        sys.stdout.flush()
        if pct == 100:
            sys.stdout.write('\n')


class TreeUtils(object):
    """Namespace of helpers that turn daemon/module states into Tree leaves."""
    
    @staticmethod
    def list_module_leaves(branch, module_states, is_sub_module=False, sla_module_conf=None, webui_modules_conf=None):
        # type: (Tree, Dict, bool, Dict, List) -> None
        """
        Add one leaf per module under branch ('Modules:' or 'Submodules:'),
        including restart warnings, error output and sla/webui specific checks.
        Recurses into sub-modules.
        :param branch: Tree branch of the daemon
        :param module_states: dict with 'modules' (list of module state dicts)
               and optional 'errors' (module-manager level errors)
        :param is_sub_module: True when listing the modules of a module
        :param sla_module_conf: per-SLA-module duplicate-database info, or None
        :param webui_modules_conf: list of webui module configurations, or None
        """
        logger.debug('list_module_leaves::start')
        if webui_modules_conf is None:
            webui_modules_conf = []
        if sla_module_conf is None:
            sla_module_conf = {}
        
        # Compute the max length of the names of modules in order to align data correctly
        _list_module_len_name = [len(module['name'].strip()) for module in module_states['modules']]
        module_line_indent = 8 + max(_list_module_len_name) if _list_module_len_name else 0
        
        # Here the errors are not the modules errors but the modules_manager_errors and can be only bad module on daemon
        for error in module_states.get('errors', []):
            mod_branch = branch.get_branch('Submodules:') if is_sub_module else branch.get_branch('Modules:')
            mod_branch.add_leaf(error, status='WARNING')
        
        for module_state in module_states['modules']:
            module_status = module_state['status']
            module_name = module_state['name']
            module_type = module_state['type']
            
            # Here we set the output only if the state is not OK because the build_module_leaf build the line
            # with OK : name and type for every module. If a module is not ok, we show his output
            module_output = [(module_state['output'], module_status)] if module_status != ModuleState.OK else []
            if module_status != ModuleState.OK and isinstance(module_state['output'], list):
                max_display_output = 10
                module_output = [(i, module_status) for i in module_state['output'][:max_display_output]]
                if len(module_state['output']) > max_display_output:
                    module_output.append(('Only %s messages is display. %s messages left.' % (max_display_output, len(module_state['output']) - max_display_output), module_status))
            module_restarts = module_state['restarts']
            
            restarts_count = 0
            last_three_dt = None
            
            has_sub_modules = "modules" in module_state and module_state['modules']
            
            if len(module_restarts) > 0:
                # Get restarts within Checks.modules_warning_threshold
                # Manage restarts stored in the old format : [ts1, ts2, ts3]
                if isinstance(module_restarts[-1], float):
                    module_restarts = [{'timestamp': ts, 'reason': "The reason for this restart was not saved"} for ts in module_restarts]
                
                last_dt = datetime.datetime.fromtimestamp(module_restarts[-1]['timestamp'])
                limit_dt = datetime.datetime.now() - datetime.timedelta(minutes=Checks.modules_warning_threshold)
                
                if last_dt >= limit_dt:
                    if module_status == ModuleState.OK:
                        module_status = ModuleState.WARNING
                    # Count all restarts within limit_dt
                    restarts_count = 0
                    for restart in reversed(module_restarts):
                        if datetime.datetime.fromtimestamp(restart['timestamp']) > limit_dt:
                            restarts_count += 1
                        else:
                            break
                    
                    # Take the 3 last restarts for output
                    last_three_dt = module_restarts[-3:]
            
            branch_status = module_status if module_status in (ModuleState.OK, ModuleState.WARNING) else 'ERROR'
            mod_branch = branch.get_branch('Submodules:') if is_sub_module else branch.get_branch('Modules:')
            
            if module_type == 'sla' and sla_module_conf:
                output = TreeUtils.check_sla_module(module_name, sla_module_conf)
                module_output.extend(output)
            elif module_type == 'webui' and webui_modules_conf:
                webui_branch = mod_branch.get_branch('Module %s:' % module_name)
                webui_module_conf = next((c for c in webui_modules_conf if c['module_name'] == module_name), None)
                if webui_module_conf:
                    check_webui_module(webui_module_conf, webui_branch)
                    if has_sub_modules:
                        TreeUtils.list_module_leaves(webui_branch, module_state, True)
            
            # Because SLA use output to show his leaf, we need to show the output. If the SLA maintains his own leaf, we can add an 'else' and print only for other module than SLA and WEBUI
            if module_output:
                mod_branch = mod_branch.get_branch("Module %s:" % module_name)
            
            TreeUtils.build_module_leaf(module_name, module_type, restarts_count, last_three_dt, Checks.modules_warning_threshold, mod_branch, module_line_indent, status=branch_status, output=module_output)
            
            # So, here we need check submodule for each module except webui who is already done
            if has_sub_modules and module_type != 'webui':
                # mod_branch = branch.get_branch('Module %s:' % module_name)
                TreeUtils.list_module_leaves(mod_branch, module_state, True)
    
    
    @staticmethod
    def check_sla_module(module_name, sla_module_state):
        """Return a list of (message, ModuleState) tuples for the given SLA
        module; empty when the module is unknown or its database is not
        shared by several brokers."""
        # Direct dict lookup (same semantics as the previous key-scan with next())
        module_conf_state = sla_module_state.get(module_name)
        if not module_conf_state:
            return []
        
        sla_output = []
        if module_conf_state['duplicate']:
            message = 'This SLA module "%s" is used by more than one Broker but you can\'t use the same database ("%s" on %s) for several brokers' % (module_name, module_conf_state['database'], module_conf_state['address'])
            sla_output.append((message, ModuleState.CRITICAL))
        
        return sla_output
    
    
    @staticmethod
    def build_module_leaf(module_name, module_type, total_restarts_count, last_restarts, threshold, branch, module_name_spaces, status, output=None):
        """
        Add the leaf (or leaves) describing one module: restart history when
        recent restarts happened, output lines when there are any, otherwise a
        single aligned 'Name: ... Type: ...' line.
        :param output: list of (line, ModuleState) tuples; defaults to empty
        """
        # None sentinel instead of the previous shared mutable default (output=[])
        if output is None:
            output = []
        name_format = "Name: %s" % module_name
        type_format = "Type: %s" % module_type
        base_indent = branch.get_indent() + Tree.indent
        line_indent = module_name_spaces - len(name_format)
        if line_indent < 0:
            line_indent = 0
        
        if total_restarts_count > 0:
            if len(last_restarts) == 1:
                restart_string = 'Last restart:'
            else:
                restart_string = 'Last %i restarts:' % len(last_restarts)
            
            restart_format = "The module [%s] has restarted [%s times] in the last %i minutes. A restart is removed from count after %s minutes." % (module_name, total_restarts_count, threshold, threshold)
            restart_format = restart_format + '\n' + base_indent + restart_string
            
            for restart in last_restarts:
                _timestamp = datetime.datetime.fromtimestamp(int(restart['timestamp'])).strftime('%Y-%m-%d %H:%M:%S')
                _reason = restart['reason'] if restart['reason'] else 'The reason for this restart was not saved'
                restart_format += '\n' + base_indent + Tree.indent + '- %s : %s' % (_timestamp, _reason)
            
            branch.add_leaf(restart_format, status, weight=-1)
        
        if output:
            for output_line in output:
                _status = output_line[1]
                # Here map the ModuleState.CRITICAL to ERROR because healthcheck don't know the critical
                if _status == ModuleState.FATAL:
                    _status = "ERROR"
                    branch.add_leaf('Your module have a FATAL error and it will NOT be restart to protect your data.', _status)
                    branch.add_leaf('You must contact your dedicated support with your log and shinken-backup.', _status)
                if _status == ModuleState.CRITICAL:
                    _status = "ERROR"
                branch.add_leaf(output_line[0], _status)
        else:
            branch.add_leaf(name_format + " " * line_indent + type_format, status)
    
    
    @staticmethod
    def compare(line_entry1, line_entry2):
        """cmp()-style comparison: 0 when equal, +1 when entry1 > entry2, -1 otherwise."""
        if line_entry1 == line_entry2:
            return 0
        if line_entry1 > line_entry2:
            return +1
        else:
            return -1
    
    
    @staticmethod
    def get_satellite_branch(satellite):
        """Return (creating it on the way) the Tree branch where this
        satellite's leaves must be attached, under its realm path."""
        name = satellite.get_name()
        satellite_type = satellite.get_my_type()
        address = satellite.address
        spare = ConfigUtils.is_spare(satellite)
        spare_activated = ConfigUtils.is_spare_activated(satellite)
        _branch = Tree.get_root() \
            .get_branch('Architecture') \
            .get_sub_branch(RealmUtils.build_leaf(satellite)) \
            .get_branch('h2>- %s:' % HostUtils.get_server_name(address)) \
            .get_branch('[ %ss ]' % satellite_type) \
            .get_branch(_get_daemon_name('[%s: %s]' % (satellite_type, name), spare, spare_activated))
        return _branch


class Tree(object):
    """Hierarchical container used to build and render the healthcheck report.

    Each node owns four lists of status leafs (ERROR / AT RISK / OK / INFO)
    and named sub-branches. render() prints the leafs by decreasing severity,
    then recurses into the branches, grouped by decorator ('none', 'h2', 'h1',
    'h0') and ordered by decreasing weight then case-insensitive name.
    The status counters are class attributes shared by every node so that
    show_summary() can display process-wide totals.
    """
    __root = None      # lazily-created singleton, see get_root()
    indent = " " * 4   # one indentation step in the rendered output
    
    # Process-wide counters, incremented by add_leaf() on any node
    AT_RISK_COUNT = 0
    ERROR_COUNT = 0
    OK_COUNT = 0
    INFO_COUNT = 0
    
    
    def __init__(self, level=0, decorator='none', weight=0):
        # type: (int, str, int) -> None
        self.__level = level          # depth of this node (drives indentation)
        self.__ok_leafs = list()      # each leaf is a (weight, formatted_text) tuple
        self.__warn_leafs = list()
        self.__err_leafs = list()
        self.__info_leafs = list()
        self.__branches = dict()      # branch name -> child Tree
        self.__decorator = decorator  # heading style: 'none', 'h0', 'h1' or 'h2'
        self.__weight = weight        # ordering weight (higher renders first)
    
    
    @staticmethod
    def get_root():
        # type: () -> Tree
        """Return the process-wide root node, creating it on first use."""
        if Tree.__root is None:
            Tree.__root = Tree()
        return Tree.__root
    
    
    def add_leaf(self, content, status='OK', weight=0):
        """Register one status line on this node and bump the global counter.

        'WARNING' is displayed as 'AT RISK'. A status outside of
        OK/WARNING/ERROR/INFO is formatted but silently dropped (existing
        behavior, kept as-is).
        """
        if status == 'OK':
            color = OK_COLOR
            Tree.OK_COUNT += 1
        elif status == 'WARNING':
            color = AT_RISK_COLOR
            status = 'AT RISK'
            Tree.AT_RISK_COUNT += 1
        elif status == 'ERROR':
            color = ERROR_COLOR
            Tree.ERROR_COUNT += 1
        elif status == 'INFO':
            color = INFO_COLOR
            Tree.INFO_COUNT += 1
        else:
            color = 0
        
        status_string = '\033[%dm%s:\033[0m' % (color, status)
        formatted = Tree.format_status_line(status_string, content)
        
        if status == 'OK':
            self.__ok_leafs.append((weight, formatted))
        elif status == 'AT RISK':
            self.__warn_leafs.append((weight, formatted))
        elif status == 'ERROR':
            self.__err_leafs.append((weight, formatted))
        elif status == 'INFO':
            self.__info_leafs.append((weight, formatted))
    
    
    def remove_branch(self, branch_name):
        """Drop a direct sub-branch; no-op when the name is unknown."""
        # direct membership test instead of the redundant `in self.__branches.keys()`
        if branch_name in self.__branches:
            del self.__branches[branch_name]
    
    
    def get_branch(self, branch_name, weight=0):
        # type: (str, int) -> Tree
        """Return the sub-branch with this name, creating it if needed.

        A 'h0>' / 'h1>' / 'h2>' prefix on the name selects the heading
        decorator used when the branch title is rendered. The weight is
        only applied at creation time.
        """
        if branch_name not in self.__branches:
            # startswith is the idiomatic (and cheaper) form of re.match('^hN>', ...)
            if branch_name.startswith('h0>'):
                deco = 'h0'
            elif branch_name.startswith('h1>'):
                deco = 'h1'
            elif branch_name.startswith('h2>'):
                deco = 'h2'
            else:
                deco = 'none'
            
            self.__branches[branch_name] = Tree(self.__level + 1, decorator=deco, weight=weight)
        
        return self.__branches[branch_name]
    
    
    def get_sub_branch(self, branch_list):
        """Walk (and create) a chain of branch names; return the deepest node."""
        current_branch = self
        for branch_name in branch_list:
            current_branch = current_branch.get_branch(branch_name)
        
        return current_branch
    
    
    def get_indent(self):
        """Leading whitespace matching this node's depth."""
        return Tree.indent * self.__level
    
    
    def render(self):
        """Print this node's leafs then its sub-branches, most severe first."""
        # Leafs: error, then warning, then ok, then info; each group is
        # sorted by its leaf weight.
        for leafs in (self.__err_leafs, self.__warn_leafs, self.__ok_leafs, self.__info_leafs):
            for (_, content) in sorted(leafs, key=lambda leaf: leaf[0]):
                HealthPrinter.write(self.get_indent() + content)
        
        # Split the branches by decorator: plain branches are printed before
        # the decorated (heading) ones.
        normal_dict = dict()
        h2_dict = dict()
        h1_dict = dict()
        h0_dict = dict()
        for branch_name, branch in self.__branches.iteritems():
            decorator = branch.get_decorator()
            if decorator == 'none':
                normal_dict[branch_name] = branch
            elif decorator == 'h2':
                h2_dict[branch_name] = branch
            elif decorator == 'h1':
                h1_dict[branch_name] = branch
            elif decorator == 'h0':
                h0_dict[branch_name] = branch
        
        
        # Order: decreasing node weight, ties broken by case-insensitive name
        def _cmp(c1, c2):
            k1, n1 = c1
            k2, n2 = c2
            # Beware: weights are negated so higher weight sorts first
            w1 = - n1.get_weight()
            w2 = - n2.get_weight()
            if w1 == w2:
                return cmp(k1.lower(), k2.lower())
            return cmp(w1, w2)
        
        
        def __get_sorted(d):
            elts = d.items()
            elts.sort(_cmp)
            return OrderedDict(elts)
        
        
        # Plain branches: just the name, then the branch content
        for branch_name, branch in __get_sorted(normal_dict).iteritems():
            HealthPrinter.write(self.get_indent() + branch_name)
            branch.render()
        
        # 'h2' headings: title underlined with '^'
        for branch_name, branch in __get_sorted(h2_dict).iteritems():
            title = branch_name[3:]  # strip the 'h2>' prefix
            HealthPrinter.write("\n")
            HealthPrinter.write(self.get_indent() + title)
            HealthPrinter.write(self.get_indent() + len(title) * '^')
            branch.render()
        
        # 'h1' and 'h0' headings: title framed by '-' lines
        for branch_name, branch in __get_sorted(h1_dict).iteritems():
            title = branch_name[3:]
            HealthPrinter.write("\n")
            HealthPrinter.write(self.get_indent() + len(title) * '-')
            HealthPrinter.write(self.get_indent() + title)
            HealthPrinter.write(self.get_indent() + len(title) * '-')
            branch.render()
        
        for branch_name, branch in __get_sorted(h0_dict).iteritems():
            title = branch_name[3:]
            HealthPrinter.write("\n")
            HealthPrinter.write(self.get_indent() + len(title) * '-')
            HealthPrinter.write(self.get_indent() + title)
            HealthPrinter.write(self.get_indent() + len(title) * '-')
            branch.render()
    
    
    def get_weight(self):
        """Ordering weight given at creation time."""
        return self.__weight
    
    
    def get_decorator(self):
        """Heading style of this branch ('none', 'h0', 'h1' or 'h2')."""
        return self.__decorator
    
    
    @staticmethod
    def format_status_line(status_string, content):
        """Pad the (colored) status label so contents align in one column."""
        indent_value = 18  # target width of the status column
        indent_real = indent_value - len(status_string)
        if indent_real < 1:
            indent_real = 1
        
        return status_string + " " * indent_real + content
    
    
    def show_summary(self):
        """Print the process-wide INFO / AT RISK / ERROR totals."""
        HealthPrinter.write('\nHealthcheck Summary ')
        
        summary = [
            {'status': 'INFO', 'color': INFO_COLOR, 'counter': self.INFO_COUNT},
            {'status': 'AT RISK', 'color': AT_RISK_COLOR, 'counter': self.AT_RISK_COUNT},
            {'status': 'ERROR', 'color': ERROR_COLOR, 'counter': self.ERROR_COUNT}
        ]
        # loop variable renamed from 'sum' to stop shadowing the builtin
        for entry in summary:
            HealthPrinter.write('%s\033[%dm%-7s\033[0m : %s' % (self.indent, entry['color'], entry['status'], entry['counter']))


# Module-level cache for parsed configurations (filled by ConfigUtils):
# reading and linking the arbiter/synchronizer files is expensive, so each
# configuration is parsed at most once per process.
config_utils_cache = {}


class ConfigUtils(object):
    """Namespace of static helpers that load, cache and query the local
    Shinken configuration (synchronizer and arbiter sides)."""
    
    
    @staticmethod
    def get_synchronizer_config():
        """Parse and return the synchronizer configuration.

        The parsed object is memoized in the module-level config_utils_cache,
        so the file parsing only happens on the first call.
        """
        conf = config_utils_cache.get('synchronizer_conf', None)
        if conf is None:
            # presumably silences parser output during the read — protect_stdout
            # is defined elsewhere in this file; confirm if debugging
            protect_stdout()
            conf = SynchronizerConfig()
            buf = conf.read_config([SYNCHRONIZER_CFG_PATH])
            raw_objects = conf.read_config_buf(buf)
            # NOTE: the object creation order matters for the linking step below
            conf.create_objects_for_type(raw_objects, 'synchronizer')
            conf.create_objects_for_type(raw_objects, 'module')
            conf.create_objects_for_type(raw_objects, 'source')
            conf.create_objects_for_type(raw_objects, 'listener')
            conf.create_objects_for_type(raw_objects, 'tagger')
            conf.create_objects_for_type(raw_objects, 'arbiter')
            conf.early_synchronizer_linking()
            conf.fill_default()
            unprotect_stdout()
            config_utils_cache['synchronizer_conf'] = conf
        return conf
    
    
    @staticmethod
    def get_local_synchronizer_config():
        """Return the first enabled synchronizer object, or None if none is enabled."""
        conf = ConfigUtils.get_synchronizer_config()
        for sync_config in conf.synchronizers:  # XXX only take the first element
            if sync_config.enabled:
                return sync_config
    
    
    @staticmethod
    def get_arbiter_config():
        """Parse and return the arbiter configuration (memoized like
        get_synchronizer_config)."""
        conf = config_utils_cache.get('arbiter_conf', None)
        if conf is None:
            protect_stdout()
            conf = Config()
            buf = conf.read_config([ARBITER_CFG_PATH])
            raw_objects = conf.read_config_buf(buf)
            # arbiters and modules must exist before early_arbiter_linking()
            conf.create_objects_for_type(raw_objects, 'arbiter')
            conf.create_objects_for_type(raw_objects, 'module')
            conf.early_arbiter_linking()
            conf.create_objects(raw_objects)
            conf.fill_default()
            unprotect_stdout()
            config_utils_cache['arbiter_conf'] = conf
        return conf
    
    
    @staticmethod
    def get_local_arbiter_config():
        """Return the arbiter object whose host_name matches this machine
        (FQDN or short hostname), or None if no match."""
        conf = ConfigUtils.get_arbiter_config()
        _fqdn = socket.getfqdn()
        _hostname = socket.gethostname()
        for arbiter_config in conf.arbiters:
            host_name = arbiter_config.host_name
            if host_name == _fqdn or host_name == _hostname:
                return arbiter_config
    
    
    @staticmethod
    def _get_my_arbiter():
        """Return the arbiter object representing this node, or None.

        Single-arbiter installations return the only (enabled) arbiter;
        otherwise each enabled arbiter is asked whether it is this host.
        """
        conf = ConfigUtils.get_arbiter_config()
        # in simple installation, just take the only arbiter defined (if enabled)
        if len(conf.arbiters) == 1:
            only_arbiter = list(conf.arbiters)[0]
            if only_arbiter.enabled:
                return only_arbiter
            return None
        
        # So we are an arbiter, but maybe we are a spare? also skip disabled arbiters
        for arbiter in conf.arbiters:
            if not arbiter.enabled:
                continue
            
            is_me = arbiter.is_me('')  # we are not forcing a name here, only use the host_name entry
            if is_me:
                return arbiter
        # Cannot find an arbiter at all
        return None
    
    
    @staticmethod
    def is_master_arbiter_node():
        """True when this node runs the master (non-spare) arbiter."""
        # If we are not an arbiter, we are sure we are not an arbiter master
        if not HostUtils.is_daemon_node('arbiter'):
            return False
        
        my_arbiter = ConfigUtils._get_my_arbiter()
        if my_arbiter is None:
            return False
        
        return not ConfigUtils.is_spare(my_arbiter)
    
    
    @staticmethod
    def is_spare(object):
        # NOTE(review): parameter name shadows the builtin 'object'; kept
        # as-is since renaming would change the public signature.
        """True when the object's 'spare' attribute is set (True or '1')."""
        return getattr(object, 'spare', False) in [True, '1']
    
    
    @staticmethod
    def is_spare_activated(object):
        """True when a spare satellite reports itself activated with a
        configuration loaded (based on its live raw stats)."""
        if ConfigUtils.is_spare(object):
            raw_stats = get_raw_stats_for_satellite(object)
            return raw_stats.get('activated', False) and raw_stats.get('have_conf', False)
        return False


##
# Why GraphiteStorage class is so complex?
#
# Functionality:
# -------------
# 1 - We must have an overall write status on targeted graphite servers. Targets are defined in 'graphite_perfdata' modules, and can point to a localhost address.
# 2 - We must also have an overall read status to target graphite servers. These servers are defined in 'webui' modules, and can point to a localhost address.
#
# We should be able to merge graphite servers that refer to the same machine, defined as localhost in 'webui' or 'graphite_perfdata', by comparing their address with the broker address.
# Without more advanced mechanism (use the broker daemon UUID) this will fail if the broker or graphite server, have multiple IP addresses.
#
# We have to merge five sources of information to reach our goal:
# - The configuration known by us(/etc/shinken/* files from my arbiter location), the one we trust,
# - The configuration known by other daemons (from the running arbiter and his daemons), maybe not in sync with /etc/shinken/*,
# - possibly multiple 'webui' modules edited by users (possibly wrong) and possibly duplicated (localhost),
# - possibly multiple 'graphite_perfdata' modules edited by users (possibly wrong) and possibly duplicated (localhost),
# - the API return for write-to-graphite read-from-graphite on each brokers daemons (possibly not in sync with /etc/shinken/*).
#
# About the brokers web API:
# -------------------------
# Brokers offer an API to request their read/write status to the graphite servers they know (defined in what they know as their 'webui' and 'graphite_perfdata' modules). BUT IN SOME CASES
# (not up to date arbiter info, arbiter conflict, arbiter not restarted), the returned information can be not relevant/outdated.
#
# We should also handle brokers that do not offer the read/write to/from graphite API because they are not up to date.
#
# Realms:
# ------
# 1 - We should be able to locate where the graphite server data for a realm are written (using the pair 'graphite_perfdata' modules / broker native realm), and also which graphite server is presumably hosting data for a realm
# (using 'webui' modules).
# 2 - We should raise error on inconsistent read/write access for a realm. (read data for a realm on a server we do not see the data is written)
#
class GraphiteStorage(object):
    
    def __init__(self, conf, daemons_alive_by_type):
        """Collect everything needed for the graphite read/write checks from
        the arbiter configuration and the list of alive daemons."""
        # Global flag: when off, nothing is ever sent to graphite
        self._process_performance_data = BoolProp().pythonize(getattr(conf, 'process_performance_data', '1'))
        self._all_graphite_perfdata_modules = [module for module in conf.modules if module.module_type == 'graphite_perfdata']
        self._webui_modules = [module for module in conf.modules if module.module_type == 'webui']
        # NOTE(review): 'spare' defaults to True here, so a broker *without*
        # the attribute is treated as a spare and excluded — confirm intended
        self._enabled_brokers = [broker for broker in conf.brokers if (getattr(broker, 'enabled', True) in [True, '1'] and (getattr(broker, 'spare', True) in [False, '0']))]
        
        # broker_name -> decoded read-status reply or error string, filled by
        # _check_graphite_read_for_broker()
        self._webui_checks = {}
        
        self._realms = conf.realms
        
        # realm name -> monitored host count, or None if the arbiter query failed
        self._host_count_in_realm = self._get_host_count_in_realm(daemons_alive_by_type)
        
        if self._host_count_in_realm is None:
            Tree.get_root().get_branch('Storage').get_branch('[graphite]').add_leaf("Unable to retrieve monitored hosts from Arbiter, Following errors may not be relevant", status="ERROR")
            # best-effort fallback: pretend every realm has one host
            self._host_count_in_realm = defaultdict(lambda: 1)
        
        # Create a default list of graphite services containing every required information to generate the tree.
        # Keys are the module name, or '<module>___<broker>' for localhost
        # modules (one entry per broker, since 'localhost' designates a
        # different machine for each broker).
        self._graphite_perfdata_modules = {}
        for _graphite_perfdata_module in self._all_graphite_perfdata_modules:
            
            involved_brokers = self._get_brokers_list_by_module_name(_graphite_perfdata_module.module_name)
            
            # module not loaded by any enabled broker: nothing to check
            if len(involved_brokers) == 0:
                continue
            
            if _graphite_perfdata_module.host in ('localhost', '127.0.0.1'):
                # If module is configured as localhost there will be an entry for each broker configured to have it.
                
                for broker in involved_brokers:
                    # Use broker.broker_name to identify this gperf service:
                    gperf_srv_key = '%s___%s' % (_graphite_perfdata_module.module_name, broker.broker_name)
                    self._graphite_perfdata_modules[gperf_srv_key] = GraphitePerfdataModule([broker], _graphite_perfdata_module, is_local=True)
            
            else:
                
                # Should contain connexion info about all brokers having this module in their configuration.
                self._graphite_perfdata_modules[_graphite_perfdata_module.module_name] = GraphitePerfdataModule(involved_brokers, _graphite_perfdata_module)
    
    
    def _get_host_count_in_realm(self, daemons_alive_by_type):
        """Ask the first alive arbiter for all hosts and count them per realm.

        Returns a dict realm_name -> host count, or None when the arbiter is
        missing, unreachable, or its reply cannot be decoded.
        """
        try:
            address, port, proto, is_spare = daemons_alive_by_type['arbiter'][0]
        except (KeyError, IndexError):
            # no alive arbiter known: cannot count anything
            return None
        
        http_query = '%s://%s:%d/get_objects_properties?table=hosts&fields=host_name,realm' % (proto, address, int(port))
        try:
            html = http_get(http_query, timeout=300)
            hosts_realms = json.loads(html)
            counts = {}
            for host in hosts_realms:
                # we skip host templates (they have no host_name)
                if not host['host_name']:
                    continue
                realm_name = host['realm'] or RealmUtils.default
                counts[realm_name] = counts.get(realm_name, 0) + 1
            return counts
        except Exception:
            # network error, http error, timeout or invalid json: the original
            # handlers all collapsed to the same "no data" answer
            return None
    
    
    def check(self):
        """Run the graphite write/read checks against every enabled broker and
        feed the resulting branches of the healthcheck tree.

        Fix: the two ``try: ... except Exception: raise`` wrappers (and a
        leftover commented ``# return``) were dead code — re-raising unchanged
        is exactly what happens without the wrapper — so they were removed.
        """
        # Query each broker; results are stored on the GraphitePerfdataModule
        # objects (write side) and in self._webui_checks (read side)
        for broker in self._enabled_brokers:
            self._check_graphite_write_for_broker(broker)
            self._check_graphite_read_for_broker(broker)
        
        if not self._process_performance_data:
            # Perfdata processing is globally disabled: nothing will ever be
            # written to graphite; warn about brokers still loading the module
            Tree.get_root().get_branch('Storage').get_branch('[graphite]').add_leaf('Process performance data are disable', status='INFO')
            if self._graphite_perfdata_modules:
                for graphite_server in self._graphite_perfdata_modules.itervalues():
                    for broker in graphite_server.get_brokers():
                        Tree.get_root().get_branch('Storage').get_branch('[graphite]').get_branch('Brokers: ').add_leaf('Broker %s have a %s module' % (broker.get('broker_name', 'unamed broker'), graphite_server.get_module_name()), status='WARNING')
            return
        
        self._populate_servers_tree()
        self._populate_realms_tree()
    
    
    def _get_brokers_having_module(self, module_name):
        # Return [(broker_name, is_spare)] for every enabled broker whose
        # 'modules' setting references the given module name.
        return [
            (broker.broker_name, ConfigUtils.is_spare(broker))
            for broker in self._enabled_brokers
            if module_name in [m.strip() for m in broker.modules.split(',')]
        ]
    
    
    ##
    # When the graphite module targets a localhost address, the registry key
    # embeds the broker name (see __init__) to tell the per-broker entries
    # apart; otherwise the plain module name is the key.
    def _get_server_from_check_reply(self, broker, graphite_check):
        module_name = graphite_check['module_name']
        if graphite_check['host'] in ['localhost', '127.0.0.1']:
            return self._graphite_perfdata_modules.get('%s___%s' % (module_name, broker.broker_name))
        return self._graphite_perfdata_modules.get('%s' % module_name)
    
    
    def _get_brokers_list_by_module_name(self, module_name):
        # All enabled brokers whose configuration references this module name.
        return [
            broker for broker in self._enabled_brokers
            if module_name in [m.strip() for m in broker.modules.split(',') if m.strip()]
        ]
    
    
    def _get_broker_from_name(self, name):
        # First enabled broker carrying this name; like the original
        # next()-on-generator form, raises StopIteration when absent.
        for broker in self._enabled_brokers:
            if broker.broker_name == name:
                return broker
        raise StopIteration()
    
    
    def _check_graphite_read_for_broker(self, broker):
        """Ask one broker for its read-from-graphite status.

        The result — the decoded JSON reply, or one of the sentinel strings
        'timeout' / 'cannot_connect' / 'not_implemented' / 'nok' — is stored
        in self._webui_checks under the broker name.
        """
        HealthCheckProgress.show_progress_message("Getting metrology server information from %s[%s] (read)" % (broker.broker_name, broker.address))
        
        _broker_address = broker.address
        _broker_port = int(broker.port)
        _broker_protocol = 'https' if getattr(broker, 'use_ssl', 0) in ['1', True] else 'http'
        
        # The timeout is broker's timeout + x calls threaded with 4 retry x 3 secondes timeout + 1 second of margin
        _broker_timeout = int(broker.timeout) + (4 * 3) + 1
        read_query = '%s://%s:%d/check_graphite_read_status' % (_broker_protocol, _broker_address, _broker_port)
        try:
            html = http_get(read_query, timeout=int(_broker_timeout))
            webui_checks = json.loads(html)
        except socket.timeout as exp:
            webui_checks = "timeout"
        
        except (urllib2.URLError, socket.error, ssl.SSLError) as exp:
            # In python 2.6 the socket timeout it was a socket.error instead of socket.timeout
            if 'timed out' in str(exp):
                webui_checks = "timeout"
            else:
                webui_checks = "cannot_connect"
        
        except urllib2.HTTPError as exp:
            # a 404 means the broker is too old to expose this endpoint
            http_code = exp.code
            webui_checks = "not_implemented" if http_code == 404 else 'nok'
        
        except Exception as exp:
            # anything else (including an unparsable reply) is treated as unreachable
            webui_checks = "cannot_connect"
        
        self._webui_checks[broker.broker_name] = webui_checks
    
    
    def _check_graphite_write_for_broker(self, broker):
        """Ask one broker for its write-to-graphite status and propagate the
        per-module results to the matching GraphitePerfdataModule entries.

        On a transport/protocol failure, every known graphite module gets the
        error string ('timeout', 'not_implemented', 'cannot_connect', 'nok')
        as its status for this broker.
        """
        HealthCheckProgress.show_progress_message("Getting metrology server information from %s[%s] (write)" % (broker.broker_name, broker.address))
        
        broker_modules = [x.strip() for x in broker.modules.split(',')]
        graphite_module_used = [mod.module_name for mod in self._all_graphite_perfdata_modules if mod.module_name in broker_modules]
        
        # this broker writes nothing to graphite: nothing to ask
        if not graphite_module_used:
            return
        
        # Connection information
        address = broker.address
        port = int(broker.port)
        if getattr(broker, 'use_ssl', 0) in ['1', True]:
            proto = 'https'
        else:
            proto = 'http'
        
        ##
        # Each call to check-graphite-write include 4 retry with 0.5 seconds
        # timeout (2 seconds overall on a complete failure for a module) per
        # graphite_perfdata module.
        timeout = int(broker.timeout) + len(graphite_module_used) * 2
        write_query = '%s://%s:%d/check_graphite_write_status' % (proto, address, port)
        graphite_checks = ''
        try:
            html = http_get(write_query, timeout=int(timeout))
            graphite_checks = json.loads(html)
        except socket.timeout as exp:
            # something like timeout
            graphite_checks = "timeout"
        except (urllib2.URLError, socket.error, ssl.SSLError) as exp:
            # In python 2.6 the socket timeout it was a socket.error instead of socket.timeout
            if 'timed out' in str(exp):
                graphite_checks = "timeout"
            else:
                graphite_checks = "cannot_connect"
        except urllib2.HTTPError as exp:
            # NOTE(review): error_msg is read but never used (drains the body)
            error_msg = exp.read()
            http_code = exp.code
            graphite_checks = "not_implemented" if http_code == 404 else 'nok'
        except Exception as exp:
            # bad json
            graphite_checks = "nok"
        
        # transport-level failure: flag every module with the error string
        if graphite_checks in ('timeout', 'not_implemented', 'cannot_connect', 'nok'):
            for graphite_write_server in self._graphite_perfdata_modules.itervalues():
                graphite_write_server.set_broker_status(broker, graphite_checks)
            return
        
        # valid reply but no module data: mark every module as not writable
        if graphite_checks['data'] == []:
            for graphite_write_server in self._graphite_perfdata_modules.itervalues():
                graphite_write_server.set_broker_status(broker, False)
            return
        
        for graphite_check in graphite_checks['data']:
            can_post = graphite_check.get('can_post_data', False)
            graphite_server = self._get_server_from_check_reply(broker, graphite_check)
            if graphite_server is None:
                # Unknown graphite_server, problem with concurrent arbiter.
                # Continue anyway
                continue
            else:
                graphite_server.set_broker_status(broker, can_post)
    
    
    def _populate_realms_tree(self):
        """Fill the per-realm 'Writing' and 'Reading' branches of the
        graphite storage report."""
        storage_branch = Tree.get_root().get_branch('Storage').get_branch('[graphite]')
        if not self._enabled_brokers:
            # nothing can write or read anything: just say so
            storage_branch.add_leaf('No broker enabled', 'INFO')
            return
        
        realms_branch = storage_branch.get_branch('Realm(s)')
        write_branch = realms_branch.get_branch('Writing - Data location(s):', weight=20)
        read_branch = realms_branch.get_branch('Reading - UI Visualisation configuration(s): The * means that webui will read all realms that it\'s broker manage')
        
        writers_by_realm = self._compute_write_infos(write_branch)
        self._compute_read_infos(read_branch, writers_by_realm)
    
    
    def _compute_write_infos(self, realms_branch_write):
        """Fill the 'Writing - Data location(s)' branch.

        Returns a dict realm -> set of (module_address, resolved_ip,
        broker_name, module_name) tuples describing which broker/module
        writes this realm's data and to which graphite backend.
        """
        all_realms = RealmUtils.get_all_realms()
        graphite_writers_location_by_realm = {}
        for graphite_server in self._graphite_perfdata_modules.itervalues():
            my_address = graphite_server.get_graphite_address()
            # resolve once so backends reached via different names compare equal
            real_address = HostUtils.get_host_by_name(my_address)
            _module_name = graphite_server.get_module_name()
            # realm_store_only (comma-separated) restricts the realms this module stores
            realm_store_only = [r.strip() for r in getattr(graphite_server.get_graphite_module(), 'realm_store_only', '').strip().split(',') if r.strip()]
            for realm in [r for r in realm_store_only if r not in all_realms]:
                realms_branch_write.add_leaf('Module %s has realm [%s] in parameter realm_store_only but this realm is unknown' % (graphite_server.get_module_name(), realm), status="ERROR")
            
            for broker in graphite_server.get_brokers():
                realms_managed = broker['realms_managed']
                # the module-level restriction wins over the broker realm list
                if realm_store_only:
                    realms = realm_store_only
                else:
                    realms = realms_managed
                for realm in realms:
                    if realm not in realms_managed and realm in all_realms:
                        realms_branch_write.add_leaf(
                            'Broker %s has module %s with realm [%s] in parameter realm_store_only, but this broker handle only realms : %s ' % (broker['broker_name'], graphite_server.get_module_name(), realm, ', '.join(realms_managed)),
                            status="ERROR")
                        continue
                    
                    broker_info = (my_address, real_address, broker['broker_name'], _module_name)
                    graphite_writers_location_by_realm[realm] = graphite_writers_location_by_realm.get(realm, set())
                    graphite_writers_location_by_realm[realm].add(broker_info)
        
        # For each realm we check if there are some broker who can write
        for realm in all_realms:
            broker_list = graphite_writers_location_by_realm.get(realm, set())
            # Count destinations by IP
            counters = Counter([_ip for _, _ip, _, _ in broker_list])
            # several brokers on the same destination => duplicated writes
            multiple_brokers_on_same_dest = [_ip for _ip, _nb in counters.iteritems() if _nb > 1]
            unique_dest = [_ip for _ip, _nb in counters.iteritems() if _nb == 1]
            
            if len(counters) == 0:
                realms_branch_write.add_leaf('[%s] There is no broker configured to write data for this realm. This realm might produce data that are not stored anywhere (no Graphite backend configured)' % realm, status="WARNING")
            
            if multiple_brokers_on_same_dest:
                _info = ['by the module [%s] of the broker [%s] on graphite backend %s%s' % (_module_name, _broker_name, _address, ' (%s)' % _ip if _ip != _address else '') for _address, _ip, _broker_name, _module_name in
                         graphite_writers_location_by_realm[realm] if _ip in multiple_brokers_on_same_dest]
                realms_branch_write.add_leaf("[%s] This realm is stored on same backends by multiple brokers : %s" % (realm, ' and '.join(_info)), status="ERROR")
            if unique_dest:
                _info = ['by the module [%s] of the broker [%s] on graphite backend %s%s' % (_module_name, _broker_name, _address, ' (%s)' % _ip if _ip != _address else '') for _address, _ip, _broker_name, _module_name in
                         graphite_writers_location_by_realm[realm] if _ip in unique_dest]
                realms_branch_write.add_leaf('Realm [%s] is stored %s' % (realm, ' and '.join(_info)))
        
        return graphite_writers_location_by_realm
    
    
    def _compute_read_infos(self, realms_branch_read, graphite_writers_location_by_realm):
        """Fill the 'Reading' branch: for each broker hosting a webui module,
        report per realm where the UI reads its graphite data from, and let
        _check_graphite_readers cross-check it against the write locations."""
        graphite_readers_by_broker = {}
        # All webui module names
        all_webui_module_names = [m.module_name.strip() for m in self._webui_modules]
        for broker in self._enabled_brokers:
            broker_name = broker.broker_name
            broker_realm = RealmUtils.get_realm_name(broker)
            broker_managed_realms = RealmUtils.get_handle_realms(broker)
            broker_address = broker.address
            graphite_readers_by_broker[broker_name] = {
                'realm'         : broker_realm,
                'managed_realms': broker_managed_realms,
                'webuis'        : []
            }
            
            for _module in self._webui_modules:
                _name = _module.module_name.strip()
                # NOTE(review): this inner loop compares _name against the very
                # list it was built from, so it is a no-op filter unless two
                # webui modules share the same name (then the body runs once
                # per duplicate) — confirm whether that is intended
                for _webui_name in all_webui_module_names:
                    if _name != _webui_name:
                        continue
                    
                    # keep only the webui modules this broker actually loads
                    for broker_module in broker.modules.split(','):
                        if _name != broker_module.strip():
                            continue
                        
                        backends_info = []
                        _webui_name = _module.module_name.strip()
                        # graphite_backends entries have the 'realm:host' form;
                        # malformed entries are silently dropped
                        _graphite_backends = [m.strip().split(':') for m in getattr(_module, 'graphite_backends', '').split(',') if m.strip() and len(m.strip().split(':')) == 2]
                        for graphite_backend_realm, graphite_backend_host in _graphite_backends:
                            info = {
                                'himself'   : False,
                                'host'      : HostUtils.get_host_by_name(graphite_backend_host),
                                'realm'     : graphite_backend_realm,
                                'webui_name': _webui_name,
                            }
                            # a localhost backend really means "this broker's own host"
                            if graphite_backend_host in ('127.0.0.1', 'localhost'):
                                info['himself'] = True
                                info['host'] = HostUtils.get_host_by_name(broker_address)
                            
                            backends_info.append(info)
                        
                        graphite_readers_by_broker[broker_name]['webuis'].append({
                            'webui_name'       : _webui_name,
                            'graphite_backends': backends_info,
                        })
        
        graphite_readers_messages = self._check_graphite_readers(graphite_readers_by_broker, graphite_writers_location_by_realm)
        
        # Ok we have all the data. Add it to the healthcheck branches.
        if graphite_readers_messages:
            # realm column width = longest realm display name
            # NOTE(review): max() raises ValueError if every message list is
            # empty while the outer dict is non-empty — confirm unreachable
            display_realm_names = []
            for _, broker_values in graphite_readers_messages.iteritems():
                for _, messages in broker_values.iteritems():
                    display_realm_names.extend([message['display_realm_name'] for message in messages])
            
            max_display_realm_name_len = max([len(i) for i in display_realm_names])
        else:
            max_display_realm_name_len = 30
        
        # We show only the broker that have a webui
        broker_to_show = [b for b, w in graphite_readers_messages.iteritems() if w]
        msg_format = '%-' + str(max_display_realm_name_len) + 's : %s'
        for broker_name in broker_to_show:
            webuis = graphite_readers_messages[broker_name]
            
            broker_branch = realms_branch_read.get_branch(broker_name)
            for _webui_name, messages in webuis.iteritems():
                webui_branch = broker_branch.get_branch(_webui_name)
                for message in sorted(messages, key=lambda r: r['display_realm_name']):
                    webui_branch.add_leaf(msg_format % (message['display_realm_name'], message['message']), status=message['status'])
    
    
    def _get_current_leaf_for_tree(self, leafs_write_info, leafs_read_info, graphite_server_ip):
        """Return the (write, read) aggregation dicts for one graphite server.
        
        Lazily creates an empty aggregation entry in *leafs_write_info* and
        *leafs_read_info* for *graphite_server_ip* on first access, so callers
        can accumulate statuses without checking for key existence themselves.
        """
        if leafs_write_info.get(graphite_server_ip) is None:
            # One bucket of broker lists per write status, plus server metadata
            fresh_write = dict((status, []) for status in ('ok', 'nok', 'not_implemented', 'cannot_connect', 'timeout', 'pending'))
            fresh_write['realms'] = set()
            fresh_write['port'] = None
            leafs_write_info[graphite_server_ip] = fresh_write
        
        if leafs_read_info.get(graphite_server_ip) is None:
            # Same idea for the read side, with graphite stats placeholders
            fresh_read = dict((status, []) for status in ('ok', 'nok', 'graphs_nok', 'not_implemented', 'cannot_connect', 'timeout', 'pending', 'graphite_need_update'))
            fresh_read['count'] = 0
            fresh_read[GRAPHITE_STATS_KEY.TIME_READ] = None
            fresh_read[GRAPHITE_STATS_KEY.LOCAL_TIME] = None
            leafs_read_info[graphite_server_ip] = fresh_read
        
        return leafs_write_info[graphite_server_ip], leafs_read_info[graphite_server_ip]
    
    
    def _populate_servers_tree(self):
        """Build the 'Storage > [graphite] > Server(s)' healthcheck branches.
        
        For every graphite server we aggregate:
          * the WRITE status, reported by each graphite_perfdata module
            (can the brokers push metrics to this server?), and
          * the READ status, reported by the webui modules of every enabled
            broker (can the webuis fetch graphs back from this server?),
        then emit one leaf per finding under the server's branch.
        """
        if not self._enabled_brokers:
            return
        
        servers_branch = Tree.get_root() \
            .get_branch('Storage') \
            .get_branch('[graphite]') \
            .get_branch('Server(s)')
        
        leafs_write_info = {}
        leafs_read_info = {}
        
        # WRITE PART: one aggregation entry per graphite server address
        for graphite_perfata_module in self._graphite_perfdata_modules.itervalues():
            
            graphite_server_ip = '%s:' % HostUtils.get_host_by_name(graphite_perfata_module.get_graphite_address())
            
            current_write_leaf, _ = self._get_current_leaf_for_tree(leafs_write_info, leafs_read_info, graphite_server_ip)
            
            current_write_leaf['ok'] += graphite_perfata_module.get_brokers_by_write_status(True)
            current_write_leaf['nok'] += graphite_perfata_module.get_brokers_by_write_status(False)
            current_write_leaf['not_implemented'] += graphite_perfata_module.get_brokers_by_write_status('not_implemented')
            current_write_leaf['cannot_connect'] += graphite_perfata_module.get_brokers_by_write_status('cannot_connect')
            current_write_leaf['timeout'] += graphite_perfata_module.get_brokers_by_write_status('timeout')
            current_write_leaf['realms'] = current_write_leaf['realms'] | graphite_perfata_module.get_stored_realms()
            current_write_leaf['port'] = graphite_perfata_module._port
        
        # READ PART: scan each enabled broker's webui modules and their backends
        for _broker in self._enabled_brokers:
            _broker_name = _broker.broker_name
            _broker_address = _broker.address
            _check_read_status = self._webui_checks.get(_broker_name)
            # When broker is pending (for a conf) It has no module_name, no host, no version, ...
            _broker_is_pending = len(_check_read_status) == 1 and _check_read_status[0].get('host') is None and _check_read_status[0].get('module_name') is None
            _broker_modules = [m.strip() for m in _broker.modules.split(',') if m.strip()]
            _webui_modules_on_broker = [m for m in self._webui_modules if m.module_name in _broker_modules]
            
            for _webui_module in _webui_modules_on_broker:
                webui_module_name = _webui_module.module_name
                graphite_backends = _webui_module.graphite_backends.split(',')
                for graphite_backend in graphite_backends:
                    # Only well-formed "realm:address" entries are considered
                    if len(graphite_backend.split(':')) != 2:
                        continue
                    
                    realm, _backend_address = graphite_backend.split(':')
                    # localhost in the module cfg really means "the broker's own host"
                    _graphite_address = _broker_address if _backend_address in ['localhost', '127.0.0.1'] else _backend_address
                    
                    graphite_server_ip = '%s:' % HostUtils.get_host_by_name(_graphite_address)
                    
                    _, current_read_leaf = self._get_current_leaf_for_tree(leafs_write_info, leafs_read_info, graphite_server_ip)
                    
                    if _broker_is_pending:
                        current_read_leaf['pending'].append((_broker_name, webui_module_name))
                        continue
                    
                    # NOTE(review): _check_read_status can also be a whole-broker
                    # string status instead of a per-module list — confirm producer.
                    elif _check_read_status == 'timeout':
                        current_read_leaf['timeout'].append((_broker_name, webui_module_name))
                        continue
                    
                    elif _check_read_status == 'not_implemented':
                        current_read_leaf['not_implemented'].append((_broker_name, webui_module_name))
                        continue
                    
                    elif _check_read_status == 'cannot_connect':
                        current_read_leaf['cannot_connect'].append((_broker_name, webui_module_name))
                        continue
                    
                    webui_check = next((mod for mod in _check_read_status if mod['host'] == _backend_address and mod['module_name'] == webui_module_name), None)
                    if webui_check is None:
                        current_read_leaf['nok'].append((_broker_name, webui_module_name))
                        continue
                    
                    module_graphs_errors = webui_check.get('errors', 0)
                    
                    if module_graphs_errors:
                        # BUGFIX: was webui_check['graphs_nok'].append(...), but
                        # webui_check is the per-module status dict and has no
                        # 'graphs_nok' key (KeyError). The aggregate read leaf is
                        # the structure consumed by the reporting loop below.
                        current_read_leaf['graphs_nok'].append((_broker_name, module_graphs_errors))
                    
                    elif webui_check['reachable']:
                        current_read_leaf['ok'].append((_broker_name, webui_module_name))
                        if webui_check['version'] == GRAPHITE_API_VERSION:
                            current_read_leaf['count'] = webui_check.get('nb_hosts_clusters', 0)
                            current_read_leaf[GRAPHITE_STATS_KEY.LOCAL_TIME] = webui_check.get(GRAPHITE_STATS_KEY.LOCAL_TIME, None)
                            current_read_leaf[GRAPHITE_STATS_KEY.TIME_READ] = webui_check.get(GRAPHITE_STATS_KEY.TIME_READ, -1)
                        else:
                            current_read_leaf['graphite_need_update'].append((_broker_name, webui_module_name))
                    else:
                        current_read_leaf['nok'].append((_broker_name, webui_module_name))
        
        # Format messages and set statuses for the WRITE leafs.
        for server_key, server_status in leafs_write_info.iteritems():
            server_branch = servers_branch.get_branch(server_key)
            sub_leafs = []
            if server_status['port']:
                sub_leafs.append(('INFO', 'Listen on port %s ' % server_status['port']))
            if server_status['realms']:
                sub_leafs.append(('INFO', 'Write data for realms : %s ' % ', '.join(sorted(set(server_status['realms'])))))
            
            # Each entry is a (broker display name, module name) tuple
            for broker_display_name, module_webui_name in server_status['ok']:
                sub_leafs.append(('OK', 'Get data from module %s on broker %s' % (module_webui_name, broker_display_name)))
            
            for broker_display_name, module_webui_name in server_status['not_implemented']:
                sub_leafs.append(('ERROR', 'The broker [%s] seem to be up to date' % (broker_display_name)))
            
            for broker_display_name, module_webui_name in server_status['cannot_connect']:
                sub_leafs.append(('ERROR', 'Unable to connect to broker [%s] (check the broker logs for more details)' % (broker_display_name)))
            
            for broker_display_name, module_webui_name in server_status['timeout']:
                sub_leafs.append(('WARNING', 'Cannot connect to broker [%s] (cause timeout)' % (broker_display_name)))
            
            for broker_display_name, module_webui_name in server_status['nok']:
                sub_leafs.append(('ERROR', 'The broker [%s] is reachable, but it cannot contact the graphite server defined in module [%s]. Check firewalls or carbon-cache service' % (broker_display_name, module_webui_name)))
            
            for broker_display_name, module_webui_name in server_status['pending']:
                sub_leafs.append(('WARNING', 'The Broker [%s] have not yet received his configuration' % (broker_display_name)))
            
            # NOTE(review): 'connexion' vs the read branch's 'connection' — kept
            # as-is since the branch title is a displayed/runtime string.
            write_status = 'Write connexion status'
            branch_write_status = server_branch.get_branch(write_status, 1)
            if sub_leafs:
                for sub_leaf_status, sub_leaf_message in sub_leafs:
                    branch_write_status.add_leaf(sub_leaf_message, sub_leaf_status)
            else:
                branch_write_status.add_leaf('No module graphite_perfdata is configured to write data to this server', status='WARNING')
        
        # Format messages and set statuses for the READ leafs.
        for server_key, server_status in leafs_read_info.iteritems():
            server_branch = servers_branch.get_branch(server_key)
            sub_leafs = []
            nb_host_with_metric = server_status['count']
            _read_time = server_status[GRAPHITE_STATS_KEY.TIME_READ]
            _server_time = server_status[GRAPHITE_STATS_KEY.LOCAL_TIME]
            
            # Warn when the gatherer's metrics-count stats file looks stale
            metric_file_is_too_old = ''
            if _server_time is None or _read_time == -1:
                metric_file_is_too_old = 'The graphite stats file "%s" seems to be too old. You must look at the Gatherer log (/var/log/shinken/gatherer.log). ' \
                                         'Then, only if need, you can restart the Gatherer with "service shinken-gatherer restart".' % (NB_METRICS_COUNT_FILE)
            else:
                stats_file_age = _server_time - _read_time
                if stats_file_age > GRAPHITE_STATS_FILE_IS_TOO_OLD:
                    metric_file_is_too_old = 'The graphite stats file "%s" seems to be too old (not update since %s > %ss). You must look at the Gatherer log (/var/log/shinken/gatherer.log). ' \
                                             'Then, only if need, you can restart the Gatherer with "service shinken-gatherer restart".' % (
                                                 NB_METRICS_COUNT_FILE,
                                                 format_t_into_dhms_format(stats_file_age),
                                                 GRAPHITE_STATS_FILE_IS_TOO_OLD)
            
            if metric_file_is_too_old:
                sub_leafs.append(('WARNING', metric_file_is_too_old))
            
            for broker_name, webui_name in server_status['ok']:
                sub_leafs.append(('OK', 'The webui [%s] on broker [%s] can read data' % (webui_name, broker_name)))
            
            for broker_name, webui_name in server_status['nok']:
                sub_leafs.append(('ERROR', 'The webui [%s] on broker [%s] is reachable but it can not read data on graphite server. Check Httpd VirtualHost or firewall' % (webui_name, broker_name)))
            
            for broker_name, webui_name in server_status['not_implemented']:
                sub_leafs.append(('ERROR', 'The webui [%s] on broker [%s] need to contact this server but the broker doesn\'t seem to be up to date' % (webui_name, broker_name)))
            
            for broker_name, webui_name in server_status['cannot_connect']:
                sub_leafs.append(('ERROR', 'The webui [%s] on broker [%s] is unreachable. Cannot check the reading' % (webui_name, broker_name)))
            
            for broker_name, webui_name in server_status['timeout']:
                sub_leafs.append(('ERROR', 'The webui [%s] on broker [%s] is unreachable (cause : timeout)' % (webui_name, broker_name)))
            
            for broker_name, webui_name in server_status['pending']:
                sub_leafs.append(('WARNING', 'The webui [%s] on broker [%s] need to contact this server but the broker have not yet received his configuration' % (webui_name, broker_name)))
            
            for broker_name, webui_name in server_status['graphite_need_update']:
                sub_leafs.append(('WARNING', 'The webui [%s] on broker [%s] is reachable but the graphite server doesn\'t seem to be up to date' % (webui_name, broker_name)))
            
            if server_status['ok'] and nb_host_with_metric:
                sub_leafs.append(('INFO', '%i hosts (with metrics) can be requested' % nb_host_with_metric))
            
            for graphs_errors in server_status['graphs_nok']:
                sub_leafs.append(('ERROR', 'On %s graphite read error happened %s times during the last 24h, check the /opt/graphite/storage/whisper/ read access on the graphite host' % (graphs_errors[0], graphs_errors[1])))
            
            branch_read_status = server_branch.get_branch('Read connection status')
            if sub_leafs:
                for sub_leaf_status, sub_leaf_message in sub_leafs:
                    branch_read_status.add_leaf(sub_leaf_message, sub_leaf_status)
            else:
                branch_read_status.add_leaf('No webui is configured to read data from this server.', status='WARNING')
    
    
    def _is_equal_broker_webui_graphit_configuration(self, broker_01_leafs, broker_02_leafs):
        if len(broker_01_leafs) != len(broker_02_leafs):
            return False
        for broker_01_leaf, broker_02_leaf in itertools.izip(broker_01_leafs, broker_02_leafs):
            if broker_01_leaf['host'] != broker_02_leaf['host']:
                return False
            if broker_01_leaf['realm_rule'] != broker_02_leaf['realm_rule']:
                return False
        
        return True
    
    
    # check webui graphite reader
    def _check_graphite_readers(self, graphite_readers_by_broker, graphite_writers_location_by_realm):
        """Build, per broker and webui, the status messages describing whether
        each realm's graphite data can actually be read.
        
        Returns a dict: {broker_branch_name: {webui_name: [message dicts]}}.
        Brokers without any webui entry are dropped from the result.
        """
        all_realms = RealmUtils.get_all_realms()
        readers_report = {}
        
        for broker_name, broker_infos in graphite_readers_by_broker.iteritems():
            managed_realms = broker_infos['managed_realms']
            branch_name = '%s (%s):' % (broker_name, broker_infos['realm'])
            webuis_report = {}
            readers_report[branch_name] = webuis_report
            
            for webui_info in broker_infos['webuis']:
                backends = webui_info['graphite_backends']
                messages = []
                webuis_report[webui_info['webui_name']] = messages
                
                # Split the backend rules: realm '*' means "any realm"
                specific_readers = [b for b in backends if b['realm'] != '*']
                generic_readers = [b for b in backends if b['realm'] == '*']
                
                for backend in specific_readers:
                    messages.append(self._check_reader(backend['realm'], backend['host'], backend['himself'], broker_name, all_realms, managed_realms, graphite_writers_location_by_realm))
                
                # Realms already covered by a specific rule are not re-checked by '*'
                specific_realms = [b['realm'] for b in specific_readers]
                
                if generic_readers:
                    for reading_realm in [r for r in managed_realms if r not in specific_realms]:
                        for backend in backends:
                            messages.append(self._check_reader(reading_realm, backend['host'], backend['himself'], broker_name, all_realms, managed_realms, graphite_writers_location_by_realm, add_star=True))
                else:
                    # No '*' rule: every managed realm without a specific rule is unreadable
                    for missing_realm in [r for r in managed_realms if r not in specific_realms]:
                        if self._host_count_in_realm.get(missing_realm, 0) <= 0:
                            messages.append({
                                'display_realm_name': 'Realm [%s]' % missing_realm,
                                'status'            : 'OK',
                                'message'           : 'There are no hosts in this realm',
                            })
                        else:
                            messages.append({
                                'display_realm_name': 'Realm [%s]' % missing_realm,
                                'status'            : 'ERROR',
                                'message'           : 'No graphics will be displayed as this realm is not present in "graphite_backends" parameter',
                            })
            
            if not webuis_report:
                readers_report.pop(branch_name, None)
        return readers_report
    
    
    def _check_reader(self, reading_realm, reading_host, himself, broker_name, all_realms, broker_managed_realms, graphite_writers_location_by_realm, add_star=False):
        himself_string = ' (itself)' if himself else ''
        display_realm_name = 'Realm [%s] (*)' % reading_realm if add_star else 'Realm [%s]' % reading_realm
        monitored_hosts_count = self._host_count_in_realm.get(reading_realm, 0)
        
        if reading_realm not in all_realms:
            message = {
                'display_realm_name': display_realm_name,
                'status'            : 'ERROR',
                'message'           : 'This realm is an unknown realm. Know realms are : %s' % (', '.join(all_realms))
            }
        elif monitored_hosts_count <= 0:
            message = {
                'display_realm_name': display_realm_name,
                'status'            : 'OK',
                'message'           : 'There are no hosts in this realm.'
            }
        elif reading_realm not in broker_managed_realms:
            message = {
                'display_realm_name': display_realm_name,
                'status'            : 'ERROR',
                'message'           : "This realm can't be accessed by the broker %s. It can only access to : %s" % (broker_name, ', '.join(broker_managed_realms))
            }
        elif reading_realm not in graphite_writers_location_by_realm:
            message = {
                'display_realm_name': display_realm_name,
                'status'            : 'ERROR',
                'message'           : 'This realm does not have any broker which write graphite data'
            }
        elif reading_host not in [ls[1] for ls in graphite_writers_location_by_realm[reading_realm]]:
            is_a_graphite_server = False
            for writers_locations in graphite_writers_location_by_realm.itervalues():
                for writers_location in writers_locations:
                    if writers_location[1] == reading_host:
                        is_a_graphite_server = True
                        break
                if is_a_graphite_server:
                    break
            
            if is_a_graphite_server:
                message = {
                    'display_realm_name': display_realm_name,
                    'status'            : 'ERROR',
                    'message'           : 'The Graphite server on %s%s does not contain data for this realm' % (reading_host, himself_string)
                }
            else:
                message = {
                    'display_realm_name': display_realm_name,
                    'status'            : 'ERROR',
                    'message'           : 'The server on %s%s is not a known Graphite server : There is no module graphite_perfdata that writes data on backend %s' % (reading_host, himself_string, reading_host)
                }
        else:  # It is ok
            message = {
                'display_realm_name': display_realm_name,
                'status'            : 'OK',
                'message'           : 'The Graphite server on %s%s will be used' % (reading_host, himself_string)
            }
        
        return message


class GraphitePerfdataModule(object):
    """Wraps one graphite_perfdata module configuration together with the
    brokers that load it, and exposes the accessors used when building the
    graphite healthcheck tree.
    """
    
    
    def __init__(self, brokers, graphite_module, is_local=False):
        self._module = graphite_module
        self._port = graphite_module.port
        self._stored_realms = getattr(graphite_module, 'realm_store_only', None)
        self._is_local = is_local
        self._brokers = []
        
        # If the address in the cfg file for graphite_perfdata is 127.0.0.1 or localhost,
        # the host will be the broker name and the address will be the broker address
        # (can be the dns name). Else, host = address = the one defined in the cfg.
        if self._is_local:
            first_broker = brokers[0]
            self._graphite_host = first_broker.broker_name
            self._graphite_address = first_broker.address
        else:
            self._graphite_host = graphite_module.host
            self._graphite_address = graphite_module.host
        
        for _broker in brokers:
            self._brokers.append({
                'broker'         : _broker,
                'broker_name'    : _broker.broker_name,
                'write_reachable': False,
                'spare'          : ConfigUtils.is_spare(_broker),
                'activated'      : ConfigUtils.is_spare_activated(_broker),
                'realms_managed' : RealmUtils.get_handle_realms(_broker)
            })
    
    
    def get_graphite_module(self):
        """Return the raw graphite_perfdata module object."""
        return self._module
    
    
    def get_module_name(self):
        """Return the module_name of the wrapped module."""
        return self._module.module_name
    
    
    def get_stored_realms(self):
        """Return the set of realms whose data is actually stored on this server.
        
        Starts from the realms managed by the brokers writing here, then
        applies the optional realm_store_only restriction of the module.
        """
        # Get known realms, extracted from brokers logging to this graphite server
        known_realms = set()
        for _broker in self._brokers:
            known_realms.update(_broker['realms_managed'])
        
        # Extract graphite_perfdata.realm_store_only option
        store_only = getattr(self._module, 'realm_store_only', None)
        
        if store_only is None:
            # Not restricted: we store everything that comes to us
            return known_realms
        
        store_only_list = [token.strip() for token in store_only.split(',')]
        if not store_only_list or store_only_list[0] == '':
            # Empty or spaces-only value: nothing will be logged (it is an error)
            return set()
        
        # Only the realms both restricted-to and actually managed
        return set(store_only_list).intersection(known_realms)
    
    
    def get_known_realms(self):
        """Return the (unique, unordered) realm names of the attached brokers."""
        realms = set()
        for entry in self._brokers:
            realms.add(RealmUtils.get_realm_name(entry['broker']))
        return list(realms)
    
    
    def set_broker_status(self, broker, status):
        """Record the write status of *broker* (no-op for unknown brokers)."""
        for entry in self._brokers:
            if entry['broker_name'] == broker.broker_name:
                entry['write_reachable'] = status
                break
    
    
    def get_graphite_address(self):
        """Return the network address of the graphite server."""
        return self._graphite_address
    
    
    def get_graphite_host(self):
        """Return the display host of the graphite server."""
        return self._graphite_host
    
    
    def is_local(self):
        """True when the module points at the broker's own host."""
        return self._is_local
    
    
    def get_brokers_by_write_status(self, status):
        """Return (display name, module name) tuples for brokers whose
        write status equals *status*.
        """
        matching = []
        for entry in self._brokers:
            if entry['write_reachable'] == status:
                display = _get_daemon_name(entry['broker_name'], entry['spare'], entry['activated'])
                matching.append((display, self._module.module_name))
        return matching
    
    
    def get_brokers(self):
        """Return the internal list of broker entry dicts."""
        return self._brokers


def get_color_bloc(s, color):
    """Return *s* wrapped in ANSI escape sequences for the given color code."""
    start = '\033[%dm' % color
    end = '\033[0m'
    return '%s%s%s' % (start, s, end)


def _get_daemon_name(daemon_name, is_spare=False, is_activated=False):
    if is_spare:
        result = "%s %s" % (daemon_name, get_color_bloc('(SPARE)', SPARE_COLOR))
        if is_activated:
            result = "%s %s" % (result, get_color_bloc('(RUNNING)', OK_COLOR))
        return result
    else:
        return daemon_name


def protect_stdout():
    """Silence stdout (redirect to devnull) unless DEBUG mode is enabled."""
    if DEBUG:
        return
    sys.stdout = devnull


def unprotect_stdout():
    # Restore stdout from the module-global `stdout` — presumably the real
    # sys.stdout saved before protect_stdout() redirected it; confirm where
    # `stdout` is initialized (outside this view).
    sys.stdout = stdout


def get_exit_code():
    """Map the accumulated tree counters to the process exit code:
    1 for errors, 2 for at-risk warnings, 0 otherwise.
    """
    if Tree.ERROR_COUNT > 0:
        return 1
    if Tree.AT_RISK_COUNT > 0:
        return 2
    return 0


def manage_exception(exp, update=False):
    """Format an exception into a display string.
    
    Prefixes the HTTP status (when *exp* carries a truthy ``code``), uses
    ``exp.reason`` as the message when present (falling back to str(exp)),
    and appends an update hint on a 404 when *update* is True.
    """
    code_part = ''
    suffix = ''
    exp_code = getattr(exp, 'code', None)
    if exp_code:
        code_part = 'HTTP ERROR %s : ' % str(exp_code)
        if update and exp_code == 404:
            suffix = '. Please update.'
    reason = getattr(exp, 'reason', None)
    if reason:
        body = str(reason)
    else:
        body = str(exp)
    return '%s%s%s' % (code_part, body, suffix)


#######################################################################################
#    ________  ________________ _______
#   / ____/ / / / ____/ ____/ //_/ ___/
#  / /   / /_/ / __/ / /   / ,<  \__ \
# / /___/ __  / /___/ /___/ /| |___/ /
# \____/_/ /_/_____/\____/_/ |_/____/
#######################################################################################
# Checks libraries
def check_libs():
    """Check that the required (and optional) Python libraries are importable,
    reporting each one in the healthcheck tree.
    """
    logger.debug('check_libs::start')
    mandatory_libs = ['pymongo', 'pycurl', 'gevent', 'ldap']
    optional_libs = []
    for name in mandatory_libs:
        check_lib(name, 'error')
    for name in optional_libs:
        check_lib(name, 'warning')
    
    # Special check for VMWare sdk under redhat/centos7
    check_vmware_sdk()


def check_vmware_sdk():
    """Guard-check for the VMWare SDK, only relevant on RedHat/CentOS 7.x.
    
    NOTE(review): every path here either returns early or falls off the end;
    when the SDK *is* installed on a 7.x system the function currently does
    nothing. This looks like an incomplete check — confirm intended behavior.
    """
    # Only meaningful on RedHat/CentOS systems
    if not localsystem.is_centos_redhat():
        logger.debug('check_vmware_sdk:: not redhat or centos, skipping')
        return
    _version = localsystem.get_linux_version()
    # and only the version 7.X
    if not _version.startswith('7.'):
        logger.debug('check_vmware_sdk:: not 7.X, skipping')
        return
    
    if not localsystem.is_vmware_sdk_installed():
        logger.debug('check_vmware_sdk:: no SDK installed, skipping')
        return


def check_lib(lib_name, level):
    """Try to import *lib_name* and report the result in the healthcheck tree.
    
    *level* selects the severity of a missing library: 'error' marks it
    mandatory, anything else downgrades it to a warning.
    """
    logger.debug('check_lib::start')
    try:
        imported_module = __import__(lib_name)
    except ImportError as exp:
        lib_branch = Tree.get_root() \
            .get_branch('Local libraries') \
            .get_branch('[%s]' % lib_name)
        if level == 'error':
            lib_branch.add_leaf('Missing Library %s. It is mandatory. (%s)' % (lib_name, str(exp)), status='ERROR')
        else:
            lib_branch.add_leaf('Missing Library %s. Shinken will have reduced features/performance without it. (%s)' % (lib_name, str(exp)), status='WARNING')
        return
    # Libraries expose their version under different attribute names
    version = getattr(imported_module, 'version', getattr(imported_module, '__version__', 'version not found'))
    Tree.get_root() \
        .get_branch('Local libraries') \
        .get_branch('[%s]' % lib_name) \
        .add_leaf('Library %s is available. Version: %s' % (lib_name, version))


# Checks mongo

def check_mongodb(uri, branch):
    """Check that the mongodb server at *uri* is reachable and that the
    pymongo/bson C extensions are installed, adding one leaf per finding
    under *branch*.
    """
    logger.debug('check_mongodb::start')
    ok = True
    try:
        # NOTE(review): these import paths match the legacy pymongo 2.x layout;
        # modern pymongo exposes the errors in pymongo.errors — confirm the
        # installed version before changing them.
        from pymongo.connection import Connection
        from pymongo.mongo_client import ConnectionFailure, ConfigurationError
    
    except ImportError as exp:
        branch.add_leaf('Cannot connect to mongodb server: Connection import fail (%s)' % exp, status='ERROR')
        return
    
    try:
        Connection(uri)
    except ConnectionFailure as exp:
        branch.add_leaf('Cannot connect to mongodb server: %s   (%s)' % (uri, exp), status='ERROR')
        ok = False
    except ConfigurationError as exp:
        branch.add_leaf('Mongo configuration error: %s    (%s)' % (uri, exp), status='ERROR')
        ok = False
    # Any other exception propagates to the caller unchanged; the previous
    # explicit "except Exception as exp: raise" clause was a no-op and has
    # been removed.
    
    import pymongo
    import bson
    # Without the C extensions pymongo/bson run much slower (pure python)
    if not pymongo.has_c():
        branch.add_leaf('Your pymongo lib has not the C extension installed', status='ERROR')
        ok = False
    if not bson.has_c():
        branch.add_leaf('Your bson lib has not the C extension installed', status='ERROR')
        ok = False
    if ok:
        branch.add_leaf('Mongodb server is available at: %s' % uri, status='OK')


# Checks licence
def check_licence():
    """Validate the license key and report usage versus the node limit.
    
    Fills the 'License key' branch of the healthcheck tree with the key
    validity, format, expiration and node-count status, and stores the raw
    key data into the module-global ``licence_data``.
    """
    global licence_data
    logger.debug('check_keys::start')
    # are_keys_valid() prints on stdout; silence it during the call
    protect_stdout()
    d = are_keys_valid()
    licence_data = d  # keep it in global :)
    unprotect_stdout()
    
    if 'are_valid' not in d:
        Tree.get_root() \
            .get_branch('License key') \
            .add_leaf('Old license system, please update', status='ERROR')
        return
    
    # Global test for key
    if d['are_valid']:
        Tree.get_root() \
            .get_branch('License key') \
            .add_leaf('The license key is valid')
    else:
        Tree.get_root() \
            .get_branch('License key') \
            .add_leaf('The license key is invalid', status='ERROR')
    # duplicated is BAD
    if d['is_duplicated_trial']:
        Tree.get_root() \
            .get_branch('License key') \
            .add_leaf('There is a second testing key. Only one is allowed.', status='ERROR')
    
    if not d['is_format_valid'] and d['is_present']:
        Tree.get_root() \
            .get_branch('License key') \
            .add_leaf('The key format is invalid.', status='ERROR')
    
    if not d['is_present']:
        Tree.get_root() \
            .get_branch('License key') \
            .add_leaf('No license key. Trial mode Node limits : 20', status='ERROR')
    
    else:
        # Expired?
        now = int(time.time())
        end = d['creation_time'] + d['duration']
        nb_days = int((end - now) / 86400)
        s_start = time.strftime("%Y-%m-%d", time.gmtime(d['creation_time']))
        s_end = time.strftime("%Y-%m-%d", time.gmtime(end))
        if now > end:
            Tree.get_root() \
                .get_branch('License key') \
                .add_leaf('The license key is expired', status='ERROR')
        elif now > end - 86400 * 7:  # one week alert
            Tree.get_root() \
                .get_branch('License key') \
                .add_leaf('The license key (Customer:%s Node limit:%d) will soon reach the expiration date (start:%s end:%s => %d days remaining)' % (d['customer'], d['nodes_limit'], s_start, s_end, nb_days), status='WARNING')
        
        else:
            Tree.get_root() \
                .get_branch('License key') \
                .add_leaf('The license key (Customer:%s Node limit:%d) is valid (start:%s end:%s => %d days remaining)' % (d['customer'], d['nodes_limit'], s_start, s_end, nb_days))
    
    nodes_limit = d['nodes_limit']
    
    # First check synchronizer :
    if server_type_global or HostUtils.is_daemon_node('synchronizer'):
        synchronizer_config = ConfigUtils.get_local_synchronizer_config()
        if not synchronizer_config:
            Tree.get_root() \
                .get_branch('License key') \
                .add_leaf('No synchronizer daemon enabled', status='ERROR')
            return None
        # compare with the number of elements in the key
        nb_nodes = get_number_nodes(synchronizer_config)
        if nb_nodes != 0 and nb_nodes is not None:
            # BUGFIX: was "nodes_limit is 0" — identity comparison against an
            # int literal is CPython-implementation-dependent; use equality.
            if nodes_limit == 0:  # ok you are in unlimited mode
                Tree.get_root() \
                    .get_branch('License key') \
                    .add_leaf('Nodes Used: %d / Limits: unlimited' % nb_nodes)
            elif nb_nodes <= int(nodes_limit * 0.8):  # If below 80% of the nodes number, that cool
                Tree.get_root() \
                    .get_branch('License key') \
                    .add_leaf('Nodes Used: %d / Limits: %d ' % (nb_nodes, nodes_limit))
            elif nb_nodes <= nodes_limit:  # ok between 80% and 100% allowed, warning
                Tree.get_root() \
                    .get_branch('License key') \
                    .add_leaf('Nodes Used: %d / Limits: %d => LESS than 20%% left ( %s )' % (nb_nodes, nodes_limit, get_color_bloc('%d' % (nodes_limit - nb_nodes), AT_RISK_COLOR)), status='WARNING')
            else:  # oups, over the limit!
                Tree.get_root() \
                    .get_branch('License key') \
                    .add_leaf('Nodes EXCEEDED ( %s ) Nodes Used : %d / Limits: %d' % (get_color_bloc('+ %d' % (nb_nodes - nodes_limit), ERROR_COLOR), nb_nodes, nodes_limit), status='ERROR')


def get_number_nodes(synchronizer_config):
    """Ask the synchronizer daemon for the number of enabled hosts it manages.

    :param synchronizer_config: synchronizer daemon configuration (address/port/use_ssl)
    :return: the count as decoded from JSON, or None when the daemon cannot be
             reached or returns unparsable data (an ERROR leaf is added to the
             'License key' branch in both failure cases)
    """
    logger.debug('get_number_nodes::start')
    proto = 'http'
    if getattr(synchronizer_config, 'use_ssl', 0) in ['1', True]:
        proto = 'https'
    address = synchronizer_config.address
    port = synchronizer_config.port
    uri = '%s://%s:%d/get_host_enabled_count' % (proto, address, port)
    try:
        html = http_get(uri, timeout=int(DEFAULT_TIMEOUT))
    except Exception as exp:
        Tree.get_root() \
            .get_branch('License key') \
            .add_leaf('Cannot contact the synchronizer daemon (%s)' % exp, status='ERROR')
        return None
    
    try:
        return json.loads(html)
    except ValueError as exp:
        # BUG FIX: a malformed answer used to crash the whole health check here;
        # report it like a contact failure so the caller's None handling applies.
        Tree.get_root() \
            .get_branch('License key') \
            .add_leaf('Cannot decode the synchronizer daemon answer (%s)' % exp, status='ERROR')
        return None


def check_synchronizer():
    """Run all health checks for the local synchronizer daemon: mongodb,
    auth_secret, ping, web UI, protected fields (encryption), context and
    modules. Results are added as leaves under the synchronizer branch.
    """
    logger.debug('check_synchronizer::start')
    
    protect_stdout()
    local_synchronizer_config = ConfigUtils.get_local_synchronizer_config()
    if not local_synchronizer_config:
        return  # no synchronizer enabled on this server: nothing to check
    
    address = local_synchronizer_config.address
    port = local_synchronizer_config.port
    timeout = DEFAULT_TIMEOUT
    conf = ConfigUtils.get_synchronizer_config()
    use_ssl = getattr(conf, 'http_use_ssl', '0')
    proto = 'https' if use_ssl == '1' else 'http'
    mongodb_uri = conf.mongodb_uri
    cfg_path = SYNCHRONIZER_CFG_PATH
    
    raw_stats = get_raw_stats(proto, address, port, timeout, silent=True)
    
    # Spare state drives how the daemon name is displayed in the tree
    is_spare = raw_stats.get('spare', False)
    is_spare_activated = raw_stats.get('activated', False) and raw_stats.get('have_conf', False)
    
    if server_type_global:
        branch = TreeUtils.get_satellite_branch(conf.synchronizers[0])
    else:
        branch = Tree.get_root() \
            .get_branch('Local daemons') \
            .get_branch('[ synchronizers ]') \
            .get_branch('[%s: %s]' % ('synchronizer', _get_daemon_name(local_synchronizer_config.synchronizer_name, is_spare, is_spare_activated)))
    
    check_mongodb(mongodb_uri, branch)
    
    if not hasattr(conf, 'auth_secret'):
        branch.add_leaf('Missing auth_secret parameter in %s, please add it.' % cfg_path, status='ERROR')
    else:
        auth_secret = conf.auth_secret
        if auth_secret == 'THIS SHOULD BE CHANGED BEFORE PRODUCTION':
            branch.add_leaf('Please update the auth_secret parameter in %s with a secret value. Be sure to keep the same value than in webui.cfg' % cfg_path, status='WARNING')
        else:
            branch.add_leaf('Auth_secret is a custom variable')
    
    Checks.check_ping(branch, proto, address, port, 5)
    Checks.check_synchronizer_UI(branch, conf, address, 5)
    check_synchronizer_protected_fields(branch, conf, cfg_path)
    
    if server_type_global:
        for lst in [conf.synchronizers]:
            check_satellites_basic_configuration(lst, 'synchronizer', conf.bad_encoding_files)
            check_duplicate_satellites(lst)
        daemons = {'synchronizers': conf.synchronizers}
        check_duplicate_daemon_name(daemons)
    else:
        # This part is already check in global part
        vm_status, vm_text = Checks.check_vmware(raw_stats)
        if vm_text:
            branch.add_leaf(vm_text, vm_status)
    
    ret = Checks.check_context(branch, proto, address, port, timeout)
    if ret is False:
        return
    
    # BUG FIX: check_modules' return value was previously discarded, which made
    # the guard below dead code (ret still held check_context's result).
    ret = Checks.check_modules(branch, proto, address, port, timeout)
    if ret is False:
        return


def check_synchronizer_protected_fields(branch, conf, cfg_path):
    logger.debug('check_synchronizer_protected_fields::start')
    
    protected_fields_branch = branch.get_branch('[Encryption status]')
    cfg_has_protect_fields_duplicate_params = False
    for file_name, params in conf.configuration_duplicate_params.iteritems():
        for param in params:
            if "protect_fields__" in param:
                cfg_has_protect_fields_duplicate_params = True
                protected_fields_branch.add_leaf("Parameter \033[35m%s\033[0m is defined multiple times in the synchronizer configuration file %s." % (param, file_name), status="ERROR")
    
    if cfg_has_protect_fields_duplicate_params:
        branch.add_leaf("In order to prevent possible data corruption, the synchronizer will not start until the configuration file is fixed.", status='ERROR', weight=-1)
    
    cfg_activate_db_encryption = getattr(conf, 'protect_fields__activate_encryption', False) in [u'1', True]
    
    # if encryption is activated the protect_fields__encryption_keyfile is mendatory and the file must exists
    protect_fields__encryption_keyfile = getattr(conf, 'protect_fields__encryption_keyfile', None)
    
    try:
        current_protected_fields = get_protected_fields_from_database(conf)
        db_activate_db_encryption = current_protected_fields.get('protect_fields__activate_database_encryption', False) in ['1', True]
        has_been_saved = current_protected_fields.get('extracted_key', False)
        database_hash = current_protected_fields.get('protect_fields__encryption_keyfile_hash', 'Undefined key hash')
        database_key_name = current_protected_fields.get('protect_fields__encryption_key_name', 'Undefined key name')
    except:
        current_protected_fields = None
        protected_fields_branch.add_leaf("Cannont connect to mongodb server ; consistency checks will not be run", status="ERROR")
        db_activate_db_encryption = None
        has_been_saved = None
        database_hash = "NOT AVAILABLE"
        database_key_name = "NOT AVAILABLE"
    
    is_keyfile_needed = db_activate_db_encryption or cfg_activate_db_encryption
    
    ### Checks about the keyfile (if it exists)
    if is_keyfile_needed and not os.path.exists(protect_fields__encryption_keyfile):
        protected_fields_branch.add_leaf('The key file %s declared in "%s" cannot be found' % (get_color_bloc(protect_fields__encryption_keyfile, AT_RISK_COLOR),
                                                                                               get_color_bloc(cfg_path, AT_RISK_COLOR)), status='ERROR')
        return  # the key file does not exist
    
    if protect_fields__encryption_keyfile is None and is_keyfile_needed:
        protected_fields_branch.add_leaf('Encryption is enabled and the keyfile is not defined in %s' % get_color_bloc(cfg_path, AT_RISK_COLOR), status='ERROR')
        return  # the key file param is not present in the cfg file
    
    try:
        with open(protect_fields__encryption_keyfile) as fd:
            complete_key = fd.read().strip()
        key_value = complete_key[complete_key.index("|") + 1:]
        file_key_name = complete_key[:complete_key.index("|")]
        file_hash = sha256(key_value).hexdigest()
        shinken_user = 'shinken'
        shinken_group = 'shinken'
        target_permission = 600
        file_stat = os.stat(protect_fields__encryption_keyfile)
        # thanks to https://github.com/naparuba/opsbro/blob/master/opsbro/compliancemgr.py#L69
        file_permissions = int(oct(file_stat.st_mode & 0777)[1:])  # => to have something like 644
        file_owner = getpwuid(file_stat.st_uid).pw_name
        file_group = getgrgid(file_stat.st_gid).gr_name
        if file_owner != shinken_user:
            protected_fields_branch.add_leaf('The %s owner (%s) is not what is expected: %s' % (get_color_bloc(protect_fields__encryption_keyfile, AT_RISK_COLOR),
                                                                                                get_color_bloc(file_owner, AT_RISK_COLOR),
                                                                                                get_color_bloc(shinken_user, AT_RISK_COLOR)), status='ERROR')
        if file_group != shinken_group:
            protected_fields_branch.add_leaf('The %s group (%s) is not what is expected: %s' % (get_color_bloc(protect_fields__encryption_keyfile, AT_RISK_COLOR),
                                                                                                get_color_bloc(file_group, AT_RISK_COLOR),
                                                                                                get_color_bloc(shinken_group, AT_RISK_COLOR)), status='ERROR')
        if file_permissions != target_permission:
            protected_fields_branch.add_leaf('The file %s permissions (%s) are not what is expected: %s' % (get_color_bloc(protect_fields__encryption_keyfile, AT_RISK_COLOR),
                                                                                                            get_color_bloc(file_permissions, AT_RISK_COLOR),
                                                                                                            get_color_bloc(target_permission, AT_RISK_COLOR)), status='ERROR')
    except ValueError:
        protected_fields_branch.add_leaf("The key found in the keyfile has a wrong format ;"
                                         " you need to restore the key %(database_kname)s, using %(command)s before the synchronizer can run." % {
                                             'database_kname': get_color_bloc(database_key_name, AT_RISK_COLOR),
                                             'command'       : get_color_bloc('shinken-protected-fields-keyfile-restore', AT_RISK_COLOR)},
                                         status='ERROR')
        return
    except OSError as e:
        if is_keyfile_needed:
            protected_fields_branch.add_leaf("Unable to read keyfile %(keyfile)s : %(error)s ;"
                                             "you need to restore the key %(database_kname)s, using %(command)s before the synchronizer can run." % {
                                                 'keyfile'       : get_color_bloc(protect_fields__encryption_keyfile, AT_RISK_COLOR),
                                                 'error'         : e.strerror,
                                                 'database_kname': get_color_bloc(database_key_name, AT_RISK_COLOR),
                                                 'command'       : get_color_bloc('shinken-protected-fields-keyfile-restore', AT_RISK_COLOR)},
                                             status='ERROR')
            return
    except IOError as e:
        if is_keyfile_needed:
            protected_fields_branch.add_leaf("Unable to load keyfile %(keyfile)s : %(error)s ;"
                                             "you need to restore the key %(database_kname)s, using %(command)s before the synchronizer can run." % {
                                                 'keyfile'       : get_color_bloc(protect_fields__encryption_keyfile, AT_RISK_COLOR),
                                                 'error'         : e.strerror,
                                                 'database_kname': get_color_bloc(database_key_name, AT_RISK_COLOR),
                                                 'command'       : get_color_bloc('shinken-protected-fields-keyfile-restore', AT_RISK_COLOR)},
                                             status='ERROR')
            return
    
    if current_protected_fields is None:
        protected_fields_branch.add_leaf("Encryption status is %s" % get_color_bloc('UNKNOWN', AT_RISK_COLOR))
        protected_fields_branch.add_leaf("The key export status is %s" % get_color_bloc('UNKNOWN', AT_RISK_COLOR))
    elif db_activate_db_encryption:
        protected_fields_branch.add_leaf("Encryption %s with key named : %s" % (get_color_bloc('ENABLED', INFO_COLOR), get_color_bloc(database_key_name, AT_RISK_COLOR)))
    elif not cfg_activate_db_encryption:
        protected_fields_branch.add_leaf("Encryption %s " % get_color_bloc('DISABLED', INFO_COLOR))
    else:
        protected_fields_branch.add_leaf("Encryption is currently %s ; it will be enabled on next synchronizer restart" % get_color_bloc("DISABLED", INFO_COLOR), status="INFO")
    
    if current_protected_fields and is_keyfile_needed and not has_been_saved:
        protected_fields_branch.add_leaf('The key has never been exported. Run %s and follow instructions' % get_color_bloc('shinken-protected-fields-keyfile-export', AT_RISK_COLOR), status='ERROR')
    
    if db_activate_db_encryption and cfg_activate_db_encryption:
        if file_hash != database_hash:
            protected_fields_branch.add_leaf(
                'The configured keyfile (key name: %(file_kname)s) does not match '
                'the key the database was encrypted with (key name: %(database_kname)s), '
                'you need to restore the key %(database_kname)s, using %(command)s before the synchronizer can run.' % {
                    'file_kname'    : get_color_bloc(file_key_name, AT_RISK_COLOR),
                    'database_kname': get_color_bloc(database_key_name, AT_RISK_COLOR),
                    'command'       : get_color_bloc('shinken-protected-fields-keyfile-restore', AT_RISK_COLOR)
                }, status='ERROR')
        # else:
        #     protected_fields_branch.add_leaf('The configured keyfile matches the database one', status="INFO")
    elif db_activate_db_encryption:
        if file_hash != database_hash:
            protected_fields_branch.add_leaf(
                'Encryption is currently enabled, but the key found in the keyfile (key name: %(file_kname)s) '
                'differs from the key used for the running encryption (key name: %(database_kname)s) so next synchronizer '
                'restart will fail ; you need to restore the key %(database_kname)s, using %(command)s before the synchronizer can run. ' % {
                    'file_kname'    : get_color_bloc(file_key_name, AT_RISK_COLOR),
                    'database_kname': database_key_name,
                    'command'       : 'shinken-protected-fields-keyfile-restore'
                }, status='ERROR')
            return
        else:
            protected_fields_branch.add_leaf('Encryption will be disabled on next synchronizer restart', status='INFO')
    # elif cfg_activate_db_encryption:
    #     protected_fields_branch.add_leaf("Encryption is currently %s ; it will be enabled on next synchronizer restart" % get_color_bloc("DISABLED", INFO_COLOR), status="INFO")
    # else:
    # protected_fields_branch.add_leaf("Encryption %s " % get_color_bloc('DISABLED', INFO_COLOR))


def get_protected_fields_from_database(conf):
    """Fetch the protected-fields metadata document from the synchronizer
    MongoDB database ('synchronizer-info' collection).

    :param conf: synchronizer configuration (provides mongodb_uri)
    :return: the 'protected_fields_info' document, or {} if absent
    """
    from pymongo import Connection
    uri = getattr(conf, 'mongodb_uri', None)
    connection = Connection(uri, fsync=True)
    database = getattr(connection, 'synchronizer')
    # the collection name contains a dash, so attribute-style access via getattr
    collection = getattr(database, "synchronizer-info")
    document = collection.find_one({'_id': 'protected_fields_info'})
    return {} if document is None else document


def check_webui_module(webui_module_conf, branch):
    """Run connectivity and configuration checks for one webui module:
    ping, auth_secret sanity, and mongodb sub-module reachability.

    :param webui_module_conf: dict-like module configuration
    :param branch: tree branch receiving the result leaves
    :return: a stats dict keyed by the module name
    """
    module_name = webui_module_conf.get('module_name')
    stats = {module_name: {}}
    
    proto = 'https' if webui_module_conf.get('use_ssl', '0') == '1' else 'http'
    Checks.check_ping(branch, proto, webui_module_conf.get('address', ''), int(webui_module_conf.get('port', '1')), 5, which=module_name)
    
    auth_secret = webui_module_conf.get('auth_secret', 'THIS SHOULD BE CHANGED BEFORE PRODUCTION')
    if auth_secret != 'THIS SHOULD BE CHANGED BEFORE PRODUCTION':
        branch.add_leaf('Auth_secret is a custom variable', "OK")
    else:
        branch.add_leaf('Please update the auth_secret parameter with a secret value. Be sure to keep the same value than in synchronizer.cfg', "WARNING")
    
    stats[module_name]['mongo'] = {}
    for sub_module in webui_module_conf.get('modules'):
        if sub_module.module_type == 'mongodb':
            check_mongodb(sub_module.uri, branch)
    
    return stats


# scheduler, receivers and co configuration is sensible to the address one. If configured with "localhost" then
# distributed mode. So must warn that it must be changed before production
def check_satellites_basic_configuration(lst, daemon_type, bad_encoding_files):
    """Run basic configuration checks for every enabled satellite of a type:
    VMware stats, cfg file encoding, and the 'localhost address in distributed
    mode' pitfall. Raw stats are cached in _daemon_raw_stats for later checks.

    :param lst: iterable of satellite objects
    :param daemon_type: satellite type name (e.g. 'synchronizer')
    :param bad_encoding_files: cfg files with invalid utf-8 characters
    """
    logger.debug('check_satellites_basic_configuration::start')
    
    for sat in lst:
        if getattr(sat, 'enabled', '1') in ['0', False]:
            continue  # disabled satellite: nothing to check
        
        name = sat.get_name()
        stats = get_raw_stats_for_satellite(sat)
        # Keep the raw stats around for the cross-daemon checks done later
        _daemon_raw_stats[daemon_type][name] = stats
        branch = TreeUtils.get_satellite_branch(sat)
        
        vm_status, vm_text = Checks.check_vmware(stats)
        if vm_text:
            branch.add_leaf(vm_text, status=vm_status)
        
        cfg_file = sat.imported_from.split(':')[0]
        if cfg_file in bad_encoding_files:
            branch.add_leaf('Some characters could not be read in utf-8 in these files : %s' % (cfg_file), status='ERROR')
        
        # Only meaningful on the arbiter server: elsewhere the .cfg are not read
        if sat.address == 'localhost' and server_type_global:
            branch.add_leaf('%s is defined with the %s address. It is a problem in distributed mode. Please configure it with the LAN IP/FQDN address instead' %
                            (name, get_color_bloc('localhost', ERROR_COLOR)), status='WARNING')
        else:
            branch.add_leaf('Configuration seems valid')


def check_duplicate_satellites(lst):
    # Detect satellites declared more than once in the configuration and report
    # the resulting configuration errors under the matching Architecture branch.
    # `lst` is a satellites container exposing `.items` (an id -> item dict).
    if len(lst.items) > 0:
        # id-pairs of duplicates already reported, to avoid double display
        errors_already_displayed = set()
        for item in lst.items.values():
            daemon_type = item.get_my_type()
            # Rebuild a typed links object from `lst` so that find_duplicates()
            # and configuration_errors are recomputed for this daemon type.
            (cls, clss, name) = Config.types_creations[daemon_type]
            links = clss(lst)
            duplicates = links.find_duplicates()
            
            # Store found duplicates to prevent displaying the same error twice
            # NOTE(review): each pair is kept as (dup[0].id, dup[1].id) without
            # normalizing the order of ids inside the pair, and sorted() below
            # orders the list of pairs (not the ids within a pair) before it is
            # collapsed into a set — presumably find_duplicates() always returns
            # pairs in a stable order; verify if a duplicate is ever shown twice.
            duplicate_ids = map(lambda dup: (dup[0].id, dup[1].id), duplicates)
            reduced_duplicates = set(sorted(duplicate_ids))
            if reduced_duplicates.intersection(errors_already_displayed) != set():
                continue  # this duplicate set was already reported for another item
            
            for error in links.configuration_errors:
                branch = Tree.get_root() \
                    .get_branch('Architecture') \
                    .get_sub_branch(RealmUtils.build_leaf(item)) \
                    .get_branch('h2>- %s:' % HostUtils.get_server_name(item.address)) \
                    .get_branch("[ %ss ]" % daemon_type)
                
                branch.add_leaf(error, status='ERROR')
                errors_already_displayed.update(reduced_duplicates)


def check_default_realm(realms):
    """Verify that exactly one realm is flagged as the default one.

    :param realms: iterable of realm objects
    :return: True when exactly one default realm exists, False otherwise
             (an ERROR leaf is added under 'Realms Structure' in that case)
    """
    branch = Tree.get_root().get_branch("Realms Structure")
    default_realms = [r.get_name() for r in realms if getattr(r, 'default', False) in ("1", True)]
    
    nb_defaults = len(default_realms)
    if nb_defaults == 1:
        branch.add_leaf("Realms structure is correct")
        return True
    if nb_defaults == 0:
        branch.add_leaf('There must be one default realm defined. Please set one by setting the "default" property to "1" for one realm.', status="ERROR")
    else:
        branch.add_leaf("There can be only one default realm ; the following realms are set as default : %s" % ", ".join(default_realms), status="ERROR")
    return False


def check_duplicate_daemon_name(lst):
    """Ensure daemon names are globally unique across all daemon types, and
    report every clash under the Architecture branch of each involved daemon.

    :param lst: dict of daemon-type name -> container exposing `.items`
    """
    daemons_by_name = defaultdict(list)
    for container in lst.itervalues():
        for daemon in container.items.itervalues():
            daemons_by_name[daemon.get_name()].append(daemon)
    
    for name, daemons in daemons_by_name.iteritems():
        if len(daemons) <= 1:
            continue  # unique name: fine
        messages = ['Daemons names must be unique. The name [%s] is used by more than one daemon. Please check your configuration in files :' % name]
        messages.extend('- %s' % daemon.imported_from for daemon in daemons)
        for daemon in daemons:
            branch = Tree.get_root() \
                .get_branch('Architecture') \
                .get_sub_branch(RealmUtils.build_leaf(daemon)) \
                .get_branch('h2>- %s:' % HostUtils.get_server_name(daemon.address)) \
                .get_branch("[ %ss ]" % daemon.get_my_type())
            
            for message in messages:
                branch.add_leaf(message, status='ERROR')


def get_raw_stats_for_satellite(satellite, silent=True):
    """Fetch /get_raw_stats for a satellite, deriving protocol and timeout
    from its configuration (DEFAULT_TIMEOUT_FORCE overrides any per-satellite
    timeout).

    :param satellite: satellite object (address, port, use_ssl, timeout)
    :param silent: when True, return {} on error instead of raising
    :return: decoded stats dict (possibly empty when silent)
    """
    if DEFAULT_TIMEOUT_FORCE:
        timeout = DEFAULT_TIMEOUT
    else:
        timeout = getattr(satellite, 'timeout', DEFAULT_TIMEOUT)
    
    proto = 'https' if getattr(satellite, 'use_ssl', 0) in ['1', True] else 'http'
    return get_raw_stats(proto, satellite.address, int(satellite.port), timeout, silent)


def get_raw_stats(proto, address, port, timeout, silent=False):
    """GET /get_raw_stats on a daemon and return the decoded JSON dict.

    Results are memoized per URI in the module-level _raw_stats_cache, so a
    daemon is only queried once per run. On failure: raise when silent is
    False, otherwise cache and return an empty dict.
    """
    uri_ = '%s://%s:%d/get_raw_stats' % (proto, address, port)
    cached = _raw_stats_cache.get(uri_, None)
    if cached is not None:
        return cached
    
    try:
        logger.debug('check_satellite_connection:: curl to %s' % uri_)
        payload = http_get(uri_, timeout=int(timeout))
        stats = json.loads(payload)
    except Exception:
        if not silent:
            raise
        stats = {}  # remember the failure too, to avoid retrying
    _raw_stats_cache[uri_] = stats
    return stats


class Checks(object):
    # Will stop warn module restarts if the last restart has occured more than X minutes ago.
    # Default to 120 minutes, max at 1440 (24H). This is what keep modulesmanager.py
    modules_warning_threshold = 120
    
    
    @staticmethod
    def set_warning_threshold(threshold):
        if threshold > 1440 or threshold < 0:
            raise Exception('Modules expire option error: bad range')
        Checks.modules_warning_threshold = threshold
    
    
    @staticmethod
    def check_ping(satellite_branch, proto, address, port, timeout, which='daemon'):
        """GET /ping on a daemon and add an OK/ERROR leaf with the outcome.

        :param which: label used in the leaf messages ('daemon' by default)
        :return: True when the daemon answered (even with a bad payload),
                 False when it could not be contacted at all
        """
        try:
            uri_ = '%s://%s:%d/ping' % (proto, address, port)
            logger.debug('check_satellite_connection:: curl to %s' % uri_)
            raw = http_get(uri_, timeout=int(timeout))
            answer = json.loads(raw.replace("'", '"'))
        except Exception as exp:
            message = manage_exception(exp)
            exp_text = str(exp)
            # Give a more actionable message on the two classic SSL mismatches
            if 'SSL23_GET_SERVER_HELLO:unknown protocol' in exp_text:
                message = 'SSL connection failed ; please check your remote daemon configuration *.ini file ' + message
            elif '[SSL: WRONG_VERSION_NUMBER] wrong version number (_ssl.c:765)' in exp_text:
                message = 'SSL connection failed. Maybe this daemon does not use SSL. Please check your remote daemon configuration *.ini file'
            satellite_branch.add_leaf('Cannot contact %s %s:%s ( %s )' % (which, address, port, message), status='ERROR')
            return False
        
        is_pong = answer == 'pong' or isinstance(answer, dict) and answer.get('text').lower() == "pong"
        if is_pong:
            satellite_branch.add_leaf('Connection to %s is OK at port %s' % (which, port))
        else:
            satellite_branch.add_leaf('Connection to %s is invalid : ( %s )' % (which, answer), status='ERROR')
        
        return True
    
    
    @staticmethod
    def check_synchronizer_UI(branch, conf, address, timeout=5):
        """GET /ping on the synchronizer web UI and add an OK/ERROR leaf.

        :param conf: synchronizer configuration (http_port / http_use_ssl)
        :return: True when the UI answered (even with a bad payload),
                 False when it could not be contacted at all
        """
        port = int(getattr(conf, 'http_port', '7766'))
        # BUG FIX: the use_ssl default used to be '7766' (copy-paste of the
        # port default) and the value was wrapped in int(), which defeated the
        # comparison with the string '1' (it only worked because int('1') == True).
        use_ssl = getattr(conf, 'http_use_ssl', '0')
        proto = 'https' if use_ssl in ['1', 1, True] else 'http'
        
        try:
            uri_ = '%s://%s:%d/ping' % (proto, address, port)
            logger.debug('check_synchronizer_UI:: curl to %s' % uri_)
            html = http_get(uri_, timeout=int(timeout))
            response = json.loads(html.replace("'", '"'))
        except Exception as exp:
            message = manage_exception(exp)
            if 'SSL23_GET_SERVER_HELLO:unknown protocol' in str(exp):
                message = 'SSL connection failed ; please check your /etc/shinken/synchronizer.cfg ' + message
            branch.add_leaf('Cannot contact UI on %s:%s ( %s )' % (address, port, message), status='ERROR')
            return False
        
        if response == 'pong' or isinstance(response, dict) and response.get('text').lower() == "pong":
            branch.add_leaf('Connection to Synchronizer UI is OK at port %s' % (port))
        else:
            branch.add_leaf('Connection to Synchronizer UI is invalid : ( %s )' % (response), status='ERROR')
        
        return True
    
    
    @staticmethod
    def check_poller_reactionner_stats(branch, proto, address, port, timeout, satellite_type):
        """Fetch raw stats from a poller/reactionner and report its tag list
        and any worker deaths/restarts.

        :param satellite_type: 'poller' or 'reactionner' (drives the wording)
        :return: False when stats could not be fetched, None otherwise
        """
        try:
            res = get_raw_stats(proto, address, port, timeout)
        except (HTTP_ERRORS,) as exp:
            branch.add_leaf('Cannot get raw stats details from your daemon ( %s ).' % manage_exception(exp), status='WARNING')
            return False
        
        # Tag management: show which tags this daemon handles
        tags = res.get('tags', ['None', ])
        if "None" in tags:
            explain_none = '[This %s will handle all untagged %s (the "None" tag)]' % (satellite_type, 'checks' if satellite_type == 'poller' else 'notifications')
        else:
            explain_none = ""
        branch.get_branch('Tag list:').get_branch('%s\t%s' % (', '.join(tags), explain_none))
        
        # Dead / restarted workers: prefer the 24h rolling stats when available
        workers_restarts = res.get('workers_restarts', {})
        dead_workers = res.get('dead_worker_stat', {})
        if workers_restarts:
            sub_branch = branch.get_branch("In the last 24 hours, some workers died but were restarted:")
            for worker_type, restart_data in workers_restarts.iteritems():
                label = 'default worker type' if worker_type == 'fork' else worker_type
                sub_branch.add_leaf("'%s' restarted %s times" % (label, len(restart_data)), status='WARNING')
        elif dead_workers:
            sub_branch = branch.get_branch("Since last restart, some workers died but were restarted:")
            for worker_type, restart_count in dead_workers.iteritems():
                label = 'default worker type' if worker_type == 'fork' else worker_type
                sub_branch.add_leaf("'%s' restarted %s times" % (label, restart_count), status='WARNING')
    
    
    @staticmethod
    def check_scheduler_retention_configuration(branch, satellite):
        """Report retention configuration errors/warnings collected for this
        scheduler (from the module-level ret_sched_by_realm mapping)."""
        sched_dict = ret_sched_by_realm.get(satellite.realm, {})
        if satellite.id not in sched_dict:
            return
        entry = sched_dict[satellite.id]
        errors = entry.get('errors', ())
        warnings = entry.get('warnings', ())
        if not errors and not warnings:
            return
        sub_branch = branch.get_branch('Retention configuration:')
        for message in errors:
            sub_branch.add_leaf(message, status='ERROR')
        for message in warnings:
            sub_branch.add_leaf(message, status='WARNING')
    
    
    @staticmethod
    def check_arbiter_spare_conf(branch):
        """Report spare-arbiter configuration problems collected at load time
        (module-level arbiters_spare_conf) under a 'Spare configuration:' branch."""
        errors = arbiters_spare_conf.get('errors', ())
        warnings = arbiters_spare_conf.get('warnings', ())
        if not errors and not warnings:
            return
        sub_branch = branch.get_branch('Spare configuration:')
        for message in errors:
            sub_branch.add_leaf(message, status='ERROR')
        for message in warnings:
            sub_branch.add_leaf(message, status='WARNING')
    
    
    @staticmethod
    def check_scheduler_retention_active(satellite_branch, satellite, modules_state):
        retention_modules = [module for module in modules_state['modules'] if module['type'] == "mongodb_retention"]
        if len(retention_modules) >= 1 and retention_modules[0]['status'] != "OK":
            satellite_branch.add_leaf("The scheduler started without loading retention", status='WARNING')
    
    
    @staticmethod
    def check_context(branch, proto, address, port, timeout):
        """GET /get_context on a daemon and verify its version matches the
        arbiter's, report recent inter-daemon API errors, and whether the
        daemon was deactivated by the arbiter.

        :return: True when the context was retrieved and the version is known,
                 False on contact/decode failure or missing version info
        """
        try:
            uri = '%s://%s:%d/get_context' % (proto, address, port)
            json_string = http_get(uri, timeout=int(timeout))
            j = json.loads(json_string)
        except ValueError as exp:
            # BUG FIX: "Unable do decode" -> "Unable to decode" (same wording as check_modules)
            branch.add_leaf('Unable to decode your daemon data : %s .' % exp, status='WARNING')
            return False
        except Exception as exp:
            error = manage_exception(exp, update=True)
            branch.add_leaf('Cannot get the installation detail from your daemon ( %s ).' % error, status='ERROR')
            return False
        
        current_version = j.get('current_version', None)
        if current_version is None:
            # Old daemon that does not expose its version: it must be updated
            branch.add_leaf('Cannot get the installation detail from your daemon (current_version). Please update.', status='ERROR')
            return False
        if current_version == CURRENT_VERSION_RAW:
            branch.add_leaf('Daemon version is: v%s' % current_version, status='OK')
        else:
            branch.add_leaf('Daemon version is v%s and arbiter version is v%s. Please update.' % (current_version, CURRENT_VERSION_RAW), status='ERROR')
        
        error_count = int(j.get('error_count', 0))
        status = 'OK' if error_count == 0 else 'ERROR'
        if error_count > 0:
            branch.add_leaf('Some API calls between daemons failed in the last 24 hours (%d errors). Please look at your daemon logs for more details about these errors.' % error_count, status=status)
        
        deactivated_by_arbiter = j.get('deactivated_by_arbiter', False)
        if deactivated_by_arbiter:
            branch.add_leaf('Deactivated by the Arbiter, no more job will be done until it receives a new configuration.', status='WARNING')
        
        return True
    
    
    @staticmethod
    def check_modules(branch, proto, address, port, timeout, sla_module_conf=None, webui_modules_conf=None):
        # type: (Tree, str, str, int, int, Dict, List) -> Union[Dict,bool]
        """GET /get_module_states on a daemon and list the module leaves.

        :return: the decoded module states dict on success, False on failure
        """
        logger.debug('check_satellite_connection::start')
        webui_modules_conf = [] if webui_modules_conf is None else webui_modules_conf
        sla_module_conf = {} if sla_module_conf is None else sla_module_conf
        
        try:
            uri = '%s://%s:%d/get_module_states' % (proto, address, port)
            html = http_get(uri, timeout=int(timeout))
            module_states = json.loads(html)
            TreeUtils.list_module_leaves(branch, module_states, sla_module_conf=sla_module_conf, webui_modules_conf=webui_modules_conf)
            return module_states
        except ValueError as exp:
            branch.add_leaf('Unable to decode your daemon data : %s .' % exp, status=ModuleState.WARNING)
            return False
        except Exception as exp:
            branch.add_leaf('Cannot get module details from your daemon ( %s ).' % manage_exception(exp), status=ModuleState.WARNING)
            return False
    
    
    @staticmethod
    def check_satellites_status(branch, proto, address, port, timeout):
        """Ask a daemon which satellites it talks to (/get_satellites then
        /check_satellites_connexion) and report each one's reachability under a
        'Talk to:' sub branch.

        :return: True on success, False on contact/decode failure
        """
        try:
            # Get initial number of satellites without checking them to define the timeout value
            uri = '%s://%s:%d/get_satellites' % (proto, address, port)
            html = http_get(uri, timeout=int(timeout))
            all_sats_count = len(json.loads(html))
            # one timeout per satellite to check, plus a safety margin
            check_satellites_timeout = int(timeout) * all_sats_count + 1
            
            # Get the connexion status then:
            if all_sats_count:
                uri = '%s://%s:%d/check_satellites_connexion?timeout=%s' % (proto, address, port, str(timeout))
                data = http_get(uri, timeout=check_satellites_timeout)
                statuses = json.loads(data)
                
                if len(statuses) != 0:
                    talk_to_branch = branch.get_branch('Talk to:')
                    for target in statuses:
                        if target['status'] == "ok":
                            talk_to_branch.add_leaf('Reachable %s satellite (%s) at %s://%s:%s' % (target['type'], target['name'], target['proto'], target['address'], target['port']))
                        else:
                            talk_to_branch.add_leaf('Unreachable %s satellite (%s) at %s://%s:%s (%s)' % (target['type'], target['name'], target['proto'], target['address'], target['port'], target['status']), status='ERROR')
        
        except ValueError as exp:
            # BUG FIX: "Unable do decode" -> "Unable to decode"
            talk_to_branch = branch.get_branch('Talk to:')
            talk_to_branch.add_leaf('Unable to decode your daemon data : %s .' % exp, status='WARNING')
            return False
        except Exception as exp:
            error = manage_exception(exp)
            talk_to_branch = branch.get_branch('Talk to:')
            talk_to_branch.add_leaf('Cannot get "Talk to" information from the daemon. ( %s )' % error, status='WARNING')
            return False
        return True
    
    
    # We will look at all schedulers and raise an error if they are not reachable by active daemons
    @staticmethod
    def check_schedulers_are_reacheable(schedulers, brokers, pollers, reactionners):
        """Verify every active scheduler is covered by a broker, a poller and a reactionner.

        A scheduler with no such daemon in its realm gets an ERROR leaf, or
        only a WARNING when that scheduler manages no host at all.
        """
        for scheduler in schedulers:
            realm = getattr(scheduler, 'realm', '')
            if not realm:
                continue
            if getattr(scheduler, 'enabled', '1') == '0':
                continue
            name = getattr(scheduler, 'scheduler_name', None)
            if name is None:  # no name, so no stats, do not check this
                continue
            
            stats = _daemon_raw_stats['scheduler'].get(name, None)
            if stats is None:
                continue
            
            # If the scheduler does not manage hosts, a missing daemon is only a WARNING
            missing_level = 'WARNING' if stats.get('nb_hosts', 0) == 0 else 'ERROR'
            
            for (_type, daemons) in (('broker', brokers), ('poller', pollers), ('reactionner', reactionners)):
                # An enabled daemon handling the scheduler's realm is enough
                covered = any(
                    getattr(daemon, 'enabled', '1') != '0' and realm in RealmUtils.get_handle_realms(daemon)
                    for daemon in daemons
                )
                if not covered:
                    branch = TreeUtils.get_satellite_branch(scheduler)
                    branch.add_leaf('The scheduler have hosts or clusters and has no %s in its realm (or upper realm with the manage_sub_realms option enabled)' % _type, status=missing_level)
    
    
    # a daemon is deactivated by arbiter when arbiter don't know anymore a daemon and ask him to wait for a new conf
    @staticmethod
    def _is_deactivated_by_arbiter(branch, proto, address, port, timeout):
        """Tell whether this daemon reports it was deactivated by the arbiter.

        The arbiter deactivates a daemon it no longer knows, asking it to wait
        for a new configuration. Any error while querying the daemon is treated
        as "not deactivated". The branch parameter is unused here; it is kept
        for signature consistency with the sibling checks.
        """
        try:
            uri = '%s://%s:%d/get_context' % (proto, address, port)
            context = json.loads(http_get(uri, timeout=int(timeout)))
        except Exception:
            return False
        return context.get('deactivated_by_arbiter', False)
    
    
    @staticmethod
    def check_arbiter_traces(branch, proto, address, port, timeout):
        """Check the trace left on this daemon by the arbiter(s) that contacted it.

        Exactly one arbiter trace is expected: zero means the daemon has never
        been contacted, more than one means conflicting arbiters. Returns True
        on success, False when the daemon data cannot be fetched or decoded.
        """
        try:
            uri = '%s://%s:%d/arbiter_traces_get' % (proto, address, port)
            html = http_get(uri, timeout=int(timeout))
            arbiters = json.loads(html)
            if len(arbiters) == 1:
                # Correct number of arbiter (one).
                arbiter = arbiters[0]
                current_time = int(time.time())
                insert_time = arbiter['insert_time']
                diff_time_with_arbiter = arbiter.get('diff_time_with_arbiter', 0)
                # A trace expires after expire_period; default is twice the check interval
                expire_period = arbiter.get('expire_period', 2 * arbiter.get('check_interval', 60))
                expire_time = insert_time + expire_period
                expire_in = expire_time - current_time
                diff_time_info = '( and no time shift )'
                # More than 30s of clock drift between the arbiter and this daemon is an error
                if abs(diff_time_with_arbiter) > 30:
                    branch.add_leaf('Correct connection from arbiter "%s" but a time shift of %d seconds' % (arbiter['name'], diff_time_with_arbiter), status="ERROR")
                elif expire_in < 0:
                    # The trace expired: the arbiter did not contact us recently enough
                    branch.add_leaf('Missed connection from arbiter "%s" since %i seconds' % (arbiter['name'], current_time - insert_time), status="WARNING")
                else:
                    branch.add_leaf('Correct connection from arbiter "%s" %s' % (arbiter['name'], diff_time_info))
            elif len(arbiters) == 0:
                # Not yet contacted by an arbiter.
                branch.add_leaf('Daemon has not yet been contacted by an arbiter.', status="WARNING")
            else:
                # More than one arbiter, problem.
                arbiter_branch = branch.get_branch('Arbiters conflicts:')
                current_time = int(time.time())
                for arbiter in arbiters:
                    # Compute and print the time in which the arbiter entry
                    # will expire
                    insert_time = arbiter['insert_time']
                    check_interval = arbiter['check_interval']
                    expire_time = insert_time + (2 * check_interval)
                    expire_in = expire_time - current_time
                    arbiter_branch.add_leaf('Arbiter: %s (%s)\t This arbiter will expire in %s seconds.' % (arbiter['name'], arbiter['uri'], expire_in), status='ERROR')
        except ValueError as exp:
            # The daemon answered, but with invalid JSON
            branch.add_leaf('Unable to decode your daemon data : %s .' % exp, status='WARNING')
            return False
        except KeyError as exp:
            # A field we expect is missing from the arbiter trace payload
            branch.add_leaf("Daemon data does not contain key [%s]" % exp.args[0])
            return False
        except Exception as exp:
            error = manage_exception(exp)
            branch.add_leaf('Cannot get the master arbiter details from your daemon ( %s ).' % error, status='ERROR')
            return False
        
        return True
    
    
    @staticmethod
    def check_vmware(stats):
        """Delegate the VMware statistics validation to the dedicated stats reader."""
        status_and_text = vmware_stats_reader.check_stats_values(stats)
        return status_and_text


def check_satellite_connection(daemons_configurations, sla_module_conf=None, webui_modules_conf=None, daemons_alive=None):
    # type: (List, Dict, List, List) -> None
    """Run the whole check suite against every enabled daemon of the given list.

    Each daemon that answers the ping is appended to daemons_alive (when
    provided) as an (address, port, proto, is_spare) tuple. All findings are
    reported as leaves on each daemon's satellite branch; nothing is returned.
    """
    logger.debug('check_satellite_connection::start')
    # Avoid mutable default arguments: normalize None into fresh containers
    if webui_modules_conf is None:
        webui_modules_conf = []
    if sla_module_conf is None:
        sla_module_conf = {}
    for daemon_configuration in daemons_configurations:
        if getattr(daemon_configuration, 'enabled', '1') in ['0', False]:
            continue  # is disabled
        
        name = daemon_configuration.get_name()
        satellite_type = daemon_configuration.get_my_type()
        address = daemon_configuration.address
        progress_ip_address = HostUtils.get_host_by_name(address)
        progress_realm_root = RealmUtils.get_path_to_root(RealmUtils.get_realm_name(daemon_configuration))
        HealthCheckProgress.show_progress_message("Realm:%s => %s(%s) - Type:%s Name:%s" % (progress_realm_root[:-1], address, progress_ip_address, satellite_type, name))
        
        # The timeout can be globally forced; otherwise use the daemon's own value
        if DEFAULT_TIMEOUT_FORCE is True:
            timeout = DEFAULT_TIMEOUT
        else:
            if hasattr(daemon_configuration, 'timeout'):
                timeout = daemon_configuration.timeout
            else:
                timeout = DEFAULT_TIMEOUT
        
        port = int(daemon_configuration.port)
        if getattr(daemon_configuration, 'use_ssl', 0) in ['1', True]:
            proto = 'https'
        else:
            proto = 'http'
        
        logger.debug('check_satellite_connection:: look for daemon %s %s %s %s' % (daemon_configuration.imported_from, address, port, proto))
        
        satellite_branch = TreeUtils.get_satellite_branch(daemon_configuration)
        
        ## Check ping
        # Try HTTP API PING
        ret = Checks.check_ping(satellite_branch, proto, address, port, timeout)
        if ret and daemons_alive is not None:
            _is_spare = ConfigUtils.is_spare(daemon_configuration)
            daemons_alive.append((address, port, proto, _is_spare))
        else:  # on failure, stop
            continue
        
        ## Check context
        # If succeed, try to get the whole context with versions and so on
        ret = Checks.check_context(satellite_branch, proto, address, port, timeout)
        if ret is False:  # on failure, stop
            continue
        
        ## Check modules
        # Read and format the modules states
        # Only keep the webui module configurations that belong to this broker
        webui_modules_conf_for_me = [webui_module_conf for webui_module_conf in webui_modules_conf if webui_module_conf['broker_name'] == name]
        modules_state = Checks.check_modules(satellite_branch, proto, address, port, timeout, sla_module_conf=sla_module_conf, webui_modules_conf=webui_modules_conf_for_me)
        if modules_state is False:  # on failure, stop
            continue
        
        ## Check connexions statuses
        # print satellite connexions
        Checks.check_satellites_status(satellite_branch, proto, address, port, timeout)
        
        # master arbiters are not concerned for the remaining checks
        if satellite_type == 'arbiter':
            if not ConfigUtils.is_spare(daemon_configuration):  # If 1, spares are configured by other arbiters
                satellite_branch.add_leaf('This element is defined as the master arbiter')
                Checks.check_arbiter_spare_conf(satellite_branch)
            continue
        
        # if the daemon have been deactivated by the arbiter, don't check check the arbiter trace
        if not Checks._is_deactivated_by_arbiter(satellite_branch, proto, address, port, timeout):
            ## Check arbiters connexions
            # Check arbiter connexions trace on satellites
            Checks.check_arbiter_traces(satellite_branch, proto, address, port, timeout)
            
            # If it is a poller, get his managed tags and dead workers informations from stats
            if satellite_type in ('poller', 'reactionner'):
                ret = Checks.check_poller_reactionner_stats(satellite_branch, proto, address, port, timeout, satellite_type)
                if ret is False:  # on failure, stop
                    continue
        
        # Check the scheduler retention configuration
        if satellite_type == 'scheduler':
            Checks.check_scheduler_retention_configuration(satellite_branch, daemon_configuration)
            Checks.check_scheduler_retention_active(satellite_branch, daemon_configuration, modules_state)


# Check a local daemon (localhost) connexion, but need to look in ini the ssh or not
def check_local_daemon(daemon_type, daemon_id):
    """Check the daemon of the given type/id running on localhost.

    Connection settings (host, port, ssl) are read from the daemon's ini file
    (or from the arbiter configuration for an arbiter). All findings are added
    under the 'Local daemons' branch of the tree; nothing is returned.
    """
    logger.debug('check_local_daemon::start')
    proto = 'http'
    address = 'localhost'
    port = PORT_MAPPING.get(daemon_type)
    d_name = ''
    timeout = DEFAULT_TIMEOUT
    satellite_type = daemon_type
    daemon_type_display = '[ %ss ]' % daemon_type
    
    if daemon_type == 'arbiter':
        arbiter_conf = ConfigUtils.get_local_arbiter_config()
        if not arbiter_conf:
            local_branch = Tree.get_root().get_branch('Local daemons').get_branch('%s' % daemon_type_display).get_branch('[%s: --]' % daemon_type)
            local_branch.add_leaf('No configuration for this arbiter was found. Check if host_name in arbiter configuration has the machine host name.', status='ERROR')
            return
        if not getattr(arbiter_conf, 'enabled', True) in [True, '1']:
            return  # disabled arbiter: nothing to check
        if getattr(arbiter_conf, 'use_ssl', 0) in ['1', True]:
            proto = 'https'
        address = arbiter_conf.address
        port = arbiter_conf.port
    else:
        pth = get_local_daemon_configuration_file_path(daemon_type, daemon_id)
        d_name = get_instance_name(daemon_type, daemon_id)
        # Don't merge unnamed daemons into a single entry, so add its id into it
        if d_name.startswith('unnamed-') or d_name.startswith('(unnamed-'):
            d_name += '(id=%s)' % daemon_id
        
        if os.path.exists(pth):
            # Use a context manager so the ini file is always closed (was leaked before)
            with open(pth, 'r') as ini_file:
                ini_lines = ini_file.read().splitlines()
            for line in ini_lines:
                if line.startswith('use_ssl='):
                    if line.replace('use_ssl=', '') == '1':
                        proto = 'https'
                if line.startswith('host='):
                    host_value = line.replace('host=', '')
                    # BUG FIX: was "len(line) > 0", which is always true here and could
                    # set an empty address when the host= entry has no value
                    if len(host_value) > 0:
                        address = host_value
                if line.startswith('port='):
                    port_value = line.replace('port=', '')
                    try:
                        port = int(port_value)
                    except ValueError:
                        continue  # malformed port entry: keep the previous value
    
    raw_stats = get_raw_stats(proto, address, port, timeout, silent=True)
    
    is_spare = raw_stats.get('spare', False)
    # A spare is only displayed as activated when it actually holds a configuration
    is_spare_activated = raw_stats.get('activated', False) and raw_stats.get('have_conf', False)
    
    local_branch = Tree.get_root() \
        .get_branch('Local daemons') \
        .get_branch('%s' % daemon_type_display) \
        .get_branch('[%s: %s]' % (daemon_type, _get_daemon_name(d_name, is_spare, is_spare_activated)))
    
    ret = Checks.check_ping(local_branch, proto, address, port, timeout)
    if ret is False:
        return
    
    ret = Checks.check_context(local_branch, proto, address, port, timeout)
    if ret is False:
        return
    
    ret = Checks.check_modules(local_branch, proto, address, port, timeout)
    if ret is False:
        return
    
    vm_status, vm_text = Checks.check_vmware(raw_stats)
    if vm_text:
        local_branch.add_leaf(vm_text, vm_status)
    
    Checks.check_satellites_status(local_branch, proto, address, port, timeout)
    
    # BUG FIX: was "satellite_type is 'arbiter'" (identity test on a string literal,
    # which only works thanks to CPython interning)
    if satellite_type == 'arbiter':
        if not ConfigUtils.is_spare(arbiter_conf):  # If 1, spares are configured by other arbiters
            local_branch.add_leaf('This element is defined as the master arbiter')
            
            return
    
    # if the daemon has been deactivated by the arbiter, don't check the arbiter trace
    if not Checks._is_deactivated_by_arbiter(local_branch, proto, address, port, timeout):
        Checks.check_arbiter_traces(local_branch, proto, address, port, timeout)
        
        # If it is a poller/reactionner, get its managed tags and dead workers informations from stats
        if satellite_type in ('poller', 'reactionner'):
            ret = Checks.check_poller_reactionner_stats(local_branch, proto, address, port, timeout, satellite_type)
            if ret is False:  # on failure, stop
                return


def _prepare_arbiters_spare_conf(conf):
    """Validate the master/spare arbiter configuration.

    Findings are stored in the module-level arbiters_spare_conf dict under
    its 'errors' and 'warnings' keys (both reset here). Nothing is returned.
    """
    arbiters_spare_conf['errors'] = []
    arbiters_spare_conf['warnings'] = []
    
    enabled_arbiters = set()
    spare_arbiters = set()
    for arbiter in conf.arbiters:
        # Disabled arbiters are irrelevant for this validation
        if getattr(arbiter, 'enabled', '1') in ['0', False]:
            continue
        enabled_arbiters.add(arbiter)
        if ConfigUtils.is_spare(arbiter):
            spare_arbiters.add(arbiter)
    
    nb_enabled = len(enabled_arbiters)
    # With several arbiters in the conf, some extra coherence checks are needed
    if nb_enabled > 2:
        arbiters_spare_conf['errors'].append("Too many Arbiters are configured, you have %d Arbiters configured but at most 2 can be used" % nb_enabled)
        return
    if nb_enabled == 1:
        # A single arbiter: no spare configuration to validate
        return
    
    # Exactly one of the two arbiters must be the spare, the other the master
    nb_spares = len(spare_arbiters)
    if nb_spares > 1:
        arbiters_spare_conf['errors'].append("Both Arbiters cannot be configured as spare Arbiters")
    elif nb_spares == 0:
        arbiters_spare_conf['errors'].append("Both Arbiters cannot be configured as master Arbiters")
    
    seen_host_names = dict()
    for arbiter in enabled_arbiters:
        endpoint = "%s:%s" % (arbiter.address, arbiter.port)
        if endpoint in seen_host_names:
            # Another arbiter already uses this very address:port couple
            arbiters_spare_conf['errors'].append("The Arbiters have the same address (%s)" % arbiter.address)
            continue
        if arbiter.host_name and arbiter.host_name in seen_host_names.values():
            arbiters_spare_conf['errors'].append("The Arbiters have the same host_name (%s) configured" % arbiter.host_name)
        elif not arbiter.host_name:
            arbiters_spare_conf['errors'].append("The Arbiter %s (%s) doesn't have the value 'host_name' configured" % (arbiter.get_name(), endpoint))
        seen_host_names[endpoint] = arbiter.host_name


def _prepare_schedulers_retentions(conf):
    """Validate the scheduler retention-module configuration, per realm.

    Fills the module-level ret_sched_by_realm dict: one entry per realm with
    a 'nb_active' counter plus, for each scheduler id, its retention module
    names and the 'errors'/'warnings' found for it. Nothing is returned.
    """
    # Manage the scheduler retention configuration if many schedulers exist in same realm
    for sched in conf.schedulers:
        # We only want to manage the enabled schedulers
        if getattr(sched, 'enabled', '1') in ['0', False]:
            continue
        if not sched.realm in ret_sched_by_realm:
            ret_sched_by_realm[sched.realm] = {'nb_active': 0}
        
        sched_dict = ret_sched_by_realm[sched.realm]
        
        if not sched.id in sched_dict:
            modules = getattr(sched, 'modules', '').replace(' ', '').split(',')
            # Split on empty lines will return [''], we whant to have a empty list instead
            modules = [] if modules == [''] else modules
            # NOTE: despite its key name, 'name' holds the LIST of retention module names
            sched_dict[sched.id] = {'name': modules, 'errors': [], 'warnings': []}
            
            if len(modules) == 0:
                sched_dict[sched.id]['warnings'].append("The scheduler does not have any retention module and so it won't be able to save and load its retention data. Please add a scheduler retention module")
            elif len(modules) > 1:
                sched_dict[sched.id]['errors'].append("Too many retention modules configured (%s). Only one retention module can be used" % ', '.join(modules))
            # A scheduler is considered active unless it is flagged as a spare
            is_active = getattr(sched, 'spare', '0') != '1'
            sched_dict[sched.id]['is_active'] = is_active
            if is_active:
                sched_dict['nb_active'] += 1
    
    # Check for 'bad' retention type when the 2nd scheduler arrive
    for realm, sched_dict in ret_sched_by_realm.iteritems():
        if len(sched_dict) > 2:  # 2 = nb_scheduler + nb_active
            for sched_id, retention_info in sched_dict.iteritems():
                if sched_id == 'nb_active':
                    # this is not a scheduler, this is the just flags, do a continue
                    continue
                # In case of a realm with 1 scheduler and 1 spare, we want to set warning on the spare and on the master
                warn_lvl = 'errors'
                if sched_dict['nb_active'] == 1 and len(sched_dict) > 2:
                    warn_lvl = 'warnings'
                retention_names = retention_info['name']
                if not retention_names:
                    sched_dict[sched_id][warn_lvl].append("The scheduler does not have any retention module and so it won't be able to save and load its retention data. Please add a scheduler retention module.")
                    continue
                # Only one retention module is configured, it can't be pickles
                if 'PickleRetentionFile' in retention_names:
                    sched_dict[sched_id][warn_lvl].append(
                        'The scheduler has a pickle_retention_file module but is in a distributed realm with several schedulers. This module is incompatible for those environments as the retention file is not shared across servers. Please use a mongodb_retention type module instead.')
                    continue
                # Find the retention module configuration
                for retention_name in retention_names:
                    retention_conf = conf.modules.find_by_name(retention_name)
                    uri = getattr(retention_conf, 'uri', '')
                    if not uri:
                        sched_dict[sched_id][warn_lvl].append('The module %s is a retention module but without a uri parameter. Please add one.' % retention_name)
                        continue
                    if 'localhost' in uri:
                        sched_dict[sched_id][warn_lvl].append(
                            'The %s module is configured with localhost URI. In a distributed realm with several schedulers, all retention module in that realm must be set to the same server. Please specify the IP address of the retention server.' % retention_name)
                        continue


def check_architecture():
    """Run the global (arbiter-side) architecture checks over the whole configuration.

    Loads the arbiter configuration, then validates each daemon family
    (basic conf, duplicates, live connection), scheduler coverage, graphite
    storage and finally reports items that were imported directly into the
    arbiter. Results go into the report tree; nothing is returned.
    """
    logger.debug('check_arbiter::start')
    _conf = ConfigUtils.get_arbiter_config()
    # Set realm utils config
    RealmUtils.set_realm_config(_conf.realms)
    
    _sla_modules = {}
    _sla_module_conf = {}
    _webui_modules = {}
    _webui_modules_conf = []
    # Index the sla/webui module configurations by module name for later lookup
    for m in _conf.modules:
        if m.module_type == 'webui':
            _webui_modules[m.module_name] = get_webui_configuration(m)
        if m.module_type == 'sla':
            _sla_modules[m.module_name] = get_sla_address_and_database(m)
    
    _prepare_schedulers_retentions(_conf)
    _prepare_arbiters_spare_conf(_conf)
    
    all_daemons_conf = [('scheduler', _conf.schedulers),
                        ('broker', _conf.brokers),
                        ('receiver', _conf.receivers),
                        ('reactionner', _conf.reactionners),
                        ('poller', _conf.pollers),
                        ('arbiter', _conf.arbiters),
                        ('provider', _conf.providers)]
    daemons_alive_by_type = {}
    for (_daemon_type, _daemons_conf) in all_daemons_conf:
        _daemons_alive = []
        check_satellites_basic_configuration(_daemons_conf, _daemon_type, _conf.bad_encoding_files)
        check_duplicate_satellites(_daemons_conf)
        # sla/webui module usage is only relevant for brokers
        if _daemon_type == 'broker':
            _sla_module_conf = get_sla_modules_configuration_used(_daemons_conf, _sla_modules)
            _webui_modules_conf = get_webui_modules_configuration_used(_daemons_conf, _webui_modules)
        check_satellite_connection(_daemons_conf, sla_module_conf=_sla_module_conf, webui_modules_conf=_webui_modules_conf, daemons_alive=_daemons_alive)
        # d[3] is the is_spare flag: masters (False) sort before spares (True)
        daemons_alive_by_type[_daemon_type] = sorted(_daemons_alive, key=lambda d: d[3])
    
    daemons = {
        'schedulers'  : _conf.schedulers,
        'brokers'     : _conf.brokers,
        'receivers'   : _conf.receivers,
        'reactionners': _conf.reactionners,
        'pollers'     : _conf.pollers,
        'arbiters'    : _conf.arbiters,
        'providers'   : _conf.providers
    }
    check_duplicate_daemon_name(daemons)
    # We are looking if all schedulers are reach by active brokers/pollers/reactionners like the arbiter
    # is doing on the configuration check
    Checks.check_schedulers_are_reacheable(_conf.schedulers, _conf.brokers, _conf.pollers, _conf.reactionners)
    
    GraphiteStorage(_conf, daemons_alive_by_type).check()
    
    # Items imported directly into the arbiter are ignored: tell the user to use the synchronizer
    _branch = TreeUtils.get_satellite_branch(_conf.arbiters[0])
    for _item_type in _conf.ignored_items.iterkeys():
        _branch.add_leaf('Cannot import %ss directly into Arbiter. Please use the synchronizer with a cfg-file source to import %ss into the arbiter. You can see the concerned elements in the arbiter log.' % (_item_type, _item_type),
                         status='WARNING')
    
    return


def get_webui_configuration(module):
    """Extract the subset of a WebUI module's settings needed by the checks."""
    wanted_keys = ('modules', 'auth_secret', 'imported_from', 'module_name', 'use_ssl', 'port')
    return dict((key, getattr(module, key)) for key in wanted_keys)


def get_sla_address_and_database(module):
    """Return the host part of the sla module uri plus its database name.

    On any parsing problem the raw uri is used as the address.
    """
    _uri = module.uri
    try:
        # Drop the scheme, then anything after the first '/' (db path, options...)
        _address = _uri.split('://')[1].split('/')[0]
    except Exception:
        _address = _uri
    return {'address': _address, 'database': module.database}


def get_webui_modules_configuration_used(broker_list, list_module_webui):
    """Return the webui module configurations actually referenced by the brokers.

    Each returned entry is a shallow copy of the module configuration,
    enriched with the broker's address and name so callers can match it
    back to the broker that uses it.
    """
    _list_webui_used = []
    for broker in broker_list:
        for broker_modules_name in get_module_list(broker):
            # (removed an unused "data = {}" local that was never read)
            if broker_modules_name in list_module_webui:
                # Shallow copy so we never mutate the shared configuration dict
                _data = copy.copy(list_module_webui[broker_modules_name])
                _data['address'] = broker.address
                _data['broker_name'] = broker.get_name()
                _list_webui_used.append(_data)
    return _list_webui_used


def get_sla_modules_configuration_used(broker_list, sla_modules):
    """Return the sla module configurations used by active (non-spare) brokers.

    Detects when the same sla module is referenced by several brokers and
    flags it as a duplicate, except when the module points to localhost
    (each broker then talks to its own local server).

    NOTE(review): the returned entries are the very dicts from sla_modules,
    mutated in place ('duplicate' / 'duplicate_with' keys added) — the caller
    therefore shares state with sla_modules.
    """
    sla_module_already_used = {}
    for broker in broker_list:
        # Spare or disabled brokers do not actually run the module
        if broker.spare == '1' or broker.enabled == '0':
            continue
        for broker_modules_name in get_module_list(broker):
            if broker_modules_name in sla_modules:
                address = broker.address
                try:
                    # Resolve to an IP so two different names of the same host compare equal
                    address = socket.gethostbyname(address)
                except IOError:
                    pass
                
                if broker_modules_name in sla_module_already_used:
                    real_duplicate = True
                    _data = sla_modules[broker_modules_name]
                    
                    if _data['address'] in ('127.0.0.1', 'localhost') and address not in _data['duplicate_with']:
                        # module with localhost address on 2 different broker
                        real_duplicate = False
                    _data['duplicate_with'].append(address)
                    sla_module_already_used[broker_modules_name]['duplicate'] = real_duplicate
                else:
                    _data = sla_modules[broker_modules_name]
                    _data['duplicate'] = False
                    _data['duplicate_with'] = [address]
                    sla_module_already_used[broker_modules_name] = _data
                # only the first matching sla module of this broker is counted
                break
    return sla_module_already_used


def get_module_list(daemon):
    """Return the daemon's configured module names, whitespace-stripped."""
    raw_names = daemon.modules.split(',')
    return [raw_name.strip() for raw_name in raw_names]


class Addons(object):
    """Health checks for the optional local addons (NagVis and its architecture variant)."""
    
    @staticmethod
    def check_installation_present(addon_branch, install_path):
        """Report whether the NagVis installation directory exists on disk."""
        if not os.path.isdir(install_path):
            addon_branch.add_leaf("NagVis installation cannot be found (%s)" % install_path, status='ERROR')
        else:
            addon_branch.add_leaf("NagVis installation found (%s)" % install_path)
    
    
    @staticmethod
    def at_least_one_daemon_has_module(daemons, module_name):
        """Return True when module_name is configured on at least one of the daemons.

        daemon.modules is either a raw comma-separated string or a list of
        module objects, depending on how the configuration was loaded.
        """
        for daemon in daemons:
            if isinstance(daemon.modules, basestring):
                daemon_configured_modules = get_module_list(daemon)
            else:
                daemon_configured_modules = [mod.module_name for mod in daemon.modules]
            
            if module_name in daemon_configured_modules:
                return True  # no need to scan the remaining daemons
        return False
    
    
    @staticmethod
    def check(addon_branch, addon_name):
        """Run the checks for one enabled addon. Returns False for an unknown addon name."""
        conf = ConfigUtils.get_arbiter_config()
        # (removed two calls to at_least_one_daemon_has_module() whose results were
        # discarded; the real module checks are done per-addon below)
        
        if addon_name == "nagvis":
            Addons.check_installation_present(addon_branch, '/opt/nagvis')
            Addons.check_ping(addon_branch, addon_name)
            if server_type_global:
                if not Addons.at_least_one_daemon_has_module(conf.brokers, 'Livestatus'):
                    addon_branch.add_leaf("'Livestatus' module has not been found on any Broker. NagVis will not be able to see status of Shinken elements", status='ERROR')
        elif addon_name == "nagvis-shinken-architecture":
            if not os.path.isfile('/etc/shinken/modules/architecture-export.cfg'):
                addon_branch.add_leaf("Module configuration file not found (/etc/shinken/modules/architecture-export.cfg)", status='ERROR')
            else:
                addon_branch.add_leaf("Module configuration file found (/etc/shinken/modules/architecture-export.cfg)")
            
            Addons.check_installation_present(addon_branch, '/etc/shinken/external/nagvis')
            Addons.check_ping(addon_branch, addon_name)
            if server_type_global:
                if not Addons.at_least_one_daemon_has_module(conf.arbiters, 'architecture-export'):
                    addon_branch.add_leaf("'architecture-export' module has not been found on any Arbiter. Architecture maps will not be generated", status='ERROR')
                if not Addons.at_least_one_daemon_has_module(conf.brokers, 'Livestatus'):
                    addon_branch.add_leaf("'Livestatus' module has not been found on any Broker. NagVis will not be able to see status of Shinken elements", status='ERROR')
        else:
            return False
    
    
    @staticmethod
    def check_ping(addon_branch, addon_name):
        """Try to reach the addon home page over HTTP and report the result as a leaf."""
        if addon_name == "nagvis":
            ping_url = "http://localhost/shinken-map"
        elif addon_name == "nagvis-shinken-architecture":
            ping_url = "http://localhost/shinken-core-map"
        else:
            print(addon_name)
            return False
        # Both known addons use the same message templates
        success_message = "'%s' addon is running correctly at %s" % (addon_name, ping_url)
        error_message = "Cannot join '%s' home page (%s)" % (addon_name, ping_url)
        
        # Try standard PING
        try:
            # ask http_get to not do the read, and directly give us the http response
            response = http_get(ping_url, timeout=5, do_read=False)
        except Exception as exp:
            message = manage_exception(exp)
            addon_branch.add_leaf("Cannot join '%s' addon: %s" % (addon_name, message), status='ERROR')
            return False
        
        if response.getcode() != 200:
            addon_branch.add_leaf(error_message, status='ERROR')
        else:
            addon_branch.add_leaf(success_message)


def check_addons():
    """Run the healthcheck of every enabled local addon.

    When no addon is enabled, a single informative leaf is added under the
    "Local addons" branch; otherwise each enabled addon gets its own
    "[name]" sub-branch, filled by Addons.check().
    """
    addons_list = get_local_addons()
    enabled_addons = [name for (name, is_enabled) in addons_list.iteritems() if is_enabled]
    if not enabled_addons:
        Tree.get_root().get_branch("Local addons").add_leaf("No enabled addons. Available addons can be listed and enabled and with the 'shinken-addons-list' and 'shinken-addons-enable' commands.")
        return
    
    # Walk the known addons in their declaration order; skip the disabled ones
    for addon in POSSIBLE_ADDONS:
        if not addons_list.get(addon, False):
            continue
        addon_branch = Tree.get_root().get_branch("Local addons").get_branch("[%s]" % addon)
        Addons.check(addon_branch, addon)


#######################################################################################
#     __  ___      _
#    /  |/  /___ _(_)___
#   / /|_/ / __ `/ / __ \
#  / /  / / /_/ / / / / /
# /_/  /_/\__,_/_/_/ /_/
#######################################################################################


if __name__ == '__main__':
    # Command-line entry point of the healthcheck tool.
    parser = optparse.OptionParser(description='This tool is used to check the state of your Shinken Enterprise installation and configuration')
    parser.add_option('-v', '--version', dest='request_version', action='store_true', help="Show the program version number and exit")
    parser.add_option('-l', '--local', dest='local_only', action='store_true', help="Only check the local daemons and installation")
    parser.add_option('-g', '--global', dest='global_only', action='store_true', help="Check the global installation. Must be run on the arbiter/synchronizer.")
    parser.add_option('--debug', dest='debug_mode', action='store_true', help="For Support only: enable debug output in the command")
    parser.add_option('-f', '--file', dest='create_file', action='store_true', help='Writes the health check to a file in addition to displaying it')
    parser.add_option('--output-directory', dest='file_dir', help='Output directory for the output file. Defaults to current directory')
    parser.add_option('--output-name', dest='file_name', help='File name for the output file. Defaults to shinken-healthcheck_$(DATE).txt')
    parser.add_option('--timeout', dest='timeout', type="int", help='Timeout value in seconds for every network request')
    parser.add_option('--modules-warning-expire', type="int", dest='modules_warning_expire', help='Delay in minutes within which the modules restarts will raise an alert. Defaults to 120 (2 Hours), max to 1440 (24 Hours)')
    parser.add_option('--show-history', dest='show_history', action='store_true', help='Shows the history of the Shinken Enterprise installation & data on this server')
    opts, args = parser.parse_args()
    
    # Apply the module-restart warning window before any check runs
    if opts.modules_warning_expire is not None:
        Checks.set_warning_threshold(int(opts.modules_warning_expire))
    
    # -v/--version: print version and exit immediately, without running any check
    if opts.request_version is True:
        HealthPrinter.print_version()
        exit(0)
    
    # Override the module-level network timeout for all requests.
    # NOTE(review): the _FORCE flag presumably makes this win over per-check
    # timeouts — confirm where DEFAULT_TIMEOUT_FORCE is read.
    if opts.timeout is not None:
        DEFAULT_TIMEOUT = int(opts.timeout)
        DEFAULT_TIMEOUT_FORCE = True
    
    # -f/--file: mirror the report into a file; name/directory can be
    # overridden with --output-name / --output-directory
    if opts.create_file is True:
        date_part = datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
        out_file = "shinken-healthcheck_%s.txt" % date_part
        out_dir = os.getcwd()
        if opts.file_name is not None:
            out_file = opts.file_name
        
        if opts.file_dir is not None:
            out_dir = opts.file_dir
        
        # Create the output directory when missing; bail out on failure
        if not os.path.isdir(out_dir):
            try:
                os.mkdir(out_dir)
            except OSError as e:
                HealthPrinter.write("Unable to create output directory : %s" % e)
                exit(1)
        
        full_out_file = os.path.join(out_dir, out_file)
        
        HealthPrinter.set_out_file(full_out_file)
    
    # --debug: verbose logger output, for Support only
    if opts.debug_mode:
        DEBUG = True
        logger.setLevel('DEBUG')
    
    HealthPrinter.write("#" * DASH_LINE_SIZE)
    HealthPrinter.write("This tool is used to check the state of your Shinken Enterprise (v%s) installation and configuration" % CURRENT_VERSION)
    
    # Determine the check scope: honor an explicit -l/-g, otherwise guess
    # from the role of this node (see below)
    server_type_local = opts.local_only
    server_type_global = opts.global_only
    
    # -l and -g are mutually exclusive
    if server_type_local and server_type_global:
        HealthPrinter.write("Error: You cannot ask for a local only and global mode, please choose only one")
        exit(1)
    
    if not server_type_local:  # guess
        # NOTE(review): `conf` is also read later by Addons.check (global
        # mode) and by the default-realm check — it is only defined on this
        # code path, which is why local-only mode never touches it.
        conf = ConfigUtils.get_arbiter_config()
        # Different launches allowed:
        # * master arbiter: see the whole architecture
        # * spare arbiter: currently limit to local launch (see SEF-5776) because local cfg files must be outdated,
        #                  so until we have a real conf cache send by the master (and not a HUGE one) we limit the test
        # * not an arbiter: local view
        if HostUtils.is_daemon_node('arbiter'):
            if ConfigUtils.is_master_arbiter_node():
                server_type_global = True
                HealthPrinter.write("\033[33mNote: This check is a global healthcheck as if launched from an Arbiter master server\033[0m")
            else:
                # Spare arbiter: downgrade to local even if -g was requested
                HealthPrinter.write("\033[33mNote: This check is a local healthcheck as if launched from an Arbiter spare server\033[0m")
                server_type_local = True
                server_type_global = False
        else:
            # Satellite node: only a local view is possible
            HealthPrinter.write("\033[33mNote: This check is a local healthcheck as if launched from a satellite server\033[0m")
            server_type_local = True
            server_type_global = False
    
    HealthPrinter.write("#" * DASH_LINE_SIZE)
    HealthPrinter.write("  Healthcheck report %s" % datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S"))
    HealthPrinter.write("-" * DASH_LINE_SIZE)
    HealthPrinter.print_version()
    
    # Global mode requires a sane default realm before anything else;
    # when it is broken, all the per-section checks below are skipped.
    if server_type_global:
        default_realm_ok = check_default_realm(conf.realms)
    else:
        default_realm_ok = True
    if default_realm_ok:
        # initialize first level branch orders (weight drives display order)
        Tree.get_root().get_branch('License key', weight=50)
        Tree.get_root().get_branch('Local libraries', weight=40)
        Tree.get_root().get_branch('Local addons', weight=10)
        
        if server_type_local:
            Tree.get_root().get_branch('Local daemons', weight=70)
            server_type = "local"
        else:
            Tree.get_root().get_branch('Architecture', weight=60)
            Tree.get_root().get_branch('Storage', weight=20)
            server_type = "global"
        
        # Run the checks, updating the progress bar between sections
        HealthPrinter.write("#" * DASH_LINE_SIZE)
        HealthCheckProgress.show_progress(20)
        check_libs()
        
        HealthCheckProgress.show_progress(40)
        HealthCheckProgress.show_progress_message("Check architecture...")
        if server_type == "global":
            check_architecture()
        
        HealthCheckProgress.show_progress(60)
        HealthCheckProgress.show_progress_message("Check synchronizer...")
        if server_type == "global":
            check_synchronizer()
        
        HealthCheckProgress.show_progress(80)
        
        # Local mode: check every enabled local daemon instance; the
        # synchronizer has its own dedicated check
        if server_type == "local":
            for dtype in POSSIBLE_DAEMONS:
                for (daemon_id, enabled) in get_local_instances_for_type(dtype):
                    if not enabled:
                        continue
                    if dtype == 'synchronizer':
                        check_synchronizer()
                    else:
                        check_local_daemon(dtype, daemon_id)
        
        # License is only checkable on nodes hosting an arbiter or a broker
        HealthCheckProgress.show_progress_message("Check License...")
        if HostUtils.is_daemon_node('arbiter') or HostUtils.is_daemon_node('broker'):
            check_licence()
        
        HealthCheckProgress.show_progress(90)
        HealthCheckProgress.show_progress_message("Check addons...")
        check_addons()
    
    HealthCheckProgress.end_progress()
    
    # --show-history: append installation/data/encryption history to the report
    if opts.show_history:
        HealthPrinter.print_installation_history()
        HealthPrinter.print_data_history()
        HealthPrinter.print_encryption_history()
    
    # Render the full result tree, then its summary
    Tree.get_root().render()
    
    Tree.get_root().show_summary()
    
    HealthPrinter.write("\nHealthcheck report %s" % time.strftime("%d/%m/%Y %H:%M:%S"))
    
    if opts.create_file is True:
        print("\n\nSupport file is available at %s" % HealthPrinter.get_file_name())
    
    # Exit code reflects the worst status found in the tree
    exit(get_exit_code())
