arthur.ath.cx Git - netdata.git/commitdiff
Merge branch 'master' into ab-debian
authorAlexander Barton <alex@barton.de>
Mon, 27 Mar 2017 11:36:17 +0000 (13:36 +0200)
committerAlexander Barton <alex@barton.de>
Mon, 27 Mar 2017 11:36:17 +0000 (13:36 +0200)
* master:
  smartd_log plugin: convert chart family to lower case
  ovpn_status_log plugin: minor fixes
  fail2ban plugin: "Currently In Jail" chart renamed
  dns_query_time plugin: replace "." with "_" in dimensions
  updated configs.signatures
  ZFS plugin for Linux; fixes #1994
  dns_query_time plugin: python readme update
  dns_query_time plugin: makefiles and python.d.conf update
  dns_query_time plugin: module configuration file added
  dns_query_time plugin: added
  added backends debugging; #2017
  fail2ban_plugin: "filterfalse" removed
  fail2ban_plugin: "Currently In Jail" chart added
  prevent a crash if loadavg chart is disabled
  Minimize reallocz usage in FreeBSD plugin
  Fix compilation error on FreeBSD 10.3
  postgres_plugin: module configuration file update
  postgres_plugin: option to exclude databases from poll added

24 files changed:
CMakeLists.txt
conf.d/Makefile.am
conf.d/health.d/zfs.conf [new file with mode: 0644]
conf.d/python.d.conf
conf.d/python.d/dns_query_time.conf [new file with mode: 0644]
conf.d/python.d/postgres.conf
configs.signatures
python.d/Makefile.am
python.d/README.md
python.d/dns_query_time.chart.py [new file with mode: 0644]
python.d/fail2ban.chart.py
python.d/ovpn_status_log.chart.py
python.d/postgres.chart.py
python.d/smartd_log.chart.py
src/Makefile.am
src/backends.c
src/freebsd_sysctl.c
src/log.h
src/plugin_proc.c
src/plugin_proc.h
src/proc_loadavg.c
src/proc_spl_kstat_zfs.c [new file with mode: 0644]
web/dashboard_info.js
web/index.html

index d848c5c84e03c6a07a506fd2d9d3ca82a0ea5300..18a0e81eae354d7f5acf20330e5391890f4528c7 100755 (executable)
@@ -83,6 +83,7 @@ set(NETDATA_SOURCE_FILES
         src/proc_self_mountinfo.c
         src/proc_self_mountinfo.h
         src/proc_softirqs.c
+        src/proc_spl_kstat_zfs.c
         src/proc_stat.c
         src/proc_sys_kernel_random_entropy_avail.c
         src/proc_uptime.c
index efe1f2a6e34d0af98c2153340d809b37f9859543..f53429928949bfb103dec20db4626b0dd4e1aa6c 100644 (file)
@@ -28,6 +28,7 @@ dist_pythonconfig_DATA = \
     python.d/apache_cache.conf \
     python.d/bind_rndc.conf \
     python.d/cpufreq.conf \
+    python.d/dns_query_time.conf \
     python.d/dovecot.conf \
     python.d/elasticsearch.conf \
     python.d/example.conf \
@@ -81,6 +82,7 @@ dist_healthconfig_DATA = \
     health.d/squid.conf \
     health.d/varnish.conf \
     health.d/web_log.conf \
+    health.d/zfs.conf \
     $(NULL)
 
 if LINUX
diff --git a/conf.d/health.d/zfs.conf b/conf.d/health.d/zfs.conf
new file mode 100644 (file)
index 0000000..af73824
--- /dev/null
@@ -0,0 +1,10 @@
+
+   alarm: zfs_memory_throttle
+      on: zfs.memory_ops
+  lookup: sum -10m unaligned absolute of throttled
+   units: events
+   every: 1m
+    warn: $this > 0
+   delay: down 1h multiplier 1.5 max 2h
+    info: the number of times ZFS had to limit the ARC growth in the last 10 minutes
+      to: sysadmin
index 9ed346cdcdf76573191a4c12b5c21dce6267bfd6..22a18efac323af97c35413f7c123f783f3bbd382 100644 (file)
@@ -31,6 +31,7 @@ log_interval: 3600
 # bind_rndc: yes
 # cpufreq: yes
 # cpuidle: yes
+# dns_query_time: yes
 # dovecot: yes
 # elasticsearch: yes
 
@@ -52,6 +53,7 @@ gunicorn_log: no
 # memcached: yes
 # mysql: yes
 # nginx: yes
+# nsd: yes
 
 # nginx_log has been replaced by web_log
 nginx_log: no
diff --git a/conf.d/python.d/dns_query_time.conf b/conf.d/python.d/dns_query_time.conf
new file mode 100644 (file)
index 0000000..f4d4dbf
--- /dev/null
@@ -0,0 +1,72 @@
+# netdata python.d.plugin configuration for dns_query_time
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+#  - global variables
+#  - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+#     name: myname     # the JOB's name as it will appear at the
+#                      # dashboard (by default is the job_name)
+#                      # JOBs sharing a name are mutually exclusive
+#     update_every: 1  # the JOB's data collection frequency
+#     priority: 60000  # the JOB's order on the dashboard
+#     retries: 5       # the JOB's number of restoration attempts
+#
+# In addition to the above, dns_query_time also supports the following:
+#
+#     dns_servers: 'dns servers'       # list of DNS servers to query
+#     domains: 'domains'               # list of domains to pick from at random
+#     aggregate: yes/no                # Default: yes. Aggregate all servers in one chart or not
+#     response_timeout: 4              # Default: 4. DNS query response timeout in seconds
+#                                      # (a query is reported as -100 when it exceeds response_timeout)
+#
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+#
+#aggregate: yes
+#dns_servers: '8.8.8.8 8.8.4.4'
+#domains: 'python.org distrowatch.com linuxmint.com linux.com rsyslog.com liblognorm.com archlinux.org cisco.com debian.org kernel.org gns3.com opera.com github.com youtube.com amazon.co.uk kde.org netdata.firehol.org ubuntu.com redhat.com opensuse.org wireshark.org vmware.com microsoft.com elastic.co'
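
Putting the documented parameters together, a hand-written job for this module might look like the sketch below (server addresses and domains are placeholders, not recommendations):

    my_dns_check:
      name: 'local'
      update_every: 10
      aggregate: no
      response_timeout: 2
      dns_servers: '192.0.2.1 192.0.2.2'
      domains: 'example.com example.org'
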
index d4d2bafcc799c0103440303acb935f0637bccc94..12dddae67ea2176115ff8360b0dce72c32986eec 100644 (file)
@@ -68,6 +68,7 @@
 #
 #     table_stats : false
 #     index_stats : false
+#     database_poll : 'dbase_name1 dbase_name2' # poll only the specified databases (all others will be excluded from charts)
 #
 # Postfix permissions are configured at its pg_hba.conf file. You can
 # "trust" local clients to allow netdata to connect, or you can create
index 713a1d3232f6b2d21895099bc97a781f8fd779b0..e8a90fda723b9d7fab6a751856bbe77f6f934fc4 100644 (file)
@@ -220,6 +220,7 @@ declare -A configs_signatures=(
   ['7d8bd884ec26cb35d16c4fc05f969799']='python.d/squid.conf'
   ['7deb236ec68a512b9bdd18e6a51d76f7']='python.d/mysql.conf'
   ['7e5fc1644aa7a54f9dbb1bd102521b09']='health.d/memcached.conf'
+  ['7f13631183fbdf79c21c8e5a171e9b34']='health.d/zfs.conf'
   ['80266bddd3df374923c750a6de91d120']='health.d/apache.conf'
   ['80d242d619eb7e91cebfdbf58d79b0f8']='health.d/disks.conf'
   ['80df37b89e852d585209b8c02bb94312']='python.d/bind_rndc.conf'
@@ -329,7 +330,9 @@ declare -A configs_signatures=(
   ['c1a7e634b5b8aad523a0d115a93379cd']='health.d/memcached.conf'
   ['c3296c08260bcd556e74711c820817be']='health.d/cpu.conf'
   ['c61948101e0e6846679682794ee48c5b']='python.d/nginx.conf'
+  ['c6403d8b1bcfa52d3abb941be155fc03']='python.d.conf'
   ['c88fb430f35b7d8f08775d84debffbd2']='python.d/phpfpm.conf'
+  ['c94cb4f4eeaa13c1dcee6248deb01829']='python.d/postgres.conf'
   ['c9b792755de59d842ba95f8c315d94c8']='health.d/swap.conf'
   ['ca026d7c779f0a7cb7787713c5be5c47']='charts.d.conf'
   ['ca08a9b18d38ae0a0f5081a7cdc96863']='health.d/swap.conf'
@@ -410,6 +413,7 @@ declare -A configs_signatures=(
   ['f96acba4b14b0c1b50d0187a04416151']='health_alarm_notify.conf'
   ['f9be549a849d023595d19d5d74263e0f']='health.d/tcp_resets.conf'
   ['fa4396513b358d6ec6a7f5bfb08439b8']='health.d/net.conf'
+  ['fc40b83f173bc4676d686867a8369a62']='python.d/dns_query_time.conf'
   ['fd3164e6e8cb6726706267eae49aa082']='health_alarm_notify.conf'
   ['fdd11640ba626cc2064c2fe3ea3eee4c']='health.d/cpu.conf'
   ['fde44f62c8d7e52f09705cd273fae6b1']='charts.d/tomcat.conf'
index bfe28ff2803bf5526cd09d3d8999fef55917e91b..89ac2b789447e636d49019246a71827cd0534903 100644 (file)
@@ -18,6 +18,7 @@ dist_python_DATA = \
     bind_rndc.chart.py \
     cpufreq.chart.py \
     cpuidle.chart.py \
+    dns_query_time.chart.py \
     dovecot.chart.py \
     elasticsearch.chart.py \
     example.chart.py \
index 7df6e3e8689d1f8b6ff705cdfbbf9cba9275dd46..e0e5893e2610d2b7a69e8614bc92b299bec4e217 100644 (file)
@@ -227,6 +227,16 @@ Your kernel needs to have `CONFIG_CPU_IDLE` enabled.
 It produces one stacked chart per CPU, showing the percentage of time spent in
 each state.
 
+---
+# dns_query_time
+
+This module provides DNS query time statistics.
+
+**Requirement:**
+* `python-dnspython` package
+
+It produces one aggregate chart, or one chart per DNS server, showing the query time.
+
 ---
 
 # dovecot
diff --git a/python.d/dns_query_time.chart.py b/python.d/dns_query_time.chart.py
new file mode 100644 (file)
index 0000000..9053d9a
--- /dev/null
@@ -0,0 +1,135 @@
+# -*- coding: utf-8 -*-
+# Description: dns_query_time netdata python.d module
+# Author: l2isbad
+
+try:
+    from time import monotonic as time
+except ImportError:
+    from time import time
+try:
+    import dns.message, dns.query, dns.name, dns.rdatatype, dns.exception
+    DNS_PYTHON = True
+except ImportError:
+    DNS_PYTHON = False
+try:
+    from queue import Queue
+except ImportError:
+    from Queue import Queue
+from random import choice
+from threading import Thread
+from socket import gethostbyname, gaierror
+from base import SimpleService
+
+
+# default module values (can be overridden per job in `config`)
+update_every = 5
+priority = 60000
+retries = 60
+
+
+class Service(SimpleService):
+    def __init__(self, configuration=None, name=None):
+        SimpleService.__init__(self, configuration=configuration, name=name)
+        self.order = list()
+        self.definitions = dict()
+        self.timeout = self.configuration.get('response_timeout', 4)
+        self.aggregate = self.configuration.get('aggregate', True)
+        self.domains = self.configuration.get('domains')
+        self.server_list = self.configuration.get('dns_servers')
+
+    def check(self):
+        if not DNS_PYTHON:
+            self.error('\'python-dnspython\' package is needed to use dns_query_time.chart.py')
+            return False
+
+        self.timeout = self.timeout if isinstance(self.timeout, int) else 4
+        self.update_every = self.timeout + 1 if self.update_every <= self.timeout else self.update_every
+
+        if not all([self.domains, self.server_list,
+                    isinstance(self.server_list, str), isinstance(self.domains, str)]):
+            self.error('dns_servers and domains can\'t be empty')
+            return False
+        else:
+            self.domains, self.server_list = self.domains.split(), self.server_list.split()
+
+        for ns in self.server_list[:]:  # iterate over a copy, items may be removed below
+            if not check_ns(ns):
+                self.info('Bad NS: %s' % ns)
+                self.server_list.remove(ns)
+                if not self.server_list:
+                    return False
+
+        data = self._get_data(timeout=1)
+
+        down_servers = [s for s in data if data[s] == -100]
+        for down in down_servers:
+            down = down[3:].replace('_', '.')
+            self.info('Removed due to no response: %s' % down)
+            self.server_list.remove(down)
+            if not self.server_list:
+                return False
+
+        self._data_from_check = data
+        self.order, self.definitions = create_charts(aggregate=self.aggregate, server_list=self.server_list)
+        self.info(str({'domains': len(self.domains), 'servers': self.server_list}))
+        return True
+
+    def _get_data(self, timeout=None):
+        return dns_request(self.server_list, timeout or self.timeout, self.domains)
+
+
+def dns_request(server_list, timeout, domains):
+    threads = list()
+    que = Queue()
+    result = dict()
+
+    def dns_req(ns, t, q):
+        domain = dns.name.from_text(choice(domains))
+        request = dns.message.make_query(domain, dns.rdatatype.A)
+
+        try:
+            dns_start = time()
+            dns.query.udp(request, ns, timeout=t)
+            dns_end = time()
+            query_time = round((dns_end - dns_start) * 1000)
+            q.put({'_'.join(['ns', ns.replace('.', '_')]): query_time})
+        except dns.exception.Timeout:
+            q.put({'_'.join(['ns', ns.replace('.', '_')]): -100})
+
+    for server in server_list:
+        th = Thread(target=dns_req, args=(server, timeout, que))
+        th.start()
+        threads.append(th)
+
+    for th in threads:
+        th.join()
+        result.update(que.get())
+
+    return result
+
+
+def check_ns(ns):
+    try:
+        return gethostbyname(ns)
+    except gaierror:
+        return False
+
+
+def create_charts(aggregate, server_list):
+    if aggregate:
+        order = ['dns_group']
+        definitions = {'dns_group': {'options': [None, 'DNS Response Time', 'ms', 'name servers',
+                                                 'dns_query_time.response_time', 'line'], 'lines': []}}
+        for ns in server_list:
+            definitions['dns_group']['lines'].append(['_'.join(['ns', ns.replace('.', '_')]), ns, 'absolute'])
+
+        return order, definitions
+    else:
+        order = [''.join(['dns_', ns.replace('.', '_')]) for ns in server_list]
+        definitions = dict()
+        for ns in server_list:
+            definitions[''.join(['dns_', ns.replace('.', '_')])] = {'options': [None, 'DNS Response Time', 'ms', ns,
+                                                                                'dns_query_time.response_time', 'area'],
+                                                                    'lines': [['_'.join(['ns', ns.replace('.', '_')]),
+                                                                               ns, 'absolute']]}
+        return order, definitions
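
Since dns_request() and check_ns() above are plain functions, the collection logic can be exercised outside netdata. A minimal sketch, assuming both helpers are in scope and python-dnspython is installed (servers and domain are illustrative):

    servers = [ns for ns in ['8.8.8.8', '8.8.4.4'] if check_ns(ns)]
    result = dns_request(servers, timeout=4, domains=['example.com'])
    # e.g. {'ns_8_8_8_8': 23, 'ns_8_8_4_4': 25}; a value of -100 marks a timeout
    print(result)
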
index c7d24e8c15db195dd567024830646e02b54a7f6d..85d0ade618006adb707a9d6b5ea20f1156b2be0a 100644 (file)
@@ -3,20 +3,17 @@
 # Author: l2isbad
 
 from base import LogService
-from re import compile
-
-try:
-    from itertools import filterfalse
-except ImportError:
-    from itertools import ifilterfalse as filterfalse
+from re import compile as r_compile
 from os import access as is_accessible, R_OK
 from os.path import isdir
 from glob import glob
+import bisect
 
 priority = 60000
 retries = 60
-REGEX = compile(r'\[([A-Za-z-_]+)][^\[\]]*?(?<!# )enabled = true')
-ORDER = ['jails_group']
+REGEX_JAILS = r_compile(r'\[([A-Za-z-_]+)][^\[\]]*?(?<!# )enabled = true')
+REGEX_DATA = r_compile(r'\[(?P<jail>[a-z]+)\] (?P<ban>[A-Z])[a-z]+ (?P<ipaddr>\d{1,3}(?:\.\d{1,3}){3})')
+ORDER = ['jails_bans', 'jails_in_jail']
 
 
 class Service(LogService):
@@ -26,35 +23,39 @@ class Service(LogService):
         self.log_path = self.configuration.get('log_path', '/var/log/fail2ban.log')
         self.conf_path = self.configuration.get('conf_path', '/etc/fail2ban/jail.local')
         self.conf_dir = self.configuration.get('conf_dir', '')
+        self.bans = dict()
         try:
             self.exclude = self.configuration['exclude'].split()
         except (KeyError, AttributeError):
-            self.exclude = []
+            self.exclude = list()
 
     def _get_data(self):
         """
         Parse new log lines
         :return: dict
         """
-        try:
-            raw = self._get_raw_data()
-            if raw is None:
-                return None
-            elif not raw:
-                return self.data
-        except (ValueError, AttributeError):
+        raw = self._get_raw_data()
+        if raw is None:
             return None
+        elif not raw:
+            return self.data
 
         # Fail2ban logs looks like
         # 2016-12-25 12:36:04,711 fail2ban.actions[2455]: WARNING [ssh] Ban 178.156.32.231
-        data = dict(
-            zip(
-                self.jails_list,
-                [len(list(filterfalse(lambda line: (jail + '] Ban') not in line, raw))) for jail in self.jails_list]
-            ))
-
-        for jail in data:
-            self.data[jail] += data[jail]
+        for row in raw:
+            match = REGEX_DATA.search(row)
+            if match:
+                match_dict = match.groupdict()
+                jail, ban, ipaddr = match_dict['jail'], match_dict['ban'], match_dict['ipaddr']
+                if jail in self.jails_list:
+                    if ban == 'B':
+                        self.data[jail] += 1
+                        if address_not_in_jail(self.bans[jail], ipaddr, self.data[jail + '_in_jail']):
+                            self.data[jail + '_in_jail'] += 1
+                    else:
+                        if ipaddr in self.bans[jail]:
+                            self.bans[jail].remove(ipaddr)
+                            self.data[jail + '_in_jail'] -= 1
 
         return self.data
 
@@ -81,17 +82,25 @@ class Service(LogService):
 
         # If for some reason parse failed we still can START with default jails_list.
         self.jails_list = list(set(jails_list) - set(self.exclude)) or ['ssh']
+
         self.data = dict([(jail, 0) for jail in self.jails_list])
+        self.data.update(dict([(jail + '_in_jail', 0) for jail in self.jails_list]))
+        self.bans = dict([(jail, list()) for jail in self.jails_list])
+
         self.create_dimensions()
         self.info('Plugin successfully started. Jails: %s' % self.jails_list)
         return True
 
     def create_dimensions(self):
         self.definitions = {
-            'jails_group': {'options': [None, "Jails ban statistics", "bans/s", 'jails', 'jail.ban', 'line'],
-                            'lines': []}}
+            'jails_bans': {'options': [None, 'Jails Ban Statistics', "bans/s", 'bans', 'jail.bans', 'line'],
+                           'lines': []},
+            'jails_in_jail': {'options': [None, 'Banned IPs (since the last restart of netdata)', 'IPs',
+                                          'in jail', 'jail.in_jail', 'line'], 'lines': []},
+                           }
         for jail in self.jails_list:
-            self.definitions['jails_group']['lines'].append([jail, jail, 'incremental'])
+            self.definitions['jails_bans']['lines'].append([jail, jail, 'incremental'])
+            self.definitions['jails_in_jail']['lines'].append([jail + '_in_jail', jail, 'absolute'])
 
 
 def parse_conf_dir(conf_dir):
@@ -114,7 +123,7 @@ def parse_conf_dir(conf_dir):
             raw_data = f.read()
 
         data = ' '.join(raw_data.split())
-        jails_list.extend(REGEX.findall(data))
+        jails_list.extend(REGEX_JAILS.findall(data))
     jails_list = list(set(jails_list))
 
     return jails_list, 'can\'t locate any jails in %s. Default jail is [\'ssh\']' % conf_dir
@@ -128,5 +137,18 @@ def parse_conf_path(conf_path):
         raw_data = jails_conf.read()
 
     data = raw_data.split()
-    jails_list = REGEX.findall(' '.join(data))
+    jails_list = REGEX_JAILS.findall(' '.join(data))
     return jails_list, 'can\'t locate any jails in %s. Default jail is  [\'ssh\']' % conf_path
+
+
+def address_not_in_jail(pool, address, pool_size):
+    index = bisect.bisect_left(pool, address)
+    if index < pool_size:
+        if pool[index] == address:
+            return False
+        else:
+            bisect.insort_left(pool, address)
+            return True
+    else:
+        bisect.insort_left(pool, address)
+        return True
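
address_not_in_jail() keeps each jail's ban pool sorted, so duplicate checks cost O(log n) via bisect instead of a linear scan. A standalone illustration, assuming the function above is in scope (addresses are made up):

    pool = []
    for ip in ['10.0.0.2', '10.0.0.1', '10.0.0.2']:
        if address_not_in_jail(pool, ip, len(pool)):
            print('new ban:', ip)
    # prints 'new ban' twice; the repeated 10.0.0.2 is found in the sorted pool
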
index c5fca002af67d143089594bcb45b33eab58cd672..b3cc6723fff2878251468db596d25871837a1e24 100644 (file)
@@ -3,7 +3,7 @@
 # Author: l2isbad
 
 from base import SimpleService
-from re import compile, findall, search, subn
+from re import compile as r_compile
 priority = 60000
 retries = 60
 update_every = 10
@@ -11,35 +11,41 @@ update_every = 10
 ORDER = ['users', 'traffic']
 CHARTS = {
     'users': {
-        'options': [None, 'OpenVPN active users', 'active users', 'Users', 'openvpn_status.users', 'line'],
+        'options': [None, 'OpenVPN Active Users', 'active users', 'users', 'openvpn_status.users', 'line'],
         'lines': [
             ["users", None, "absolute"],
         ]},
     'traffic': {
-        'options': [None, 'OpenVPN traffic', 'kilobit/s', 'Traffic', 'openvpn_status.traffic', 'area'],
+        'options': [None, 'OpenVPN Traffic', 'Kb/s', 'traffic', 'openvpn_status.traffic', 'area'],
         'lines': [
             ["in", None, "incremental", 8, 1000], ["out", None, "incremental", 8, -1000]
         ]},
 
 }
 
+
 class Service(SimpleService):
     def __init__(self, configuration=None, name=None):
         SimpleService.__init__(self, configuration=configuration, name=name)
         self.order = ORDER
         self.definitions = CHARTS
         self.log_path = self.configuration.get('log_path')
-        self.regex_data_inter = compile(r'(?<=Since ).*?(?=.ROUTING)')
-        self.regex_data_final = compile(r'\d{1,3}(?:\.\d{1,3}){3}[:0-9,. ]*')
-        self.regex_users = compile(r'\d{1,3}(?:\.\d{1,3}){3}:\d+')
-        self.regex_traffic = compile(r'(?<=(?:,| ))\d+(?=(?:,| ))')
+        self.regex_data_inter = r_compile(r'(?<=Since ).*?(?=.ROUTING)')
+        self.regex_data_final = r_compile(r'\d{1,3}(?:\.\d{1,3}){3}[:0-9,. ]*')
+        self.regex_users = r_compile(r'\d{1,3}(?:\.\d{1,3}){3}:\d+')
+        self.regex_traffic = r_compile(r'(?<=(?:,| ))\d+(?=(?:,| ))')
 
     def check(self):
-        if not self._get_raw_data():
+        if not (self.log_path and isinstance(self.log_path, str)):
+            self.error('\'log_path\' is not defined')
+            return False
+
+        data = self._get_data()
+        if not data:
             self.error('Make sure that the openvpn status log file exists and netdata has permission to read it')
             return False
         else:
-            self.info('Plugin was started succesfully')
+            self._data_from_check = data
             return True
 
     def _get_raw_data(self):
@@ -47,13 +53,14 @@ class Service(SimpleService):
         Open log file
         :return: str
         """
+
         try:
             with open(self.log_path, 'rt') as log:
                 result = log.read()
-        except Exception:
+        except OSError:
             return None
         else:
-            return result
+            return result or None
 
     def _get_data(self):
         """
@@ -62,16 +69,20 @@ class Service(SimpleService):
         """
 
         raw_data = self._get_raw_data()
-        try:
-            data_inter = self.regex_data_inter.search(' '.join(raw_data.splitlines())).group()
-        except AttributeError:
-            data_inter = ''
+        if not raw_data:
+            return None
+
+        data_inter = self.regex_data_inter.search(' '.join(raw_data.splitlines()))
+        if not data_inter:
+            return None
+        else:
+            data_inter = data_inter.group()
 
         data_final = ' '.join(self.regex_data_final.findall(data_inter))
         users = self.regex_users.subn('', data_final)[1]
         traffic = self.regex_traffic.findall(data_final)
 
-        bytes_in = sum([int(traffic[i]) for i in range(len(traffic)) if (i + 1) % 2 is 1])
-        bytes_out = sum([int(traffic[i]) for i in range(len(traffic)) if (i + 1) % 2 is 0])
+        bytes_in = sum(int(t) for i, t in enumerate(traffic) if i % 2 == 0)
+        bytes_out = sum(int(t) for i, t in enumerate(traffic) if i % 2 == 1)
 
         return {'users': users, 'in': bytes_in, 'out': bytes_out}
index d359bb4f7a587039315d9b5d927785c72159b7c9..1976e2a61c49a0bb025677ac4c347f47ec3829a1 100644 (file)
@@ -242,6 +242,7 @@ class Service(SimpleService):
         self.definitions = deepcopy(CHARTS)
         self.table_stats = configuration.pop('table_stats', False)
         self.index_stats = configuration.pop('index_stats', False)
+        self.database_poll = configuration.pop('database_poll', None)
         self.configuration = configuration
         self.connection = False
         self.is_superuser = False
@@ -281,6 +282,9 @@ class Service(SimpleService):
             is_superuser = check_if_superuser_(cursor, QUERIES['IF_SUPERUSER'])
             cursor.close()
 
+            if self.database_poll and isinstance(self.database_poll, str):
+                self.databases = [dbase for dbase in self.databases
+                                  if dbase in self.database_poll.split()] or self.databases
+
             self.locks_zeroed = populate_lock_types(self.databases)
             self.add_additional_queries_(is_superuser)
             self.create_dynamic_charts_()
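
The database_poll filter above is a whitelist with a safe fallback: discovered databases are intersected with the space-separated names, and an empty intersection keeps the full list. The same logic in isolation (database names are hypothetical):

    databases = ['prod', 'staging', 'test']
    database_poll = 'prod staging'   # space-separated, as documented in the conf file
    databases = [d for d in databases if d in database_poll.split()] or databases
    print(databases)                 # ['prod', 'staging']
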
index e8037237931d8ce7846b3c0d389f3c36591e1510..f9adaf1c5ec5e65c8c5d2135987c4d69192b5a60 100644 (file)
@@ -2,7 +2,7 @@
 # Description: smart netdata python.d module
 # Author: l2isbad, vorph1
 
-from re import compile
+from re import compile as r_compile
 from os import listdir, access, R_OK
 from os.path import isfile, join, getsize, basename, isdir
 try:
@@ -101,7 +101,7 @@ NAMED_DISKS = namedtuple('disks', ['name', 'size', 'number'])
 class Service(SimpleService):
     def __init__(self, configuration=None, name=None):
         SimpleService.__init__(self, configuration=configuration, name=name)
-        self.regex = compile(r'(\d+);(\d+);(\d+)')
+        self.regex = r_compile(r'(\d+);(\d+);(\d+)')
         self.log_path = self.configuration.get('log_path', '/var/log/smartd')
         self.raw_values = self.configuration.get('raw_values')
         self.attr = self.configuration.get('smart_attributes', [])
@@ -208,7 +208,7 @@ class Service(SimpleService):
 
         for k, v in dict([(k, v) for k, v in SMART_ATTR.items() if k in ORDER]).items():
             self.definitions.update({''.join(['attrid', k]): {
-                                      'options': [None, v, units, v, 'smartd.attrid' + k, 'line'],
+                                      'options': [None, v, units, v.lower(), 'smartd.attrid' + k, 'line'],
                                        'lines': create_lines(k)}})
 
 def find_disks_in_log_path(log_path):
index 1c1dd33856e155706af1885c07b8508eadafc8ad..54f3e20a7861c9984561bbf938775a6473fee13b 100644 (file)
@@ -178,6 +178,7 @@ netdata_SOURCES += \
        proc_net_softnet_stat.c \
        proc_net_stat_conntrack.c \
        proc_net_stat_synproxy.c \
+       proc_spl_kstat_zfs.c \
        proc_stat.c \
        proc_sys_kernel_random_entropy_avail.c \
        proc_vmstat.c \
index 3e385cab504bb46a91f1369d444986922baffa84..a2c25e94bdb70cbc758d485a3de682451a744a02 100644 (file)
@@ -570,29 +570,57 @@ void *backends_main(void *ptr) {
         if(unlikely(pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &pthreadoldcancelstate) != 0))
             error("Cannot set pthread cancel state to DISABLE.");
 
+        size_t count_hosts = 0;
+        size_t count_charts_total = 0;
+        size_t count_dims_total = 0;
+
         rrd_rdlock();
         RRDHOST *host;
         rrdhost_foreach_read(host) {
-            if(host->rrd_memory_mode == RRD_MEMORY_MODE_NONE)
+            if(host->rrd_memory_mode == RRD_MEMORY_MODE_NONE) {
+                debug(D_BACKEND, "BACKEND: not sending host '%s' because its memory mode is '%s'", host->hostname, rrd_memory_mode_name(host->rrd_memory_mode));
                 continue;
+            }
 
             rrdhost_rdlock(host);
 
+            count_hosts++;
+            size_t count_charts = 0;
+            size_t count_dims = 0;
+            size_t count_dims_skipped = 0;
+
+            const char *__hostname = (host == localhost)?hostname:host->hostname;
+
             RRDSET *st;
             rrdset_foreach_read(st, host) {
                 rrdset_rdlock(st);
 
+                count_charts++;
+
                 RRDDIM *rd;
                 rrddim_foreach_read(rd, st) {
-                    if(rd->last_collected_time.tv_sec >= after)
-                        chart_buffered_metrics += backend_request_formatter(b, prefix, host, (host == localhost)?hostname:host->hostname, st, rd, after, before, options);
+                    if(likely(rd->last_collected_time.tv_sec >= after)) {
+                        chart_buffered_metrics += backend_request_formatter(b, prefix, host, __hostname, st, rd, after, before, options);
+                        count_dims++;
+                    }
+                    else {
+                        debug(D_BACKEND, "BACKEND: not sending dimension '%s' of chart '%s' from host '%s', its last data collection is not within our timeframe", rd->id, st->id, __hostname);
+                        count_dims_skipped++;
+                    }
                 }
                 rrdset_unlock(st);
             }
+
+            debug(D_BACKEND, "BACKEND: sending host '%s', metrics of %zu dimensions, of %zu charts. Skipped %zu dimensions.", __hostname, count_dims, count_charts, count_dims_skipped);
+            count_charts_total += count_charts;
+            count_dims_total += count_dims;
+
             rrdhost_unlock(host);
         }
         rrd_unlock();
 
+        debug(D_BACKEND, "BACKEND: buffer has %zu bytes, added metrics for %zu dimensions, of %zu charts, from %zu hosts", buffer_strlen(b), count_dims_total, count_charts_total, count_hosts);
+
         if(unlikely(pthread_setcancelstate(pthreadoldcancelstate, NULL) != 0))
             error("Cannot set pthread cancel state to RESTORE (%d).", pthreadoldcancelstate);
 
index 965c1cbbfd50523b324b6f8267bfa44848fe0d7d..ab2eaee7aa04dd5957348c635e268ac4a0075c6a 100644 (file)
@@ -311,8 +311,10 @@ int do_kern_cp_times(int update_every, usec_t dt) {
         static int mib[2] = {0, 0};
         long cp_time[CPUSTATES];
         static long *pcpu_cp_time = NULL;
+        static int old_number_of_cpus = 0;
 
-        pcpu_cp_time = reallocz(pcpu_cp_time, sizeof(cp_time) * number_of_cpus);
+        if(unlikely(number_of_cpus != old_number_of_cpus))
+            pcpu_cp_time = reallocz(pcpu_cp_time, sizeof(cp_time) * number_of_cpus);
         if (unlikely(GETSYSCTL_WSIZE("kern.cp_times", mib, pcpu_cp_time, sizeof(cp_time) * number_of_cpus))) {
             error("DISABLED: cpu.cpuXX charts");
             error("DISABLED: kern.cp_times module");
@@ -331,12 +333,10 @@ int do_kern_cp_times(int update_every, usec_t dt) {
                 RRDDIM *rd_interrupt;
                 RRDDIM *rd_idle;
             } *all_cpu_charts = NULL;
-            static int old_number_of_cpus = 0;
 
             if(unlikely(number_of_cpus > old_number_of_cpus)) {
                 all_cpu_charts = reallocz(all_cpu_charts, sizeof(struct cpu_chart) * number_of_cpus);
                 memset(&all_cpu_charts[old_number_of_cpus], 0, sizeof(struct cpu_chart) * (number_of_cpus - old_number_of_cpus));
-                old_number_of_cpus = number_of_cpus;
             }
 
             for (i = 0; i < number_of_cpus; i++) {
@@ -375,6 +375,8 @@ int do_kern_cp_times(int update_every, usec_t dt) {
                 rrdset_done(all_cpu_charts[i].st);
             }
         }
+
+        old_number_of_cpus = number_of_cpus;
     }
 
     return 0;
@@ -396,11 +398,13 @@ int do_hw_intcnt(int update_every, usec_t dt) {
         return 1;
     } else {
         unsigned long nintr = 0;
+        static unsigned long old_nintr = 0;
         static unsigned long *intrcnt = NULL;
         unsigned long long totalintr = 0;
 
         nintr = intrcnt_size / sizeof(u_long);
-        intrcnt = reallocz(intrcnt, nintr * sizeof(u_long));
+        if (unlikely(nintr != old_nintr))
+            intrcnt = reallocz(intrcnt, nintr * sizeof(u_long));
         if (unlikely(GETSYSCTL_WSIZE("hw.intrcnt", mib_hw_intrcnt, intrcnt, nintr * sizeof(u_long)))) {
             error("DISABLED: system.intr chart");
             error("DISABLED: system.interrupts chart");
@@ -443,7 +447,8 @@ int do_hw_intcnt(int update_every, usec_t dt) {
             static char *intrnames = NULL;
 
             size = nintr * (MAXCOMLEN + 1);
-            intrnames = reallocz(intrnames, size);
+            if (unlikely(nintr != old_nintr))
+                intrnames = reallocz(intrnames, size);
             if (unlikely(GETSYSCTL_WSIZE("hw.intrnames", mib_hw_intrnames, intrnames, size))) {
                 error("DISABLED: system.intr chart");
                 error("DISABLED: system.interrupts chart");
@@ -484,6 +489,8 @@ int do_hw_intcnt(int update_every, usec_t dt) {
                 rrdset_done(st_interrupts);
             }
         }
+
+        old_nintr = nintr;
     }
 
     return 0;
@@ -931,8 +938,12 @@ int do_kern_ipc_sem(int update_every, usec_t dt) {
         return 1;
     } else {
         static struct semid_kernel *ipc_sem_data = NULL;
+        static int old_semmni = 0;
 
-        ipc_sem_data = reallocz(ipc_sem_data, sizeof(struct semid_kernel) * ipc_sem.semmni);
+        if (unlikely(ipc_sem.semmni != old_semmni)) {
+            ipc_sem_data = reallocz(ipc_sem_data, sizeof(struct semid_kernel) * ipc_sem.semmni);
+            old_semmni = ipc_sem.semmni;
+        }
         if (unlikely(GETSYSCTL_WSIZE("kern.ipc.sema", mib_sema, ipc_sem_data, sizeof(struct semid_kernel) * ipc_sem.semmni))) {
             error("DISABLED: system.ipc_semaphores chart");
             error("DISABLED: system.ipc_semaphore_arrays chart");
@@ -1019,8 +1030,12 @@ int do_kern_ipc_shm(int update_every, usec_t dt) {
         return 1;
     } else {
         static struct shmid_kernel *ipc_shm_data = NULL;
+        static u_long old_shmmni = 0;
 
-        ipc_shm_data = reallocz(ipc_shm_data, sizeof(struct shmid_kernel) * ipc_shm.shmmni);
+        if (unlikely(ipc_shm.shmmni != old_shmmni)) {
+            ipc_shm_data = reallocz(ipc_shm_data, sizeof(struct shmid_kernel) * ipc_shm.shmmni);
+            old_shmmni = ipc_shm.shmmni;
+        }
         if (unlikely(
                 GETSYSCTL_WSIZE("kern.ipc.shmsegs", mib_shmsegs, ipc_shm_data, sizeof(struct shmid_kernel) * ipc_shm.shmmni))) {
             error("DISABLED: system.ipc_shared_mem_segs chart");
@@ -1111,8 +1126,12 @@ int do_kern_ipc_msq(int update_every, usec_t dt) {
         return 1;
     } else {
         static struct msqid_kernel *ipc_msq_data = NULL;
+        static int old_msgmni = 0;
 
-        ipc_msq_data = reallocz(ipc_msq_data, sizeof(struct msqid_kernel) * ipc_msq.msgmni);
+        if (unlikely(ipc_msq.msgmni != old_msgmni)) {
+            ipc_msq_data = reallocz(ipc_msq_data, sizeof(struct msqid_kernel) * ipc_msq.msgmni);
+            old_msgmni = ipc_msq.msgmni;
+        }
         if (unlikely(
                 GETSYSCTL_WSIZE("kern.ipc.msqids", mib_msqids, ipc_msq_data, sizeof(struct msqid_kernel) * ipc_msq.msgmni))) {
             error("DISABLED: system.ipc_msq_queues chart");
@@ -1276,14 +1295,25 @@ int do_net_isr(int update_every, usec_t dt) {
         } else if (unlikely(GETSYSCTL_SIZE("net.isr.work", mib_work, netisr_work_size))) {
             common_error = 1;
         } else {
+            static size_t old_netisr_workstream_size = 0;
+
             num_netisr_workstreams = netisr_workstream_size / sizeof(struct sysctl_netisr_workstream);
-            netisr_workstream = reallocz(netisr_workstream, num_netisr_workstreams * sizeof(struct sysctl_netisr_workstream));
+            if (unlikely(netisr_workstream_size != old_netisr_workstream_size)) {
+                netisr_workstream = reallocz(netisr_workstream,
+                                             num_netisr_workstreams * sizeof(struct sysctl_netisr_workstream));
+                old_netisr_workstream_size = netisr_workstream_size;
+            }
             if (unlikely(GETSYSCTL_WSIZE("net.isr.workstream", mib_workstream, netisr_workstream,
                                            num_netisr_workstreams * sizeof(struct sysctl_netisr_workstream)))){
                 common_error = 1;
             } else {
+                static size_t old_netisr_work_size = 0;
+
                 num_netisr_works = netisr_work_size / sizeof(struct sysctl_netisr_work);
-                netisr_work = reallocz(netisr_work, num_netisr_works * sizeof(struct sysctl_netisr_work));
+                if (unlikely(netisr_work_size != old_netisr_work_size)) {
+                    netisr_work = reallocz(netisr_work, num_netisr_works * sizeof(struct sysctl_netisr_work));
+                    old_netisr_work_size = netisr_work_size;
+                }
                 if (unlikely(GETSYSCTL_WSIZE("net.isr.work", mib_work, netisr_work,
                                                num_netisr_works * sizeof(struct sysctl_netisr_work)))){
                     common_error = 1;
@@ -1301,8 +1331,12 @@ int do_net_isr(int update_every, usec_t dt) {
         } else {
             unsigned long i, n;
             int j;
+            static int old_number_of_cpus = 0;
 
-            netisr_stats = reallocz(netisr_stats, (number_of_cpus + 1) * sizeof(struct netisr_stats));
+            if (unlikely(number_of_cpus != old_number_of_cpus)) {
+                netisr_stats = reallocz(netisr_stats, (number_of_cpus + 1) * sizeof(struct netisr_stats));
+                old_number_of_cpus = number_of_cpus;
+            }
             memset(netisr_stats, 0, (number_of_cpus + 1) * sizeof(struct netisr_stats));
             for (i = 0; i < num_netisr_workstreams; i++) {
                 for (n = 0; n < num_netisr_works; n++) {
@@ -3298,7 +3332,11 @@ int do_getifaddrs(int update_every, usec_t dt) {
                 // --------------------------------------------------------------------
 
                 if (ifm->do_drops == CONFIG_BOOLEAN_YES || (ifm->do_drops == CONFIG_BOOLEAN_AUTO &&
-                        (IFA_DATA(iqdrops) || IFA_DATA(oqdrops)))) {
+                        (IFA_DATA(iqdrops)
+#if __FreeBSD__ >= 11
+                         || IFA_DATA(oqdrops)
+#endif
+                        ))) {
                     if (unlikely(!ifm->st_drops)) {
                         ifm->st_drops = rrdset_create_localhost("net_drops",
                                                                 ifa->ifa_name,
@@ -3414,8 +3452,13 @@ int do_kern_devstat(int update_every, usec_t dt) {
         } else {
             static int mib_devstat[3] = {0, 0, 0};
             static void *devstat_data = NULL;
+            static int old_numdevs = 0;
 
-            devstat_data = reallocz(devstat_data, sizeof(long) + sizeof(struct devstat) * numdevs); // there is generation number before devstat structures
+            if (unlikely(numdevs != old_numdevs)) {
+                devstat_data = reallocz(devstat_data, sizeof(long) + sizeof(struct devstat) *
+                                        numdevs); // there is generation number before devstat structures
+                old_numdevs = numdevs;
+            }
             if (unlikely(GETSYSCTL_WSIZE("kern.devstat.all", mib_devstat, devstat_data,
                                          sizeof(long) + sizeof(struct devstat) * numdevs))) {
                 common_error = 1;
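
All of the freebsd_sysctl.c hunks above apply the same fix: cache the size handed to the last reallocz() and reallocate only when the kernel-reported size changes, so steady-state iterations allocate nothing. Reduced to a sketch (only reallocz(), unlikely() and GETSYSCTL_WSIZE() are netdata's; the variable names are illustrative):

    static void *buf = NULL;
    static size_t old_size = 0;

    if (unlikely(size != old_size)) {     // size: whatever the sysctl reported this pass
        buf = reallocz(buf, size);        // grow or shrink only on change
        old_size = size;
    }
    // GETSYSCTL_WSIZE() then fills buf in place on every iteration
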
index d8ff0654bc21fcbf92b2724ac8132a2e2abca8c7..f5f84def4c3e6f78e5d01d83454402eab2f959d2 100644 (file)
--- a/src/log.h
+++ b/src/log.h
@@ -28,6 +28,7 @@
 #define D_CONNECT_TO        0x0000000001000000
 #define D_RRDHOST           0x0000000002000000
 #define D_LOCKS             0x0000000004000000
+#define D_BACKEND           0x0000000008000000
 #define D_SYSTEM            0x8000000000000000
 
 //#define DEBUG (D_WEB_CLIENT_ACCESS|D_LISTENER|D_RRD_STATS)
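
The new D_BACKEND bit extends the bitmask of debug categories, following the commented-out example directly above. A local build could enable the backend traces added in src/backends.c with, for instance:

    #define DEBUG (D_BACKEND)

netdata also accepts these bits at runtime via its "debug flags" configuration option, without recompiling.
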
index 2ca77491de4633736f6ea43e1b17f091b3bdf3d9..b901b0731191294e9fda348528d9b2c0d07f0980 100644 (file)
@@ -49,6 +49,9 @@ static struct proc_module {
         { .name = "/proc/net/rpc/nfsd", .dim = "nfsd", .func = do_proc_net_rpc_nfsd },
         { .name = "/proc/net/rpc/nfs", .dim = "nfs", .func = do_proc_net_rpc_nfs },
 
+        // ZFS metrics
+        { .name = "/proc/spl/kstat/zfs/arcstats", .dim = "zfs_arcstats", .func = do_proc_spl_kstat_zfs_arcstats },
+
         // IPC metrics
         { .name = "ipc", .dim = "ipc", .func = do_ipc },
 
index 5dee7853cdd25141aa03a04ed6ecb1bde8ca5553..fd81d41db0814f7e3effbdf118311465144787d8 100644 (file)
@@ -25,6 +25,7 @@ extern int do_proc_net_softnet_stat(int update_every, usec_t dt);
 extern int do_proc_uptime(int update_every, usec_t dt);
 extern int do_proc_sys_devices_system_edac_mc(int update_every, usec_t dt);
 extern int do_proc_sys_devices_system_node(int update_every, usec_t dt);
+extern int do_proc_spl_kstat_zfs_arcstats(int update_every, usec_t dt);
 
 extern int get_numa_node_count(void);
 
index e7863f114a7445bf6594e4270b3095139260c91b..a48801b378242b20138506731fd8c4ede9ac8ec2 100644 (file)
@@ -68,9 +68,10 @@ int do_proc_loadavg(int update_every, usec_t dt) {
             rrddim_set(load_chart, "load5", (collected_number) (load5 * 1000));
             rrddim_set(load_chart, "load15", (collected_number) (load15 * 1000));
             rrdset_done(load_chart);
-        }
 
-        next_loadavg_dt = load_chart->update_every * USEC_PER_SEC;
+            next_loadavg_dt = load_chart->update_every * USEC_PER_SEC;
+        }
+        else next_loadavg_dt = MIN_LOADAVG_UPDATE_EVERY * USEC_PER_SEC;
     }
     else next_loadavg_dt -= dt;
 
diff --git a/src/proc_spl_kstat_zfs.c b/src/proc_spl_kstat_zfs.c
new file mode 100644 (file)
index 0000000..3f5d629
--- /dev/null
@@ -0,0 +1,910 @@
+#include "common.h"
+
+#define ZFS_PROC_ARCSTATS "/proc/spl/kstat/zfs/arcstats"
+#define ZFS_FAMILY_SIZE "size"
+#define ZFS_FAMILY_EFFICIENCY "efficiency"
+#define ZFS_FAMILY_ACCESSES "accesses"
+#define ZFS_FAMILY_OPERATIONS "operations"
+#define ZFS_FAMILY_HASH "hashes"
+
+static struct arcstats {
+    unsigned long long hits;
+    unsigned long long misses;
+    unsigned long long demand_data_hits;
+    unsigned long long demand_data_misses;
+    unsigned long long demand_metadata_hits;
+    unsigned long long demand_metadata_misses;
+    unsigned long long prefetch_data_hits;
+    unsigned long long prefetch_data_misses;
+    unsigned long long prefetch_metadata_hits;
+    unsigned long long prefetch_metadata_misses;
+    unsigned long long mru_hits;
+    unsigned long long mru_ghost_hits;
+    unsigned long long mfu_hits;
+    unsigned long long mfu_ghost_hits;
+    unsigned long long deleted;
+    unsigned long long mutex_miss;
+    unsigned long long evict_skip;
+    unsigned long long evict_not_enough;
+    unsigned long long evict_l2_cached;
+    unsigned long long evict_l2_eligible;
+    unsigned long long evict_l2_ineligible;
+    unsigned long long evict_l2_skip;
+    unsigned long long hash_elements;
+    unsigned long long hash_elements_max;
+    unsigned long long hash_collisions;
+    unsigned long long hash_chains;
+    unsigned long long hash_chain_max;
+    unsigned long long p;
+    unsigned long long c;
+    unsigned long long c_min;
+    unsigned long long c_max;
+    unsigned long long size;
+    unsigned long long hdr_size;
+    unsigned long long data_size;
+    unsigned long long metadata_size;
+    unsigned long long other_size;
+    unsigned long long anon_size;
+    unsigned long long anon_evictable_data;
+    unsigned long long anon_evictable_metadata;
+    unsigned long long mru_size;
+    unsigned long long mru_evictable_data;
+    unsigned long long mru_evictable_metadata;
+    unsigned long long mru_ghost_size;
+    unsigned long long mru_ghost_evictable_data;
+    unsigned long long mru_ghost_evictable_metadata;
+    unsigned long long mfu_size;
+    unsigned long long mfu_evictable_data;
+    unsigned long long mfu_evictable_metadata;
+    unsigned long long mfu_ghost_size;
+    unsigned long long mfu_ghost_evictable_data;
+    unsigned long long mfu_ghost_evictable_metadata;
+    unsigned long long l2_hits;
+    unsigned long long l2_misses;
+    unsigned long long l2_feeds;
+    unsigned long long l2_rw_clash;
+    unsigned long long l2_read_bytes;
+    unsigned long long l2_write_bytes;
+    unsigned long long l2_writes_sent;
+    unsigned long long l2_writes_done;
+    unsigned long long l2_writes_error;
+    unsigned long long l2_writes_lock_retry;
+    unsigned long long l2_evict_lock_retry;
+    unsigned long long l2_evict_reading;
+    unsigned long long l2_evict_l1cached;
+    unsigned long long l2_free_on_write;
+    unsigned long long l2_cdata_free_on_write;
+    unsigned long long l2_abort_lowmem;
+    unsigned long long l2_cksum_bad;
+    unsigned long long l2_io_error;
+    unsigned long long l2_size;
+    unsigned long long l2_asize;
+    unsigned long long l2_hdr_size;
+    unsigned long long l2_compress_successes;
+    unsigned long long l2_compress_zeros;
+    unsigned long long l2_compress_failures;
+    unsigned long long memory_throttle_count;
+    unsigned long long duplicate_buffers;
+    unsigned long long duplicate_buffers_size;
+    unsigned long long duplicate_reads;
+    unsigned long long memory_direct_count;
+    unsigned long long memory_indirect_count;
+    unsigned long long arc_no_grow;
+    unsigned long long arc_tempreserve;
+    unsigned long long arc_loaned_bytes;
+    unsigned long long arc_prune;
+    unsigned long long arc_meta_used;
+    unsigned long long arc_meta_limit;
+    unsigned long long arc_meta_max;
+    unsigned long long arc_meta_min;
+    unsigned long long arc_need_free;
+    unsigned long long arc_sys_free;
+} arcstats = { 0 };
+
+int l2exist = -1;
+
+static void generate_charts_arcstats(int update_every) {
+
+    // ARC reads
+    unsigned long long aread = arcstats.hits + arcstats.misses;
+
+    // Demand reads
+    unsigned long long dhit = arcstats.demand_data_hits + arcstats.demand_metadata_hits;
+    unsigned long long dmiss = arcstats.demand_data_misses + arcstats.demand_metadata_misses;
+    unsigned long long dread = dhit + dmiss;
+
+    // Prefetch reads
+    unsigned long long phit = arcstats.prefetch_data_hits + arcstats.prefetch_metadata_hits;
+    unsigned long long pmiss = arcstats.prefetch_data_misses + arcstats.prefetch_metadata_misses;
+    unsigned long long pread = phit + pmiss;
+
+    // Metadata reads
+    unsigned long long mhit = arcstats.prefetch_metadata_hits + arcstats.demand_metadata_hits;
+    unsigned long long mmiss = arcstats.prefetch_metadata_misses + arcstats.demand_metadata_misses;
+    unsigned long long mread = mhit + mmiss;
+
+    // l2 reads
+    unsigned long long l2hit = arcstats.l2_hits;
+    unsigned long long l2miss = arcstats.l2_misses;
+    unsigned long long l2read = l2hit + l2miss;
+
+    // --------------------------------------------------------------------
+
+    {
+        static RRDSET *st_arc_size = NULL;
+        static RRDDIM *rd_arc_size = NULL;
+        static RRDDIM *rd_arc_target_size = NULL;
+        static RRDDIM *rd_arc_target_min_size = NULL;
+        static RRDDIM *rd_arc_target_max_size = NULL;
+
+        if (unlikely(!st_arc_size)) {
+            st_arc_size = rrdset_create_localhost(
+                    "zfs"
+                    , "arc_size"
+                    , NULL
+                    , ZFS_FAMILY_SIZE
+                    , NULL
+                    , "ZFS ARC Size"
+                    , "MB"
+                    , 2000
+                    , update_every
+                    , RRDSET_TYPE_AREA
+            );
+
+            rd_arc_size            = rrddim_add(st_arc_size, "size",   "arcsz", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+            rd_arc_target_size     = rrddim_add(st_arc_size, "target", NULL,    1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+            rd_arc_target_min_size = rrddim_add(st_arc_size, "min",    "min (hard limit)", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+            rd_arc_target_max_size = rrddim_add(st_arc_size, "max",    "max (high water)", 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+        }
+        else
+            rrdset_next(st_arc_size);
+
+        rrddim_set_by_pointer(st_arc_size, rd_arc_size,            arcstats.size);
+        rrddim_set_by_pointer(st_arc_size, rd_arc_target_size,     arcstats.c);
+        rrddim_set_by_pointer(st_arc_size, rd_arc_target_min_size, arcstats.c_min);
+        rrddim_set_by_pointer(st_arc_size, rd_arc_target_max_size, arcstats.c_max);
+        rrdset_done(st_arc_size);
+    }
+
+    // --------------------------------------------------------------------
+
+    if(likely(l2exist)) {
+        static RRDSET *st_l2_size = NULL;
+        static RRDDIM *rd_l2_size = NULL;
+        static RRDDIM *rd_l2_asize = NULL;
+
+        if (unlikely(!st_l2_size)) {
+            st_l2_size = rrdset_create_localhost(
+                    "zfs"
+                    , "l2_size"
+                    , NULL
+                    , ZFS_FAMILY_SIZE
+                    , NULL
+                    , "ZFS L2 ARC Size"
+                    , "MB"
+                    , 2000
+                    , update_every
+                    , RRDSET_TYPE_AREA
+            );
+
+            rd_l2_asize = rrddim_add(st_l2_size, "actual", NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+            rd_l2_size  = rrddim_add(st_l2_size, "size",   NULL, 1, 1024 * 1024, RRD_ALGORITHM_ABSOLUTE);
+        }
+        else
+            rrdset_next(st_l2_size);
+
+        rrddim_set_by_pointer(st_l2_size, rd_l2_size,  arcstats.l2_size);
+        rrddim_set_by_pointer(st_l2_size, rd_l2_asize, arcstats.l2_asize);
+        rrdset_done(st_l2_size);
+    }
+
+    // --------------------------------------------------------------------
+
+    {
+        static RRDSET *st_reads = NULL;
+        static RRDDIM *rd_aread = NULL;
+        static RRDDIM *rd_dread = NULL;
+        static RRDDIM *rd_pread = NULL;
+        static RRDDIM *rd_mread = NULL;
+        static RRDDIM *rd_l2read = NULL;
+
+        if (unlikely(!st_reads)) {
+            st_reads = rrdset_create_localhost(
+                    "zfs"
+                    , "reads"
+                    , NULL
+                    , ZFS_FAMILY_ACCESSES
+                    , NULL
+                    , "ZFS Reads"
+                    , "reads/s"
+                    , 2010
+                    , update_every
+                    , RRDSET_TYPE_AREA
+            );
+
+            rd_aread  = rrddim_add(st_reads, "areads",  "arc",      1, 1, RRD_ALGORITHM_INCREMENTAL);
+            rd_dread  = rrddim_add(st_reads, "dreads",  "demand",   1, 1, RRD_ALGORITHM_INCREMENTAL);
+            rd_pread  = rrddim_add(st_reads, "preads",  "prefetch", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+            rd_mread  = rrddim_add(st_reads, "mreads",  "metadata", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+
+            if(l2exist)
+                rd_l2read = rrddim_add(st_reads, "l2reads", "l2",       1, 1, RRD_ALGORITHM_INCREMENTAL);
+        }
+        else
+            rrdset_next(st_reads);
+
+        rrddim_set_by_pointer(st_reads, rd_aread,  aread);
+        rrddim_set_by_pointer(st_reads, rd_dread,  dread);
+        rrddim_set_by_pointer(st_reads, rd_pread,  pread);
+        rrddim_set_by_pointer(st_reads, rd_mread,  mread);
+
+        if(l2exist)
+            rrddim_set_by_pointer(st_reads, rd_l2read, l2read);
+
+        rrdset_done(st_reads);
+    }
+
+    // --------------------------------------------------------------------
+
+    if(likely(l2exist)) {
+        static RRDSET *st_l2bytes = NULL;
+        static RRDDIM *rd_l2_read_bytes = NULL;
+        static RRDDIM *rd_l2_write_bytes = NULL;
+
+        if (unlikely(!st_l2bytes)) {
+            st_l2bytes = rrdset_create_localhost(
+                    "zfs"
+                    , "bytes"
+                    , NULL
+                    , ZFS_FAMILY_ACCESSES
+                    , NULL
+                    , "ZFS ARC L2 Read/Write Rate"
+                    , "kilobytes/s"
+                    , 2200
+                    , update_every
+                    , RRDSET_TYPE_AREA
+            );
+
+            rd_l2_read_bytes  = rrddim_add(st_l2bytes, "read",  NULL,  1, 1024, RRD_ALGORITHM_INCREMENTAL);
+            rd_l2_write_bytes = rrddim_add(st_l2bytes, "write", NULL, -1, 1024, RRD_ALGORITHM_INCREMENTAL);
+        }
+        else
+            rrdset_next(st_l2bytes);
+
+        rrddim_set_by_pointer(st_l2bytes, rd_l2_read_bytes, arcstats.l2_read_bytes);
+        rrddim_set_by_pointer(st_l2bytes, rd_l2_write_bytes, arcstats.l2_write_bytes);
+        rrdset_done(st_l2bytes);
+    }
+
+    // --------------------------------------------------------------------
+
+    {
+        static RRDSET *st_ahits = NULL;
+        static RRDDIM *rd_ahits = NULL;
+        static RRDDIM *rd_amisses = NULL;
+
+        if (unlikely(!st_ahits)) {
+            st_ahits = rrdset_create_localhost(
+                    "zfs"
+                    , "hits"
+                    , NULL
+                    , ZFS_FAMILY_EFFICIENCY
+                    , NULL
+                    , "ZFS ARC Hits"
+                    , "percentage"
+                    , 2020
+                    , update_every
+                    , RRDSET_TYPE_STACKED
+            );
+
+            rd_ahits   = rrddim_add(st_ahits, "hits", NULL,   1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+            rd_amisses = rrddim_add(st_ahits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+        }
+        else
+            rrdset_next(st_ahits);
+
+        rrddim_set_by_pointer(st_ahits, rd_ahits,   arcstats.hits);
+        rrddim_set_by_pointer(st_ahits, rd_amisses, arcstats.misses);
+        rrdset_done(st_ahits);
+    }
+
+    // --------------------------------------------------------------------
+
+    {
+        static RRDSET *st_dhits = NULL;
+        static RRDDIM *rd_dhits = NULL;
+        static RRDDIM *rd_dmisses = NULL;
+
+        if (unlikely(!st_dhits)) {
+            st_dhits = rrdset_create_localhost(
+                    "zfs"
+                    , "dhits"
+                    , NULL
+                    , ZFS_FAMILY_EFFICIENCY
+                    , NULL
+                    , "ZFS Demand Hits"
+                    , "percentage"
+                    , 2030
+                    , update_every
+                    , RRDSET_TYPE_STACKED
+            );
+
+            rd_dhits   = rrddim_add(st_dhits, "hits",   NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+            rd_dmisses = rrddim_add(st_dhits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+        }
+        else
+            rrdset_next(st_dhits);
+
+        rrddim_set_by_pointer(st_dhits, rd_dhits,   dhit);
+        rrddim_set_by_pointer(st_dhits, rd_dmisses, dmiss);
+        rrdset_done(st_dhits);
+    }
+
+    // --------------------------------------------------------------------
+
+    {
+        static RRDSET *st_phits = NULL;
+        static RRDDIM *rd_phits = NULL;
+        static RRDDIM *rd_pmisses = NULL;
+
+        if (unlikely(!st_phits)) {
+            st_phits = rrdset_create_localhost(
+                    "zfs"
+                    , "phits"
+                    , NULL
+                    , ZFS_FAMILY_EFFICIENCY
+                    , NULL
+                    , "ZFS Prefetch Hits"
+                    , "percentage"
+                    , 2040
+                    , update_every
+                    , RRDSET_TYPE_STACKED
+            );
+
+            rd_phits   = rrddim_add(st_phits, "hits",   NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+            rd_pmisses = rrddim_add(st_phits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+        }
+        else
+            rrdset_next(st_phits);
+
+        rrddim_set_by_pointer(st_phits, rd_phits,   phit);
+        rrddim_set_by_pointer(st_phits, rd_pmisses, pmiss);
+        rrdset_done(st_phits);
+    }
+
+    // --------------------------------------------------------------------
+
+    {
+        static RRDSET *st_mhits = NULL;
+        static RRDDIM *rd_mhits = NULL;
+        static RRDDIM *rd_mmisses = NULL;
+
+        if (unlikely(!st_mhits)) {
+            st_mhits = rrdset_create_localhost(
+                    "zfs"
+                    , "mhits"
+                    , NULL
+                    , ZFS_FAMILY_EFFICIENCY
+                    , NULL
+                    , "ZFS Metadata Hits"
+                    , "percentage"
+                    , 2050
+                    , update_every
+                    , RRDSET_TYPE_STACKED
+            );
+
+            rd_mhits   = rrddim_add(st_mhits, "hits",   NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+            rd_mmisses = rrddim_add(st_mhits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+        }
+        else
+            rrdset_next(st_mhits);
+
+        rrddim_set_by_pointer(st_mhits, rd_mhits,   mhit);
+        rrddim_set_by_pointer(st_mhits, rd_mmisses, mmiss);
+        rrdset_done(st_mhits);
+    }
+
+    // --------------------------------------------------------------------
+
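+    // chart the L2ARC only when l2_* keys have been seen in arcstats
+    // (l2exist is resolved while parsing the file, below)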
+    if(likely(l2exist)) {
+        static RRDSET *st_l2hits = NULL;
+        static RRDDIM *rd_l2hits = NULL;
+        static RRDDIM *rd_l2misses = NULL;
+
+        if (unlikely(!st_l2hits)) {
+            st_l2hits = rrdset_create_localhost(
+                    "zfs"
+                    , "l2hits"
+                    , NULL
+                    , ZFS_FAMILY_EFFICIENCY
+                    , NULL
+                    , "ZFS L2 Hits"
+                    , "percentage"
+                    , 2060
+                    , update_every
+                    , RRDSET_TYPE_STACKED
+            );
+
+            rd_l2hits   = rrddim_add(st_l2hits, "hits",   NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+            rd_l2misses = rrddim_add(st_l2hits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+        }
+        else
+            rrdset_next(st_l2hits);
+
+        rrddim_set_by_pointer(st_l2hits, rd_l2hits,   l2hit);
+        rrddim_set_by_pointer(st_l2hits, rd_l2misses, l2miss);
+        rrdset_done(st_l2hits);
+    }
+
+    // --------------------------------------------------------------------
+
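+    // hits against the MRU/MFU lists and their ghost lists, rendered as
+    // incremental per-second rates instead of percentages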
+    {
+        static RRDSET *st_list_hits = NULL;
+        static RRDDIM *rd_mfu = NULL;
+        static RRDDIM *rd_mru = NULL;
+        static RRDDIM *rd_mfug = NULL;
+        static RRDDIM *rd_mrug = NULL;
+
+        if (unlikely(!st_list_hits)) {
+            st_list_hits = rrdset_create_localhost(
+                    "zfs"
+                    , "list_hits"
+                    , NULL
+                    , ZFS_FAMILY_EFFICIENCY
+                    , NULL
+                    , "ZFS List Hits"
+                    , "hits/s"
+                    , 2100
+                    , update_every
+                    , RRDSET_TYPE_AREA
+            );
+
+            rd_mfu  = rrddim_add(st_list_hits, "mfu",  NULL,        1, 1, RRD_ALGORITHM_INCREMENTAL);
+            rd_mfug = rrddim_add(st_list_hits, "mfug", "mfu ghost", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+            rd_mru  = rrddim_add(st_list_hits, "mru",  NULL,        1, 1, RRD_ALGORITHM_INCREMENTAL);
+            rd_mrug = rrddim_add(st_list_hits, "mrug", "mru ghost", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+        }
+        else
+            rrdset_next(st_list_hits);
+
+        rrddim_set_by_pointer(st_list_hits, rd_mfu, arcstats.mfu_hits);
+        rrddim_set_by_pointer(st_list_hits, rd_mru, arcstats.mru_hits);
+        rrddim_set_by_pointer(st_list_hits, rd_mfug, arcstats.mfu_ghost_hits);
+        rrddim_set_by_pointer(st_list_hits, rd_mrug, arcstats.mru_ghost_hits);
+        rrdset_done(st_list_hits);
+    }
+}
+
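+// charts for the derived metrics, following the calculations that
+// arc_summary.py applies to the raw arcstats counters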
+static void generate_charts_arc_summary(int update_every) {
+    unsigned long long arc_accesses_total = arcstats.hits + arcstats.misses;
+    unsigned long long real_hits = arcstats.mfu_hits + arcstats.mru_hits;
+    unsigned long long real_misses = arc_accesses_total - real_hits;
+
+    //unsigned long long anon_hits = arcstats.hits - (arcstats.mfu_hits + arcstats.mru_hits + arcstats.mfu_ghost_hits + arcstats.mru_ghost_hits);
+
+    unsigned long long arc_size = arcstats.size;
+    unsigned long long mru_size = arcstats.p;
+    //unsigned long long target_min_size = arcstats.c_min;
+    //unsigned long long target_max_size = arcstats.c_max;
+    unsigned long long target_size = arcstats.c;
+    //unsigned long long target_size_ratio = (target_max_size / target_min_size);
+
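+    // arcstats exposes only the MRU target (p), so the MFU portion is
+    // estimated as the remainder of the actual size when the ARC has
+    // overgrown its target, or of the target size otherwise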
+    unsigned long long mfu_size;
+    if(arc_size > target_size)
+        mfu_size = arc_size - mru_size;
+    else
+        mfu_size = target_size - mru_size;
+
+    // --------------------------------------------------------------------
+
+    {
+        static RRDSET *st_arc_size_breakdown = NULL;
+        static RRDDIM *rd_most_recent = NULL;
+        static RRDDIM *rd_most_frequent = NULL;
+
+        if (unlikely(!st_arc_size_breakdown)) {
+            st_arc_size_breakdown = rrdset_create_localhost(
+                    "zfs"
+                    , "arc_size_breakdown"
+                    , NULL
+                    , ZFS_FAMILY_EFFICIENCY
+                    , NULL
+                    , "ZFS ARC Size Breakdown"
+                    , "percentage"
+                    , 2020
+                    , update_every
+                    , RRDSET_TYPE_STACKED
+            );
+
+            rd_most_recent   = rrddim_add(st_arc_size_breakdown, "recent", NULL,   1, 1, RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL);
+            rd_most_frequent = rrddim_add(st_arc_size_breakdown, "frequent", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL);
+        }
+        else
+            rrdset_next(st_arc_size_breakdown);
+
+        rrddim_set_by_pointer(st_arc_size_breakdown, rd_most_recent,   mru_size);
+        rrddim_set_by_pointer(st_arc_size_breakdown, rd_most_frequent, mfu_size);
+        rrdset_done(st_arc_size_breakdown);
+    }
+
+    // --------------------------------------------------------------------
+
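+    // memory pressure: direct/indirect reclaim invocations and
+    // allocations throttled while the ARC was asked to shrink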
+    {
+        static RRDSET *st_memory = NULL;
+        static RRDDIM *rd_direct = NULL;
+        static RRDDIM *rd_throttled = NULL;
+        static RRDDIM *rd_indirect = NULL;
+
+        if (unlikely(!st_memory)) {
+            st_memory = rrdset_create_localhost(
+                    "zfs"
+                    , "memory_ops"
+                    , NULL
+                    , ZFS_FAMILY_OPERATIONS
+                    , NULL
+                    , "ZFS Memory Operations"
+                    , "operations/s"
+                    , 2023
+                    , update_every
+                    , RRDSET_TYPE_LINE
+            );
+
+            rd_direct    = rrddim_add(st_memory, "direct",    NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+            rd_throttled = rrddim_add(st_memory, "throttled", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+            rd_indirect  = rrddim_add(st_memory, "indirect",  NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
+        }
+        else
+            rrdset_next(st_memory);
+
+        rrddim_set_by_pointer(st_memory, rd_direct,    arcstats.memory_direct_count);
+        rrddim_set_by_pointer(st_memory, rd_throttled, arcstats.memory_throttle_count);
+        rrddim_set_by_pointer(st_memory, rd_indirect,  arcstats.memory_indirect_count);
+        rrdset_done(st_memory);
+    }
+
+    // --------------------------------------------------------------------
+
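+    // deletions, eviction skips, mutex misses and hash collisions,
+    // charted as per-second rates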
+    {
+        static RRDSET *st_important_ops = NULL;
+        static RRDDIM *rd_deleted = NULL;
+        static RRDDIM *rd_mutex_misses = NULL;
+        static RRDDIM *rd_evict_skips = NULL;
+        static RRDDIM *rd_hash_collisions = NULL;
+
+        if (unlikely(!st_important_ops)) {
+            st_important_ops = rrdset_create_localhost(
+                    "zfs"
+                    , "important_ops"
+                    , NULL
+                    , ZFS_FAMILY_OPERATIONS
+                    , NULL
+                    , "ZFS Important Operations"
+                    , "operations/s"
+                    , 2022
+                    , update_every
+                    , RRDSET_TYPE_LINE
+            );
+
+            rd_evict_skips     = rrddim_add(st_important_ops, "eskip",   "evict skip", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+            rd_deleted         = rrddim_add(st_important_ops, "deleted", NULL,         1, 1, RRD_ALGORITHM_INCREMENTAL);
+            rd_mutex_misses    = rrddim_add(st_important_ops, "mtxmis",  "mutex miss", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+            rd_hash_collisions = rrddim_add(st_important_ops, "hash_collisions", "hash collisions", 1, 1, RRD_ALGORITHM_INCREMENTAL);
+        }
+        else
+            rrdset_next(st_important_ops);
+
+        rrddim_set_by_pointer(st_important_ops, rd_deleted,      arcstats.deleted);
+        rrddim_set_by_pointer(st_important_ops, rd_evict_skips,  arcstats.evict_skip);
+        rrddim_set_by_pointer(st_important_ops, rd_mutex_misses, arcstats.mutex_miss);
+        rrddim_set_by_pointer(st_important_ops, rd_hash_collisions, arcstats.hash_collisions);
+        rrdset_done(st_important_ops);
+    }
+
+    // --------------------------------------------------------------------
+
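+    // "actual" hits count only MRU + MFU list hits; ghost-list hits refer
+    // to buffers already evicted, so they are effectively misses and are
+    // excluded from real_hits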
+    {
+        static RRDSET *st_actual_hits = NULL;
+        static RRDDIM *rd_actual_hits = NULL;
+        static RRDDIM *rd_actual_misses = NULL;
+
+        if (unlikely(!st_actual_hits)) {
+            st_actual_hits = rrdset_create_localhost(
+                    "zfs"
+                    , "actual_hits"
+                    , NULL
+                    , ZFS_FAMILY_EFFICIENCY
+                    , NULL
+                    , "ZFS Actual Cache Hits"
+                    , "percentage"
+                    , 2019
+                    , update_every
+                    , RRDSET_TYPE_STACKED
+            );
+
+            rd_actual_hits   = rrddim_add(st_actual_hits, "hits", NULL,   1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+            rd_actual_misses = rrddim_add(st_actual_hits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+        }
+        else
+            rrdset_next(st_actual_hits);
+
+        rrddim_set_by_pointer(st_actual_hits, rd_actual_hits,   real_hits);
+        rrddim_set_by_pointer(st_actual_hits, rd_actual_misses, real_misses);
+        rrdset_done(st_actual_hits);
+    }
+
+    // --------------------------------------------------------------------
+
+    {
+        static RRDSET *st_demand_data_hits = NULL;
+        static RRDDIM *rd_demand_data_hits = NULL;
+        static RRDDIM *rd_demand_data_misses = NULL;
+
+        if (unlikely(!st_demand_data_hits)) {
+            st_demand_data_hits = rrdset_create_localhost(
+                    "zfs"
+                    , "demand_data_hits"
+                    , NULL
+                    , ZFS_FAMILY_EFFICIENCY
+                    , NULL
+                    , "ZFS Data Demand Efficiency"
+                    , "percentage"
+                    , 2031
+                    , update_every
+                    , RRDSET_TYPE_STACKED
+            );
+
+            rd_demand_data_hits   = rrddim_add(st_demand_data_hits, "hits", NULL,   1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+            rd_demand_data_misses = rrddim_add(st_demand_data_hits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+        }
+        else
+            rrdset_next(st_demand_data_hits);
+
+        rrddim_set_by_pointer(st_demand_data_hits, rd_demand_data_hits,   arcstats.demand_data_hits);
+        rrddim_set_by_pointer(st_demand_data_hits, rd_demand_data_misses, arcstats.demand_data_misses);
+        rrdset_done(st_demand_data_hits);
+    }
+
+    // --------------------------------------------------------------------
+
+    {
+        static RRDSET *st_prefetch_data_hits = NULL;
+        static RRDDIM *rd_prefetch_data_hits = NULL;
+        static RRDDIM *rd_prefetch_data_misses = NULL;
+
+        if (unlikely(!st_prefetch_data_hits)) {
+            st_prefetch_data_hits = rrdset_create_localhost(
+                    "zfs"
+                    , "prefetch_data_hits"
+                    , NULL
+                    , ZFS_FAMILY_EFFICIENCY
+                    , NULL
+                    , "ZFS Data Prefetch Efficiency"
+                    , "percentage"
+                    , 2032
+                    , update_every
+                    , RRDSET_TYPE_STACKED
+            );
+
+            rd_prefetch_data_hits   = rrddim_add(st_prefetch_data_hits, "hits", NULL,   1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+            rd_prefetch_data_misses = rrddim_add(st_prefetch_data_hits, "misses", NULL, 1, 1, RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL);
+        }
+        else
+            rrdset_next(st_prefetch_data_hits);
+
+        rrddim_set_by_pointer(st_prefetch_data_hits, rd_prefetch_data_hits,   arcstats.prefetch_data_hits);
+        rrddim_set_by_pointer(st_prefetch_data_hits, rd_prefetch_data_misses, arcstats.prefetch_data_misses);
+        rrdset_done(st_prefetch_data_hits);
+    }
+
+    // --------------------------------------------------------------------
+
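+    // ARC hash table occupancy: current number of elements vs the
+    // maximum ever observed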
+    {
+        static RRDSET *st_hash_elements = NULL;
+        static RRDDIM *rd_hash_elements_current = NULL;
+        static RRDDIM *rd_hash_elements_max = NULL;
+
+        if (unlikely(!st_hash_elements)) {
+            st_hash_elements = rrdset_create_localhost(
+                    "zfs"
+                    , "hash_elements"
+                    , NULL
+                    , ZFS_FAMILY_HASH
+                    , NULL
+                    , "ZFS ARC Hash Elements"
+                    , "elements"
+                    , 2300
+                    , update_every
+                    , RRDSET_TYPE_LINE
+            );
+
+            rd_hash_elements_current = rrddim_add(st_hash_elements, "current", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+            rd_hash_elements_max     = rrddim_add(st_hash_elements, "max",     NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+        }
+        else
+            rrdset_next(st_hash_elements);
+
+        rrddim_set_by_pointer(st_hash_elements, rd_hash_elements_current, arcstats.hash_elements);
+        rrddim_set_by_pointer(st_hash_elements, rd_hash_elements_max, arcstats.hash_elements_max);
+        rrdset_done(st_hash_elements);
+    }
+
+    // --------------------------------------------------------------------
+
+    {
+        static RRDSET *st_hash_chains = NULL;
+        static RRDDIM *rd_hash_chains_current = NULL;
+        static RRDDIM *rd_hash_chains_max = NULL;
+
+        if (unlikely(!st_hash_chains)) {
+            st_hash_chains = rrdset_create_localhost(
+                    "zfs"
+                    , "hash_chains"
+                    , NULL
+                    , ZFS_FAMILY_HASH
+                    , NULL
+                    , "ZFS ARC Hash Chains"
+                    , "chains"
+                    , 2310
+                    , update_every
+                    , RRDSET_TYPE_LINE
+            );
+
+            rd_hash_chains_current = rrddim_add(st_hash_chains, "current", NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+            rd_hash_chains_max     = rrddim_add(st_hash_chains, "max",     NULL, 1, 1, RRD_ALGORITHM_ABSOLUTE);
+        }
+        else
+            rrdset_next(st_hash_chains);
+
+        rrddim_set_by_pointer(st_hash_chains, rd_hash_chains_current, arcstats.hash_chains);
+        rrddim_set_by_pointer(st_hash_chains, rd_hash_chains_max, arcstats.hash_chain_max);
+        rrdset_done(st_hash_chains);
+    }
+
+    // --------------------------------------------------------------------
+
+}
+
+int do_proc_spl_kstat_zfs_arcstats(int update_every, usec_t dt) {
+    (void)dt;
+
+    static procfile *ff = NULL;
+    static ARL_BASE *arl_base = NULL;
+
+    if(unlikely(!arl_base)) {
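+        // map each arcstats key to its field in the arcstats struct once;
+        // the ARL then matches keys cheaply on every subsequent iteration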
+        arl_base = arl_create("arcstats", NULL, 60);
+
+        arl_expect(arl_base, "hits", &arcstats.hits);
+        arl_expect(arl_base, "misses", &arcstats.misses);
+        arl_expect(arl_base, "demand_data_hits", &arcstats.demand_data_hits);
+        arl_expect(arl_base, "demand_data_misses", &arcstats.demand_data_misses);
+        arl_expect(arl_base, "demand_metadata_hits", &arcstats.demand_metadata_hits);
+        arl_expect(arl_base, "demand_metadata_misses", &arcstats.demand_metadata_misses);
+        arl_expect(arl_base, "prefetch_data_hits", &arcstats.prefetch_data_hits);
+        arl_expect(arl_base, "prefetch_data_misses", &arcstats.prefetch_data_misses);
+        arl_expect(arl_base, "prefetch_metadata_hits", &arcstats.prefetch_metadata_hits);
+        arl_expect(arl_base, "prefetch_metadata_misses", &arcstats.prefetch_metadata_misses);
+        arl_expect(arl_base, "mru_hits", &arcstats.mru_hits);
+        arl_expect(arl_base, "mru_ghost_hits", &arcstats.mru_ghost_hits);
+        arl_expect(arl_base, "mfu_hits", &arcstats.mfu_hits);
+        arl_expect(arl_base, "mfu_ghost_hits", &arcstats.mfu_ghost_hits);
+        arl_expect(arl_base, "deleted", &arcstats.deleted);
+        arl_expect(arl_base, "mutex_miss", &arcstats.mutex_miss);
+        arl_expect(arl_base, "evict_skip", &arcstats.evict_skip);
+        arl_expect(arl_base, "evict_not_enough", &arcstats.evict_not_enough);
+        arl_expect(arl_base, "evict_l2_cached", &arcstats.evict_l2_cached);
+        arl_expect(arl_base, "evict_l2_eligible", &arcstats.evict_l2_eligible);
+        arl_expect(arl_base, "evict_l2_ineligible", &arcstats.evict_l2_ineligible);
+        arl_expect(arl_base, "evict_l2_skip", &arcstats.evict_l2_skip);
+        arl_expect(arl_base, "hash_elements", &arcstats.hash_elements);
+        arl_expect(arl_base, "hash_elements_max", &arcstats.hash_elements_max);
+        arl_expect(arl_base, "hash_collisions", &arcstats.hash_collisions);
+        arl_expect(arl_base, "hash_chains", &arcstats.hash_chains);
+        arl_expect(arl_base, "hash_chain_max", &arcstats.hash_chain_max);
+        arl_expect(arl_base, "p", &arcstats.p);
+        arl_expect(arl_base, "c", &arcstats.c);
+        arl_expect(arl_base, "c_min", &arcstats.c_min);
+        arl_expect(arl_base, "c_max", &arcstats.c_max);
+        arl_expect(arl_base, "size", &arcstats.size);
+        arl_expect(arl_base, "hdr_size", &arcstats.hdr_size);
+        arl_expect(arl_base, "data_size", &arcstats.data_size);
+        arl_expect(arl_base, "metadata_size", &arcstats.metadata_size);
+        arl_expect(arl_base, "other_size", &arcstats.other_size);
+        arl_expect(arl_base, "anon_size", &arcstats.anon_size);
+        arl_expect(arl_base, "anon_evictable_data", &arcstats.anon_evictable_data);
+        arl_expect(arl_base, "anon_evictable_metadata", &arcstats.anon_evictable_metadata);
+        arl_expect(arl_base, "mru_size", &arcstats.mru_size);
+        arl_expect(arl_base, "mru_evictable_data", &arcstats.mru_evictable_data);
+        arl_expect(arl_base, "mru_evictable_metadata", &arcstats.mru_evictable_metadata);
+        arl_expect(arl_base, "mru_ghost_size", &arcstats.mru_ghost_size);
+        arl_expect(arl_base, "mru_ghost_evictable_data", &arcstats.mru_ghost_evictable_data);
+        arl_expect(arl_base, "mru_ghost_evictable_metadata", &arcstats.mru_ghost_evictable_metadata);
+        arl_expect(arl_base, "mfu_size", &arcstats.mfu_size);
+        arl_expect(arl_base, "mfu_evictable_data", &arcstats.mfu_evictable_data);
+        arl_expect(arl_base, "mfu_evictable_metadata", &arcstats.mfu_evictable_metadata);
+        arl_expect(arl_base, "mfu_ghost_size", &arcstats.mfu_ghost_size);
+        arl_expect(arl_base, "mfu_ghost_evictable_data", &arcstats.mfu_ghost_evictable_data);
+        arl_expect(arl_base, "mfu_ghost_evictable_metadata", &arcstats.mfu_ghost_evictable_metadata);
+        arl_expect(arl_base, "l2_hits", &arcstats.l2_hits);
+        arl_expect(arl_base, "l2_misses", &arcstats.l2_misses);
+        arl_expect(arl_base, "l2_feeds", &arcstats.l2_feeds);
+        arl_expect(arl_base, "l2_rw_clash", &arcstats.l2_rw_clash);
+        arl_expect(arl_base, "l2_read_bytes", &arcstats.l2_read_bytes);
+        arl_expect(arl_base, "l2_write_bytes", &arcstats.l2_write_bytes);
+        arl_expect(arl_base, "l2_writes_sent", &arcstats.l2_writes_sent);
+        arl_expect(arl_base, "l2_writes_done", &arcstats.l2_writes_done);
+        arl_expect(arl_base, "l2_writes_error", &arcstats.l2_writes_error);
+        arl_expect(arl_base, "l2_writes_lock_retry", &arcstats.l2_writes_lock_retry);
+        arl_expect(arl_base, "l2_evict_lock_retry", &arcstats.l2_evict_lock_retry);
+        arl_expect(arl_base, "l2_evict_reading", &arcstats.l2_evict_reading);
+        arl_expect(arl_base, "l2_evict_l1cached", &arcstats.l2_evict_l1cached);
+        arl_expect(arl_base, "l2_free_on_write", &arcstats.l2_free_on_write);
+        arl_expect(arl_base, "l2_cdata_free_on_write", &arcstats.l2_cdata_free_on_write);
+        arl_expect(arl_base, "l2_abort_lowmem", &arcstats.l2_abort_lowmem);
+        arl_expect(arl_base, "l2_cksum_bad", &arcstats.l2_cksum_bad);
+        arl_expect(arl_base, "l2_io_error", &arcstats.l2_io_error);
+        arl_expect(arl_base, "l2_size", &arcstats.l2_size);
+        arl_expect(arl_base, "l2_asize", &arcstats.l2_asize);
+        arl_expect(arl_base, "l2_hdr_size", &arcstats.l2_hdr_size);
+        arl_expect(arl_base, "l2_compress_successes", &arcstats.l2_compress_successes);
+        arl_expect(arl_base, "l2_compress_zeros", &arcstats.l2_compress_zeros);
+        arl_expect(arl_base, "l2_compress_failures", &arcstats.l2_compress_failures);
+        arl_expect(arl_base, "memory_throttle_count", &arcstats.memory_throttle_count);
+        arl_expect(arl_base, "duplicate_buffers", &arcstats.duplicate_buffers);
+        arl_expect(arl_base, "duplicate_buffers_size", &arcstats.duplicate_buffers_size);
+        arl_expect(arl_base, "duplicate_reads", &arcstats.duplicate_reads);
+        arl_expect(arl_base, "memory_direct_count", &arcstats.memory_direct_count);
+        arl_expect(arl_base, "memory_indirect_count", &arcstats.memory_indirect_count);
+        arl_expect(arl_base, "arc_no_grow", &arcstats.arc_no_grow);
+        arl_expect(arl_base, "arc_tempreserve", &arcstats.arc_tempreserve);
+        arl_expect(arl_base, "arc_loaned_bytes", &arcstats.arc_loaned_bytes);
+        arl_expect(arl_base, "arc_prune", &arcstats.arc_prune);
+        arl_expect(arl_base, "arc_meta_used", &arcstats.arc_meta_used);
+        arl_expect(arl_base, "arc_meta_limit", &arcstats.arc_meta_limit);
+        arl_expect(arl_base, "arc_meta_max", &arcstats.arc_meta_max);
+        arl_expect(arl_base, "arc_meta_min", &arcstats.arc_meta_min);
+        arl_expect(arl_base, "arc_need_free", &arcstats.arc_need_free);
+        arl_expect(arl_base, "arc_sys_free", &arcstats.arc_sys_free);
+    }
+
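+    // open the arcstats file (ZFS_PROC_ARCSTATS), honoring
+    // netdata_configured_host_prefix so the path also resolves when
+    // netdata runs inside a container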
+    if(unlikely(!ff)) {
+        char filename[FILENAME_MAX + 1];
+        snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, ZFS_PROC_ARCSTATS);
+        ff = procfile_open(config_get("plugin:proc:" ZFS_PROC_ARCSTATS, "filename to monitor", filename), " \t:", PROCFILE_FLAG_DEFAULT);
+        if(unlikely(!ff))
+            return 1;
+    }
+
+    ff = procfile_readall(ff);
+    if(unlikely(!ff))
+        return 0; // we return 0, so that we will retry opening it on the next iteration
+
+    size_t lines = procfile_lines(ff), l;
+
+    arl_begin(arl_base);
+
+    for(l = 0; l < lines ;l++) {
+        size_t words = procfile_linewords(ff, l);
+        if(unlikely(words < 3)) {
+            if(unlikely(words)) error("Cannot read " ZFS_PROC_ARCSTATS " line %zu. Expected 3 params, read %zu.", l, words);
+            continue;
+        }
+
+        const char *key   = procfile_lineword(ff, l, 0);
+        const char *value = procfile_lineword(ff, l, 2);
+
+        if(unlikely(l2exist == -1)) {
+            if(key[0] == 'l' && key[1] == '2' && key[2] == '_')
+                l2exist = 1;
+        }
+
+        if(unlikely(arl_check(arl_base, key, value))) break;
+    }
+
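+    // no l2_* key was seen at all: mark the L2ARC as absent, so the
+    // L2 chart above will never be created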
+    if(unlikely(l2exist == -1))
+        l2exist = 0;
+
+    generate_charts_arcstats(update_every);
+    generate_charts_arc_summary(update_every);
+
+    return 0;
+}
index c348da30df1fc9ab80826c4690fad5c442e2471e..a9278cbb6a73ce5d61399c511b04d10cd712a52a 100644 (file)
@@ -103,6 +103,12 @@ netdataDashboard.menu = {
         info: 'Performance metrics of the NFS operations of this system, acting as an NFS client.'
     },
 
+    'zfs': {
+        title: 'ZFS filesystem',
+        icon: '<i class="fa fa-folder-open" aria-hidden="true"></i>',
+        info: 'Performance metrics of the ZFS filesystem. The following charts visualize all metrics reported by <a href="https://github.com/zfsonlinux/zfs/blob/master/cmd/arcstat/arcstat.py" target="_blank">arcstat.py</a> and <a href="https://github.com/zfsonlinux/zfs/blob/master/cmd/arc_summary/arc_summary.py" target="_blank">arc_summary.py</a>.'
+    },
+
     'apps': {
         title: 'Applications',
         icon: '<i class="fa fa-heartbeat" aria-hidden="true"></i>',
index b62306a2655ae56069a26e1bb909d0a1b94ee4e1..3283b16e89cf53e215ca0a6758a30758ff6a783b 100644 (file)
             });
 
             NETDATA.requiredJs.push({
-                url: NETDATA.serverDefault + 'dashboard_info.js?v20170308-1',
+                url: NETDATA.serverDefault + 'dashboard_info.js?v20170325-1',
                 async: false,
                 isAlreadyLoaded: function() { return false; }
             });
     </div>
 </body>
 </html>
-<script type="text/javascript" src="dashboard.js?v20170211-2"></script>
+<script type="text/javascript" src="dashboard.js?v20170325-1"></script>