X-Git-Url: https://gerrit.opnfv.org/gerrit/gitweb?a=blobdiff_plain;f=src%2Fceph%2Fsrc%2Fpybind%2Fmgr%2Fzabbix%2Fzabbix_template.xml;fp=src%2Fceph%2Fsrc%2Fpybind%2Fmgr%2Fzabbix%2Fzabbix_template.xml;h=ecd1ef438a4ba9a66a84843a99c2260a6357f02a;hb=812ff6ca9fcd3e629e49d4328905f33eee8ca3f5;hp=0000000000000000000000000000000000000000;hpb=15280273faafb77777eab341909a3f495cf248d9;p=stor4nfv.git

diff --git a/src/ceph/src/pybind/mgr/zabbix/zabbix_template.xml b/src/ceph/src/pybind/mgr/zabbix/zabbix_template.xml
new file mode 100644
index 0000000..ecd1ef4
--- /dev/null
+++ b/src/ceph/src/pybind/mgr/zabbix/zabbix_template.xml
@@ -0,0 +1,1707 @@

[The 1,707 added lines are a Zabbix 3.0 template export, dated 2017-07-05, in host group "Templates" for the host "ceph-mgr Zabbix module". The XML markup was stripped during extraction; the definitions that survive in the stripped text are listed below.]

Triggers:
- Ceph cluster in ERR state: expression {ceph-mgr Zabbix module:ceph.overall_status_int.last()}=2, priority 5 (Disaster), description "Ceph cluster is in ERR state"
- Ceph cluster in WARN state: expression {ceph-mgr Zabbix module:ceph.overall_status_int.avg(1h)}=1, priority 4 (High), description "Issue a trigger if Ceph cluster is in WARN state for >1h"
- Number of IN OSDs decreased: expression {ceph-mgr Zabbix module:ceph.num_osd_in.change()}>0, priority 2 (Warning), description "Amount of OSDs in IN state decreased"
- Number of UP OSDs decreased: expression {ceph-mgr Zabbix module:ceph.num_osd_up.change()}>0, priority 2 (Warning), description "Amount of OSDs in UP state decreased"

Graphs (all 900x200, plotting items of host "ceph-mgr Zabbix module"):
- Ceph bandwidth: ceph.rd_bytes, ceph.wr_bytes
- Ceph free space: ceph.total_bytes, ceph.total_avail_bytes
- Ceph health: ceph.overall_status_int
- Ceph I/O: ceph.rd_ops, ceph.wr_ops
- Ceph OSD latency: ceph.osd_latency_apply_avg, ceph.osd_latency_commit_avg, ceph.osd_latency_apply_max, ceph.osd_latency_commit_max, ceph.osd_latency_apply_min, ceph.osd_latency_commit_min
- Ceph OSD utilization: ceph.osd_nearfull_ratio, ceph.osd_full_ratio, ceph.osd_backfillfull_ratio, ceph.osd_max_fill, ceph.osd_avg_fill, ceph.osd_min_fill
- Ceph storage overview: ceph.total_used_bytes, ceph.total_avail_bytes
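
For orientation, the sketch below shows how one of these triggers would read with its markup restored. This is a reconstruction, not a verbatim excerpt from the diff: the tag names and ordering follow the generic Zabbix 3.0 template export layout, and the fields that do not survive in the stripped text (url, status, type, dependencies) are assumed defaults. Only the expression, name, priority and description values come from the file above.

    <trigger>
        <!-- ceph.overall_status_int: the ERR/WARN triggers above imply 1 = HEALTH_WARN, 2 = HEALTH_ERR -->
        <expression>{ceph-mgr Zabbix module:ceph.overall_status_int.last()}=2</expression>
        <name>Ceph cluster in ERR state</name>
        <url/>
        <status>0</status>
        <priority>5</priority>
        <description>Ceph cluster is in ERR state</description>
        <type>0</type>
        <dependencies/>
    </trigger>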
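
The graph definitions follow the same export layout. As an illustration, the "Ceph bandwidth" graph would pair its two item keys and colours roughly as below; the tag structure is again the standard Zabbix 3.0 graph export format and is an assumption, while the host name, item keys, colours and the 900x200 size are taken from the stripped text.

    <graph>
        <name>Ceph bandwidth</name>
        <width>900</width>
        <height>200</height>
        <graph_items>
            <graph_item>
                <!-- read throughput, drawn first (green) -->
                <sortorder>0</sortorder>
                <color>1A7C11</color>
                <item>
                    <host>ceph-mgr Zabbix module</host>
                    <key>ceph.rd_bytes</key>
                </item>
            </graph_item>
            <graph_item>
                <!-- write throughput (red) -->
                <sortorder>1</sortorder>
                <color>F63100</color>
                <item>
                    <host>ceph-mgr Zabbix module</host>
                    <key>ceph.wr_bytes</key>
                </item>
            </graph_item>
        </graph_items>
    </graph>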