一区二区三区在线-一区二区三区亚洲视频-一区二区三区亚洲-一区二区三区午夜-一区二区三区四区在线视频-一区二区三区四区在线免费观看

服務器之家:專注于服務器技術及軟件下載分享
分類導航

云服務器|WEB服務器|FTP服務器|郵件服務器|虛擬主機|服務器安全|DNS服務器|服務器知識|Nginx|IIS|Tomcat|

服務器之家 - 服務器技術 - 服務器知識 - 使用docker部署grafana+prometheus配置

使用docker部署grafana+prometheus配置

2022-01-20 17:12 runzhao 服務器知識

這篇文章主要介紹了docker部署grafana+prometheus配置,本文給大家介紹的非常詳細,對大家的學習或工作具有一定的參考借鑒價值,需要的朋友可以參考下

docker-compose-monitor.yml

?
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
version: "2"

networks:
  monitor:
    driver: bridge

services:
  # InfluxDB time-series database (storage backend for Telegraf).
  influxdb:
    image: influxdb:latest
    container_name: tig-influxdb
    ports:
      - "18083:8083"
      - "18086:8086"
      - "18090:8090"
    env_file:
      - "env.influxdb"
    volumes:
      # Data persistence (prepare with: sudo mkdir -p ./influxdb/data)
      - ./influxdb/data:/var/lib/influxdb
      # Pin the container clock/timezone to the host's (UTC+8).
      - ./timezone:/etc/timezone:ro
      - ./localtime:/etc/localtime:ro
    restart: unless-stopped  # auto-restart unless explicitly stopped

  # Telegraf metrics collector, writing into InfluxDB.
  telegraf:
    image: telegraf:latest
    container_name: tig-telegraf
    links:
      - influxdb
    volumes:
      - ./telegraf.conf:/etc/telegraf/telegraf.conf:ro
      - ./timezone:/etc/timezone:ro
      - ./localtime:/etc/localtime:ro
    restart: unless-stopped

  # Prometheus server: scrapes the exporters and evaluates alert rules.
  prometheus:
    image: prom/prometheus
    container_name: prometheus
    hostname: prometheus
    restart: always
    volumes:
      - /home/qa/docker/grafana/prometheus.yml:/etc/prometheus/prometheus.yml
      - /home/qa/docker/grafana/node_down.yml:/etc/prometheus/node_down.yml
    ports:
      - "9090:9090"
    networks:
      - monitor

  # Alertmanager: receives alerts fired by Prometheus and emails them out.
  alertmanager:
    image: prom/alertmanager
    container_name: alertmanager
    hostname: alertmanager
    restart: always
    volumes:
      - /home/qa/docker/grafana/alertmanager.yml:/etc/alertmanager/alertmanager.yml
    ports:
      - "9093:9093"
    networks:
      - monitor

  # Grafana dashboards (host port 13000 -> container 3000).
  grafana:
    image: grafana/grafana:6.7.4
    container_name: grafana
    hostname: grafana
    restart: always
    ports:
      - "13000:3000"
    networks:
      - monitor

  # Host-level metrics exporter scraped by Prometheus.
  node-exporter:
    image: quay.io/prometheus/node-exporter
    container_name: node-exporter
    hostname: node-exporter
    restart: always
    ports:
      - "9100:9100"
    networks:
      - monitor

  # Per-container metrics exporter (needs read access to host paths).
  cadvisor:
    image: google/cadvisor:latest
    container_name: cadvisor
    hostname: cadvisor
    restart: always
    volumes:
      - /:/rootfs:ro
      - /var/run:/var/run:rw
      - /sys:/sys:ro
      - /var/lib/docker/:/var/lib/docker:ro
    ports:
      - "18080:8080"
    networks:
      - monitor

alertmanager.yml

?
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
# Alertmanager configuration: deliver every alert to one email receiver.
global:
  resolve_timeout: 5m
  smtp_from: '郵箱'  # placeholder: sender email address
  smtp_smarthost: 'smtp.exmail.qq.com:25'
  smtp_auth_username: '郵箱'  # placeholder: SMTP login (usually same as smtp_from)
  smtp_auth_password: '密碼'  # placeholder: SMTP password / app token
  smtp_require_tls: false
  smtp_hello: 'qq.com'
# Route all alerts, grouped by alert name, to the 'email' receiver.
route:
  group_by: ['alertname']
  group_wait: 5s
  group_interval: 5s
  repeat_interval: 5m  # re-send an unresolved alert every 5 minutes
  receiver: 'email'
receivers:
- name: 'email'
  email_configs:
  - to: '收件郵箱'  # placeholder: recipient email address
    send_resolved: true  # also email when the alert resolves
# Suppress 'warning' alerts when a matching 'critical' alert is already firing
# for the same alertname/dev/instance label values.
inhibit_rules:
  - source_match:
      severity: 'critical'
    target_match:
      severity: 'warning'
    equal: ['alertname', 'dev', 'instance']

prometheus.yml

?
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
global:
  scrape_interval: 15s      # how often to scrape targets (default is 1m)
  evaluation_interval: 15s  # how often to evaluate rules (default is 1m)
  # scrape_timeout keeps the global default (10s).

# Where fired alerts are delivered.
alerting:
  alertmanagers:
    - static_configs:
        - targets: ['192.168.32.117:9093']
          # (alertmanager:9093 would also work from inside the compose network)

# Rule files, re-evaluated every evaluation_interval.
rule_files:
  - "node_down.yml"
  # - "node-exporter-alert-rules.yml"
  # - "first_rules.yml"
  # - "second_rules.yml"

scrape_configs:
  # Storage/IO node group — port is the node-exporter listen port on each host.
  - job_name: 'io'
    scrape_interval: 8s
    static_configs:
      - targets:
          - '192.168.32.117:9100'
          - '192.168.32.196:9100'
          - '192.168.32.136:9100'
          - '192.168.32.193:9100'
          - '192.168.32.153:9100'
          - '192.168.32.185:9100'
          - '192.168.32.190:19100'
          - '192.168.32.192:9100'

  # Container metrics — port is the cAdvisor listen port on each host.
  # The job name is attached as the label `job=<job_name>` to every series.
  - job_name: 'cadvisor'
    static_configs:
      - targets:
          - '192.168.32.117:18080'
          - '192.168.32.193:8080'
          - '192.168.32.153:8080'
          - '192.168.32.185:8080'
          - '192.168.32.190:18080'
          - '192.168.32.192:18080'

node_down.yml

?
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
# Prometheus alerting rules: instance availability plus basic resource alerts.
# NOTE(review): the non-ASCII alert names below may be rejected by promtool on
# older Prometheus versions that require `[a-zA-Z_:][a-zA-Z0-9_:]*` names —
# verify against the deployed Prometheus version.
groups:
  - name: node_down
    rules:
      # Fires when a scrape target has been unreachable (up == 0) for 1 minute.
      - alert: InstanceDown
        expr: up == 0
        for: 1m
        labels:
          user: test
        annotations:
          summary: 'Instance {{ $labels.instance }} down'
          description: '{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 1 minutes.'

      # Available memory below 10% of total for 2 minutes.
      - alert: 剩余內存小于10%
        expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes * 100 < 10
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: Host out of memory (instance {{ $labels.instance }})
          description: "Node memory is filling up (< 10% left)\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"

      # Filesystem free space below 10%, ignoring read-only mounts.
      - alert: 剩余磁盤小于10%
        expr: (node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0
        for: 2m
        labels:
          severity: warning
        annotations:
          summary: Host out of disk space (instance {{ $labels.instance }})
          description: "Disk is almost full (< 10% left)\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"

      # Average CPU utilisation above 80% over 2m windows (for: 0m fires immediately).
      - alert: CPU負載 > 80%
        expr: 100 - (avg by(instance) (rate(node_cpu_seconds_total{mode="idle"}[2m])) * 100) > 80
        for: 0m
        labels:
          severity: warning
        annotations:
          summary: Host high CPU load (instance {{ $labels.instance }})
          description: "CPU load is > 80%\n  VALUE = {{ $value }}\n  LABELS = {{ $labels }}"

告警:https://awesome-prometheus-alerts.grep.to/rules#prometheus-self-monitoring

官網儀表盤:https://grafana.com/grafana/dashboards/

到此這篇關于docker部署grafana+prometheus配置的文章就介紹到這了,更多相關docker部署grafana+prometheus內容請搜索服務器之家以前的文章或繼續瀏覽下面的相關文章希望大家以后多多支持服務器之家!

原文鏈接:https://www.cnblogs.com/runzhao/p/15716274.html

延伸 · 閱讀

精彩推薦
主站蜘蛛池模板: 韩国帅男同gay网站 韩国三级在线播放 | 耽美肉文高h | 亚洲激情偷拍 | porno movie hd高清| 91最新入口| 蜜色影院 | 奇米7777第四色 | 热99re久久精品国产首页 | 思思91精品国产综合在线 | 99福利网| 青春草视频免费观看 | 草莓秋葵菠萝蜜绿巨人污 | 免费视屏 | 午夜伦理电影在线观免费 | 精品老司机在线视频香蕉 | 亚洲精品国偷拍自产在线观看蜜臀 | 91porny新九色在线 | 亚洲欧美精品天堂久久综合一区 | 波多野结衣教师未删减版 | 精品免费| 男人狂躁女人gif动态图 | 狠狠燥 | 亚洲精品免费在线观看 | 91高清国产视频 | 九色PORNY真实丨国产大胸 | 天莱男模gary | 男人捅女人的鸡鸡 | 国产一区二区视频免费 | 成人午夜爽爽爽免费视频 | 2021日产国产麻豆 | 成年女人毛片免费观看97 | 亚洲H成年动漫在线观看不卡 | 美国雪白人妖sarina | 热99在线视频| 成人在线视频在线观看 | 91大片淫黄大片在线天堂 | 红色一片在线影视 | 亚洲精品视频网 | 天天久久综合 | voyeur 中国女厕 亚洲女厕 | 11 13加污女qq看他下面 |