This article walks through an example of database monitoring with docker-compose: Prometheus and a set of exporters collect the metrics, and Grafana renders the dashboards.
Monitored components:
Data collection:
  prometheus
  cadvisor
  node-exporter
  redis-exporter
  mysql-exporter
Reporting:
  grafana
nginx monitoring would require compiling the vts module into nginx, which is fairly cumbersome; for nginx it is easier to use the ELK stack instead: collect the logs directly with Filebeat and build the reports in Kibana (a minimal Filebeat sketch follows).
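As a rough sketch of that ELK alternative (not part of this compose project), a minimal filebeat.yml could look like the following; the nginx log path and the Elasticsearch/Kibana hosts are assumptions, not values from this setup.

# filebeat.yml — hypothetical sketch (Filebeat 7.x syntax): ship nginx logs to Elasticsearch
filebeat.inputs:
  - type: log
    enabled: true
    paths:
      - /var/log/nginx/access.log    # assumed log location
output.elasticsearch:
  hosts: ["elasticsearch:9200"]      # assumed host, not defined in this compose file
setup.kibana:
  host: "kibana:5601"                # assumed host; Kibana then builds the reports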
docker-compose.yml:

version: '2'
services:
  # db server
  redis:
    build: redis/
    container_name: redis
    restart: unless-stopped
    volumes:
      - ~/docker/redis/data:/data
      - ~/docker/redis/conf/master/redis.conf:/etc/redis/redis.conf
    ports:
      - "6379:6379"
#  mysql:
#    build: mysql/
#    container_name: mysql
#    restart: unless-stopped
#    volumes:
#      # - ~/docker/mysql/data:/var/lib/mysql
#      - ~/docker/mysql/data:/var/lib/mysql
#      - ~/github/docker_db/mysql/conf.d:/etc/mysql/conf.d
#    ports:
#      - "3306:3306"
#    environment:
#      MYSQL_ROOT_PASSWORD: "mysecretpassword"

  # exporter
  cadvisor:
    image: google/cadvisor:latest
    container_name: cadvisor
    restart: unless-stopped
    ports:
      - '8080:8080'
    volumes:
      - /:/rootfs:ro
      - /var/run:/var/run:rw
      - /sys:/sys:ro
      - /var/lib/docker/:/var/lib/docker:ro

  node-exporter:
    image: prom/node-exporter:latest
    container_name: node-exporter
    restart: unless-stopped
    ports:
      - '9100:9100'
    command:
      - '--path.procfs=/host/proc'
      - '--path.sysfs=/host/sys'
      - '--collector.filesystem.ignored-mount-points=^/(sys|proc|dev|host|etc)($$|/)'
      - '--collector.textfile.directory=/node_exporter/prom'
    volumes:
      - /:/rootfs:ro
      - /proc:/host/proc:ro
      - /sys:/host/sys:ro
      - ~/docker/prometheus_exporter/prom:/node_exporter/prom

  # mysql exporter
#  mysql-exporter:
#    image: prom/mysqld-exporter
#    container_name: mysql-exporter
#    hostname: mysql-exporter
#    user: '0'
#    restart: always
#    ports:
#      - "9104:9104"
#    environment:
#      # DATA_SOURCE_NAME: "root:yunjingtest@(localhost:3306)"
#      DATA_SOURCE_NAME: "root:mysecretpassword@(mysql:23306)"
#    depends_on:
#      - mysql

  # redis exporter
  redis-exporter:
    image: oliver006/redis_exporter
    container_name: redis-exporter
    hostname: redis-exporter
    restart: always
    ports:
      - "9121:9121"
    command:
      - "--redis.addr=redis://redis:6379"
    depends_on:
      - redis

  prometheus:
    image: prom/prometheus:latest
    container_name: prometheus
    restart: unless-stopped
    ports:
      - '9090:9090'
    # user: '0'
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus/data'
      - '--storage.tsdb.retention=90d'
      - '--web.enable-lifecycle'
    volumes:
      - ./prometheus/conf/prometheus.yml:/etc/prometheus/prometheus.yml
      - ~/docker/prometheus/data:/prometheus/data
    depends_on:
      - cadvisor
      - node-exporter

  grafana:
    image: grafana/grafana:latest
    container_name: grafana
    restart: unless-stopped
    ports:
      - '3000:3000'
    depends_on:
      - prometheus
    volumes:
      - ~/docker/grafana:/var/lib/grafana
    environment:
      - HTTP_USER=admin
      - HTTP_PASS=yunjingtest
      # - GF_SECURITY_ADMIN_PASSWORD=yunjingtest
      # - GF_USERS_ALLOW_SIGN_UP=false

networks:
  db:
    driver: bridge
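Before starting anything, the file can be sanity-checked with docker-compose itself; `config` parses and resolves the compose file and reports syntax errors:

# parse and print the resolved compose file; a non-zero exit means the YAML is invalid
docker-compose config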
run:
➜  docker_db git:(master) docker-compose up -d
Creating redis          ... done
Creating cadvisor       ... done
Creating node-exporter  ... done
Creating redis-exporter ... done
Creating prometheus     ... done
Creating grafana        ... done
➜  docker_db git:(master) docker-compose ps
     Name                    Command               State           Ports
--------------------------------------------------------------------------------
cadvisor         /usr/bin/cadvisor -logtostderr    Up      0.0.0.0:8080->8080/tcp
grafana          /run.sh                           Up      0.0.0.0:3000->3000/tcp
node-exporter    /bin/node_exporter --path. ...    Up      0.0.0.0:9100->9100/tcp
prometheus       /bin/prometheus --config.f ...    Up      0.0.0.0:9090->9090/tcp
redis            docker-entrypoint.sh /usr/ ...    Up      0.0.0.0:6379->6379/tcp
redis-exporter   /redis_exporter --redis.ad ...    Up      0.0.0.0:9121->9121/tcp
➜  docker_db git:(master)
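A quick way to confirm that each exporter is actually serving data on its published port (standard endpoints for these images):

# each exporter exposes a Prometheus /metrics endpoint on the port mapped above
curl -s http://localhost:9100/metrics | head           # node-exporter
curl -s http://localhost:8080/metrics | head           # cadvisor
curl -s http://localhost:9121/metrics | grep redis_up  # redis-exporter; redis_up 1 means Redis is reachable
curl -s http://localhost:9090/-/healthy                # Prometheus liveness check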
Check that all scrape targets are UP: http://localhost:9090/targets
Redis status dashboard (typical queries behind such panels are sketched below):
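The panels of a Redis dashboard like this are built on redis_exporter metrics; a few typical PromQL queries look roughly like the following (metric names from oliver006/redis_exporter, not necessarily the exact queries of the imported dashboard):

redis_up                                   # 1 if the exporter can reach the Redis instance
redis_connected_clients                    # currently connected clients
redis_memory_used_bytes                    # memory used by Redis, in bytes
rate(redis_commands_processed_total[1m])   # commands processed per second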
Prometheus status dashboards: search for a dashboard on the Grafana website and note its template ID.
Monitored metrics, template ID: 8919:
CPU
memory
disk
system calls
network I/O ...
CPU / memory:
System load / disk / network I/O (typical PromQL behind these panels is sketched below):
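The panels above map to node-exporter metrics; typical queries look roughly like this (metric names as exposed by node_exporter ≥ 0.16; dashboard 8919 may phrase them differently):

100 - avg by (instance) (rate(node_cpu_seconds_total{mode="idle"}[5m])) * 100   # CPU busy %
node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes                     # fraction of RAM still available
node_load1                                                                      # 1-minute load average
node_filesystem_avail_bytes                                                     # free bytes per filesystem
rate(node_network_receive_bytes_total[5m])                                      # network receive throughput (bytes/s)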
prometheus/conf/prometheus.yml (mounted into the prometheus container as /etc/prometheus/prometheus.yml):

# my global config
global:
  scrape_interval:     15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets:
          # - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'prometheus'
    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.
    static_configs:
      - targets: ['localhost:9090']

  - job_name: 'node-exporter'
    scrape_interval: 5s
    static_configs:
      - targets: ['node-exporter:9100']

  - job_name: 'cadvisor'
    scrape_interval: 5s
    static_configs:
      - targets: ['cadvisor:8080']

  # - job_name: 'mysql-exporter'
  #   scrape_interval: 5s
  #   static_configs:
  #     - targets: ['mysql-exporter:9104']

  # - job_name: 'mysql-exporter-100'
  #   scrape_interval: 5s
  #   static_configs:
  #     - targets: ['mysql-exporter-100:9105']

  - job_name: 'redis-exporter'
    scrape_interval: 5s
    static_configs:
      - targets: ['redis-exporter:9121']
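Because Prometheus is started with --web.enable-lifecycle (see the compose file above), edits to this configuration can be applied without restarting the container:

# ask Prometheus to re-read prometheus.yml; requires the --web.enable-lifecycle flag
curl -X POST http://localhost:9090/-/reload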