# my global config
global:
  scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets:
          # - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"

# Scrape configurations; the first job scrapes Prometheus itself.
scrape_configs:
  - job_name: 'Prometheus'
    static_configs:
      - targets: ['127.0.0.1:9090']

  - job_name: 'Grafana'
    static_configs:
      - targets: ['127.0.0.1:3000']

  - job_name: 'Nacos'
    metrics_path: '/nacos/actuator/prometheus'
    static_configs:
      - targets: ['127.0.0.1:8848']

  - job_name: RuoYi-Cloud-Plus
    honor_timestamps: true
    scrape_interval: 15s
    scrape_timeout: 10s
    metrics_path: /actuator/prometheus
    scheme: http
    http_sd_configs:
      - url: 'http://127.0.0.1:9100/actuator/prometheus/sd'
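
# The RuoYi-Cloud-Plus job uses Prometheus HTTP service discovery: the URL in
# 'http_sd_configs' must return a JSON array of objects, each containing a
# "targets" list and an optional "labels" map. A minimal sketch of such a
# response is shown below; the target addresses and the "application" label
# value are illustrative assumptions, not output of the actual endpoint.
#
# [
#   {
#     "targets": ["127.0.0.1:9201", "127.0.0.1:9202"],
#     "labels": { "application": "ruoyi-system" }
#   }
# ]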