Collecting system disk metrics: the ignore_fs filter config has no effect. #1093

Open
GitChenfan opened this issue Nov 23, 2024 · 6 comments

@GitChenfan

Relevant config.toml

[global]
# whether print configs
print_configs = false

# add label(agent_hostname) to series
# "" -> auto detect hostname
# "xx" -> use specified string xx
# "$hostname" -> auto detect hostname
# "$ip" -> auto detect ip
# "$sn" -> auto detect bios serial number
# "$hostname-$ip" -> auto detect hostname and ip to replace the vars
hostname = "$ip"

# will not add label(agent_hostname) if true
omit_hostname = false

# global collect interval, unit: second
interval = 15

# input provider settings; optional: local / http
providers = ["local"]

# The concurrency setting controls the number of concurrent tasks spawned for each input. 
# By default, it is set to runtime.NumCPU() * 10. This setting is particularly useful when dealing
# with configurations that involve extensive instances of input like ping, net_response, or http_response.
# As multiple goroutines run simultaneously, the "ResponseTime" metric might appear larger than expected. 
# However, utilizing the concurrency setting can help mitigate this issue and optimize the response time.
concurrency = -1

# Set http.ignore_global_labels = true to disable reporting these custom labels
[global.labels]
# region = "shanghai"
# env = "localhost"
# sn = "$sn"

[log]
# file_name is the file to write logs to
file_name = "stdout"

# the options below will not work when file_name is stdout or stderr
# max_size is the maximum size in megabytes of the log file before it gets rotated. It defaults to 100 megabytes.
max_size = 100
# max_age is the maximum number of days to retain old log files based on the timestamp encoded in their filename.  
max_age = 1
# max_backups is the maximum number of old log files to retain.  
max_backups = 1
# local_time determines if the time used for formatting the timestamps in backup files is the computer's local time.  
local_time = true
# Compress determines if the rotated log files should be compressed using gzip. 
compress = false

[writer_opt]
batch = 1000
chan_size = 1000000

[[writers]]
url = "http://192.168.24.1:17000/prometheus/v1/write"

## Optional TLS Config
# tls_min_version = "1.2"
# tls_ca = "/etc/categraf/ca.pem"
# tls_cert = "/etc/categraf/cert.pem"
# tls_key = "/etc/categraf/key.pem"
## Use TLS but skip chain & host verification
# insecure_skip_verify = true

# Basic auth username
basic_auth_user = ""

# Basic auth password
basic_auth_pass = ""

## Optional headers
# headers = ["X-From", "categraf", "X-Xyz", "abc"]

# timeout settings, unit: ms
timeout = 5000
dial_timeout = 2500
max_idle_conns_per_host = 100

[http]
enable = false
address = ":9100"
print_access = false
run_mode = "release"
ignore_hostname = false
agent_host_tag = ""
ignore_global_labels = false

[ibex]
enable = false
## ibex flush interval
interval = "1000ms"
## n9e ibex server rpc address
servers = ["192.168.24.1:20090"]
## temp script dir
meta_dir = "./meta"

[heartbeat]
enable = true

# report metadata: os version, cpu.util, mem.util
url = "http://192.168.24.1:17000/v1/n9e/heartbeat"

# interval, unit: s
interval = 10

# Basic auth username
basic_auth_user = ""

# Basic auth password
basic_auth_pass = ""

## Optional headers
# headers = ["X-From", "categraf", "X-Xyz", "abc"]

# timeout settings, unit: ms
timeout = 5000
dial_timeout = 2500
max_idle_conns_per_host = 100

[prometheus]
enable = false
scrape_config_file = "/path/to/in_cluster_scrape.yaml"
## log level, debug warn info error
log_level = "info"
## wal file storage path, default ./data-agent
# wal_storage_path = "/path/to/storage"
## wal retention duration, default 2 hours
# wal_min_duration = 2

Logs from categraf

[logs]
## just a placeholder
api_key = "ef4ahfbwzwwtlwfpbertgq1i6mq0ab1q"
## enable log collect or not
enable = false
## the server that receives logs: http/tcp/kafka; only kafka brokers may be multiple ip:port pairs joined with ","
send_to = "127.0.0.1:17878"
## send logs with protocol: http/tcp/kafka
send_type = "http"
topic = "flashcatcloud"
## send logs with compression or not 
use_compress = false
## use ssl or not
send_with_tls = false
## send logs in batches
batch_wait = 5
## save offset in this path 
run_path = "/opt/categraf/run"
## max number of files that can be open
open_files_limit = 100
## scan config files every 10 seconds
scan_period = 10
## udp read buffer size
frame_size = 9000

## channel size, default 100
## buffer size (in lines) for reading logs
chan_size = 1000
## pipeline num, default 4
## number of threads processing logs
pipeline = 4
## configuration for kafka
## specify the kafka version
kafka_version = "3.3.2"
# default 0 means serial sending; keep the default if log ordering matters
batch_max_concurrence = 0
# max concurrent batches, default 100
batch_max_size = 100
# max content size per send, default 1000000
batch_max_content_size = 1000000
# client timeout in seconds
producer_timeout = 10

# whether to enable SASL
sasl_enable = false
sasl_user = "admin"
sasl_password = "admin"
# PLAIN
sasl_mechanism = "PLAIN"
# v1
sasl_version = 1
# set true
sasl_handshake = true
# optional
# sasl_auth_identity = ""
# added in v0.3.39 and later: whether to enable pod log collection
enable_collect_container = false

# whether to collect stdout/stderr of all pods
collect_container_all = true
  ## global processing rules
  # [[logs.Processing_rules]]
  ## single log configure
  [[logs.items]]
  ## file/journald/tcp/udp
  type = "file"
  ## type=file, path is required; type=journald/tcp/udp, port is required
  path = "/opt/tomcat/logs/*.txt"
  source = "tomcat"
  service = "my_service"

System info

categraf 0.3.82, Ubuntu 20.04

Docker

No response

Steps to reproduce

...

Expected behavior

ignore_fs filtering takes effect, so only metrics for the non-ignored filesystem types are collected.

Actual behavior

The fstypes listed in ignore_fs are not filtered out; metrics for those filesystem types still appear on the page.

Additional info

No response

@GitChenfan
Author

On inspection, the problematic metrics are the entries in the findmnt listing whose SOURCE and FSTYPE columns differ.

@kongfei605
Collaborator

On inspection, the problematic metrics are the entries in the findmnt listing whose SOURCE and FSTYPE columns differ.

Could you post a screenshot?

@GitChenfan
Author

[screenshot]

@GitChenfan
Author

root@ubuntu20043:/# findmnt
TARGET SOURCE FSTYPE OPTIONS
/ /dev/sda2 ext4 rw,relatime
├─/sys sysfs sysfs rw,nosuid,nodev,noexec,
│ ├─/sys/kernel/security securityfs securityfs rw,nosuid,nodev,noexec,
│ ├─/sys/fs/cgroup tmpfs tmpfs ro,nosuid,nodev,noexec,
│ │ ├─/sys/fs/cgroup/unified cgroup2 cgroup2 rw,nosuid,nodev,noexec,
│ │ ├─/sys/fs/cgroup/systemd cgroup cgroup rw,nosuid,nodev,noexec,
│ │ ├─/sys/fs/cgroup/cpuset cgroup cgroup rw,nosuid,nodev,noexec,
│ │ ├─/sys/fs/cgroup/perf_event cgroup cgroup rw,nosuid,nodev,noexec,
│ │ ├─/sys/fs/cgroup/blkio cgroup cgroup rw,nosuid,nodev,noexec,
│ │ ├─/sys/fs/cgroup/devices cgroup cgroup rw,nosuid,nodev,noexec,
│ │ ├─/sys/fs/cgroup/rdma cgroup cgroup rw,nosuid,nodev,noexec,
│ │ ├─/sys/fs/cgroup/net_cls,net_prio cgroup cgroup rw,nosuid,nodev,noexec,
│ │ ├─/sys/fs/cgroup/memory cgroup cgroup rw,nosuid,nodev,noexec,
│ │ ├─/sys/fs/cgroup/hugetlb cgroup cgroup rw,nosuid,nodev,noexec,
│ │ ├─/sys/fs/cgroup/cpu,cpuacct cgroup cgroup rw,nosuid,nodev,noexec,
│ │ ├─/sys/fs/cgroup/freezer cgroup cgroup rw,nosuid,nodev,noexec,
│ │ └─/sys/fs/cgroup/pids cgroup cgroup rw,nosuid,nodev,noexec,
│ ├─/sys/fs/pstore pstore pstore rw,nosuid,nodev,noexec,
│ ├─/sys/fs/bpf none bpf rw,nosuid,nodev,noexec,
│ ├─/sys/kernel/debug debugfs debugfs rw,nosuid,nodev,noexec,
│ │ └─/sys/kernel/debug/tracing tracefs tracefs rw,nosuid,nodev,noexec,
│ ├─/sys/kernel/tracing tracefs tracefs rw,nosuid,nodev,noexec,
│ ├─/sys/fs/fuse/connections fusectl fusectl rw,nosuid,nodev,noexec,
│ └─/sys/kernel/config configfs configfs rw,nosuid,nodev,noexec,
├─/proc proc proc rw,nosuid,nodev,noexec,
│ └─/proc/sys/fs/binfmt_misc systemd-1 autofs rw,relatime,fd=28,pgrp=
│ └─/proc/sys/fs/binfmt_misc binfmt_misc binfmt_misc rw,nosuid,nodev,noexec,
├─/dev udev devtmpfs rw,nosuid,noexec,relati
│ ├─/dev/pts devpts devpts rw,nosuid,noexec,relati
│ ├─/dev/shm tmpfs tmpfs rw,nosuid,nodev
│ ├─/dev/hugepages hugetlbfs hugetlbfs rw,relatime,pagesize=2M
│ └─/dev/mqueue mqueue mqueue rw,nosuid,nodev,noexec,
├─/run tmpfs tmpfs rw,nosuid,nodev,noexec,
│ ├─/run/lock tmpfs tmpfs rw,nosuid,nodev,noexec,
│ ├─/run/snapd/ns tmpfs[/snapd/ns] tmpfs rw,nosuid,nodev,noexec,
│ │ └─/run/snapd/ns/lxd.mnt nsfs[mnt:[4026532614]]
│ │ nsfs rw
│ ├─/run/docker/netns/default nsfs[net:[4026531992]]
│ │ nsfs rw
│ ├─/run/docker/netns/0c4c4dfbc66a nsfs[net:[4026533121]]
│ │ nsfs rw
│ ├─/run/user/1001 tmpfs tmpfs rw,nosuid,nodev,relatim
│ ├─/run/docker/netns/202edaffcace nsfs[net:[4026532639]]
│ │ nsfs rw
│ ├─/run/docker/netns/8f92fe284b37 nsfs[net:[4026533003]]
│ │ nsfs rw
│ ├─/run/docker/netns/d3bf38fa027f nsfs[net:[4026532706]]
│ │ nsfs rw
│ ├─/run/docker/netns/4413bca92d85 nsfs[net:[4026533180]]
│ │ nsfs rw
│ ├─/run/docker/netns/4c95c50bd30b nsfs[net:[4026532767]]
│ │ nsfs rw
│ ├─/run/docker/netns/62b4f11a9b19 nsfs[net:[4026532826]]
│ │ nsfs rw
│ ├─/run/docker/netns/4c0eb0b5e124 nsfs[net:[4026533365]]
│ │ nsfs rw
│ ├─/run/docker/netns/1449cd45a217 nsfs[net:[4026532885]]
│ │ nsfs rw
│ ├─/run/docker/netns/aca298d76dd4 nsfs[net:[4026533062]]
│ │ nsfs rw
│ └─/run/docker/netns/eba59bb9f97a nsfs[net:[4026532944]]
│ nsfs rw
├─/data /dev/sdb1 ext4 rw,relatime
│ ├─/data/docker/overlay2/d7f3729dfa420e6580247e50372d6f960893675a0deb69a24f791ba7492865d7/merged overlay overlay rw,relatime,lowerdir=/d
│ ├─/data/docker/overlay2/82da038b7bf90d2d17b50eb9f4f8cd5ab0e18f37973ad228fe30ef8ce0c383de/merged overlay overlay rw,relatime,lowerdir=/d
│ ├─/data/docker/overlay2/9529efe7d3aeb064555f2395e0cea07dbf143c7ce241e8a0bc5a8fe6b810b943/merged overlay overlay rw,relatime,lowerdir=/d
│ ├─/data/docker/overlay2/a0cc495712f452a924c99438d114c17704fdc954f0c9329bda091b05aa205f4d/merged overlay overlay rw,relatime,lowerdir=/d
│ ├─/data/docker/overlay2/3a52ecc59a6bef34c75fd3b3f6f44cd3bd28b456ec8706e862fc22db539efc93/merged overlay overlay rw,relatime,lowerdir=/d
│ ├─/data/docker/overlay2/00f66254c0da7605eea25eb25b814043cc175aa1a07a405eda2bda1847b107c4/merged overlay overlay rw,relatime,lowerdir=/d
│ ├─/data/docker/containers/fed566f81442b1054b1ac3e3744295e1870343424d3858332596f881fdf24939/mounts/shm shm tmpfs rw,nosuid,nodev,noexec,
│ ├─/data/docker/overlay2/696e21fc42a5c841425924e0fb437f419180a98a63c7eb641c7c729f166c0457/merged overlay overlay rw,relatime,lowerdir=/d
│ ├─/data/docker/overlay2/2c91ee5c824c2c2a9423ddcb0cef8c9a5dac483e9b37ef17eef940be813b936a/merged overlay overlay rw,relatime,lowerdir=/d
│ ├─/data/docker/overlay2/c4b130fa33f368506ea1782ebb62186945475e5e31622a71fabb4a22b8d642da/merged overlay overlay rw,relatime,lowerdir=/d
│ ├─/data/docker/overlay2/c47636a6619cf7563875ce67147889455018f62e6d7f84d54b949b565e00d2ae/merged overlay overlay rw,relatime,lowerdir=/d
│ ├─/data/docker/containers/0814f1e948682065df07f6c36e42f354041e80fd940fb01cd4830fa47a57712d/mounts/shm shm tmpfs rw,nosuid,nodev,noexec,
│ ├─/data/docker/containers/2d960255c41639f42f23362836b48c263007c3458576ed670f4af034f0c378d9/mounts/shm shm tmpfs rw,nosuid,nodev,noexec,
│ ├─/data/docker/containers/ba7448d3393f7bb2403f7b137e1a955dfb2e68558ddbcc72f2e6b14d8cdbead4/mounts/shm shm tmpfs rw,nosuid,nodev,noexec,
│ ├─/data/docker/containers/e8a9ce6ddab5c7ba3156af3b6ab14ee65f2730cb22cf55ed1954296914e70a4a/mounts/shm shm tmpfs rw,nosuid,nodev,noexec,
│ ├─/data/docker/overlay2/666ea52331d86b07fcd7308702f281dfab900c0bd4931b62bbbd9574215e4739/merged overlay overlay rw,relatime,lowerdir=/d
│ ├─/data/docker/containers/0e258481f902f7a632dcf4ae0d397ec452e05191f02712c233196dc3c0f2ac0b/mounts/shm shm tmpfs rw,nosuid,nodev,noexec,
│ ├─/data/docker/containers/aac1f1c572a04fb4074e66c54444f60da954ba7c4143f332111258c6575afd59/mounts/shm shm tmpfs rw,nosuid,nodev,noexec,
│ ├─/data/docker/overlay2/076f44e660907a3a51b1d29ef8b04c23b0a1a8f4be1f348155c7213b14e7dfd9/merged overlay overlay rw,relatime,lowerdir=/d
│ ├─/data/docker/containers/81d64b80d5f3e9434f0814345ad307b1857c7dd016a840f3daf1b1fc4ab53689/mounts/shm shm tmpfs rw,nosuid,nodev,noexec,
│ └─/data/docker/containers/23943dfa80067fde0139c1417f1ddc600d0b50abd3fdcc7edbb931f77892e225/mounts/shm shm tmpfs rw,nosuid,nodev,noexec,
├─/snap/core20/2379 /dev/loop1 squashfs ro,nodev,relatime
├─/snap/core18/2829 /dev/loop0 squashfs ro,nodev,relatime
├─/snap/core18/2846 /dev/loop3 squashfs ro,nodev,relatime
├─/snap/core20/2434 /dev/loop2 squashfs ro,nodev,relatime
├─/snap/snapd/21759 /dev/loop5 squashfs ro,nodev,relatime
├─/snap/snapd/21465 /dev/loop6 squashfs ro,nodev,relatime
├─/snap/lxd/24061 /dev/loop4 squashfs ro,nodev,relatime
├─/snap/lxd/29619 /dev/loop7 squashfs ro,nodev,relatime
├─/var/lib/docker_bk/overlay2/3a52ecc59a6bef34c75fd3b3f6f44cd3bd28b456ec8706e862fc22db539efc93/merged overlay overlay rw,relatime,lowerdir=/v
├─/var/lib/docker_bk/overlay2/696e21fc42a5c841425924e0fb437f419180a98a63c7eb641c7c729f166c0457/merged overlay overlay rw,relatime,lowerdir=/v
├─/var/lib/docker_bk/overlay2/00f66254c0da7605eea25eb25b814043cc175aa1a07a405eda2bda1847b107c4/merged overlay overlay rw,relatime,lowerdir=/v
├─/var/lib/docker_bk/overlay2/c4b130fa33f368506ea1782ebb62186945475e5e31622a71fabb4a22b8d642da/merged overlay overlay rw,relatime,lowerdir=/v
├─/var/lib/docker_bk/overlay2/c47636a6619cf7563875ce67147889455018f62e6d7f84d54b949b565e00d2ae/merged overlay overlay rw,relatime,lowerdir=/v
├─/var/lib/docker_bk/overlay2/9529efe7d3aeb064555f2395e0cea07dbf143c7ce241e8a0bc5a8fe6b810b943/merged overlay overlay rw,relatime,lowerdir=/v
├─/var/lib/docker_bk/overlay2/82da038b7bf90d2d17b50eb9f4f8cd5ab0e18f37973ad228fe30ef8ce0c383de/merged overlay overlay rw,relatime,lowerdir=/v
├─/var/lib/docker_bk/overlay2/996fd6aa66b84ffba772ecffabed18dc3d8a11de227d2a8b998ef205e04b6001/merged overlay overlay rw,relatime,lowerdir=/v
├─/var/lib/docker_bk/overlay2/2c91ee5c824c2c2a9423ddcb0cef8c9a5dac483e9b37ef17eef940be813b936a/merged overlay overlay rw,relatime,lowerdir=/v
├─/var/lib/docker_bk/overlay2/a0cc495712f452a924c99438d114c17704fdc954f0c9329bda091b05aa205f4d/merged overlay overlay rw,relatime,lowerdir=/v
├─/var/lib/docker_bk/containers/fed566f81442b1054b1ac3e3744295e1870343424d3858332596f881fdf24939/mounts/shm shm tmpfs rw,nosuid,nodev,noexec,
├─/var/lib/docker_bk/containers/e8a9ce6ddab5c7ba3156af3b6ab14ee65f2730cb22cf55ed1954296914e70a4a/mounts/shm shm tmpfs rw,nosuid,nodev,noexec,
├─/var/lib/docker_bk/containers/0e258481f902f7a632dcf4ae0d397ec452e05191f02712c233196dc3c0f2ac0b/mounts/shm shm tmpfs rw,nosuid,nodev,noexec,
├─/var/lib/docker_bk/containers/2d960255c41639f42f23362836b48c263007c3458576ed670f4af034f0c378d9/mounts/shm shm tmpfs rw,nosuid,nodev,noexec,
├─/var/lib/docker_bk/containers/0814f1e948682065df07f6c36e42f354041e80fd940fb01cd4830fa47a57712d/mounts/shm shm tmpfs rw,nosuid,nodev,noexec,
├─/var/lib/docker_bk/containers/ba7448d3393f7bb2403f7b137e1a955dfb2e68558ddbcc72f2e6b14d8cdbead4/mounts/shm shm tmpfs rw,nosuid,nodev,noexec,
├─/var/lib/docker_bk/containers/81d64b80d5f3e9434f0814345ad307b1857c7dd016a840f3daf1b1fc4ab53689/mounts/shm shm tmpfs rw,nosuid,nodev,noexec,
└─/var/lib/docker_bk/containers/aac1f1c572a04fb4074e66c54444f60da954ba7c4143f332111258c6575afd59/mounts/shm shm tmpfs rw,nosuid,nodev,noexec,
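
For cross-checking: a minimal sketch, assuming categraf's disk input resolves mounts through gopsutil (the module path/version below is an assumption), that prints the Fstype the library reports for every mount. If the nsfs and overlay entries above surface here with an empty or unexpected Fstype, that would explain why ignore_fs misses them.

package main

import (
	"fmt"

	"github.com/shirou/gopsutil/v3/disk" // assumed module path/version
)

func main() {
	// true = list all filesystems, not only physical devices
	parts, err := disk.Partitions(true)
	if err != nil {
		panic(err)
	}
	for _, p := range parts {
		// ignore_fs is matched against this Fstype value, not against
		// the SOURCE/device column shown by findmnt.
		fmt.Printf("%-55s %-25s %s\n", p.Mountpoint, p.Device, p.Fstype)
	}
}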

@kongfei605
Collaborator

Please also paste your input.disk config.

@GitChenfan
Author

GitChenfan commented Nov 26, 2024

# # collect interval
# interval = 15

# # By default stats will be gathered for all mount points.
# # Set mount_points will restrict the stats to only the specified mount points.
# mount_points = ["/", "/data"]

# Ignore mount points by filesystem type.
ignore_fs = ["shm","tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs", "nsfs", "CDFS", "fuse.juicefs"]

ignore_mount_points = ["/boot", "/var/lib/kubelet/pods", "/var/lib/docker"]
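
For reference, a sketch of the filter semantics this config implies (illustrative only, not categraf's actual implementation; exact fstype matching and prefix matching for mount points are assumptions):

package main

import (
	"fmt"
	"strings"

	"github.com/shirou/gopsutil/v3/disk" // assumed module path/version
)

var ignoreFS = []string{"shm", "tmpfs", "devtmpfs", "devfs", "iso9660", "overlay", "aufs", "squashfs", "nsfs"}
var ignoreMountPoints = []string{"/boot", "/var/lib/kubelet/pods", "/var/lib/docker"}

// keep reports whether a partition should be collected: it is dropped when
// its reported Fstype matches an ignore_fs entry (the device/SOURCE column
// is never consulted), or its mount point falls under an ignore_mount_points
// prefix.
func keep(p disk.PartitionStat) bool {
	for _, fs := range ignoreFS {
		if p.Fstype == fs {
			return false
		}
	}
	for _, m := range ignoreMountPoints {
		if strings.HasPrefix(p.Mountpoint, m) {
			return false
		}
	}
	return true
}

func main() {
	parts, _ := disk.Partitions(true)
	for _, p := range parts {
		if keep(p) {
			fmt.Println(p.Mountpoint, p.Fstype)
		}
	}
}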
