# File: /lib/netdata/conf.d/ebpf.d.conf
#
# Global options
#
# The `ebpf load mode` option accepts the following values:
#  `entry`  : The eBPF collector only monitors function calls and does not show charts related to errors.
#  `return` : In `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
#             new charts for the return values of these functions, such as errors. (An example follows the
#             `[global]` section below.)
#
# The eBPF collector also creates charts for each running application through an integration with `apps.plugin`
# or `cgroups.plugin`.
# If you want to disable the integration with `apps.plugin` or `cgroups.plugin`, along with the charts they create,
# set `apps` and `cgroups` to `no`.
#
# The `update every` option defines the number of seconds between reading data from the kernel and sending it to Netdata.
#
# The `pid table size` defines the maximum number of PIDs stored in the application hash tables.
#
# The `btf path` specifies where to find the BTF files.
#
# The `maps per core` option defines whether hash tables are stored per core. This option is ignored on kernels older than 4.15.
#
# The `lifetime` option defines how long, in seconds, a thread will run once it is enabled by a function.
#
[global]
    ebpf load mode = entry
    apps = no
    cgroups = no
    update every = 5
    pid table size = 32768
    btf path = /sys/kernel/btf/
    maps per core = yes
    lifetime = 300
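
# Example (a sketch, not a recommendation): to also chart function return values, such as errors,
# the section above could use `return` mode instead:
#
#     [global]
#         ebpf load mode = return
#
# Note that `return` mode attaches additional probes to function exits, so it typically adds overhead
# compared with `entry`.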

#
# eBPF Programs
#
# The eBPF collector has the following eBPF programs:
#
#  `cachestat` : Make charts for kernel functions related to the page cache.
#  `dcstat`    : Make charts for kernel functions related to the directory cache.
#  `disk`      : Monitor I/O latencies for disks.
#  `fd`        : This eBPF program creates charts that show information about file manipulation.
#  `filesystem`: Monitor calls for functions used to manipulate specific filesystems.
#  `hardirq`   : Monitor latency of serving hardware interrupt requests (hard IRQs).
#  `mdflush`   : Monitor flush counts for multi-devices (Linux software RAID).
#  `mount`     : Monitor calls for the `mount` and `umount` syscalls.
#  `oomkill`   : This eBPF program creates a chart that shows which processes were OOM killed and when.
#  `process`   : This eBPF program creates charts that show information about the process life cycle.
#  `shm`       : Monitor calls for the `shmget`, `shmat`, `shmdt` and `shmctl` syscalls.
#  `socket`    : This eBPF program creates charts with information about `TCP` and `UDP` functions, including the
#                bandwidth consumed by each.
#  `softirq`   : Monitor latency of serving software interrupt requests (soft IRQs).
#  `sync`      : Monitor calls for the syscall sync(2).
#  `swap`      : Monitor calls for internal swap functions.
#  `vfs`       : This eBPF program creates charts that show information about process VFS I/O, VFS file manipulation,
#                and files removed.
#
# When the plugin detects that the system has BTF support, it enables the integration with `apps.plugin`.
# An example of enabling a thread follows this section.
#
[ebpf programs]
    cachestat = no
    dcstat = no
    disk = no
    fd = no
    filesystem = no
    hardirq = no
    mdflush = no
    mount = yes
    oomkill = yes
    process = no
    shm = no
    socket = no
    softirq = yes
    sync = no
    swap = no
    vfs = no
    network connections = no
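
# Example (a sketch, assuming the option pairing above): to chart per-connection bandwidth, the
# `socket` thread could be enabled together with `network connections`:
#
#     [ebpf programs]
#         socket = yes
#         network connections = yes
#
# The assumption here is that `network connections` extends the charts produced by the `socket`
# thread, so the two are enabled together.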

#
# Inter-Process Communication (IPC)
#
# Configurations for communication between different plugins.
#
# Available `integration` options (an example follows the `[ipc]` section below):
#     `shm`     : Shared Memory. Collectors will use the same shared memory
#                 to avoid duplication.
#     `socket`  : Unix socket. The eBPF plugin will not store data in the user ring;
#                 it will only read data from the kernel and send it to requesting clients.
#     `disabled`: Disables data sharing between collectors.
#
# The `backlog` option defines the maximum number of concurrent connections that
# can be queued for acceptance.
#
# The `bind to` option defines the path for the Unix socket.
#
[ipc]
    integration = disabled
    bind to = unix:/tmp/netdata_ebpf_sock
    backlog = 20
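
# Example (a sketch, assuming the defaults above): to let collectors share data through shared
# memory instead of disabling IPC, only the `integration` value needs to change:
#
#     [ipc]
#         integration = shm
#         bind to = unix:/tmp/netdata_ebpf_sock
#         backlog = 20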