File Manager - Edit - /home/lakoyani/lakoyani.com.fj/tuned.conf.tar
usr/lib/tmpfiles.d/tuned.conf

# tuned runtime directory
d /run/tuned 0755 root root -

usr/lib/tuned/cloudlinux-default/tuned.conf

#
# tuned configuration
#

[main]
summary=Optimized for CloudLinux hosting servers
include=throughput-performance

[bootloader]
cmdline = systemd.unified_cgroup_hierarchy=0 systemd.legacy_systemd_cgroup_controller cgroup.memory=nokmem

[cpu]
governor=performance
energy_perf_bias=performance
min_perf_pct=100

[vm]
transparent_hugepages=never

[sysctl]
kernel.numa_balancing = 1
vm.dirty_ratio = 40
vm.dirty_background_ratio = 10
vm.swappiness=10
vm.zone_reclaim_mode=0

# TCP fast open reduces network latency by enabling data exchange
# during the sender's initial TCP SYN. The value 3 enables fast open
# on client and server connections.
net.ipv4.tcp_fastopen=3

[disk-vm]
type=disk
devices = vd*
elevator = mq-deadline

[disk-sas]
type=disk
devices = sd*
elevator = mq-deadline

[disk-nvme]
type=disk
devices = nvme*
elevator = none

usr/lib/tuned/latency-performance/tuned.conf

#
# tuned configuration
#

[main]
summary=Optimize for deterministic performance at the cost of increased power consumption

[cpu]
force_latency=1
governor=performance
energy_perf_bias=performance
min_perf_pct=100

[sysctl]
# ktune sysctl settings for rhel6 servers, maximizing i/o throughput
#
# Minimal preemption granularity for CPU-bound tasks:
# (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
kernel.sched_min_granularity_ns=10000000

# If a workload mostly uses anonymous memory and it hits this limit, the entire
# working set is buffered for I/O, and any more write buffering would require
# swapping, so it's time to throttle writes until I/O can catch up. Workloads
# that mostly use file mappings may be able to use even higher values.
#
# The generator of dirty data starts writeback at this percentage (system default
# is 20%)
vm.dirty_ratio=10

# Start background writeback (via writeback threads) at this percentage (system
# default is 10%)
vm.dirty_background_ratio=3

# The swappiness parameter controls the tendency of the kernel to move
# processes out of physical memory and onto the swap disk.
# 0 tells the kernel to avoid swapping processes out of physical memory
# for as long as possible
# 100 tells the kernel to aggressively swap processes out of physical memory
# and move them to swap cache
vm.swappiness=10

# The total time the scheduler will consider a migrated process
# "cache hot" and thus less likely to be re-migrated
# (system default is 500000, i.e. 0.5 ms)
kernel.sched_migration_cost_ns=5000000
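Each key in a [sysctl] section maps directly onto a file under /proc/sys (every dot becomes a path separator, e.g. vm.dirty_ratio -> /proc/sys/vm/dirty_ratio), so whether the running kernel actually matches a profile can be spot-checked without tuned itself. Below is a minimal Python sketch of such a check; the TARGETS dict simply restates the latency-performance values above by hand rather than parsing the profile file.

#!/usr/bin/env python3
"""Minimal sketch: compare live sysctl values against the
latency-performance targets listed above. TARGETS is copied by hand
from the profile; it is not read from tuned."""
from pathlib import Path

TARGETS = {
    "kernel.sched_min_granularity_ns": "10000000",
    "vm.dirty_ratio": "10",
    "vm.dirty_background_ratio": "3",
    "vm.swappiness": "10",
    "kernel.sched_migration_cost_ns": "5000000",
}

for key, want in TARGETS.items():
    # sysctl key -> /proc/sys path: dots become slashes
    node = Path("/proc/sys") / key.replace(".", "/")
    try:
        have = node.read_text().strip()
    except OSError:
        have = "<unavailable>"  # key absent on this kernel version
    status = "ok" if have == want else "DIFFERS"
    print(f"{key}: current={have} target={want} [{status}]")

Reading /proc/sys needs no privileges, so this runs as an ordinary user; keys that a given kernel no longer exposes are reported as unavailable instead of raising.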
usr/lib/tuned/hpc-compute/tuned.conf

#
# tuned configuration
#

[main]
summary=Optimize for HPC compute workloads
description=Configures virtual memory, CPU governors, and network settings for HPC compute workloads.
include=latency-performance

[vm]
# Most HPC applications can take advantage of hugepages. Force them on.
transparent_hugepages=always

[disk]
# Increase the readahead value to support large, contiguous files.
readahead=>4096

[sysctl]
# Forces hugepages to be allocated on non-hotpluggable memory
vm.hugepages_treat_as_movable=0

# Keep a reasonable amount of memory free to support large mem requests
vm.min_free_kbytes=135168

# Most HPC applications are NUMA aware. Enabling zone reclaim ensures
# memory is reclaimed and reallocated from local pages. Disabling
# automatic NUMA balancing prevents unwanted memory unmapping.
vm.zone_reclaim_mode=1
kernel.numa_balancing=0

# Busy polling helps reduce latency in the network receive path
# by allowing socket layer code to poll the receive queue of a
# network device, and disabling network interrupts.
# A busy_read value greater than 0 enables busy polling; the recommended
# net.core.busy_read value is 50.
# A busy_poll value greater than 0 enables polling globally; the
# recommended net.core.busy_poll value is 50.
net.core.busy_read=50
net.core.busy_poll=50

# TCP fast open reduces network latency by enabling data exchange
# during the sender's initial TCP SYN. The value 3 enables fast open
# on client and server connections.
net.ipv4.tcp_fastopen=3

usr/lib/tuned/balanced/tuned.conf

#
# tuned configuration
#

[main]
summary=General non-specialized tuned profile

[modules]
cpufreq_conservative=+r

[cpu]
priority=10
governor=conservative|powersave
energy_perf_bias=normal

[audio]
timeout=10

[video]
radeon_powersave=dpm-balanced, auto

[disk]
# Comma-separated list of devices; all devices if commented out.
# devices=sda

[scsi_host]
alpm=medium_power

usr/lib/tuned/virtual-guest/tuned.conf

#
# tuned configuration
#

[main]
summary=Optimize for running inside a virtual guest
include=throughput-performance

[sysctl]
# If a workload mostly uses anonymous memory and it hits this limit, the entire
# working set is buffered for I/O, and any more write buffering would require
# swapping, so it's time to throttle writes until I/O can catch up. Workloads
# that mostly use file mappings may be able to use even higher values.
#
# The generator of dirty data starts writeback at this percentage (system default
# is 20%)
vm.dirty_ratio = 30

# Filesystem I/O is usually much more efficient than swapping, so try to keep
# swapping low. It's usually safe to go even lower than this on systems with
# server-grade storage.
vm.swappiness = 30

usr/lib/tuned/network-latency/tuned.conf

#
# tuned configuration
#

[main]
summary=Optimize for deterministic performance at the cost of increased power consumption, focused on low latency network performance
include=latency-performance

[vm]
transparent_hugepages=never

[sysctl]
net.core.busy_read=50
net.core.busy_poll=50
net.ipv4.tcp_fastopen=3
kernel.numa_balancing=0

[bootloader]
cmdline=skew_tick=1
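Several of these profiles set net.ipv4.tcp_fastopen=3, i.e. both the client bit (1) and the server bit (2). The sketch below shows what each side looks like from Python on Linux: the server opts in per-socket with the TCP_FASTOPEN option (whose value is the queue length for pending fast-open requests), and the client hands its payload to the kernel with sendto() and MSG_FASTOPEN so it can ride in the SYN once a fast-open cookie is cached. The 127.0.0.1:8080 endpoint is an arbitrary placeholder, not something from the profiles.

#!/usr/bin/env python3
# Minimal TCP Fast Open sketch for Linux; assumes net.ipv4.tcp_fastopen=3
# is already set as in the profiles above. Host/port are placeholders.
import socket
import threading
import time

ADDR = ("127.0.0.1", 8080)  # hypothetical endpoint

def tfo_server():
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    srv.bind(ADDR)
    # Option value = max number of pending fast-open requests to queue.
    srv.setsockopt(socket.IPPROTO_TCP, socket.TCP_FASTOPEN, 16)
    srv.listen(8)
    conn, peer = srv.accept()
    print("server got:", conn.recv(1024), "from", peer)
    conn.close()
    srv.close()

def tfo_client():
    cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # MSG_FASTOPEN performs an implicit connect(); with no cached cookie
    # the kernel falls back to a normal handshake, and later connections
    # to the same server save a round trip.
    cli.sendto(b"hello", socket.MSG_FASTOPEN, ADDR)
    cli.close()

if __name__ == "__main__":
    t = threading.Thread(target=tfo_server, daemon=True)
    t.start()
    time.sleep(0.2)  # crude wait for the listener to come up
    tfo_client()
    t.join(timeout=2)

Whether the second connection actually skips a round trip can be confirmed with a packet capture: the client's SYN should carry the payload and a TFO cookie option.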