# --- General System Resource Limits ---

# Increase the maximum number of memory map areas a process may have.
# This helps prevent out-of-memory errors for applications like Nginx that handle many connections,
# each potentially requiring its own memory mappings. Your value of 262144 is well-suited.
vm.max_map_count = 262144

# Increase the maximum number of file handles available system-wide.
# Nginx, especially as a reverse proxy or load balancer, opens a large number of file descriptors
# for incoming client connections, outgoing backend connections, log files, and cached content.
# Your high value (about 3.26 million) is excellent for very high concurrency.
# Ensure that your Nginx `worker_rlimit_nofile` and the `ulimit -n` for the Nginx user are
# set to a similarly high value; those are per-process limits and must fit within this
# system-wide cap. See the sketch below.
fs.file-max = 3261780
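
# A minimal sketch of the matching per-process limits, assuming a systemd-managed Nginx
# (the drop-in path and the reuse of the fs.file-max value here are illustrative assumptions):
#
#   # /etc/nginx/nginx.conf
#   worker_rlimit_nofile 3261780;
#
#   # /etc/systemd/system/nginx.service.d/limits.conf (hypothetical drop-in file)
#   [Service]
#   LimitNOFILE=3261780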

# Increase the maximum number of process IDs (PIDs) above the default of 32768.
# This helps prevent PID exhaustion on busy systems, ensuring new processes and threads
# can always be created.
kernel.pid_max = 65536

# --- Network Buffer and Memory Tuning ---

# Increase maximum receive socket buffer size (in bytes).
# Larger buffers allow the kernel to buffer more incoming data, improving throughput,
# especially under high network bandwidth and/or latency. Your value of 16MB is generous.
net.core.rmem_max = 16777216

# Increase maximum send socket buffer size (in bytes).
# Similar to rmem_max, this allows the kernel to buffer more outgoing data, aiding throughput.
# Your value of 16MB is generous.
net.core.wmem_max = 16777216

# Increase Linux auto-tuning TCP receive memory limits (min, default, max in bytes).
# This allows the kernel to dynamically adjust the receive buffer size for each TCP connection.
# Your settings provide a good range for dynamic scaling.
net.ipv4.tcp_rmem = 4096 12582912 16777216

# Increase Linux auto-tuning TCP send memory limits (min, default, max in bytes).
# Similar to tcp_rmem, this tunes the send buffer auto-tuning.
# Your settings provide a good range for dynamic scaling.
net.ipv4.tcp_wmem = 4096 12582912 16777216
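
# A quick read-only check that the buffer limits took effect after loading this file
# (these are the same sysctl keys as above, queried from a shell):
#
#   sysctl net.core.rmem_max net.core.wmem_max
#   sysctl net.ipv4.tcp_rmem net.ipv4.tcp_wmem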

# --- TCP Connection Management and Queueing ---

# Increase the maximum input queue length for network devices.
# This specifies the maximum number of packets that can be queued on a network device's input
# queue before they are dropped. A higher value helps prevent packet loss under bursty traffic.
# Your value of 32768 is very high, suitable for extreme loads.
net.core.netdev_max_backlog = 32768
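
# If you suspect the input queue is still overflowing, /proc/net/softnet_stat shows
# per-CPU counters; the second column is dropped packets (in hex), one row per CPU:
#
#   cat /proc/net/softnet_stat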

# Increase the maximum SYN backlog queue size.
# This defines the maximum number of partially open connections (SYN_RECV state) that the
# kernel will queue before ignoring new SYN requests. Crucial for handling connection floods.
# Your value of 32768 is very aggressive and highly suitable for a busy load balancer.
net.ipv4.tcp_max_syn_backlog = 32768

# Increase the maximum accept queue limit for listening sockets (SOMAXCONN).
# This defines the maximum number of completed TCP connections that are waiting in the
# accept queue for the application (Nginx) to accept them. A higher value prevents connection
# rejections under high rates of new connections. Your value of 65535 is excellent, but note
# that it is only a ceiling; Nginx must request a matching backlog (see the example below).
net.core.somaxconn = 65535
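
# `somaxconn` only caps what a listener may request; Nginx sets its own per-socket
# backlog via the `listen` directive. A hedged example (the port is illustrative):
#
#   # /etc/nginx/nginx.conf
#   listen 80 backlog=65535;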

# Enable reuse of TIME_WAIT sockets.
# This is CRITICAL for high-traffic reverse proxies and load balancers. It allows the kernel
# to reuse sockets in the TIME_WAIT state for new outgoing connections (it only affects
# connections this host initiates), preventing port exhaustion, which is a common issue for
# services making many outbound connections.
net.ipv4.tcp_tw_reuse = 1

# Reduce how long orphaned connections linger in the FIN_WAIT_2 state.
# Note: despite common folklore, this setting does not shorten TIME_WAIT; `tcp_fin_timeout`
# controls the FIN_WAIT_2 timeout for connections the application has already closed.
# Lowering it from the default of 60 seconds frees resources faster when peers never finish
# the close handshake. Your value of 30 seconds is a common choice; very aggressive setups
# might use 15.
net.ipv4.tcp_fin_timeout = 30

# Increase the maximum number of sockets in TIME_WAIT state.
# Even with `tcp_tw_reuse` enabled, some sockets will still enter TIME_WAIT. Above this
# limit, the kernel immediately destroys further TIME_WAIT sockets and logs a warning,
# weakening protection against stray packets from old connections. Your value of 400000
# is generous.
net.ipv4.tcp_max_tw_buckets = 400000
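
# To see how close the system is to that limit at any moment (read-only check):
#
#   ss -tan state time-wait | wc -l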

# Increase the maximum number of orphaned sockets.
# Orphaned sockets are those no longer attached to any process file descriptor but still
# held by the kernel to complete the close sequence. A higher limit prevents the kernel
# from resetting connections prematurely under heavy load. Your value of 60000 is good.
net.ipv4.tcp_max_orphans = 60000

# Increase the local port range used by TCP and UDP for outgoing connections.
# As a reverse proxy, Nginx initiates many outgoing connections to backend servers.
# A wider ephemeral port range helps prevent port exhaustion when making many simultaneous
# requests. Your range (1024-65535) provides 64512 ephemeral ports and is standard and
# effective.
net.ipv4.ip_local_port_range = 1024 65535

# Reduce the number of SYN and SYN+ACK retransmissions before the kernel gives up on
# establishing a connection. This makes connection establishment fail faster if a client
# or backend server is unreachable or unresponsive, freeing up resources sooner.
# Your value of 1 is very aggressive (only one retry). For maximum reliability, 2 or 3 might
# be used, but for a load balancer, 1 or 2 is often acceptable to quickly discard failed
# connections.
net.ipv4.tcp_syn_retries = 1
net.ipv4.tcp_synack_retries = 1

# --- TCP Congestion Control & Performance Optimizations ---

# Enable TCP BBR congestion control.
# BBR (Bottleneck Bandwidth and RTT) is a modern congestion control algorithm that often
# provides significantly better throughput and lower latency, especially on high-latency
# or lossy networks, compared to older algorithms like CUBIC. Excellent choice for Nginx,
# provided the `tcp_bbr` module is available (see the check below).
net.ipv4.tcp_congestion_control = bbr
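
# BBR is only selectable if the tcp_bbr module is loaded (built in on many distros).
# A pre-flight check and, if needed, a persistent module load (the modules-load.d
# filename is just an example):
#
#   sysctl net.ipv4.tcp_available_congestion_control
#   sudo modprobe tcp_bbr
#   echo tcp_bbr | sudo tee /etc/modules-load.d/bbr.conf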

# Set the default queueing discipline for network devices.
# fq_codel (Fair Queueing with Controlled Delay) aims to provide low latency and
# high throughput by actively managing bufferbloat, leading to a smoother network experience.
# Excellent choice for modern server environments. Note: on kernels older than 4.13, BBR
# requires the `fq` qdisc for pacing; from 4.13 onward BBR paces internally, so fq_codel
# works fine alongside it.
net.core.default_qdisc = fq_codel

# Enable TCP MTU probing.
# A value of 1 enables Packetization Layer Path MTU Discovery (RFC 4821) as a fallback:
# when an ICMP black hole is detected, TCP probes for the largest usable Maximum
# Transmission Unit (MTU) along the path, avoiding fragmentation-related stalls.
# (A value of 2 would probe on every connection.)
net.ipv4.tcp_mtu_probing = 1

# Disable caching of metrics from previous TCP connections.
# The kernel normally saves per-destination metrics such as `ssthresh` (slow start
# threshold) and RTT estimates when a connection closes. Disabling this cache ensures
# new connections don't start from potentially stale congestion information, which can
# lead to faster ramp-up.
net.ipv4.tcp_no_metrics_save = 1

# Enable TCP Window Scaling.
# This allows TCP to use window sizes larger than 64KB, which is essential for high-bandwidth
# connections over long distances to maximize throughput. It's usually enabled by default,
# but explicitly setting it to 1 ensures it's active.
net.ipv4.tcp_window_scaling = 1

# --- Memory Management (Swapping) ---

# Reduce the kernel's tendency to swap.
# `vm.swappiness` controls how aggressively the kernel swaps out idle memory pages.
# A value of 1 means the kernel will only swap when absolutely necessary to avoid
# out-of-memory (OOM) conditions. For a dedicated Nginx server with sufficient RAM,
# this is an excellent setting as it prioritizes keeping data in physical memory,
# significantly improving responsiveness and preventing I/O bottlenecks from swapping.
vm.swappiness = 1

# Set the minimum amount of free memory (in KB) the kernel keeps in reserve.
# Below this watermark the kernel reclaims memory aggressively; the reserve also
# guarantees that atomic allocations (e.g., for network packet handling) can succeed
# under pressure. Your value of 320MB is reasonable for a network-heavy server.
vm.min_free_kbytes = 327680

# Control the kernel's tendency to reclaim memory used for caching directory and inode objects.
# Lower values make the kernel less aggressive in reclaiming this cache, potentially keeping
# more filesystem metadata in memory, which might be useful if Nginx frequently accesses many
# small files; higher values make it more aggressive. For most Nginx reverse proxy use cases,
# the default of 100 is suitable.
# vm.vfs_cache_pressure = 100 # Uncomment and adjust if you have a specific need to change this.

# --- Security Hardening (General Server Best Practices) ---

# Re-enable SYN cookie flood protection.
# Your original setting `net.ipv4.tcp_syncookies = 0` disables this. While your high
# `tcp_max_syn_backlog` provides a good buffer, SYN cookies are a crucial last line of defense
# against SYN flood DDoS attacks. When the SYN backlog is full, SYN cookies allow the server
# to respond to SYN requests without allocating full connection state immediately, protecting
# it from being overwhelmed. It is highly recommended to keep this enabled for public-facing
# servers.
net.ipv4.tcp_syncookies = 1
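
# To see whether SYN cookies are actually being triggered (read-only checks; exact
# counter names and log wording vary slightly across kernel versions):
#
#   netstat -s | grep -i syn
#   dmesg | grep -i 'SYN flooding'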

# Ignore ICMP broadcast requests to prevent smurf attacks.
net.ipv4.icmp_echo_ignore_broadcasts = 1

# Ignore bad ICMP error messages (e.g., from malformed packets).
net.ipv4.icmp_ignore_bogus_error_responses = 1

# Log packets with impossible addresses (martians) to the kernel log.
# This helps detect network configuration errors or potential attacks (e.g., IP spoofing).
net.ipv4.conf.all.log_martians = 1
net.ipv4.conf.default.log_martians = 1

# Disable source routing.
# Prevents attackers from specifying the exact network path a packet should take, enhancing security.
net.ipv4.conf.all.accept_source_route = 0
net.ipv4.conf.default.accept_source_route = 0

# Enable reverse path filtering (strict mode).
# Helps prevent IP spoofing by checking if the source IP address of an incoming packet
# could be routed back via the interface it arrived on. Recommended for security.
net.ipv4.conf.all.rp_filter = 1
net.ipv4.conf.default.rp_filter = 1

# Do not accept ICMP redirects.
# Prevents man-in-the-middle attacks where an attacker could send ICMP redirect messages
# to trick your system into sending traffic through a malicious gateway.
net.ipv4.conf.all.accept_redirects = 0
net.ipv4.conf.default.accept_redirects = 0
net.ipv4.conf.all.secure_redirects = 0
net.ipv4.conf.default.secure_redirects = 0

# Disable IP forwarding (unless the server is intentionally acting as a router).
# For a dedicated Nginx reverse proxy/load balancer, this should typically be 0.
net.ipv4.ip_forward = 0

# Do not send ICMP redirects either; a proxy host is not a router.
net.ipv4.conf.all.send_redirects = 0
net.ipv4.conf.default.send_redirects = 0
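
# To apply: drop this file into /etc/sysctl.d/ (the filename below is just an example),
# reload, and spot-check a few keys:
#
#   sudo cp 99-nginx-tuning.conf /etc/sysctl.d/
#   sudo sysctl --system
#   sysctl net.ipv4.tcp_congestion_control net.core.somaxconn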