|
@@ -0,0 +1,166 @@
|
|
|
|
+{% if ansible_prolog -%}
|
|
|
|
+{% from 'templates/ansible/prolog.j2' import prolog with context %}
|
|
|
|
+{{ prolog() }}
|
|
|
|
+{% endif -%}
|
|
|
|
+# nginx Configuration File
|
|
|
|
+# https://nginx.org/en/docs/
|
|
|
|
+
|
|
|
|
+# Run as a less privileged user for security reasons.
|
|
|
|
+user www-data;
|
|
|
|
+
|
|
|
|
+# How many worker threads to run;
|
|
|
|
+# "auto" sets it to the number of CPU cores available in the system, and
|
|
|
|
+# offers the best performance. Don't set it higher than the number of CPU
|
|
|
|
+# cores if changing this parameter.
|
|
|
|
+
|
|
|
|
+# The maximum number of connections for Nginx is calculated by:
|
|
|
|
+# max_clients = worker_processes * worker_connections
|
|
|
|
+worker_processes 4;
|
|
|
|
+
|
|
|
|
+# Maximum open file descriptors per process;
|
|
|
|
+# should be > worker_connections.
|
|
|
|
+worker_rlimit_nofile 16384;
|
|
|
|
+
|
|
|
|
+events {
|
|
|
|
+ # When you need > 8000 * cpu_cores connections, you start optimizing your OS,
|
|
|
|
+ # and this is probably the point at where you hire people who are smarter than
|
|
|
|
+ # you, as this is *a lot* of requests.
|
|
|
|
+ worker_connections 8192;
|
|
|
|
+ # Event model to use
|
|
|
|
+ use epoll;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+# PID file
|
|
|
|
+pid /var/run/nginx.pid;
|
|
|
|
+
|
|
|
|
+http {
|
|
|
|
+
|
|
|
|
+ # Hide nginx version information.
|
|
|
|
+ server_tokens off;
|
|
|
|
+
|
|
|
|
+ # Define the MIME types for files.
|
|
|
|
+ include /etc/nginx/mime.types;
|
|
|
|
+ default_type application/octet-stream;
|
|
|
|
+
|
|
|
|
+ # Format to use in log files
|
|
|
|
+ log_format main '$remote_addr - $remote_user [$time_local] "$request" '
|
|
|
|
+ '$status $body_bytes_sent "$http_referer" '
|
|
|
|
+ '"$http_user_agent" "$http_x_forwarded_for"';
|
|
|
|
+
|
|
|
|
+ # Default log files
|
|
|
|
+ # (this is only used when you don't override {error,access}_log on a server{} level)
|
|
|
|
+ access_log /var/log/nginx/access.log;
|
|
|
|
+ error_log /var/log/nginx/error.log;
|
|
|
|
+
|
|
|
|
+ # How long to allow each connection to stay idle; longer values are better
|
|
|
|
+ # for each individual client, particularly for SSL, but means that worker
|
|
|
|
+ # connections are tied up longer. (Default: 65)
|
|
|
|
+ keepalive_timeout 20;
|
|
|
|
+
|
|
|
|
+ # Speed up file transfers by using sendfile() to copy directly
|
|
|
|
+ # between descriptors rather than using read()/write().
|
|
|
|
+ sendfile on;
|
|
|
|
+
|
|
|
|
+ # Tell Nginx not to send out partial frames; this increases throughput
|
|
|
|
+ # since TCP frames are filled up before being sent out. (adds TCP_CORK)
|
|
|
|
+ tcp_nopush on;
|
|
|
|
+
|
|
|
|
+    # Setting this to "on" disables the Nagle buffering algorithm for TCP packets, which
|
|
|
|
+ # collates several smaller packets together into one larger packet, thus saving
|
|
|
|
+ # bandwidth at the cost of a nearly imperceptible increase to latency. (removes TCP_NODELAY)
|
|
|
|
+ tcp_nodelay on;
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+ # Compression
|
|
|
|
+
|
|
|
|
+ # Enable Gzip compressed.
|
|
|
|
+ gzip on;
|
|
|
|
+
|
|
|
|
+ # Enable compression both for HTTP/1.0 and HTTP/1.1 (required for CloudFront).
|
|
|
|
+ gzip_http_version 1.0;
|
|
|
|
+
|
|
|
|
+ # Compression level (1-9).
|
|
|
|
+ # 5 is a perfect compromise between size and cpu usage, offering about
|
|
|
|
+ # 75% reduction for most ascii files (almost identical to level 9).
|
|
|
|
+ gzip_comp_level 5;
|
|
|
|
+
|
|
|
|
+ # Don't compress anything that's already small and unlikely to shrink much
|
|
|
|
+ # if at all (the default is 20 bytes, which is bad as that usually leads to
|
|
|
|
+ # larger files after gzipping).
|
|
|
|
+ gzip_min_length 256;
|
|
|
|
+
|
|
|
|
+ # Compress data even for clients that are connecting to us via proxies,
|
|
|
|
+ # identified by the "Via" header (required for CloudFront).
|
|
|
|
+ gzip_proxied any;
|
|
|
|
+
|
|
|
|
+ # Tell proxies to cache both the gzipped and regular version of a resource
|
|
|
|
+ # whenever the client's Accept-Encoding capabilities header varies;
|
|
|
|
+ # Avoids the issue where a non-gzip capable client (which is extremely rare
|
|
|
|
+ # today) would display gibberish if their proxy gave them the gzipped version.
|
|
|
|
+ gzip_vary on;
|
|
|
|
+
|
|
|
|
+ # Compress all output labeled with one of the following MIME-types.
|
|
|
|
+ gzip_types
|
|
|
|
+ application/atom+xml
|
|
|
|
+ application/javascript
|
|
|
|
+ application/json
|
|
|
|
+ application/rss+xml
|
|
|
|
+ application/vnd.ms-fontobject
|
|
|
|
+ application/x-font-ttf
|
|
|
|
+ application/x-web-app-manifest+json
|
|
|
|
+ application/xhtml+xml
|
|
|
|
+ application/xml
|
|
|
|
+ font/opentype
|
|
|
|
+ image/svg+xml
|
|
|
|
+ image/x-icon
|
|
|
|
+ text/css
|
|
|
|
+ text/plain
|
|
|
|
+ text/x-component;
|
|
|
|
+    # text/html is always compressed by the gzip module (ngx_http_gzip_module)
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+    # This should be turned on if you are going to have pre-compressed copies (.gz) of
|
|
|
|
+    # static files available. If not it should be left off as it will cause extra I/O
|
|
|
|
+    # for the check. It is best if you enable this in a location{} block for
|
|
|
|
+    # a specific directory, or on an individual server{} level.
|
|
|
|
+    # gzip_static on;
|
|
|
|
+
|
|
|
|
+    # SSL
|
|
|
|
+    # SSLv3 is broken by the POODLE attack and RC4 is prohibited by RFC 7465; neither should be enabled.
|
|
|
|
+    # Note that TLSv1.1 and TLSv1.2 only work with OpenSSL v1.0.1 and higher and have limited legacy-client support.
|
|
|
|
+    ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
|
|
|
|
+    ssl_ciphers HIGH:!aNULL:!MD5:!RC4;
|
|
|
|
+ ssl_prefer_server_ciphers on;
|
|
|
|
+
|
|
|
|
+ # Optimize SSL by caching session parameters for 10 minutes. This cuts down on the number of expensive SSL handshakes.
|
|
|
|
+ # The handshake is the most CPU-intensive operation, and by default it is re-negotiated on every new/parallel connection.
|
|
|
|
+ # By enabling a cache (of type "shared between all Nginx workers"), we tell the client to re-use the already negotiated state.
|
|
|
|
+ # Further optimization can be achieved by raising keepalive_timeout, but that shouldn't be done unless you serve primarily HTTPS.
|
|
|
|
+ ssl_session_cache shared:SSL:10m; # a 1mb cache can hold about 4000 sessions, so we can hold 40000 sessions
|
|
|
|
+ ssl_session_timeout 10m;
|
|
|
|
+
|
|
|
|
+ # This default SSL certificate will be served whenever the client lacks support for SNI (Server Name Indication).
|
|
|
|
+ # Make it a symlink to the most important certificate you have, so that users of IE 8 and below on WinXP can see your main site without SSL errors.
|
|
|
|
+ #ssl_certificate /etc/nginx/default_ssl.crt;
|
|
|
|
+ #ssl_certificate_key /etc/nginx/default_ssl.key;
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+ # Passenger
|
|
|
|
+
|
|
|
|
+ #passenger_root /usr;
|
|
|
|
+ #passenger_ruby /usr/bin/ruby;
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+ # Naxsi
|
|
|
|
+
|
|
|
|
+ #include /etc/nginx/naxsi_core.rules;
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+ # Extended configuration
|
|
|
|
+
|
|
|
|
+ # More configuration parameters
|
|
|
|
+ include /etc/nginx/conf.d/*.conf;
|
|
|
|
+ # Virtual hosts
|
|
|
|
+ include /etc/nginx/sites-enabled/*;
|
|
|
|
+}
|