ref: https://github.com/RammusXu/toolkit/tree/master/k8s/echoserver
Here's an example of mounting nginx.conf from a ConfigMap.
Make sure to run kubectl rollout restart deployment echoserver after updating the ConfigMap:
because the file is mounted with subPath, the Pod only copies it from the ConfigMap when the Pod is created; it is not synced or auto-updated afterwards.
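A typical update workflow looks like this (the filename echoserver.yaml is an assumption; use whatever file holds the manifests below):

kubectl apply -f echoserver.yaml
kubectl rollout restart deployment echoserver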
apiVersion: apps/v1
kind: Deployment
metadata:
  name: echoserver
  namespace: default
spec:
  replicas: 1
  selector:
    matchLabels:
      app: echoserver
  template:
    metadata:
      labels:
        app: echoserver
    spec:
      volumes:
      - name: config
        configMap:
          name: nginx-config
      containers:
      - name: echoserver
        # image: gcr.io/google_containers/echoserver:1.10
        image: openresty/openresty:1.15.8.2-1-alpine
        ports:
        - containerPort: 8080
          name: http
        # nginx.conf override
        volumeMounts:
        - name: config
          subPath: nginx.conf
          # mountPath: /etc/nginx/nginx.conf
          mountPath: /usr/local/openresty/nginx/conf/nginx.conf
          readOnly: true
---
apiVersion: v1
kind: Service
metadata:
  name: echoserver
  namespace: default
spec:
  type: NodePort
  ports:
  - port: 80
    targetPort: http
    protocol: TCP
    name: http
  selector:
    app: echoserver
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-config
  namespace: default
data:
  nginx.conf: |-
    events {
      worker_connections 1024;
    }

    env HOSTNAME;
    env NODE_NAME;
    env POD_NAME;
    env POD_NAMESPACE;
    env POD_IP;

    http {
      default_type 'text/plain';

      # maximum allowed size of the client request body. By default this is 1m.
      # For requests with bigger bodies nginx will return error code 413.
      # http://nginx.org/en/docs/http/ngx_http_core_module.html#client_max_body_size
      client_max_body_size 10m;

      # https://blog.percy.io/tuning-nginx-behind-google-cloud-platform-http-s-load-balancer-305982ddb340
      keepalive_timeout 650;
      keepalive_requests 10000;

      # GZIP support
      gzip on;
      gzip_min_length 128;
      gzip_proxied any;
      gzip_comp_level 6;
      gzip_types text/css
                 text/plain
                 text/javascript
                 application/javascript
                 application/json
                 application/x-javascript
                 application/xml
                 application/xml+rss
                 application/xhtml+xml
                 application/x-font-ttf
                 application/x-font-opentype
                 application/vnd.ms-fontobject
                 image/svg+xml
                 image/x-icon
                 application/rss+xml
                 application/atom_xml
                 application/vnd.apple.mpegURL
                 application/x-mpegurl
                 vnd.apple.mpegURL
                 application/dash+xml;

      init_by_lua_block {
        local template = require("template")
        -- template syntax documented here:
        -- https://github.com/bungle/lua-resty-template/blob/master/README.md
        tmpl = template.compile([[
    Hostname: {{os.getenv("HOSTNAME") or "N/A"}}

    Pod Information:
    {% if os.getenv("POD_NAME") then %}
      node name: {{os.getenv("NODE_NAME") or "N/A"}}
      pod name: {{os.getenv("POD_NAME") or "N/A"}}
      pod namespace: {{os.getenv("POD_NAMESPACE") or "N/A"}}
      pod IP: {{os.getenv("POD_IP") or "N/A"}}
    {% else %}
      -no pod information available-
    {% end %}

    Server values:
      server_version=nginx: {{ngx.var.nginx_version}} - lua: {{ngx.config.ngx_lua_version}}

    Request Information:
      client_address={{ngx.var.remote_addr}}
      method={{ngx.req.get_method()}}
      real path={{ngx.var.request_uri}}
      query={{ngx.var.query_string or ""}}
      request_version={{ngx.req.http_version()}}
      request_scheme={{ngx.var.scheme}}
      request_uri={{ngx.var.scheme.."://"..ngx.var.host..":"..ngx.var.server_port..ngx.var.request_uri}}

    Request Headers:
    {% for i, key in ipairs(keys) do %}
      {{key}}={{headers[key]}}
    {% end %}

    Request Body:
    {{ngx.var.request_body or " -no body in request-"}}
    ]])
      }

      server {
        # please check the benefits of reuseport https://www.nginx.com/blog/socket-sharding-nginx-release-1-9-1
        # basically instructs to create an individual listening socket for each worker process (using the SO_REUSEPORT
        # socket option), allowing a kernel to distribute incoming connections between worker processes.
        listen 8080 default_server reuseport;
        listen 8443 default_server ssl http2 reuseport;

        ssl_certificate /certs/certificate.crt;
        ssl_certificate_key /certs/privateKey.key;

        # Replace '_' with your hostname.
        server_name _;

        location / {
          lua_need_request_body on;
          content_by_lua_block {
            ngx.header["Server"] = "echoserver"

            local headers = ngx.req.get_headers()
            local keys = {}
            for key, val in pairs(headers) do
              table.insert(keys, key)
            end
            table.sort(keys)

            ngx.say(tmpl({os=os, ngx=ngx, keys=keys, headers=headers}))
          }
        }
      }
    }
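To check the result, you can port-forward the Service and curl it (a quick sketch; the resource names match the manifests above):

kubectl port-forward service/echoserver 8080:80
curl http://localhost:8080/

The response should echo the hostname, pod information, request headers, and request body produced by the Lua template.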