Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
8 changes: 4 additions & 4 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -196,11 +196,11 @@ Add new variable to `deploy/deploy-project.sh` and specify your redis version
...
```

### Enable Horizontal pod autoscaling
### Disable Horizontal pod autoscaling

Add new variables to `deploy/deploy-project.sh` to enable pod autoscaling:
Add new variables to `deploy/deploy-project.sh` to disable pod autoscaling:

- Enable this functionality:
- Disable this functionality:
```diff
...
function deploy() {
Expand All @@ -209,7 +209,7 @@ Add new variables to `deploy/deploy-project.sh` to enable pod autoscaling:
...
)

+ ENABLE_AUTOSCALING=true
+ ENABLE_AUTOSCALING=false
...
```
- If you need more replicas, then you can adjust those variables (default values are set to 2):
Expand Down
52 changes: 33 additions & 19 deletions deploy/parts/autoscaling.sh
Original file line number Diff line number Diff line change
@@ -1,37 +1,51 @@
#!/bin/bash -e

echo -n "Prepare Autoscaling "
# Loosen Deployment rollout constraints when the autoscaler guarantees
# enough minimum replicas to absorb the disruption.
# Arguments:
#   $1 - path to the Deployment YAML file to patch in place
#   $2 - minimum replica count configured for the autoscaler
# Side effects: edits the file in place via yq.
patch_rollout() {
  local file="$1" min_replicas="$2"

  # With 4+ replicas we can afford to take 25% of pods down during a rollout.
  if (( min_replicas >= 4 )); then
    # NOTE: "25%" must be a quoted string inside the yq expression; a bare
    # 25% is not valid yq syntax and the in-place edit would fail.
    yq e -i '.spec.strategy.rollingUpdate.maxUnavailable = "25%"' "$file"
  fi

  # With 6+ replicas additionally allow a 25% surge of extra pods.
  if (( min_replicas >= 6 )); then
    yq e -i '.spec.strategy.rollingUpdate.maxSurge = "25%"' "$file"
  fi
}

assertVariable "BASE_PATH"
assertVariable "CONFIGURATION_TARGET_PATH"
assertVariable "RUNNING_PRODUCTION"

if [ -z ${ENABLE_AUTOSCALING} ]; then
ENABLE_AUTOSCALING=false
fi
if [[ "${ENABLE_AUTOSCALING:-true}" == "true" ]]; then
echo -n "Prepare Autoscaling "

if [ ${ENABLE_AUTOSCALING} = true ]; then
if [ -z ${MIN_PHP_FPM_REPLICAS} ]; then
MIN_PHP_FPM_REPLICAS=2
fi
MIN_PHP_FPM_REPLICAS="${MIN_PHP_FPM_REPLICAS:-2}"
MAX_PHP_FPM_REPLICAS="${MAX_PHP_FPM_REPLICAS:-3}"
MIN_STOREFRONT_REPLICAS="${MIN_STOREFRONT_REPLICAS:-2}"
MAX_STOREFRONT_REPLICAS="${MAX_STOREFRONT_REPLICAS:-3}"

if [ -z ${MAX_PHP_FPM_REPLICAS} ]; then
MAX_PHP_FPM_REPLICAS=3
fi

if [ -z ${MIN_STOREFRONT_REPLICAS} ]; then
if [[ "${RUNNING_PRODUCTION}" -eq 0 || "${DOWNSCALE_RESOURCE:-0}" -eq 1 ]]; then
MIN_PHP_FPM_REPLICAS=2
MAX_PHP_FPM_REPLICAS=2
MIN_STOREFRONT_REPLICAS=2
fi

if [ -z ${MAX_STOREFRONT_REPLICAS} ]; then
MAX_STOREFRONT_REPLICAS=3
MAX_STOREFRONT_REPLICAS=2
fi

yq e -i ".spec.minReplicas=${MIN_PHP_FPM_REPLICAS}" "${CONFIGURATION_TARGET_PATH}/horizontalPodAutoscaler.yaml"
yq e -i ".spec.maxReplicas=${MAX_PHP_FPM_REPLICAS}" "${CONFIGURATION_TARGET_PATH}/horizontalPodAutoscaler.yaml"

yq e -i ".spec.minReplicas=${MIN_STOREFRONT_REPLICAS}" "${CONFIGURATION_TARGET_PATH}/horizontalStorefrontAutoscaler.yaml"
yq e -i ".spec.maxReplicas=${MAX_STOREFRONT_REPLICAS}" "${CONFIGURATION_TARGET_PATH}/horizontalStorefrontAutoscaler.yaml"
fi

patch_rollout "${CONFIGURATION_TARGET_PATH}/deployments/webserver-php-fpm.yaml" "${MIN_PHP_FPM_REPLICAS}"
patch_rollout "${CONFIGURATION_TARGET_PATH}/deployments/storefront.yaml" "${MIN_STOREFRONT_REPLICAS}"

yq e -i '
.resources += [
"../../horizontalPodAutoscaler.yaml",
"../../horizontalStorefrontAutoscaler.yaml"
]
' "${CONFIGURATION_TARGET_PATH}/kustomize/webserver/kustomization.yaml"

echo -e "[${GREEN}OK${NO_COLOR}]"
echo -e "[${GREEN}OK${NO_COLOR}]"
fi
61 changes: 9 additions & 52 deletions deploy/parts/deploy.sh
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,6 @@ assertVariable "DEPLOY_REGISTER_USER"
assertVariable "DEPLOY_REGISTER_PASSWORD"

assertVariable "BASIC_AUTH_PATH"
assertVariable "ENABLE_AUTOSCALING"
assertVariable "RUNNING_PRODUCTION"
FIRST_DEPLOY_LOAD_DEMO_DATA=${FIRST_DEPLOY_LOAD_DEMO_DATA:-0}

Expand All @@ -26,11 +25,7 @@ echo -n " Delete secret for docker registry "
runCommand "SKIP" "kubectl delete secret dockerregistry -n ${PROJECT_NAME}"

echo -n " Create new secret for docker registry "
if [ "${GCLOUD_DEPLOY}" = "true" ]; then
runCommand "ERROR" "kubectl create secret docker-registry dockerregistry --docker-server=eu.gcr.io --docker-username _json_key --docker-email ${GCLOUD_CONTAINER_REGISTRY_EMAIL} --docker-password='${GCLOUD_CONTAINER_REGISTRY_ACCOUNT}' -n ${PROJECT_NAME}"
else
runCommand "ERROR" "kubectl create secret docker-registry dockerregistry --docker-server=${CI_REGISTRY} --docker-username=${DEPLOY_REGISTER_USER} --docker-password=${DEPLOY_REGISTER_PASSWORD} -n ${PROJECT_NAME}"
fi
runCommand "ERROR" "kubectl create secret docker-registry dockerregistry --docker-server=${CI_REGISTRY} --docker-username=${DEPLOY_REGISTER_USER} --docker-password=${DEPLOY_REGISTER_PASSWORD} -n ${PROJECT_NAME}"

if [ ${RUNNING_PRODUCTION} -eq "0" ] || [ ${#FORCE_HTTP_AUTH_IN_PRODUCTION[@]} -ne "0" ]; then
echo -n " Create or update secret for http auth "
Expand All @@ -51,14 +46,11 @@ fi
if [ "${RUNNING_PRODUCTION}" -eq "0" ] || [ "${DOWNSCALE_RESOURCE:-0}" -eq "1" ]; then
echo -n " Replace pods CPU requests to minimum (for Devel cluster only) "

yq e -i '.spec.template.spec.containers[0].resources.requests.cpu = "0.01"' "${CONFIGURATION_TARGET_PATH}/deployments/storefront.yaml"
yq e -i '.spec.template.spec.containers[0].resources.requests.cpu = "0.01"' "${CONFIGURATION_TARGET_PATH}/deployments/webserver-php-fpm.yaml"
yq e -i '.spec.template.spec.containers[1].resources.requests.cpu = "0.01"' "${CONFIGURATION_TARGET_PATH}/deployments/webserver-php-fpm.yaml"
yq e -i '.spec.template.spec.containers[1].resources.requests.cpu = "0.01"' "${CONFIGURATION_TARGET_PATH}/deployments/redis.yaml"
yq e -i '.spec.template.spec.containers[0].resources.requests.cpu = "0.01"' "${CONFIGURATION_TARGET_PATH}/deployments/rabbitmq.yaml"
yq e -i '.spec.template.spec.containers[0].resources.requests.cpu = "0.1"' "${CONFIGURATION_TARGET_PATH}/deployments/storefront.yaml"
yq e -i '.spec.template.spec.containers[0].resources.requests.cpu = "0.1"' "${CONFIGURATION_TARGET_PATH}/deployments/webserver-php-fpm.yaml"

yq e -i '.spec.template.spec.containers[0].resources.requests.memory = "100Mi"' "${CONFIGURATION_TARGET_PATH}/deployments/webserver-php-fpm.yaml"
yq e -i '.spec.template.spec.containers[1].resources.requests.memory = "100Mi"' "${CONFIGURATION_TARGET_PATH}/deployments/redis.yaml"
yq e -i '.spec.template.spec.containers[0].resources.requests.memory = "100Mi"' "${CONFIGURATION_TARGET_PATH}/deployments/redis.yaml"

echo -e "[${GREEN}OK${NO_COLOR}]"
else
Expand All @@ -77,14 +69,10 @@ else
fi
fi

DEPLOYED_CRON_POD=$(kubectl get pods --namespace=${PROJECT_NAME} --field-selector=status.phase=Running -l app=cron -o=jsonpath='{.items[?(@.status.containerStatuses[0].state.running)].metadata.name}') || true

if [[ -n ${DEPLOYED_CRON_POD} ]]; then
echo -n "Lock crons to prevent run next iteration "
runCommand "ERROR" "kubectl exec -t --namespace=${PROJECT_NAME} ${DEPLOYED_CRON_POD} -- bash -c \"./phing -S cron-lock > /dev/null 2>&1 & disown\""

echo -n "Waiting until all cron instances are done "
runCommand "ERROR" "kubectl exec --namespace=${PROJECT_NAME} ${DEPLOYED_CRON_POD} -- ./phing -S cron-watch"
if kubectl get deployment/cron --namespace="${PROJECT_NAME}" >/dev/null 2>&1; then
echo -n "Waiting until all cron instances are done and stop cron"
runCommand "ERROR" "kubectl scale deployment/cron --namespace=${PROJECT_NAME} --replicas=0"
runCommand "ERROR" "kubectl rollout status deployment/cron --namespace=${PROJECT_NAME} --timeout=60m"
fi

echo "Migrate Application (database migrations, elasticsearch migrations, ...):"
Expand Down Expand Up @@ -130,7 +118,7 @@ if [ ${MIGRATION_COMPLETE_EXIT_CODE} -eq 1 ]; then
echo -e "[${RED}ERROR${NO_COLOR}]"

echo -n "Restore previous cron container "
runCommand "SKIP" "kubectl delete pod --namespace=${PROJECT_NAME} ${DEPLOYED_CRON_POD}"
runCommand "SKIP" "kubectl scale deployment/cron --namespace=${PROJECT_NAME} --replicas=1"

RUNNING_WEBSERVER_PHP_FPM_POD=$(kubectl get pods --namespace=${PROJECT_NAME} --field-selector=status.phase=Running -l app=webserver-php-fpm -o=jsonpath='{.items[0].metadata.name}')

Expand All @@ -151,9 +139,6 @@ else
kubectl logs job/migrate-application --namespace=${PROJECT_NAME}
echo -e "section_end:`date +%s`:migrate_application_logs_section\r\e[0K"
echo ""

echo -n "Deploy new cron container "
runCommand "ERROR" "kustomize build --load_restrictor none \"${CONFIGURATION_TARGET_PATH}/kustomize/cron\" | kubectl apply -f -"
fi

echo "Deploy new Webserver and PHP-FPM container:"
Expand All @@ -169,40 +154,12 @@ if [ $DISPLAY_FINAL_CONFIGURATION -eq "1" ]; then
echo ""
fi

if [ ${ENABLE_AUTOSCALING} = true ]; then
echo -n " Delete previous Horizontal pod autoscaler for Backend "
runCommand "SKIP" "kubectl delete hpa webserver-php-fpm --namespace=${PROJECT_NAME}"

echo -n " Delete previous Horizontal pod autoscaler for Storefront "
runCommand "SKIP" "kubectl delete hpa storefront --namespace=${PROJECT_NAME}"
fi

echo -n " Deploy Webserver and PHP-FPM container with Storefront"
runCommand "ERROR" "kustomize build --load_restrictor none \"${CONFIGURATION_TARGET_PATH}/kustomize/webserver\" | kubectl apply -f -"

echo -n " Waiting for start new PHP-FPM and Storefront container (In case of fail you need to manually check what is state of application)"
runCommand "ERROR" "kubectl rollout status --namespace=${PROJECT_NAME} deployment/webserver-php-fpm deployment/storefront --watch"

if [ ${ENABLE_AUTOSCALING} = true ]; then
echo -n " Deploy Horizontal pod autoscaler for Backend "

if [ ${RUNNING_PRODUCTION} -eq "0" ]; then
yq e -i '.spec.minReplicas = 2' "${CONFIGURATION_TARGET_PATH}/horizontalPodAutoscaler.yaml"
yq e -i '.spec.maxReplicas = 2' "${CONFIGURATION_TARGET_PATH}/horizontalPodAutoscaler.yaml"
fi

runCommand "ERROR" "kubectl apply -f ${CONFIGURATION_TARGET_PATH}/horizontalPodAutoscaler.yaml"

echo -n " Deploy Horizontal pod autoscaler for Storefront "

if [ ${RUNNING_PRODUCTION} -eq "0" ]; then
yq e -i '.spec.minReplicas = 2' "${CONFIGURATION_TARGET_PATH}/horizontalStorefrontAutoscaler.yaml"
yq e -i '.spec.maxReplicas = 2' "${CONFIGURATION_TARGET_PATH}/horizontalStorefrontAutoscaler.yaml"
fi

runCommand "ERROR" "kubectl apply -f ${CONFIGURATION_TARGET_PATH}/horizontalStorefrontAutoscaler.yaml"
fi

RUNNING_WEBSERVER_PHP_FPM_POD=$(kubectl get pods --namespace=${PROJECT_NAME} --field-selector=status.phase=Running -l app=webserver-php-fpm -o=jsonpath='{.items[?(@.status.containerStatuses[0].state.running)].metadata.name}')

echo -n "Disable maintenance page "
Expand Down
8 changes: 8 additions & 0 deletions deploy/parts/environment-variables.sh
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,14 @@ for key in "${!ENVIRONMENT_VARIABLES[@]}"; do
}
" "${CONFIGURATION_TARGET_PATH}/deployments/webserver-php-fpm.yaml"

# Webserver PHP-FPM warmup container
yq e -i "
.spec.template.spec.initContainers[1].env[${ITERATOR}] = {
\"name\": \"${key}\",
\"value\": \"${ENVIRONMENT_VARIABLES[$key]}\"
}
" "${CONFIGURATION_TARGET_PATH}/deployments/webserver-php-fpm.yaml"

# Cron deployment
yq e -i "
.spec.template.spec.containers[0].env[${ITERATOR}] = {
Expand Down
2 changes: 0 additions & 2 deletions docs/deploy-project.sh
Original file line number Diff line number Diff line change
Expand Up @@ -11,8 +11,6 @@ function deploy() {
DOMAIN_HOSTNAME_2
)

ENABLE_AUTOSCALING=true

declare -A ENVIRONMENT_VARIABLES=(
["APP_SECRET"]=${APP_SECRET}
["DATABASE_HOST"]=${POSTGRES_DATABASE_IP_ADDRESS}
Expand Down
9 changes: 8 additions & 1 deletion kubernetes/configmap/nginx.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -73,8 +73,15 @@ data:
root /var/www/html/web;

location /health {
stub_status on;
access_log off;

include fastcgi_params;
fastcgi_read_timeout 3s;
fastcgi_param SCRIPT_FILENAME /ping;
fastcgi_param SCRIPT_NAME /ping;
fastcgi_param REQUEST_METHOD GET;

fastcgi_pass php-upstream;
}
}

Expand Down
2 changes: 2 additions & 0 deletions kubernetes/configmap/production-php-fpm.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,8 @@ data:
[www]

listen = 127.0.0.1:9000
ping.path = /ping
ping.response = pong

pm = dynamic
pm.max_children = 20
Expand Down
34 changes: 23 additions & 11 deletions kubernetes/deployments/cron.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -5,13 +5,9 @@ metadata:
labels:
app: cron
spec:
progressDeadlineSeconds: 1500
replicas: 1
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
type: RollingUpdate
type: Recreate
selector:
matchLabels:
app: cron
Expand All @@ -25,6 +21,7 @@ spec:
labels:
app: cron
spec:
terminationGracePeriodSeconds: 3000
tolerations:
- key: "workload"
operator: "Equal"
Expand All @@ -38,8 +35,7 @@ spec:
matchExpressions:
- key: workload
operator: In
values:
- background
values: ["background"]
volumes:
- name: domains-urls
configMap:
Expand All @@ -61,14 +57,30 @@ spec:
runAsUser: 0
imagePullPolicy: IfNotPresent
workingDir: /var/www/html
command: ["/bin/sh","-c"]
args: ["cd /var/www/html && ./phing warmup > /dev/null && rm -rf /tmp/log-pipe && mkfifo /tmp/log-pipe && chmod 666 /tmp/log-pipe && crontab -u root /var/spool/cron/template && { crond || cron; } && stdbuf -o0 tail -n +1 -f /tmp/log-pipe"]
command: ["/bin/sh","-lc"]
args:
- >
./phing warmup > /dev/null

# FIFO for logs
rm -f /tmp/log-pipe && mkfifo /tmp/log-pipe && chmod 666 /tmp/log-pipe

# crontab
crontab -u root /var/spool/cron/template

# run log tail in the background
stdbuf -o0 tail -n +1 -f /tmp/log-pipe &

exec crond -f || exec cron -f
lifecycle:
preStop:
exec:
command:
- sleep
- '5'
- /bin/sh
- -lc
- |
( ./bin/console deploy:cron:lock > /tmp/cron-lock.log 2>&1 || true ) &
./bin/console deploy:cron:watch || true
volumeMounts:
- name: domains-urls
mountPath: /var/www/html/{{DOMAINS_URLS_FILEPATH}}
Expand Down
32 changes: 32 additions & 0 deletions kubernetes/deployments/rabbitmq.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,10 @@ metadata:
spec:
serviceName: rabbitmq
replicas: 1
updateStrategy:
type: RollingUpdate
rollingUpdate:
partition: 0
selector:
matchLabels:
app: rabbitmq
Expand All @@ -30,6 +34,9 @@ spec:
image: rabbitmq:4.1-management-alpine
ports:
- name: rabbitmq
containerPort: 5672
protocol: TCP
- name: rabbitmq-management
containerPort: 15672
protocol: TCP
- name: exporter
Expand All @@ -40,12 +47,37 @@ spec:
value: "{{RABBITMQ_DEFAULT_USER}}"
- name: RABBITMQ_DEFAULT_PASS
value: "{{RABBITMQ_DEFAULT_PASS}}"
lifecycle:
preStop:
exec:
command: [ "sh", "-c", "rabbitmqctl stop_app && sleep 5" ]
resources:
requests:
cpu: "20m"
memory: "256Mi"
limits:
memory: "512Mi"
startupProbe:
exec:
command: [ "rabbitmq-diagnostics", "ping" ]
failureThreshold: 30
periodSeconds: 10
livenessProbe:
exec:
command: ["rabbitmq-diagnostics", "ping"]
periodSeconds: 30
timeoutSeconds: 15
failureThreshold: 5
readinessProbe:
exec:
command: [ "rabbitmq-diagnostics", "-q", "check_running" ]
periodSeconds: 10
timeoutSeconds: 10
failureThreshold: 3
volumeMounts:
- name: rabbitmq-data
mountPath: /var/lib/rabbitmq
terminationGracePeriodSeconds: 90
imagePullSecrets:
- name: dockerregistry
volumeClaimTemplates:
Expand Down
Loading