From 89c4408ff356aa71fec0f9792a744b6cf6b77762 Mon Sep 17 00:00:00 2001 From: mohitrajain Date: Fri, 9 Jan 2026 18:11:55 +0100 Subject: [PATCH 1/6] fix: wpb-22439 remove repmgr_node_config from group_vars --- .../offline/group_vars/postgresql/postgresql.yml | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/ansible/inventory/offline/group_vars/postgresql/postgresql.yml b/ansible/inventory/offline/group_vars/postgresql/postgresql.yml index decd66b19..351b99539 100644 --- a/ansible/inventory/offline/group_vars/postgresql/postgresql.yml +++ b/ansible/inventory/offline/group_vars/postgresql/postgresql.yml @@ -24,19 +24,8 @@ repmgr_namespace: "{{ wire_namespace | default('default') }}" wire_pg_secret_name: "wire-postgresql-external-secret" # Node configuration for repmgr -repmgr_node_config: - postgresql1: # Maps to postgresql_rw group - node_id: 1 - priority: 150 - role: primary - postgresql2: # Maps to first postgresql_ro - node_id: 2 - priority: 100 - role: standby - postgresql3: # Maps to second postgresql_ro - node_id: 3 - priority: 50 - role: standby +# NOTE: repmgr_node_config is defined in the inventory file ansible/inventory/offline/99-static, ansible/inventory/offline/staging.yml and terraform/examples/wire-server-deploy-offline-hetzner/outputs.tf +# to allow environment-specific node mappings. Do not define here. # repmgr settings # repmgrd monitoring and reconnection configuration From 96851c750ff2b6fd0d0ae54cf4d13b63b7d92796 Mon Sep 17 00:00:00 2001 From: mohitrajain Date: Fri, 9 Jan 2026 18:12:41 +0100 Subject: [PATCH 2/6] fix: wpb-22439 add repmgr_node_config to node specific inventory --- ansible/inventory/offline/99-static | 14 +++++++++++++- offline/postgresql-cluster.md | 14 +++++++++++++- .../outputs.tf | 17 +++++++++++++++++ 3 files changed, 43 insertions(+), 2 deletions(-) diff --git a/ansible/inventory/offline/99-static b/ansible/inventory/offline/99-static index 42098caa0..f6e469574 100644 --- a/ansible/inventory/offline/99-static +++ b/ansible/inventory/offline/99-static @@ -83,7 +83,19 @@ [postgresql:vars] postgresql_network_interface = enp1s0 - +repmgr_node_config: + postgresql1: # Maps to postgresql_rw group + node_id: 1 + priority: 150 + role: primary + postgresql2: # Maps to first postgresql_ro + node_id: 2 + priority: 100 + role: standby + postgresql3: # Maps to second postgresql_ro + node_id: 3 + priority: 50 + role: standby [elasticsearch:vars] # elasticsearch_network_interface = enp1s0 diff --git a/offline/postgresql-cluster.md b/offline/postgresql-cluster.md index caaac441f..003c2b7c0 100644 --- a/offline/postgresql-cluster.md +++ b/offline/postgresql-cluster.md @@ -177,7 +177,19 @@ postgresql3 ansible_host=192.168.122.206 [postgresql:vars] postgresql_network_interface = enp1s0 - +repmgr_node_config: + postgresql1: # Maps to postgresql_rw group + node_id: 1 + priority: 150 + role: primary + postgresql2: # Maps to first postgresql_ro + node_id: 2 + priority: 100 + role: standby + postgresql3: # Maps to second postgresql_ro + node_id: 3 + priority: 50 + role: standby # All PostgreSQL nodes [postgresql] diff --git a/terraform/examples/wire-server-deploy-offline-hetzner/outputs.tf b/terraform/examples/wire-server-deploy-offline-hetzner/outputs.tf index b468e387c..5bfebe101 100644 --- a/terraform/examples/wire-server-deploy-offline-hetzner/outputs.tf +++ b/terraform/examples/wire-server-deploy-offline-hetzner/outputs.tf @@ -162,6 +162,23 @@ output "static-inventory" { vars = { wire_dbname = "wire-server" postgresql_network_interface = 
"enp7s0" + repmgr_node_config = { + postgresql1 = { + node_id = 1 + priority = 150 + role = "primary" + } + postgresql2 = { + node_id = 2 + priority = 100 + role = "standby" + } + postgresql3 = { + node_id = 3 + priority = 50 + role = "standby" + } + } } } postgresql_rw = { From 823f913e63722c7b53e1d40b12f5d44674a3e6bb Mon Sep 17 00:00:00 2001 From: mohitrajain Date: Fri, 9 Jan 2026 18:13:28 +0100 Subject: [PATCH 3/6] fix: wpb-22439 add node specific repmgr_node_config to wiab-staging inventory --- ansible/inventory/offline/staging.yml | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/ansible/inventory/offline/staging.yml b/ansible/inventory/offline/staging.yml index f5c8fd8f7..3dbea32ac 100644 --- a/ansible/inventory/offline/staging.yml +++ b/ansible/inventory/offline/staging.yml @@ -88,6 +88,19 @@ postgresql: vars: wire_dbname: wire-server postgresql_network_interface: enp1s0 + repmgr_node_config: + datanode1: # Maps to postgresql_rw group + node_id: 1 + priority: 150 + role: primary + datanode2: # Maps to first postgresql_ro + node_id: 2 + priority: 100 + role: standby + datanode3: # Maps to second postgresql_ro + node_id: 3 + priority: 50 + role: standby postgresql_rw: hosts: From 913f97fec4929972a78c6195e57d2b0bfa260587 Mon Sep 17 00:00:00 2001 From: mohitrajain Date: Fri, 9 Jan 2026 18:15:05 +0100 Subject: [PATCH 4/6] fix: wpb-22439 add changelog --- changelog.d/3-deploy-builds/postgresql_repmgr_node_config | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/3-deploy-builds/postgresql_repmgr_node_config diff --git a/changelog.d/3-deploy-builds/postgresql_repmgr_node_config b/changelog.d/3-deploy-builds/postgresql_repmgr_node_config new file mode 100644 index 000000000..161d93894 --- /dev/null +++ b/changelog.d/3-deploy-builds/postgresql_repmgr_node_config @@ -0,0 +1 @@ +Changed: remove repmgr_node_config from group_vars and put with node inventory From 84e6103f1c5b4236ab6fbb23fbd318df34d67a81 Mon Sep 17 00:00:00 2001 From: mohitrajain Date: Thu, 29 Jan 2026 20:19:54 +0100 Subject: [PATCH 5/6] fix: wpb-22439 add comment on hostname requirement directly for rmq-cluster group --- ansible/inventory/offline/staging.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ansible/inventory/offline/staging.yml b/ansible/inventory/offline/staging.yml index 3dbea32ac..4c4bf4dbf 100644 --- a/ansible/inventory/offline/staging.yml +++ b/ansible/inventory/offline/staging.yml @@ -38,8 +38,6 @@ k8s-cluster: kube-master: {} datanodes: - # host names here must match each node's actual hostname - # its a requirement for rabbitmq hosts: datanode1: ansible_host: "datanode1_ip" @@ -76,6 +74,8 @@ minio: rmq-cluster: children: + # host names here must match each node's actual hostname + # its a requirement for rabbitmq datanodes: {} vars: rabbitmq_network_interface: enp1s0 From c304abfd4c93d84654e95850421bd2a170b4b64e Mon Sep 17 00:00:00 2001 From: mohit rajain Date: Wed, 18 Feb 2026 11:50:27 +0100 Subject: [PATCH 6/6] wpb-22439 7 docs wiab staging (#851) * fix: wpb-21356 fix coturn port ranges * build: wpb-21356 add documentation for wiab-staging and clean old single_hetzner_machine_installation * build: wpb-21356 add changelog and small fix for offline-vm-setup.sh * fix: wpb-22439 memory requirements for VMs in wiab-staging.md doc * fix: wpb-22846 update wiab-staging documentation for hairpin networking * fix: wpb-22439 fix documentation typo * fix: wpb-22439 fix documentation suggestions * fix: wpb-22439 update wiab-staging documentation * wpb-22439 8 
clean old wiab staging script and workflows (#852) * fix: wpb-21356 debugging ssh banner issues * fix: wpb-21356 disable flows on push and removing the need for the default-build for verify steps * fix: wpb-21356 remove static build hashes and enabled bundle build verification in offline.yml workflow * refactor: wpb-21356 remove old deploy-wiab workflow as it has already been incorporated in offline.yml * refactor: wpb-21356 remove old autodeploy.sh as offline/cd_staging.sh will replace it * refactor: wpb-21356 remove references of old autodeploy.sh, wiab-hetzner playbook and documentation --- .github/workflows/deploy-wiab.yml | 39 -- bin/autodeploy.sh | 450 ------------------ changelog.d/3-deploy-builds/wiab-staging | 1 + offline/coturn.md | 10 +- offline/docs_ubuntu_22.04.md | 5 +- .../single_hetzner_machine_installation.md | 120 ----- offline/wiab-staging.md | 246 ++++++++++ 7 files changed, 253 insertions(+), 618 deletions(-) delete mode 100644 .github/workflows/deploy-wiab.yml delete mode 100755 bin/autodeploy.sh delete mode 100644 offline/single_hetzner_machine_installation.md create mode 100644 offline/wiab-staging.md diff --git a/.github/workflows/deploy-wiab.yml b/.github/workflows/deploy-wiab.yml deleted file mode 100644 index ad4b0a17f..000000000 --- a/.github/workflows/deploy-wiab.yml +++ /dev/null @@ -1,39 +0,0 @@ -# This playbook is not-up-to-date, requires to be updated to match with current developments -# A new WIAB (wire in a box) dev solution has been created https://docs.wire.com/latest/how-to/install/demo-wiab.html and can be used until this (wiab-staging) gets updated -name: Deploy on Hetzner WIAB setup -on: - workflow_run: - workflows: ["Prepare custom offline package"] - types: - - completed - -jobs: - deploy: - runs-on: ubuntu-latest - concurrency: - group: autodeploy-script - cancel-in-progress: false - - steps: - # Step 1: Checkout the repository code - - name: Checkout code - uses: actions/checkout@v3 - - # Step 2: Set up SSH key for remote access - - name: Set up SSH key - uses: webfactory/ssh-agent@v0.5.3 - with: - ssh-private-key: ${{ secrets.WIAB_PRIVATE_SSH_KEY }} - - # Step 3: Get the latest commit SHA, for the artifact - - name: Get latest commit SHA - id: get_commit_sha - run: | - COMMIT_SHA=$(git rev-parse HEAD) - echo "commit_sha=$COMMIT_SHA" >> $GITHUB_ENV - - # Step 4: Run the autodeploy script - - name: Run Auto Deploy Script - run: | - cd bin - ./autodeploy.sh --artifact-hash ${{ env.COMMIT_SHA }} --target-domain wiab-test-box.wire.link --force-redeploy diff --git a/bin/autodeploy.sh b/bin/autodeploy.sh deleted file mode 100755 index d7506cf3b..000000000 --- a/bin/autodeploy.sh +++ /dev/null @@ -1,450 +0,0 @@ -#!/usr/bin/env bash -# This script is not-up-to-date, requires to be updated to match with current developments -# A new WIAB (wire in a box) dev solution has been created https://docs.wire.com/latest/how-to/install/demo-wiab.html and can be used until this (wiab-staging) gets updated - -# shellcheck disable=SC2087 - -# This script can be replaced with a simpler solution of wiab-demo installtion -# https://docs.wire.com/latest/how-to/install/demo-wiab.html - -set -Eeuo pipefail - -msg() { - echo >&2 -e "${1-}" -} - -trap cleanup SIGINT SIGTERM ERR EXIT - -usage() { - cat </dev/null 2>&1 ; then - msg "INFO: DNS A record exists: $SUBDOMAIN.$TARGET_SYSTEM" - else - die "ERROR: DNS A record for $SUBDOMAIN.$TARGET_SYSTEM does not exist. Exiting. Please check DNS record set." 
- fi -done - -if ssh -q -o StrictHostKeyChecking=no -o ConnectTimeout=5 -p "$SSH_PORT" "$SSH_USER"@webapp."$TARGET_SYSTEM" id | grep -q "$SSH_USER"; then - msg "" - msg "INFO: Successfully logged into $TARGET_SYSTEM as $SSH_USER" -else - die "ERROR: Can't log into $TARGET_SYSTEM via SSH, please check SSH connectivity." -fi - - -if curl --head --silent --fail https://s3-eu-west-1.amazonaws.com/public.wire.com/artifacts/wire-server-deploy-static-"$ARTIFACT_HASH".tgz >/dev/null 2>&1 ; then - msg "INFO: Artifact exists https://s3-eu-west-1.amazonaws.com/public.wire.com/artifacts/wire-server-deploy-static-$ARTIFACT_HASH.tgz" -else - die "ERROR: No artifact found via https://s3-eu-west-1.amazonaws.com/public.wire.com/artifacts/wire-server-deploy-static-$ARTIFACT_HASH.tgz" -fi - -system_cleanup_meta() { - msg "" - msg "INFO: Cleaning up all VMs, docker resources and wire-server-deploy files on $TARGET_SYSTEM." - msg "" - sleep 5 - ssh -p "$SSH_PORT" -o StrictHostKeyChecking=no "$SSH_USER"@webapp."$TARGET_SYSTEM" "bash -s" < /dev/null; then - for VM in $(virsh list --all --name); do virsh destroy "$VM"; virsh undefine "$VM" --remove-all-storage; done - fi - if which docker > /dev/null; then - docker system prune -a -f - fi - rm -f /home/$DEMO_USER/.ssh/known_hosts - rm -rf /home/$DEMO_USER/wire-server-deploy - rm -f /home/$DEMO_USER/wire-server-deploy-static-*.tgz -} - -preprovision_hetzner() { - msg "" - msg "INFO: running local ansible playbook for inital server deployment." - msg "INFO: This will setup up the Hetzner system with basic defaults, download and unpack the wire-server-deploy artifact." - sleep 5 - # on Mac devices C.UTF-8 is not available - if [[ $(uname) == "Darwin" ]]; then - export LC_ALL=en_US.UTF-8 - else - export LC_ALL=C.UTF-8 - fi - ansible-playbook ../ansible/hetzner-single-deploy.yml -e "artifact_hash=$ARTIFACT_HASH" -e "ansible_ssh_common_args='-o ServerAliveInterval=30 -o ServerAliveCountMax=10 -o ControlMaster=auto -o ControlPersist=180m'" -i $SSH_USER@webapp."$TARGET_SYSTEM", --diff -} - -remote_deployment() { - msg() { - echo >&2 -e "${1-}" - } - cd $SCRIPT_DIR &>/dev/null || exit 1 - - bash bin/offline-vm-setup.sh - msg "" - while sudo virsh list --all | grep -Fq running; do - sleep 20 - msg "INFO: VM deployment still in progress ..." - done - sleep 20 - msg "" - msg "INFO: VM deployment done. Starting all VMs:" - msg "" - for VM in $(sudo virsh list --all --name); do sudo virsh start "$VM"; done - sleep 60 - - msg "" - msg "INFO: Setting up offline environment (this will take a while)." - msg "" - # Rather than sourcing wire-server-deploy/bin/offline-env.sh, we invoke - # the relevant commands below, declaring "d" as a function instead of an alias. - ZAUTH_CONTAINER=$(sudo docker load -i "$SCRIPT_DIR"/containers-adminhost/quay.io_wire_zauth_*.tar | awk '{print $3}') - export ZAUTH_CONTAINER - WSD_CONTAINER=$(sudo docker load -i "$SCRIPT_DIR"/containers-adminhost/container-wire-server-deploy.tgz | awk '{print $3}') - d() { - sudo docker run --network=host -v "${SSH_AUTH_SOCK:-nonexistent}":/ssh-agent -e SSH_AUTH_SOCK=/ssh-agent -v "$HOME"/.ssh:/root/.ssh -v "$PWD":/wire-server-deploy "$WSD_CONTAINER" "$@" - } - export -f d - - bash bin/offline-secrets.sh - - HOST_IP=$(dig @resolver4.opendns.com myip.opendns.com +short) - - cat >ansible/inventory/offline/hosts.ini</dev/null) - if [[ $? -eq 0 && -n "$podCIDR" ]]; then - sed -i "s|RELAY_NETWORKS: \".*\"|RELAY_NETWORKS: \":${podCIDR}\"|" $SMTP_VALUES_FILE - else - echo "Failed to fetch podSubnet. 
Attention using the default value: $(grep -i RELAY_NETWORKS $SMTP_VALUES_FILE)" - fi - d helm install smtp ./charts/smtp --values $SMTP_VALUES_FILE - - d helm install reaper ./charts/reaper - - cp values/wire-server/prod-values.example.yaml values/wire-server/values.yaml - sed -i "s/example.com/$TARGET_SYSTEM/g" values/wire-server/values.yaml - sed -i "s/# - \"turn::3478\"/- \"turn:$HOST_IP:3478\"/g" values/wire-server/values.yaml - sed -i "s/# - \"turn::3478?transport=tcp\"/- \"turn:$HOST_IP:3478?transport=tcp\"/g" values/wire-server/values.yaml - - d helm install wire-server ./charts/wire-server --timeout=15m0s --values ./values/wire-server/values.yaml --values ./values/wire-server/secrets.yaml - - sed -i "s/example.com/$TARGET_SYSTEM/g" values/webapp/prod-values.example.yaml - d helm install webapp ./charts/webapp --values ./values/webapp/prod-values.example.yaml - - sed -i "s/example.com/$TARGET_SYSTEM/g" values/team-settings/prod-values.example.yaml - d helm install team-settings ./charts/team-settings --values ./values/team-settings/prod-values.example.yaml --values ./values/team-settings/prod-secrets.example.yaml - - sed -i "s/example.com/$TARGET_SYSTEM/g" values/account-pages/prod-values.example.yaml - d helm install account-pages ./charts/account-pages --values ./values/account-pages/prod-values.example.yaml - - cp values/ingress-nginx-controller/prod-values.example.yaml ./values/ingress-nginx-controller/values.yaml - d helm install ingress-nginx-controller ./charts/ingress-nginx-controller --values ./values/ingress-nginx-controller/values.yaml - - KUBENODEIP=$(d kubectl get pods -l app.kubernetes.io/name=ingress-nginx -o=custom-columns=IP:.status.hostIP --no-headers) - sudo sed -i "s/define KUBENODEIP.*/define KUBENODEIP = $KUBENODEIP/" /etc/nftables.conf - sudo systemctl restart nftables - - INGRESSNODE=$(d kubectl get pods -l app.kubernetes.io/name=ingress-nginx -o=custom-columns=NODE:.spec.nodeName --no-headers) - d kubectl cordon "$INGRESSNODE" - - cp ./values/nginx-ingress-services/prod-values.example.yaml ./values/nginx-ingress-services/values.yaml - cp ./values/nginx-ingress-services/prod-secrets.example.yaml ./values/nginx-ingress-services/secrets.yaml - sed -i 's/useCertManager: false/useCertManager: true/g' values/nginx-ingress-services/values.yaml - sed -i 's/certmasterEmail:/certmasterEmail: backend+wiabautodeploy@wire.com/g' values/nginx-ingress-services/values.yaml - sed -i "s/example.com/$TARGET_SYSTEM/" values/nginx-ingress-services/values.yaml - - d kubectl create namespace cert-manager-ns - d helm upgrade --install -n cert-manager-ns --set 'installCRDs=true' cert-manager charts/cert-manager --values values/cert-manager/prod-values.example.yaml - - d kubectl uncordon "$INGRESSNODE" - - d helm upgrade --install nginx-ingress-services charts/nginx-ingress-services -f values/nginx-ingress-services/values.yaml - - d kubectl get certificate - - cp values/sftd/prod-values.example.yaml values/sftd/values.yaml - sed -i "s/webapp.example.com/webapp.$TARGET_SYSTEM/" values/sftd/values.yaml - sed -i "s/sftd.example.com/sftd.$TARGET_SYSTEM/" values/sftd/values.yaml - sed -i 's/name: letsencrypt-prod/name: letsencrypt-http01/' values/sftd/values.yaml - sed -i "s/replicaCount: 3/replicaCount: 1/" values/sftd/values.yaml - d kubectl label node kubenode1 wire.com/role=sftd - d helm upgrade --install sftd ./charts/sftd --set 'nodeSelector.wire\.com/role=sftd' --set 'node_annotations="{'wire\.com/external-ip': '"$HOST_IP"'}"' --values values/sftd/values.yaml - - 
ZREST_SECRET=$(grep -A1 turn values/wire-server/secrets.yaml | grep secret | tr -d '"' | awk '{print $NF}') - - cat >values/coturn/values.yaml<values/coturn/secrets.yaml</dev/null" || echo "false") -EXISTING_VMS=$(ssh -p "$SSH_PORT" -o StrictHostKeyChecking=no "$SSH_USER"@webapp."$TARGET_SYSTEM" "virsh list --all --name" || echo "false") -EXISTING_CONTAINERS=$(ssh -p "$SSH_PORT" -o StrictHostKeyChecking=no "$SSH_USER"@webapp."$TARGET_SYSTEM" "docker ps -q --all" || echo "false") - -if [[ "$EXISTING_INSTALL" != "false" && -n "$EXISTING_INSTALL" ]]; then - msg "" - msg "WARNING: existing wire-server-deploy installation found: $EXISTING_INSTALL" - DO_SYSTEM_CLEANUP=true -fi -if [[ "$EXISTING_VMS" != "false" && -n "$EXISTING_VMS" ]]; then - msg "" - msg "WARNING: existing libvirt VMs found: $EXISTING_VMS" - DO_SYSTEM_CLEANUP=true -fi -if [[ "$EXISTING_CONTAINERS" != "false" && -n "$EXISTING_CONTAINERS" ]]; then - echo "$EXISTING_CONTAINERS" - msg "" - msg "WARNING: existing Docker containers found." - DO_SYSTEM_CLEANUP=true -fi - -if [ "$DO_SYSTEM_CLEANUP" = false ]; then - msg "" - msg "INFO: Target system clean, no previous wire-server-deploy installation found." -fi -if [ "$DO_SYSTEM_CLEANUP" = true ] && [ "$FORCE_REDEPLOY" = 0 ]; then - msg "" - IFS= read -r -p "Do you want to wipe all wire-server-deploy components from $TARGET_SYSTEM? (y/n) " PROMPT_CLEANUP - if [[ $PROMPT_CLEANUP == "n" || $PROMPT_CLEANUP == "N" ]]; then - msg "" - die "Aborting, not cleaning up $TARGET_SYSTEM" - fi - system_cleanup_meta -fi -if [ "$DO_SYSTEM_CLEANUP" = true ] && [ "$FORCE_REDEPLOY" = 1 ]; then - system_cleanup_meta -fi - -msg "INFO: Commencing Wire-in-a-box deployment on $TARGET_SYSTEM." -preprovision_hetzner -ssh -p "$SSH_PORT" -o StrictHostKeyChecking=no -o ServerAliveInterval=30 -o ServerAliveCountMax=10 "$DEMO_USER"@webapp."$TARGET_SYSTEM" "bash -s" < **Note (cert-manager & hairpin NAT):** When cert-manager performs HTTP-01 self-checks inside the cluster, traffic can hairpin (Pod → Node → host public IP → DNAT → Node → Ingress). If your nftables rules DNAT in PREROUTING without a matching SNAT on virbr0→virbr0, return packets may bypass the host and break conntrack, causing HTTP-01 timeouts. Also, strict rp_filter can drop asymmetric return packets. If cert-manager is deployed, verify whether hairpin handling is needed: + > + > - Enable hairpin SNAT for DNATed traffic (forces return traffic through the host): + > ```bash + > sudo nft insert rule ip nat POSTROUTING position 0 \ + > iifname "virbr0" oifname "virbr0" \ + > ct status dnat counter masquerade + > ``` + > - Relax reverse-path filtering to loose mode to allow asymmetric flows: + > ```bash + > sudo sysctl -w net.ipv4.conf.all.rp_filter=2 + > sudo sysctl -w net.ipv4.conf.virbr0.rp_filter=2 + > ``` + > These settings help conntrack reverse DNAT correctly and avoid drops during cert-manager’s HTTP-01 challenges in NAT/bridge (virbr0) environments. + +### Calling Services + +- **[Installing SFTD](docs_ubuntu_22.04.md#installing-sftd)** + - Deploy the Selective Forwarding Unit (SFT) calling server for Wire's voice and video calling capabilities. Optionally enable cooperation with TURN servers and configure appropriate node annotations for external IPs. + +- **[Installing Coturn](coturn.md)** + - Deploy TURN/STUN servers for WebRTC connectivity, enabling peer-to-peer communication for calling services and ensuring connectivity through firewalls and NATs. 
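+
+To illustrate the calling-server step, here is a minimal sketch of labelling a node for SFT and installing the chart with an external IP annotation (via the `d` helper normally provided by `bin/offline-env.sh`). The node name `kubenode1`, the `$HOST_IP` variable and a prepared `values/sftd/values.yaml` are assumptions; follow the linked SFTD guide for the authoritative procedure:
+
+```bash
+# Dedicate one Kubernetes node to the SFT calling server
+d kubectl label node kubenode1 wire.com/role=sftd
+
+# Install sftd pinned to that node, advertising the host's public IP to clients
+d helm upgrade --install sftd ./charts/sftd \
+  --set 'nodeSelector.wire\.com/role=sftd' \
+  --set 'node_annotations="{'wire\.com/external-ip': '"$HOST_IP"'}"' \
+  --values values/sftd/values.yaml
+```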
+
+## Network Traffic Configuration
+
+### Bringing traffic from the physical machine to Wire services in the Kubernetes cluster
+
+If you used the Ansible playbook earlier, nftables firewall rules are pre-configured to forward traffic. If you set up VMs manually with your own hypervisor, you must configure the network traffic flow yourself using nftables.
+
+**Required Network Configuration:**
+
+The physical machine must forward traffic from external clients to the Kubernetes cluster running Wire services. This involves:
+
+1. **HTTP/HTTPS Traffic (Ingress)** - Forward ports 80 and 443 to the nginx-ingress-controller running on a Kubernetes node
+   - Port 80 (HTTP) → Kubernetes node port 31772
+   - Port 443 (HTTPS) → Kubernetes node port 31773
+
+2. **Calling Services Traffic (Coturn/SFT)** - Forward media and TURN protocol traffic to Coturn/SFT
+   - Port 3478 (TCP/UDP) → Coturn control traffic
+   - Ports 32768-65535 (UDP) → Media relay traffic for WebRTC calling
+
+**Implementation:**
+
+Use the detailed nftables rules in [../ansible/files/wiab_server_nftables.conf.j2](../ansible/files/wiab_server_nftables.conf.j2) as a template. The template covers:
+- Defining your network variables (Coturn IP, Kubernetes node IP, WAN interface)
+- Creating NAT rules for HTTP/HTTPS ingress traffic
+- Setting up TURN protocol forwarding for Coturn
+- Restarting nftables to apply changes
+
+You can also apply these rules with the provided Ansible playbook:
+
+```bash
+ansible-playbook -i inventory.yml ansible/wiab-staging-nftables.yml
+```
+
+*Note: If you ran the wiab-staging-provision.yml playbook earlier, these rules may already be in place. Verify before running this playbook.*
+
+The inventory should define the following variables:
+
+```ini
+[all:vars]
+# Kubernetes node IPs
+kubenode1_ip=192.168.122.11
+kubenode2_ip=192.168.122.12
+kubenode3_ip=192.168.122.13
+
+# Calling services node (usually kubenode3)
+calling_node_ip=192.168.122.13
+
+# Host WAN interface name
+inf_wan=eth0
+```
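+
+To make the port mapping concrete, here is a minimal sketch of equivalent one-off `nft` commands. It assumes the `ip nat` table and `PREROUTING` chain created by the template, the example inventory values above (`inf_wan=eth0`, ingress node `192.168.122.11`, calling node `192.168.122.13`), and that Coturn/SFT listen on the calling node's host network. Prefer the rendered template or the playbook for a persistent configuration:
+
+```bash
+# HTTP/HTTPS ingress traffic to the nginx-ingress-controller node ports
+sudo nft add rule ip nat PREROUTING iifname "eth0" tcp dport 80 dnat to 192.168.122.11:31772
+sudo nft add rule ip nat PREROUTING iifname "eth0" tcp dport 443 dnat to 192.168.122.11:31773
+
+# TURN control traffic and the media relay range to the calling node
+sudo nft add rule ip nat PREROUTING iifname "eth0" tcp dport 3478 dnat to 192.168.122.13
+sudo nft add rule ip nat PREROUTING iifname "eth0" udp dport 3478 dnat to 192.168.122.13
+sudo nft add rule ip nat PREROUTING iifname "eth0" udp dport 32768-65535 dnat to 192.168.122.13
+```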