From ffb5062555b27a68cd91556ab1719c1e31204aa7 Mon Sep 17 00:00:00 2001
From: Russell Teague
Date: Nov 03 2020 21:41:36 +0000
Subject: openshift_node: Drain nodes and restart during join.yml


- Add drain and restart during node join
- During restart, delete SDN/OVS pods when using CRIO

SDN pods need to be restarted after CA redeploy

---

diff --git a/playbooks/openshift-node/private/join.yml b/playbooks/openshift-node/private/join.yml
index 9af550e..afc40f8 100644
--- a/playbooks/openshift-node/private/join.yml
+++ b/playbooks/openshift-node/private/join.yml
@@ -107,6 +107,11 @@
 
 - import_playbook: contrail_sanitize.yml
 
+- import_playbook: restart.yml
+  vars:
+    openshift_node_restart_docker_required: true
+    openshift_node_restart_drain: true
+
 - name: Node Join Checkpoint End
   hosts: all
   gather_facts: false
diff --git a/playbooks/openshift-node/private/restart.yml b/playbooks/openshift-node/private/restart.yml
index b98fd9a..9a3c455 100644
--- a/playbooks/openshift-node/private/restart.yml
+++ b/playbooks/openshift-node/private/restart.yml
@@ -74,6 +74,13 @@
     - openshift_node_restart_docker_required | default(True)
     - not openshift_use_crio_only | bool
 
+  - import_role:
+      name: openshift_node
+      tasks_from: sdn_delete.yml
+    when:
+    - openshift_use_crio | default(false) | bool
+    - openshift_node_restart_drain | default(false) | bool
+
   - name: Wait for master API to come back online
     wait_for:
       host: "{{ openshift.common.hostname }}"
diff --git a/roles/openshift_node/tasks/sdn_delete.yml b/roles/openshift_node/tasks/sdn_delete.yml
new file mode 100644
index 0000000..f375f65
--- /dev/null
+++ b/roles/openshift_node/tasks/sdn_delete.yml
@@ -0,0 +1,17 @@
+---
+
+# https://bugzilla.redhat.com/show_bug.cgi?id=1660880
+# Delete the SDN/OVS pods to allow any changes to the DaemonSets to be applied.
+- name: Delete OpenShift SDN/OVS pods prior to upgrade
+  shell: >
+    {{ openshift_client_binary }} get pods
+    --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+    --field-selector=spec.nodeName={{ l_kubelet_node_name | lower }}
+    -o json
+    -n openshift-sdn |
+    {{ openshift_client_binary }} delete
+    --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+    --force
+    --grace-period=0
+    -f -
+  delegate_to: "{{ groups.oo_first_master.0 }}"
diff --git a/roles/openshift_node/tasks/upgrade.yml b/roles/openshift_node/tasks/upgrade.yml
index b612bba..9512ac1 100644
--- a/roles/openshift_node/tasks/upgrade.yml
+++ b/roles/openshift_node/tasks/upgrade.yml
@@ -7,21 +7,7 @@
 
 # tasks file for openshift_node_upgrade
 
-# https://bugzilla.redhat.com/show_bug.cgi?id=1660880
-# Delete the SDN/OVS pods to allow any changes to the DaemonSets to be applied.
-- name: Delete OpenShift SDN/OVS pods prior to upgrade
-  shell: >
-    {{ openshift_client_binary }} get pods
-    --config={{ openshift.common.config_base }}/master/admin.kubeconfig
-    --field-selector=spec.nodeName={{ l_kubelet_node_name | lower }}
-    -o json
-    -n openshift-sdn |
-    {{ openshift_client_binary }} delete
-    --config={{ openshift.common.config_base }}/master/admin.kubeconfig
-    --force
-    --grace-period=0
-    -f -
-  delegate_to: "{{ groups.oo_first_master.0 }}"
+- import_tasks: sdn_delete.yml
 
 - name: stop services for upgrade
   import_tasks: upgrade/stop_services.yml
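
The drain enabled by openshift_node_restart_drain is handled by existing tasks in
restart.yml and is not shown in this diff; as a rough sketch, it corresponds to
something like the following run against the first master (the node name is an
illustrative placeholder and the exact flags are an assumption, not a quote from
the playbook):

    # Evacuate the node before its services are restarted. DaemonSet-managed
    # pods (including SDN/OVS) survive a drain, which is why sdn_delete.yml
    # deletes them explicitly afterwards.
    oc adm drain node1.example.com \
        --config=/etc/origin/master/admin.kubeconfig \
        --force --delete-local-data --ignore-daemonsets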
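
For reference, the shell pipeline in the new sdn_delete.yml amounts to running
the following by hand from the first master. The node name is again a
placeholder, and /etc/origin is the usual value of openshift.common.config_base
on a 3.x install:

    # List this node's pods in the openshift-sdn namespace as JSON, then
    # force-delete them so the SDN/OVS DaemonSets recreate them with the
    # current configuration (picking up the redeployed CA).
    oc get pods \
        --config=/etc/origin/master/admin.kubeconfig \
        --field-selector=spec.nodeName=node1.example.com \
        -n openshift-sdn \
        -o json \
      | oc delete \
        --config=/etc/origin/master/admin.kubeconfig \
        --force --grace-period=0 -f -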