diff --git a/etc/kayobe/ansible/neutron-l3-drain.yml b/etc/kayobe/ansible/neutron-l3-drain.yml
index 7640a6920c1b5e19b0d0d1a8c4409d2118065448..d9f261d1754c338a0873c358122113f3f590b354 100644
--- a/etc/kayobe/ansible/neutron-l3-drain.yml
+++ b/etc/kayobe/ansible/neutron-l3-drain.yml
@@ -6,18 +6,18 @@
     - neutron-l3-drain
   vars:
     maintenance: false
-    drain_ctrl1: false
-    drain_ctrl2: false
-    drain_ctrl3: false
+    neutron_namespace_drain_ctrl1: false
+    neutron_namespace_drain_ctrl2: false
+    neutron_namespace_drain_ctrl3: false
   tasks:
     - import_role:
-        name: neutron-namespace-drain
+        name: stackhpc.openstack_ops.neutron_namespace_drain
         tasks_from: main.yml
-      when: drain_ctrl1 | bool or drain_ctrl2 | bool or drain_ctrl3 | bool or neutron_drain_host is defined
+      when: neutron_namespace_drain_ctrl1 | bool or neutron_namespace_drain_ctrl2 | bool or neutron_namespace_drain_ctrl3 | bool or neutron_namespace_drain_host is defined
     - name: "Print Info"
       debug:
         msg:
-          - "{{ neutron_drain_host }} is read for maintenance"
+          - "{{ neutron_namespace_drain_host }} is ready for maintenance"
           - "rerun this play book with -e maintenance=false to re-add"
           - "routers"
       when: maintenance | bool
diff --git a/etc/kayobe/ansible/requirements.yml b/etc/kayobe/ansible/requirements.yml
index b1f787e937ac8baa96fb448985f7a46e76576def..9df47ae504ff856315c45d6d47227ac609ebb302 100644
--- a/etc/kayobe/ansible/requirements.yml
+++ b/etc/kayobe/ansible/requirements.yml
@@ -18,6 +18,9 @@ collections:
   - name: https://github.com/stackhpc/ansible-collection-openstack
     type: git
     version: 0.2.2
+  - name: https://github.com/stackhpc/ansible-collection-openstack-ops
+    type: git
+    version: feature/neutron-namespace-drain
 roles:
   # TODO(tylerchristie): remove these after migrating to collections
   - src: https://github.com/stackhpc/ansible-role-sriov
diff --git a/etc/kayobe/ansible/roles/neutron-namespace-drain/defaults/main.yml b/etc/kayobe/ansible/roles/neutron-namespace-drain/defaults/main.yml
deleted file mode 100644
index aee284f4549705cad9573f807bff3bc05234753d..0000000000000000000000000000000000000000
--- a/etc/kayobe/ansible/roles/neutron-namespace-drain/defaults/main.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-neutron_drain_venv: "{{ virtualenv_path }}/openstack"
-neutron_drain_host: "{% if drain_ctrl1 | bool %}{{ groups['controllers'][0] }}{% elif drain_ctrl2 | bool %}{{ groups['controllers'][1] }}{% elif drain_ctrl3 | bool %}{{ groups['controllers'][2] }}{% endif %}"
-drain_ctrl1: false
-drain_ctrl2: false
-drain_ctrl3: false
-drain_dhcp_agents: false
diff --git a/etc/kayobe/ansible/roles/neutron-namespace-drain/tasks/add-dhcp.yml b/etc/kayobe/ansible/roles/neutron-namespace-drain/tasks/add-dhcp.yml
deleted file mode 100644
index 3cd24ca3e55b25939f5fc855044da2486c213dcf..0000000000000000000000000000000000000000
--- a/etc/kayobe/ansible/roles/neutron-namespace-drain/tasks/add-dhcp.yml
+++ /dev/null
@@ -1,51 +0,0 @@
----
-- name: Query source SRC_DHCP_ID
-  command: >
-    {{ neutron_drain_venv }}/bin/openstack
-    network agent list --host {{ neutron_drain_host }}
-    --agent-type dhcp -f value -c ID
-  register: SRC_DHCP_ID
-  environment: "{{ openstack_auth_env }}"
-  delegate_to: "{{ neutron_drain_host }}"
-  vars:
-    ansible_host: "{{ hostvars[neutron_drain_host].ansible_host }}"
-
-- name: Set fact containing SRC_DHCP_ID
-  set_fact:
-    DHCP_SRC_ID: "{{ SRC_DHCP_ID.stdout }}"
-
-- name: Enable DHCP agent
-  command: >
-    {{ neutron_drain_venv }}/bin/openstack
-    network agent set "{{ DHCP_SRC_ID }}" --enable
-  environment: "{{ openstack_auth_env }}"
-  delegate_to: "{{ neutron_drain_host }}"
-  vars:
-    ansible_host: "{{ hostvars[neutron_drain_host].ansible_host }}"
-
-- name: Get Network IDs
-  command: >
-    {{ neutron_drain_venv }}/bin/openstack
-    network list -f value -c ID
-  register: network_ids
-  environment: "{{ openstack_auth_env }}"
-  delegate_to: "{{ neutron_drain_host }}"
-  vars:
-    ansible_host: "{{ hostvars[neutron_drain_host].ansible_host }}"
-
-- name: Set Network IDs
-  set_fact:
-    NETWORK_IDS: "{{ network_ids.stdout_lines }}"
-
-- name: Add DHCP agent
-  command: >
-    {{ neutron_drain_venv }}/bin/openstack
-    network agent add network {{ DHCP_SRC_ID }} {{ item }} --dhcp
-  environment: "{{ openstack_auth_env }}"
-  delegate_to: "{{ neutron_drain_host }}"
-  vars:
-    ansible_host: "{{ hostvars[neutron_drain_host].ansible_host }}"
-  with_items:
-    - "{{ NETWORK_IDS }}"
-  loop_control:
-    pause: 10 
diff --git a/etc/kayobe/ansible/roles/neutron-namespace-drain/tasks/add-l3.yml b/etc/kayobe/ansible/roles/neutron-namespace-drain/tasks/add-l3.yml
deleted file mode 100644
index d8d8836e73bbceae34c82294700dc7f418d15ee7..0000000000000000000000000000000000000000
--- a/etc/kayobe/ansible/roles/neutron-namespace-drain/tasks/add-l3.yml
+++ /dev/null
@@ -1,51 +0,0 @@
----
-- name: Query source SRC_L3_ID
-  command: >
-    {{ neutron_drain_venv }}/bin/openstack
-    network agent list --host {{ neutron_drain_host }}
-    --agent-type l3 -f value -c ID
-  register: SRC_L3_ID
-  environment: "{{ openstack_auth_env }}"
-  delegate_to: "{{ neutron_drain_host }}"
-  vars:
-    ansible_host: "{{ hostvars[neutron_drain_host].ansible_host }}"
-
-- name: Set fact containing SRC_L3_ID
-  set_fact:
-    L3_SRC_ID: "{{ SRC_L3_ID.stdout }}"
-
-- name: Enable L3 agent
-  command: >
-    {{ neutron_drain_venv }}/bin/openstack
-    network agent set "{{ L3_SRC_ID }}" --enable
-  environment: "{{ openstack_auth_env }}"
-  delegate_to: "{{ neutron_drain_host }}"
-  vars:
-    ansible_host: "{{ hostvars[neutron_drain_host].ansible_host }}"
-
-- name: Get Router IDs
-  command: >
-    {{ neutron_drain_venv }}/bin/openstack
-    router list -f value -c ID
-  register: router_ids
-  environment: "{{ openstack_auth_env }}"
-  delegate_to: "{{ neutron_drain_host }}"
-  vars:
-    ansible_host: "{{ hostvars[neutron_drain_host].ansible_host }}"
-
-- name: Set Router IDS
-  set_fact:
-    ROUTER_IDS: "{{ router_ids.stdout_lines }}"
-
-- name: Add router
-  command: >
-    {{ neutron_drain_venv }}/bin/openstack
-    network agent add router {{ L3_SRC_ID }} {{ item }} --l3
-  environment: "{{ openstack_auth_env }}"
-  delegate_to: "{{ neutron_drain_host }}"
-  vars:
-    ansible_host: "{{ hostvars[neutron_drain_host].ansible_host }}"
-  with_items:
-    - "{{ ROUTER_IDS }}"
-  loop_control:
-    pause: 10
diff --git a/etc/kayobe/ansible/roles/neutron-namespace-drain/tasks/drain-dhcp.yml b/etc/kayobe/ansible/roles/neutron-namespace-drain/tasks/drain-dhcp.yml
deleted file mode 100644
index ba28edc579b1aacf444e2f263fdcf70d7f2a6558..0000000000000000000000000000000000000000
--- a/etc/kayobe/ansible/roles/neutron-namespace-drain/tasks/drain-dhcp.yml
+++ /dev/null
@@ -1,87 +0,0 @@
----
-- name: Query source SRC_DHCP_ID
-  command: >
-    {{ neutron_drain_venv }}/bin/openstack
-    network agent list --host {{ neutron_drain_host }}
-    --agent-type dhcp -f value -c ID
-  register: SRC_DHCP_ID
-  environment: "{{ openstack_auth_env }}"
-  delegate_to: "{{ neutron_drain_host }}"
-  vars:
-    ansible_host: "{{ hostvars[neutron_drain_host].ansible_host }}"
-
-- name: Set fact containing SRC_DHCP_ID
-  set_fact:
-    DHCP_SRC_ID: "{{ SRC_DHCP_ID.stdout }}"
-
-- name: Get DHCP agent network IDs
-  command: >
-    {{ neutron_drain_venv }}/bin/openstack
-     network list --agent {{ DHCP_SRC_ID }} -f value -c ID
-  register: dhcp_agent_ids
-  environment: "{{ openstack_auth_env }}"
-  delegate_to: "{{ neutron_drain_host }}"
-  vars:
-    ansible_host: "{{ hostvars[neutron_drain_host].ansible_host }}"
-
-- name: Set DHCP agent network IDs
-  set_fact:
-    DHCP_AGENT_IDS: "{{ dhcp_agent_ids.stdout_lines }}"
-
-- name: Remove DHCP agent
-  command: >
-    {{ neutron_drain_venv }}/bin/openstack
-    network agent remove network {{ DHCP_SRC_ID }} {{ item }} --dhcp
-  environment: "{{ openstack_auth_env }}"
-  delegate_to: "{{ neutron_drain_host }}"
-  vars:
-    ansible_host: "{{ hostvars[neutron_drain_host].ansible_host }}"
-  with_items:
-    - "{{ DHCP_AGENT_IDS }}"
-
-- name: Wait for no more dhcp agents to be attached to the host
-  block:
-    - name: Retry count
-      set_fact:
-        retry_count: "{{ 0 if retry_count is undefined or retry_count == 'reset' else retry_count | int + 1 }}"
-        max_retries: 20
-
-    - name: Verify dhcp agents exist
-      command: >
-        {{ neutron_drain_venv }}/bin/openstack
-        network list --agent {{ DHCP_SRC_ID }} -f value -c ID
-      environment: "{{ openstack_auth_env }}"
-      delegate_to: "{{ neutron_drain_host }}"
-      vars:
-        ansible_host: "{{ hostvars[neutron_drain_host].ansible_host }}"
-      register: agent_status
-
-    - name: Fail if DHCP agent still attached
-      fail:
-        msg: a DHCP agent is still attached to "{{ L3_SRC_ID }}"
-      when: agent_status.stdout | length > 0
-
-    - name: Reset retry count after success
-      set_fact:
-        retry_count: reset
-  rescue:
-    - fail:
-        msg: |
-          Maximum retries waiting for DHCP agents to be detached reached
-      when: retry_count | int == max_retries
-
-    - name: Reset retry counter if max retries reached (exit loop)
-      set_fact:
-        retry_count: reset
-      failed_when: retry_count == 'reset'
-      when: retry_count | int >= max_retries | int
-
-- name: Disable DHCP agent
-  command: >
-    {{ neutron_drain_venv }}/bin/openstack
-    network agent set "{{ DHCP_SRC_ID }}" --disable
-  environment: "{{ openstack_auth_env }}"
-  delegate_to: "{{ neutron_drain_host }}"
-  vars:
-    ansible_host: "{{ hostvars[neutron_drain_host].ansible_host }}"
-  when: agent_status.stdout | length == 0
diff --git a/etc/kayobe/ansible/roles/neutron-namespace-drain/tasks/drain-l3.yml b/etc/kayobe/ansible/roles/neutron-namespace-drain/tasks/drain-l3.yml
deleted file mode 100644
index 54bccd45829432ed7ba5033c8bf0ea2d69a8b463..0000000000000000000000000000000000000000
--- a/etc/kayobe/ansible/roles/neutron-namespace-drain/tasks/drain-l3.yml
+++ /dev/null
@@ -1,86 +0,0 @@
----
-- name: Query source SRC_L3_ID
-  command: >
-    {{ neutron_drain_venv }}/bin/openstack
-    network agent list --host {{ neutron_drain_host }}
-    --agent-type l3 -f value -c ID
-  register: SRC_L3_ID
-  environment: "{{ openstack_auth_env }}"
-  delegate_to: "{{ neutron_drain_host }}"
-  vars:
-    ansible_host: "{{ hostvars[neutron_drain_host].ansible_host }}"
-
-- name: Set fact containing SRC_L3_ID
-  set_fact:
-    L3_SRC_ID: "{{ SRC_L3_ID.stdout }}"
-
-- name: Get Router IDs
-  command: >
-    {{ neutron_drain_venv }}/bin/openstack
-    router list --agent {{ L3_SRC_ID }} -f value -c ID
-  register: router_ids
-  environment: "{{ openstack_auth_env }}"
-  delegate_to: "{{ neutron_drain_host }}"
-  vars:
-    ansible_host: "{{ hostvars[neutron_drain_host].ansible_host }}"
-
-- name: Set Router IDs
-  set_fact:
-    ROUTER_IDS: "{{ router_ids.stdout_lines }}"
-
-- name: Remove router
-  command: >
-    {{ neutron_drain_venv }}/bin/openstack
-    network agent remove router {{ L3_SRC_ID }} {{ item }} --l3
-  environment: "{{ openstack_auth_env }}"
-  delegate_to: "{{ neutron_drain_host }}"
-  vars:
-    ansible_host: "{{ hostvars[neutron_drain_host].ansible_host }}"
-  with_items:
-    - "{{ ROUTER_IDS }}"
-
-- name: Wait for no more routers to be attached to the host
-  block:
-    - name: Retry count
-      set_fact:
-        retry_count: "{{ 0 if retry_count is undefined or retry_count == 'reset' else retry_count | int + 1 }}"
-        max_retries: 20
-
-    - name: Verify routers exist
-      command: >
-        {{ neutron_drain_venv }}/bin/openstack router list --agent {{ L3_SRC_ID }} -f value -c ID
-      environment: "{{ openstack_auth_env }}"
-      delegate_to: "{{ neutron_drain_host }}"
-      register: agent_status
-      vars:
-        ansible_host: "{{ hostvars[neutron_drain_host].ansible_host }}"
-
-    - name: Fail if routers still attached
-      fail:
-        msg: a Router is still attached to agent "{{ L3_SRC_ID }}"
-      when: agent_status.stdout | length > 0
-
-    - name: Reset retry count after success
-      set_fact:
-        retry_count: reset
-  rescue:
-    - fail:
-        msg: |
-          Maximum retries waiting for routers to be detached reached
-      when: retry_count | int == max_retries
-
-    - name: Reset retry counter if max retries reached (exit loop)
-      set_fact:
-        retry_count: reset
-      failed_when: retry_count == 'reset'
-      when: retry_count | int >= max_retries | int
-
-- name: Disable L3 agent
-  command: >
-    {{ neutron_drain_venv }}/bin/openstack
-    network agent set "{{ L3_SRC_ID }}" --disable
-  environment: "{{ openstack_auth_env }}"
-  delegate_to: "{{ neutron_drain_host }}"
-  when: agent_status.stdout | length == 0
-  vars:
-    ansible_host: "{{ hostvars[neutron_drain_host].ansible_host }}"
diff --git a/etc/kayobe/ansible/roles/neutron-namespace-drain/tasks/main.yml b/etc/kayobe/ansible/roles/neutron-namespace-drain/tasks/main.yml
deleted file mode 100644
index a2c3f8e0ffed2adde2d791d4f1b2ec4860354323..0000000000000000000000000000000000000000
--- a/etc/kayobe/ansible/roles/neutron-namespace-drain/tasks/main.yml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-- name: "Setup OpenStack venv"
-  import_tasks: setup.yml
-
-- name: "Drain L3 agents"
-  import_tasks: drain-l3.yml
-  when: maintenance | bool
-
-- name: "Add L3 agents"
-  import_tasks: add-l3.yml
-  when: not maintenance | bool
-
-- name: "Drain DHCP agents"
-  import_tasks: drain-dhcp.yml
-  when: maintenance | bool and drain_dhcp_agents | bool
-
-- name: "Add DHCP agents"
-  import_tasks: add-dhcp.yml
-  when: not maintenance | bool and drain_dhcp_agents | bool
diff --git a/etc/kayobe/ansible/roles/neutron-namespace-drain/tasks/setup.yml b/etc/kayobe/ansible/roles/neutron-namespace-drain/tasks/setup.yml
deleted file mode 100644
index 8f7cfc5b920f08a87033b31d4bbbda828c057931..0000000000000000000000000000000000000000
--- a/etc/kayobe/ansible/roles/neutron-namespace-drain/tasks/setup.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-- name: Set up openstack cli virtualenv
-  pip:
-    virtualenv: "{{ neutron_drain_venv }}"
-    name:
-      - python-openstackclient
-    state: latest
-    virtualenv_command: /usr/bin/python3 -m venv
-    extra_args: "{% if pip_upper_constraints_file %}-c {{ pip_upper_constraints_file }}{% endif %}"
-  run_once: true
-  become: true
-  delegate_to: "{{ neutron_drain_host }}"
-  vars:
-    # NOTE: Without this, the delegate ansible_host variable will not
-    # be respected when using delegate_to.
-    ansible_host: "{{ hostvars[neutron_drain_host].ansible_host | default(neutron_drain_host) }}"
-
diff --git a/etc/kayobe/kolla/globals.yml b/etc/kayobe/kolla/globals.yml
index f150cb19d1641eddd1006eb3441582d0725515b8..ca679ca8a4f9992ce137adf1c2e657cc986801b3 100644
--- a/etc/kayobe/kolla/globals.yml
+++ b/etc/kayobe/kolla/globals.yml
@@ -88,6 +88,7 @@ haproxy_connect_timeout: 60s
 
 # RMQ HA mode (durable queues)
 om_enable_rabbitmq_high_availability: false
+om_enable_rabbitmq_quorum_queues: true
 
 # Check keystone and horizon are up, and cert is valid
 prometheus_blackbox_exporter_endpoints:
@@ -105,7 +106,7 @@ prometheus_blackbox_exporter_endpoints:
 
 # Backport of: https://review.opendev.org/c/openstack/kolla-ansible/+/824994
 # Upstream version isbackported to Victoria.
-rabbitmq_remove_ha_all_policy: true
+# rabbitmq_remove_ha_all_policy: true
 
 # Needed to launch VM instances in aio environment without nested virt
 nova_compute_virt_type: "{{ 'qemu' if scientific_openstack_trait_all_in_one else 'kvm' }}"
diff --git a/tools/rabbitmq-quorum-migration.sh b/tools/rabbitmq-quorum-migration.sh
index 9bd4d79d2b3ecb1df354bfbc571b4f2b9e5192e1..92bb98204dd02b496046fe38c44fc419ea65e3da 100755
--- a/tools/rabbitmq-quorum-migration.sh
+++ b/tools/rabbitmq-quorum-migration.sh
@@ -11,6 +11,16 @@ if [[ ! $KAYOBE_CONFIG_PATH ]]; then
     exit 2
 fi
 
+if [[ ! "$1" = "--skip-upgrade" ]]; then
+    kayobe overcloud host command run --limit controllers[2] --become --command "cp -a /etc/kolla/neutron-l3-agent/neutron-l3-agent-wrapper.sh /etc/kolla/neutron-l3-agent/neutron-l3-agent-wrapper.bak"
+    kayobe overcloud host command run --limit controllers[2] --become --command "sed -i '/neutron-netns-cleanup/,+3d' /etc/kolla/neutron-l3-agent/neutron-l3-agent-wrapper.sh"
+    kayobe overcloud host command run --limit controllers[2] --become --command "chattr +i /etc/kolla/neutron-l3-agent/neutron-l3-agent-wrapper.sh"
+    kayobe kolla ansible run "rabbitmq-upgrade 3.12"
+    sleep 360
+    kayobe kolla ansible run "rabbitmq-upgrade 3.13"
+    kayobe overcloud host command run --limit controllers[2] --become --command "chattr -i /etc/kolla/neutron-l3-agent/neutron-l3-agent-wrapper.sh"
+fi
+
 if [[ ! "$1" = "--skip-checks" ]]; then
     # Fail if clocks are not synced
     if ! ( kayobe overcloud host command run -l controllers -b --command "timedatectl status | grep 'synchronized: yes'" ); then
@@ -29,6 +39,8 @@ fi
 
 # Generate new config, stop services using rabbit, and reset rabbit state
 kayobe overcloud service configuration generate --node-config-dir /etc/kolla --kolla-skip-tags rabbitmq-ha-precheck
+kayobe overcloud host command run --limit controllers[2] --become --command "sed -i '/neutron-netns-cleanup/,+3d' /etc/kolla/neutron-l3-agent/neutron-l3-agent-wrapper.sh"
+kayobe overcloud host command run --limit controllers[2] --become --command "chattr +i /etc/kolla/neutron-l3-agent/neutron-l3-agent-wrapper.sh"
 kayobe kolla ansible run "stop --yes-i-really-really-mean-it" -kt $RABBITMQ_SERVICES_TO_RESTART
 kayobe kolla ansible run rabbitmq-reset-state
 
@@ -58,3 +70,5 @@ if [[ ! "$1" = "--skip-checks" ]]; then
         echo "Failed post-check: A controller does not have any quorum queues"
     fi
 fi
+
+kayobe overcloud host command run --limit controllers[2] --become --command "chattr -i /etc/kolla/neutron-l3-agent/neutron-l3-agent-wrapper.sh"