Commit edc965aa authored by Paul Browne

Added configuration required to integrate Arcus' Glance with an existing external Ceph cluster

parent 27c5be3a
1 merge request: !4 Integrate external Ceph cluster
@@ -22,8 +22,7 @@ controller_network_interfaces: >
      inspection_net_name,
      octavia_net_name,
      provision_wl_net_name,
-     storage_net_name,
-     storage_mgmt_net_name] | unique | list }}
+     storage_net_name] | unique | list }}
 # List of default networks to which controller nodes are attached.
 #controller_default_network_interfaces:
...
[client.arcus-glance]
key = {{ secrets_ceph_client_arcus_glance }}
[global]
fsid = {{ secrets_ceph_fsid }}
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
mon initial members = ceph01-ar17-mon,ceph01-ar18-mon,ceph01-br18-mon
mon host = 10.4.200.17,10.4.200.13,10.4.200.9

[DEFAULT]
show_image_direct_url = True
[glance_store]
stores = rbd
default_store = rbd
rbd_store_pool = arcus-images
rbd_store_user = arcus-glance
rbd_store_ceph_conf = /etc/ceph/ceph.conf
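For reference, the matching setup on the external Ceph side would look roughly like the sketch below, assuming the cluster admin creates the pool and client named in the config above (the placement-group count is an illustrative value, not taken from this commit). The key this prints is what populates {{ secrets_ceph_client_arcus_glance }}.

    # On the external Ceph cluster (hypothetical commands; pool and client
    # names are taken from the config above)
    ceph osd pool create arcus-images 128   # PG count is an assumed example
    rbd pool init arcus-images              # mark the pool as an RBD pool
    ceph auth get-or-create client.arcus-glance \
        mon 'profile rbd' \
        osd 'profile rbd pool=arcus-images'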
@@ -13,9 +13,10 @@ nova_instance_datadir_volume: '/var/lib/nova'
 # Ceph integration, without nova having rbd backend
 cinder_backend_ceph: "no"
-# Make glance use a file backend
+# Make glance use a Ceph RBD backend
 glance_backend_swift: "no"
-glance_file_backend: "yes"
+glance_backend_file: "no"
+glance_backend_ceph: "yes"
 # Enable CADF notifications in Keystone
 enable_cadf_notifications: "yes"
...
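Once these flags are in place, Glance needs to be redeployed so its containers pick up the RBD store. A minimal sketch, assuming kolla-ansible is invoked directly with a conventional inventory path (under kayobe the equivalent step would be kayobe overcloud service reconfigure):

    # Reconfigure only the Glance service (inventory path is an assumed example)
    kolla-ansible -i /etc/kolla/inventory reconfigure --tags glance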
@@ -2,52 +2,48 @@
 admin_oc_net_ips:
   arcus-seed: 10.41.101.37
   clc-ar06-u19: 10.41.160.251
-  clc-dr16-u15: 10.41.160.253
   clc-er13-u38: 10.41.160.252
+  clc-dr16-u15: 10.41.160.253
   clc-mon-ar06-u27: 10.41.160.240
 cleaning_net_ips:
   clc-ar06-u19: 10.13.1.0
-  clc-dr16-u15: 10.13.1.2
   clc-er13-u38: 10.13.1.1
+  clc-dr16-u15: 10.13.1.2
 inspection_net_ips:
   clc-ar06-u19: 10.10.1.0
-  clc-dr16-u15: 10.10.1.2
   clc-er13-u38: 10.10.1.1
+  clc-dr16-u15: 10.10.1.2
 internal_net_ips:
   clc-ar06-u19: 10.5.1.0
-  clc-dr16-u15: 10.5.1.2
   clc-er13-u38: 10.5.1.1
+  clc-dr16-u15: 10.5.1.2
   clc-mon-ar06-u27: 10.5.1.3
   arcus-seed: 10.5.1.4
 octavia_net_ips:
   clc-ar06-u19: 10.8.1.0
-  clc-dr16-u15: 10.8.1.2
   clc-er13-u38: 10.8.1.1
+  clc-dr16-u15: 10.8.1.2
 oob_oc_net_ips:
   arcus-seed: 10.45.101.9
   clc-ar06-u19: 10.45.160.248
-  clc-dr16-u15: 10.45.160.249
   clc-er13-u38: 10.45.160.250
+  clc-dr16-u15: 10.45.160.249
   clc-mon-ar06-u27: 10.45.160.240
 provision_oc_net_ips:
   arcus-seed: 10.1.0.1
 provision_wl_net_ips:
   clc-ar06-u19: 10.11.1.0
-  clc-dr16-u15: 10.11.1.2
   clc-er13-u38: 10.11.1.1
+  clc-dr16-u15: 10.11.1.2
 public_net_ips:
   clc-ar06-u19: 128.232.222.2
-  clc-dr16-u15: 128.232.222.4
   clc-er13-u38: 128.232.222.3
-storage_mgmt_net_ips:
-  clc-ar06-u19: 10.7.1.0
-  clc-dr16-u15: 10.7.1.2
-  clc-er13-u38: 10.7.1.1
+  clc-dr16-u15: 128.232.222.4
 storage_net_ips:
-  clc-ar06-u19: 10.6.1.0
-  clc-dr16-u15: 10.6.1.2
-  clc-er13-u38: 10.6.1.1
+  clc-ar06-u19: 10.4.201.0
+  clc-er13-u38: 10.4.201.1
+  clc-dr16-u15: 10.4.201.2
 tunnel_net_ips:
   clc-ar06-u19: 10.17.1.0
-  clc-dr16-u15: 10.17.1.2
   clc-er13-u38: 10.17.1.1
+  clc-dr16-u15: 10.17.1.2
@@ -124,18 +124,18 @@ public_net_allocation_pool_start: 128.232.222.2
 public_net_allocation_pool_end: 128.232.222.4
 # Storage network IP information.
-storage_net_vlan: 606
-storage_net_octet: 6
+storage_net_vlan: 4
+storage_net_octet: 4
 storage_net_cidr: "10.{{ storage_net_octet }}.0.0/16"
-storage_net_allocation_pool_start: "10.{{ storage_net_octet }}.1.0"
-storage_net_allocation_pool_end: "10.{{ storage_net_octet }}.3.255"
+storage_net_allocation_pool_start: "10.{{ storage_net_octet }}.201.0"
+storage_net_allocation_pool_end: "10.{{ storage_net_octet }}.201.255"
 # Storage management network IP information.
-storage_mgmt_net_vlan: 607
-storage_mgmt_net_octet: 7
+storage_mgmt_net_vlan: 5
+storage_mgmt_net_octet: 5
 storage_mgmt_net_cidr: "10.{{ storage_mgmt_net_octet }}.0.0/16"
-storage_mgmt_net_allocation_pool_start: "10.{{ storage_mgmt_net_octet }}.1.0"
-storage_mgmt_net_allocation_pool_end: "10.{{ storage_mgmt_net_octet }}.3.255"
+storage_mgmt_net_allocation_pool_start: "10.{{ storage_mgmt_net_octet }}.201.0"
+storage_mgmt_net_allocation_pool_end: "10.{{ storage_mgmt_net_octet }}.201.255"
 # Workload cleaning network IP information.
 cleaning_net_vlan: 613
...
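The renumbering above is what makes the integration reachable: the storage network moves onto 10.4.0.0/16 (VLAN 4), the same range as the Ceph monitors at 10.4.200.x in the ceph.conf earlier in this diff, so controllers talk to the external cluster directly on the storage VLAN. A quick smoke test from a deployed controller, assuming the keyring from this commit is in place (the commands below are illustrative, not part of the commit):

    ping -c 1 10.4.200.17                         # one of the mon addresses
    rbd ls --pool arcus-images --id arcus-glance  # should list zero or more images without erroring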