Glance: multiple Ceph clusters no longer possible since commit
Bug #1946743 reported by Florian Engelmann
Affects | Status | Importance | Assigned to | Milestone
---|---|---|---|---
kolla-ansible | New | Undecided | Unassigned |
Bug Description
Hi,
we use the Glance multistore feature to store images in multiple availability zones. Since the following commit we are no longer able to copy multiple ceph.conf files (e.g. ceph.az1.conf, ceph.az2.conf and ceph.az3.conf) into the glance container:
https:/
Previously an asterisk was used to copy all ceph.* files, which worked fine for us.
How should multiple Ceph clusters be handled after this commit?
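For reference, the old glob behaviour could be restored with a copy task along these lines. This is only a sketch, assuming the usual kolla-ansible variables (node_custom_config, node_config_directory) and the glance-api config directory; it is not the actual role code:

- name: Copy over all ceph config files for glance (workaround sketch)
  copy:
    src: "{{ item }}"
    dest: "{{ node_config_directory }}/glance-api/"
    mode: "0660"
  with_fileglob:
    - "{{ node_custom_config }}/glance/ceph.*"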
Example glance configuration using multiple Ceph clusters as stores:
[DEFAULT]
debug = False
log_file = /var/log/kolla/glance/glance-api.log
use_forwarded_for = true
bind_host = yyyyyyyyyyyyyyyy
bind_port = 9292
workers = 5
registry_host = glance.yyyyyyyyyyyyyyyyyyyyyyy
show_multiple_locations = True
cinder_catalog_info = volume:cinder:internalURL
transport_url = rabbit://yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy
image_size_cap = 1099511627776
show_image_direct_url = True
enabled_backends = az1:rbd, az2:rbd, az3:rbd
worker_self_reference_url = http://yyyyyyyyyyyyyyyyyyyyyyyyyy:9292

[database]
connection = mysql+pymysql://glance:yyyyyyyyyyyyyyyyyyyyyy:6033/glance
max_retries = -1

[keystone_authtoken]
www_authenticate_uri = http://keystone.yyyyyyyyyyyyyyyyy:5000
auth_url = http://keystone-admin.yyyyyyyyyyyyyyy:35357
auth_type = password
project_domain_id = default
user_domain_id = default
project_name = service
username = glance
password = yyyyyyyyyyyyyyyyyyyy
memcache_security_strategy = ENCRYPT
memcache_secret_key = yyyyyyyyyyyyyyyyyyyy
memcached_servers = xxxxxxxxxx

[paste_deploy]
flavor = keystone

[glance_store]
default_backend = az1

[oslo_middleware]
enable_proxy_headers_parsing = True

[oslo_messaging_notifications]
transport_url = rabbit://yyyyyyyyyyyyyyyyyyy
driver = messagingv2
topics = notifications

[oslo_policy]
policy_file = policy.yaml

[profiler]
enabled = true
trace_sqlalchemy = true
hmac_keys = yyyyyyyyyyyyyyyyyy
connection_string = elasticsearch://elasticsearch.xxxxxxxxxxxxxxx:9200

[image_format]

[taskflow_executor]
conversion_format = raw

[os_glance_staging_store]
filesystem_store_datadir = /var/lib/glance/os_glance_staging_store

[os_glance_tasks_store]
filesystem_store_datadir = /var/lib/glance/os_glance_tasks_store

[az1]
store_description = AZ1 Image Store
rbd_store_pool = az1glance
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.az1.conf
rbd_store_chunk_size = 8

[az2]
store_description = AZ2 Image Store
rbd_store_pool = az2glance
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.az2.conf
rbd_store_chunk_size = 8

[az3]
store_description = AZ3 Image Store
rbd_store_pool = az3glance
rbd_store_user = glance
rbd_store_ceph_conf = /etc/ceph/ceph.az3.conf
rbd_store_chunk_size = 8

[cors]
allowed_origin = https://xxxxxxxxxxxxxxxxxxxxxxxx
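For context, with enabled_backends and default_backend set as above, an image can be copied into the other AZ stores via the interoperable image import flow. Roughly like this, where the image ID is a placeholder and the --stores option assumes a recent python-glanceclient:

glance image-import 11111111-2222-3333-4444-555555555555 --import-method copy-image --stores az2,az3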