# Pastebin FUjIPgsM
---
debug: true
install_method: source

# Hardening
apply_security_hardening: false

# HAProxy
haproxy_keepalived_internal_vip_cidr: "{{ internal_lb_vip_address }}/32"
haproxy_keepalived_external_vip_cidr: "172.17.223.34/32"
haproxy_keepalived_internal_interface: br-mgmt
haproxy_keepalived_external_interface: br-api
haproxy_ssl: true
haproxy_user_ssl_cert: /etc/openstack_deploy/ssl/cert.crt
haproxy_user_ssl_key: /etc/openstack_deploy/ssl/cert.key
haproxy_user_ssl_ca_cert: /etc/openstack_deploy/ssl/cert.crt

#rabbitmq_use_ssl: true
rabbitmq_user_ssl_cert: "/etc/openstack_deploy/ssl/cert.crt"
rabbitmq_user_ssl_key: "/etc/openstack_deploy/ssl/cert.key"
rabbitmq_user_ssl_ca_cert: "/etc/openstack_deploy/ssl/cert.crt"

lxc_cache_extra_distro_packages:
  - vim
  - wget
  - curl
  - telnet
#  - rsyslog

######## CEPH ########
cephx: true
#ceph_mons:
#  - 172.17.223.129
#  - 172.17.223.130
#  - 172.17.223.131

# OSA options for using an existing Ceph deployment. This example can be used
# if all configuration needs to come from OSA configuration files instead of
# the Ceph MONs:
# http://docs.openstack.org/openstack-ansible-ceph_client/latest/config-from-file.html

# Directory containing the Ceph keyring files with access credentials.
# (An example keyring layout is sketched at the end of this file.)
ceph_keyrings_dir: /etc/openstack_deploy/ceph-keyrings

# General Ceph configuration file containing the information for Ceph clients
# to connect to the Ceph cluster.
ceph_conf_file: |
  [global]
  mon initial members = meo-sstr001, meo-sstr002, meo-sstr003
  fsid = xxxxxxxx-xxxx-xxxx-xxxx-c794f4fb6593
  ## Ceph clusters starting with the Nautilus release can support the v2 wire protocol
  mon host = [v2:172.17.223.129:3300/0,v1:172.17.223.129:6789/0],[v2:172.17.223.130:3300/0,v1:172.17.223.130:6789/0],[v2:172.17.223.131:3300/0,v1:172.17.223.131:6789/0]

glance_default_store: rbd
glance_notification_driver: noop
glance_ceph_client: glance
glance_rbd_store_pool: images
glance_rbd_store_chunk_size: 8

# For configuring a backend in Cinder to store volumes in Ceph. This
# configuration will be used by Nova compute and libvirt to access volumes.
cinder_ceph_client: cinder
cinder_backends:
#  rbd_volumes:
  ceph:
    volume_driver: cinder.volume.drivers.rbd.RBDDriver
    rbd_pool: volumes
    rbd_ceph_conf: /etc/ceph/ceph.conf
    rbd_store_chunk_size: 8
    volume_backend_name: rbddriver
    rbd_user: "{{ cinder_ceph_client }}"
    rbd_secret_uuid: "{{ cinder_ceph_client_uuid }}"
    report_discard_supported: true
#    extra_volume_types:
#      - low-iops
#      - high-iops
#      - ultra-high-iops

# Configuration for Nova compute and libvirt to store ephemeral disks in Ceph.
nova_libvirt_images_rbd_pool: vms
nova_libvirt_inject_password: true

######## GALERA ########
galera_innodb_buffer_pool_size: 4096M  # default: 4M
galera_innodb_log_buffer_size: 128M    # default: 128K
galera_monitoring_allowed_source: 0.0.0.0/0
galera_wsrep_provider_options:
  - option: gcache.size
    value: 1024M

######## LOCAL PYPI ########
# Copy these files from the host into the containers
lxc_container_cache_files_from_host:
  - /etc/pip.conf
  - /root/.pydistutils.cfg
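
# Example: Ceph keyring layout for ceph_keyrings_dir.
# A sketch only, assuming the config-from-file mode linked above reads one
# "<client>.keyring" file per Ceph client from ceph_keyrings_dir; the file
# names and key value below are placeholders, not taken from this deployment.
#
#   /etc/openstack_deploy/ceph-keyrings/glance.keyring
#   /etc/openstack_deploy/ceph-keyrings/cinder.keyring
#
#   # glance.keyring (e.g. exported with "ceph auth get client.glance")
#   [client.glance]
#       key = AQ...placeholder-key...==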
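
# Note on rbd_secret_uuid: the Cinder backend above references
# "{{ cinder_ceph_client_uuid }}". In OSA this value is typically defined in
# /etc/openstack_deploy/user_secrets.yml and filled in by the secret
# generation step; shown here only as a placeholder:
#
#   cinder_ceph_client_uuid: <generated UUID>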
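
# Example: local PyPI files copied into the containers.
# A minimal sketch of what the two files listed under
# lxc_container_cache_files_from_host might contain when pointing the
# containers at an internal PyPI mirror; the hostname is hypothetical.
#
#   # /etc/pip.conf
#   [global]
#   index-url = https://pypi.example.internal/simple
#   trusted-host = pypi.example.internal
#
#   # /root/.pydistutils.cfg
#   [easy_install]
#   index_url = https://pypi.example.internal/simple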