Layout: this write-up follows the structure of the Basic Installation guide.
OS: Ubuntu 13.04
Cloud Controller Node: 59.29.142.9, 192.168.100.1
Network Controller Node: 59.29.142.88, 192.168.100.2
Compute Node: 59.29.142.86, 192.168.100.3
Settings common to all nodes
/etc/hosts
59.29.142.86 fox2
59.29.142.88 fox4
59.29.142.9 tech
192.168.100.1 cloud
192.168.100.2 network
192.168.100.3 compute1
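A quick sanity check: once the hosts file above is copied to all three machines, each alias should answer over the management network, for example:
ping -c 1 cloud
ping -c 1 network
ping -c 1 compute1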
1. Cloud Controller Node
/etc/network/interfaces
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
# The primary network interface
auto eth0
iface eth0 inet static
address 59.29.142.9
netmask 255.255.255.0
network 59.29.142.0
broadcast 59.29.142.255
gateway 59.29.142.254
# dns-* options are implemented by the resolvconf package, if installed
dns-nameservers 8.8.8.8
auto eth1
iface eth1 inet static
address 192.168.100.1
netmask 255.255.255.0
network 192.168.100.0
broadcast 192.168.100.255
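To apply the interface changes without a reboot, something along these lines should work (eth1 carries the 192.168.100.0/24 management network):
ifdown eth1 ; ifup eth1          # or: /etc/init.d/networking restart
ip addr show eth1                # confirm 192.168.100.1/24 came up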
/etc/nova/nova.conf
[DEFAULT]
dhcpbridge_flagfile=/etc/nova/nova.conf
dhcpbridge=/usr/bin/nova-dhcpbridge
logdir=/var/log/nova
state_path=/var/lib/nova
lock_path=/var/lock/nova
force_dhcp_release=True
iscsi_helper=tgtadm
libvirt_use_virtio_for_bridges=True
connection_type=libvirt
root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
verbose=True
ec2_private_dns_show_ip=True
api_paste_config=/etc/nova/api-paste.ini
volumes_path=/var/lib/nova/volumes
#volumes_path=/var/lib/cinder/volumes
enabled_apis=ec2,osapi_compute,metadata
sql_connection=mysql://nova:nova@localhost/nova
my_ip=192.168.100.1
#rabbit_user=guest
rabbit_password=password
auth_strategy=keystone
# Networking
network_api_class=nova.network.quantumv2.api.API
quantum_url=http://192.168.100.1:9696
quantum_auth_strategy=keystone
quantum_admin_tenant_name=service
quantum_admin_username=quantum
quantum_admin_password=password
quantum_admin_auth_url=http://192.168.100.1:35357/v2.0
libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver
linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver
# Security Groups
firewall_driver=nova.virt.firewall.NoopFirewallDriver
security_group_api=quantum
# Metadata
quantum_metadata_proxy_shared_secret=password
service_quantum_metadata_proxy=true
metadata_listen = 192.168.100.1
metadata_listen_port = 8775
# Cinder
volume_api_class=nova.volume.cinder.API
# Glance
glance_api_servers=192.168.100.1:9292
image_service=nova.image.glance.GlanceImageService
# novnc
novnc_enable=true
novncproxy_port=6080
novncproxy_host=59.29.142.9
vncserver_listen=0.0.0.0
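The sql_connection line above assumes a nova database and a nova/nova MySQL account already exist. Roughly, the preparation looks like this on the controller (create the database, sync the schema, then restart the nova services so they pick up nova.conf):
mysql -u root -p -e "CREATE DATABASE nova;"
mysql -u root -p -e "GRANT ALL ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'nova';"
nova-manage db sync
service nova-api restart
service nova-scheduler restart
service nova-conductor restart
service nova-novncproxy restart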
/etc/nova/api-paste.ini
############
# Metadata #
############
[composite:metadata]
use = egg:Paste#urlmap
/: meta
[pipeline:meta]
pipeline = ec2faultwrap logrequest metaapp
[app:metaapp]
paste.app_factory = nova.api.metadata.handler:MetadataRequestHandler.factory
#######
# EC2 #
#######
[composite:ec2]
use = egg:Paste#urlmap
/services/Cloud: ec2cloud
[composite:ec2cloud]
use = call:nova.api.auth:pipeline_factory
noauth = ec2faultwrap logrequest ec2noauth cloudrequest validator ec2executor
keystone = ec2faultwrap logrequest ec2keystoneauth cloudrequest validator ec2executor
[filter:ec2faultwrap]
paste.filter_factory = nova.api.ec2:FaultWrapper.factory
[filter:logrequest]
paste.filter_factory = nova.api.ec2:RequestLogging.factory
[filter:ec2lockout]
paste.filter_factory = nova.api.ec2:Lockout.factory
[filter:ec2keystoneauth]
paste.filter_factory = nova.api.ec2:EC2KeystoneAuth.factory
[filter:ec2noauth]
paste.filter_factory = nova.api.ec2:NoAuth.factory
[filter:cloudrequest]
controller = nova.api.ec2.cloud.CloudController
paste.filter_factory = nova.api.ec2:Requestify.factory
[filter:authorizer]
paste.filter_factory = nova.api.ec2:Authorizer.factory
[filter:validator]
paste.filter_factory = nova.api.ec2:Validator.factory
[app:ec2executor]
paste.app_factory = nova.api.ec2:Executor.factory
#############
# Openstack #
#############
[composite:osapi_compute]
use = call:nova.api.openstack.urlmap:urlmap_factory
/: oscomputeversions
/v1.1: openstack_compute_api_v2
/v2: openstack_compute_api_v2
[composite:openstack_compute_api_v2]
use = call:nova.api.auth:pipeline_factory
noauth = faultwrap sizelimit noauth ratelimit osapi_compute_app_v2
keystone = faultwrap sizelimit authtoken keystonecontext ratelimit osapi_compute_app_v2
keystone_nolimit = faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v2
[filter:faultwrap]
paste.filter_factory = nova.api.openstack:FaultWrapper.factory
[filter:noauth]
paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory
[filter:ratelimit]
paste.filter_factory = nova.api.openstack.compute.limits:RateLimitingMiddleware.factory
[filter:sizelimit]
paste.filter_factory = nova.api.sizelimit:RequestBodySizeLimiter.factory
[app:osapi_compute_app_v2]
paste.app_factory = nova.api.openstack.compute:APIRouter.factory
[pipeline:oscomputeversions]
pipeline = faultwrap oscomputeversionapp
[app:oscomputeversionapp]
paste.app_factory = nova.api.openstack.compute.versions:Versions.factory
##########
# Shared #
##########
[filter:keystonecontext]
paste.filter_factory = nova.api.auth:NovaKeystoneContext.factory
[filter:authtoken]
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
#auth_host = 127.0.0.1
auth_host = 192.168.100.1
auth_port = 35357
auth_protocol = http
#admin_tenant_name = %SERVICE_TENANT_NAME%
#admin_user = %SERVICE_USER%
#admin_password = %SERVICE_PASSWORD%
admin_tenant_name = service
admin_user = nova
admin_password = password
# signing_dir is configurable, but the default behavior of the authtoken
# middleware should be sufficient. It will create a temporary directory
# in the home directory for the user the nova process is running as.
#signing_dir = /var/lib/nova/keystone-signing
# Workaround for https://bugs.launchpad.net/nova/+bug/1154809
auth_version = v2.0
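With the authtoken filter pointing at keystone, an end-to-end check is to call the compute API with the nova client. This assumes an admin user and tenant with password 'password' were created in keystone beforehand (not shown in these notes); adjust the credentials to your setup:
export OS_USERNAME=admin
export OS_PASSWORD=password
export OS_TENANT_NAME=admin
export OS_AUTH_URL=http://192.168.100.1:5000/v2.0/
nova list                  # should return an empty list, not an auth error
nova-manage service list   # scheduler/conductor/etc. should report ":-)"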
/etc/quantum/quantum.conf
[DEFAULT]
# Default log level is INFO
# verbose and debug has the same result.
# One of them will set DEBUG log level output
# debug = False
# verbose = False
verbose = True
# Where to store Quantum state files. This directory must be writable by the
# user executing the agent.
# state_path = /var/lib/quantum
# Where to store lock files
lock_path = $state_path/lock
# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
# log_date_format = %Y-%m-%d %H:%M:%S
# use_syslog -> syslog
# log_file and log_dir -> log_dir/log_file
# (not log_file) and log_dir -> log_dir/{binary_name}.log
# use_stderr -> stderr
# (not use_stderr) and (not log_file) -> stdout
# publish_errors -> notification system
# use_syslog = False
# syslog_log_facility = LOG_USER
# use_stderr = True
# log_file =
# log_dir =
# publish_errors = False
# Address to bind the API server
bind_host = 0.0.0.0
# Port to bind the API server to
bind_port = 9696
# Path to the extensions. Note that this can be a colon-separated list of
# paths. For example:
# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
# The __path__ of quantum.extensions is appended to this, so if your
# extensions are in there you don’t need to specify them here
# api_extensions_path =
# Quantum plugin provider module
# core_plugin =
core_plugin = quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2
# Advanced service modules
# service_plugins =
# Paste configuration file
api_paste_config = /etc/quantum/api-paste.ini
# The strategy to be used for auth.
# Supported values are 'keystone' (default), 'noauth'.
auth_strategy = keystone
# Base MAC address. The first 3 octets will remain unchanged. If the
# 4th octet is not 00, it will also be used. The others will be
# randomly generated.
# 3 octet
# base_mac = fa:16:3e:00:00:00
# 4 octet
# base_mac = fa:16:3e:4f:00:00
# Maximum amount of retries to generate a unique MAC address
# mac_generation_retries = 16
# DHCP Lease duration (in seconds)
# dhcp_lease_duration = 120
# Allow sending resource operation notification to DHCP agent
# dhcp_agent_notification = True
# Enable or disable bulk create/update/delete operations
# allow_bulk = True
# Enable or disable pagination
# allow_pagination = False
# Enable or disable sorting
# allow_sorting = False
# Enable or disable overlapping IPs for subnets
# Attention: the following parameter MUST be set to False if Quantum is
# being used in conjunction with nova security groups and/or metadata service.
# allow_overlapping_ips = False
# Ensure that configured gateway is on subnet
# force_gateway_on_subnet = False
# RPC configuration options. Defined in rpc __init__
# The messaging module to use, defaults to kombu.
# rpc_backend = quantum.openstack.common.rpc.impl_kombu
# Size of RPC thread pool
# rpc_thread_pool_size = 64,
# Size of RPC connection pool
# rpc_conn_pool_size = 30
# Seconds to wait for a response from call or multicall
# rpc_response_timeout = 60
# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
# rpc_cast_timeout = 30
# Modules of exceptions that are permitted to be recreated
# upon receiving exception data from an rpc call.
# allowed_rpc_exception_modules = quantum.openstack.common.exception, nova.exception
# AMQP exchange to connect to if using RabbitMQ or QPID
control_exchange = quantum
# If passed, use a fake RabbitMQ provider
fake_rabbit = False
# Configuration options if sending notifications via kombu rpc (these are
# the defaults)
# SSL version to use (valid only if SSL enabled)
# kombu_ssl_version =
# SSL key file (valid only if SSL enabled)
# kombu_ssl_keyfile =
# SSL cert file (valid only if SSL enabled)
# kombu_ssl_certfile =
# SSL certification authority file (valid only if SSL enabled)
# kombu_ssl_ca_certs =
# IP address of the RabbitMQ installation
# rabbit_host = localhost
# Password of the RabbitMQ server
# rabbit_password = guest
rabbit_host = 192.168.100.1
rabbit_password = password
# Port where RabbitMQ server is running/listening
# rabbit_port = 5672
# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
# rabbit_hosts is defaulted to ‘$rabbit_host:$rabbit_port’
# rabbit_hosts = localhost:5672
# User ID used for RabbitMQ connections
# rabbit_userid = guest
# Location of a virtual RabbitMQ installation.
# rabbit_virtual_host = /
# Maximum retries with trying to connect to RabbitMQ
# (the default of 0 implies an infinite retry count)
# rabbit_max_retries = 0
# RabbitMQ connection retry interval
# rabbit_retry_interval = 1
# Use HA queues in RabbitMQ (x-ha-policy: all).You need to
# wipe RabbitMQ database when changing this option. (boolean value)
# rabbit_ha_queues = false
# QPID
# rpc_backend=quantum.openstack.common.rpc.impl_qpid
# Qpid broker hostname
# qpid_hostname = localhost
# Qpid broker port
# qpid_port = 5672
# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
# qpid_hosts is defaulted to ‘$qpid_hostname:$qpid_port’
# qpid_hosts = localhost:5672
# Username for qpid connection
# qpid_username = ''
# Password for qpid connection
# qpid_password = ''
# Space separated list of SASL mechanisms to use for auth
# qpid_sasl_mechanisms = ''
# Seconds between connection keepalive heartbeats
# qpid_heartbeat = 60
# Transport to use, either ‘tcp’ or ‘ssl’
# qpid_protocol = tcp
# Disable Nagle algorithm
# qpid_tcp_nodelay = True
# ZMQ
# rpc_backend=quantum.openstack.common.rpc.impl_zmq
# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
# The “host” option should point or resolve to this address.
# rpc_zmq_bind_address = *
# ============ Notification System Options =====================
# Notifications can be sent when network/subnet/port are created, updated or deleted.
# There are three methods of sending notifications: logging (via the
# log_file directive), rpc (via a message queue) and
# noop (no notifications sent, the default)
# Notification_driver can be defined multiple times
# Do nothing driver
# notification_driver = quantum.openstack.common.notifier.no_op_notifier
# Logging driver
# notification_driver = quantum.openstack.common.notifier.log_notifier
# RPC driver. DHCP agents needs it.
notification_driver = quantum.openstack.common.notifier.rpc_notifier
#notification_driver = quantum.openstack.common.notifier.rabbit_notifier
# default_notification_level is used to form actual topic name(s) or to set logging level
default_notification_level = INFO
# default_publisher_id is a part of the notification payload
# host = myhost.com
# default_publisher_id = $host
# Defined in rpc_notifier, can be comma separated values.
# The actual topic names will be %s.%(default_notification_level)s
notification_topics = notifications
# Default maximum number of items returned in a single response,
# value == infinite and value < 0 means no max limit, and value must be
# greater than 0. If the number of items requested is greater than
# pagination_max_limit, server will just return pagination_max_limit
# of number of items.
# pagination_max_limit = -1
# Maximum number of DNS nameservers per subnet
# max_dns_nameservers = 5
# Maximum number of host routes per subnet
# max_subnet_host_routes = 20
# Maximum number of fixed ips per port
# max_fixed_ips_per_port = 5
# =========== items for agent management extension =============
# Seconds to regard the agent as down.
# agent_down_time = 5
# =========== end of items for agent management extension =====
# =========== items for agent scheduler extension =============
# Driver to use for scheduling network to DHCP agent
# network_scheduler_driver = quantum.scheduler.dhcp_agent_scheduler.ChanceScheduler
# Driver to use for scheduling router to a default L3 agent
# router_scheduler_driver = quantum.scheduler.l3_agent_scheduler.ChanceScheduler
# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
# networks to first DHCP agent which sends get_active_networks message to
# quantum server
# network_auto_schedule = True
# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
# routers to first L3 agent which sends sync_routers message to quantum server
# router_auto_schedule = True
# =========== end of items for agent scheduler extension =====
[QUOTAS]
# resource name(s) that are supported in quota features
# quota_items = network,subnet,port
# default number of resource allowed per tenant, minus for unlimited
# default_quota = -1
# number of networks allowed per tenant, and minus means unlimited
# quota_network = 10
# number of subnets allowed per tenant, and minus means unlimited
# quota_subnet = 10
# number of ports allowed per tenant, and minus means unlimited
# quota_port = 50
# number of security groups allowed per tenant, and minus means unlimited
# quota_security_group = 10
# number of security group rules allowed per tenant, and minus means unlimited
# quota_security_group_rule = 100
# default driver to use for quota checks
# quota_driver = quantum.quota.ConfDriver
[DEFAULT_SERVICETYPE]
# Description of the default service type (optional)
# description = “default service type”
# Enter a service definition line for each advanced service provided
# by the default service type.
# Each service definition should be in the following format:
# <service>:<plugin>[:driver]
[AGENT]
# Use "sudo quantum-rootwrap /etc/quantum/rootwrap.conf" to use the real
# root filter facility.
# Change to "sudo" to skip the filtering and just run the command directly
# root_helper = sudo
root_helper = sudo quantum-rootwrap /etc/quantum/rootwrap.conf
# =========== items for agent management extension =============
# seconds between nodes reporting state to server, should be less than
# agent_down_time
# report_interval = 4
# =========== end of items for agent management extension =====
[keystone_authtoken]
#auth_host = 127.0.0.1
auth_host = 192.168.100.1
auth_port = 35357
auth_protocol = http
#admin_tenant_name = %SERVICE_TENANT_NAME%
#admin_user = %SERVICE_USER%
#admin_password = %SERVICE_PASSWORD%
admin_tenant_name = service
admin_user = quantum
admin_password = password
signing_dir = /var/lib/quantum/keystone-signing
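After editing quantum.conf, restarting quantum-server and listing networks is a quick way to confirm the keystone and RabbitMQ settings are picked up (the quantum client reuses the same OS_* credential variables as the nova client check above):
service quantum-server restart
quantum net-list           # empty output is fine; an auth or connection error is not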
/etc/quantum/api-paste.ini
[composite:quantum]
use = egg:Paste#urlmap
/: quantumversions
/v2.0: quantumapi_v2_0
[composite:quantumapi_v2_0]
use = call:quantum.auth:pipeline_factory
noauth = extensions quantumapiapp_v2_0
keystone = authtoken keystonecontext extensions quantumapiapp_v2_0
[filter:keystonecontext]
paste.filter_factory = quantum.auth:QuantumKeystoneContext.factory
[filter:authtoken]
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
[filter:extensions]
paste.filter_factory = quantum.api.extensions:plugin_aware_extension_middleware_factory
[app:quantumversions]
paste.app_factory = quantum.api.versions:Versions.factory
[app:quantumapiapp_v2_0]
paste.app_factory = quantum.api.v2.router:APIRouter.factory
/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini
[DATABASE]
sql_connection = mysql://quantum:quantum@localhost/quantum
reconnect_interval = 2
[OVS]
tenant_network_type = gre
tunnel_id_ranges = 1:1000
enable_tunneling = True
local_ip = 192.168.100.1
#bridge_mappings = physnet1:br-int
#integration_bridge = br-int
#tunnel_bridge = br-tun
#tenant_network_type = gre
#network_vlan_ranges physnet:100:2999
[AGENT]
# Agent’s polling interval in seconds
polling_interval = 2
[SECURITYGROUP]
firewall_driver = quantum.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
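The plugin's sql_connection likewise assumes a quantum database and quantum/quantum account, and the OVS agent needs an integration bridge before it starts cleanly. A rough sketch, assuming the openvswitch agent also runs on this controller:
mysql -u root -p -e "CREATE DATABASE quantum;"
mysql -u root -p -e "GRANT ALL ON quantum.* TO 'quantum'@'localhost' IDENTIFIED BY 'quantum';"
ovs-vsctl add-br br-int                            # integration bridge used by the agent
service quantum-plugin-openvswitch-agent restart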
/etc/keystone/keystone.conf
[DEFAULT]
# A “shared secret” between keystone and other openstack services
# admin_token = ADMIN
admin_token = password
# The IP address of the network interface to listen on
# bind_host = 0.0.0.0
# The port number which the public service listens on
# public_port = 5000
# The port number which the public admin listens on
# admin_port = 35357
# The base endpoint URLs for keystone that are advertised to clients
# (NOTE: this does NOT affect how keystone listens for connections)
# public_endpoint = http://localhost:%(public_port)d/
# admin_endpoint = http://localhost:%(admin_port)d/
# The port number which the OpenStack Compute service listens on
# compute_port = 8774
# Path to your policy definition containing identity actions
# policy_file = policy.json
# Rule to check if no matching policy definition is found
# FIXME(dolph): This should really be defined as [policy] default_rule
# policy_default_rule = admin_required
# Role for migrating membership relationships
# During a SQL upgrade, the following values will be used to create a new role
# that will replace records in the user_tenant_membership table with explicit
# role grants. After migration, the member_role_id will be used in the API
# add_user_to_project, and member_role_name will be ignored.
# member_role_id = 9fe2ff9ee4384b1894a90878d3e92bab
# member_role_name = _member_
# === Logging Options ===
# Print debugging output
# (includes plaintext request logging, potentially including passwords)
# debug = False
debug = True
# Print more verbose output
# verbose = False
verbose = True
# Name of log file to output to. If not set, logging will go to stdout.
log_file = keystone.log
# The directory to keep log files in (will be prepended to --logfile)
log_dir = /var/log/keystone
# Use syslog for logging.
# use_syslog = False
# syslog facility to receive log lines
# syslog_log_facility = LOG_USER
# If this option is specified, the logging configuration file specified is
# used and overrides any other logging options specified. Please see the
# Python logging module documentation for details on logging configuration
# files.
# log_config = logging.conf
# A logging.Formatter log message format string which may use any of the
# available logging.LogRecord attributes.
# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
# Format string for %(asctime)s in log records.
# log_date_format = %Y-%m-%d %H:%M:%S
# onready allows you to send a notification when the process is ready to serve
# For example, to have it notify using systemd, one could set shell command:
# onready = systemd-notify --ready
# or a module with notify() method:
# onready = keystone.common.systemd
[sql]
# The SQLAlchemy connection string used to connect to the database
#connection = sqlite:////var/lib/keystone/keystone.db
connection = mysql://keystone:keystone@localhost/keystone
# the timeout before idle sql connections are reaped
# idle_timeout = 200
[identity]
driver = keystone.identity.backends.sql.Identity
# This references the domain to use for all Identity API v2 requests (which are
# not aware of domains). A domain with this ID will be created for you by
# keystone-manage db_sync in migration 008. The domain referenced by this ID
# cannot be deleted on the v3 API, to prevent accidentally breaking the v2 API.
# There is nothing special about this domain, other than the fact that it must
# exist in order to maintain support for your v2 clients.
# default_domain_id = default
[trust]
driver = keystone.trust.backends.sql.Trust
# delegation and impersonation features can be optionally disabled
# enabled = True
[catalog]
# dynamic, sql-based backend (supports API/CLI-based management commands)
driver = keystone.catalog.backends.sql.Catalog
# static, file-based backend (does *NOT* support any management commands)
# driver = keystone.catalog.backends.templated.TemplatedCatalog
# template_file = default_catalog.templates
[token]
driver = keystone.token.backends.sql.Token
# Amount of time a token should remain valid (in seconds)
# expiration = 86400
[policy]
driver = keystone.policy.backends.sql.Policy
[ec2]
driver = keystone.contrib.ec2.backends.sql.Ec2
[ssl]
#enable = True
#certfile = /etc/keystone/ssl/certs/keystone.pem
#keyfile = /etc/keystone/ssl/private/keystonekey.pem
#ca_certs = /etc/keystone/ssl/certs/ca.pem
#cert_required = True
[signing]
#token_format = PKI
#certfile = /etc/keystone/ssl/certs/signing_cert.pem
#keyfile = /etc/keystone/ssl/private/signing_key.pem
#ca_certs = /etc/keystone/ssl/certs/ca.pem
#key_size = 1024
#valid_days = 3650
#ca_password = None
[ldap]
# url = ldap://localhost
# user = dc=Manager,dc=example,dc=com
# password = None
# suffix = cn=example,cn=com
# use_dumb_member = False
# allow_subtree_delete = False
# dumb_member = cn=dumb,dc=example,dc=com
# Maximum results per page; a value of zero (‘0’) disables paging (default)
# page_size = 0
# The LDAP dereferencing option for queries. This can be either ‘never’,
# ‘searching’, ‘always’, ‘finding’ or ‘default’. The ‘default’ option falls
# back to using default dereferencing configured by your ldap.conf.
# alias_dereferencing = default
# The LDAP scope for queries, this can be either ‘one’
# (onelevel/singleLevel) or ‘sub’ (subtree/wholeSubtree)
# query_scope = one
# user_tree_dn = ou=Users,dc=example,dc=com
# user_filter =
# user_objectclass = inetOrgPerson
# user_domain_id_attribute = businessCategory
# user_id_attribute = cn
# user_name_attribute = sn
# user_mail_attribute = email
# user_pass_attribute = userPassword
# user_enabled_attribute = enabled
# user_enabled_mask = 0
# user_enabled_default = True
# user_attribute_ignore = tenant_id,tenants
# user_allow_create = True
# user_allow_update = True
# user_allow_delete = True
# user_enabled_emulation = False
# user_enabled_emulation_dn =
# tenant_tree_dn = ou=Groups,dc=example,dc=com
# tenant_filter =
# tenant_objectclass = groupOfNames
# tenant_domain_id_attribute = businessCategory
# tenant_id_attribute = cn
# tenant_member_attribute = member
# tenant_name_attribute = ou
# tenant_desc_attribute = desc
# tenant_enabled_attribute = enabled
# tenant_attribute_ignore =
# tenant_allow_create = True
# tenant_allow_update = True
# tenant_allow_delete = True
# tenant_enabled_emulation = False
# tenant_enabled_emulation_dn =
# role_tree_dn = ou=Roles,dc=example,dc=com
# role_filter =
# role_objectclass = organizationalRole
# role_id_attribute = cn
# role_name_attribute = ou
# role_member_attribute = roleOccupant
# role_attribute_ignore =
# role_allow_create = True
# role_allow_update = True
# role_allow_delete = True
# group_tree_dn =
# group_filter =
# group_objectclass = groupOfNames
# group_id_attribute = cn
# group_name_attribute = ou
# group_member_attribute = member
# group_desc_attribute = desc
# group_attribute_ignore =
# group_allow_create = True
# group_allow_update = True
# group_allow_delete = True
[auth]
methods = password,token
password = keystone.auth.plugins.password.Password
token = keystone.auth.plugins.token.Token
[filter:debug]
paste.filter_factory = keystone.common.wsgi:Debug.factory
[filter:token_auth]
paste.filter_factory = keystone.middleware:TokenAuthMiddleware.factory
[filter:admin_token_auth]
paste.filter_factory = keystone.middleware:AdminTokenAuthMiddleware.factory
[filter:xml_body]
paste.filter_factory = keystone.middleware:XmlBodyMiddleware.factory
[filter:json_body]
paste.filter_factory = keystone.middleware:JsonBodyMiddleware.factory
[filter:user_crud_extension]
paste.filter_factory = keystone.contrib.user_crud:CrudExtension.factory
[filter:crud_extension]
paste.filter_factory = keystone.contrib.admin_crud:CrudExtension.factory
[filter:ec2_extension]
paste.filter_factory = keystone.contrib.ec2:Ec2Extension.factory
[filter:s3_extension]
paste.filter_factory = keystone.contrib.s3:S3Extension.factory
[filter:url_normalize]
paste.filter_factory = keystone.middleware:NormalizingFilter.factory
[filter:sizelimit]
paste.filter_factory = keystone.middleware:RequestBodySizeLimiter.factory
[filter:stats_monitoring]
paste.filter_factory = keystone.contrib.stats:StatsMiddleware.factory
[filter:stats_reporting]
paste.filter_factory = keystone.contrib.stats:StatsExtension.factory
[filter:access_log]
paste.filter_factory = keystone.contrib.access:AccessLogMiddleware.factory
[app:public_service]
paste.app_factory = keystone.service:public_app_factory
[app:service_v3]
paste.app_factory = keystone.service:v3_app_factory
[app:admin_service]
paste.app_factory = keystone.service:admin_app_factory
[pipeline:public_api]
pipeline = access_log sizelimit stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug ec2_extension user_crud_extension public_service
[pipeline:admin_api]
pipeline = access_log sizelimit stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug stats_reporting ec2_extension s3_extension crud_extension admin_service
[pipeline:api_v3]
pipeline = access_log sizelimit stats_monitoring url_normalize token_auth admin_token_auth xml_body json_body debug stats_reporting ec2_extension s3_extension service_v3
[app:public_version_service]
paste.app_factory = keystone.service:public_version_app_factory
[app:admin_version_service]
paste.app_factory = keystone.service:admin_version_app_factory
[pipeline:public_version_api]
pipeline = access_log sizelimit stats_monitoring url_normalize xml_body public_version_service
[pipeline:admin_version_api]
pipeline = access_log sizelimit stats_monitoring url_normalize xml_body admin_version_service
[composite:main]
use = egg:Paste#urlmap
/v2.0 = public_api
/v3 = api_v3
/ = public_version_api
[composite:admin]
use = egg:Paste#urlmap
/v2.0 = admin_api
/v3 = api_v3
/ = admin_version_api
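Keystone needs its database created and synced before the admin_token can be used to bootstrap tenants and users; a minimal sequence (the keystone/keystone credentials match the [sql] connection line above):
mysql -u root -p -e "CREATE DATABASE keystone;"
mysql -u root -p -e "GRANT ALL ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'keystone';"
keystone-manage db_sync
service keystone restart
export OS_SERVICE_TOKEN=password
export OS_SERVICE_ENDPOINT=http://192.168.100.1:35357/v2.0/
keystone tenant-list       # the admin_token works before any users exist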
/etc/cinder/cinder.conf
[DEFAULT]
rootwrap_config = /etc/cinder/rootwrap.conf
api_paste_confg = /etc/cinder/api-paste.ini
iscsi_helper = tgtadm
volume_name_template = volume-%s
volume_group = cinder-volumes
verbose = True
auth_strategy = keystone
state_path = /var/lib/cinder
lock_path = /var/lock/cinder
volumes_dir = /var/lib/cinder/volumes
sql_connection = mysql://cinder:cinder@localhost/cinder
rabbit_host = 192.168.100.1
rabbit_port = 5672
rabbit_userid = guest
rabbit_password = password
rabbit_virtual_host = /
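cinder-volume expects the LVM volume group named in volume_group to exist, and the cinder database and user to match sql_connection. A sketch using /dev/sdb as a placeholder for whatever spare disk is available:
mysql -u root -p -e "CREATE DATABASE cinder;"
mysql -u root -p -e "GRANT ALL ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY 'cinder';"
pvcreate /dev/sdb                  # placeholder device; use your own spare disk
vgcreate cinder-volumes /dev/sdb   # name must match volume_group above
cinder-manage db sync
service cinder-api restart
service cinder-scheduler restart
service cinder-volume restart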
/etc/cinder/api-paste.ini
#############
# OpenStack #
#############
[composite:osapi_volume]
use = call:cinder.api:root_app_factory
/: apiversions
/v1: openstack_volume_api_v1
/v2: openstack_volume_api_v2
[composite:openstack_volume_api_v1]
use = call:cinder.api.middleware.auth:pipeline_factory
noauth = faultwrap sizelimit noauth apiv1
keystone = faultwrap sizelimit authtoken keystonecontext apiv1
keystone_nolimit = faultwrap sizelimit authtoken keystonecontext apiv1
[composite:openstack_volume_api_v2]
use = call:cinder.api.middleware.auth:pipeline_factory
noauth = faultwrap sizelimit noauth apiv2
keystone = faultwrap sizelimit authtoken keystonecontext apiv2
keystone_nolimit = faultwrap sizelimit authtoken keystonecontext apiv2
[filter:faultwrap]
paste.filter_factory = cinder.api.middleware.fault:FaultWrapper.factory
[filter:noauth]
paste.filter_factory = cinder.api.middleware.auth:NoAuthMiddleware.factory
[filter:sizelimit]
paste.filter_factory = cinder.api.middleware.sizelimit:RequestBodySizeLimiter.factory
[app:apiv1]
paste.app_factory = cinder.api.v1.router:APIRouter.factory
[app:apiv2]
paste.app_factory = cinder.api.v2.router:APIRouter.factory
[pipeline:apiversions]
pipeline = faultwrap osvolumeversionapp
[app:osvolumeversionapp]
paste.app_factory = cinder.api.versions:Versions.factory
##########
# Shared #
##########
[filter:keystonecontext]
paste.filter_factory = cinder.api.middleware.auth:CinderKeystoneContext.factory
[filter:authtoken]
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
service_protocol = http
#service_host = 127.0.0.1
service_host = 192.168.100.1
service_port = 5000
#auth_host = 127.0.0.1
auth_host = 192.168.100.1
auth_port = 35357
auth_protocol = http
#admin_tenant_name = %SERVICE_TENANT_NAME%
#admin_user = %SERVICE_USER%
#admin_password = %SERVICE_PASSWORD%
admin_tenant_name = service
admin_user = cinder
admin_password = password
signing_dir = /var/lib/cinder
/etc/glance/glance-api.conf
[DEFAULT]
# Show more verbose log output (sets INFO log level output)
#verbose = False
# Show debugging output in logs (sets DEBUG log level output)
#debug = False
# Which backend scheme should Glance use by default if one is not specified
# in a request to add a new image to Glance? Known schemes are determined
# by the known_stores option below.
# Default: ‘file’
default_store = file
# List of which store classes and store class locations are
# currently known to glance at startup.
#known_stores = glance.store.filesystem.Store,
# glance.store.http.Store,
# glance.store.rbd.Store,
# glance.store.s3.Store,
# glance.store.swift.Store,
# Maximum image size (in bytes) that may be uploaded through the
# Glance API server. Defaults to 1 TB.
# WARNING: this value should only be increased after careful consideration
# and must be set to a value under 8 EB (9223372036854775808).
#image_size_cap = 1099511627776
# Address to bind the API server
bind_host = 0.0.0.0
# Port to bind the API server to
bind_port = 9292
# Log to this file. Make sure you do not set the same log
# file for both the API and registry servers!
log_file = /var/log/glance/api.log
# Backlog requests when creating socket
backlog = 4096
# TCP_KEEPIDLE value in seconds when creating socket.
# Not supported on OS X.
#tcp_keepidle = 600
# SQLAlchemy connection string for the reference implementation
# registry server. Any valid SQLAlchemy connection string is fine.
# See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine
#sql_connection = sqlite:////var/lib/glance/glance.sqlite
sql_connection = mysql://glance:glance@localhost/glance
# Period in seconds after which SQLAlchemy should reestablish its connection
# to the database.
#
# MySQL uses a default `wait_timeout` of 8 hours, after which it will drop
# idle connections. This can result in ‘MySQL Gone Away’ exceptions. If you
# notice this, you can lower this value to ensure that SQLAlchemy reconnects
# before MySQL can drop the connection.
sql_idle_timeout = 3600
# Number of Glance API worker processes to start.
# On machines with more than one CPU increasing this value
# may improve performance (especially if using SSL with
# compression turned on). It is typically recommended to set
# this value to the number of CPUs present on your machine.
workers = 1
# Role used to identify an authenticated user as administrator
#admin_role = admin
# Allow unauthenticated users to access the API with read-only
# privileges. This only applies when using ContextMiddleware.
#allow_anonymous_access = False
# Allow access to version 1 of glance api
#enable_v1_api = True
# Allow access to version 2 of glance api
#enable_v2_api = True
# Return the URL that references where the data is stored on
# the backend storage system. For example, if using the
# file system store a URL of ‘file:///path/to/image’ will
# be returned to the user in the ‘direct_url’ meta-data field.
# The default value is false.
#show_image_direct_url = False
# ================= Syslog Options ============================
# Send logs to syslog (/dev/log) instead of to file specified
# by `log_file`
#use_syslog = False
# Facility to use. If unset defaults to LOG_USER.
#syslog_log_facility = LOG_LOCAL0
# ================= SSL Options ===============================
# Certificate file to use when starting API server securely
#cert_file = /path/to/certfile
# Private key file to use when starting API server securely
#key_file = /path/to/keyfile
# CA certificate file to use to verify connecting clients
#ca_file = /path/to/cafile
# ================= Security Options ==========================
# AES key for encrypting store ‘location’ metadata, including
# -- if used -- Swift or S3 credentials
# Should be set to a random string of length 16, 24 or 32 bytes
#metadata_encryption_key = <16, 24 or 32 char registry metadata key>
# ============ Registry Options ===============================
# Address to find the registry server
registry_host = 0.0.0.0
# Port the registry server is listening on
registry_port = 9191
# What protocol to use when connecting to the registry server?
# Set to https for secure HTTP communication
registry_client_protocol = http
# The path to the key file to use in SSL connections to the
# registry server, if any. Alternately, you may set the
# GLANCE_CLIENT_KEY_FILE environ variable to a filepath of the key file
#registry_client_key_file = /path/to/key/file
# The path to the cert file to use in SSL connections to the
# registry server, if any. Alternately, you may set the
# GLANCE_CLIENT_CERT_FILE environ variable to a filepath of the cert file
#registry_client_cert_file = /path/to/cert/file
# The path to the certifying authority cert file to use in SSL connections
# to the registry server, if any. Alternately, you may set the
# GLANCE_CLIENT_CA_FILE environ variable to a filepath of the CA cert file
#registry_client_ca_file = /path/to/ca/file
# When using SSL in connections to the registry server, do not require
# validation via a certifying authority. This is the registry’s equivalent of
# specifying --insecure on the command line using glanceclient for the API
# Default: False
#registry_client_insecure = False
# The period of time, in seconds, that the API server will wait for a registry
# request to complete. A value of ‘0’ implies no timeout.
# Default: 600
#registry_client_timeout = 600
# Whether to automatically create the database tables.
# Default: False
#db_auto_create = False
# ============ Notification System Options =====================
# Notifications can be sent when images are created, updated or deleted.
# There are three methods of sending notifications, logging (via the
# log_file directive), rabbit (via a rabbitmq queue), qpid (via a Qpid
# message queue), or noop (no notifications sent, the default)
notifier_strategy = noop
# Configuration options if sending notifications via rabbitmq (these are
# the defaults)
rabbit_host = localhost
rabbit_port = 5672
rabbit_use_ssl = false
rabbit_userid = guest
rabbit_password = guest
rabbit_virtual_host = /
rabbit_notification_exchange = glance
rabbit_notification_topic = notifications
rabbit_durable_queues = False
# Configuration options if sending notifications via Qpid (these are
# the defaults)
qpid_notification_exchange = glance
qpid_notification_topic = notifications
qpid_host = localhost
qpid_port = 5672
qpid_username =
qpid_password =
qpid_sasl_mechanisms =
qpid_reconnect_timeout = 0
qpid_reconnect_limit = 0
qpid_reconnect_interval_min = 0
qpid_reconnect_interval_max = 0
qpid_reconnect_interval = 0
qpid_heartbeat = 5
# Set to ‘ssl’ to enable SSL
qpid_protocol = tcp
qpid_tcp_nodelay = True
# ============ Filesystem Store Options ========================
# Directory that the Filesystem backend store
# writes image data to
filesystem_store_datadir = /var/lib/glance/images/
# ============ Swift Store Options =============================
# Version of the authentication service to use
# Valid versions are ‘2’ for keystone and ‘1’ for swauth and rackspace
swift_store_auth_version = 2
# Address where the Swift authentication service lives
# Valid schemes are ‘http://’ and ‘https://’
# If no scheme specified, default to ‘https://’
# For swauth, use something like ‘127.0.0.1:8080/v1.0/’
swift_store_auth_address = 127.0.0.1:5000/v2.0/
# User to authenticate against the Swift authentication service
# If you use Swift authentication service, set it to ‘account’:’user’
# where ‘account’ is a Swift storage account and ‘user’
# is a user in that account
swift_store_user = jdoe:jdoe
# Auth key for the user authenticating against the
# Swift authentication service
swift_store_key = a86850deb2742ec3cb41518e26aa2d89
# Container within the account that the account should use
# for storing images in Swift
swift_store_container = glance
# Do we create the container if it does not exist?
swift_store_create_container_on_put = False
# What size, in MB, should Glance start chunking image files
# and do a large object manifest in Swift? By default, this is
# the maximum object size in Swift, which is 5GB
swift_store_large_object_size = 5120
# When doing a large object manifest, what size, in MB, should
# Glance write chunks to Swift? This amount of data is written
# to a temporary disk buffer during the process of chunking
# the image file, and the default is 200MB
swift_store_large_object_chunk_size = 200
# Whether to use ServiceNET to communicate with the Swift storage servers.
# (If you aren’t RACKSPACE, leave this False!)
#
# To use ServiceNET for authentication, prefix hostname of
# `swift_store_auth_address` with ‘snet-‘.
# Ex. https://example.com/v1.0/ -> https://snet-example.com/v1.0/
swift_enable_snet = False
# If set to True enables multi-tenant storage mode which causes Glance images
# to be stored in tenant specific Swift accounts.
#swift_store_multi_tenant = False
# A list of swift ACL strings that will be applied as both read and
# write ACLs to the containers created by Glance in multi-tenant
# mode. This grants the specified tenants/users read and write access
# to all newly created image objects. The standard swift ACL string
# formats are allowed, including:
# <tenant_id>:<username>
# <tenant_name>:<username>
# *:<username>
# Multiple ACLs can be combined using a comma separated list, for
# example: swift_store_admin_tenants = service:glance,*:admin
#swift_store_admin_tenants =
# The region of the swift endpoint to be used for single tenant. This setting
# is only necessary if the tenant has multiple swift endpoints.
#swift_store_region =
# ============ S3 Store Options =============================
# Address where the S3 authentication service lives
# Valid schemes are ‘http://’ and ‘https://’
# If no scheme specified, default to ‘http://’
s3_store_host = 127.0.0.1:8080/v1.0/
# User to authenticate against the S3 authentication service
s3_store_access_key = <20-char AWS access key>
# Auth key for the user authenticating against the
# S3 authentication service
s3_store_secret_key = <40-char AWS secret key>
# Container within the account that the account should use
# for storing images in S3. Note that S3 has a flat namespace,
# so you need a unique bucket name for your glance images. An
# easy way to do this is append your AWS access key to “glance”.
# S3 buckets in AWS *must* be lowercased, so remember to lowercase
# your AWS access key if you use it in your bucket name below!
s3_store_bucket = <lowercased 20-char aws access key>glance
# Do we create the bucket if it does not exist?
s3_store_create_bucket_on_put = False
# When sending images to S3, the data will first be written to a
# temporary buffer on disk. By default the platform’s temporary directory
# will be used. If required, an alternative directory can be specified here.
#s3_store_object_buffer_dir = /path/to/dir
# When forming a bucket url, boto will either set the bucket name as the
# subdomain or as the first token of the path. Amazon’s S3 service will
# accept it as the subdomain, but Swift’s S3 middleware requires it be
# in the path. Set this to ‘path’ or ‘subdomain’ – defaults to ‘subdomain’.
#s3_store_bucket_url_format = subdomain
# ============ RBD Store Options =============================
# Ceph configuration file path
# If using cephx authentication, this file should
# include a reference to the right keyring
# in a client.<USER> section
rbd_store_ceph_conf = /etc/ceph/ceph.conf
# RADOS user to authenticate as (only applicable if using cephx)
rbd_store_user = glance
# RADOS pool in which images are stored
rbd_store_pool = images
# Images will be chunked into objects of this size (in megabytes).
# For best performance, this should be a power of two
rbd_store_chunk_size = 8
# ============ Delayed Delete Options =============================
# Turn on/off delayed delete
delayed_delete = False
# Delayed delete time in seconds
scrub_time = 43200
# Directory that the scrubber will use to remind itself of what to delete
# Make sure this is also set in glance-scrubber.conf
scrubber_datadir = /var/lib/glance/scrubber
# =============== Image Cache Options =============================
# Base directory that the Image Cache uses
image_cache_dir = /var/lib/glance/image-cache/
[keystone_authtoken]
auth_host = 127.0.0.1
auth_port = 35357
auth_protocol = http
#admin_tenant_name = %SERVICE_TENANT_NAME%
#admin_user = %SERVICE_USER%
#admin_password = %SERVICE_PASSWORD%
admin_tenant_name = service
admin_user = glance
admin_password = password
[paste_deploy]
# Name of the paste configuration file that defines the available pipelines
#config_file = glance-api-paste.ini
# Partial name of a pipeline in your paste configuration file with the
# service name removed. For example, if your paste section name is
# [pipeline:glance-api-keystone], you would configure the flavor below
# as ‘keystone’.
#flavor=
flavor=keystone
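As with the other services, the glance database and user referenced by sql_connection have to exist before syncing and restarting; roughly:
mysql -u root -p -e "CREATE DATABASE glance;"
mysql -u root -p -e "GRANT ALL ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'glance';"
glance-manage db_sync
service glance-api restart
service glance-registry restart
glance image-list                  # should come back empty, without errors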
/etc/glance/glance-registry.conf
[DEFAULT]
# Show more verbose log output (sets INFO log level output)
#verbose = False
# Show debugging output in logs (sets DEBUG log level output)
#debug = False
# Which backend scheme should Glance use by default is not specified
# in a request to add a new image to Glance? Known schemes are determined
# by the known_stores option below.
# Default: ‘file’
default_store = file
# List of which store classes and store class locations are
# currently known to glance at startup.
#known_stores = glance.store.filesystem.Store,
# glance.store.http.Store,
# glance.store.rbd.Store,
# glance.store.s3.Store,
# glance.store.swift.Store,
# Maximum image size (in bytes) that may be uploaded through the
# Glance API server. Defaults to 1 TB.
# WARNING: this value should only be increased after careful consideration
# and must be set to a value under 8 EB (9223372036854775808).
#image_size_cap = 1099511627776
# Address to bind the API server
bind_host = 0.0.0.0
# Port the bind the API server to
bind_port = 9292
# Log to this file. Make sure you do not set the same log
# file for both the API and registry servers!
log_file = /var/log/glance/api.log
# Backlog requests when creating socket
backlog = 4096
# TCP_KEEPIDLE value in seconds when creating socket.
# Not supported on OS X.
#tcp_keepidle = 600
# SQLAlchemy connection string for the reference implementation
# registry server. Any valid SQLAlchemy connection string is fine.
# See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine
#sql_connection = sqlite:////var/lib/glance/glance.sqlite
sql_connection = mysql://glance:glance@localhost/glance
# Period in seconds after which SQLAlchemy should reestablish its connection
# to the database.
#
# MySQL uses a default `wait_timeout` of 8 hours, after which it will drop
# idle connections. This can result in ‘MySQL Gone Away’ exceptions. If you
# notice this, you can lower this value to ensure that SQLAlchemy reconnects
# before MySQL can drop the connection.
sql_idle_timeout = 3600
# Number of Glance API worker processes to start.
# On machines with more than one CPU increasing this value
# may improve performance (especially if using SSL with
# compression turned on). It is typically recommended to set
# this value to the number of CPUs present on your machine.
workers = 1
# Role used to identify an authenticated user as administrator
#admin_role = admin
# Allow unauthenticated users to access the API with read-only
# privileges. This only applies when using ContextMiddleware.
#allow_anonymous_access = False
# Allow access to version 1 of glance api
#enable_v1_api = True
# Allow access to version 2 of glance api
#enable_v2_api = True
# Return the URL that references where the data is stored on
# the backend storage system. For example, if using the
# file system store a URL of ‘file:///path/to/image’ will
# be returned to the user in the ‘direct_url’ meta-data field.
# The default value is false.
#show_image_direct_url = False
# ================= Syslog Options ============================
# Send logs to syslog (/dev/log) instead of to file specified
# by `log_file`
#use_syslog = False
# Facility to use. If unset defaults to LOG_USER.
#syslog_log_facility = LOG_LOCAL0
# ================= SSL Options ===============================
# Certificate file to use when starting API server securely
#cert_file = /path/to/certfile
# Private key file to use when starting API server securely
#key_file = /path/to/keyfile
# CA certificate file to use to verify connecting clients
#ca_file = /path/to/cafile
# ================= Security Options ==========================
# AES key for encrypting store ‘location’ metadata, including
# — if used — Swift or S3 credentials
# Should be set to a random string of length 16, 24 or 32 bytes
#metadata_encryption_key = <16, 24 or 32 char registry metadata key>
# ============ Registry Options ===============================
# Address to find the registry server
registry_host = 0.0.0.0
# Port the registry server is listening on
registry_port = 9191
# What protocol to use when connecting to the registry server?
# Set to https for secure HTTP communication
registry_client_protocol = http
# The path to the key file to use in SSL connections to the
# registry server, if any. Alternately, you may set the
# GLANCE_CLIENT_KEY_FILE environ variable to a filepath of the key file
#registry_client_key_file = /path/to/key/file
# The path to the cert file to use in SSL connections to the
# registry server, if any. Alternately, you may set the
# GLANCE_CLIENT_CERT_FILE environ variable to a filepath of the cert file
#registry_client_cert_file = /path/to/cert/file
# The path to the certifying authority cert file to use in SSL connections
# to the registry server, if any. Alternately, you may set the
# GLANCE_CLIENT_CA_FILE environ variable to a filepath of the CA cert file
#registry_client_ca_file = /path/to/ca/file
# When using SSL in connections to the registry server, do not require
# validation via a certifying authority. This is the registry’s equivalent of
# specifying –insecure on the command line using glanceclient for the API
# Default: False
#registry_client_insecure = False
# The period of time, in seconds, that the API server will wait for a registry
# request to complete. A value of ‘0’ implies no timeout.
# Default: 600
#registry_client_timeout = 600
# Whether to automatically create the database tables.
# Default: False
#db_auto_create = False
# ============ Notification System Options =====================
# Notifications can be sent when images are create, updated or deleted.
# There are three methods of sending notifications, logging (via the
# log_file directive), rabbit (via a rabbitmq queue), qpid (via a Qpid
# message queue), or noop (no notifications sent, the default)
notifier_strategy = noop
# Configuration options if sending notifications via rabbitmq (these are
# the defaults)
rabbit_host = localhost
rabbit_port = 5672
rabbit_use_ssl = false
rabbit_userid = guest
rabbit_password = guest
rabbit_virtual_host = /
rabbit_notification_exchange = glance
rabbit_notification_topic = notifications
rabbit_durable_queues = False
# Configuration options if sending notifications via Qpid (these are
# the defaults)
qpid_notification_exchange = glance
qpid_notification_topic = notifications
qpid_host = localhost
qpid_port = 5672
qpid_username =
qpid_password =
qpid_sasl_mechanisms =
qpid_reconnect_timeout = 0
qpid_reconnect_limit = 0
qpid_reconnect_interval_min = 0
qpid_reconnect_interval_max = 0
qpid_reconnect_interval = 0
qpid_heartbeat = 5
# Set to ‘ssl’ to enable SSL
qpid_protocol = tcp
qpid_tcp_nodelay = True
# ============ Filesystem Store Options ========================
# Directory that the Filesystem backend store
# writes image data to
filesystem_store_datadir = /var/lib/glance/images/
# ============ Swift Store Options =============================
# Version of the authentication service to use
# Valid versions are ‘2’ for keystone and ‘1’ for swauth and rackspace
swift_store_auth_version = 2
# Address where the Swift authentication service lives
# Valid schemes are ‘http://’ and ‘https://’
# If no scheme specified, default to ‘https://’
# For swauth, use something like ‘127.0.0.1:8080/v1.0/’
swift_store_auth_address = 127.0.0.1:5000/v2.0/
# User to authenticate against the Swift authentication service
# If you use Swift authentication service, set it to ‘account’:’user’
# where ‘account’ is a Swift storage account and ‘user’
# is a user in that account
swift_store_user = jdoe:jdoe
# Auth key for the user authenticating against the
# Swift authentication service
swift_store_key = a86850deb2742ec3cb41518e26aa2d89
# Container within the account that the account should use
# for storing images in Swift
swift_store_container = glance
# Do we create the container if it does not exist?
swift_store_create_container_on_put = False
# What size, in MB, should Glance start chunking image files
# and do a large object manifest in Swift? By default, this is
# the maximum object size in Swift, which is 5GB
swift_store_large_object_size = 5120
# When doing a large object manifest, what size, in MB, should
# Glance write chunks to Swift? This amount of data is written
# to a temporary disk buffer during the process of chunking
# the image file, and the default is 200MB
swift_store_large_object_chunk_size = 200
# Whether to use ServiceNET to communicate with the Swift storage servers.
# (If you aren’t RACKSPACE, leave this False!)
#
# To use ServiceNET for authentication, prefix hostname of
# `swift_store_auth_address` with ‘snet-‘.
# Ex. https://example.com/v1.0/ -> https://snet-example.com/v1.0/
swift_enable_snet = False
# If set to True enables multi-tenant storage mode which causes Glance images
# to be stored in tenant specific Swift accounts.
#swift_store_multi_tenant = False
# A list of swift ACL strings that will be applied as both read and
# write ACLs to the containers created by Glance in multi-tenant
# mode. This grants the specified tenants/users read and write access
# to all newly created image objects. The standard swift ACL string
# formats are allowed, including:
# <tenant_id>:<username>
# <tenant_name>:<username>
# *:<username>
# Multiple ACLs can be combined using a comma separated list, for
# example: swift_store_admin_tenants = service:glance,*:admin
#swift_store_admin_tenants =
# The region of the swift endpoint to be used for single tenant. This setting
# is only necessary if the tenant has multiple swift endpoints.
#swift_store_region =
# ============ S3 Store Options =============================
# Address where the S3 authentication service lives
# Valid schemes are ‘http://’ and ‘https://’
# If no scheme specified, default to ‘http://’
s3_store_host = 127.0.0.1:8080/v1.0/
# User to authenticate against the S3 authentication service
s3_store_access_key = <20-char AWS access key>
# Auth key for the user authenticating against the
# S3 authentication service
s3_store_secret_key = <40-char AWS secret key>
# Container within the account that the account should use
# for storing images in S3. Note that S3 has a flat namespace,
# so you need a unique bucket name for your glance images. An
# easy way to do this is append your AWS access key to “glance”.
# S3 buckets in AWS *must* be lowercased, so remember to lowercase
# your AWS access key if you use it in your bucket name below!
s3_store_bucket = <lowercased 20-char aws access key>glance
# Do we create the bucket if it does not exist?
s3_store_create_bucket_on_put = False
# When sending images to S3, the data will first be written to a
# temporary buffer on disk. By default the platform’s temporary directory
# will be used. If required, an alternative directory can be specified here.
#s3_store_object_buffer_dir = /path/to/dir
# When forming a bucket url, boto will either set the bucket name as the
# subdomain or as the first token of the path. Amazon’s S3 service will
# accept it as the subdomain, but Swift’s S3 middleware requires it be
# in the path. Set this to ‘path’ or ‘subdomain’ – defaults to ‘subdomain’.
#s3_store_bucket_url_format = subdomain
# ============ RBD Store Options =============================
# Ceph configuration file path
# If using cephx authentication, this file should
# include a reference to the right keyring
# in a client.<USER> section
rbd_store_ceph_conf = /etc/ceph/ceph.conf
# RADOS user to authenticate as (only applicable if using cephx)
rbd_store_user = glance
# RADOS pool in which images are stored
rbd_store_pool = images
# Images will be chunked into objects of this size (in megabytes).
# For best performance, this should be a power of two
rbd_store_chunk_size = 8
# ============ Delayed Delete Options =============================
# Turn on/off delayed delete
delayed_delete = False
# Delayed delete time in seconds
scrub_time = 43200
# Directory that the scrubber will use to remind itself of what to delete
# Make sure this is also set in glance-scrubber.conf
scrubber_datadir = /var/lib/glance/scrubber
# =============== Image Cache Options =============================
# Base directory that the Image Cache uses
image_cache_dir = /var/lib/glance/image-cache/
[keystone_authtoken]
auth_host = 127.0.0.1
auth_port = 35357
auth_protocol = http
#admin_tenant_name = %SERVICE_TENANT_NAME%
#admin_user = %SERVICE_USER%
#admin_password = %SERVICE_PASSWORD%
admin_tenant_name = service
admin_user = glance
admin_password = password
[paste_deploy]
# Name of the paste configuration file that defines the available pipelines
#config_file = glance-api-paste.ini
# Partial name of a pipeline in your paste configuration file with the
# service name removed. For example, if your paste section name is
# [pipeline:glance-api-keystone], you would configure the flavor below
# as 'keystone'.
#flavor=
flavor=keystone
/etc/glance/glance-registry.conf
[DEFAULT]
# Show more verbose log output (sets INFO log level output)
#verbose = False
# Show debugging output in logs (sets DEBUG log level output)
#debug = False
# Address to bind the registry server
bind_host = 0.0.0.0
# Port to bind the registry server to
bind_port = 9191
# Log to this file. Make sure you do not set the same log
# file for both the API and registry servers!
log_file = /var/log/glance/registry.log
# Backlog requests when creating socket
backlog = 4096
# TCP_KEEPIDLE value in seconds when creating socket.
# Not supported on OS X.
#tcp_keepidle = 600
# SQLAlchemy connection string for the reference implementation
# registry server. Any valid SQLAlchemy connection string is fine.
# See: http://www.sqlalchemy.org/docs/05/reference/sqlalchemy/connections.html#sqlalchemy.create_engine
#sql_connection = sqlite:////var/lib/glance/glance.sqlite
sql_connection = mysql://glance:glance@localhost/glance
# Period in seconds after which SQLAlchemy should reestablish its connection
# to the database.
#
# MySQL uses a default `wait_timeout` of 8 hours, after which it will drop
# idle connections. This can result in 'MySQL Gone Away' exceptions. If you
# notice this, you can lower this value to ensure that SQLAlchemy reconnects
# before MySQL can drop the connection.
sql_idle_timeout = 3600
# Limit the api to return `param_limit_max` items in a call to a container. If
# a larger `limit` query param is provided, it will be reduced to this value.
api_limit_max = 1000
# If a `limit` query param is not provided in an api request, it will
# default to `limit_param_default`
limit_param_default = 25
# Role used to identify an authenticated user as administrator
#admin_role = admin
# Whether to automatically create the database tables.
# Default: False
#db_auto_create = False
# ================= Syslog Options ============================
# Send logs to syslog (/dev/log) instead of to file specified
# by `log_file`
#use_syslog = False
# Facility to use. If unset defaults to LOG_USER.
#syslog_log_facility = LOG_LOCAL1
# ================= SSL Options ===============================
# Certificate file to use when starting registry server securely
#cert_file = /path/to/certfile
# Private key file to use when starting registry server securely
#key_file = /path/to/keyfile
# CA certificate file to use to verify connecting clients
#ca_file = /path/to/cafile
[keystone_authtoken]
auth_host = 127.0.0.1
auth_port = 35357
auth_protocol = http
#admin_tenant_name = %SERVICE_TENANT_NAME%
#admin_user = %SERVICE_USER%
#admin_password = %SERVICE_PASSWORD%
admin_tenant_name = service
admin_user = glance
admin_password = password
[paste_deploy]
# Name of the paste configuration file that defines the available pipelines
#config_file = glance-registry-paste.ini
# Partial name of a pipeline in your paste configuration file with the
# service name removed. For example, if your paste section name is
# [pipeline:glance-registry-keystone], you would configure the flavor below
# as 'keystone'.
#flavor=
flavor=keystone
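With both Glance files edited, the database should be synced once and the services restarted before moving on. A quick sanity check, assuming the glance MySQL user configured above exists and admin credentials are exported in the shell:
glance-manage db_sync
service glance-api restart
service glance-registry restart
glance image-list    # should return an empty list with no auth errors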
2. Network Controller Node
/etc/network/interfaces
# This file describes the network interfaces available on your system
# and how to activate them. For more information, see interfaces(5).
# The loopback network interface
auto lo
iface lo inet loopback
# The primary network interface
auto br-ex
iface br-ex inet static
address 59.29.142.88
netmask 255.255.255.0
gateway 59.29.142.254
auto eth0
iface eth0 inet manual
up ip address add 0/0 dev $IFACE
up ip link set $IFACE up
down ip link set $IFACE down
auto br-int
iface br-int inet static
address 192.168.100.2
netmask 255.255.255.0
network 192.168.100.0
broadcast 192.168.100.255
auto eth1
iface eth1 inet manual
up ip address add 0/0 dev $IFACE
up ip link set $IFACE up
down ip link set $IFACE down
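br-ex and br-int above are Open vSwitch bridges, so they must be created before this interfaces file can come up. A minimal sketch, assuming openvswitch-switch is already installed; eth0 carries the external traffic for br-ex:
ovs-vsctl add-br br-int
ovs-vsctl add-br br-ex
ovs-vsctl add-port br-ex eth0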
/etc/quantum/quantum.conf
[DEFAULT]
verbose = True
lock_path = $state_path/lock
bind_host = 0.0.0.0
bind_port = 9696
core_plugin = quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2
api_paste_config = /etc/quantum/api-paste.ini
control_exchange = quantum
rabbit_password = password
rabbit_host = 192.168.100.1
notification_driver = quantum.openstack.common.notifier.rpc_notifier
#notification_driver = quantum.openstack.common.notifier.rabbit_notifier
default_notification_level = INFO
notification_topics = notifications
[QUOTAS]
[DEFAULT_SERVICETYPE]
[AGENT]
root_helper = sudo quantum-rootwrap /etc/quantum/rootwrap.conf
[keystone_authtoken]
auth_host = 192.168.100.1
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = quantum
admin_password = password
signing_dir = /var/lib/quantum/keystone-signing
/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini
[DATABASE]
sql_connection = mysql://quantum:quantum@192.168.100.1/quantum
reconnect_interval = 2
[OVS]
tenant_network_type = gre
tunnel_id_ranges = 1:1000
enable_tunneling = True
#integration_bridge = br-int
#tunnel_bridge = br-tun
local_ip = 192.168.100.2
#network_vlan_ranges = physnet1
#bridge_mappings = physnet1:br-int
[AGENT]
polling_interval = 2
[SECURITYGROUP]
firewall_driver = quantum.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
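After editing the plugin file, restart the OVS agent so it picks up the GRE/tunnel settings; the service name below assumes the Ubuntu Grizzly packages, and the agent should then create br-tun for tunneling:
service quantum-plugin-openvswitch-agent restart
ovs-vsctl show    # br-int and br-tun should both be listed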
/etc/quantum/api-paste.ini
[composite:quantum]
use = egg:Paste#urlmap
/: quantumversions
/v2.0: quantumapi_v2_0
[composite:quantumapi_v2_0]
use = call:quantum.auth:pipeline_factory
noauth = extensions quantumapiapp_v2_0
keystone = authtoken keystonecontext extensions quantumapiapp_v2_0
[filter:keystonecontext]
paste.filter_factory = quantum.auth:QuantumKeystoneContext.factory
[filter:authtoken]
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
[filter:extensions]
paste.filter_factory = quantum.api.extensions:plugin_aware_extension_middleware_factory
[app:quantumversions]
paste.app_factory = quantum.api.versions:Versions.factory
[app:quantumapiapp_v2_0]
paste.app_factory = quantum.api.v2.router:APIRouter.factory
/etc/quantum/dhcp_agent.ini
[DEFAULT]
debug = True
verbose = True
signing_dir = /var/cache/quantum
interface_driver = quantum.agent.linux.interface.OVSInterfaceDriver
dhcp_driver = quantum.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = True
enable_metadata_network = True
ovs_use_veth = True
use_namespaces = True
admin_tenant_name = service
admin_user = quantum
admin_password = password
auth_url = http://192.168.100.1:35357/v2.0
dhcp_agent_manager = quantum.agent.dhcp_agent.DhcpAgentWithStateReport
root_helper = quantum-rootwrap /etc/quantum/rootwrap.conf
state_path = /var/lib/quantum
/etc/quantum/l3_agent.ini
[DEFAULT]
debug = True
verbose = True
interface_driver = quantum.agent.linux.interface.OVSInterfaceDriver
auth_url = http://192.168.100.1:35357/v2.0
admin_tenant_name = service
admin_password = password
metadata_ip = 192.168.100.1
use_namespaces = True
external_network_bridge = br-ex
signing_dir = /var/cache/quantum
l3_agent_manager = quantum.agent.l3_agent.L3NATAgentWithStateReport
root_helper = quantum-rootwrap /etc/quantum/rootwrap.conf
/etc/quantum/metadata_agent.ini
[DEFAULT]
auth_url = http://192.168.100.1:35357/v2.0
auth_region = RegionOne
admin_tenant_name = service
admin_user = quantum
admin_password = password
nova_metadata_ip = 192.168.100.1
metadata_proxy_shared_secret = password
debug = True
nova_metadata_port = 8775
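Once the three agent files above are in place, restart the agents and check that they registered with the quantum server. Service names below assume the Ubuntu Grizzly packages, and quantum agent-list needs admin credentials exported:
service quantum-dhcp-agent restart
service quantum-l3-agent restart
service quantum-metadata-agent restart
quantum agent-list    # every agent should report alive = :-)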
3. Compute Node
/etc/network/interfaces
auto eth0
iface eth0 inet static
address 59.29.142.86
netmask 255.255.255.0
network 59.29.142.0
broadcast 59.29.142.255
auto br-int
iface br-int inet static
address 192.168.100.3
netmask 255.255.255.0
gateway 192.168.100.2
dns-nameservers 8.8.8.8
auto eth1
iface eth1 inet manual
up ip address add 0/0 dev $IFACE
up ip link set $IFACE up
down ip link set $IFACE down
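As on the network node, br-int here is an Open vSwitch bridge and must exist before this file is brought up; a minimal sketch, assuming openvswitch-switch is installed on the compute node:
ovs-vsctl add-br br-int
ifup br-int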
/etc/nova/api-paste.ini
############
# Metadata #
############
[composite:metadata]
use = egg:Paste#urlmap
/: meta
[pipeline:meta]
pipeline = ec2faultwrap logrequest metaapp
[app:metaapp]
paste.app_factory = nova.api.metadata.handler:MetadataRequestHandler.factory
#######
# EC2 #
#######
[composite:ec2]
use = egg:Paste#urlmap
/services/Cloud: ec2cloud
[composite:ec2cloud]
use = call:nova.api.auth:pipeline_factory
noauth = ec2faultwrap logrequest ec2noauth cloudrequest validator ec2executor
keystone = ec2faultwrap logrequest ec2keystoneauth cloudrequest validator ec2executor
[filter:ec2faultwrap]
paste.filter_factory = nova.api.ec2:FaultWrapper.factory
[filter:logrequest]
paste.filter_factory = nova.api.ec2:RequestLogging.factory
[filter:ec2lockout]
paste.filter_factory = nova.api.ec2:Lockout.factory
[filter:ec2keystoneauth]
paste.filter_factory = nova.api.ec2:EC2KeystoneAuth.factory
[filter:ec2noauth]
paste.filter_factory = nova.api.ec2:NoAuth.factory
[filter:cloudrequest]
controller = nova.api.ec2.cloud.CloudController
paste.filter_factory = nova.api.ec2:Requestify.factory
[filter:authorizer]
paste.filter_factory = nova.api.ec2:Authorizer.factory
[filter:validator]
paste.filter_factory = nova.api.ec2:Validator.factory
[app:ec2executor]
paste.app_factory = nova.api.ec2:Executor.factory
#############
# Openstack #
#############
[composite:osapi_compute]
use = call:nova.api.openstack.urlmap:urlmap_factory
/: oscomputeversions
/v1.1: openstack_compute_api_v2
/v2: openstack_compute_api_v2
[composite:openstack_compute_api_v2]
use = call:nova.api.auth:pipeline_factory
noauth = faultwrap sizelimit noauth ratelimit osapi_compute_app_v2
keystone = faultwrap sizelimit authtoken keystonecontext ratelimit osapi_compute_app_v2
keystone_nolimit = faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v2
[filter:faultwrap]
paste.filter_factory = nova.api.openstack:FaultWrapper.factory
[filter:noauth]
paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory
[filter:ratelimit]
paste.filter_factory = nova.api.openstack.compute.limits:RateLimitingMiddleware.factory
[filter:sizelimit]
paste.filter_factory = nova.api.sizelimit:RequestBodySizeLimiter.factory
[app:osapi_compute_app_v2]
paste.app_factory = nova.api.openstack.compute:APIRouter.factory
[pipeline:oscomputeversions]
pipeline = faultwrap oscomputeversionapp
[app:oscomputeversionapp]
paste.app_factory = nova.api.openstack.compute.versions:Versions.factory
##########
# Shared #
##########
[filter:keystonecontext]
paste.filter_factory = nova.api.auth:NovaKeystoneContext.factory
[filter:authtoken]
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
auth_host = 192.168.100.1
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = nova
admin_password = password
# signing_dir is configurable, but the default behavior of the authtoken
# middleware should be sufficient. It will create a temporary directory
# in the home directory for the user the nova process is running as.
#signing_dir = /var/lib/nova/keystone-signing
# Workaround for https://bugs.launchpad.net/nova/+bug/1154809
auth_version = v2.0
/etc/nova/nova-compute.conf
[DEFAULT]
libvirt_type=kvm
compute_driver=libvirt.LibvirtDriver
/etc/nova/nova.conf
[DEFAULT]
dhcpbridge_flagfile=/etc/nova/nova.conf
dhcpbridge=/usr/bin/nova-dhcpbridge
logdir=/var/log/nova
state_path=/var/lib/nova
lock_path=/var/lock/nova
force_dhcp_release=True
iscsi_helper=tgtadm
libvirt_use_virtio_for_bridges=True
connection_type=libvirt
root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
verbose=True
ec2_private_dns_show_ip=True
api_paste_config=/etc/nova/api-paste.ini
volumes_path=/var/lib/nova/volumes
enabled_apis=ec2,osapi_compute,metadata
# General
verbose=true
my_ip=192.168.100.3
rabbit_host=192.168.100.1
rabbit_password=password
auth_strategy=keystone
ec2_host=192.168.100.1
ec2_url=http://192.168.100.1:8773/services/Cloud
# Networking
libvirt_use_virtio_for_bridges=True
network_api_class=nova.network.quantumv2.api.API
quantum_url=http://192.168.100.1:9696
quantum_auth_strategy=keystone
quantum_admin_tenant_name=service
quantum_admin_username=quantum
quantum_admin_password=password
quantum_admin_auth_url=http://192.168.100.1:35357/v2.0
libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver
# Security Groups
firewall_driver=nova.virt.firewall.NoopFirewallDriver
security_group_api=quantum
# Compute
compute_driver=libvirt.LibvirtDriver
connection_type=libvirt
# Cinder
volume_api_class=nova.volume.cinder.API
# Glance
glance_api_servers=192.168.100.1:9292
image_service=nova.image.glance.GlanceImageService
# novnc
vnc_enabled=true
vncserver_proxyclient_address=192.168.100.3
novncproxy_base_url=http://59.29.142.9:6080/vnc_auto.html
vncserver_listen=0.0.0.0
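With nova.conf and nova-compute.conf in place, restart the compute service and confirm from the cloud controller that it registered; a minimal check (nova-manage runs on the controller):
service nova-compute restart    # on the compute node
nova-manage service list        # on the controller: nova-compute should show :-)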
/etc/quantum/quantum.conf
[DEFAULT]
# Default log level is INFO
# verbose and debug have the same result.
# One of them will set DEBUG log level output
# debug = False
# verbose = False
# Where to store Quantum state files. This directory must be writable by the
# user executing the agent.
# state_path = /var/lib/quantum
# Where to store lock files
lock_path = $state_path/lock
# log_format = %(asctime)s %(levelname)8s [%(name)s] %(message)s
# log_date_format = %Y-%m-%d %H:%M:%S
# use_syslog -> syslog
# log_file and log_dir -> log_dir/log_file
# (not log_file) and log_dir -> log_dir/{binary_name}.log
# use_stderr -> stderr
# (not use_stderr) and (not log_file) -> stdout
# publish_errors -> notification system
# use_syslog = False
# syslog_log_facility = LOG_USER
# use_stderr = True
# log_file =
# log_dir =
# publish_errors = False
# Address to bind the API server
bind_host = 0.0.0.0
# Port to bind the API server to
bind_port = 9696
# Path to the extensions. Note that this can be a colon-separated list of
# paths. For example:
# api_extensions_path = extensions:/path/to/more/extensions:/even/more/extensions
# The __path__ of quantum.extensions is appended to this, so if your
# extensions are in there you don't need to specify them here
# api_extensions_path =
# Quantum plugin provider module
# core_plugin =
core_plugin = quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2
# Advanced service modules
# service_plugins =
# Paste configuration file
api_paste_config = /etc/quantum/api-paste.ini
# The strategy to be used for auth.
# Supported values are 'keystone' (default), 'noauth'.
# auth_strategy = keystone
# Base MAC address. The first 3 octets will remain unchanged. If the
# 4th octet is not 00, it will also be used. The others will be
# randomly generated.
# 3 octet
# base_mac = fa:16:3e:00:00:00
# 4 octet
# base_mac = fa:16:3e:4f:00:00
# Maximum amount of retries to generate a unique MAC address
# mac_generation_retries = 16
# DHCP Lease duration (in seconds)
# dhcp_lease_duration = 120
# Allow sending resource operation notification to DHCP agent
# dhcp_agent_notification = True
# Enable or disable bulk create/update/delete operations
# allow_bulk = True
# Enable or disable pagination
# allow_pagination = False
# Enable or disable sorting
# allow_sorting = False
# Enable or disable overlapping IPs for subnets
# Attention: the following parameter MUST be set to False if Quantum is
# being used in conjunction with nova security groups and/or metadata service.
# allow_overlapping_ips = False
# Ensure that configured gateway is on subnet
# force_gateway_on_subnet = False
# RPC configuration options. Defined in rpc __init__
# The messaging module to use, defaults to kombu.
# rpc_backend = quantum.openstack.common.rpc.impl_kombu
# Size of RPC thread pool
# rpc_thread_pool_size = 64
# Size of RPC connection pool
# rpc_conn_pool_size = 30
# Seconds to wait for a response from call or multicall
# rpc_response_timeout = 60
# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
# rpc_cast_timeout = 30
# Modules of exceptions that are permitted to be recreated
# upon receiving exception data from an rpc call.
# allowed_rpc_exception_modules = quantum.openstack.common.exception, nova.exception
# AMQP exchange to connect to if using RabbitMQ or QPID
control_exchange = quantum
# If passed, use a fake RabbitMQ provider
# fake_rabbit = False
# Configuration options if sending notifications via kombu rpc (these are
# the defaults)
# SSL version to use (valid only if SSL enabled)
# kombu_ssl_version =
# SSL key file (valid only if SSL enabled)
# kombu_ssl_keyfile =
# SSL cert file (valid only if SSL enabled)
# kombu_ssl_certfile =
# SSL certification authority file (valid only if SSL enabled)
# kombu_ssl_ca_certs =
# IP address of the RabbitMQ installation
# rabbit_host = localhost
# Password of the RabbitMQ server
# rabbit_password = guest
# Port where RabbitMQ server is running/listening
# rabbit_port = 5672
# RabbitMQ single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
# rabbit_hosts is defaulted to '$rabbit_host:$rabbit_port'
# rabbit_hosts = localhost:5672
# User ID used for RabbitMQ connections
# rabbit_userid = guest
# Location of a virtual RabbitMQ installation.
# rabbit_virtual_host = /
# Maximum retries with trying to connect to RabbitMQ
# (the default of 0 implies an infinite retry count)
# rabbit_max_retries = 0
# RabbitMQ connection retry interval
# rabbit_retry_interval = 1
# Use HA queues in RabbitMQ (x-ha-policy: all). You need to
# wipe RabbitMQ database when changing this option. (boolean value)
# rabbit_ha_queues = false
# QPID
# rpc_backend=quantum.openstack.common.rpc.impl_qpid
# Qpid broker hostname
# qpid_hostname = localhost
# Qpid broker port
# qpid_port = 5672
# Qpid single or HA cluster (host:port pairs i.e: host1:5672, host2:5672)
# qpid_hosts is defaulted to '$qpid_hostname:$qpid_port'
# qpid_hosts = localhost:5672
# Username for qpid connection
# qpid_username = ''
# Password for qpid connection
# qpid_password = ''
# Space separated list of SASL mechanisms to use for auth
# qpid_sasl_mechanisms = ''
# Seconds between connection keepalive heartbeats
# qpid_heartbeat = 60
# Transport to use, either 'tcp' or 'ssl'
# qpid_protocol = tcp
# Disable Nagle algorithm
# qpid_tcp_nodelay = True
# ZMQ
# rpc_backend=quantum.openstack.common.rpc.impl_zmq
# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
# The "host" option should point or resolve to this address.
# rpc_zmq_bind_address = *
# ============ Notification System Options =====================
# Notifications can be sent when network/subnet/port are created, updated or deleted.
# There are three methods of sending notifications: logging (via the
# log_file directive), rpc (via a message queue) and
# noop (no notifications sent, the default)
# Notification_driver can be defined multiple times
# Do nothing driver
# notification_driver = quantum.openstack.common.notifier.no_op_notifier
# Logging driver
# notification_driver = quantum.openstack.common.notifier.log_notifier
# RPC driver. The DHCP agent needs it.
# notification_driver = quantum.openstack.common.notifier.rpc_notifier
notification_driver = quantum.openstack.common.notifier.rabbit_notifier
# default_notification_level is used to form actual topic name(s) or to set logging level
default_notification_level = INFO
# default_publisher_id is a part of the notification payload
# host = myhost.com
# default_publisher_id = $host
# Defined in rpc_notifier, can be comma separated values.
# The actual topic names will be %s.%(default_notification_level)s
notification_topics = notifications
# Default maximum number of items returned in a single response.
# value == 'infinite' or value < 0 means no max limit; otherwise the value
# must be greater than 0. If the number of items requested is greater than
# pagination_max_limit, the server will just return pagination_max_limit
# items.
# pagination_max_limit = -1
# Maximum number of DNS nameservers per subnet
# max_dns_nameservers = 5
# Maximum number of host routes per subnet
# max_subnet_host_routes = 20
# Maximum number of fixed ips per port
# max_fixed_ips_per_port = 5
# =========== items for agent management extension =============
# Seconds to regard the agent as down.
# agent_down_time = 5
# =========== end of items for agent management extension =====
# =========== items for agent scheduler extension =============
# Driver to use for scheduling network to DHCP agent
# network_scheduler_driver = quantum.scheduler.dhcp_agent_scheduler.ChanceScheduler
# Driver to use for scheduling router to a default L3 agent
# router_scheduler_driver = quantum.scheduler.l3_agent_scheduler.ChanceScheduler
# Allow auto scheduling networks to DHCP agent. It will schedule non-hosted
# networks to first DHCP agent which sends get_active_networks message to
# quantum server
# network_auto_schedule = True
# Allow auto scheduling routers to L3 agent. It will schedule non-hosted
# routers to first L3 agent which sends sync_routers message to quantum server
# router_auto_schedule = True
# =========== end of items for agent scheduler extension =====
rabbit_host = 192.168.100.1
rabbit_password = password
verbose = True
[QUOTAS]
# resource name(s) that are supported in quota features
# quota_items = network,subnet,port
# default number of resource allowed per tenant, minus for unlimited
# default_quota = -1
# number of networks allowed per tenant, and minus means unlimited
# quota_network = 10
# number of subnets allowed per tenant, and minus means unlimited
# quota_subnet = 10
# number of ports allowed per tenant, and minus means unlimited
# quota_port = 50
# number of security groups allowed per tenant, and minus means unlimited
# quota_security_group = 10
# number of security group rules allowed per tenant, and minus means unlimited
# quota_security_group_rule = 100
# default driver to use for quota checks
# quota_driver = quantum.quota.ConfDriver
[DEFAULT_SERVICETYPE]
# Description of the default service type (optional)
# description = "default service type"
# Enter a service definition line for each advanced service provided
# by the default service type.
# Each service definition should be in the following format:
# <service>:<plugin>[:driver]
[AGENT]
# Use "sudo quantum-rootwrap /etc/quantum/rootwrap.conf" to use the real
# root filter facility.
# Change to "sudo" to skip the filtering and just run the command directly
# root_helper = sudo
root_helper = sudo quantum-rootwrap /etc/quantum/rootwrap.conf
# =========== items for agent management extension =============
# seconds between nodes reporting state to server, should be less than
# agent_down_time
# report_interval = 4
# =========== end of items for agent management extension =====
[keystone_authtoken]
auth_host = 192.168.100.1
auth_port = 35357
auth_protocol = http
#admin_tenant_name = %SERVICE_TENANT_NAME%
#admin_user = %SERVICE_USER%
#admin_password = %SERVICE_PASSWORD%
admin_tenant_name = service
admin_user = admin
admin_password = password
signing_dir = /var/lib/quantum/keystone-signing
/etc/quantum/api-paste.ini
[composite:quantum]
use = egg:Paste#urlmap
/: quantumversions
/v2.0: quantumapi_v2_0
[composite:quantumapi_v2_0]
use = call:quantum.auth:pipeline_factory
noauth = extensions quantumapiapp_v2_0
keystone = authtoken keystonecontext extensions quantumapiapp_v2_0
[filter:keystonecontext]
paste.filter_factory = quantum.auth:QuantumKeystoneContext.factory
[filter:authtoken]
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
[filter:extensions]
paste.filter_factory = quantum.api.extensions:plugin_aware_extension_middleware_factory
[app:quantumversions]
paste.app_factory = quantum.api.versions:Versions.factory
[app:quantumapiapp_v2_0]
paste.app_factory = quantum.api.v2.router:APIRouter.factory
/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini
[DATABASE]
sql_connection = mysql://quantum:quantum@192.168.100.1/quantum
reconnect_interval = 2
[OVS]
tenant_network_type = gre
tunnel_id_ranges = 1:1000
local_ip = 192.168.100.3
enable_tunneling = True
#network_vlan_ranges = physnet1
#bridge_mappings = physnet1:br-int
[AGENT]
# Agent's polling interval in seconds
polling_interval = 2
[SECURITYGROUP]
firewall_driver = quantum.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
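Finally, restart the OVS agent on the compute node so the GRE tunnel toward the network node (local_ip 192.168.100.3 to 192.168.100.2) gets built; a quick check, assuming the Ubuntu Grizzly package names:
service quantum-plugin-openvswitch-agent restart
ovs-vsctl show    # br-tun should contain a gre port with remote_ip="192.168.100.2"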