From bbe771a8d0682dbf85ecd18f9da071c40886fe7c Mon Sep 17 00:00:00 2001 From: Steve Martinelli Date: Tue, 20 Jan 2015 13:30:33 -0500 Subject: [PATCH 0001/2941] Add bash completion for OSC use openstackclient to generate bash completion script, and move the file to the right location. Change-Id: I96f2230cbba030e235161165d3b173c7af5e28fe --- stack.sh | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/stack.sh b/stack.sh index 585d1ceaf6..37b4e2853f 100755 --- a/stack.sh +++ b/stack.sh @@ -1312,6 +1312,13 @@ fi service_check +# Bash completion +# =============== + +# Prepare bash completion for OSC +openstack complete | sudo tee /etc/bash_completion.d/osc.bash_completion > /dev/null + + # Fin # === From 1d3a6ec0de8badae58492021e9025f0ef78878b6 Mon Sep 17 00:00:00 2001 From: Lucas Alvares Gomes Date: Wed, 25 Feb 2015 12:38:47 +0000 Subject: [PATCH 0002/2941] Add support for using IPA with iSCSI This patch is adding a new boolean that can be toggled to indicate that we should use the IPA ramdisk instead of the normal ramdisk when deploying a node with the iSCSI methodology. Defaults to False. Depends-On: Iaabc6ada729461f18d69ee12d01b9f1465944454 Change-Id: If4004078866d833eb946be40b6dfb204aa4a6840 --- lib/ironic | 28 ++++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/lib/ironic b/lib/ironic index 7ffa6a5caf..394d68225a 100644 --- a/lib/ironic +++ b/lib/ironic @@ -121,6 +121,16 @@ IRONIC_HTTP_DIR=${IRONIC_HTTP_DIR:-$IRONIC_DATA_DIR/httpboot} IRONIC_HTTP_SERVER=${IRONIC_HTTP_SERVER:-$HOST_IP} IRONIC_HTTP_PORT=${IRONIC_HTTP_PORT:-8088} +# NOTE(lucasagomes): This flag is used to differentiate the nodes that +# uses IPA as their deploy ramdisk from nodes that uses the agent_* drivers +# (which also uses IPA but depends on Swift Temp URLs to work). At present, +# all drivers that uses the iSCSI approach for their deployment supports +# using both, IPA or bash ramdisks for the deployment. 
In the future we +# want to remove the support for the bash ramdisk in favor of IPA, once +# we get there this flag can be removed, and all conditionals that uses +# it should just run by default. +IRONIC_DEPLOY_DRIVER_ISCSI_WITH_IPA=$(trueorfalse False IRONIC_DEPLOY_DRIVER_ISCSI_WITH_IPA) + # get_pxe_boot_file() - Get the PXE/iPXE boot file path function get_pxe_boot_file { local relpath=syslinux/pxelinux.0 @@ -162,6 +172,11 @@ function is_deployed_by_agent { return 1 } +function is_deployed_with_ipa_ramdisk { + is_deployed_by_agent || [[ "$IRONIC_DEPLOY_DRIVER_ISCSI_WITH_IPA" == "True" ]] && return 0 + return 1 +} + # install_ironic() - Collect source and prepare function install_ironic { # make sure all needed service were enabled @@ -310,7 +325,11 @@ function configure_ironic_conductor { iniset $IRONIC_CONF_FILE pxe tftp_root $IRONIC_TFTPBOOT_DIR iniset $IRONIC_CONF_FILE pxe tftp_master_path $IRONIC_TFTPBOOT_DIR/master_images if [[ "$IRONIC_VM_LOG_CONSOLE" == "True" ]] ; then - iniset $IRONIC_CONF_FILE pxe pxe_append_params "nofb nomodeset vga=normal console=ttyS0" + local pxe_params="nofb nomodeset vga=normal console=ttyS0" + if is_deployed_with_ipa_ramdisk; then + pxe_params+=" systemd.journald.forward_to_console=yes" + fi + iniset $IRONIC_CONF_FILE pxe pxe_append_params "$pxe_params" fi if is_deployed_by_agent; then if [[ "$SWIFT_ENABLE_TEMPURLS" == "True" ]] ; then @@ -325,9 +344,6 @@ function configure_ironic_conductor { iniset $IRONIC_CONF_FILE glance swift_container glance iniset $IRONIC_CONF_FILE glance swift_temp_url_duration 3600 iniset $IRONIC_CONF_FILE agent heartbeat_timeout 30 - if [[ "$IRONIC_VM_LOG_CONSOLE" == "True" ]] ; then - iniset $IRONIC_CONF_FILE agent agent_pxe_append_params "nofb nomodeset vga=normal console=ttyS0 systemd.journald.forward_to_console=yes" - fi fi if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then @@ -698,7 +714,7 @@ function upload_baremetal_ironic_deploy { if [ "$IRONIC_BUILD_DEPLOY_RAMDISK" = "True" ]; then # we can 
build them only if we're not offline if [ "$OFFLINE" != "True" ]; then - if is_deployed_by_agent; then + if is_deployed_with_ipa_ramdisk; then build_ipa_coreos_ramdisk $IRONIC_DEPLOY_KERNEL_PATH $IRONIC_DEPLOY_RAMDISK_PATH else ramdisk-image-create $IRONIC_DEPLOY_FLAVOR \ @@ -708,7 +724,7 @@ function upload_baremetal_ironic_deploy { die $LINENO "Deploy kernel+ramdisk files don't exist and cannot be build in OFFLINE mode" fi else - if is_deployed_by_agent; then + if is_deployed_with_ipa_ramdisk; then # download the agent image tarball wget "$IRONIC_AGENT_KERNEL_URL" -O $IRONIC_DEPLOY_KERNEL_PATH wget "$IRONIC_AGENT_RAMDISK_URL" -O $IRONIC_DEPLOY_RAMDISK_PATH From c24b399b7e37480ee57546fee9fab4d4c6b452e0 Mon Sep 17 00:00:00 2001 From: Li Ma Date: Sun, 21 Dec 2014 23:51:40 -0800 Subject: [PATCH 0003/2941] Complete the support of MatchMakerRedis driver MatchMakerRedis is the only tested routing method for ZeroMQ driver. For others, like MatchMakerLocalhost and MatchMakerRing, it still takes some time to work on and completely test. MatchMakerRedis is enough to run under real-world deployment. Change-Id: I3b2e8e68ceebd377479d75bbb8b862ae60cfc826 Partially-Implements: blueprint zeromq --- lib/rpc_backend | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/lib/rpc_backend b/lib/rpc_backend index 899748c53d..ff22bbf8fa 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -158,9 +158,6 @@ function install_rpc_backend { fi _configure_qpid elif is_service_enabled zeromq; then - # NOTE(ewindisch): Redis is not strictly necessary - # but there is a matchmaker driver that works - # really well & out of the box for multi-node. 
if is_fedora; then install_package zeromq python-zmq if [ "$ZEROMQ_MATCHMAKER" == "redis" ]; then @@ -243,11 +240,15 @@ function iniset_rpc_backend { local section=$3 if is_service_enabled zeromq; then iniset $file $section rpc_backend "zmq" - iniset $file $section rpc_zmq_matchmaker \ - oslo_messaging._drivers.matchmaker_redis.MatchMakerRedis - # Set MATCHMAKER_REDIS_HOST if running multi-node. - MATCHMAKER_REDIS_HOST=${MATCHMAKER_REDIS_HOST:-127.0.0.1} - iniset $file matchmaker_redis host $MATCHMAKER_REDIS_HOST + iniset $file $section rpc_zmq_host `hostname` + if [ "$ZEROMQ_MATCHMAKER" == "redis" ]; then + iniset $file $section rpc_zmq_matchmaker \ + oslo.messaging._drivers.matchmaker_redis.MatchMakerRedis + MATCHMAKER_REDIS_HOST=${MATCHMAKER_REDIS_HOST:-127.0.0.1} + iniset $file matchmaker_redis host $MATCHMAKER_REDIS_HOST + else + die $LINENO "Other matchmaker drivers not supported" + fi elif is_service_enabled qpid || [ -n "$QPID_HOST" ]; then # For Qpid use the 'amqp' oslo.messaging transport when AMQP 1.0 is used if [ "$RPC_MESSAGING_PROTOCOL" == "AMQP1" ]; then From 37a06f017ba6ef38159ee65ac25bdd890ccbd102 Mon Sep 17 00:00:00 2001 From: Telles Nobrega Date: Tue, 18 Nov 2014 07:59:10 -0300 Subject: [PATCH 0004/2941] Add data-processing service for Sahara When registering endpoint with data_processing keystone transforms it into data-processing. This problem causes sahara to not find the endpoint afterwards We need to have two endpoints for correct working tempest and horizon with devstack. 
After resolving bug in tempest and horizon need to remove old data_processing endpoint Change-Id: I97827d23ffe8a1218abd61e76804b918b1b7cbe0 Partial-bug: #1356053 --- lib/sahara | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) diff --git a/lib/sahara b/lib/sahara index a84a06f1ce..9b2e9c406d 100644 --- a/lib/sahara +++ b/lib/sahara @@ -65,9 +65,25 @@ function create_sahara_accounts { if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - local sahara_service=$(get_or_create_service "sahara" \ - "data_processing" "Sahara Data Processing") - get_or_create_endpoint $sahara_service \ + # TODO: remove "data_processing" service when #1356053 will be fixed + local sahara_service_old=$(openstack service create \ + "data_processing" \ + --name "sahara" \ + --description "Sahara Data Processing" \ + -f value -c id + ) + local sahara_service_new=$(openstack service create \ + "data-processing" \ + --name "sahara" \ + --description "Sahara Data Processing" \ + -f value -c id + ) + get_or_create_endpoint $sahara_service_old \ + "$REGION_NAME" \ + "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \ + "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \ + "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" + get_or_create_endpoint $sahara_service_new \ "$REGION_NAME" \ "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \ "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \ From dd4bafd7ef832f6264659af8d63f4db66d32828f Mon Sep 17 00:00:00 2001 From: Peter Stachowski Date: Mon, 2 Mar 2015 23:32:04 -0500 Subject: [PATCH 0005/2941] Add guestagent to defined Trove conf variables When the Trove configuration files were defined in variables, the guestagent wasn't included. 
In order for profiling to continue on the guestagent, its configuration file must be defined as well. (See https://bugs.launchpad.net/devstack/+bug/1421403) TROVE_GUESTAGENT_CONF is now defined. Change-Id: Ie7cb531e2a1eca74100e2466a430e85eaf936263 Closes-Bug: #1427506 --- lib/trove | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/lib/trove b/lib/trove index 080e860f03..d77798308b 100644 --- a/lib/trove +++ b/lib/trove @@ -37,6 +37,7 @@ TROVE_CONF_DIR=/etc/trove TROVE_CONF=$TROVE_CONF_DIR/trove.conf TROVE_TASKMANAGER_CONF=$TROVE_CONF_DIR/trove-taskmanager.conf TROVE_CONDUCTOR_CONF=$TROVE_CONF_DIR/trove-conductor.conf +TROVE_GUESTAGENT_CONF=$TROVE_CONF_DIR/trove-guestagent.conf TROVE_API_PASTE_INI=$TROVE_CONF_DIR/api-paste.ini TROVE_LOCAL_CONF_DIR=$TROVE_DIR/etc/trove @@ -171,18 +172,18 @@ function configure_trove { fi # Set up Guest Agent conf - iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT rabbit_userid $RABBIT_USERID - iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT rabbit_host $TROVE_HOST_GATEWAY - iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT rabbit_password $RABBIT_PASSWORD - iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT nova_proxy_admin_user radmin - iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT nova_proxy_admin_tenant_name trove - iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS - iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT - iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT control_exchange trove - iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT ignore_users os_admin - iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT log_dir /var/log/trove/ - iniset $TROVE_CONF_DIR/trove-guestagent.conf DEFAULT log_file trove-guestagent.log - setup_trove_logging $TROVE_CONF_DIR/trove-guestagent.conf + iniset $TROVE_GUESTAGENT_CONF DEFAULT rabbit_userid $RABBIT_USERID + iniset $TROVE_GUESTAGENT_CONF 
DEFAULT rabbit_host $TROVE_HOST_GATEWAY + iniset $TROVE_GUESTAGENT_CONF DEFAULT rabbit_password $RABBIT_PASSWORD + iniset $TROVE_GUESTAGENT_CONF DEFAULT nova_proxy_admin_user radmin + iniset $TROVE_GUESTAGENT_CONF DEFAULT nova_proxy_admin_tenant_name trove + iniset $TROVE_GUESTAGENT_CONF DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS + iniset $TROVE_GUESTAGENT_CONF DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT + iniset $TROVE_GUESTAGENT_CONF DEFAULT control_exchange trove + iniset $TROVE_GUESTAGENT_CONF DEFAULT ignore_users os_admin + iniset $TROVE_GUESTAGENT_CONF DEFAULT log_dir /var/log/trove/ + iniset $TROVE_GUESTAGENT_CONF DEFAULT log_file trove-guestagent.log + setup_trove_logging $TROVE_GUESTAGENT_CONF } # install_troveclient() - Collect source and prepare From e3ceaedbd7b111c1e8b28510a4eb11e540d4af77 Mon Sep 17 00:00:00 2001 From: Matthew Booth Date: Tue, 3 Mar 2015 16:13:31 +0000 Subject: [PATCH 0006/2941] Fix defaulting of REMOTE_CEPH A typo in lib/ceph was causing REMOTE_CEPH to be defaulted whenever lib/ceph was sourced, regardless of its existing value. The `trueorfalse` function takes a variable name as its second argument, not a value. 
Change-Id: Iec846e0b892eaa63a0a2a59aa045bc56d5606af1 --- lib/ceph | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ceph b/lib/ceph index a6b8cc8b57..251cfd1449 100644 --- a/lib/ceph +++ b/lib/ceph @@ -71,7 +71,7 @@ CEPH_REPLICAS=${CEPH_REPLICAS:-1} CEPH_REPLICAS_SEQ=$(seq ${CEPH_REPLICAS}) # Connect to an existing Ceph cluster -REMOTE_CEPH=$(trueorfalse False $REMOTE_CEPH) +REMOTE_CEPH=$(trueorfalse False REMOTE_CEPH) REMOTE_CEPH_ADMIN_KEY_PATH=${REMOTE_CEPH_ADMIN_KEY_PATH:-$CEPH_CONF_DIR/ceph.client.admin.keyring} From 0fdf34959eb8f330301adfcd0ab1cfe975b5460c Mon Sep 17 00:00:00 2001 From: Matthew Booth Date: Tue, 3 Mar 2015 16:37:35 +0000 Subject: [PATCH 0007/2941] Don't cleanup ceph config when REMOTE_CEPH=True If REMOTE_CEPH=True then we didn't write the contents of /etc/ceph, so we shouldn't delete them. Change-Id: I6291c6562a2864de775b1acb4be0be35b866f30d --- lib/ceph | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/ceph b/lib/ceph index 251cfd1449..76747ccd77 100644 --- a/lib/ceph +++ b/lib/ceph @@ -151,14 +151,14 @@ function cleanup_ceph_embedded { if [[ -e ${CEPH_DISK_IMAGE} ]]; then sudo rm -f ${CEPH_DISK_IMAGE} fi + + # purge ceph config file and keys + sudo rm -rf ${CEPH_CONF_DIR}/* } function cleanup_ceph_general { undefine_virsh_secret uninstall_package ceph ceph-common python-ceph libcephfs1 > /dev/null 2>&1 - - # purge ceph config file and keys - sudo rm -rf ${CEPH_CONF_DIR}/* } From aca8a7fd991484a59fc20aadc3cedb339fc55ca5 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Tue, 3 Mar 2015 08:50:27 -0800 Subject: [PATCH 0008/2941] Add support for oslo.versionedobjects Change-Id: I01dba39259a3b264d4ec2b21db8429d340751979 --- lib/oslo | 2 ++ stackrc | 4 ++++ tests/test_libs_from_pypi.sh | 4 ++-- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/lib/oslo b/lib/oslo index 18cddc193e..86efb60a4e 100644 --- a/lib/oslo +++ b/lib/oslo @@ -36,6 +36,7 @@ GITDIR["oslo.policy"]=$DEST/oslo.policy 
GITDIR["oslo.rootwrap"]=$DEST/oslo.rootwrap GITDIR["oslo.serialization"]=$DEST/oslo.serialization GITDIR["oslo.utils"]=$DEST/oslo.utils +GITDIR["oslo.versionedobjects"]=$DEST/oslo.versionedobjects GITDIR["oslo.vmware"]=$DEST/oslo.vmware GITDIR["pycadf"]=$DEST/pycadf GITDIR["stevedore"]=$DEST/stevedore @@ -72,6 +73,7 @@ function install_oslo { _do_install_oslo_lib "oslo.rootwrap" _do_install_oslo_lib "oslo.serialization" _do_install_oslo_lib "oslo.utils" + _do_install_oslo_lib "oslo.versionedobjects" _do_install_oslo_lib "oslo.vmware" _do_install_oslo_lib "pycadf" _do_install_oslo_lib "stevedore" diff --git a/stackrc b/stackrc index 103be6de66..30706ebd4a 100644 --- a/stackrc +++ b/stackrc @@ -361,6 +361,10 @@ GITBRANCH["oslo.serialization"]=${OSLOSERIALIZATION_BRANCH:-master} GITREPO["oslo.utils"]=${OSLOUTILS_REPO:-${GIT_BASE}/openstack/oslo.utils.git} GITBRANCH["oslo.utils"]=${OSLOUTILS_BRANCH:-master} +# oslo.versionedobjects +GITREPO["oslo.versionedobjects"]=${OSLOVERSIONEDOBJECTS_REPO:-${GIT_BASE}/openstack/oslo.versionedobjects.git} +GITBRANCH["oslo.versionedobjects"]=${OSLOVERSIONEDOBJECTS_BRANCH:-master} + # oslo.vmware GITREPO["oslo.vmware"]=${OSLOVMWARE_REPO:-${GIT_BASE}/openstack/oslo.vmware.git} GITBRANCH["oslo.vmware"]=${OSLOVMWARE_BRANCH:-master} diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh index 472b0ea4af..0bec584aad 100755 --- a/tests/test_libs_from_pypi.sh +++ b/tests/test_libs_from_pypi.sh @@ -34,8 +34,8 @@ ALL_LIBS+=" python-keystoneclient taskflow oslo.middleware pycadf" ALL_LIBS+=" python-glanceclient python-ironicclient tempest-lib" ALL_LIBS+=" oslo.messaging oslo.log cliff python-heatclient stevedore" ALL_LIBS+=" python-cinderclient glance_store oslo.concurrency oslo.db" -ALL_LIBS+=" oslo.vmware keystonemiddleware oslo.serialization" -ALL_LIBS+=" python-saharaclient django_openstack_auth" +ALL_LIBS+=" oslo.versionedobjects oslo.vmware keystonemiddleware" +ALL_LIBS+=" oslo.serialization python-saharaclient 
django_openstack_auth" ALL_LIBS+=" python-openstackclient oslo.rootwrap oslo.i18n" ALL_LIBS+=" python-ceilometerclient oslo.utils python-swiftclient" ALL_LIBS+=" python-neutronclient tooz ceilometermiddleware oslo.policy" From 93e682c558f954fa35a00d7cc6a6903e8ed59178 Mon Sep 17 00:00:00 2001 From: Doug Wiegley Date: Tue, 3 Mar 2015 10:31:30 -0700 Subject: [PATCH 0009/2941] Revert change to remove lbaas from devstack; it breaks grenade. Change-Id: Ie2adaeb7f27d6d646ca2e6e575fb430b9b74b276 --- functions-common | 19 -------- lib/neutron | 53 ++++++++++++++++------- lib/neutron_plugins/services/loadbalancer | 49 +++++++++++++++++++++ stackrc | 3 -- 4 files changed, 86 insertions(+), 38 deletions(-) create mode 100644 lib/neutron_plugins/services/loadbalancer diff --git a/functions-common b/functions-common index 267dfe8622..df69cbad16 100644 --- a/functions-common +++ b/functions-common @@ -1601,25 +1601,6 @@ function enable_plugin { GITBRANCH[$name]=$branch } -# is_plugin_enabled -# -# Has a particular plugin been enabled? 
-function is_plugin_enabled { - local plugins=$@ - local plugin - local enabled=1 - - # short circuit if nothing to do - if [[ -z ${DEVSTACK_PLUGINS} ]]; then - return $enabled - fi - - for plugin in ${plugins}; do - [[ ,${DEVSTACK_PLUGINS}, =~ ,${plugin}, ]] && enabled=0 - done - return $enabled -} - # fetch_plugins # # clones all plugins diff --git a/lib/neutron b/lib/neutron index a0f9c362f2..a7aabc5909 100755 --- a/lib/neutron +++ b/lib/neutron @@ -100,8 +100,10 @@ IPV6_ROUTER_GW_IP=${IPV6_ROUTER_GW_IP:-fe80:cafe:cafe::1} # Set up default directories GITDIR["python-neutronclient"]=$DEST/python-neutronclient + NEUTRON_DIR=$DEST/neutron NEUTRON_FWAAS_DIR=$DEST/neutron-fwaas +NEUTRON_LBAAS_DIR=$DEST/neutron-lbaas NEUTRON_VPNAAS_DIR=$DEST/neutron-vpnaas NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron} @@ -114,7 +116,6 @@ fi NEUTRON_CONF_DIR=/etc/neutron NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf - export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"} # Agent binaries. Note, binary paths for other agents are set in per-service @@ -325,6 +326,12 @@ ENABLE_METADATA_NETWORK=${ENABLE_METADATA_NETWORK:-False} # Please refer to ``lib/neutron_plugins/README.md`` for details. source $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN +# Agent loadbalancer service plugin functions +# ------------------------------------------- + +# Hardcoding for 1 service plugin for now +source $TOP_DIR/lib/neutron_plugins/services/loadbalancer + # Agent metering service plugin functions # ------------------------------------------- @@ -351,17 +358,6 @@ fi TEMPEST_SERVICES+=,neutron -# For backward compatibility, if q-lbaas service is enabled, make sure to load the -# neutron-lbaas plugin. This hook should be removed in a future release, perhaps -# as early as Liberty. - -if is_service_enabled q-lbaas; then - if ! 
is_plugin_enabled neutron-lbaas; then - DEPRECATED_TEXT+="External plugin neutron-lbaas has been automatically activated, please add the appropriate enable_plugin to your local.conf. This will be removed in the Liberty cycle." - enable_plugin "neutron-lbaas" ${NEUTRON_LBAAS_REPO} ${NEUTRON_LBAAS_BRANCH} - fi -fi - # Save trace setting XTRACE=$(set +o | grep xtrace) set +o xtrace @@ -429,7 +425,9 @@ function configure_neutron { iniset_rpc_backend neutron $NEUTRON_CONF DEFAULT # goes before q-svc to init Q_SERVICE_PLUGIN_CLASSES - + if is_service_enabled q-lbaas; then + _configure_neutron_lbaas + fi if is_service_enabled q-metering; then _configure_neutron_metering fi @@ -607,8 +605,7 @@ function init_neutron { recreate_database $Q_DB_NAME # Run Neutron db migrations $NEUTRON_BIN_DIR/neutron-db-manage --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE upgrade head - - for svc in fwaas vpnaas; do + for svc in fwaas lbaas vpnaas; do if [ "$svc" = "vpnaas" ]; then q_svc="q-vpn" else @@ -628,6 +625,10 @@ function install_neutron { git_clone $NEUTRON_FWAAS_REPO $NEUTRON_FWAAS_DIR $NEUTRON_FWAAS_BRANCH setup_develop $NEUTRON_FWAAS_DIR fi + if is_service_enabled q-lbaas; then + git_clone $NEUTRON_LBAAS_REPO $NEUTRON_LBAAS_DIR $NEUTRON_LBAAS_BRANCH + setup_develop $NEUTRON_LBAAS_DIR + fi if is_service_enabled q-vpn; then git_clone $NEUTRON_VPNAAS_REPO $NEUTRON_VPNAAS_DIR $NEUTRON_VPNAAS_BRANCH setup_develop $NEUTRON_VPNAAS_DIR @@ -671,6 +672,10 @@ function install_neutron_agent_packages { if is_service_enabled q-agt q-dhcp q-l3; then neutron_plugin_install_agent_packages fi + + if is_service_enabled q-lbaas; then + neutron_agent_lbaas_install_agent_packages + fi } # Start running processes, including screen @@ -730,6 +735,10 @@ function start_neutron_agents { run_process q-domua "python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE.domU" fi + if is_service_enabled q-lbaas; then + run_process q-lbaas "python $AGENT_LBAAS_BINARY 
--config-file $NEUTRON_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME" + fi + if is_service_enabled q-metering; then run_process q-metering "python $AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME" fi @@ -753,6 +762,9 @@ function stop_neutron { stop_process q-agt + if is_service_enabled q-lbaas; then + neutron_lbaas_stop + fi if is_service_enabled q-fwaas; then neutron_fwaas_stop fi @@ -780,11 +792,12 @@ function cleanup_neutron { fi # delete all namespaces created by neutron - for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|fip|snat)-[0-9a-f-]*'); do + for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|qlbaas|fip|snat)-[0-9a-f-]*'); do sudo ip netns delete ${ns} done } + function _create_neutron_conf_dir { # Put config files in ``NEUTRON_CONF_DIR`` for everyone to find if [[ ! -d $NEUTRON_CONF_DIR ]]; then @@ -954,6 +967,14 @@ function _configure_neutron_ceilometer_notifications { iniset $NEUTRON_CONF DEFAULT notification_driver messaging } +function _configure_neutron_lbaas { + if [ -f $NEUTRON_LBAAS_DIR/etc/neutron_lbaas.conf ]; then + cp $NEUTRON_LBAAS_DIR/etc/neutron_lbaas.conf $NEUTRON_CONF_DIR + fi + neutron_agent_lbaas_configure_common + neutron_agent_lbaas_configure_agent +} + function _configure_neutron_metering { neutron_agent_metering_configure_common neutron_agent_metering_configure_agent diff --git a/lib/neutron_plugins/services/loadbalancer b/lib/neutron_plugins/services/loadbalancer new file mode 100644 index 0000000000..f465cc94b4 --- /dev/null +++ b/lib/neutron_plugins/services/loadbalancer @@ -0,0 +1,49 @@ +# Neutron loadbalancer plugin +# --------------------------- + +# Save trace setting +LB_XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +AGENT_LBAAS_BINARY="$NEUTRON_BIN_DIR/neutron-lbaas-agent" +LBAAS_PLUGIN=neutron_lbaas.services.loadbalancer.plugin.LoadBalancerPlugin + +function neutron_agent_lbaas_install_agent_packages { + if is_ubuntu || is_fedora || is_suse; then + 
install_package haproxy + fi +} + +function neutron_agent_lbaas_configure_common { + _neutron_service_plugin_class_add $LBAAS_PLUGIN + _neutron_deploy_rootwrap_filters $NEUTRON_LBAAS_DIR +} + +function neutron_agent_lbaas_configure_agent { + LBAAS_AGENT_CONF_PATH=/etc/neutron/services/loadbalancer/haproxy + mkdir -p $LBAAS_AGENT_CONF_PATH + + LBAAS_AGENT_CONF_FILENAME="$LBAAS_AGENT_CONF_PATH/lbaas_agent.ini" + + cp $NEUTRON_LBAAS_DIR/etc/lbaas_agent.ini $LBAAS_AGENT_CONF_FILENAME + + # ovs_use_veth needs to be set before the plugin configuration + # occurs to allow plugins to override the setting. + iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT ovs_use_veth $Q_OVS_USE_VETH + + neutron_plugin_setup_interface_driver $LBAAS_AGENT_CONF_FILENAME + + if is_fedora; then + iniset $LBAAS_AGENT_CONF_FILENAME DEFAULT user_group "nobody" + iniset $LBAAS_AGENT_CONF_FILENAME haproxy user_group "nobody" + fi +} + +function neutron_lbaas_stop { + pids=$(ps aux | awk '/haproxy/ { print $2 }') + [ ! -z "$pids" ] && sudo kill $pids +} + +# Restore xtrace +$LB_XTRACE diff --git a/stackrc b/stackrc index 103be6de66..cb044b8fed 100644 --- a/stackrc +++ b/stackrc @@ -198,9 +198,6 @@ NEUTRON_FWAAS_REPO=${NEUTRON_FWAAS_REPO:-${GIT_BASE}/openstack/neutron-fwaas.git NEUTRON_FWAAS_BRANCH=${NEUTRON_FWAAS_BRANCH:-master} # neutron lbaas service -# The neutron-lbaas specific entries are deprecated and replaced by the neutron-lbaas -# devstack plugin and should be removed in a future release, possibly as soon as Liberty. - NEUTRON_LBAAS_REPO=${NEUTRON_LBAAS_REPO:-${GIT_BASE}/openstack/neutron-lbaas.git} NEUTRON_LBAAS_BRANCH=${NEUTRON_LBAAS_BRANCH:-master} From 802473e45ab897144d81d48164d8342763a119d8 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Wed, 4 Mar 2015 11:14:00 +0100 Subject: [PATCH 0010/2941] Do not install the python-virtualenv on Fedora The files/rpms/general:python-virtualenv explicitly installed this package on Fedoras. 
This package is not installed on other distros by devstack by default. If you stack/unstack the package gets reinstalled, and confuses the system about the installed virtual-env version. The uninstall works in CI, but it can be problematic when you do reinstalls on the same machine. The uninstall was introduced by 834b804d3eda9029d3c66db0ab732a76a22ed08b, but that commit does not have any reference to the external bug it was supposed to solve. Related RDO thread started here: https://www.redhat.com/archives/rdo-list/2015-March/msg00015.html Change-Id: I4a723f179bdc28d39a4910fb9e3787e9e67c354b --- files/rpms/general | 1 - 1 file changed, 1 deletion(-) diff --git a/files/rpms/general b/files/rpms/general index 56a933190c..cf406325e7 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -15,7 +15,6 @@ libxslt-devel psmisc pylint python-unittest2 -python-virtualenv python-devel screen tar From e2d2d65aa517fdf88cbdd0248d72eb6473bb14c6 Mon Sep 17 00:00:00 2001 From: Zhenzan Zhou Date: Sat, 28 Feb 2015 11:13:27 +0800 Subject: [PATCH 0011/2941] Fix ironic port-create deprecated option A recent ironicclient commit If05d51b09d787ccfbf6f6d35d8e752d42f673601 deprecated --node_uuid, now it should use --node. 
Change-Id: Ia97074bd2ce92645ac4b4151824098cb99434117 --- lib/ironic | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ironic b/lib/ironic index ade889ef75..0c549b6e68 100644 --- a/lib/ironic +++ b/lib/ironic @@ -600,7 +600,7 @@ function enroll_nodes { $node_options \ | grep " uuid " | get_field 2) - ironic port-create --address $mac_address --node_uuid $node_id + ironic port-create --address $mac_address --node $node_id total_nodes=$((total_nodes+1)) total_cpus=$((total_cpus+$ironic_node_cpu)) From ab7df5ea1d4bd579c97bc739d7b9893df1715845 Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Wed, 4 Mar 2015 16:25:40 -0800 Subject: [PATCH 0012/2941] Add cryptography to list if files to build before hand building this takes 20 seconds or so Change-Id: I95c71b1d0255c02038006bc743125ff2c49d9da9 --- files/venv-requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/files/venv-requirements.txt b/files/venv-requirements.txt index 3c50061e96..8417b92408 100644 --- a/files/venv-requirements.txt +++ b/files/venv-requirements.txt @@ -1,3 +1,4 @@ +cryptography lxml MySQL-python netifaces From 8c32e0df7bf35e860ce95e5db3c78e5d6dd33ff6 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Wed, 4 Mar 2015 14:53:05 -0800 Subject: [PATCH 0013/2941] Set rootfstype=ramfs for low memory Ironic nodes When running with low memory (<1024), we need to switch from the default rootfstype from tmpfs to ramfs to ensure nodes can decompress deployment ramdisks. 
Change-Id: I1b9dd614e592d99b2f59dea899b1ed3859ae0811 --- lib/ironic | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/lib/ironic b/lib/ironic index bc30cdbeba..325bee9060 100644 --- a/lib/ironic +++ b/lib/ironic @@ -343,13 +343,24 @@ function configure_ironic_conductor { iniset $IRONIC_CONF_FILE pxe tftp_server $IRONIC_TFTPSERVER_IP iniset $IRONIC_CONF_FILE pxe tftp_root $IRONIC_TFTPBOOT_DIR iniset $IRONIC_CONF_FILE pxe tftp_master_path $IRONIC_TFTPBOOT_DIR/master_images + + local pxe_params="" if [[ "$IRONIC_VM_LOG_CONSOLE" == "True" ]] ; then - local pxe_params="nofb nomodeset vga=normal console=ttyS0" + pxe_params+="nofb nomodeset vga=normal console=ttyS0" if is_deployed_with_ipa_ramdisk; then pxe_params+=" systemd.journald.forward_to_console=yes" fi + fi + # When booting with less than 1GB, we need to switch from default tmpfs + # to ramfs for ramdisks to decompress successfully. + if (is_ironic_hardware && [[ "$IRONIC_HW_NODE_RAM" -lt 1024 ]]) || + (! is_ironic_hardware && [[ "$IRONIC_VM_SPECS_RAM" -lt 1024 ]]); then + pxe_params+=" rootfstype=ramfs" + fi + if [[ -n "$pxe_params" ]]; then iniset $IRONIC_CONF_FILE pxe pxe_append_params "$pxe_params" fi + if is_deployed_by_agent; then if [[ "$SWIFT_ENABLE_TEMPURLS" == "True" ]] ; then iniset $IRONIC_CONF_FILE glance swift_temp_url_key $SWIFT_TEMPURL_KEY From 249e36dec6198c1dfd8e4f80d1f0a815fe6f36aa Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Thu, 5 Mar 2015 14:01:45 +1300 Subject: [PATCH 0014/2941] Remove lib/dib diskimage-builder is a utility rather than a service, and is already installed in devstack via pip when required. lib/dib was created to allow an image to be created during a devstack run for the heat functional tests, however this approach is no longer being taken and there are no other known uses for lib/dib. 
This change removes lib/dib and moves the pip mirror building to lib/heat so that snapshot pip packages of the heat agent projects can be made available to servers which the heat functional tests boot. This also removes tripleo-image-elements, which has never been utilised, and since images won't be created during heat functional test runs it is no longer required. Change-Id: Ic77f841437ea23c0645d3a34d9dd6bfd1ee28714 --- doc/source/index.rst | 2 - extras.d/40-dib.sh | 27 ----- files/apache-dib-pip-repo.template | 15 --- files/apache-heat-pip-repo.template | 15 +++ lib/dib | 149 ---------------------------- lib/heat | 69 ++++++++----- stack.sh | 6 +- stackrc | 10 +- 8 files changed, 65 insertions(+), 228 deletions(-) delete mode 100644 extras.d/40-dib.sh delete mode 100644 files/apache-dib-pip-repo.template create mode 100644 files/apache-heat-pip-repo.template delete mode 100644 lib/dib diff --git a/doc/source/index.rst b/doc/source/index.rst index 10f4355c07..cfde99132d 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -158,7 +158,6 @@ Scripts * `lib/cinder `__ * `lib/config `__ * `lib/database `__ -* `lib/dib `__ * `lib/dstat `__ * `lib/glance `__ * `lib/heat `__ @@ -181,7 +180,6 @@ Scripts * `clean.sh `__ * `run\_tests.sh `__ -* `extras.d/40-dib.sh `__ * `extras.d/50-ironic.sh `__ * `extras.d/60-ceph.sh `__ * `extras.d/70-sahara.sh `__ diff --git a/extras.d/40-dib.sh b/extras.d/40-dib.sh deleted file mode 100644 index fdae01191f..0000000000 --- a/extras.d/40-dib.sh +++ /dev/null @@ -1,27 +0,0 @@ -# dib.sh - Devstack extras script to install diskimage-builder - -if is_service_enabled dib; then - if [[ "$1" == "source" ]]; then - # Initial source - source $TOP_DIR/lib/dib - elif [[ "$1" == "stack" && "$2" == "install" ]]; then - echo_summary "Installing diskimage-builder" - install_dib - elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then - # no-op - : - elif [[ "$1" == "stack" && "$2" == "extra" ]]; then - # no-op - : - fi - - if [[ "$1" == 
"unstack" ]]; then - # no-op - : - fi - - if [[ "$1" == "clean" ]]; then - # no-op - : - fi -fi diff --git a/files/apache-dib-pip-repo.template b/files/apache-dib-pip-repo.template deleted file mode 100644 index 5d2379b5bb..0000000000 --- a/files/apache-dib-pip-repo.template +++ /dev/null @@ -1,15 +0,0 @@ -Listen %DIB_PIP_REPO_PORT% - - - DocumentRoot %DIB_PIP_REPO% - - DirectoryIndex index.html - Require all granted - Order allow,deny - allow from all - - - ErrorLog /var/log/%APACHE_NAME%/dib_pip_repo_error.log - LogLevel warn - CustomLog /var/log/%APACHE_NAME%/dib_pip_repo_access.log combined - diff --git a/files/apache-heat-pip-repo.template b/files/apache-heat-pip-repo.template new file mode 100644 index 0000000000..d88ac3e35a --- /dev/null +++ b/files/apache-heat-pip-repo.template @@ -0,0 +1,15 @@ +Listen %HEAT_PIP_REPO_PORT% + + + DocumentRoot %HEAT_PIP_REPO% + + DirectoryIndex index.html + Require all granted + Order allow,deny + allow from all + + + ErrorLog /var/log/%APACHE_NAME%/heat_pip_repo_error.log + LogLevel warn + CustomLog /var/log/%APACHE_NAME%/heat_pip_repo_access.log combined + diff --git a/lib/dib b/lib/dib deleted file mode 100644 index 88d9fd8434..0000000000 --- a/lib/dib +++ /dev/null @@ -1,149 +0,0 @@ -#!/bin/bash -# -# lib/dib -# Install and build images with **diskimage-builder** - -# Dependencies: -# -# - functions -# - DEST, DATA_DIR must be defined - -# stack.sh -# --------- -# - install_dib - -# Save trace setting -XTRACE=$(set +o | grep xtrace) -set +o xtrace - -# Defaults -# -------- - -# set up default directories -DIB_DIR=$DEST/diskimage-builder -TIE_DIR=$DEST/tripleo-image-elements - -# NOTE: Setting DIB_APT_SOURCES assumes you will be building -# Debian/Ubuntu based images. Leave unset for other flavors. 
-DIB_APT_SOURCES=${DIB_APT_SOURCES:-""} -DIB_BUILD_OFFLINE=$(trueorfalse False DIB_BUILD_OFFLINE) -DIB_IMAGE_CACHE=$DATA_DIR/diskimage-builder/image-create -DIB_PIP_REPO=$DATA_DIR/diskimage-builder/pip-repo -DIB_PIP_REPO_PORT=${DIB_PIP_REPO_PORT:-8899} - -OCC_DIR=$DEST/os-collect-config -ORC_DIR=$DEST/os-refresh-config -OAC_DIR=$DEST/os-apply-config - -# Functions -# --------- - -# install_dib() - Collect source and prepare -function install_dib { - pip_install diskimage-builder - - git_clone $TIE_REPO $TIE_DIR $TIE_BRANCH - git_clone $OCC_REPO $OCC_DIR $OCC_BRANCH - git_clone $ORC_REPO $ORC_DIR $ORC_BRANCH - git_clone $OAC_REPO $OAC_DIR $OAC_BRANCH - mkdir -p $DIB_IMAGE_CACHE -} - -# build_dib_pip_repo() - Builds a local pip repo from local projects -function build_dib_pip_repo { - local project_dirs=$1 - local projpath proj package - - rm -rf $DIB_PIP_REPO - mkdir -p $DIB_PIP_REPO - - echo "" > $DIB_PIP_REPO/index.html - for projpath in $project_dirs; do - proj=$(basename $projpath) - mkdir -p $DIB_PIP_REPO/$proj - pushd $projpath - rm -rf dist - python setup.py sdist - pushd dist - package=$(ls *) - mv $package $DIB_PIP_REPO/$proj/$package - popd - - echo "$package" > $DIB_PIP_REPO/$proj/index.html - echo "$proj
" >> $DIB_PIP_REPO/index.html - - popd - done - - echo "" >> $DIB_PIP_REPO/index.html - - local dib_pip_repo_apache_conf=$(apache_site_config_for dib_pip_repo) - - sudo cp $FILES/apache-dib-pip-repo.template $dib_pip_repo_apache_conf - sudo sed -e " - s|%DIB_PIP_REPO%|$DIB_PIP_REPO|g; - s|%DIB_PIP_REPO_PORT%|$DIB_PIP_REPO_PORT|g; - s|%APACHE_NAME%|$APACHE_NAME|g; - " -i $dib_pip_repo_apache_conf - enable_apache_site dib_pip_repo -} - -# disk_image_create_upload() - Creates and uploads a diskimage-builder built image -function disk_image_create_upload { - - local image_name=$1 - local image_elements=$2 - local elements_path=$3 - - local image_path=$TOP_DIR/files/$image_name.qcow2 - - # Include the apt-sources element in builds if we have an - # alternative sources.list specified. - if [ -n "$DIB_APT_SOURCES" ]; then - if [ ! -e "$DIB_APT_SOURCES" ]; then - die $LINENO "DIB_APT_SOURCES set but not found at $DIB_APT_SOURCES" - fi - local extra_elements="apt-sources" - fi - - # Set the local pip repo as the primary index mirror so the - # image is built with local packages - local pypi_mirror_url=http://$SERVICE_HOST:$DIB_PIP_REPO_PORT/ - local pypi_mirror_url_1 - - if [ -a $HOME/.pip/pip.conf ]; then - # Add the current pip.conf index-url as an extra-index-url - # in the image build - pypi_mirror_url_1=$(iniget $HOME/.pip/pip.conf global index-url) - else - # If no pip.conf, set upstream pypi as an extra mirror - # (this also sets the .pydistutils.cfg index-url) - pypi_mirror_url_1=http://pypi.python.org/simple - fi - - # The disk-image-create command to run - ELEMENTS_PATH=$elements_path \ - DIB_APT_SOURCES=$DIB_APT_SOURCES \ - DIB_OFFLINE=$DIB_BUILD_OFFLINE \ - PYPI_MIRROR_URL=$pypi_mirror_url \ - PYPI_MIRROR_URL_1=$pypi_mirror_url_1 \ - disk-image-create -a amd64 $image_elements ${extra_elements:-} \ - --image-cache $DIB_IMAGE_CACHE \ - -o $image_path - - local token=$(keystone token-get | grep ' id ' | get_field 2) - die_if_not_set $LINENO token "Keystone fail to 
get token" - - glance --os-auth-token $token --os-image-url http://$GLANCE_HOSTPORT \ - image-create --name $image_name --is-public True \ - --container-format=bare --disk-format qcow2 \ - < $image_path -} - -# Restore xtrace -$XTRACE - -# Tell emacs to use shell-script-mode -## Local variables: -## mode: shell-script -## End: diff --git a/lib/heat b/lib/heat index c1021639b4..a088e82886 100644 --- a/lib/heat +++ b/lib/heat @@ -8,9 +8,7 @@ # ENABLED_SERVICES+=,heat,h-api,h-api-cfn,h-api-cw,h-eng # Dependencies: -# -# - functions -# - dib (if HEAT_CREATE_TEST_IMAGE=True) +# (none) # stack.sh # --------- @@ -37,6 +35,13 @@ GITDIR["python-heatclient"]=$DEST/python-heatclient HEAT_DIR=$DEST/heat HEAT_CFNTOOLS_DIR=$DEST/heat-cfntools HEAT_TEMPLATES_REPO_DIR=$DEST/heat-templates +OCC_DIR=$DEST/os-collect-config +ORC_DIR=$DEST/os-refresh-config +OAC_DIR=$DEST/os-apply-config + +HEAT_PIP_REPO=$DATA_DIR/heat-pip-repo +HEAT_PIP_REPO_PORT=${HEAT_PIP_REPO_PORT:-8899} + HEAT_AUTH_CACHE_DIR=${HEAT_AUTH_CACHE_DIR:-/var/cache/heat} HEAT_STANDALONE=$(trueorfalse False HEAT_STANDALONE) HEAT_ENABLE_ADOPT_ABANDON=$(trueorfalse False HEAT_ENABLE_ADOPT_ABANDON) @@ -47,10 +52,6 @@ HEAT_TEMPLATES_DIR=$HEAT_CONF_DIR/templates HEAT_STACK_DOMAIN=$(trueorfalse True HEAT_STACK_DOMAIN) HEAT_API_HOST=${HEAT_API_HOST:-$HOST_IP} HEAT_API_PORT=${HEAT_API_PORT:-8004} -HEAT_FUNCTIONAL_IMAGE_ELEMENTS=${HEAT_FUNCTIONAL_IMAGE_ELEMENTS:-\ -vm fedora selinux-permissive pypi os-collect-config os-refresh-config \ -os-apply-config heat-cfntools heat-config heat-config-cfn-init \ -heat-config-puppet heat-config-script} # other default options @@ -296,22 +297,44 @@ function create_heat_accounts { fi } -# build_heat_functional_test_image() - Build and upload functional test image -function build_heat_functional_test_image { - if is_service_enabled dib; then - build_dib_pip_repo "$OCC_DIR $OAC_DIR $ORC_DIR $HEAT_CFNTOOLS_DIR" - local image_name=heat-functional-tests-image - - # Elements path for 
tripleo-image-elements and heat-templates software-config - local elements_path=$TIE_DIR/elements:$HEAT_TEMPLATES_REPO_DIR/hot/software-config/elements - - disk_image_create_upload "$image_name" "$HEAT_FUNCTIONAL_IMAGE_ELEMENTS" "$elements_path" - iniset $TEMPEST_CONFIG orchestration image_ref $image_name - else - echo "Error, HEAT_CREATE_TEST_IMAGE=True requires dib" >&2 - echo "Add \"enable_service dib\" to your localrc" >&2 - exit 1 - fi +# build_heat_pip_mirror() - Build a pip mirror containing heat agent projects +function build_heat_pip_mirror { + local project_dirs="$OCC_DIR $OAC_DIR $ORC_DIR $HEAT_CFNTOOLS_DIR" + local projpath proj package + + rm -rf $HEAT_PIP_REPO + mkdir -p $HEAT_PIP_REPO + + echo "" > $HEAT_PIP_REPO/index.html + for projpath in $project_dirs; do + proj=$(basename $projpath) + mkdir -p $HEAT_PIP_REPO/$proj + pushd $projpath + rm -rf dist + python setup.py sdist + pushd dist + package=$(ls *) + mv $package $HEAT_PIP_REPO/$proj/$package + popd + + echo "$package" > $HEAT_PIP_REPO/$proj/index.html + echo "$proj
" >> $HEAT_PIP_REPO/index.html + + popd + done + + echo "" >> $HEAT_PIP_REPO/index.html + + local heat_pip_repo_apache_conf=$(apache_site_config_for heat_pip_repo) + + sudo cp $FILES/apache-heat-pip-repo.template $heat_pip_repo_apache_conf + sudo sed -e " + s|%HEAT_PIP_REPO%|$HEAT_PIP_REPO|g; + s|%HEAT_PIP_REPO_PORT%|$HEAT_PIP_REPO_PORT|g; + s|%APACHE_NAME%|$APACHE_NAME|g; + " -i $heat_pip_repo_apache_conf + enable_apache_site heat_pip_repo + restart_apache_server } # Restore xtrace diff --git a/stack.sh b/stack.sh index 2ac7dfadeb..bf9fc0118e 100755 --- a/stack.sh +++ b/stack.sh @@ -1227,9 +1227,9 @@ if is_service_enabled heat; then init_heat echo_summary "Starting Heat" start_heat - if [ "$HEAT_CREATE_TEST_IMAGE" = "True" ]; then - echo_summary "Building Heat functional test image" - build_heat_functional_test_image + if [ "$HEAT_BUILD_PIP_MIRROR" = "True" ]; then + echo_summary "Building Heat pip mirror" + build_heat_pip_mirror fi fi diff --git a/stackrc b/stackrc index cb044b8fed..efea1e4560 100644 --- a/stackrc +++ b/stackrc @@ -419,14 +419,10 @@ GITBRANCH["ceilometermiddleware"]=${CEILOMETERMIDDLEWARE_BRANCH:-master} ################## # -# TripleO Components +# TripleO / Heat Agent Components # ################## -# diskimage-builder -DIB_REPO=${DIB_REPO:-${GIT_BASE}/openstack/diskimage-builder.git} -DIB_BRANCH=${DIB_BRANCH:-master} - # os-apply-config configuration template tool OAC_REPO=${OAC_REPO:-${GIT_BASE}/openstack/os-apply-config.git} OAC_BRANCH=${OAC_BRANCH:-master} @@ -439,10 +435,6 @@ OCC_BRANCH=${OCC_BRANCH:-master} ORC_REPO=${ORC_REPO:-${GIT_BASE}/openstack/os-refresh-config.git} ORC_BRANCH=${ORC_BRANCH:-master} -# Tripleo elements for diskimage-builder images -TIE_REPO=${TIE_REPO:-${GIT_BASE}/openstack/tripleo-image-elements.git} -TIE_BRANCH=${TIE_BRANCH:-master} - ################# # # 3rd Party Components (non pip installable) From cd8824ac04989e625d7f1ae442498383250932a9 Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Wed, 4 Mar 2015 
16:40:19 -0800 Subject: [PATCH 0015/2941] Pass PIP_FIND_LINKS through sudo to pip We weren't actually using the wheels since PIP_FIND_LINKS environmental variable was getting lost during the sudo Change-Id: I4a89a70df63772a16ee5a8c3f1cd86e9c7bb5242 --- inc/python | 2 ++ 1 file changed, 2 insertions(+) diff --git a/inc/python b/inc/python index dfc4d63ca4..d72c3c94d7 100644 --- a/inc/python +++ b/inc/python @@ -97,6 +97,7 @@ function pip_install { http_proxy=${http_proxy:-} \ https_proxy=${https_proxy:-} \ no_proxy=${no_proxy:-} \ + PIP_FIND_LINKS=$PIP_FIND_LINKS \ $cmd_pip install \ $@ @@ -108,6 +109,7 @@ function pip_install { http_proxy=${http_proxy:-} \ https_proxy=${https_proxy:-} \ no_proxy=${no_proxy:-} \ + PIP_FIND_LINKS=$PIP_FIND_LINKS \ $cmd_pip install \ -r $test_req fi From 82450a5ebcc050bc4161d99dc5e6d41d7b289a40 Mon Sep 17 00:00:00 2001 From: Baohua Yang Date: Thu, 5 Mar 2015 17:14:06 +0800 Subject: [PATCH 0016/2941] Fix typo of setings to settings The typo happens at the head part, and only one time occurs. Change-Id: Ic6d3d8e17447066fe5e8ab867b10516dc8f185cc --- samples/local.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/local.conf b/samples/local.conf index 9e0b540250..e4052c21ac 100644 --- a/samples/local.conf +++ b/samples/local.conf @@ -3,7 +3,7 @@ # NOTE: Copy this file to the root ``devstack`` directory for it to # work properly. -# ``local.conf`` is a user-maintained setings file that is sourced from ``stackrc``. +# ``local.conf`` is a user-maintained settings file that is sourced from ``stackrc``. # This gives it the ability to override any variables set in ``stackrc``. 
# Also, most of the settings in ``stack.sh`` are written to only be set if no # value has already been set; this lets ``local.conf`` effectively override the From 1340ee72bfefa5a4fd0930cb90987275301280c8 Mon Sep 17 00:00:00 2001 From: Bob Ball Date: Fri, 6 Mar 2015 21:11:55 +0000 Subject: [PATCH 0017/2941] XenAPI: Default JEOS VM to only use 1GB RAM While Devstack needs 4GB RAM (or more!) the JEOS used as the base for the Devstack VM for XenServer needs much less. Allowing the initial install to use 1GB means we have lower requirements overall Change-Id: Iecaeeb4db0dffcc43c5532b5d57cb041d47047a6 --- tools/xen/install_os_domU.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index 082c27e8f3..b49347e09b 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -227,7 +227,7 @@ if [ -z "$templateuuid" ]; then -n "$UBUNTU_INST_BRIDGE_OR_NET_NAME" \ -l "$GUEST_NAME" - set_vm_memory "$GUEST_NAME" "$OSDOMU_MEM_MB" + set_vm_memory "$GUEST_NAME" "1024" xe vm-start vm="$GUEST_NAME" From 40ce320bb013f850a47d32781dd2f77a4d7927fa Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Fri, 6 Mar 2015 15:33:32 -0800 Subject: [PATCH 0018/2941] Temporarily stop building numpy wheel It turns out we aren't actually using this wheel since we are still installing the deb python-numpy, and building numpy takes several minutes which is a lot considering we do it on every single dsvm job. So until we have wheel caching in place, and are actually using the version we build ourselves, stop wasting time. 
Change-Id: I7643c55598e5ecc29ea708c537818b37a8047d4b --- files/venv-requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/venv-requirements.txt b/files/venv-requirements.txt index 8417b92408..e473a2fe02 100644 --- a/files/venv-requirements.txt +++ b/files/venv-requirements.txt @@ -2,7 +2,7 @@ cryptography lxml MySQL-python netifaces -numpy +#numpy # slowest wheel by far, stop building until we are actually using the output posix-ipc psycopg2 pycrypto From a3c103be7dd218168fcd9f4d78a113490902a26d Mon Sep 17 00:00:00 2001 From: Akihiro Motoki Date: Sun, 8 Mar 2015 15:13:23 +0900 Subject: [PATCH 0019/2941] neutron-nec: Vendor code split Neutron NEC plugin support is configured using DevStack external plugin mechanism. The following needs to be added in local.conf: Q_PLUGIN=nec enable_plugin networking-nec https://git.openstack.org/stackforge/networking-nec Also removes lib/neutron_thirdparty/trema and files/debs/trema. DevStack external plugin for Trema Sliceable Switch is available and the following is needed to enable it in DevStack. 
enable_plugin trema-devstack-plugin https://github.com/nec-openstack/trema-devstack-plugin Change-Id: If983b986355fcc0118b6e0446b3b295f23b3c40e --- files/debs/trema | 15 ----- lib/neutron_plugins/nec | 127 +---------------------------------- lib/neutron_thirdparty/trema | 119 -------------------------------- 3 files changed, 3 insertions(+), 258 deletions(-) delete mode 100644 files/debs/trema delete mode 100644 lib/neutron_thirdparty/trema diff --git a/files/debs/trema b/files/debs/trema deleted file mode 100644 index f685ca53b4..0000000000 --- a/files/debs/trema +++ /dev/null @@ -1,15 +0,0 @@ -# Trema -make -ruby1.8 -rubygems1.8 -ruby1.8-dev -libpcap-dev -libsqlite3-dev -libglib2.0-dev - -# Sliceable Switch -sqlite3 -libdbi-perl -libdbd-sqlite3-perl -apache2 -libjson-perl diff --git a/lib/neutron_plugins/nec b/lib/neutron_plugins/nec index 3b1a25716a..9ea7338696 100644 --- a/lib/neutron_plugins/nec +++ b/lib/neutron_plugins/nec @@ -1,131 +1,10 @@ #!/bin/bash -# -# Neutron NEC OpenFlow plugin -# --------------------------- -# Save trace setting -NEC_XTRACE=$(set +o | grep xtrace) -set +o xtrace - -# Configuration parameters -OFC_HOST=${OFC_HOST:-127.0.0.1} -OFC_PORT=${OFC_PORT:-8888} - -OFC_API_HOST=${OFC_API_HOST:-$OFC_HOST} -OFC_API_PORT=${OFC_API_PORT:-$OFC_PORT} -OFC_OFP_HOST=${OFC_OFP_HOST:-$OFC_HOST} -OFC_OFP_PORT=${OFC_OFP_PORT:-6633} -OFC_DRIVER=${OFC_DRIVER:-trema} -OFC_RETRY_MAX=${OFC_RETRY_MAX:-0} -OFC_RETRY_INTERVAL=${OFC_RETRY_INTERVAL:-1} - -# Main logic -# --------------------------- - -source $TOP_DIR/lib/neutron_plugins/ovs_base - -function neutron_plugin_create_nova_conf { - _neutron_ovs_base_configure_nova_vif_driver -} - -function neutron_plugin_install_agent_packages { - # SKIP_OVS_INSTALL is useful when we want to use Open vSwitch whose - # version is different from the version provided by the distribution. - if [[ "$SKIP_OVS_INSTALL" = "True" ]]; then - echo "You need to install Open vSwitch manually." 
- return - fi - _neutron_ovs_base_install_agent_packages -} - -function neutron_plugin_configure_common { - Q_PLUGIN_CONF_PATH=etc/neutron/plugins/nec - Q_PLUGIN_CONF_FILENAME=nec.ini - Q_PLUGIN_CLASS="neutron.plugins.nec.nec_plugin.NECPluginV2" -} - -function neutron_plugin_configure_debug_command { - _neutron_ovs_base_configure_debug_command -} - -function neutron_plugin_configure_dhcp_agent { - : -} - -function neutron_plugin_configure_l3_agent { - _neutron_ovs_base_configure_l3_agent -} - -function _quantum_plugin_setup_bridge { - if [[ "$SKIP_OVS_BRIDGE_SETUP" = "True" ]]; then - return - fi - # Set up integration bridge - _neutron_ovs_base_setup_bridge $OVS_BRIDGE - # Generate datapath ID from HOST_IP - local dpid=$(printf "%07d%03d%03d%03d\n" ${HOST_IP//./ }) - sudo ovs-vsctl --no-wait set Bridge $OVS_BRIDGE other-config:datapath-id=$dpid - sudo ovs-vsctl --no-wait set-fail-mode $OVS_BRIDGE secure - sudo ovs-vsctl --no-wait set-controller $OVS_BRIDGE tcp:$OFC_OFP_HOST:$OFC_OFP_PORT - if [ -n "$OVS_INTERFACE" ]; then - sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_BRIDGE $OVS_INTERFACE - fi - _neutron_setup_ovs_tunnels $OVS_BRIDGE -} - -function neutron_plugin_configure_plugin_agent { - _quantum_plugin_setup_bridge - - AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-nec-agent" - - _neutron_ovs_base_configure_firewall_driver -} - -function neutron_plugin_configure_service { - iniset $NEUTRON_CONF DEFAULT api_extensions_path neutron/plugins/nec/extensions/ - iniset /$Q_PLUGIN_CONF_FILE ofc host $OFC_API_HOST - iniset /$Q_PLUGIN_CONF_FILE ofc port $OFC_API_PORT - iniset /$Q_PLUGIN_CONF_FILE ofc driver $OFC_DRIVER - iniset /$Q_PLUGIN_CONF_FILE ofc api_retry_max OFC_RETRY_MAX - iniset /$Q_PLUGIN_CONF_FILE ofc api_retry_interval OFC_RETRY_INTERVAL - - _neutron_ovs_base_configure_firewall_driver -} - -function neutron_plugin_setup_interface_driver { - local conf_file=$1 - iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver - 
iniset $conf_file DEFAULT ovs_use_veth True -} - -# Utility functions -# --------------------------- - -# Setup OVS tunnel manually -function _neutron_setup_ovs_tunnels { - local bridge=$1 - local id=0 - GRE_LOCAL_IP=${GRE_LOCAL_IP:-$HOST_IP} - if [ -n "$GRE_REMOTE_IPS" ]; then - for ip in ${GRE_REMOTE_IPS//:/ }; do - if [[ "$ip" == "$GRE_LOCAL_IP" ]]; then - continue - fi - sudo ovs-vsctl --no-wait add-port $bridge gre$id -- \ - set Interface gre$id type=gre options:remote_ip=$ip - id=`expr $id + 1` - done - fi -} +# This file is needed so Q_PLUGIN=nec will work. +# FIXME(amotoki): This function should not be here, but unfortunately +# devstack calls it before the external plugins are fetched function has_neutron_plugin_security_group { # 0 means True here return 0 } - -function neutron_plugin_check_adv_test_requirements { - is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 -} - -# Restore xtrace -$NEC_XTRACE diff --git a/lib/neutron_thirdparty/trema b/lib/neutron_thirdparty/trema deleted file mode 100644 index 075f013ded..0000000000 --- a/lib/neutron_thirdparty/trema +++ /dev/null @@ -1,119 +0,0 @@ -#!/bin/bash -# -# Trema Sliceable Switch -# ---------------------- - -# Trema is a Full-Stack OpenFlow Framework in Ruby and C -# https://github.com/trema/trema -# -# Trema Sliceable Switch is an OpenFlow controller which provides -# virtual layer-2 network slices. 
-# https://github.com/trema/apps/wiki - -# Trema Sliceable Switch (OpenFlow Controller) -TREMA_APPS_REPO=${TREMA_APPS_REPO:-https://github.com/trema/apps.git} -TREMA_APPS_BRANCH=${TREMA_APPS_BRANCH:-master} - -# Save trace setting -TREMA3_XTRACE=$(set +o | grep xtrace) -set +o xtrace - -TREMA_DIR=${TREMA_DIR:-$DEST/trema} -TREMA_SS_DIR="$TREMA_DIR/apps/sliceable_switch" - -TREMA_DATA_DIR=${TREMA_DATA_DIR:-$DATA_DIR/trema} -TREMA_SS_ETC_DIR=$TREMA_DATA_DIR/sliceable_switch/etc -TREMA_SS_DB_DIR=$TREMA_DATA_DIR/sliceable_switch/db -TREMA_SS_SCRIPT_DIR=$TREMA_DATA_DIR/sliceable_switch/script -TREMA_TMP_DIR=$TREMA_DATA_DIR/trema - -TREMA_LOG_LEVEL=${TREMA_LOG_LEVEL:-info} - -TREMA_SS_CONFIG=$TREMA_SS_ETC_DIR/sliceable.conf -TREMA_SS_APACHE_CONFIG=$(apache_site_config_for sliceable_switch) - -# configure_trema - Set config files, create data dirs, etc -function configure_trema { - # prepare dir - for d in $TREMA_SS_ETC_DIR $TREMA_SS_DB_DIR $TREMA_SS_SCRIPT_DIR; do - sudo mkdir -p $d - sudo chown -R `whoami` $d - done - sudo mkdir -p $TREMA_TMP_DIR -} - -# init_trema - Initialize databases, etc. 
-function init_trema { - local _pwd=$(pwd) - - # Initialize databases for Sliceable Switch - cd $TREMA_SS_DIR - rm -f filter.db slice.db - ./create_tables.sh - mv filter.db slice.db $TREMA_SS_DB_DIR - # Make sure that apache cgi has write access to the databases - sudo chown -R www-data.www-data $TREMA_SS_DB_DIR - cd $_pwd - - # Setup HTTP Server for sliceable_switch - cp $TREMA_SS_DIR/{Slice.pm,Filter.pm,config.cgi} $TREMA_SS_SCRIPT_DIR - sed -i -e "s|/home/sliceable_switch/db|$TREMA_SS_DB_DIR|" \ - $TREMA_SS_SCRIPT_DIR/config.cgi - - sudo cp $TREMA_SS_DIR/apache/sliceable_switch $TREMA_SS_APACHE_CONFIG - sudo sed -i -e "s|/home/sliceable_switch/script|$TREMA_SS_SCRIPT_DIR|" \ - $TREMA_SS_APACHE_CONFIG - # TODO(gabriel-bezerra): use some function from lib/apache to enable these modules - sudo a2enmod rewrite actions - enable_apache_site sliceable_switch - - cp $TREMA_SS_DIR/sliceable_switch_null.conf $TREMA_SS_CONFIG - sed -i -e "s|^\$apps_dir.*$|\$apps_dir = \"$TREMA_DIR/apps\"|" \ - -e "s|^\$db_dir.*$|\$db_dir = \"$TREMA_SS_DB_DIR\"|" \ - $TREMA_SS_CONFIG -} - -function gem_install { - [[ "$OFFLINE" = "True" ]] && return - [ -n "$RUBYGEMS_CMD" ] || get_gem_command - - local pkg=$1 - $RUBYGEMS_CMD list | grep "^${pkg} " && return - sudo $RUBYGEMS_CMD install $pkg -} - -function get_gem_command { - # Trema requires ruby 1.8, so gem1.8 is checked first - RUBYGEMS_CMD=$(which gem1.8 || which gem) - if [ -z "$RUBYGEMS_CMD" ]; then - echo "Warning: ruby gems command not found." 
- fi -} - -function install_trema { - # Trema - gem_install trema - # Sliceable Switch - git_clone $TREMA_APPS_REPO $TREMA_DIR/apps $TREMA_APPS_BRANCH - make -C $TREMA_DIR/apps/topology - make -C $TREMA_DIR/apps/flow_manager - make -C $TREMA_DIR/apps/sliceable_switch -} - -function start_trema { - restart_apache_server - - sudo LOGGING_LEVEL=$TREMA_LOG_LEVEL TREMA_TMP=$TREMA_TMP_DIR \ - trema run -d -c $TREMA_SS_CONFIG -} - -function stop_trema { - sudo TREMA_TMP=$TREMA_TMP_DIR trema killall -} - -function check_trema { - : -} - -# Restore xtrace -$TREMA3_XTRACE From dc757dd8506b9524defcffcf68dbc443380926a9 Mon Sep 17 00:00:00 2001 From: Jamie Lennox Date: Mon, 9 Mar 2015 14:48:09 +1100 Subject: [PATCH 0020/2941] Configure neutron->nova with identity v3 Use authentication plugins for neutron -> nova communications and default to using the password plugin, which defaults to using the v3 Identity API. Neutron config change: 13427a40768f1a4646520c6b7e3e8c988ce6e18c Change-Id: If152b97f940286ed08767225b13dedf6ef8c2342 --- lib/neutron | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/lib/neutron b/lib/neutron index a7aabc5909..e41abafda9 100755 --- a/lib/neutron +++ b/lib/neutron @@ -871,7 +871,7 @@ function _configure_neutron_common { fi if is_ssl_enabled_service "nova"; then - iniset $NEUTRON_CONF DEFAULT nova_ca_certificates_file "$SSL_BUNDLE_FILE" + iniset $NEUTRON_CONF nova cafile $SSL_BUNDLE_FILE fi if is_ssl_enabled_service "neutron"; then @@ -1045,13 +1045,15 @@ function _configure_neutron_service { # Configuration for neutron notifations to nova. 
iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_status_changes $Q_NOTIFY_NOVA_PORT_STATUS_CHANGES iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_PORT_DATA_CHANGES - iniset $NEUTRON_CONF DEFAULT nova_url "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2" - iniset $NEUTRON_CONF DEFAULT nova_region_name $REGION_NAME - iniset $NEUTRON_CONF DEFAULT nova_admin_username nova - iniset $NEUTRON_CONF DEFAULT nova_admin_password $SERVICE_PASSWORD - ADMIN_TENANT_ID=$(openstack project list | awk "/ service / { print \$2 }") - iniset $NEUTRON_CONF DEFAULT nova_admin_tenant_id $ADMIN_TENANT_ID - iniset $NEUTRON_CONF DEFAULT nova_admin_auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0" + + iniset $NEUTRON_CONF nova auth_plugin password + iniset $NEUTRON_CONF nova auth_url $KEYSTONE_AUTH_URI + iniset $NEUTRON_CONF nova username nova + iniset $NEUTRON_CONF nova password $SERVICE_PASSWORD + iniset $NEUTRON_CONF nova user_domain_id default + iniset $NEUTRON_CONF nova project_name $SERVICE_TENANT_NAME + iniset $NEUTRON_CONF nova project_domain_id default + iniset $NEUTRON_CONF nova region_name $REGION_NAME # Configure plugin neutron_plugin_configure_service From fbb3e773f017e90286f7e988c4167c3758edba45 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Tue, 3 Mar 2015 15:08:28 +0100 Subject: [PATCH 0021/2941] Remove the keystone admin token from swift The keystone admin token is supposed to be used only for setting up keystone and it should not be used in any other service config. 
Change-Id: Iaa9be1878e89a6bc3a84a0c57fc6f5cecc371d2f --- lib/swift | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/lib/swift b/lib/swift index 8a96615d01..4a63500b54 100644 --- a/lib/swift +++ b/lib/swift @@ -441,16 +441,15 @@ function configure_swift { if is_service_enabled swift3; then cat <>${SWIFT_CONFIG_PROXY_SERVER} -# NOTE(chmou): s3token middleware is not updated yet to use only -# username and password. [filter:s3token] paste.filter_factory = keystoneclient.middleware.s3_token:filter_factory auth_port = ${KEYSTONE_AUTH_PORT} auth_host = ${KEYSTONE_AUTH_HOST} auth_protocol = ${KEYSTONE_AUTH_PROTOCOL} cafile = ${SSL_BUNDLE_FILE} -auth_token = ${SERVICE_TOKEN} -admin_token = ${SERVICE_TOKEN} +admin_user = swift +admin_tenant_name = ${SERVICE_TENANT_NAME} +admin_password = ${SERVICE_PASSWORD} [filter:swift3] use = egg:swift3#swift3 From bf2ad7015d068f9a85c01813cea0aa79143b1d0f Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 9 Mar 2015 15:16:10 -0500 Subject: [PATCH 0022/2941] Move configuration functions into inc/* * config/INI functions from functions-common to to inc/ini-config * local.conf meta-config functions from lib/config to inc/meta-config Change-Id: I00fab724075a693529273878875cfd292d00b18a --- doc/source/index.rst | 7 +- functions | 1 + functions-common | 199 ---------------- inc/ini-config | 223 ++++++++++++++++++ lib/config => inc/meta-config | 10 +- stack.sh | 2 +- tests/{test_ini.sh => test_ini_config.sh} | 4 +- tests/{test_config.sh => test_meta_config.sh} | 6 +- tools/build_docs.sh | 2 +- tox.ini | 1 + 10 files changed, 243 insertions(+), 212 deletions(-) create mode 100644 inc/ini-config rename lib/config => inc/meta-config (96%) rename tests/{test_ini.sh => test_ini_config.sh} (99%) rename tests/{test_config.sh => test_meta_config.sh} (99%) diff --git a/doc/source/index.rst b/doc/source/index.rst index cfde99132d..bac593de03 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -156,7 +156,6 @@ 
Scripts * `lib/ceilometer `__ * `lib/ceph `__ * `lib/cinder `__ -* `lib/config `__ * `lib/database `__ * `lib/dstat `__ * `lib/glance `__ @@ -188,6 +187,12 @@ Scripts * `extras.d/70-zaqar.sh `__ * `extras.d/80-tempest.sh `__ +* `inc/ini-config `__ +* `inc/meta-config `__ +* `inc/python `__ + +* `pkg/elasticsearch.sh `_ + Configuration ------------- diff --git a/functions b/functions index 79b2b37b4e..9adbfe7cf6 100644 --- a/functions +++ b/functions @@ -13,6 +13,7 @@ # Include the common functions FUNC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd) source ${FUNC_DIR}/functions-common +source ${FUNC_DIR}/inc/ini-config source ${FUNC_DIR}/inc/python # Save trace setting diff --git a/functions-common b/functions-common index df69cbad16..4739e42e90 100644 --- a/functions-common +++ b/functions-common @@ -43,197 +43,6 @@ declare -A GITDIR TRACK_DEPENDS=${TRACK_DEPENDS:-False} -# Config Functions -# ================ - -# Append a new option in an ini file without replacing the old value -# iniadd config-file section option value1 value2 value3 ... 
-function iniadd { - local xtrace=$(set +o | grep xtrace) - set +o xtrace - local file=$1 - local section=$2 - local option=$3 - shift 3 - - local values="$(iniget_multiline $file $section $option) $@" - iniset_multiline $file $section $option $values - $xtrace -} - -# Comment an option in an INI file -# inicomment config-file section option -function inicomment { - local xtrace=$(set +o | grep xtrace) - set +o xtrace - local file=$1 - local section=$2 - local option=$3 - - sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" "$file" - $xtrace -} - -# Get an option from an INI file -# iniget config-file section option -function iniget { - local xtrace=$(set +o | grep xtrace) - set +o xtrace - local file=$1 - local section=$2 - local option=$3 - local line - - line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") - echo ${line#*=} - $xtrace -} - -# Get a multiple line option from an INI file -# iniget_multiline config-file section option -function iniget_multiline { - local xtrace=$(set +o | grep xtrace) - set +o xtrace - local file=$1 - local section=$2 - local option=$3 - local values - - values=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { s/^$option[ \t]*=[ \t]*//gp; }" "$file") - echo ${values} - $xtrace -} - -# Determinate is the given option present in the INI file -# ini_has_option config-file section option -function ini_has_option { - local xtrace=$(set +o | grep xtrace) - set +o xtrace - local file=$1 - local section=$2 - local option=$3 - local line - - line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") - $xtrace - [ -n "$line" ] -} - -# Add another config line for a multi-line option. -# It's normally called after iniset of the same option and assumes -# that the section already exists. -# -# Note that iniset_multiline requires all the 'lines' to be supplied -# in the argument list. Doing that will cause incorrect configuration -# if spaces are used in the config values. 
-# -# iniadd_literal config-file section option value -function iniadd_literal { - local xtrace=$(set +o | grep xtrace) - set +o xtrace - local file=$1 - local section=$2 - local option=$3 - local value=$4 - - [[ -z $section || -z $option ]] && return - - # Add it - sed -i -e "/^\[$section\]/ a\\ -$option = $value -" "$file" - - $xtrace -} - -function inidelete { - local xtrace=$(set +o | grep xtrace) - set +o xtrace - local file=$1 - local section=$2 - local option=$3 - - [[ -z $section || -z $option ]] && return - - # Remove old values - sed -i -e "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ d; }" "$file" - - $xtrace -} - -# Set an option in an INI file -# iniset config-file section option value -function iniset { - local xtrace=$(set +o | grep xtrace) - set +o xtrace - local file=$1 - local section=$2 - local option=$3 - local value=$4 - - [[ -z $section || -z $option ]] && return - - if ! grep -q "^\[$section\]" "$file" 2>/dev/null; then - # Add section at the end - echo -e "\n[$section]" >>"$file" - fi - if ! ini_has_option "$file" "$section" "$option"; then - # Add it - sed -i -e "/^\[$section\]/ a\\ -$option = $value -" "$file" - else - local sep=$(echo -ne "\x01") - # Replace it - sed -i -e '/^\['${section}'\]/,/^\[.*\]/ s'${sep}'^\('${option}'[ \t]*=[ \t]*\).*$'${sep}'\1'"${value}"${sep} "$file" - fi - $xtrace -} - -# Set a multiple line option in an INI file -# iniset_multiline config-file section option value1 value2 valu3 ... -function iniset_multiline { - local xtrace=$(set +o | grep xtrace) - set +o xtrace - local file=$1 - local section=$2 - local option=$3 - - shift 3 - local values - for v in $@; do - # The later sed command inserts each new value in the line next to - # the section identifier, which causes the values to be inserted in - # the reverse order. Do a reverse here to keep the original order. - values="$v ${values}" - done - if ! 
grep -q "^\[$section\]" "$file"; then - # Add section at the end - echo -e "\n[$section]" >>"$file" - else - # Remove old values - sed -i -e "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ d; }" "$file" - fi - # Add new ones - for v in $values; do - sed -i -e "/^\[$section\]/ a\\ -$option = $v -" "$file" - done - $xtrace -} - -# Uncomment an option in an INI file -# iniuncomment config-file section option -function iniuncomment { - local xtrace=$(set +o | grep xtrace) - set +o xtrace - local file=$1 - local section=$2 - local option=$3 - sed -i -e "/^\[$section\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" "$file" - $xtrace -} # Normalize config values to True or False # Accepts as False: 0 no No NO false False FALSE @@ -253,14 +62,6 @@ function trueorfalse { $xtrace } -function isset { - nounset=$(set +o | grep nounset) - set +o nounset - [[ -n "${!1+x}" ]] - result=$? - $nounset - return $result -} # Control Functions # ================= diff --git a/inc/ini-config b/inc/ini-config new file mode 100644 index 0000000000..0d6d169f8b --- /dev/null +++ b/inc/ini-config @@ -0,0 +1,223 @@ +#!/bin/bash +# +# **inc/ini-config** - Configuration/INI functions +# +# Support for manipulating INI-style configuration files +# +# These functions have no external dependencies and no side-effects + +# Save trace setting +INC_CONF_TRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Config Functions +# ================ + +# Append a new option in an ini file without replacing the old value +# iniadd config-file section option value1 value2 value3 ... 
+function iniadd { + local xtrace=$(set +o | grep xtrace) + set +o xtrace + local file=$1 + local section=$2 + local option=$3 + shift 3 + + local values="$(iniget_multiline $file $section $option) $@" + iniset_multiline $file $section $option $values + $xtrace +} + +# Comment an option in an INI file +# inicomment config-file section option +function inicomment { + local xtrace=$(set +o | grep xtrace) + set +o xtrace + local file=$1 + local section=$2 + local option=$3 + + sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" "$file" + $xtrace +} + +# Get an option from an INI file +# iniget config-file section option +function iniget { + local xtrace=$(set +o | grep xtrace) + set +o xtrace + local file=$1 + local section=$2 + local option=$3 + local line + + line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") + echo ${line#*=} + $xtrace +} + +# Get a multiple line option from an INI file +# iniget_multiline config-file section option +function iniget_multiline { + local xtrace=$(set +o | grep xtrace) + set +o xtrace + local file=$1 + local section=$2 + local option=$3 + local values + + values=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { s/^$option[ \t]*=[ \t]*//gp; }" "$file") + echo ${values} + $xtrace +} + +# Determinate is the given option present in the INI file +# ini_has_option config-file section option +function ini_has_option { + local xtrace=$(set +o | grep xtrace) + set +o xtrace + local file=$1 + local section=$2 + local option=$3 + local line + + line=$(sed -ne "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ p; }" "$file") + $xtrace + [ -n "$line" ] +} + +# Add another config line for a multi-line option. +# It's normally called after iniset of the same option and assumes +# that the section already exists. +# +# Note that iniset_multiline requires all the 'lines' to be supplied +# in the argument list. Doing that will cause incorrect configuration +# if spaces are used in the config values. 
+# +# iniadd_literal config-file section option value +function iniadd_literal { + local xtrace=$(set +o | grep xtrace) + set +o xtrace + local file=$1 + local section=$2 + local option=$3 + local value=$4 + + [[ -z $section || -z $option ]] && return + + # Add it + sed -i -e "/^\[$section\]/ a\\ +$option = $value +" "$file" + + $xtrace +} + +# Remove an option from an INI file +# inidelete config-file section option +function inidelete { + local xtrace=$(set +o | grep xtrace) + set +o xtrace + local file=$1 + local section=$2 + local option=$3 + + [[ -z $section || -z $option ]] && return + + # Remove old values + sed -i -e "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ d; }" "$file" + + $xtrace +} + +# Set an option in an INI file +# iniset config-file section option value +function iniset { + local xtrace=$(set +o | grep xtrace) + set +o xtrace + local file=$1 + local section=$2 + local option=$3 + local value=$4 + + [[ -z $section || -z $option ]] && return + + if ! grep -q "^\[$section\]" "$file" 2>/dev/null; then + # Add section at the end + echo -e "\n[$section]" >>"$file" + fi + if ! ini_has_option "$file" "$section" "$option"; then + # Add it + sed -i -e "/^\[$section\]/ a\\ +$option = $value +" "$file" + else + local sep=$(echo -ne "\x01") + # Replace it + sed -i -e '/^\['${section}'\]/,/^\[.*\]/ s'${sep}'^\('${option}'[ \t]*=[ \t]*\).*$'${sep}'\1'"${value}"${sep} "$file" + fi + $xtrace +} + +# Set a multiple line option in an INI file +# iniset_multiline config-file section option value1 value2 valu3 ... +function iniset_multiline { + local xtrace=$(set +o | grep xtrace) + set +o xtrace + local file=$1 + local section=$2 + local option=$3 + + shift 3 + local values + for v in $@; do + # The later sed command inserts each new value in the line next to + # the section identifier, which causes the values to be inserted in + # the reverse order. Do a reverse here to keep the original order. + values="$v ${values}" + done + if ! 
grep -q "^\[$section\]" "$file"; then + # Add section at the end + echo -e "\n[$section]" >>"$file" + else + # Remove old values + sed -i -e "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ d; }" "$file" + fi + # Add new ones + for v in $values; do + sed -i -e "/^\[$section\]/ a\\ +$option = $v +" "$file" + done + $xtrace +} + +# Uncomment an option in an INI file +# iniuncomment config-file section option +function iniuncomment { + local xtrace=$(set +o | grep xtrace) + set +o xtrace + local file=$1 + local section=$2 + local option=$3 + sed -i -e "/^\[$section\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" "$file" + $xtrace +} + +function isset { + nounset=$(set +o | grep nounset) + set +o nounset + [[ -n "${!1+x}" ]] + result=$? + $nounset + return $result +} + + +# Restore xtrace +$INC_CONF_TRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/config b/inc/meta-config similarity index 96% rename from lib/config rename to inc/meta-config index 31c6fa6e57..c8789bf816 100644 --- a/lib/config +++ b/inc/meta-config @@ -1,7 +1,9 @@ #!/bin/bash # -# lib/config - Configuration file manipulation functions - +# **lib/meta-config** - Configuration file manipulation functions +# +# Support for DevStack's local.conf meta-config sections +# # These functions have no external dependencies and the following side-effects: # # CONFIG_AWK_CMD is defined, default is ``awk`` @@ -18,7 +20,7 @@ # file-name is the destination of the config file # Save trace setting -C_XTRACE=$(set +o | grep xtrace) +INC_META_XTRACE=$(set +o | grep xtrace) set +o xtrace @@ -176,7 +178,7 @@ function merge_config_group { # Restore xtrace -$C_XTRACE +$INC_META_XTRACE # Local variables: # mode: shell-script diff --git a/stack.sh b/stack.sh index bf9fc0118e..2060f2d65a 100755 --- a/stack.sh +++ b/stack.sh @@ -92,7 +92,7 @@ LAST_SPINNER_PID="" source $TOP_DIR/functions # Import config functions -source $TOP_DIR/lib/config +source $TOP_DIR/inc/meta-config # Import 'public' stack.sh 
functions source $TOP_DIR/lib/stack diff --git a/tests/test_ini.sh b/tests/test_ini_config.sh similarity index 99% rename from tests/test_ini.sh rename to tests/test_ini_config.sh index 106cc9507f..4a0ae33aeb 100755 --- a/tests/test_ini.sh +++ b/tests/test_ini_config.sh @@ -4,8 +4,8 @@ TOP=$(cd $(dirname "$0")/.. && pwd) -# Import common functions -source $TOP/functions +# Import config functions +source $TOP/inc/ini-config echo "Testing INI functions" diff --git a/tests/test_config.sh b/tests/test_meta_config.sh similarity index 99% rename from tests/test_config.sh rename to tests/test_meta_config.sh index 3252104bf1..9d65280b8c 100755 --- a/tests/test_config.sh +++ b/tests/test_meta_config.sh @@ -4,11 +4,9 @@ TOP=$(cd $(dirname "$0")/.. && pwd) -# Import common functions -source $TOP/functions - # Import config functions -source $TOP/lib/config +source $TOP/inc/ini-config +source $TOP/inc/meta-config # check_result() tests and reports the result values # check_result "actual" "expected" diff --git a/tools/build_docs.sh b/tools/build_docs.sh index 929d1e0e6c..2aa0a0ac04 100755 --- a/tools/build_docs.sh +++ b/tools/build_docs.sh @@ -81,7 +81,7 @@ for f in $(find . 
-name .git -prune -o \( -type f -name \*.sh -not -path \*shocc mkdir -p $FQ_HTML_BUILD/`dirname $f`; $SHOCCO $f > $FQ_HTML_BUILD/$f.html done -for f in $(find functions functions-common lib samples -type f -name \*); do +for f in $(find functions functions-common inc lib pkg samples -type f -name \*); do echo $f FILES+="$f " mkdir -p $FQ_HTML_BUILD/`dirname $f`; diff --git a/tox.ini b/tox.ini index a958ae7ba4..bc84928d95 100644 --- a/tox.ini +++ b/tox.ini @@ -20,6 +20,7 @@ commands = bash -c "find {toxinidir} \ -name \*.sh -or \ -name \*rc -or \ -name functions\* -or \ + -wholename \*/inc/\* \ # /inc files and -wholename \*/lib/\* \ # /lib files are shell, but \) \ # have no extension -print0 | xargs -0 bashate -v" From 4de0f1cd0ba1541f49eb54a68b32ec7f973c274b Mon Sep 17 00:00:00 2001 From: Takashi NATSUME Date: Tue, 10 Mar 2015 14:51:39 +0900 Subject: [PATCH 0023/2941] Fix typo in devstack-with-nested-kvm.rst 'succesfully' has been fixed to 'successfully'. Change-Id: Ib14b1b8cb612aba759f3fe8b94d35cf47eb9b339 Closes-Bug: #1430149 --- doc/source/guides/devstack-with-nested-kvm.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/guides/devstack-with-nested-kvm.rst b/doc/source/guides/devstack-with-nested-kvm.rst index 58ec3d3672..610300be52 100644 --- a/doc/source/guides/devstack-with-nested-kvm.rst +++ b/doc/source/guides/devstack-with-nested-kvm.rst @@ -129,7 +129,7 @@ DevStack's ``local.conf``: LIBVIRT_TYPE=kvm -Once DevStack is configured succesfully, verify if the Nova instances +Once DevStack is configured successfully, verify if the Nova instances are using KVM by noticing the QEMU CLI invoked by Nova is using the parameter `accel=kvm`, e.g.: From 679f395fbbae68b4dfee0edbddff646ff75b5a0d Mon Sep 17 00:00:00 2001 From: Ethan Lynn Date: Mon, 9 Mar 2015 23:45:18 +0800 Subject: [PATCH 0024/2941] Set os_region_name for cinder Region name should be set to nova.conf and cinder.conf so that cinder volume can work in multiregion env. 
Closes-Bug: #1429738 Change-Id: Ib20911c24d8daabc07e6515f4a23a745d77593ff --- lib/cinder | 2 ++ lib/nova | 2 ++ 2 files changed, 4 insertions(+) diff --git a/lib/cinder b/lib/cinder index 0d157dd78e..19240eec60 100644 --- a/lib/cinder +++ b/lib/cinder @@ -242,6 +242,8 @@ function configure_cinder { # supported. iniset $CINDER_CONF DEFAULT enable_v1_api true + iniset $CINDER_CONF DEFAULT os_region_name "$REGION_NAME" + if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then local enabled_backends="" local default_name="" diff --git a/lib/nova b/lib/nova index e9e78c7bc4..199daeea3d 100644 --- a/lib/nova +++ b/lib/nova @@ -544,6 +544,8 @@ function create_nova_conf { iniset $NOVA_CONF DEFAULT ec2_workers "$API_WORKERS" iniset $NOVA_CONF DEFAULT metadata_workers "$API_WORKERS" + iniset $NOVA_CONF cinder os_region_name "$REGION_NAME" + if [[ "$NOVA_BACKEND" == "LVM" ]]; then iniset $NOVA_CONF libvirt images_type "lvm" iniset $NOVA_CONF libvirt images_volume_group $DEFAULT_VOLUME_GROUP_NAME From 360839e0addbd2692ee34333ac06d957cf60b780 Mon Sep 17 00:00:00 2001 From: Ken Chen Date: Fri, 27 Feb 2015 14:12:23 +0800 Subject: [PATCH 0025/2941] Remove setting use_floating_ips values We remove the code to set use_floating_ips. In old code it was set false if we do not use neutron. However, we cannot deploy clusters with floating ips by that. So we just use the default value, which is set True in Sahara. 
Closes-Bug: #1426226 Change-Id: Idfcdc5ab776681ddc740dc12035e04da349ea089 --- lib/sahara | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/sahara b/lib/sahara index 9b2e9c406d..521b19a4a1 100644 --- a/lib/sahara +++ b/lib/sahara @@ -139,14 +139,12 @@ function configure_sahara { if is_service_enabled neutron; then iniset $SAHARA_CONF_FILE DEFAULT use_neutron true - iniset $SAHARA_CONF_FILE DEFAULT use_floating_ips true if is_ssl_enabled_service "neutron" || is_service_enabled tls-proxy; then iniset $SAHARA_CONF_FILE neutron ca_file $SSL_BUNDLE_FILE fi else iniset $SAHARA_CONF_FILE DEFAULT use_neutron false - iniset $SAHARA_CONF_FILE DEFAULT use_floating_ips false fi if is_service_enabled heat; then From db1152c96e8e5a4ce599677f9ee3d556f925d734 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Tue, 13 Jan 2015 10:18:49 +1100 Subject: [PATCH 0026/2941] Document use of plugins for gate jobs Document use of plugins for gate jobs. See also [1] [1] http://lists.openstack.org/pipermail/openstack-dev/2015-January/054291.html Change-Id: I9ed82f5d195511fb612517745f93f2a54475091a --- doc/source/plugins.rst | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst index d1f73771a3..0217d09457 100644 --- a/doc/source/plugins.rst +++ b/doc/source/plugins.rst @@ -131,6 +131,31 @@ An example would be as follows:: enable_plugin glusterfs https://github.com/sdague/devstack-plugins glusterfs +Plugins for gate jobs +--------------------- + +All OpenStack plugins that wish to be used as gate jobs need to exist +in OpenStack's gerrit. Both ``openstack`` namespace and ``stackforge`` +namespace are fine. This allows testing of the plugin as well as +provides network isolation against upstream git repository failures +(which we see often enough to be an issue). + +Ideally plugins will be implemented as ``devstack`` directory inside +the project they are testing. 
For example, the stackforge/ec2-api +project has its plugin support in its tree. + +In the cases where there is no "project tree" per se (like +integrating a backend storage configuration such as ceph or glusterfs) +it's also allowed to build a dedicated +``stackforge/devstack-plugin-FOO`` project to house the plugin. + +Note jobs must not require cloning of repositories during tests. +Tests must list their repository in the ``PROJECTS`` variable for +`devstack-gate +`_ +for the repository to be available to the test. Further information +is provided in the project creator's guide. + Hypervisor ========== From 7c4ce9edbad6f3c33469d45be832ebea4a46ff70 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Tue, 10 Mar 2015 11:32:26 +1100 Subject: [PATCH 0027/2941] Check for new versions of get-pip.py People can leave their devstack installs around for a long time, and in the mean time new versions of pip can be released. The current check does not download a new version if an old one exists. We want to check for new versions, but we also don't want the gate jobs trying this sometimes unreliable fetch. So add a flag-file that tells devstack if it downloaded get-pip.py originally. If so, on each run check for a new version using curl's "-z" flag to request only files modified since the file's timestamp. Change-Id: I91734528f02deafabf3d18d968c3abd749751199 Closes-Bug: #1429943 --- .gitignore | 2 +- tools/install_pip.sh | 16 ++++++++++++++-- 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/.gitignore b/.gitignore index 67ab722190..c6900c881c 100644 --- a/.gitignore +++ b/.gitignore @@ -14,7 +14,7 @@ files/*.gz files/*.qcow2 files/images files/pip-* -files/get-pip.py +files/get-pip.py* local.conf local.sh localrc diff --git a/tools/install_pip.sh b/tools/install_pip.sh index 73d0947320..b7b40c7486 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -42,9 +42,21 @@ function get_versions { function install_get_pip { - if [[ ! 
-r $LOCAL_PIP ]]; then - curl --retry 6 --retry-delay 5 -o $LOCAL_PIP $PIP_GET_PIP_URL || \ + # the openstack gate and others put a cached version of get-pip.py + # for this to find, explicitly to avoid download issues. + # + # However, if devstack *did* download the file, we want to check + # for updates; people can leave their stacks around for a long + # time and in the mean-time pip might get upgraded. + # + # Thus we use curl's "-z" feature to always check the modified + # since and only download if a new version is out -- but only if + # it seems we downloaded the file originally. + if [[ ! -r $LOCAL_PIP || -r $LOCAL_PIP.downloaded ]]; then + curl --retry 6 --retry-delay 5 \ + -z $LOCAL_PIP -o $LOCAL_PIP $PIP_GET_PIP_URL || \ die $LINENO "Download of get-pip.py failed" + touch $LOCAL_PIP.downloaded fi sudo -H -E python $LOCAL_PIP } From a72a393d658216ec75a59ad5a788e2504fee4b53 Mon Sep 17 00:00:00 2001 From: kieleth Date: Tue, 10 Mar 2015 08:47:05 -0700 Subject: [PATCH 0028/2941] Add sudo to yum example Add sudo to yum example so following along with copy-paste works Change-Id: I5e64b3d751b55989a353bfe2bb691ea6e51690e3 --- doc/source/guides/single-machine.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/guides/single-machine.rst b/doc/source/guides/single-machine.rst index 70287a9037..236ece9c01 100644 --- a/doc/source/guides/single-machine.rst +++ b/doc/source/guides/single-machine.rst @@ -67,7 +67,7 @@ We'll grab the latest version of DevStack via https: :: - sudo apt-get install git -y || yum install -y git + sudo apt-get install git -y || sudo yum install -y git git clone https://git.openstack.org/openstack-dev/devstack cd devstack From b0595235a2374451c3f899fb893ad989a74b04d1 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 11 Mar 2015 12:04:49 +1100 Subject: [PATCH 0029/2941] Don't use packaged unittest2 Let pip install unittest2; pip installation can conflict with the packaged version. 
Change-Id: Iec9b35174ac68ebf713cd7462d7b5a82583d6e22 Partial-Bug: #1430592 --- files/rpms-suse/general | 1 - files/rpms/general | 1 - 2 files changed, 2 deletions(-) diff --git a/files/rpms-suse/general b/files/rpms-suse/general index 7f4bbfb1b6..63cf14bd5b 100644 --- a/files/rpms-suse/general +++ b/files/rpms-suse/general @@ -16,7 +16,6 @@ openssl psmisc python-cmd2 # dist:opensuse-12.3 python-pylint -python-unittest2 screen tar tcpdump diff --git a/files/rpms/general b/files/rpms/general index cf406325e7..eac4ec36a7 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -14,7 +14,6 @@ libxml2-devel libxslt-devel psmisc pylint -python-unittest2 python-devel screen tar From 4d74e0f49515e23668a9dd955f30939cd03f94b2 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 11 Mar 2015 09:45:59 +1100 Subject: [PATCH 0030/2941] Fail if run in POSIX compatibility mode This is mostly to detect if someone is running the script with "sh ./stack.sh" where sh is the bash-symlink that puts it in POSIX mode (this can be invoked in other ways, but much less common). In this case POSIXLY_CORRECT is set; so if we see that, bail out early before we start hitting syntax errors. Closes-Bug: #1430535 Change-Id: I7bbc4b0f656df9f6d9da2243c8caeb42d30ace95 --- stack.sh | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/stack.sh b/stack.sh index bf9fc0118e..36a9ed46d2 100755 --- a/stack.sh +++ b/stack.sh @@ -21,6 +21,13 @@ # Learn more and get the most recent version at http://devstack.org +# check if someone has invoked with "sh" +if [[ "${POSIXLY_CORRECT}" == "y" ]]; then + echo "You appear to be running bash in POSIX compatibility mode." + echo "devstack uses bash features. 
\"./stack.sh\" should do the right thing" + exit 1 +fi + # Make sure custom grep options don't get in the way unset GREP_OPTIONS From 10a8c88cccc43a8a9222b5e414198b105b190a67 Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Wed, 11 Mar 2015 16:41:32 +0900 Subject: [PATCH 0031/2941] README.md: Correct the defaults of some of Q_ML2_PLUGIN variables Also, tweak Q_AGENT description and note its default. Change-Id: Ie24d14f58c09ccd375fd981683dba2efd5211f24 --- README.md | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 206ffe098b..2e269762f7 100644 --- a/README.md +++ b/README.md @@ -249,14 +249,17 @@ To change this, set the `Q_AGENT` variable to the agent you want to run Variable Name Notes ---------------------------------------------------------------------------- Q_AGENT This specifies which agent to run with the - ML2 Plugin (either `openvswitch` or `linuxbridge`). + ML2 Plugin (Typically either `openvswitch` + or `linuxbridge`). + Defaults to `openvswitch`. Q_ML2_PLUGIN_MECHANISM_DRIVERS The ML2 MechanismDrivers to load. The default - is none. Note, ML2 will work with the OVS - and LinuxBridge agents by default. + is `openvswitch,linuxbridge`. Q_ML2_PLUGIN_TYPE_DRIVERS The ML2 TypeDrivers to load. Defaults to all available TypeDrivers. - Q_ML2_PLUGIN_GRE_TYPE_OPTIONS GRE TypeDriver options. Defaults to none. - Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS VXLAN TypeDriver options. Defaults to none. + Q_ML2_PLUGIN_GRE_TYPE_OPTIONS GRE TypeDriver options. Defaults to + `tunnel_id_ranges=1:1000'. + Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS VXLAN TypeDriver options. Defaults to + `vni_ranges=1001:2000` Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS VLAN TypeDriver options. Defaults to none. 
# Heat From d28ea9cbcb1db93d6ae925ee6efc8a032a319816 Mon Sep 17 00:00:00 2001 From: Christian Berendt Date: Wed, 11 Mar 2015 18:53:33 +0100 Subject: [PATCH 0032/2941] Add package mongodb to files/rpms/zaqar-server The command mongo, used in lib/zaqar, is part of the package mongodb. At the moment only mongodb-server is listed in files/rpms/zaqar-server, mongodb has to be added there. Change-Id: I60edeae6760addad62b9b61c3dcdecc2ff01cba7 Closes-bug: #1430939 --- files/rpms/zaqar-server | 1 + 1 file changed, 1 insertion(+) diff --git a/files/rpms/zaqar-server b/files/rpms/zaqar-server index 69e8bfa80b..541cefa99d 100644 --- a/files/rpms/zaqar-server +++ b/files/rpms/zaqar-server @@ -1,4 +1,5 @@ selinux-policy-targeted +mongodb mongodb-server pymongo redis # NOPRIME From 7ca90cded374685c8c68ea50381220b915eb0b63 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Wed, 4 Mar 2015 17:25:07 -0800 Subject: [PATCH 0033/2941] Allow devstack plugins to specify prereq packages We offer main devstack components the ability to install their own system package preqreqs via files/{debs, rpms}/$service. This adds similar functionality for plugins, who can now do the same in their own tree at ./devstack/files/{debs, rpms}/$plugin. Change-Id: I63af8dc54c75a6e80ca4b2a96c76233a0795aabb --- doc/source/plugins.rst | 21 ++++++ functions-common | 153 +++++++++++++++++++++++---------------- tools/install_prereqs.sh | 2 + 3 files changed, 115 insertions(+), 61 deletions(-) diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst index 5d6d3f183d..a9153df0d1 100644 --- a/doc/source/plugins.rst +++ b/doc/source/plugins.rst @@ -154,3 +154,24 @@ functions: - ``start_nova_hypervisor`` - start any external services - ``stop_nova_hypervisor`` - stop any external services - ``cleanup_nova_hypervisor`` - remove transient data and cache + +System Packages +=============== + +Devstack provides a framework for getting packages installed at an early +phase of its execution. 
These packages may be defined in a plugin as files +that contain new-line separated lists of packages required by the plugin + +Supported packaging systems include apt and yum across multiple distributions. +To enable a plugin to hook into this and install package dependencies, packages +may be listed at the following locations in the top-level of the plugin +repository: + +- ``./devstack/files/debs/$plugin_name`` - Packages to install when running + on Ubuntu, Debian or Linux Mint. + +- ``./devstack/files/rpms/$plugin_name`` - Packages to install when running + on Red Hat, Fedora, CentOS or XenServer. + +- ``./devstack/files/rpms-suse/$plugin_name`` - Packages to install when + running on SUSE Linux or openSUSE. diff --git a/functions-common b/functions-common index df69cbad16..e791ad79c8 100644 --- a/functions-common +++ b/functions-common @@ -973,13 +973,18 @@ function get_or_create_endpoint { # _get_package_dir function _get_package_dir { + local base_dir=$1 local pkg_dir + + if [[ -z "$base_dir" ]]; then + base_dir=$FILES + fi if is_ubuntu; then - pkg_dir=$FILES/debs + pkg_dir=$base_dir/debs elif is_fedora; then - pkg_dir=$FILES/rpms + pkg_dir=$base_dir/rpms elif is_suse; then - pkg_dir=$FILES/rpms-suse + pkg_dir=$base_dir/rpms-suse else exit_distro_not_supported "list of packages" fi @@ -1005,6 +1010,59 @@ function apt_get { apt-get --option "Dpkg::Options::=--force-confold" --assume-yes "$@" } +function _parse_package_files { + local files_to_parse=$@ + + if [[ -z "$DISTRO" ]]; then + GetDistro + fi + + for fname in ${files_to_parse}; do + local OIFS line package distros distro + [[ -e $fname ]] || continue + + OIFS=$IFS + IFS=$'\n' + for line in $(<${fname}); do + if [[ $line =~ "NOPRIME" ]]; then + continue + fi + + # Assume we want this package + package=${line%#*} + inst_pkg=1 + + # Look for # dist:xxx in comment + if [[ $line =~ (.*)#.*dist:([^ ]*) ]]; then + # We are using BASH regexp matching feature. 
+ package=${BASH_REMATCH[1]} + distros=${BASH_REMATCH[2]} + # In bash ${VAR,,} will lowecase VAR + # Look for a match in the distro list + if [[ ! ${distros,,} =~ ${DISTRO,,} ]]; then + # If no match then skip this package + inst_pkg=0 + fi + fi + + # Look for # testonly in comment + if [[ $line =~ (.*)#.*testonly.* ]]; then + package=${BASH_REMATCH[1]} + # Are we installing test packages? (test for the default value) + if [[ $INSTALL_TESTONLY_PACKAGES = "False" ]]; then + # If not installing test packages the skip this package + inst_pkg=0 + fi + fi + + if [[ $inst_pkg = 1 ]]; then + echo $package + fi + done + IFS=$OIFS + done +} + # get_packages() collects a list of package names of any type from the # prerequisite files in ``files/{debs|rpms}``. The list is intended # to be passed to a package installer such as apt or yum. @@ -1029,103 +1087,76 @@ function get_packages { echo "No package directory supplied" return 1 fi - if [[ -z "$DISTRO" ]]; then - GetDistro - fi for service in ${services//,/ }; do # Allow individual services to specify dependencies if [[ -e ${package_dir}/${service} ]]; then - file_to_parse="${file_to_parse} $service" + file_to_parse="${file_to_parse} ${package_dir}/${service}" fi # NOTE(sdague) n-api needs glance for now because that's where # glance client is if [[ $service == n-api ]]; then if [[ ! $file_to_parse =~ nova ]]; then - file_to_parse="${file_to_parse} nova" + file_to_parse="${file_to_parse} ${package_dir}/nova" fi if [[ ! $file_to_parse =~ glance ]]; then - file_to_parse="${file_to_parse} glance" + file_to_parse="${file_to_parse} ${package_dir}/glance" fi elif [[ $service == c-* ]]; then if [[ ! $file_to_parse =~ cinder ]]; then - file_to_parse="${file_to_parse} cinder" + file_to_parse="${file_to_parse} ${package_dir}/cinder" fi elif [[ $service == ceilometer-* ]]; then if [[ ! 
$file_to_parse =~ ceilometer ]]; then - file_to_parse="${file_to_parse} ceilometer" + file_to_parse="${file_to_parse} ${package_dir}/ceilometer" fi elif [[ $service == s-* ]]; then if [[ ! $file_to_parse =~ swift ]]; then - file_to_parse="${file_to_parse} swift" + file_to_parse="${file_to_parse} ${package_dir}/swift" fi elif [[ $service == n-* ]]; then if [[ ! $file_to_parse =~ nova ]]; then - file_to_parse="${file_to_parse} nova" + file_to_parse="${file_to_parse} ${package_dir}/nova" fi elif [[ $service == g-* ]]; then if [[ ! $file_to_parse =~ glance ]]; then - file_to_parse="${file_to_parse} glance" + file_to_parse="${file_to_parse} ${package_dir}/glance" fi elif [[ $service == key* ]]; then if [[ ! $file_to_parse =~ keystone ]]; then - file_to_parse="${file_to_parse} keystone" + file_to_parse="${file_to_parse} ${package_dir}/keystone" fi elif [[ $service == q-* ]]; then if [[ ! $file_to_parse =~ neutron ]]; then - file_to_parse="${file_to_parse} neutron" + file_to_parse="${file_to_parse} ${package_dir}/neutron" fi elif [[ $service == ir-* ]]; then if [[ ! $file_to_parse =~ ironic ]]; then - file_to_parse="${file_to_parse} ironic" + file_to_parse="${file_to_parse} ${package_dir}/ironic" fi fi done + echo "$(_parse_package_files $file_to_parse)" + $xtrace +} - for file in ${file_to_parse}; do - local fname=${package_dir}/${file} - local OIFS line package distros distro - [[ -e $fname ]] || continue - - OIFS=$IFS - IFS=$'\n' - for line in $(<${fname}); do - if [[ $line =~ "NOPRIME" ]]; then - continue - fi - - # Assume we want this package - package=${line%#*} - inst_pkg=1 - - # Look for # dist:xxx in comment - if [[ $line =~ (.*)#.*dist:([^ ]*) ]]; then - # We are using BASH regexp matching feature. - package=${BASH_REMATCH[1]} - distros=${BASH_REMATCH[2]} - # In bash ${VAR,,} will lowecase VAR - # Look for a match in the distro list - if [[ ! 
${distros,,} =~ ${DISTRO,,} ]]; then - # If no match then skip this package - inst_pkg=0 - fi - fi - - # Look for # testonly in comment - if [[ $line =~ (.*)#.*testonly.* ]]; then - package=${BASH_REMATCH[1]} - # Are we installing test packages? (test for the default value) - if [[ $INSTALL_TESTONLY_PACKAGES = "False" ]]; then - # If not installing test packages the skip this package - inst_pkg=0 - fi - fi - - if [[ $inst_pkg = 1 ]]; then - echo $package - fi - done - IFS=$OIFS +# get_plugin_packages() collects a list of package names of any type from a +# plugin's prerequisite files in ``$PLUGIN/devstack/files/{debs|rpms}``. The +# list is intended to be passed to a package installer such as apt or yum. +# +# Only packages required for enabled and collected plugins will included. +# +# The same metadata used in the main devstack prerequisite files may be used +# in these prerequisite files, see get_packages() for more info. +function get_plugin_packages { + local xtrace=$(set +o | grep xtrace) + set +o xtrace + local files_to_parse="" + local package_dir="" + for plugin in ${DEVSTACK_PLUGINS//,/ }; do + local package_dir="$(_get_package_dir ${GITDIR[$plugin]}/devstack/files)" + files_to_parse+="$package_dir/$plugin" done + echo "$(_parse_package_files $files_to_parse)" $xtrace } diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh index 303cc63307..917980ccc5 100755 --- a/tools/install_prereqs.sh +++ b/tools/install_prereqs.sh @@ -62,6 +62,8 @@ export_proxy_variables # Install package requirements PACKAGES=$(get_packages general $ENABLED_SERVICES) +PACKAGES="$PACKAGES $(get_plugin_packages)" + if is_ubuntu && echo $PACKAGES | grep -q dkms ; then # ensure headers for the running kernel are installed for any DKMS builds PACKAGES="$PACKAGES linux-headers-$(uname -r)" From e32c868f220720079facf462fa11c2bc9737e9c0 Mon Sep 17 00:00:00 2001 From: Flavio Percoco Date: Thu, 26 Feb 2015 14:10:05 +0100 Subject: [PATCH 0034/2941] Comment `log_file` for Zaqar 
Instead of logging to a file, let it use stdout so we can see the output in the screen logs. Change-Id: I0e5e12a6ddc5ad91dd37e97362ac9a5bed238e32 --- lib/zaqar | 6 ------ 1 file changed, 6 deletions(-) diff --git a/lib/zaqar b/lib/zaqar index c9321b997f..79b4c5a2ca 100644 --- a/lib/zaqar +++ b/lib/zaqar @@ -37,8 +37,6 @@ ZAQAR_DIR=$DEST/zaqar ZAQARCLIENT_DIR=$DEST/python-zaqarclient ZAQAR_CONF_DIR=/etc/zaqar ZAQAR_CONF=$ZAQAR_CONF_DIR/zaqar.conf -ZAQAR_API_LOG_DIR=/var/log/zaqar -ZAQAR_API_LOG_FILE=$ZAQAR_API_LOG_DIR/queues.log ZAQAR_AUTH_CACHE_DIR=${ZAQAR_AUTH_CACHE_DIR:-/var/cache/zaqar} # Support potential entry-points console scripts @@ -110,14 +108,10 @@ function configure_zaqar { [ ! -d $ZAQAR_CONF_DIR ] && sudo mkdir -m 755 -p $ZAQAR_CONF_DIR sudo chown $USER $ZAQAR_CONF_DIR - [ ! -d $ZAQAR_API_LOG_DIR ] && sudo mkdir -m 755 -p $ZAQAR_API_LOG_DIR - sudo chown $USER $ZAQAR_API_LOG_DIR - iniset $ZAQAR_CONF DEFAULT debug True iniset $ZAQAR_CONF DEFAULT verbose True iniset $ZAQAR_CONF DEFAULT admin_mode True iniset $ZAQAR_CONF DEFAULT use_syslog $SYSLOG - iniset $ZAQAR_CONF DEFAULT log_file $ZAQAR_API_LOG_FILE iniset $ZAQAR_CONF 'drivers:transport:wsgi' bind $ZAQAR_SERVICE_HOST configure_auth_token_middleware $ZAQAR_CONF zaqar $ZAQAR_AUTH_CACHE_DIR From de77c471f3df400c4c7df724c78dbd7dc771c618 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Wed, 11 Mar 2015 17:15:42 -0700 Subject: [PATCH 0035/2941] Make ironic's service check flexible We currently assume we are deploying ironic with the rest of a cloud and assert that glance/neutron/nova are enabled. This makes it a bit more flexible and allows deploying with only the minimum required services if desired, and asserts the others are enabled when we intend on testing nova+ironic integration. This is required for in-tree python-ironicclient functional tests, which we aim to run against a minimal devstack deployment. 
Change-Id: I99001d151161fa225b97c3ba6b167a61aa9b59fe --- lib/ironic | 6 +++++- lib/nova_plugins/hypervisor-ironic | 4 +--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/lib/ironic b/lib/ironic index 0d7c12777c..e530f523f2 100644 --- a/lib/ironic +++ b/lib/ironic @@ -180,7 +180,11 @@ function is_deployed_with_ipa_ramdisk { # install_ironic() - Collect source and prepare function install_ironic { # make sure all needed service were enabled - for srv in nova glance key; do + local req_services="mysql rabbit key" + if [[ "$VIRT_DRIVER" == "ironic" ]]; then + req_services+=" nova glance neutron" + fi + for srv in $req_services; do if ! is_service_enabled "$srv"; then die $LINENO "$srv should be enabled for Ironic." fi diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic index 0169d731e8..b9e286d5b6 100644 --- a/lib/nova_plugins/hypervisor-ironic +++ b/lib/nova_plugins/hypervisor-ironic @@ -54,9 +54,7 @@ function configure_nova_hypervisor { # install_nova_hypervisor() - Install external components function install_nova_hypervisor { - if ! is_service_enabled neutron; then - die $LINENO "Neutron should be enabled for usage of the Ironic Nova driver." - elif is_ironic_hardware; then + if is_ironic_hardware; then return fi install_libvirt From 41daa208d9f1bcc56c861b7751a51d3ef4b0f3f4 Mon Sep 17 00:00:00 2001 From: Vincent Hou Date: Wed, 4 Mar 2015 15:34:41 +0800 Subject: [PATCH 0036/2941] Remove my_ip from cinder.conf The current issue is that if we deploy c-vol service on a separate machine, my_ip and SERVICE_HOST will be different, because my_ip is the machine where c-vol service is running and SERVICE_HOST points to the machine where the cinder api service is running. If my_ip of c-vol in cinder.conf is set to the IP of c-api, it will cause the issue that the volume is unable to attach. The issue can be resolved by removing my_ip from cinder.conf. 
Change-Id: I699c0b5297c60e9f9934f74684abf563f4b0e977 closes-bug: #1428013 --- lib/cinder | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/cinder b/lib/cinder index 0d157dd78e..bc8a481ad6 100644 --- a/lib/cinder +++ b/lib/cinder @@ -228,7 +228,6 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $CINDER_CONF DEFAULT verbose True - iniset $CINDER_CONF DEFAULT my_ip "$CINDER_SERVICE_HOST" iniset $CINDER_CONF DEFAULT iscsi_helper tgtadm iniset $CINDER_CONF DEFAULT sql_connection `database_connection_url cinder` iniset $CINDER_CONF DEFAULT api_paste_config $CINDER_API_PASTE_INI From cf3b41fa8bfa0f3d8dce897e4ad6ce4c88ac5ab3 Mon Sep 17 00:00:00 2001 From: Chris Dent Date: Thu, 12 Mar 2015 13:33:12 +0000 Subject: [PATCH 0037/2941] Shut down ironic services in the modern way Instead of killing the screen, use stop_process which will shut the processes when USE_SCREEN is False. Change-Id: If0f714cb112dbf5fe9e4fdd7291cb4fb1df87f42 --- lib/ironic | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/lib/ironic b/lib/ironic index 0d7c12777c..35b5411cd4 100644 --- a/lib/ironic +++ b/lib/ironic @@ -482,9 +482,8 @@ function start_ironic_conductor { # stop_ironic() - Stop running processes function stop_ironic { - # Kill the Ironic screen windows - screen -S $SCREEN_NAME -p ir-api -X kill - screen -S $SCREEN_NAME -p ir-cond -X kill + stop_process ir-api + stop_process ir-cond # Cleanup the WSGI files if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then From b1a094d289a53fcbb0d04c0d3fa72707583728bd Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Thu, 12 Mar 2015 13:57:11 +0100 Subject: [PATCH 0038/2941] Install rsync-daemon on f22 swift requires to have rsyncd running as service or behind xinetd. /etc/xinetd.d/rsync is not shipped with f22 and the daemon mode requires an additional package. Adding rsync-daemon as swift dependency for f22 and f23(rawhide). 
Change-Id: I33222719cabed59a261ce1b8ddedc457aa03800e --- files/rpms/swift | 1 + 1 file changed, 1 insertion(+) diff --git a/files/rpms/swift b/files/rpms/swift index 0fcdb0fe27..5789a198e8 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -13,3 +13,4 @@ pyxattr sqlite xfsprogs xinetd +rsync-daemon # dist:f22,f23 From 23d6d5068752358c1d3bbacc314594b1b50e2fc8 Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Fri, 6 Mar 2015 15:24:22 -0800 Subject: [PATCH 0039/2941] Stop using deprecated oslo_concurrency and sql_connection config options As per the logs: Option "lock_path" from group "DEFAULT" is deprecated. Use option "lock_path" from group "oslo_concurrency". Option "sql_connection" from group "DEFAULT" is deprecated. Use option "connection" from group "database". Change-Id: I2109cec07ebee916c9ce0ccd24bd9a47d8d3c688 --- lib/cinder | 4 ++-- lib/ironic | 2 +- lib/nova | 6 +++--- lib/tempest | 2 +- lib/trove | 6 +++--- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/lib/cinder b/lib/cinder index 0d157dd78e..8ae2581169 100644 --- a/lib/cinder +++ b/lib/cinder @@ -230,12 +230,12 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT my_ip "$CINDER_SERVICE_HOST" iniset $CINDER_CONF DEFAULT iscsi_helper tgtadm - iniset $CINDER_CONF DEFAULT sql_connection `database_connection_url cinder` + iniset $CINDER_CONF database connection `database_connection_url cinder` iniset $CINDER_CONF DEFAULT api_paste_config $CINDER_API_PASTE_INI iniset $CINDER_CONF DEFAULT rootwrap_config "$CINDER_CONF_DIR/rootwrap.conf" iniset $CINDER_CONF DEFAULT osapi_volume_extension cinder.api.contrib.standard_extensions iniset $CINDER_CONF DEFAULT state_path $CINDER_STATE_PATH - iniset $CINDER_CONF DEFAULT lock_path $CINDER_STATE_PATH + iniset $CINDER_CONF oslo_concurrency lock_path $CINDER_STATE_PATH iniset $CINDER_CONF DEFAULT periodic_interval $CINDER_PERIODIC_INTERVAL # NOTE(thingee): Cinder V1 API is deprecated and defaults to off as of # Juno. 
Keep it enabled so we can continue testing while it's still diff --git a/lib/ironic b/lib/ironic index bc30cdbeba..a65ea8807b 100644 --- a/lib/ironic +++ b/lib/ironic @@ -273,7 +273,7 @@ function configure_ironic { cp $IRONIC_DIR/etc/ironic/ironic.conf.sample $IRONIC_CONF_FILE iniset $IRONIC_CONF_FILE DEFAULT debug True inicomment $IRONIC_CONF_FILE DEFAULT log_file - iniset $IRONIC_CONF_FILE DEFAULT sql_connection `database_connection_url ironic` + iniset $IRONIC_CONF_FILE database connection `database_connection_url ironic` iniset $IRONIC_CONF_FILE DEFAULT state_path $IRONIC_STATE_PATH iniset $IRONIC_CONF_FILE DEFAULT use_syslog $SYSLOG # Configure Ironic conductor, if it was enabled. diff --git a/lib/nova b/lib/nova index e9e78c7bc4..81064380c0 100644 --- a/lib/nova +++ b/lib/nova @@ -437,7 +437,7 @@ function create_nova_conf { iniset $NOVA_CONF DEFAULT s3_host "$SERVICE_HOST" iniset $NOVA_CONF DEFAULT s3_port "$S3_SERVICE_PORT" iniset $NOVA_CONF DEFAULT my_ip "$HOST_IP" - iniset $NOVA_CONF DEFAULT sql_connection `database_connection_url nova` + iniset $NOVA_CONF database connection `database_connection_url nova` iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x" iniset $NOVA_CONF osapi_v3 enabled "True" @@ -471,7 +471,7 @@ function create_nova_conf { if [ -n "$NOVA_STATE_PATH" ]; then iniset $NOVA_CONF DEFAULT state_path "$NOVA_STATE_PATH" - iniset $NOVA_CONF DEFAULT lock_path "$NOVA_STATE_PATH" + iniset $NOVA_CONF oslo_concurrency lock_path "$NOVA_STATE_PATH" fi if [ -n "$NOVA_INSTANCES_PATH" ]; then iniset $NOVA_CONF DEFAULT instances_path "$NOVA_INSTANCES_PATH" @@ -575,7 +575,7 @@ function create_nova_conf { function init_nova_cells { if is_service_enabled n-cell; then cp $NOVA_CONF $NOVA_CELLS_CONF - iniset $NOVA_CELLS_CONF DEFAULT sql_connection `database_connection_url $NOVA_CELLS_DB` + iniset $NOVA_CELLS_CONF database connection `database_connection_url $NOVA_CELLS_DB` iniset $NOVA_CELLS_CONF DEFAULT rabbit_virtual_host 
child_cell iniset $NOVA_CELLS_CONF DEFAULT dhcpbridge_flagfile $NOVA_CELLS_CONF iniset $NOVA_CELLS_CONF cells enable True diff --git a/lib/tempest b/lib/tempest index f856ce05f9..7464499d43 100644 --- a/lib/tempest +++ b/lib/tempest @@ -275,7 +275,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG DEFAULT use_syslog $SYSLOG # Oslo - iniset $TEMPEST_CONFIG DEFAULT lock_path $TEMPEST_STATE_PATH + iniset $TEMPEST_CONFIG oslo_concurrency lock_path $TEMPEST_STATE_PATH mkdir -p $TEMPEST_STATE_PATH iniset $TEMPEST_CONFIG DEFAULT use_stderr False iniset $TEMPEST_CONFIG DEFAULT log_file tempest.log diff --git a/lib/trove b/lib/trove index d77798308b..31c3eef5ea 100644 --- a/lib/trove +++ b/lib/trove @@ -136,7 +136,7 @@ function configure_trove { iniset $TROVE_CONF DEFAULT rabbit_userid $RABBIT_USERID iniset $TROVE_CONF DEFAULT rabbit_password $RABBIT_PASSWORD - iniset $TROVE_CONF DEFAULT sql_connection `database_connection_url trove` + iniset $TROVE_CONF database connection `database_connection_url trove` iniset $TROVE_CONF DEFAULT default_datastore $TROVE_DATASTORE_TYPE setup_trove_logging $TROVE_CONF iniset $TROVE_CONF DEFAULT trove_api_workers "$API_WORKERS" @@ -149,7 +149,7 @@ function configure_trove { iniset $TROVE_TASKMANAGER_CONF DEFAULT rabbit_userid $RABBIT_USERID iniset $TROVE_TASKMANAGER_CONF DEFAULT rabbit_password $RABBIT_PASSWORD - iniset $TROVE_TASKMANAGER_CONF DEFAULT sql_connection `database_connection_url trove` + iniset $TROVE_TASKMANAGER_CONF database connection `database_connection_url trove` iniset $TROVE_TASKMANAGER_CONF DEFAULT taskmanager_manager trove.taskmanager.manager.Manager iniset $TROVE_TASKMANAGER_CONF DEFAULT nova_proxy_admin_user radmin iniset $TROVE_TASKMANAGER_CONF DEFAULT nova_proxy_admin_tenant_name trove @@ -162,7 +162,7 @@ function configure_trove { if is_service_enabled tr-cond; then iniset $TROVE_CONDUCTOR_CONF DEFAULT rabbit_userid $RABBIT_USERID iniset $TROVE_CONDUCTOR_CONF DEFAULT rabbit_password $RABBIT_PASSWORD - iniset 
$TROVE_CONDUCTOR_CONF DEFAULT sql_connection `database_connection_url trove` + iniset $TROVE_CONDUCTOR_CONF database connection `database_connection_url trove` iniset $TROVE_CONDUCTOR_CONF DEFAULT nova_proxy_admin_user radmin iniset $TROVE_CONDUCTOR_CONF DEFAULT nova_proxy_admin_tenant_name trove iniset $TROVE_CONDUCTOR_CONF DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS From 3011ec75493c456589947567d972c3a3c1f602bb Mon Sep 17 00:00:00 2001 From: gordon chung Date: Thu, 12 Mar 2015 00:34:06 -0400 Subject: [PATCH 0040/2941] install missing elasticsearch client elasticsearch client must be explicitly installed as it's an optional backend requirement. this patch installs the client when installing elasticsearch Change-Id: I534cf0c78ab1fe7d309ef5f808bbe7b5422b403e --- pkg/elasticsearch.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/elasticsearch.sh b/pkg/elasticsearch.sh index 239d6b938e..f53c7f2ff2 100755 --- a/pkg/elasticsearch.sh +++ b/pkg/elasticsearch.sh @@ -77,6 +77,7 @@ function stop_elasticsearch { } function install_elasticsearch { + pip_install elasticsearch if is_package_installed elasticsearch; then echo "Note: elasticsearch was already installed." return From 6ac97deba6af9ced38f3c0ec93327d352e20c6df Mon Sep 17 00:00:00 2001 From: Jamie Lennox Date: Thu, 12 Mar 2015 09:03:28 +1100 Subject: [PATCH 0041/2941] Swift use v3 auth_token credentials The keystonemiddleware 1.5.0 released 2015-03-11 supports configuring auth plugins from the paste config file. This means that swift can now use authentication plugins for auth_token middleware. Change-Id: Icb9f008a57b6f75e0506cbecd0a1e0f28b7dadda --- lib/swift | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/lib/swift b/lib/swift index 8a96615d01..f291d872d4 100644 --- a/lib/swift +++ b/lib/swift @@ -415,16 +415,8 @@ function configure_swift { # IDs will included in all of its log messages. 
iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken log_name swift - # NOTE(jamielennox): swift cannot use the regular configure_auth_token_middleware function because swift - # doesn't use oslo.config which is the only way to configure auth plugins with the middleare. iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken paste.filter_factory keystonemiddleware.auth_token:filter_factory - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken identity_uri $KEYSTONE_AUTH_URI - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_user swift - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_password $SERVICE_PASSWORD - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken admin_tenant_name $SERVICE_TENANT_NAME - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken auth_uri $KEYSTONE_SERVICE_URI - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken cafile $SSL_BUNDLE_FILE - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken signing_dir $SWIFT_AUTH_CACHE_DIR + configure_auth_token_middleware $SWIFT_CONFIG_PROXY_SERVER swift $SWIFT_AUTH_CACHE_DIR filter:authtoken iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken delay_auth_decision 1 iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken cache swift.cache iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken include_service_catalog False From e52f6ca11fc581d5ab3da4200ed0128287ec2d39 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 5 Mar 2015 09:35:52 +1100 Subject: [PATCH 0042/2941] Remove packaged rpm python libraries Let's just use pip versions Change-Id: Idf3a3a914b54779172776822710b3e52e751b1d1 --- files/rpms/ceilometer-collector | 1 - files/rpms/cinder | 1 - files/rpms/glance | 8 -------- files/rpms/horizon | 15 --------------- files/rpms/ironic | 1 - files/rpms/keystone | 10 ---------- files/rpms/ldap | 1 - files/rpms/n-api | 1 - files/rpms/n-cpu | 2 +- files/rpms/neutron | 9 --------- files/rpms/nova | 16 ---------------- files/rpms/qpid | 1 - files/rpms/swift | 9 --------- 
files/rpms/zaqar-server | 1 - 14 files changed, 1 insertion(+), 75 deletions(-) diff --git a/files/rpms/ceilometer-collector b/files/rpms/ceilometer-collector index 9cf580d22d..b139ed2b6b 100644 --- a/files/rpms/ceilometer-collector +++ b/files/rpms/ceilometer-collector @@ -1,4 +1,3 @@ selinux-policy-targeted mongodb-server #NOPRIME -pymongo # NOPRIME mongodb # NOPRIME diff --git a/files/rpms/cinder b/files/rpms/cinder index 082a35af32..9f1359f988 100644 --- a/files/rpms/cinder +++ b/files/rpms/cinder @@ -3,4 +3,3 @@ scsi-target-utils qemu-img postgresql-devel iscsi-initiator-utils -python-lxml diff --git a/files/rpms/glance b/files/rpms/glance index a09b669309..119492a3f8 100644 --- a/files/rpms/glance +++ b/files/rpms/glance @@ -3,12 +3,4 @@ libxslt-devel # testonly mysql-devel # testonly openssl-devel # testonly postgresql-devel # testonly -python-argparse -python-eventlet -python-greenlet -python-lxml -python-paste-deploy -python-routes -python-sqlalchemy -pyxattr zlib-devel # testonly diff --git a/files/rpms/horizon b/files/rpms/horizon index 585c36cfde..8d7f0371ef 100644 --- a/files/rpms/horizon +++ b/files/rpms/horizon @@ -2,20 +2,5 @@ Django httpd # NOPRIME mod_wsgi # NOPRIME pylint -python-anyjson -python-BeautifulSoup -python-coverage -python-dateutil -python-eventlet -python-greenlet -python-httplib2 -python-migrate -python-mox -python-nose -python-paste -python-paste-deploy -python-routes -python-sqlalchemy -python-webob pyxattr pcre-devel # pyScss diff --git a/files/rpms/ironic b/files/rpms/ironic index 0a46314964..2bf8bb370e 100644 --- a/files/rpms/ironic +++ b/files/rpms/ironic @@ -8,7 +8,6 @@ libvirt-python net-tools openssh-clients openvswitch -python-libguestfs sgabios syslinux tftp-server diff --git a/files/rpms/keystone b/files/rpms/keystone index 45492e0de6..8074119fdb 100644 --- a/files/rpms/keystone +++ b/files/rpms/keystone @@ -1,14 +1,4 @@ MySQL-python -python-greenlet libxslt-devel -python-lxml -python-paste -python-paste-deploy 
-python-paste-script -python-routes -python-sqlalchemy -python-webob sqlite mod_ssl - -# Deps installed via pip for RHEL diff --git a/files/rpms/ldap b/files/rpms/ldap index 2f7ab5de46..d89c4cf8c1 100644 --- a/files/rpms/ldap +++ b/files/rpms/ldap @@ -1,3 +1,2 @@ openldap-servers openldap-clients -python-ldap diff --git a/files/rpms/n-api b/files/rpms/n-api index 6f59e603b2..0928cd56b9 100644 --- a/files/rpms/n-api +++ b/files/rpms/n-api @@ -1,2 +1 @@ -python-dateutil fping diff --git a/files/rpms/n-cpu b/files/rpms/n-cpu index 32b1546c39..c1a8e8ffa6 100644 --- a/files/rpms/n-cpu +++ b/files/rpms/n-cpu @@ -4,4 +4,4 @@ lvm2 genisoimage sysfsutils sg3_utils -python-libguestfs # NOPRIME + diff --git a/files/rpms/neutron b/files/rpms/neutron index d11dab7e1a..c0dee78a48 100644 --- a/files/rpms/neutron +++ b/files/rpms/neutron @@ -10,15 +10,6 @@ mysql-devel # testonly mysql-server # NOPRIME openvswitch # NOPRIME postgresql-devel # testonly -python-eventlet -python-greenlet -python-iso8601 -python-paste -python-paste-deploy -python-qpid # NOPRIME -python-routes -python-sqlalchemy -python-suds rabbitmq-server # NOPRIME qpid-cpp-server # NOPRIME sqlite diff --git a/files/rpms/nova b/files/rpms/nova index 557de908f1..527928a581 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -21,22 +21,6 @@ mysql-devel # testonly mysql-server # NOPRIME parted polkit -python-cheetah -python-eventlet -python-feedparser -python-greenlet -python-iso8601 -python-lockfile -python-migrate -python-mox -python-paramiko -python-paste -python-paste-deploy -python-qpid # NOPRIME -python-routes -python-sqlalchemy -python-suds -python-tempita rabbitmq-server # NOPRIME qpid-cpp-server # NOPRIME sqlite diff --git a/files/rpms/qpid b/files/rpms/qpid index c5e2699fcc..41dd2f69f9 100644 --- a/files/rpms/qpid +++ b/files/rpms/qpid @@ -1,4 +1,3 @@ qpid-proton-c-devel # NOPRIME -python-qpid-proton # NOPRIME cyrus-sasl-lib # NOPRIME cyrus-sasl-plain # NOPRIME diff --git a/files/rpms/swift b/files/rpms/swift 
index 0fcdb0fe27..accf547ece 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -1,14 +1,5 @@ curl memcached -python-configobj -python-coverage -python-eventlet -python-greenlet -python-netifaces -python-nose -python-paste-deploy -python-simplejson -python-webob pyxattr sqlite xfsprogs diff --git a/files/rpms/zaqar-server b/files/rpms/zaqar-server index 541cefa99d..78806fb3f6 100644 --- a/files/rpms/zaqar-server +++ b/files/rpms/zaqar-server @@ -3,4 +3,3 @@ mongodb mongodb-server pymongo redis # NOPRIME -python-redis # NOPRIME From b6197e6ab0b6085f2b81f7a29fa6a3ea5ec03748 Mon Sep 17 00:00:00 2001 From: gordon chung Date: Thu, 12 Feb 2015 15:33:35 -0500 Subject: [PATCH 0043/2941] switch to use ceilometermiddleware swift middleware contained in ceilometer is now deprecated. the middleware is available in ceilometermiddleware. Change-Id: I6e41986245f4d95a9385dc7829479ed1199f10ac --- lib/ceilometer | 2 ++ lib/rpc_backend | 9 +++++++++ lib/swift | 6 +++++- stack.sh | 3 +++ 4 files changed, 19 insertions(+), 1 deletion(-) diff --git a/lib/ceilometer b/lib/ceilometer index 9db0640940..a464c52b6c 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -322,6 +322,8 @@ function install_ceilometermiddleware { if use_library_from_git "ceilometermiddleware"; then git_clone_by_name "ceilometermiddleware" setup_dev_lib "ceilometermiddleware" + else + pip_install ceilometermiddleware fi } diff --git a/lib/rpc_backend b/lib/rpc_backend index ff22bbf8fa..a399d1757e 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -233,6 +233,15 @@ function restart_rpc_backend { fi } +# builds transport url string +function get_transport_url { + if is_service_enabled qpid || [ -n "$QPID_HOST" ]; then + echo "qpid://$QPID_USERNAME:$QPID_PASSWORD@$QPID_HOST:5672/" + elif is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then + echo "rabbit://$RABBIT_USERID:$RABBIT_PASSWORD@$RABBIT_HOST:5672/" + fi +} + # iniset cofiguration function iniset_rpc_backend { local 
package=$1 diff --git a/lib/swift b/lib/swift index 4a63500b54..3decd2f9f0 100644 --- a/lib/swift +++ b/lib/swift @@ -378,7 +378,11 @@ function configure_swift { # Configure Ceilometer if is_service_enabled ceilometer; then iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer "set log_level" "WARN" - iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer use "egg:ceilometer#swift" + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer paste.filter_factory "ceilometermiddleware.swift:filter_factory" + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer control_exchange "swift" + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer url $(get_transport_url) + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer driver "messaging" + iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:ceilometer topic "notifications" SWIFT_EXTRAS_MIDDLEWARE_LAST="${SWIFT_EXTRAS_MIDDLEWARE_LAST} ceilometer" fi diff --git a/stack.sh b/stack.sh index 2060f2d65a..5c726a3a1c 100755 --- a/stack.sh +++ b/stack.sh @@ -739,6 +739,9 @@ if is_service_enabled keystone; then fi if is_service_enabled s-proxy; then + if is_service_enabled ceilometer; then + install_ceilometermiddleware + fi stack_install_service swift configure_swift From 7b2eaedabf0700a50ddcb32ac54570ea200c616e Mon Sep 17 00:00:00 2001 From: Andrea Frittoli Date: Fri, 13 Mar 2015 12:05:49 +0000 Subject: [PATCH 0044/2941] Adding tempest_roles when auth_version is v3 With identity v3 the _member_ role is not added on the projects automatically for the user when it's created. Setting _member_ to tempest_roles so that tempest adds the role. 
Change-Id: Iaae9286ecc6f019d36261a5c450068a650e24a28 --- lib/tempest | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/tempest b/lib/tempest index f856ce05f9..fcb0e59937 100644 --- a/lib/tempest +++ b/lib/tempest @@ -315,6 +315,9 @@ function configure_tempest { # Auth iniset $TEMPEST_CONFIG auth allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True} + if [[ "$TEMPEST_AUTH_VERSION" == "v3" ]]; then + iniset $TEMPEST_CONFIG auth tempest_roles "Member" + fi # Compute iniset $TEMPEST_CONFIG compute ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # DEPRECATED From 58065f26b6f35272636174c3b07006255424b9c5 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Sat, 14 Mar 2015 06:13:26 -0700 Subject: [PATCH 0045/2941] Set compute-feature-enabled.preserve_ports=True in tempest.conf Because of branchless Tempest we have to set a compute-feature-enabled flag to test preserving preexisting ports from Neutron since the code only works starting in Kilo and won't be backported to stable/juno or stable/icehouse. We can remove this flag once juno-eol happens. Depends-On: I95469e4c2f4aa2bc4e6342860a9c222fb4fa7e16 Related-Bug: #1431724 Change-Id: I214baa3b861e29bedf6bb7b50534ac2286676dd1 --- lib/tempest | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/tempest b/lib/tempest index f856ce05f9..f9a2da1650 100644 --- a/lib/tempest +++ b/lib/tempest @@ -349,6 +349,8 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute-feature-enabled change_password False iniset $TEMPEST_CONFIG compute-feature-enabled block_migration_for_live_migration ${USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION:-False} iniset $TEMPEST_CONFIG compute-feature-enabled api_extensions $compute_api_extensions + # TODO(mriedem): Remove the preserve_ports flag when Juno is end of life. 
+ iniset $TEMPEST_CONFIG compute-feature-enabled preserve_ports True # Compute admin iniset $TEMPEST_CONFIG "compute-admin" username $ADMIN_USERNAME From 5dfecc8966912c2f74a4c7ecc85dd5f0b930cd99 Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Sat, 14 Mar 2015 12:28:59 -0500 Subject: [PATCH 0046/2941] Keystone RPC backend config consistency lib/keystone was setting up rabbit config directly rather than using the iniset_rpc_backend function that other projects use. Change-Id: Ic368f102c808cdbd2e4cbc1ff457cdf17a681332 --- lib/keystone | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/lib/keystone b/lib/keystone index 0968445dc0..c9433d98fe 100644 --- a/lib/keystone +++ b/lib/keystone @@ -226,12 +226,7 @@ function configure_keystone { iniset $KEYSTONE_CONF assignment driver "keystone.assignment.backends.$KEYSTONE_ASSIGNMENT_BACKEND.Assignment" fi - # Configure rabbitmq credentials - if is_service_enabled rabbit; then - iniset $KEYSTONE_CONF DEFAULT rabbit_userid $RABBIT_USERID - iniset $KEYSTONE_CONF DEFAULT rabbit_password $RABBIT_PASSWORD - iniset $KEYSTONE_CONF DEFAULT rabbit_host $RABBIT_HOST - fi + iniset_rpc_backend keystone $KEYSTONE_CONF DEFAULT # Set the URL advertised in the ``versions`` structure returned by the '/' route if is_service_enabled tls-proxy; then From 2dd110ce8668f6cb7b507928bad972d94656e2d7 Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Sat, 14 Mar 2015 12:39:14 -0500 Subject: [PATCH 0047/2941] iniset_rpc_backend default section iniset_rpc_backend should know what section it needs to set the config options in better than the callers. The config options have actually been moved to different sections and the options in the DEFAULT section are deprecated. 
Change-Id: I0e07fe03c7812ef8df49e126bf71c57588635639 --- lib/ceilometer | 2 +- lib/cinder | 2 +- lib/glance | 4 ++-- lib/heat | 2 +- lib/ironic | 2 +- lib/keystone | 2 +- lib/neutron | 2 +- lib/nova | 2 +- lib/rpc_backend | 2 +- lib/sahara | 2 +- lib/zaqar | 2 +- 11 files changed, 12 insertions(+), 12 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index 9db0640940..e2400833c8 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -169,7 +169,7 @@ function configure_ceilometer { [ ! -d $CEILOMETER_API_LOG_DIR ] && sudo mkdir -m 755 -p $CEILOMETER_API_LOG_DIR sudo chown $STACK_USER $CEILOMETER_API_LOG_DIR - iniset_rpc_backend ceilometer $CEILOMETER_CONF DEFAULT + iniset_rpc_backend ceilometer $CEILOMETER_CONF iniset $CEILOMETER_CONF DEFAULT notification_topics "$CEILOMETER_NOTIFICATION_TOPICS" iniset $CEILOMETER_CONF DEFAULT verbose True diff --git a/lib/cinder b/lib/cinder index 880af1fd40..a8f05389b5 100644 --- a/lib/cinder +++ b/lib/cinder @@ -281,7 +281,7 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT use_syslog True fi - iniset_rpc_backend cinder $CINDER_CONF DEFAULT + iniset_rpc_backend cinder $CINDER_CONF if [[ "$CINDER_SECURE_DELETE" == "False" ]]; then iniset $CINDER_CONF DEFAULT secure_delete False diff --git a/lib/glance b/lib/glance index eb1df2e8ae..8400ca18cc 100755 --- a/lib/glance +++ b/lib/glance @@ -112,7 +112,7 @@ function configure_glance { if is_service_enabled qpid || [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then iniset $GLANCE_REGISTRY_CONF DEFAULT notification_driver messaging fi - iniset_rpc_backend glance $GLANCE_REGISTRY_CONF DEFAULT + iniset_rpc_backend glance $GLANCE_REGISTRY_CONF cp $GLANCE_DIR/etc/glance-api.conf $GLANCE_API_CONF iniset $GLANCE_API_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL @@ -125,7 +125,7 @@ function configure_glance { if is_service_enabled qpid || [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then iniset $GLANCE_API_CONF DEFAULT notification_driver messaging fi - 
iniset_rpc_backend glance $GLANCE_API_CONF DEFAULT + iniset_rpc_backend glance $GLANCE_API_CONF if [ "$VIRT_DRIVER" = 'xenserver' ]; then iniset $GLANCE_API_CONF DEFAULT container_formats "ami,ari,aki,bare,ovf,tgz" iniset $GLANCE_API_CONF DEFAULT disk_formats "ami,ari,aki,vhd,raw,iso" diff --git a/lib/heat b/lib/heat index a088e82886..d713f18269 100644 --- a/lib/heat +++ b/lib/heat @@ -105,7 +105,7 @@ function configure_heat { cp $HEAT_DIR/etc/heat/policy.json $HEAT_POLICY_FILE # common options - iniset_rpc_backend heat $HEAT_CONF DEFAULT + iniset_rpc_backend heat $HEAT_CONF iniset $HEAT_CONF DEFAULT heat_metadata_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT iniset $HEAT_CONF DEFAULT heat_waitcondition_server_url http://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1/waitcondition iniset $HEAT_CONF DEFAULT heat_watch_server_url http://$HEAT_API_CW_HOST:$HEAT_API_CW_PORT diff --git a/lib/ironic b/lib/ironic index 35b5411cd4..2dc95bb435 100644 --- a/lib/ironic +++ b/lib/ironic @@ -313,7 +313,7 @@ function configure_ironic_api { iniset $IRONIC_CONF_FILE keystone_authtoken cafile $SSL_BUNDLE_FILE iniset $IRONIC_CONF_FILE keystone_authtoken signing_dir $IRONIC_AUTH_CACHE_DIR/api - iniset_rpc_backend ironic $IRONIC_CONF_FILE DEFAULT + iniset_rpc_backend ironic $IRONIC_CONF_FILE iniset $IRONIC_CONF_FILE api port $IRONIC_SERVICE_PORT cp -p $IRONIC_DIR/etc/ironic/policy.json $IRONIC_POLICY_JSON diff --git a/lib/keystone b/lib/keystone index c9433d98fe..89e9aa1918 100644 --- a/lib/keystone +++ b/lib/keystone @@ -226,7 +226,7 @@ function configure_keystone { iniset $KEYSTONE_CONF assignment driver "keystone.assignment.backends.$KEYSTONE_ASSIGNMENT_BACKEND.Assignment" fi - iniset_rpc_backend keystone $KEYSTONE_CONF DEFAULT + iniset_rpc_backend keystone $KEYSTONE_CONF # Set the URL advertised in the ``versions`` structure returned by the '/' route if is_service_enabled tls-proxy; then diff --git a/lib/neutron b/lib/neutron index e41abafda9..a31a064063 100755 --- 
a/lib/neutron +++ b/lib/neutron @@ -422,7 +422,7 @@ function is_neutron_enabled { # Set common config for all neutron server and agents. function configure_neutron { _configure_neutron_common - iniset_rpc_backend neutron $NEUTRON_CONF DEFAULT + iniset_rpc_backend neutron $NEUTRON_CONF # goes before q-svc to init Q_SERVICE_PLUGIN_CLASSES if is_service_enabled q-lbaas; then diff --git a/lib/nova b/lib/nova index 199daeea3d..f0490951b7 100644 --- a/lib/nova +++ b/lib/nova @@ -537,7 +537,7 @@ function create_nova_conf { iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST" iniset $NOVA_CONF DEFAULT keystone_ec2_url $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens - iniset_rpc_backend nova $NOVA_CONF DEFAULT + iniset_rpc_backend nova $NOVA_CONF iniset $NOVA_CONF glance api_servers "${GLANCE_SERVICE_PROTOCOL}://${GLANCE_HOSTPORT}" iniset $NOVA_CONF DEFAULT osapi_compute_workers "$API_WORKERS" diff --git a/lib/rpc_backend b/lib/rpc_backend index ff22bbf8fa..32c3e17aa3 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -237,7 +237,7 @@ function restart_rpc_backend { function iniset_rpc_backend { local package=$1 local file=$2 - local section=$3 + local section=${3:-DEFAULT} if is_service_enabled zeromq; then iniset $file $section rpc_backend "zmq" iniset $file $section rpc_zmq_host `hostname` diff --git a/lib/sahara b/lib/sahara index 521b19a4a1..7c22d0f5b9 100644 --- a/lib/sahara +++ b/lib/sahara @@ -127,7 +127,7 @@ function configure_sahara { if is_service_enabled ceilometer; then iniset $SAHARA_CONF_FILE DEFAULT enable_notifications "true" iniset $SAHARA_CONF_FILE DEFAULT notification_driver "messaging" - iniset_rpc_backend sahara $SAHARA_CONF_FILE DEFAULT + iniset_rpc_backend sahara $SAHARA_CONF_FILE fi iniset $SAHARA_CONF_FILE DEFAULT verbose True diff --git a/lib/zaqar b/lib/zaqar index 79b4c5a2ca..02b69879fe 100644 --- a/lib/zaqar +++ b/lib/zaqar @@ -133,7 +133,7 @@ function configure_zaqar { iniset $ZAQAR_CONF DEFAULT 
notification_driver messaging iniset $ZAQAR_CONF DEFAULT control_exchange zaqar fi - iniset_rpc_backend zaqar $ZAQAR_CONF DEFAULT + iniset_rpc_backend zaqar $ZAQAR_CONF cleanup_zaqar } From 1577663f4a7d50542e1c729a3f975af627f6c47b Mon Sep 17 00:00:00 2001 From: Anand Shanmugam Date: Sat, 14 Mar 2015 19:24:10 +0530 Subject: [PATCH 0048/2941] Fix Traceback exceptions in g-api and g-reg The log_context format specified in glance-api and glance-reistry are causing tracebacks. This fix changes the project_id and user_id to tenant and user which are supported in oslo-context. This is the format used by other projects Change-Id: Ifbf268e9765039a0085c9af930dabf8a5cc681b8 Closes-Bug: #1431784 --- lib/glance | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/glance b/lib/glance index eb1df2e8ae..26d7960143 100755 --- a/lib/glance +++ b/lib/glance @@ -185,8 +185,8 @@ function configure_glance { # Format logging if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then - setup_colorized_logging $GLANCE_API_CONF DEFAULT "project_id" "user_id" - setup_colorized_logging $GLANCE_REGISTRY_CONF DEFAULT "project_id" "user_id" + setup_colorized_logging $GLANCE_API_CONF DEFAULT tenant user + setup_colorized_logging $GLANCE_REGISTRY_CONF DEFAULT tenant user fi cp -p $GLANCE_DIR/etc/glance-registry-paste.ini $GLANCE_REGISTRY_PASTE_INI From 80cdbc423112f76720f9b8b4cdf19f17815429f8 Mon Sep 17 00:00:00 2001 From: Zhenzan Zhou Date: Mon, 16 Mar 2015 12:30:44 +0800 Subject: [PATCH 0049/2941] Make ironic ssh check timeout configurable On some environments, the current 10s timeout waiting for server ssh-able is not enough. SSH session was killed before the command executed by server and then break the whole stack.sh. 
Change-Id: I4d842744793455d44a633dee8920a60552e8075e --- lib/ironic | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ironic b/lib/ironic index 35b5411cd4..863d023c38 100644 --- a/lib/ironic +++ b/lib/ironic @@ -63,6 +63,7 @@ IRONIC_TEMPLATES_DIR=${IRONIC_TEMPLATES_DIR:-$TOP_DIR/tools/ironic/templates} IRONIC_BAREMETAL_BASIC_OPS=$(trueorfalse False IRONIC_BAREMETAL_BASIC_OPS) IRONIC_ENABLED_DRIVERS=${IRONIC_ENABLED_DRIVERS:-fake,pxe_ssh,pxe_ipmitool} IRONIC_SSH_USERNAME=${IRONIC_SSH_USERNAME:-`whoami`} +IRONIC_SSH_TIMEOUT=${IRONIC_SSH_TIMEOUT:-15} IRONIC_SSH_KEY_DIR=${IRONIC_SSH_KEY_DIR:-$IRONIC_DATA_DIR/ssh_keys} IRONIC_SSH_KEY_FILENAME=${IRONIC_SSH_KEY_FILENAME:-ironic_key} IRONIC_KEY_FILE=${IRONIC_KEY_FILE:-$IRONIC_SSH_KEY_DIR/$IRONIC_SSH_KEY_FILENAME} @@ -703,7 +704,7 @@ function ironic_ssh_check { function configure_ironic_auxiliary { configure_ironic_ssh_keypair - ironic_ssh_check $IRONIC_KEY_FILE $IRONIC_VM_SSH_ADDRESS $IRONIC_VM_SSH_PORT $IRONIC_SSH_USERNAME 10 + ironic_ssh_check $IRONIC_KEY_FILE $IRONIC_VM_SSH_ADDRESS $IRONIC_VM_SSH_PORT $IRONIC_SSH_USERNAME $IRONIC_SSH_TIMEOUT } function build_ipa_coreos_ramdisk { From 1331a828dab91f2e053cb1a4b78b872af00410c1 Mon Sep 17 00:00:00 2001 From: Jordan Pittier Date: Mon, 16 Mar 2015 10:27:47 +0100 Subject: [PATCH 0050/2941] Add \n at the end of samples/local.conf Currently if you `cat` the file, the bash prompt will be at a weird position. And if you programmaticaly add a new line to this file, the line will be, in fact, appended to the previous line. 
Change-Id: I19ba018d9a934f8fdc07cc9bec50a0105f2710f9 --- samples/local.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/local.conf b/samples/local.conf index e4052c21ac..63000b65ba 100644 --- a/samples/local.conf +++ b/samples/local.conf @@ -98,4 +98,4 @@ SWIFT_DATA_DIR=$DEST/data # ------- # Install the tempest test suite -enable_service tempest \ No newline at end of file +enable_service tempest From cb3ceceda227036c357a1c3173ecbd8c3116b148 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Mon, 16 Mar 2015 10:37:51 -0400 Subject: [PATCH 0051/2941] Set INSTALL_TEMPEST to default true This commit switches the default value for INSTALL_TEMPEST on master devstack to be true. Not installing tempest by default on devstack is confusing for devs and people working with tempest in devstack. The venv isolation is only really required on stable branches because of conflicting requirements, however it is not really necessary on master. Change-Id: I368cb56fd9e0cbf59cefe24a46507d3f58b9a8e3 --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index f856ce05f9..85bccb62a0 100644 --- a/lib/tempest +++ b/lib/tempest @@ -66,7 +66,7 @@ BUILD_TIMEOUT=${BUILD_TIMEOUT:-196} # This must be False on stable branches, as master tempest # deps do not match stable branch deps. Set this to True to # have tempest installed in devstack by default. -INSTALL_TEMPEST=${INSTALL_TEMPEST:-"False"} +INSTALL_TEMPEST=${INSTALL_TEMPEST:-"True"} BOTO_MATERIALS_PATH="$FILES/images/s3-materials/cirros-${CIRROS_VERSION}" From a3430270f3b652317a85c9eabe76962bd64f4543 Mon Sep 17 00:00:00 2001 From: Andreas Scheuring Date: Mon, 9 Mar 2015 16:55:32 +0100 Subject: [PATCH 0052/2941] Support detection of interfaces with non-word chars in the name The current regex only matches host interface names that consits of "word characters" (regex \w). Intefaces having other special chars like "-" or "." are not parsed. 
Examples that are not yet matched are br-ex (ovs bridge) or enccw0.0.1234 (s390 eth device name). In addition it's hard to understand the the regex. This fix is replacing the regex by a simple awk statement also matching those names. In addition the determination of the host_ip_iface was moved down into the if clause, as it is only used inside. Change-Id: I3d1b1afa32956e4e8c55c7e68cbafaf8e03e7da2 Closes-Bug: #1429903 --- functions-common | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/functions-common b/functions-common index 4739e42e90..ed43e20d8d 100644 --- a/functions-common +++ b/functions-common @@ -542,11 +542,11 @@ function get_default_host_ip { local host_ip_iface=$3 local host_ip=$4 - # Find the interface used for the default route - host_ip_iface=${host_ip_iface:-$(ip route | sed -n '/^default/{ s/.*dev \(\w\+\)\s\+.*/\1/; p; }' | head -1)} # Search for an IP unless an explicit is set by ``HOST_IP`` environment variable if [ -z "$host_ip" -o "$host_ip" == "dhcp" ]; then host_ip="" + # Find the interface used for the default route + host_ip_iface=${host_ip_iface:-$(ip route | awk '/default/ {print $5}' | head -1)} local host_ips=$(LC_ALL=C ip -f inet addr show ${host_ip_iface} | awk '/inet/ {split($2,parts,"/"); print parts[1]}') local ip for ip in $host_ips; do From 45e7db0e7a702e7a1cdb1121acd7c2766c664dbc Mon Sep 17 00:00:00 2001 From: Morgan Jones Date: Mon, 16 Mar 2015 12:20:32 -0400 Subject: [PATCH 0053/2941] Change datastore version for Mysql datastore The Replication V2 change to Trove moves the test datastore from Mysql 5.5 to Mysql 5.6. This change reflects that in devstack. 
Change-Id: Ibdf32b46c200d3061975d390c872be77d19bc361 Implements: blueprint bp/replication-v2 Closes-bug: #1432686 Depends-On: I8eec708f41e791e3db04a2c7b7c12855118b64ac --- lib/trove | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/trove b/lib/trove index d77798308b..9c2140cfba 100644 --- a/lib/trove +++ b/lib/trove @@ -44,8 +44,8 @@ TROVE_LOCAL_CONF_DIR=$TROVE_DIR/etc/trove TROVE_LOCAL_API_PASTE_INI=$TROVE_LOCAL_CONF_DIR/api-paste.ini TROVE_AUTH_CACHE_DIR=${TROVE_AUTH_CACHE_DIR:-/var/cache/trove} TROVE_DATASTORE_TYPE=${TROVE_DATASTORE_TYPE:-"mysql"} -TROVE_DATASTORE_VERSION=${TROVE_DATASTORE_VERSION:-"5.5"} -TROVE_DATASTORE_PACKAGE=${TROVE_DATASTORE_PACKAGE:-"mysql-server-5.5"} +TROVE_DATASTORE_VERSION=${TROVE_DATASTORE_VERSION:-"5.6"} +TROVE_DATASTORE_PACKAGE=${TROVE_DATASTORE_PACKAGE:-"mysql-server-5.6"} # Support entry points installation of console scripts if [[ -d $TROVE_DIR/bin ]]; then From ad13c0a8dacbf5d5d399aa2df99313bb5fb6a39b Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Sat, 14 Mar 2015 12:14:31 -0500 Subject: [PATCH 0054/2941] Keystone stop using config refs - Rather than using config refs, deployments should be using a tool such as chef or puppet to set the options correctly. - Config refs have a bug where you can only reference an option in the DEFAULT group, which limits the usefulness, and with this feature it's impossible to move any config options out of the DEFAULT group, luckily this has been ignored anyways since I think everyone realizes how broken it is. 
Change-Id: I74cae09f9d75177f8efea69e7ae981ed8f14039f --- lib/keystone | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/lib/keystone b/lib/keystone index 0968445dc0..4592440248 100644 --- a/lib/keystone +++ b/lib/keystone @@ -234,13 +234,8 @@ function configure_keystone { fi # Set the URL advertised in the ``versions`` structure returned by the '/' route - if is_service_enabled tls-proxy; then - iniset $KEYSTONE_CONF DEFAULT public_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/" - iniset $KEYSTONE_CONF DEFAULT admin_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/" - else - iniset $KEYSTONE_CONF DEFAULT public_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:%(public_port)s/" - iniset $KEYSTONE_CONF DEFAULT admin_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:%(admin_port)s/" - fi + iniset $KEYSTONE_CONF DEFAULT public_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/" + iniset $KEYSTONE_CONF DEFAULT admin_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/" iniset $KEYSTONE_CONF DEFAULT admin_bind_host "$KEYSTONE_ADMIN_BIND_HOST" # Register SSL certificates if provided From 2662395fac0c7cf8e842b56987ad0f0cdedc3d5f Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Wed, 16 Jul 2014 17:41:53 +0400 Subject: [PATCH 0055/2941] Add rootwrap daemon mode support for Neutron Daemon mode is turned on by default. 
Implements: blueprint rootwrap-daemon-mode Change-Id: I632df4149e9d7f78cb5a7091dfe4ea8f8ca3ddfa --- lib/neutron | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/lib/neutron b/lib/neutron index e41abafda9..411c6961ce 100755 --- a/lib/neutron +++ b/lib/neutron @@ -153,6 +153,7 @@ Q_USE_NAMESPACE=${Q_USE_NAMESPACE:-True} # RHEL's support for namespaces requires using veths with ovs Q_OVS_USE_VETH=${Q_OVS_USE_VETH:-False} Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True} +Q_USE_ROOTWRAP_DAEMON=$(trueorfalse True Q_USE_ROOTWRAP_DAEMON) # Meta data IP Q_META_DATA_IP=${Q_META_DATA_IP:-$SERVICE_HOST} # Allow Overlapping IP among subnets @@ -226,6 +227,9 @@ if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then else NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron) Q_RR_COMMAND="sudo $NEUTRON_ROOTWRAP $Q_RR_CONF_FILE" + if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then + Q_RR_DAEMON_COMMAND="sudo $NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE" + fi fi @@ -896,6 +900,9 @@ function _configure_neutron_debug_command { iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT debug False iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE iniset $NEUTRON_TEST_CONFIG_FILE agent root_helper "$Q_RR_COMMAND" + if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then + iniset $NEUTRON_TEST_CONFIG_FILE agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" + fi _neutron_setup_interface_driver $NEUTRON_TEST_CONFIG_FILE @@ -910,6 +917,9 @@ function _configure_neutron_dhcp_agent { iniset $Q_DHCP_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE iniset $Q_DHCP_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" + if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then + iniset $NEUTRON_TEST_CONFIG_FILE agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" + fi if ! 
is_service_enabled q-l3; then if [[ "$ENABLE_ISOLATED_METADATA" = "True" ]]; then @@ -943,6 +953,9 @@ function _configure_neutron_l3_agent { iniset $Q_L3_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $Q_L3_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE iniset $Q_L3_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" + if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then + iniset $Q_L3_CONF_FILE agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" + fi _neutron_setup_interface_driver $Q_L3_CONF_FILE @@ -956,6 +969,9 @@ function _configure_neutron_metadata_agent { iniset $Q_META_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $Q_META_CONF_FILE DEFAULT nova_metadata_ip $Q_META_DATA_IP iniset $Q_META_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" + if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then + iniset $Q_META_CONF_FILE agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" + fi # Configures keystone for metadata_agent # The third argument "True" sets auth_url needed to communicate with keystone @@ -1008,6 +1024,9 @@ function _configure_neutron_plugin_agent { # Specify the default root helper prior to agent configuration to # ensure that an agent's configuration can override the default iniset /$Q_PLUGIN_CONF_FILE agent root_helper "$Q_RR_COMMAND" + if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then + iniset /$Q_PLUGIN_CONF_FILE agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" + fi iniset $NEUTRON_CONF DEFAULT verbose True iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL @@ -1106,16 +1125,21 @@ function _neutron_setup_rootwrap { sudo chmod 0644 $Q_RR_CONF_FILE # Specify ``rootwrap.conf`` as first parameter to neutron-rootwrap ROOTWRAP_SUDOER_CMD="$NEUTRON_ROOTWRAP $Q_RR_CONF_FILE *" + ROOTWRAP_DAEMON_SUDOER_CMD="$NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE" # Set up the rootwrap sudoers for neutron TEMPFILE=`mktemp` echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE + echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_DAEMON_SUDOER_CMD" 
>>$TEMPFILE chmod 0440 $TEMPFILE sudo chown root:root $TEMPFILE sudo mv $TEMPFILE /etc/sudoers.d/neutron-rootwrap # Update the root_helper iniset $NEUTRON_CONF agent root_helper "$Q_RR_COMMAND" + if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then + iniset $NEUTRON_CONF agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" + fi } # Configures keystone integration for neutron service and agents From 6a83c423fc1d788d9e81b58a8659eca1cb84095d Mon Sep 17 00:00:00 2001 From: Eli Qiao Date: Tue, 17 Mar 2015 16:54:16 +0800 Subject: [PATCH 0056/2941] Fix pip install error If we set mutiple proxy (ip or url), pip install will treat the second proxy as a command. Add quotation marks around proxy. expecially for no_proxy Change-Id: I38ad3f083ba5155cda0e5e2b8f5df64492b7fecd --- inc/python | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/inc/python b/inc/python index d72c3c94d7..229c54009d 100644 --- a/inc/python +++ b/inc/python @@ -94,9 +94,9 @@ function pip_install { $xtrace $sudo_pip \ - http_proxy=${http_proxy:-} \ - https_proxy=${https_proxy:-} \ - no_proxy=${no_proxy:-} \ + http_proxy="${http_proxy:-}" \ + https_proxy="${https_proxy:-}" \ + no_proxy="${no_proxy:-}" \ PIP_FIND_LINKS=$PIP_FIND_LINKS \ $cmd_pip install \ $@ From 302389bf8d1d9dfeed9a7c07e8bb4b85fc731028 Mon Sep 17 00:00:00 2001 From: Vitaly Gridnev Date: Thu, 12 Mar 2015 14:15:44 +0300 Subject: [PATCH 0057/2941] [SAHARA] Remove copying sahara.conf from base config file Sahara is going to remove sahara.conf.sample because it is not gated anymore and therefore it gets out of date. 
So, we need to remove copying sahara.conf from base config file sahara.conf.sample Change-Id: I0ddf36cfc15694dfe41fe695d577199da75ce7f1 --- lib/sahara | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/sahara b/lib/sahara index 521b19a4a1..709e90e1bd 100644 --- a/lib/sahara +++ b/lib/sahara @@ -111,9 +111,6 @@ function configure_sahara { cp -p $SAHARA_DIR/etc/sahara/policy.json $SAHARA_CONF_DIR fi - # Copy over sahara configuration file and configure common parameters. - cp $SAHARA_DIR/etc/sahara/sahara.conf.sample $SAHARA_CONF_FILE - # Create auth cache dir sudo mkdir -p $SAHARA_AUTH_CACHE_DIR sudo chown $STACK_USER $SAHARA_AUTH_CACHE_DIR From d66bac3f70ce2470be8d19f91b6945483a62328f Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 17 Mar 2015 09:10:01 -0400 Subject: [PATCH 0058/2941] remove horizon exercises The horizon team keeps changing how the UI is exposed. This exercise keeps lagging and doesn't really test anything useful any more. Just delete it. Change-Id: Id62904868f1d4b39e33d2ad63340b5ee2177fb56 --- exercises/horizon.sh | 45 -------------------------------------------- 1 file changed, 45 deletions(-) delete mode 100755 exercises/horizon.sh diff --git a/exercises/horizon.sh b/exercises/horizon.sh deleted file mode 100755 index 4020580157..0000000000 --- a/exercises/horizon.sh +++ /dev/null @@ -1,45 +0,0 @@ -#!/usr/bin/env bash - -# **horizon.sh** - -# Sanity check that horizon started if enabled - -echo "*********************************************************************" -echo "Begin DevStack Exercise: $0" -echo "*********************************************************************" - -# This script exits on an error so that errors don't compound and you see -# only the first error that occurred. -set -o errexit - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following allowing as the install occurs. 
-set -o xtrace - - -# Settings -# ======== - -# Keep track of the current directory -EXERCISE_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) - -# Import common functions -source $TOP_DIR/functions - -# Import configuration -source $TOP_DIR/openrc - -# Import exercise configuration -source $TOP_DIR/exerciserc - -is_service_enabled horizon || exit 55 - -# can we get the front page -$CURL_GET http://$SERVICE_HOST 2>/dev/null | grep -q 'Log In' || die $LINENO "Horizon front page not functioning!" - -set +o xtrace -echo "*********************************************************************" -echo "SUCCESS: End DevStack Exercise: $0" -echo "*********************************************************************" - From a03ed3762613a70e9f15811bc02a5006c0004f05 Mon Sep 17 00:00:00 2001 From: Thanassis Parathyras Date: Thu, 12 Mar 2015 22:15:50 +0200 Subject: [PATCH 0059/2941] Adds documentation for devstack unit tests enablement Closes Bug: 1283214 Closes Bug: 1203723 Change-Id: Iac25185c7cc92ddebd3a22b602f7c8885d009807 --- doc/source/configuration.rst | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index fe3e2c29bf..7d06658ee2 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -378,6 +378,18 @@ IP Version can be configured with any valid IPv6 prefix. The default values make use of an auto-generated ``IPV6_GLOBAL_ID`` to comply with RFC 4193.* +Unit tests dependencies install +------------------------------- + + | *Default: ``INSTALL_TESTONLY_PACKAGES=False``* + | In order to be able to run unit tests with script ``run_test.sh``, + the required package dependencies need to be installed. + Setting this option as below does the work. 
+ + :: + + INSTALL_TESTONLY_PACKAGES=True + Examples ======== From 744c2afd6f5a594a5a16144c773436fbca263c4d Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Tue, 16 Dec 2014 12:00:40 +1300 Subject: [PATCH 0060/2941] Allow heat-standalone to work with keystone v3 Heat can now run in standalone mode with the default v3 keystone backend. This change removes the installation of the v2 contrib backend. It also configures saner defaults when HEAT_STANDALONE is True. Using trusts and a stack-domain will never work in standalone mode since they both require a service user which doesn't exist in standalone mode. Finally, this change prevents heat.conf being populated with service user options not required by standalone mode. Configuring the v2 backend may be reintroduced later with a dedicated flag variable. Change-Id: I88403e359e5e59e776b25ba1b65fae6fa8a3548e --- lib/heat | 83 ++++++++++++++++++++++++++++++-------------------------- stack.sh | 2 +- 2 files changed, 45 insertions(+), 40 deletions(-) diff --git a/lib/heat b/lib/heat index a088e82886..cef70692c7 100644 --- a/lib/heat +++ b/lib/heat @@ -49,13 +49,19 @@ HEAT_CONF_DIR=/etc/heat HEAT_CONF=$HEAT_CONF_DIR/heat.conf HEAT_ENV_DIR=$HEAT_CONF_DIR/environment.d HEAT_TEMPLATES_DIR=$HEAT_CONF_DIR/templates -HEAT_STACK_DOMAIN=$(trueorfalse True HEAT_STACK_DOMAIN) HEAT_API_HOST=${HEAT_API_HOST:-$HOST_IP} HEAT_API_PORT=${HEAT_API_PORT:-8004} # other default options -HEAT_DEFERRED_AUTH=${HEAT_DEFERRED_AUTH:-trusts} +if [[ "$HEAT_STANDALONE" = "True" ]]; then + # for standalone, use defaults which require no service user + HEAT_STACK_DOMAIN=`trueorfalse False $HEAT_STACK_DOMAIN` + HEAT_DEFERRED_AUTH=${HEAT_DEFERRED_AUTH:-password} +else + HEAT_STACK_DOMAIN=`trueorfalse True $HEAT_STACK_DOMAIN` + HEAT_DEFERRED_AUTH=${HEAT_DEFERRED_AUTH:-trusts} +fi # Tell Tempest this project is present TEMPEST_SERVICES+=,heat @@ -77,13 +83,11 @@ function cleanup_heat { sudo rm -rf $HEAT_AUTH_CACHE_DIR sudo rm -rf $HEAT_ENV_DIR sudo rm -rf 
$HEAT_TEMPLATES_DIR + sudo rm -rf $HEAT_CONF_DIR } # configure_heat() - Set config files, create data dirs, etc function configure_heat { - if [[ "$HEAT_STANDALONE" = "True" ]]; then - setup_develop $HEAT_DIR/contrib/heat_keystoneclient_v2 - fi if [[ ! -d $HEAT_CONF_DIR ]]; then sudo mkdir -p $HEAT_CONF_DIR @@ -127,24 +131,22 @@ function configure_heat { # auth plugin setup. This should be fixed in heat. Heat is also the only # service that requires the auth_uri to include a /v2.0. Remove this custom # setup when bug #1300246 is resolved. - iniset $HEAT_CONF keystone_authtoken identity_uri $KEYSTONE_AUTH_URI iniset $HEAT_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_URI/v2.0 - iniset $HEAT_CONF keystone_authtoken admin_user heat - iniset $HEAT_CONF keystone_authtoken admin_password $SERVICE_PASSWORD - iniset $HEAT_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME - iniset $HEAT_CONF keystone_authtoken cafile $SSL_BUNDLE_FILE - iniset $HEAT_CONF keystone_authtoken signing_dir $HEAT_AUTH_CACHE_DIR - - # ec2authtoken - iniset $HEAT_CONF ec2authtoken auth_uri $KEYSTONE_SERVICE_URI/v2.0 - - # paste_deploy if [[ "$HEAT_STANDALONE" = "True" ]]; then iniset $HEAT_CONF paste_deploy flavor standalone - iniset $HEAT_CONF DEFAULT keystone_backend heat_keystoneclient_v2.client.KeystoneClientV2 iniset $HEAT_CONF clients_heat url "http://$HEAT_API_HOST:$HEAT_API_PORT/v1/%(tenant_id)s" + else + iniset $HEAT_CONF keystone_authtoken identity_uri $KEYSTONE_AUTH_URI + iniset $HEAT_CONF keystone_authtoken admin_user heat + iniset $HEAT_CONF keystone_authtoken admin_password $SERVICE_PASSWORD + iniset $HEAT_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME + iniset $HEAT_CONF keystone_authtoken cafile $SSL_BUNDLE_FILE + iniset $HEAT_CONF keystone_authtoken signing_dir $HEAT_AUTH_CACHE_DIR fi + # ec2authtoken + iniset $HEAT_CONF ec2authtoken auth_uri $KEYSTONE_SERVICE_URI/v2.0 + # OpenStack API iniset $HEAT_CONF heat_api bind_port $HEAT_API_PORT iniset 
$HEAT_CONF heat_api workers "$API_WORKERS" @@ -243,29 +245,32 @@ function stop_heat { # create_heat_accounts() - Set up common required heat accounts function create_heat_accounts { - create_service_user "heat" "admin" - - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - - local heat_service=$(get_or_create_service "heat" \ - "orchestration" "Heat Orchestration Service") - get_or_create_endpoint $heat_service \ - "$REGION_NAME" \ - "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \ - "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \ - "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" - - local heat_cfn_service=$(get_or_create_service "heat-cfn" \ - "cloudformation" "Heat CloudFormation Service") - get_or_create_endpoint $heat_cfn_service \ - "$REGION_NAME" \ - "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" \ - "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" \ - "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" - fi + if [[ "$HEAT_STANDALONE" != "True" ]]; then + + create_service_user "heat" "admin" + + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + + local heat_service=$(get_or_create_service "heat" \ + "orchestration" "Heat Orchestration Service") + get_or_create_endpoint $heat_service \ + "$REGION_NAME" \ + "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \ + "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \ + "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" + + local heat_cfn_service=$(get_or_create_service "heat-cfn" \ + "cloudformation" "Heat CloudFormation Service") + get_or_create_endpoint $heat_cfn_service \ + "$REGION_NAME" \ + "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" \ + "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" \ + "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" + fi - # heat_stack_user role is for users created by Heat 
- get_or_create_role "heat_stack_user" + # heat_stack_user role is for users created by Heat + get_or_create_role "heat_stack_user" + fi if [[ $HEAT_DEFERRED_AUTH == trusts ]]; then iniset $HEAT_CONF DEFAULT deferred_auth_method trusts diff --git a/stack.sh b/stack.sh index eac7eec724..f049782387 100755 --- a/stack.sh +++ b/stack.sh @@ -978,7 +978,7 @@ if is_service_enabled keystone; then create_swift_accounts fi - if is_service_enabled heat && [[ "$HEAT_STANDALONE" != "True" ]]; then + if is_service_enabled heat; then create_heat_accounts fi From fdf00f27db19f572ac1d8fd3714c5b412556dbf3 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 13 Mar 2015 11:50:02 +1100 Subject: [PATCH 0061/2941] Add defaults for yum proxy variables Without these defaults, sourcing functions-common with -u turned on (as say ./tools/build_wheels.sh does) will bail out with unset variable errors. Also fix up quoting, and add no_proxy for zypper run Change-Id: Ideb441634243c1c5ce7db3a375c2d98617e9d1dc --- functions-common | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/functions-common b/functions-common index 4739e42e90..838ca53f6a 100644 --- a/functions-common +++ b/functions-common @@ -1019,8 +1019,8 @@ function yum_install { # The manual check for missing packages is because yum -y assumes # missing packages are OK. 
See # https://bugzilla.redhat.com/show_bug.cgi?id=965567 - $sudo http_proxy=$http_proxy https_proxy=$https_proxy \ - no_proxy=$no_proxy \ + $sudo http_proxy="${http_proxy:-}" https_proxy="${https_proxy:-}" \ + no_proxy="${no_proxy:-}" \ ${YUM:-yum} install -y "$@" 2>&1 | \ awk ' BEGIN { fail=0 } @@ -1042,7 +1042,8 @@ function zypper_install { [[ "$OFFLINE" = "True" ]] && return local sudo="sudo" [[ "$(id -u)" = "0" ]] && sudo="env" - $sudo http_proxy=$http_proxy https_proxy=$https_proxy \ + $sudo http_proxy="${http_proxy:-}" https_proxy="${https_proxy:-}" \ + no_proxy="${no_proxy:-}" \ zypper --non-interactive install --auto-agree-with-licenses "$@" } From ee78c9e2f6076db2db1b2fc3c1178905de6f7e5d Mon Sep 17 00:00:00 2001 From: Josh Gachnang Date: Mon, 16 Mar 2015 23:24:52 -0700 Subject: [PATCH 0062/2941] Add Ironic cleaning network The cleaning network is where ramdisks will be booted during the cleaning process. We want to ensure nodes are being properly cleaned on tear down. Change-Id: Ic38de10668c97648d073fdf9a3afc59712057849 Implements: bp/implement-cleaning-states --- lib/ironic | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/ironic b/lib/ironic index 35b5411cd4..71f9933d36 100644 --- a/lib/ironic +++ b/lib/ironic @@ -427,6 +427,11 @@ function create_ironic_accounts { # init_ironic() - Initialize databases, etc. function init_ironic { + # Save private network as cleaning network + local cleaning_network_uuid + cleaning_network_uuid=$(neutron net-list | grep private | get_field 1) + iniset $IRONIC_CONF_FILE neutron cleaning_network_uuid ${cleaning_network_uuid} + # (Re)create ironic database recreate_database ironic From 8421c2b9ab5d8242abb7d1bdc20435408db8b802 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 16 Mar 2015 13:52:19 -0500 Subject: [PATCH 0063/2941] Use install(1) where possible This eliminated a number of sudo calls by doing the copy/chown/chmod in a single step and sets a common pattern. 
Change-Id: I9c8f48854d5bc443cc187df0948c28b82c4d2838 --- lib/ceilometer | 9 ++------- lib/cinder | 21 ++++++++------------- lib/glance | 18 +++--------------- lib/heat | 16 ++++------------ lib/ironic | 17 ++++------------- lib/keystone | 11 +++-------- lib/neutron | 20 ++++++-------------- lib/nova | 27 ++++++++++----------------- lib/sahara | 10 ++-------- lib/swift | 10 ++++------ lib/tempest | 8 ++------ lib/trove | 5 +---- lib/zaqar | 6 ++---- 13 files changed, 51 insertions(+), 127 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index 9db0640940..318c5467cf 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -163,11 +163,7 @@ function _config_ceilometer_apache_wsgi { # configure_ceilometer() - Set config files, create data dirs, etc function configure_ceilometer { - [ ! -d $CEILOMETER_CONF_DIR ] && sudo mkdir -m 755 -p $CEILOMETER_CONF_DIR - sudo chown $STACK_USER $CEILOMETER_CONF_DIR - - [ ! -d $CEILOMETER_API_LOG_DIR ] && sudo mkdir -m 755 -p $CEILOMETER_API_LOG_DIR - sudo chown $STACK_USER $CEILOMETER_API_LOG_DIR + sudo install -d -o $STACK_USER -m 755 $CEILOMETER_CONF_DIR $CEILOMETER_API_LOG_DIR iniset_rpc_backend ceilometer $CEILOMETER_CONF DEFAULT @@ -267,8 +263,7 @@ function configure_mongodb { # init_ceilometer() - Initialize etc. 
function init_ceilometer { # Create cache dir - sudo mkdir -p $CEILOMETER_AUTH_CACHE_DIR - sudo chown $STACK_USER $CEILOMETER_AUTH_CACHE_DIR + sudo install -d -o $STACK_USER $CEILOMETER_AUTH_CACHE_DIR rm -f $CEILOMETER_AUTH_CACHE_DIR/* if is_service_enabled mysql postgresql; then diff --git a/lib/cinder b/lib/cinder index 880af1fd40..51f0163e44 100644 --- a/lib/cinder +++ b/lib/cinder @@ -174,16 +174,15 @@ function configure_cinder_rootwrap { if [[ -d $CINDER_CONF_DIR/rootwrap.d ]]; then sudo rm -rf $CINDER_CONF_DIR/rootwrap.d fi + # Deploy filters to /etc/cinder/rootwrap.d - sudo mkdir -m 755 $CINDER_CONF_DIR/rootwrap.d - sudo cp $CINDER_DIR/etc/cinder/rootwrap.d/*.filters $CINDER_CONF_DIR/rootwrap.d - sudo chown -R root:root $CINDER_CONF_DIR/rootwrap.d - sudo chmod 644 $CINDER_CONF_DIR/rootwrap.d/* + sudo install -d -o root -g root -m 755 $CINDER_CONF_DIR/rootwrap.d + sudo install -o root -g root -m 644 $CINDER_DIR/etc/cinder/rootwrap.d/*.filters $CINDER_CONF_DIR/rootwrap.d + # Set up rootwrap.conf, pointing to /etc/cinder/rootwrap.d - sudo cp $CINDER_DIR/etc/cinder/rootwrap.conf $CINDER_CONF_DIR/ + sudo install -o root -g root -m 644 $CINDER_DIR/etc/cinder/rootwrap.conf $CINDER_CONF_DIR sudo sed -e "s:^filters_path=.*$:filters_path=$CINDER_CONF_DIR/rootwrap.d:" -i $CINDER_CONF_DIR/rootwrap.conf - sudo chown root:root $CINDER_CONF_DIR/rootwrap.conf - sudo chmod 0644 $CINDER_CONF_DIR/rootwrap.conf + # Specify rootwrap.conf as first parameter to rootwrap ROOTWRAP_CSUDOER_CMD="$cinder_rootwrap $CINDER_CONF_DIR/rootwrap.conf *" @@ -197,10 +196,7 @@ function configure_cinder_rootwrap { # configure_cinder() - Set config files, create data dirs, etc function configure_cinder { - if [[ ! 
-d $CINDER_CONF_DIR ]]; then - sudo mkdir -p $CINDER_CONF_DIR - fi - sudo chown $STACK_USER $CINDER_CONF_DIR + sudo install -d -o $STACK_USER -m 755 $CINDER_CONF_DIR cp -p $CINDER_DIR/etc/cinder/policy.json $CINDER_CONF_DIR @@ -351,8 +347,7 @@ function create_cinder_accounts { # create_cinder_cache_dir() - Part of the init_cinder() process function create_cinder_cache_dir { # Create cache dir - sudo mkdir -p $CINDER_AUTH_CACHE_DIR - sudo chown $STACK_USER $CINDER_AUTH_CACHE_DIR + sudo install -d -o $STACK_USER $CINDER_AUTH_CACHE_DIR rm -f $CINDER_AUTH_CACHE_DIR/* } diff --git a/lib/glance b/lib/glance index eb1df2e8ae..db3de17232 100755 --- a/lib/glance +++ b/lib/glance @@ -90,15 +90,7 @@ function cleanup_glance { # configure_glance() - Set config files, create data dirs, etc function configure_glance { - if [[ ! -d $GLANCE_CONF_DIR ]]; then - sudo mkdir -p $GLANCE_CONF_DIR - fi - sudo chown $STACK_USER $GLANCE_CONF_DIR - - if [[ ! -d $GLANCE_METADEF_DIR ]]; then - sudo mkdir -p $GLANCE_METADEF_DIR - fi - sudo chown $STACK_USER $GLANCE_METADEF_DIR + sudo install -d -o $STACK_USER $GLANCE_CONF_DIR $GLANCE_METADEF_DIR # Copy over our glance configurations and update them cp $GLANCE_DIR/etc/glance-registry.conf $GLANCE_REGISTRY_CONF @@ -263,12 +255,8 @@ function create_glance_accounts { # create_glance_cache_dir() - Part of the init_glance() process function create_glance_cache_dir { # Create cache dir - sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/api - sudo chown $STACK_USER $GLANCE_AUTH_CACHE_DIR/api - rm -f $GLANCE_AUTH_CACHE_DIR/api/* - sudo mkdir -p $GLANCE_AUTH_CACHE_DIR/registry - sudo chown $STACK_USER $GLANCE_AUTH_CACHE_DIR/registry - rm -f $GLANCE_AUTH_CACHE_DIR/registry/* + sudo install -d -o $STACK_USER $GLANCE_AUTH_CACHE_DIR/api $GLANCE_AUTH_CACHE_DIR/registry + rm -f $GLANCE_AUTH_CACHE_DIR/api/* $GLANCE_AUTH_CACHE_DIR/registry/* } # init_glance() - Initialize databases, etc. 
diff --git a/lib/heat b/lib/heat index a088e82886..d75652ab7b 100644 --- a/lib/heat +++ b/lib/heat @@ -85,10 +85,7 @@ function configure_heat { setup_develop $HEAT_DIR/contrib/heat_keystoneclient_v2 fi - if [[ ! -d $HEAT_CONF_DIR ]]; then - sudo mkdir -p $HEAT_CONF_DIR - fi - sudo chown $STACK_USER $HEAT_CONF_DIR + sudo install -d -o $STACK_USER $HEAT_CONF_DIR # remove old config files rm -f $HEAT_CONF_DIR/heat-*.conf @@ -172,15 +169,11 @@ function configure_heat { iniset $HEAT_CONF DEFAULT enable_stack_abandon true fi - # heat environment - sudo mkdir -p $HEAT_ENV_DIR - sudo chown $STACK_USER $HEAT_ENV_DIR + sudo install -d -o $STACK_USER $HEAT_ENV_DIR $HEAT_TEMPLATES_DIR + # copy the default environment cp $HEAT_DIR/etc/heat/environment.d/* $HEAT_ENV_DIR/ - # heat template resources. - sudo mkdir -p $HEAT_TEMPLATES_DIR - sudo chown $STACK_USER $HEAT_TEMPLATES_DIR # copy the default templates cp $HEAT_DIR/etc/heat/templates/* $HEAT_TEMPLATES_DIR/ @@ -199,8 +192,7 @@ function init_heat { # create_heat_cache_dir() - Part of the init_heat() process function create_heat_cache_dir { # Create cache dirs - sudo mkdir -p $HEAT_AUTH_CACHE_DIR - sudo chown $STACK_USER $HEAT_AUTH_CACHE_DIR + sudo install -d -o $STACK_USER $HEAT_AUTH_CACHE_DIR } # install_heatclient() - Collect source and prepare diff --git a/lib/ironic b/lib/ironic index 35b5411cd4..2fddc54b63 100644 --- a/lib/ironic +++ b/lib/ironic @@ -233,22 +233,14 @@ function cleanup_ironic { # configure_ironic_dirs() - Create all directories required by Ironic and # associated services. function configure_ironic_dirs { - if [[ ! 
-d $IRONIC_CONF_DIR ]]; then - sudo mkdir -p $IRONIC_CONF_DIR - fi + sudo install -d -o $STACK_USER $IRONIC_CONF_DIR $STACK_USER $IRONIC_DATA_DIR \ + $IRONIC_STATE_PATH $IRONIC_TFTPBOOT_DIR $IRONIC_TFTPBOOT_DIR/pxelinux.cfg + sudo chown -R $STACK_USER:$LIBVIRT_GROUP $IRONIC_TFTPBOOT_DIR if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then - sudo mkdir -p $IRONIC_HTTP_DIR - sudo chown -R $STACK_USER:$LIBVIRT_GROUP $IRONIC_HTTP_DIR + sudo install -d -o $STACK_USER -g $LIBVIRT_GROUP $IRONIC_HTTP_DIR fi - sudo mkdir -p $IRONIC_DATA_DIR - sudo mkdir -p $IRONIC_STATE_PATH - sudo mkdir -p $IRONIC_TFTPBOOT_DIR - sudo chown -R $STACK_USER $IRONIC_DATA_DIR $IRONIC_STATE_PATH - sudo chown -R $STACK_USER:$LIBVIRT_GROUP $IRONIC_TFTPBOOT_DIR - mkdir -p $IRONIC_TFTPBOOT_DIR/pxelinux.cfg - if [ ! -f $IRONIC_PXE_BOOT_IMAGE ]; then die $LINENO "PXE boot file $IRONIC_PXE_BOOT_IMAGE not found." fi @@ -267,7 +259,6 @@ function configure_ironic_dirs { # configure_ironic() - Set config files, create data dirs, etc function configure_ironic { configure_ironic_dirs - sudo chown $STACK_USER $IRONIC_CONF_DIR # Copy over ironic configuration file and configure common parameters. cp $IRONIC_DIR/etc/ironic/ironic.conf.sample $IRONIC_CONF_FILE diff --git a/lib/keystone b/lib/keystone index 0968445dc0..9dfdc608d4 100644 --- a/lib/keystone +++ b/lib/keystone @@ -175,14 +175,10 @@ function _config_keystone_apache_wsgi { # configure_keystone() - Set config files, create data dirs, etc function configure_keystone { - if [[ ! 
-d $KEYSTONE_CONF_DIR ]]; then - sudo mkdir -p $KEYSTONE_CONF_DIR - fi - sudo chown $STACK_USER $KEYSTONE_CONF_DIR + sudo install -d -o $STACK_USER $KEYSTONE_CONF_DIR if [[ "$KEYSTONE_CONF_DIR" != "$KEYSTONE_DIR/etc" ]]; then - cp -p $KEYSTONE_DIR/etc/keystone.conf.sample $KEYSTONE_CONF - chmod 600 $KEYSTONE_CONF + install -m 600 $KEYSTONE_DIR/etc/keystone.conf.sample $KEYSTONE_CONF cp -p $KEYSTONE_DIR/etc/policy.json $KEYSTONE_CONF_DIR if [[ -f "$KEYSTONE_DIR/etc/keystone-paste.ini" ]]; then cp -p "$KEYSTONE_DIR/etc/keystone-paste.ini" "$KEYSTONE_PASTE_INI" @@ -490,8 +486,7 @@ function init_keystone { $KEYSTONE_DIR/bin/keystone-manage pki_setup # Create cache dir - sudo mkdir -p $KEYSTONE_AUTH_CACHE_DIR - sudo chown $STACK_USER $KEYSTONE_AUTH_CACHE_DIR + sudo install -d -o $STACK_USER $KEYSTONE_AUTH_CACHE_DIR rm -f $KEYSTONE_AUTH_CACHE_DIR/* fi } diff --git a/lib/neutron b/lib/neutron index e41abafda9..d0463d8089 100755 --- a/lib/neutron +++ b/lib/neutron @@ -495,8 +495,7 @@ function create_nova_conf_neutron { # create_neutron_cache_dir() - Part of the _neutron_setup_keystone() process function create_neutron_cache_dir { # Create cache dir - sudo mkdir -p $NEUTRON_AUTH_CACHE_DIR - sudo chown $STACK_USER $NEUTRON_AUTH_CACHE_DIR + sudo install -d -o $STACK_USER $NEUTRON_AUTH_CACHE_DIR rm -f $NEUTRON_AUTH_CACHE_DIR/* } @@ -800,10 +799,7 @@ function cleanup_neutron { function _create_neutron_conf_dir { # Put config files in ``NEUTRON_CONF_DIR`` for everyone to find - if [[ ! -d $NEUTRON_CONF_DIR ]]; then - sudo mkdir -p $NEUTRON_CONF_DIR - fi - sudo chown $STACK_USER $NEUTRON_CONF_DIR + sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR } # _configure_neutron_common() @@ -1075,10 +1071,8 @@ function _neutron_service_plugin_class_add { # _neutron_deploy_rootwrap_filters() - deploy rootwrap filters to $Q_CONF_ROOTWRAP_D (owned by root). 
function _neutron_deploy_rootwrap_filters { local srcdir=$1 - mkdir -p -m 755 $Q_CONF_ROOTWRAP_D - sudo cp -pr $srcdir/etc/neutron/rootwrap.d/* $Q_CONF_ROOTWRAP_D/ - sudo chown -R root:root $Q_CONF_ROOTWRAP_D - sudo chmod 644 $Q_CONF_ROOTWRAP_D/* + sudo install -d -o root -m 755 $Q_CONF_ROOTWRAP_D + sudo install -o root -m 644 $srcdir/etc/neutron/rootwrap.d/* $Q_CONF_ROOTWRAP_D/ } # _neutron_setup_rootwrap() - configure Neutron's rootwrap @@ -1097,13 +1091,11 @@ function _neutron_setup_rootwrap { # Set up ``rootwrap.conf``, pointing to ``$NEUTRON_CONF_DIR/rootwrap.d`` # location moved in newer versions, prefer new location if test -r $NEUTRON_DIR/etc/neutron/rootwrap.conf; then - sudo cp -p $NEUTRON_DIR/etc/neutron/rootwrap.conf $Q_RR_CONF_FILE + sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/neutron/rootwrap.conf $Q_RR_CONF_FILE else - sudo cp -p $NEUTRON_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE + sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE fi sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE - sudo chown root:root $Q_RR_CONF_FILE - sudo chmod 0644 $Q_RR_CONF_FILE # Specify ``rootwrap.conf`` as first parameter to neutron-rootwrap ROOTWRAP_SUDOER_CMD="$NEUTRON_ROOTWRAP $Q_RR_CONF_FILE *" diff --git a/lib/nova b/lib/nova index 199daeea3d..e850fe9daf 100644 --- a/lib/nova +++ b/lib/nova @@ -232,16 +232,15 @@ function configure_nova_rootwrap { if [[ -d $NOVA_CONF_DIR/rootwrap.d ]]; then sudo rm -rf $NOVA_CONF_DIR/rootwrap.d fi + # Deploy filters to /etc/nova/rootwrap.d - sudo mkdir -m 755 $NOVA_CONF_DIR/rootwrap.d - sudo cp $NOVA_DIR/etc/nova/rootwrap.d/*.filters $NOVA_CONF_DIR/rootwrap.d - sudo chown -R root:root $NOVA_CONF_DIR/rootwrap.d - sudo chmod 644 $NOVA_CONF_DIR/rootwrap.d/* + sudo install -d -o root -g root -m 755 $NOVA_CONF_DIR/rootwrap.d + sudo install -o root -g root -m 644 $NOVA_DIR/etc/nova/rootwrap.d/*.filters $NOVA_CONF_DIR/rootwrap.d + # Set up rootwrap.conf, pointing to 
/etc/nova/rootwrap.d - sudo cp $NOVA_DIR/etc/nova/rootwrap.conf $NOVA_CONF_DIR/ + sudo install -o root -g root -m 644 $NOVA_DIR/etc/nova/rootwrap.conf $NOVA_CONF_DIR sudo sed -e "s:^filters_path=.*$:filters_path=$NOVA_CONF_DIR/rootwrap.d:" -i $NOVA_CONF_DIR/rootwrap.conf - sudo chown root:root $NOVA_CONF_DIR/rootwrap.conf - sudo chmod 0644 $NOVA_CONF_DIR/rootwrap.conf + # Specify rootwrap.conf as first parameter to nova-rootwrap local rootwrap_sudoer_cmd="$NOVA_ROOTWRAP $NOVA_CONF_DIR/rootwrap.conf *" @@ -256,10 +255,7 @@ function configure_nova_rootwrap { # configure_nova() - Set config files, create data dirs, etc function configure_nova { # Put config files in ``/etc/nova`` for everyone to find - if [[ ! -d $NOVA_CONF_DIR ]]; then - sudo mkdir -p $NOVA_CONF_DIR - fi - sudo chown $STACK_USER $NOVA_CONF_DIR + sudo install -d -o $STACK_USER $NOVA_CONF_DIR cp -p $NOVA_DIR/etc/nova/policy.json $NOVA_CONF_DIR @@ -318,8 +314,7 @@ function configure_nova { # ---------------- # Nova stores each instance in its own directory. - sudo mkdir -p $NOVA_INSTANCES_PATH - sudo chown -R $STACK_USER $NOVA_INSTANCES_PATH + sudo install -d -o $STACK_USER $NOVA_INSTANCES_PATH # You can specify a different disk to be mounted and used for backing the # virtual machines. 
If there is a partition labeled nova-instances we @@ -603,8 +598,7 @@ function init_nova_cells { # create_nova_cache_dir() - Part of the init_nova() process function create_nova_cache_dir { # Create cache dir - sudo mkdir -p $NOVA_AUTH_CACHE_DIR - sudo chown $STACK_USER $NOVA_AUTH_CACHE_DIR + sudo install -d -o $STACK_USER $NOVA_AUTH_CACHE_DIR rm -f $NOVA_AUTH_CACHE_DIR/* } @@ -621,8 +615,7 @@ function create_nova_conf_nova_network { # create_nova_keys_dir() - Part of the init_nova() process function create_nova_keys_dir { # Create keys dir - sudo mkdir -p ${NOVA_STATE_PATH}/keys - sudo chown -R $STACK_USER ${NOVA_STATE_PATH} + sudo install -d -o $STACK_USER ${NOVA_STATE_PATH} ${NOVA_STATE_PATH}/keys } # init_nova() - Initialize databases, etc. diff --git a/lib/sahara b/lib/sahara index 521b19a4a1..f389326a6f 100644 --- a/lib/sahara +++ b/lib/sahara @@ -101,11 +101,7 @@ function cleanup_sahara { # configure_sahara() - Set config files, create data dirs, etc function configure_sahara { - - if [[ ! 
-d $SAHARA_CONF_DIR ]]; then - sudo mkdir -p $SAHARA_CONF_DIR - fi - sudo chown $STACK_USER $SAHARA_CONF_DIR + sudo install -d -o $STACK_USER $SAHARA_CONF_DIR if [[ -f $SAHARA_DIR/etc/sahara/policy.json ]]; then cp -p $SAHARA_DIR/etc/sahara/policy.json $SAHARA_CONF_DIR @@ -115,9 +111,7 @@ function configure_sahara { cp $SAHARA_DIR/etc/sahara/sahara.conf.sample $SAHARA_CONF_FILE # Create auth cache dir - sudo mkdir -p $SAHARA_AUTH_CACHE_DIR - sudo chown $STACK_USER $SAHARA_AUTH_CACHE_DIR - sudo chmod 700 $SAHARA_AUTH_CACHE_DIR + sudo install -d -o $STACK_USER -m 700 $SAHARA_AUTH_CACHE_DIR rm -rf $SAHARA_AUTH_CACHE_DIR/* configure_auth_token_middleware $SAHARA_CONF_FILE sahara $SAHARA_AUTH_CACHE_DIR diff --git a/lib/swift b/lib/swift index 4a63500b54..9781e862db 100644 --- a/lib/swift +++ b/lib/swift @@ -306,8 +306,8 @@ function configure_swift { # Make sure to kill all swift processes first swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true - sudo mkdir -p ${SWIFT_CONF_DIR}/{object,container,account}-server - sudo chown -R ${STACK_USER}: ${SWIFT_CONF_DIR} + sudo install -d -o ${STACK_USER} ${SWIFT_CONF_DIR} + sudo install -d -o ${STACK_USER} ${SWIFT_CONF_DIR}/{object,container,account}-server if [[ "$SWIFT_CONF_DIR" != "/etc/swift" ]]; then # Some swift tools are hard-coded to use ``/etc/swift`` and are apparently not going to be fixed. @@ -534,8 +534,7 @@ function create_swift_disk { # changing the permissions so we can run it as our user. local user_group=$(id -g ${STACK_USER}) - sudo mkdir -p ${SWIFT_DATA_DIR}/{drives,cache,run,logs} - sudo chown -R ${STACK_USER}:${user_group} ${SWIFT_DATA_DIR} + sudo install -d -o ${STACK_USER} -g ${user_group} ${SWIFT_DATA_DIR}/{drives,cache,run,logs} # Create a loopback disk and format it to XFS. 
if [[ -e ${SWIFT_DISK_IMAGE} ]]; then @@ -675,8 +674,7 @@ function init_swift { } && popd >/dev/null # Create cache dir - sudo mkdir -p $SWIFT_AUTH_CACHE_DIR - sudo chown $STACK_USER $SWIFT_AUTH_CACHE_DIR + sudo install -d -o ${STACK_USER} $SWIFT_AUTH_CACHE_DIR rm -f $SWIFT_AUTH_CACHE_DIR/* } diff --git a/lib/tempest b/lib/tempest index f856ce05f9..99cfc5857b 100644 --- a/lib/tempest +++ b/lib/tempest @@ -170,12 +170,8 @@ function configure_tempest { # Create tempest.conf from tempest.conf.sample # copy every time, because the image UUIDS are going to change - if [[ ! -d $TEMPEST_CONFIG_DIR ]]; then - sudo mkdir -p $TEMPEST_CONFIG_DIR - fi - sudo chown $STACK_USER $TEMPEST_CONFIG_DIR - cp $TEMPEST_DIR/etc/tempest.conf.sample $TEMPEST_CONFIG - chmod 644 $TEMPEST_CONFIG + sudo install -d -o $STACK_USER $TEMPEST_CONFIG_DIR + install -m 644 $TEMPEST_DIR/etc/tempest.conf.sample $TEMPEST_CONFIG password=${ADMIN_PASSWORD:-secrete} diff --git a/lib/trove b/lib/trove index d77798308b..dab6dced54 100644 --- a/lib/trove +++ b/lib/trove @@ -121,10 +121,7 @@ function configure_trove { setup_develop $TROVE_DIR # Create the trove conf dir and cache dirs if they don't exist - sudo mkdir -p ${TROVE_CONF_DIR} - sudo mkdir -p ${TROVE_AUTH_CACHE_DIR} - sudo chown -R $STACK_USER: ${TROVE_CONF_DIR} - sudo chown -R $STACK_USER: ${TROVE_AUTH_CACHE_DIR} + sudo install -d -o $STACK_USER ${TROVE_CONF_DIR} ${TROVE_AUTH_CACHE_DIR} # Copy api-paste file over to the trove conf dir cp $TROVE_LOCAL_API_PASTE_INI $TROVE_API_PASTE_INI diff --git a/lib/zaqar b/lib/zaqar index 79b4c5a2ca..5f3f7bb20b 100644 --- a/lib/zaqar +++ b/lib/zaqar @@ -105,8 +105,7 @@ function configure_zaqarclient { function configure_zaqar { setup_develop $ZAQAR_DIR - [ ! 
-d $ZAQAR_CONF_DIR ] && sudo mkdir -m 755 -p $ZAQAR_CONF_DIR - sudo chown $USER $ZAQAR_CONF_DIR + sudo install -d -o $STACK_USER -m 755 $ZAQAR_CONF_DIR iniset $ZAQAR_CONF DEFAULT debug True iniset $ZAQAR_CONF DEFAULT verbose True @@ -168,8 +167,7 @@ function configure_mongodb { # init_zaqar() - Initialize etc. function init_zaqar { # Create cache dir - sudo mkdir -p $ZAQAR_AUTH_CACHE_DIR - sudo chown $STACK_USER $ZAQAR_AUTH_CACHE_DIR + sudo install -d -o $STACK_USER $ZAQAR_AUTH_CACHE_DIR rm -f $ZAQAR_AUTH_CACHE_DIR/* } From 199c6048fd209d87c87c0bb40860f4b4b2661a5e Mon Sep 17 00:00:00 2001 From: Pavlo Shchelokovskyy Date: Wed, 18 Mar 2015 10:48:47 +0000 Subject: [PATCH 0064/2941] Recreate tox venv for tempest on stack.sh run Sometimes when certain dependencies are changed, recreating an existing DevStack fails, as tempest is using an old existing venv for tox run, where dependencies are not met. This change should help developers who are reusing their existing DevStack. The gate should not be affected as there the vev is freshly created anyway. Change-Id: Ic42ba1cb0aa829c5120151d3d8cdafa4efc3ffaa --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index fcb0e59937..655bb08368 100644 --- a/lib/tempest +++ b/lib/tempest @@ -337,7 +337,7 @@ function configure_tempest { # NOTE(mtreinish): This must be done after auth settings are added to the tempest config local tmp_cfg_file=$(mktemp) cd $TEMPEST_DIR - tox -evenv -- verify-tempest-config -uro $tmp_cfg_file + tox -revenv -- verify-tempest-config -uro $tmp_cfg_file local compute_api_extensions=${COMPUTE_API_EXTENSIONS:-"all"} if [[ ! 
-z "$DISABLE_COMPUTE_API_EXTENSIONS" ]]; then From 4bf861c76c220a98a3b3165eea5448411d000f3a Mon Sep 17 00:00:00 2001 From: John Griffith Date: Tue, 17 Mar 2015 21:07:39 -0600 Subject: [PATCH 0065/2941] Create global_filter to avoid scan of missing devs A while back I added an lvm.conf file with a device filter setting to try and clean up the LVM hangs in the gate: (commit 0b9e76f280208b5b5ad54bb6fbc4133e63037286) It turns out this wasn't the real problem, the real problem is that on an LVS/VGS command LVM will attempt to open and read all potential block devices in /dev to see if they have LVM data on them. I initially thought the local filter would keep that from happening, as it turns out the local filter only limits what's returned AFTER the actual scan process. In order to keep the scan from happening at all, either a global_filter needs to be used or lvmetad needs to be running and enabled. There are situations in gate tests where /dev/sdX devices are created and deleted and the result is that we hit situations where LVM tries to open up devices to check them even if they've been removed. The result is we have a blocking open call from LVM that takes approx 60 seconds to time out and fail. Ubuntu won't have a version of lvmetad until Vivid, so for now that just leaves the global_filter as an option. This patch adds the filter routine to the end of stack.sh. We don't want to put the routine in lib/cinder_backend/lvm like we had it because now we have to set the global filter for all LVM commands on the system. So we put this as one of the last steps in stack.sh and run it if Cinder is enabled. This way we can query PV's on the system regardless of what other services may be running and using LVM and make sure that all of their devices are added to the filter as well. Also, make sure we only set this for Ubuntu as Fedora/RHEL variants utilize lvmetad. This patch also removes the old change that set the local filter. 
DocImpact Should add this to recommended config for Cinder on systems that don't have lvmetad, and recommend lvmetad for those that do. Change-Id: I5d5c48e188cbb9b4208096736807f082bce524e8 Closes-Bug: #1373513 --- lib/cinder | 17 ----------------- lib/cinder_backends/lvm | 31 ------------------------------- lib/lvm | 25 +++++++++++++++++++++++++ stack.sh | 9 +++++++++ 4 files changed, 34 insertions(+), 48 deletions(-) diff --git a/lib/cinder b/lib/cinder index 880af1fd40..958c7f05ac 100644 --- a/lib/cinder +++ b/lib/cinder @@ -372,15 +372,9 @@ function init_cinder { if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then local be be_name be_type - local has_lvm=0 for be in ${CINDER_ENABLED_BACKENDS//,/ }; do be_type=${be%%:*} be_name=${be##*:} - - if [[ $be_type == 'lvm' ]]; then - has_lvm=1 - fi - if type init_cinder_backend_${be_type} >/dev/null 2>&1; then # Always init the default volume group for lvm. if [[ "$be_type" == "lvm" ]]; then @@ -391,17 +385,6 @@ function init_cinder { done fi - # Keep it simple, set a marker if there's an LVM backend - # use the created VG's to setup lvm filters - if [[ $has_lvm == 1 ]]; then - # Order matters here, not only obviously to make - # sure the VG's are created, but also some distros - # do some customizations to lvm.conf on init, we - # want to make sure we copy those over - sudo cp /etc/lvm/lvm.conf /etc/cinder/lvm.conf - configure_cinder_backend_conf_lvm - fi - mkdir -p $CINDER_STATE_PATH/volumes create_cinder_cache_dir } diff --git a/lib/cinder_backends/lvm b/lib/cinder_backends/lvm index 52fc6fbf63..f210578339 100644 --- a/lib/cinder_backends/lvm +++ b/lib/cinder_backends/lvm @@ -19,7 +19,6 @@ # clean_cinder_backend_lvm - called from clean_cinder() # configure_cinder_backend_lvm - called from configure_cinder() # init_cinder_backend_lvm - called from init_cinder() -# configure_cinder_backend_conf_lvm - called from configure_cinder() # Save trace setting @@ -66,36 +65,6 @@ function 
init_cinder_backend_lvm { init_lvm_volume_group $VOLUME_GROUP_NAME-$be_name $VOLUME_BACKING_FILE_SIZE } -# configure_cinder_backend_conf_lvm - Sets device filter in /etc/cinder/lvm.conf -# init_cinder_backend_lvm -function configure_cinder_backend_conf_lvm { - local filter_suffix='"r/.*/" ]' - local filter_string="filter = [ " - local conf_entries=$(grep volume_group /etc/cinder/cinder.conf | sed "s/ //g") - local pv - local vg - local line - - for pv_info in $(sudo pvs --noheadings -o name,vg_name --separator ';'); do - echo_summary "Evaluate PV info for Cinder lvm.conf: $pv_info" - IFS=';' read pv vg <<< "$pv_info" - for line in ${conf_entries}; do - IFS='=' read label group <<< "$line" - group=$(echo $group|sed "s/^ *//g") - if [[ "$vg" == "$group" ]]; then - new="\"a$pv/\", " - filter_string=$filter_string$new - fi - done - done - filter_string=$filter_string$filter_suffix - - # FIXME(jdg): Possible odd case that the lvm.conf file has been modified - # and doesn't have a filter entry to search/replace. For devstack don't - # know that we care, but could consider adding a check and add - sudo sed -i "s#^[ \t]*filter.*# $filter_string#g" /etc/cinder/lvm.conf - echo "set LVM filter_strings: $filter_string" -} # Restore xtrace $MY_XTRACE diff --git a/lib/lvm b/lib/lvm index 39eed00675..d0322c76b3 100644 --- a/lib/lvm +++ b/lib/lvm @@ -138,6 +138,31 @@ function init_default_lvm_volume_group { fi } +# set_lvm_filter() Gather all devices configured for LVM and +# use them to build a global device filter +# set_lvm_filter() Create a device filter +# and add to /etc/lvm.conf. Note this uses +# all current PV's in use by LVM on the +# system to build it's filter. 
+# +# Usage: set_lvm_filter() +function set_lvm_filter { + local filter_suffix='"r|.*|" ]' + local filter_string="global_filter = [ " + local pv + local vg + local line + + for pv_info in $(sudo pvs --noheadings -o name); do + pv=$(echo -e "${pv_info}" | sed 's/ //g' | sed 's/\/dev\///g') + new="\"a|$pv|\", " + filter_string=$filter_string$new + done + filter_string=$filter_string$filter_suffix + + sudo sed -i "/# global_filter = \[*\]/a\ $global_filter$filter_string" /etc/lvm/lvm.conf + echo_summary "set lvm.conf device global_filter to: $filter_string" +} # Restore xtrace $MY_XTRACE diff --git a/stack.sh b/stack.sh index eac7eec724..ee1b985eab 100755 --- a/stack.sh +++ b/stack.sh @@ -1316,6 +1316,15 @@ service_check # Prepare bash completion for OSC openstack complete | sudo tee /etc/bash_completion.d/osc.bash_completion > /dev/null +# If cinder is configured, set global_filter for PV devices +if is_service_enabled cinder; then + if is_ubuntu; then + echo_summary "Configuring lvm.conf global device filter" + set_lvm_filter + else + echo_summary "Skip setting lvm filters for non Ubuntu systems" + fi +fi # Fin # === From 16819951038c464d330233d0d3df4173420f14ae Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Wed, 18 Mar 2015 13:45:40 +1300 Subject: [PATCH 0066/2941] Install missing heat agent projects This change adds the dib-utils repo, and adds git_clone calls for the required os-*-config projects. 
Change-Id: I2641feb0c462d2940f2698515ff62a2ff06c0e70 --- lib/heat | 7 ++++++- stackrc | 4 ++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/lib/heat b/lib/heat index cef70692c7..b8a434bdb7 100644 --- a/lib/heat +++ b/lib/heat @@ -36,6 +36,7 @@ HEAT_DIR=$DEST/heat HEAT_CFNTOOLS_DIR=$DEST/heat-cfntools HEAT_TEMPLATES_REPO_DIR=$DEST/heat-templates OCC_DIR=$DEST/os-collect-config +DIB_UTILS_DIR=$DEST/dib-utils ORC_DIR=$DEST/os-refresh-config OAC_DIR=$DEST/os-apply-config @@ -224,6 +225,10 @@ function install_heat { function install_heat_other { git_clone $HEAT_CFNTOOLS_REPO $HEAT_CFNTOOLS_DIR $HEAT_CFNTOOLS_BRANCH git_clone $HEAT_TEMPLATES_REPO $HEAT_TEMPLATES_REPO_DIR $HEAT_TEMPLATES_BRANCH + git_clone $OAC_REPO $OAC_DIR $OAC_BRANCH + git_clone $OCC_REPO $OCC_DIR $OCC_BRANCH + git_clone $ORC_REPO $ORC_DIR $ORC_BRANCH + git_clone $DIB_UTILS_REPO $DIB_UTILS_DIR $DIB_UTILS_BRANCH } # start_heat() - Start running processes, including screen @@ -304,7 +309,7 @@ function create_heat_accounts { # build_heat_pip_mirror() - Build a pip mirror containing heat agent projects function build_heat_pip_mirror { - local project_dirs="$OCC_DIR $OAC_DIR $ORC_DIR $HEAT_CFNTOOLS_DIR" + local project_dirs="$OCC_DIR $OAC_DIR $ORC_DIR $HEAT_CFNTOOLS_DIR $DIB_UTILS_DIR" local projpath proj package rm -rf $HEAT_PIP_REPO diff --git a/stackrc b/stackrc index 02b12a36a0..f8d9c43c38 100644 --- a/stackrc +++ b/stackrc @@ -427,6 +427,10 @@ GITBRANCH["ceilometermiddleware"]=${CEILOMETERMIDDLEWARE_BRANCH:-master} # ################## +# run-parts script required by os-refresh-config +DIB_UTILS_REPO=${DIB_UTILS_REPO:-${GIT_BASE}/openstack/dib-utils.git} +DIB_UTILS_BRANCH=${DIB_UTILS_BRANCH:-master} + # os-apply-config configuration template tool OAC_REPO=${OAC_REPO:-${GIT_BASE}/openstack/os-apply-config.git} OAC_BRANCH=${OAC_BRANCH:-master} From 886cbb2a86e475a7982df1d98ea8452d0f9873fd Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Wed, 18 Mar 2015 22:03:01 -0400 Subject: 
[PATCH 0067/2941] Set heat stack role in tempest config to _member_ In kilo heat started to use keystone delegations to perform the needed operations, as part of this the need to set the explicit role in devstack for stack management disappeared. However, in tempest as part of the effort to make credentials configuration more explicit an option was added to ensure that the users created by tempest have the proper role set for stack management in the heat tests. This commit sets the value of this config option in tempest to be the default role _member_ to reflect that there is no separate heat_stack_owner role created anymore. (which is the tempest default value) Change-Id: Id98a83f0a716de0fdb5f36d03407364830e8fa5f --- lib/tempest | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/tempest b/lib/tempest index 9b44f47d57..5f7be3ccb9 100644 --- a/lib/tempest +++ b/lib/tempest @@ -399,6 +399,7 @@ function configure_tempest { fi iniset $TEMPEST_CONFIG orchestration instance_type "m1.heat" iniset $TEMPEST_CONFIG orchestration build_timeout 900 + iniset $TEMPEST_CONFIG orchestration stack_owner_role "_member_" fi # Scenario From 4599fd174c0c10f3a7e51ad6cba5d4c74abac207 Mon Sep 17 00:00:00 2001 From: Steve Martinelli Date: Thu, 12 Mar 2015 21:30:58 -0400 Subject: [PATCH 0068/2941] Add roles when we create groups We should prime the groups that were created with some roles on projects. Eventually we can add users directly to the groups and not have to resort to individual user assignments. 
Change-Id: Icebafc06859f8879c584cfd67aa51cb0c9ce48af --- functions-common | 21 +++++++++++++++++++++ lib/keystone | 16 ++++++++++++++-- 2 files changed, 35 insertions(+), 2 deletions(-) diff --git a/functions-common b/functions-common index 4739e42e90..67697671d3 100644 --- a/functions-common +++ b/functions-common @@ -728,6 +728,27 @@ function get_or_add_user_project_role { echo $user_role_id } +# Gets or adds group role to project +# Usage: get_or_add_group_project_role +function get_or_add_group_project_role { + # Gets group role id + local group_role_id=$(openstack role list \ + --group $2 \ + --project $3 \ + --column "ID" \ + --column "Name" \ + | grep " $1 " | get_field 1) + if [[ -z "$group_role_id" ]]; then + # Adds role to group + group_role_id=$(openstack role add \ + $1 \ + --group $2 \ + --project $3 \ + | grep " id " | get_field 2) + fi + echo $group_role_id +} + # Gets or creates service # Usage: get_or_create_service function get_or_create_service { diff --git a/lib/keystone b/lib/keystone index c9433d98fe..acc8c2c9d0 100644 --- a/lib/keystone +++ b/lib/keystone @@ -362,6 +362,12 @@ function configure_keystone_extensions { # demo demo Member, anotherrole # invisible_to_admin demo Member +# Group Users Roles Tenant +# ------------------------------------------------------------------ +# admins admin admin admin +# nonadmin demo Member, anotherrole demo + + # Migrated from keystone_data.sh function create_keystone_accounts { @@ -403,8 +409,14 @@ function create_keystone_accounts { get_or_add_user_project_role $another_role $demo_user $demo_tenant get_or_add_user_project_role $member_role $demo_user $invis_tenant - get_or_create_group "developers" "default" "openstack developers" - get_or_create_group "testers" "default" + local admin_group=$(get_or_create_group "admins" \ + "default" "openstack admin group") + local non_admin_group=$(get_or_create_group "nonadmins" \ + "default" "non-admin group") + + get_or_add_group_project_role $member_role 
$non_admin_group $demo_tenant + get_or_add_group_project_role $another_role $non_admin_group $demo_tenant + get_or_add_group_project_role $admin_role $admin_group $admin_tenant # Keystone if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then From f26deea6b1d7a91da44979d8c7feaf1ff8970b25 Mon Sep 17 00:00:00 2001 From: yuntongjin Date: Sat, 28 Feb 2015 10:50:34 +0800 Subject: [PATCH 0069/2941] create install_default_policy Recent versions of oslo policy allow the use of a policy.d to break up policy in a more user understandable way. Nova is going to use this in Kilo to break out v2 and v2.1 API policy definitions. This provides a unified helper for installing sample policies. It makes some assumptions on project directory structure. Porting other projects to use this can happen in the future. Change-Id: Iec23b095176332414faf76a9c329f8bb5f3aa6c3 --- functions-common | 22 ++++++++++++++++++++++ lib/nova | 2 +- 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/functions-common b/functions-common index df69cbad16..8d7db96606 100644 --- a/functions-common +++ b/functions-common @@ -787,6 +787,28 @@ function get_field { done } +# install default policy +# copy over a default policy.json and policy.d for projects +function install_default_policy { + local project=$1 + local project_uc=$(echo $1|tr a-z A-Z) + local conf_dir="${project_uc}_CONF_DIR" + # eval conf dir to get the variable + conf_dir="${!conf_dir}" + local project_dir="${project_uc}_DIR" + # eval project dir to get the variable + project_dir="${!project_dir}" + local sample_conf_dir="${project_dir}/etc/${project}" + local sample_policy_dir="${project_dir}/etc/${project}/policy.d" + + # first copy any policy.json + cp -p $sample_conf_dir/policy.json $conf_dir + # then optionally copy over policy.d + if [[ -d $sample_policy_dir ]]; then + cp -r $sample_policy_dir $conf_dir/policy.d + fi +} + # Add a policy to a policy.json file # Do nothing if the policy already exists # ``policy_add policy_file 
policy_name policy_permissions`` diff --git a/lib/nova b/lib/nova index e9e78c7bc4..fe61e83620 100644 --- a/lib/nova +++ b/lib/nova @@ -261,7 +261,7 @@ function configure_nova { fi sudo chown $STACK_USER $NOVA_CONF_DIR - cp -p $NOVA_DIR/etc/nova/policy.json $NOVA_CONF_DIR + install_default_policy nova configure_nova_rootwrap From 9ebd65be32357a0554e4e5525037e7f1803077f8 Mon Sep 17 00:00:00 2001 From: Ivan Kolodyazhny Date: Sun, 8 Mar 2015 23:51:55 +0200 Subject: [PATCH 0070/2941] Increase Swift disk size up to 2GB if Glance is enabled Minimum Cinder volume size is 1GB so if Swift backend for Glance is only 1GB we can not upload volume to image. Change-Id: Ifd4cb42bf96367ff3ada0c065fa258fa5ba635d9 --- lib/swift | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/lib/swift b/lib/swift index 8a96615d01..5005ba0e10 100644 --- a/lib/swift +++ b/lib/swift @@ -64,11 +64,19 @@ if is_service_enabled s-proxy && is_service_enabled swift3; then S3_SERVICE_PORT=${S3_SERVICE_PORT:-8080} fi -# DevStack will create a loop-back disk formatted as XFS to store the -# swift data. Set ``SWIFT_LOOPBACK_DISK_SIZE`` to the disk size in -# kilobytes. -# Default is 1 gigabyte. -SWIFT_LOOPBACK_DISK_SIZE_DEFAULT=1G +if is_service_enabled g-api; then + # Minimum Cinder volume size is 1G so if Swift backend for Glance is + # only 1G we can not upload volume to image. + # Increase Swift disk size up to 2G + SWIFT_LOOPBACK_DISK_SIZE_DEFAULT=2G +else + # DevStack will create a loop-back disk formatted as XFS to store the + # swift data. Set ``SWIFT_LOOPBACK_DISK_SIZE`` to the disk size in + # kilobytes. + # Default is 1 gigabyte. + SWIFT_LOOPBACK_DISK_SIZE_DEFAULT=1G +fi + # if tempest enabled the default size is 6 Gigabyte. 
if is_service_enabled tempest; then SWIFT_LOOPBACK_DISK_SIZE_DEFAULT=${SWIFT_LOOPBACK_DISK_SIZE:-6G} From 6f3f310848d1134ff73dd23e246ad00f7cd13365 Mon Sep 17 00:00:00 2001 From: Ryan Hsu Date: Thu, 19 Mar 2015 16:26:45 -0700 Subject: [PATCH 0071/2941] Fix packages not getting installed if service name in base path Currently, if devstack base path includes the name of a given service (e.g. nova), then the service's prereq packages will not be installed. This fix changes the checking the match against the full path of the package list file rather than the name of a given service. Closes-Bug: #1434314 Change-Id: Ie81352ebd5691afc6d0019f71d5b62370e8bb95f --- functions-common | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/functions-common b/functions-common index 875c2e697d..b191f3a2b9 100644 --- a/functions-common +++ b/functions-common @@ -918,42 +918,42 @@ function get_packages { # NOTE(sdague) n-api needs glance for now because that's where # glance client is if [[ $service == n-api ]]; then - if [[ ! $file_to_parse =~ nova ]]; then + if [[ ! $file_to_parse =~ $package_dir/nova ]]; then file_to_parse="${file_to_parse} ${package_dir}/nova" fi - if [[ ! $file_to_parse =~ glance ]]; then + if [[ ! $file_to_parse =~ $package_dir/glance ]]; then file_to_parse="${file_to_parse} ${package_dir}/glance" fi elif [[ $service == c-* ]]; then - if [[ ! $file_to_parse =~ cinder ]]; then + if [[ ! $file_to_parse =~ $package_dir/cinder ]]; then file_to_parse="${file_to_parse} ${package_dir}/cinder" fi elif [[ $service == ceilometer-* ]]; then - if [[ ! $file_to_parse =~ ceilometer ]]; then + if [[ ! $file_to_parse =~ $package_dir/ceilometer ]]; then file_to_parse="${file_to_parse} ${package_dir}/ceilometer" fi elif [[ $service == s-* ]]; then - if [[ ! $file_to_parse =~ swift ]]; then + if [[ ! $file_to_parse =~ $package_dir/swift ]]; then file_to_parse="${file_to_parse} ${package_dir}/swift" fi elif [[ $service == n-* ]]; then - if [[ ! 
$file_to_parse =~ nova ]]; then + if [[ ! $file_to_parse =~ $package_dir/nova ]]; then file_to_parse="${file_to_parse} ${package_dir}/nova" fi elif [[ $service == g-* ]]; then - if [[ ! $file_to_parse =~ glance ]]; then + if [[ ! $file_to_parse =~ $package_dir/glance ]]; then file_to_parse="${file_to_parse} ${package_dir}/glance" fi elif [[ $service == key* ]]; then - if [[ ! $file_to_parse =~ keystone ]]; then + if [[ ! $file_to_parse =~ $package_dir/keystone ]]; then file_to_parse="${file_to_parse} ${package_dir}/keystone" fi elif [[ $service == q-* ]]; then - if [[ ! $file_to_parse =~ neutron ]]; then + if [[ ! $file_to_parse =~ $package_dir/neutron ]]; then file_to_parse="${file_to_parse} ${package_dir}/neutron" fi elif [[ $service == ir-* ]]; then - if [[ ! $file_to_parse =~ ironic ]]; then + if [[ ! $file_to_parse =~ $package_dir/ironic ]]; then file_to_parse="${file_to_parse} ${package_dir}/ironic" fi fi From 6f6e2fd2cc9517b09b98fc45912d21c8574a4f94 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 20 Mar 2015 12:16:28 +1100 Subject: [PATCH 0072/2941] Move contributing into HACKING contributing.rst doesn't add very much over the extant HACKING.rst, so move some of the unique bits into HACKING.rst and then link that into the documentation. Change-Id: I0530f38eda92f8dd374c0ec224556ace6e679f54 --- HACKING.rst | 54 ++++++++++++++++++--- doc/source/contributing.rst | 94 ------------------------------------- doc/source/hacking.rst | 1 + doc/source/index.rst | 4 +- 4 files changed, 51 insertions(+), 102 deletions(-) delete mode 100644 doc/source/contributing.rst create mode 100644 doc/source/hacking.rst diff --git a/HACKING.rst b/HACKING.rst index b3c82a37a8..4971db250b 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -25,23 +25,63 @@ __ contribute_ __ lp_ .. _lp: https://launchpad.net/~devstack +The `Gerrit review +queue `__ +is used for all commits. + The primary script in DevStack is ``stack.sh``, which performs the bulk of the work for DevStack's use cases. 
There is a subscript ``functions`` that contains generally useful shell functions and is used by a number of the scripts in DevStack. -The ``lib`` directory contains sub-scripts for projects or packages that ``stack.sh`` -sources to perform much of the work related to those projects. These sub-scripts -contain configuration defaults and functions to configure, start and stop the project -or package. These variables and functions are also used by related projects, -such as Grenade, to manage a DevStack installation. - A number of additional scripts can be found in the ``tools`` directory that may be useful in supporting DevStack installations. Of particular note are ``info.sh`` to collect and report information about the installed system, and ``install_prereqs.sh`` that handles installation of the prerequisite packages for DevStack. It is suitable, for example, to pre-load a system for making a snapshot. +Repo Layout +----------- + +The DevStack repo generally keeps all of the primary scripts at the root +level. + +``doc`` - Contains the Sphinx source for the documentation. +``tools/build_docs.sh`` is used to generate the HTML versions of the +DevStack scripts. A complete doc build can be run with ``tox -edocs``. + +``exercises`` - Contains the test scripts used to sanity-check and +demonstrate some OpenStack functions. These scripts know how to exit +early or skip services that are not enabled. + +``extras.d`` - Contains the dispatch scripts called by the hooks in +``stack.sh``, ``unstack.sh`` and ``clean.sh``. See :doc:`the plugins +docs ` for more information. + +``files`` - Contains a variety of otherwise lost files used in +configuring and operating DevStack. This includes templates for +configuration files and the system dependency information. This is also +where image files are downloaded and expanded if necessary. + +``lib`` - Contains the sub-scripts specific to each project. This is +where the work of managing a project's services is located. 
Each +top-level project (Keystone, Nova, etc) has a file here. Additionally +there are some for system services and project plugins. These +variables and functions are also used by related projects, such as +Grenade, to manage a DevStack installation. + +``samples`` - Contains a sample of the local files not included in the +DevStack repo. + +``tests`` - the DevStack test suite is rather sparse, mostly consisting +of test of specific fragile functions in the ``functions`` and +``functions-common`` files. + +``tools`` - Contains a collection of stand-alone scripts. While these +may reference the top-level DevStack configuration they can generally be +run alone. There are also some sub-directories to support specific +environments such as XenServer. + Scripts ------- @@ -249,6 +289,7 @@ Whitespace Rules Control Structure Rules ----------------------- + - then should be on the same line as the if - do should be on the same line as the for @@ -270,6 +311,7 @@ Example:: Variables and Functions ----------------------- + - functions should be used whenever possible for clarity - functions should use ``local`` variables as much as possible to ensure they are isolated from the rest of the environment diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst deleted file mode 100644 index 50c0100578..0000000000 --- a/doc/source/contributing.rst +++ /dev/null @@ -1,94 +0,0 @@ -============ -Contributing -============ - -DevStack uses the standard OpenStack contribution process as outlined in -`the OpenStack developer -guide `__. This -means that you will need to meet the requirements of the Contribututors -License Agreement (CLA). If you have already done that for another -OpenStack project you are good to go. - -Things To Know -============== - -| -| **Where Things Are** - -The official DevStack repository is located at -``git://git.openstack.org/openstack-dev/devstack.git``, replicated from -the repo maintained by Gerrit. 
GitHub also has a mirror at -``git://github.com/openstack-dev/devstack.git``. - -The `blueprint `__ and `bug -trackers `__ are on Launchpad. It -should be noted that DevStack generally does not use these as strongly -as other projects, but we're trying to change that. - -The `Gerrit review -queue `__ -is, however, used for all commits except for the text of this website. -That should also change in the near future. - -| -| **HACKING.rst** - -Like most OpenStack projects, DevStack includes a ``HACKING.rst`` file -that describes the layout, style and conventions of the project. Because -``HACKING.rst`` is in the main DevStack repo it is considered -authoritative. Much of the content on this page is taken from there. - -| -| **bashate Formatting** - -Around the time of the OpenStack Havana release we added a tool to do -style checking in DevStack similar to what pep8/flake8 do for Python -projects. It is still \_very\_ simplistic, focusing mostly on stray -whitespace to help prevent -1 on reviews that are otherwise acceptable. -Oddly enough it is called ``bashate``. It will be expanded to enforce -some of the documentation rules in comments that are used in formatting -the script pages for devstack.org and possibly even simple code -formatting. Run it on the entire project with ``./run_tests.sh``. - -Code -==== - -| -| **Repo Layout** - -The DevStack repo generally keeps all of the primary scripts at the root -level. - -``doc`` - Contains the Sphinx source for the documentation. -``tools/build_docs.sh`` is used to generate the HTML versions of the -DevStack scripts. A complete doc build can be run with ``tox -edocs``. - -``exercises`` - Contains the test scripts used to sanity-check and -demonstrate some OpenStack functions. These scripts know how to exit -early or skip services that are not enabled. - -``extras.d`` - Contains the dispatch scripts called by the hooks in -``stack.sh``, ``unstack.sh`` and ``clean.sh``. See :doc:`the plugins -docs ` for more information. 
- -``files`` - Contains a variety of otherwise lost files used in -configuring and operating DevStack. This includes templates for -configuration files and the system dependency information. This is also -where image files are downloaded and expanded if necessary. - -``lib`` - Contains the sub-scripts specific to each project. This is -where the work of managing a project's services is located. Each -top-level project (Keystone, Nova, etc) has a file here. Additionally -there are some for system services and project plugins. - -``samples`` - Contains a sample of the local files not included in the -DevStack repo. - -``tests`` - the DevStack test suite is rather sparse, mostly consisting -of test of specific fragile functions in the ``functions`` and -``functions-common`` files. - -``tools`` - Contains a collection of stand-alone scripts. While these -may reference the top-level DevStack configuration they can generally be -run alone. There are also some sub-directories to support specific -environments such as XenServer. diff --git a/doc/source/hacking.rst b/doc/source/hacking.rst new file mode 100644 index 0000000000..a2bcf4fd67 --- /dev/null +++ b/doc/source/hacking.rst @@ -0,0 +1 @@ +.. include:: ../../HACKING.rst diff --git a/doc/source/index.rst b/doc/source/index.rst index bac593de03..0ffb15c2a8 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -12,7 +12,7 @@ DevStack - an OpenStack Community Production plugins faq changes - contributing + hacking Quick Start ----------- @@ -139,7 +139,7 @@ FAQ Contributing ------------ -:doc:`Pitching in to make DevStack a better place ` +:doc:`Pitching in to make DevStack a better place ` Code ==== From 5686dbc45dbdc552080592e31bed63b0f201717e Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Mon, 9 Mar 2015 14:27:51 -0500 Subject: [PATCH 0073/2941] Add global venv enable/disable knob Adds USE_VENV to globally enable/disable use of virtual environments. 
ADDITIONAL_VENV_PACKAGES is used to manually add packages that do not appear in requirements.txt or test-requirements.txt to be installed into each venv. Database Python bindings are handled this way when a dataabse service is enabled. Change-Id: I9cf298b936fd10c95e2ce5f51aab0d49d4b7f37f --- doc/source/configuration.rst | 24 ++++++++++++++++++++++++ doc/source/index.rst | 2 ++ lib/database | 5 +++++ lib/databases/mysql | 3 +++ lib/databases/postgresql | 3 +++ lib/stack | 10 +++++++--- stack.sh | 1 + stackrc | 10 ++++++++++ 8 files changed, 55 insertions(+), 3 deletions(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 7d06658ee2..05a21cde5c 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -170,6 +170,30 @@ Libraries from Git LIBS_FROM_GIT=python-keystoneclient,oslo.config +Virtual Environments +-------------------- + + | *Default: ``USE_VENV=False``* + | Enable the use of Python virtual environments by setting ``USE_VENV`` + to ``True``. This will enable the creation of venvs for each project + that is defined in the ``PROJECT_VENV`` array. + + | *Default: ``PROJECT_VENV['']='.venv'* + | Each entry in the ``PROJECT_VENV`` array contains the directory name + of a venv to be used for the project. The array index is the project + name. Multiple projects can use the same venv if desired. + + :: + + PROJECT_VENV["glance"]=${GLANCE_DIR}.venv + + | *Default: ``ADDITIONAL_VENV_PACKAGES=""``* + | A comma-separated list of additional packages to be installed into each + venv. Often projects will not have certain packages listed in its + ``requirements.txt`` file because they are 'optional' requirements, + i.e. only needed for certain configurations. By default, the enabled + databases will have their Python bindings added when they are enabled. 
+ Enable Logging -------------- diff --git a/doc/source/index.rst b/doc/source/index.rst index bac593de03..d98e573dcc 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -210,6 +210,8 @@ Tools ----- * `tools/build\_docs.sh `__ +* `tools/build\_venv.sh `__ +* `tools/build\_wheels.sh `__ * `tools/create-stack-user.sh `__ * `tools/create\_userrc.sh `__ * `tools/fixup\_stuff.sh `__ diff --git a/lib/database b/lib/database index b114e9e4f6..ff1fafee26 100644 --- a/lib/database +++ b/lib/database @@ -109,6 +109,11 @@ function install_database { install_database_$DATABASE_TYPE } +# Install the database Python packages +function install_database_python { + install_database_python_$DATABASE_TYPE +} + # Configure and start the database function configure_database { configure_database_$DATABASE_TYPE diff --git a/lib/databases/mysql b/lib/databases/mysql index 70073c4c6f..d548db103e 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -151,9 +151,12 @@ EOF else exit_distro_not_supported "mysql installation" fi +} +function install_database_python_mysql { # Install Python client module pip_install MySQL-python + ADDITIONAL_VENV_PACKAGES+=",MySQL-python" } function database_connection_url_mysql { diff --git a/lib/databases/postgresql b/lib/databases/postgresql index e891a08754..a6bcf8c0a2 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -100,9 +100,12 @@ EOF else exit_distro_not_supported "postgresql installation" fi +} +function install_database_python_postgresql { # Install Python client module pip_install psycopg2 + ADDITIONAL_VENV_PACKAGES+=",psycopg2" } function database_connection_url_postgresql { diff --git a/lib/stack b/lib/stack index 9a509d8318..11dd87ca28 100644 --- a/lib/stack +++ b/lib/stack @@ -16,13 +16,17 @@ function stack_install_service { local service=$1 if type install_${service} >/dev/null 2>&1; then - if [[ -n ${PROJECT_VENV[$service]:-} ]]; then + if [[ ${USE_VENV} = True && -n ${PROJECT_VENV[$service]:-} ]]; 
then rm -rf ${PROJECT_VENV[$service]} - source $TOP_DIR/tools/build_venv.sh ${PROJECT_VENV[$service]} + source $TOP_DIR/tools/build_venv.sh ${PROJECT_VENV[$service]} ${ADDITIONAL_VENV_PACKAGES//,/ } export PIP_VIRTUAL_ENV=${PROJECT_VENV[$service]:-} + + # Install other OpenStack prereqs that might come from source repos + install_oslo + install_keystonemiddleware fi install_${service} - if [[ -n ${PROJECT_VENV[$service]:-} ]]; then + if [[ ${USE_VENV} = True && -n ${PROJECT_VENV[$service]:-} ]]; then unset PIP_VIRTUAL_ENV fi fi diff --git a/stack.sh b/stack.sh index d83952accc..9d4a2061ef 100755 --- a/stack.sh +++ b/stack.sh @@ -702,6 +702,7 @@ install_rpc_backend if is_service_enabled $DATABASE_BACKENDS; then install_database + install_database_python fi if is_service_enabled neutron; then diff --git a/stackrc b/stackrc index 02b12a36a0..3c4593a27f 100644 --- a/stackrc +++ b/stackrc @@ -104,6 +104,16 @@ elif [[ -f $RC_DIR/.localrc.auto ]]; then source $RC_DIR/.localrc.auto fi +# Enable use of Python virtual environments. Individual project use of +# venvs are controlled by the PROJECT_VENV array; every project with +# an entry in the array will be installed into the named venv. +# By default this will put each project into its own venv. 
+USE_VENV=$(trueorfalse False USE_VENV) + +# Add packages that need to be installed into a venv but are not in any +# requirements files here, in a comma-separated list +ADDITIONAL_VENV_PACKAGES=${ADDITIONAL_VENV_PACKAGES:-""} + # Configure wheel cache location export WHEELHOUSE=${WHEELHOUSE:-$DEST/.wheelhouse} export PIP_WHEEL_DIR=${PIP_WHEEL_DIR:-$WHEELHOUSE} From 7448edb031028af2e26e17bce6373d8f8929570d Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Wed, 11 Mar 2015 20:06:26 -0500 Subject: [PATCH 0074/2941] Keystone use new section for eventlet server options Configuration options that are only relevant when running keystone under eventlet (rather than Apache httpd) were moved to the [eventlet_server] and [eventlet_server_ssl] groups in the keystone.conf file to avoid confusion. This change updates devstack to use the new non-deprecated group for these options. Change-Id: I651a278d09f6a3a32b2e96fac87f1e5ea0f18c39 --- lib/keystone | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/lib/keystone b/lib/keystone index b7acb37931..97307b169b 100644 --- a/lib/keystone +++ b/lib/keystone @@ -227,21 +227,21 @@ function configure_keystone { # Set the URL advertised in the ``versions`` structure returned by the '/' route iniset $KEYSTONE_CONF DEFAULT public_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/" iniset $KEYSTONE_CONF DEFAULT admin_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/" - iniset $KEYSTONE_CONF DEFAULT admin_bind_host "$KEYSTONE_ADMIN_BIND_HOST" + iniset $KEYSTONE_CONF eventlet_server admin_bind_host "$KEYSTONE_ADMIN_BIND_HOST" # Register SSL certificates if provided if is_ssl_enabled_service key; then ensure_certificates KEYSTONE - iniset $KEYSTONE_CONF ssl enable True - iniset $KEYSTONE_CONF ssl certfile $KEYSTONE_SSL_CERT - iniset $KEYSTONE_CONF ssl keyfile $KEYSTONE_SSL_KEY + iniset $KEYSTONE_CONF eventlet_server_ssl enable True + iniset 
$KEYSTONE_CONF eventlet_server_ssl certfile $KEYSTONE_SSL_CERT + iniset $KEYSTONE_CONF eventlet_server_ssl keyfile $KEYSTONE_SSL_KEY fi if is_service_enabled tls-proxy; then # Set the service ports for a proxy to take the originals - iniset $KEYSTONE_CONF DEFAULT public_port $KEYSTONE_SERVICE_PORT_INT - iniset $KEYSTONE_CONF DEFAULT admin_port $KEYSTONE_AUTH_PORT_INT + iniset $KEYSTONE_CONF eventlet_server public_port $KEYSTONE_SERVICE_PORT_INT + iniset $KEYSTONE_CONF eventlet_server admin_port $KEYSTONE_AUTH_PORT_INT fi iniset $KEYSTONE_CONF DEFAULT admin_token "$SERVICE_TOKEN" @@ -317,7 +317,7 @@ function configure_keystone { iniset $KEYSTONE_CONF DEFAULT max_token_size 16384 - iniset $KEYSTONE_CONF DEFAULT admin_workers "$API_WORKERS" + iniset $KEYSTONE_CONF eventlet_server admin_workers "$API_WORKERS" # Public workers will use the server default, typically number of CPU. } From f8ae647f2eabfd06c1006a1c3c92a3ef78578cfa Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 17 Feb 2015 11:05:06 -0600 Subject: [PATCH 0075/2941] Install Keystone into its own venv Configure Apache to use the Keystone venv. 
Change-Id: I86f1bfdfd800f5b818bfb5c4d2750ff732049107 --- files/apache-keystone.template | 4 ++-- lib/keystone | 30 ++++++++++++++++++++++++------ 2 files changed, 26 insertions(+), 8 deletions(-) diff --git a/files/apache-keystone.template b/files/apache-keystone.template index 504dc01d21..1d20af7f90 100644 --- a/files/apache-keystone.template +++ b/files/apache-keystone.template @@ -2,7 +2,7 @@ Listen %PUBLICPORT% Listen %ADMINPORT% - WSGIDaemonProcess keystone-public processes=5 threads=1 user=%USER% display-name=%{GROUP} + WSGIDaemonProcess keystone-public processes=5 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% WSGIProcessGroup keystone-public WSGIScriptAlias / %PUBLICWSGI% WSGIApplicationGroup %{GLOBAL} @@ -18,7 +18,7 @@ Listen %ADMINPORT% - WSGIDaemonProcess keystone-admin processes=5 threads=1 user=%USER% display-name=%{GROUP} + WSGIDaemonProcess keystone-admin processes=5 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% WSGIProcessGroup keystone-admin WSGIScriptAlias / %ADMINWSGI% WSGIApplicationGroup %{GLOBAL} diff --git a/lib/keystone b/lib/keystone index b7acb37931..0f9b512eb7 100644 --- a/lib/keystone +++ b/lib/keystone @@ -37,8 +37,16 @@ set +o xtrace # Set up default directories GITDIR["python-keystoneclient"]=$DEST/python-keystoneclient GITDIR["keystonemiddleware"]=$DEST/keystonemiddleware - KEYSTONE_DIR=$DEST/keystone + +# Keystone virtual environment +if [[ ${USE_VENV} = True ]]; then + PROJECT_VENV["keystone"]=${KEYSTONE_DIR}.venv + KEYSTONE_BIN_DIR=${PROJECT_VENV["keystone"]}/bin +else + KEYSTONE_BIN_DIR=$(get_python_exec_prefix) +fi + KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone} KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf KEYSTONE_PASTE_INI=${KEYSTONE_PASTE_INI:-$KEYSTONE_CONF_DIR/keystone-paste.ini} @@ -144,6 +152,7 @@ function _config_keystone_apache_wsgi { local keystone_keyfile="" local keystone_service_port=$KEYSTONE_SERVICE_PORT local keystone_auth_port=$KEYSTONE_AUTH_PORT + local venv_path="" if 
is_ssl_enabled_service key; then keystone_ssl="SSLEngine On" @@ -154,6 +163,9 @@ function _config_keystone_apache_wsgi { keystone_service_port=$KEYSTONE_SERVICE_PORT_INT keystone_auth_port=$KEYSTONE_AUTH_PORT_INT fi + if [[ ${USE_VENV} = True ]]; then + venv_path="python-path=${PROJECT_VENV["keystone"]}/lib/python2.7/site-packages" + fi # copy proxy vhost and wsgi file sudo cp $KEYSTONE_DIR/httpd/keystone.py $KEYSTONE_WSGI_DIR/main @@ -169,7 +181,8 @@ function _config_keystone_apache_wsgi { s|%SSLENGINE%|$keystone_ssl|g; s|%SSLCERTFILE%|$keystone_certfile|g; s|%SSLKEYFILE%|$keystone_keyfile|g; - s|%USER%|$STACK_USER|g + s|%USER%|$STACK_USER|g; + s|%VIRTUALENV%|$venv_path|g " -i $keystone_apache_conf } @@ -460,20 +473,20 @@ function init_keystone { recreate_database keystone # Initialize keystone database - $KEYSTONE_DIR/bin/keystone-manage db_sync + $KEYSTONE_BIN_DIR/keystone-manage db_sync local extension_value for extension_value in ${KEYSTONE_EXTENSIONS//,/ }; do if [[ -z "${extension_value}" ]]; then continue fi - $KEYSTONE_DIR/bin/keystone-manage db_sync --extension "${extension_value}" + $KEYSTONE_BIN_DIR/keystone-manage db_sync --extension "${extension_value}" done if [[ "$KEYSTONE_TOKEN_FORMAT" != "uuid" ]]; then # Set up certificates rm -rf $KEYSTONE_CONF_DIR/ssl - $KEYSTONE_DIR/bin/keystone-manage pki_setup + $KEYSTONE_BIN_DIR/keystone-manage pki_setup # Create cache dir sudo install -d -o $STACK_USER $KEYSTONE_AUTH_CACHE_DIR @@ -492,9 +505,14 @@ function install_keystoneclient { # install_keystonemiddleware() - Collect source and prepare function install_keystonemiddleware { + # install_keystonemiddleware() is called when keystonemiddleware is needed + # to provide an opportunity to install it from the source repo if use_library_from_git "keystonemiddleware"; then git_clone_by_name "keystonemiddleware" setup_dev_lib "keystonemiddleware" + else + # When not installing from repo, keystonemiddleware is still needed... 
+ pip_install keystonemiddleware fi } @@ -542,7 +560,7 @@ function start_keystone { tail_log key-access /var/log/$APACHE_NAME/keystone_access.log else # Start Keystone in a screen window - run_process key "$KEYSTONE_DIR/bin/keystone-all --config-file $KEYSTONE_CONF" + run_process key "$KEYSTONE_BIN_DIR/keystone-all --config-file $KEYSTONE_CONF" fi echo "Waiting for keystone to start..." From aed607920542ee27f87f8e5cdb659faf1bde00eb Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 17 Feb 2015 15:38:16 -0600 Subject: [PATCH 0076/2941] Install Glance into its own venv Change-Id: Ib46b89dafa1fc81a2d0717150203b848b87ea323 --- lib/glance | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/lib/glance b/lib/glance index d16b3456a3..00f07540e8 100755 --- a/lib/glance +++ b/lib/glance @@ -31,8 +31,16 @@ set +o xtrace # Set up default directories GITDIR["python-glanceclient"]=$DEST/python-glanceclient GITDIR["glance_store"]=$DEST/glance_store - GLANCE_DIR=$DEST/glance + +# Glance virtual environment +if [[ ${USE_VENV} = True ]]; then + PROJECT_VENV["glance"]=${GLANCE_DIR}.venv + GLANCE_BIN_DIR=${PROJECT_VENV["glance"]}/bin +else + GLANCE_BIN_DIR=$(get_python_exec_prefix) +fi + GLANCE_CACHE_DIR=${GLANCE_CACHE_DIR:=$DATA_DIR/glance/cache} GLANCE_IMAGE_DIR=${GLANCE_IMAGE_DIR:=$DATA_DIR/glance/images} GLANCE_AUTH_CACHE_DIR=${GLANCE_AUTH_CACHE_DIR:-/var/cache/glance} @@ -47,13 +55,6 @@ GLANCE_CACHE_CONF=$GLANCE_CONF_DIR/glance-cache.conf GLANCE_POLICY_JSON=$GLANCE_CONF_DIR/policy.json GLANCE_SCHEMA_JSON=$GLANCE_CONF_DIR/schema-image.json -# Support entry points installation of console scripts -if [[ -d $GLANCE_DIR/bin ]]; then - GLANCE_BIN_DIR=$GLANCE_DIR/bin -else - GLANCE_BIN_DIR=$(get_python_exec_prefix) -fi - if is_ssl_enabled_service "glance" || is_service_enabled tls-proxy; then GLANCE_SERVICE_PROTOCOL="https" fi From 6aaad5f7239c8e199fde0d1e5fca14f0a77164f2 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 18 Feb 2015 07:09:04 -0600 
Subject: [PATCH 0077/2941] Install Cinder into its own venv rootwrap is horribly called indirectly via PATH. The choice, other than fixing such nonsense, is to force the path in sudo. Change-Id: Idac07455359b347e1c617736a515c2261b56d871 --- lib/cinder | 31 +++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/lib/cinder b/lib/cinder index 3c3fff3087..be4ef75208 100644 --- a/lib/cinder +++ b/lib/cinder @@ -39,8 +39,16 @@ fi # set up default directories GITDIR["python-cinderclient"]=$DEST/python-cinderclient - CINDER_DIR=$DEST/cinder + +# Cinder virtual environment +if [[ ${USE_VENV} = True ]]; then + PROJECT_VENV["cinder"]=${CINDER_DIR}.venv + CINDER_BIN_DIR=${PROJECT_VENV["cinder"]}/bin +else + CINDER_BIN_DIR=$(get_python_exec_prefix) +fi + CINDER_STATE_PATH=${CINDER_STATE_PATH:=$DATA_DIR/cinder} CINDER_AUTH_CACHE_DIR=${CINDER_AUTH_CACHE_DIR:-/var/cache/cinder} @@ -57,13 +65,6 @@ CINDER_SERVICE_PORT=${CINDER_SERVICE_PORT:-8776} CINDER_SERVICE_PORT_INT=${CINDER_SERVICE_PORT_INT:-18776} CINDER_SERVICE_PROTOCOL=${CINDER_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} -# Support entry points installation of console scripts -if [[ -d $CINDER_DIR/bin ]]; then - CINDER_BIN_DIR=$CINDER_DIR/bin -else - CINDER_BIN_DIR=$(get_python_exec_prefix) -fi - # Default backends # The backend format is type:name where type is one of the supported backend @@ -164,12 +165,11 @@ function cleanup_cinder { fi } +# Deploy new rootwrap filters files and configure sudo # configure_cinder_rootwrap() - configure Cinder's rootwrap function configure_cinder_rootwrap { - # Set the paths of certain binaries - local cinder_rootwrap=$(get_rootwrap_location cinder) + local cinder_rootwrap=$CINDER_BIN_DIR/cinder-rootwrap - # Deploy new rootwrap filters files (owned by root). 
# Wipe any existing rootwrap.d files first if [[ -d $CINDER_CONF_DIR/rootwrap.d ]]; then sudo rm -rf $CINDER_CONF_DIR/rootwrap.d @@ -188,10 +188,17 @@ function configure_cinder_rootwrap { # Set up the rootwrap sudoers for cinder local tempfile=`mktemp` - echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_CSUDOER_CMD" >$tempfile + echo "Defaults:$STACK_USER secure_path=$CINDER_BIN_DIR:/sbin:/usr/sbin:/usr/bin:/bin:/usr/local/sbin:/usr/local/bin" >$tempfile + echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_CSUDOER_CMD" >>$tempfile chmod 0440 $tempfile sudo chown root:root $tempfile sudo mv $tempfile /etc/sudoers.d/cinder-rootwrap + + # So rootwrap and PATH are broken beyond belief. WTF relies on a SECURE operation + # to blindly follow PATH??? We learned that was a bad idea in the 80's! + # So to fix this in a venv, we must exploit the very hole we want to close by dropping + # a copy of the venv rootwrap binary into /usr/local/bin. + #sudo cp -p $cinder_rootwrap /usr/local/bin } # configure_cinder() - Set config files, create data dirs, etc From 2f6576bfaf7f43d9bf820e3e5ecc0b7fd136ce3c Mon Sep 17 00:00:00 2001 From: Geronimo Orozco Date: Thu, 19 Mar 2015 12:08:23 -0600 Subject: [PATCH 0078/2941] Creates SWIFT_DATA_DIR if it does not exist If SWIFT_DATA_DIR is set on local.conf we need to make sure to create the directory with proper permissions Change-Id: If29fa53f01b4c0c8a881ec3734383ecffac334ce Closes-Bug: 1302893 --- lib/swift | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/lib/swift b/lib/swift index af19c68a1b..0fd671160e 100644 --- a/lib/swift +++ b/lib/swift @@ -302,6 +302,7 @@ function configure_swift { local node_number local swift_node_config local swift_log_dir + local user_group # Make sure to kill all swift processes first swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true @@ -505,10 +506,12 @@ EOF fi fi + local user_group=$(id -g ${STACK_USER}) + sudo install -d -o ${STACK_USER} -g ${user_group} ${SWIFT_DATA_DIR} + 
local swift_log_dir=${SWIFT_DATA_DIR}/logs - rm -rf ${swift_log_dir} - mkdir -p ${swift_log_dir}/hourly - sudo chown -R ${STACK_USER}:adm ${swift_log_dir} + sudo rm -rf ${swift_log_dir} + sudo install -d -o ${STACK_USER} -g adm ${swift_log_dir}/hourly if [[ $SYSLOG != "False" ]]; then sed "s,%SWIFT_LOGDIR%,${swift_log_dir}," $FILES/swift/rsyslog.conf | sudo \ From 10ba751a784f92a78933137f5b1baf09a3daf930 Mon Sep 17 00:00:00 2001 From: Mitsuhiro SHIGEMATSU Date: Sat, 21 Mar 2015 06:59:05 +0900 Subject: [PATCH 0079/2941] Fix typo in devstack/stack.sh Change-Id: Ie13f1ae7fb5a46bb654aa3ab526933107c485b0b --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index d83952accc..0e22804db9 100755 --- a/stack.sh +++ b/stack.sh @@ -23,7 +23,7 @@ # check if someone has invoked with "sh" if [[ "${POSIXLY_CORRECT}" == "y" ]]; then - echo "You appear to be running bash in POSIX compatability mode." + echo "You appear to be running bash in POSIX compatibility mode." echo "devstack uses bash features. \"./stack.sh\" should do the right thing" exit 1 fi From c7df4df0b18a50313497bfca31af04e5475f780f Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 20 Mar 2015 12:18:52 +1100 Subject: [PATCH 0080/2941] Add some discussion about review criteria An attempt to layout some of the ratioanle behind devstack reviews. Change-Id: I9f4878653b5c746159206cd44b49255d9fdd32ef --- HACKING.rst | 45 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/HACKING.rst b/HACKING.rst index 4971db250b..a40af54b45 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -320,3 +320,48 @@ Variables and Functions - function names should_have_underscores, NotCamelCase. 
- functions should be declared as per the regex ^function foo {$ with code starting on the next line + + +Review Criteria +=============== + +There are some broad criteria that will be followed when reviewing +your change + +* **Is it passing tests** -- your change will not be reviewed + thoroughly unless the official CI has run successfully against it. + +* **Does this belong in DevStack** -- DevStack reviewers have a + default position of "no" but are ready to be convinced by your + change. + + For very large changes, you should consider :doc:`the plugins system + ` to see if your code is better abstracted from the main + repository. + + For smaller changes, you should always consider if the change can be + encapsulated by per-user settings in ``local.conf``. A common example + is adding a simple config-option to an ``ini`` file. Specific flags + are not usually required for this, although adding documentation + about how to achieve a larger goal (which might include turning on + various settings, etc) is always welcome. + +* **Work-arounds** -- often things get broken and DevStack can be in a + position to fix them. Work-arounds are fine, but should be + presented in the context of fixing the root-cause of the problem. + This means it is well-commented in the code and the change-log and + most likely includes links to changes or bugs that fix the + underlying problem. + +* **Should this be upstream** -- DevStack generally does not override + default choices provided by projects and attempts to not + unexpectedly modify behaviour. + +* **Context in commit messages** -- DevStack touches many different + areas and reviewers need context around changes to make good + decisions. We also always want it to be clear to someone -- perhaps + even years from now -- why we were motivated to make a change at the + time. + +* **Reviewers** -- please see ``MAINTAINERS.rst`` for a list of people + that should be added to reviews of various sub-systems. 
From ec47bc1d720852ca07f1af2143c2a6c1353e9306 Mon Sep 17 00:00:00 2001 From: Wiekus Beukes Date: Thu, 19 Mar 2015 08:20:38 -0700 Subject: [PATCH 0081/2941] Add support for Oracle Linux 7 and later. Most of the changes revolves around using MySQL rather than MariaDB, plus enabling the addon repos on public-yum.oracle.com. The patch just touch the areas where there is a divergence between the Fedora and Oracle distributions and in all other cases the is_fedora will result in the correct decision to be made and left as is. Collapsed the is_suse and is_oraclelinux into a single check in configure_database_mysql and cleanup_database_mysql Added Oracle Linux to MAINTAINERS.rst Rather than duplicating most of the Redhat version check code, added a check in the block to do the determination if it is Oracle Linux Change-Id: I5f1f15106329eec67aa008b17847fa44863f243f --- MAINTAINERS.rst | 4 ++++ functions-common | 19 +++++++++++++++++-- lib/databases/mysql | 24 ++++++++++++------------ stack.sh | 4 ++++ 4 files changed, 37 insertions(+), 14 deletions(-) diff --git a/MAINTAINERS.rst b/MAINTAINERS.rst index a376eb0428..20e8655d69 100644 --- a/MAINTAINERS.rst +++ b/MAINTAINERS.rst @@ -90,3 +90,7 @@ Zaqar (Marconi) * Flavio Percoco * Malini Kamalambal + +Oracle Linux +~~~~~~~~~~~~ +* Wiekus Beukes diff --git a/functions-common b/functions-common index 3dae8147b5..d00d4a7c6f 100644 --- a/functions-common +++ b/functions-common @@ -246,6 +246,7 @@ function GetOSVersion { # CentOS Linux release 6.0 (Final) # Fedora release 16 (Verne) # XenServer release 6.2.0-70446c (xenenterprise) + # Oracle Linux release 7 os_CODENAME="" for r in "Red Hat" CentOS Fedora XenServer; do os_VENDOR=$r @@ -259,6 +260,9 @@ function GetOSVersion { fi os_VENDOR="" done + if [ "$os_VENDOR" = "Red Hat" ] && [[ -r /etc/oracle-release ]]; then + os_VENDOR=OracleLinux + fi os_PACKAGE="rpm" elif [[ -r /etc/SuSE-release ]]; then for r in openSUSE "SUSE Linux"; do @@ -310,7 +314,7 @@ function GetDistro { fi elif 
[[ "$os_VENDOR" =~ (Red Hat) || \ "$os_VENDOR" =~ (CentOS) || \ - "$os_VENDOR" =~ (OracleServer) ]]; then + "$os_VENDOR" =~ (OracleLinux) ]]; then # Drop the . release as we assume it's compatible DISTRO="rhel${os_RELEASE::1}" elif [[ "$os_VENDOR" =~ (XenServer) ]]; then @@ -328,6 +332,17 @@ function is_arch { [[ "$(uname -m)" == "$1" ]] } +# Determine if current distribution is an Oracle distribution +# is_oraclelinux +function is_oraclelinux { + if [[ -z "$os_VENDOR" ]]; then + GetOSVersion + fi + + [ "$os_VENDOR" = "OracleLinux" ] +} + + # Determine if current distribution is a Fedora-based distribution # (Fedora, RHEL, CentOS, etc). # is_fedora @@ -337,7 +352,7 @@ function is_fedora { fi [ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || \ - [ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "OracleServer" ] + [ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "OracleLinux" ] } diff --git a/lib/databases/mysql b/lib/databases/mysql index 70073c4c6f..dabd7d05fe 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -16,7 +16,7 @@ register_database mysql # Linux distros, thank you for being incredibly consistent MYSQL=mysql -if is_fedora; then +if is_fedora && ! 
is_oraclelinux; then MYSQL=mariadb fi @@ -32,12 +32,12 @@ function cleanup_database_mysql { sudo rm -rf /var/lib/mysql sudo rm -rf /etc/mysql return + elif is_suse || is_oraclelinux; then + uninstall_package mysql-community-server + sudo rm -rf /var/lib/mysql elif is_fedora; then uninstall_package mariadb-server sudo rm -rf /var/lib/mysql - elif is_suse; then - uninstall_package mysql-community-server - sudo rm -rf /var/lib/mysql else return fi @@ -56,12 +56,12 @@ function configure_database_mysql { if is_ubuntu; then my_conf=/etc/mysql/my.cnf mysql=mysql + elif is_suse || is_oraclelinux; then + my_conf=/etc/my.cnf + mysql=mysql elif is_fedora; then mysql=mariadb my_conf=/etc/my.cnf - elif is_suse; then - my_conf=/etc/my.cnf - mysql=mysql else exit_distro_not_supported "mysql configuration" fi @@ -140,14 +140,14 @@ EOF chmod 0600 $HOME/.my.cnf fi # Install mysql-server - if is_fedora; then - install_package mariadb-server - elif is_ubuntu; then - install_package mysql-server - elif is_suse; then + if is_suse || is_oraclelinux; then if ! is_package_installed mariadb; then install_package mysql-community-server fi + elif is_fedora; then + install_package mariadb-server + elif is_ubuntu; then + install_package mysql-server else exit_distro_not_supported "mysql installation" fi diff --git a/stack.sh b/stack.sh index d83952accc..a475aab1b0 100755 --- a/stack.sh +++ b/stack.sh @@ -278,6 +278,10 @@ EOF die $LINENO "Error installing RDO repo, cannot continue" fi + if is_oraclelinux; then + sudo yum-config-manager --enable ol7_optional_latest ol7_addons ol7_MySQL56 + fi + fi From a519f429cf3cd90f06dd50b95608ec18f81d400c Mon Sep 17 00:00:00 2001 From: Mahito OGURA Date: Mon, 23 Mar 2015 15:19:57 +0900 Subject: [PATCH 0082/2941] Fix unstack.sh to stop stop_dstat when it is disabled. unstack.sh always stop dstat process, however unstack.sh show 'dstat: no process found' when dstat is disabled. This patch stop function of stop_dstat, when dstat is disabled. 
Change-Id: If9054826bed8a7fedd4f77ef4efef2c0ccd7f16e Closes-Bug: #1435148 --- unstack.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/unstack.sh b/unstack.sh index a6aeec5a25..fdd63fbcd0 100755 --- a/unstack.sh +++ b/unstack.sh @@ -173,7 +173,9 @@ if is_service_enabled trove; then cleanup_trove fi -stop_dstat +if is_service_enabled dstat; then + stop_dstat +fi # Clean up the remainder of the screen processes SCREEN=$(which screen) From d2287cfb9f4dfac71f14f3374514f5b8c2b0c70b Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Sun, 22 Mar 2015 07:20:06 -0700 Subject: [PATCH 0083/2941] Config driver: use "True" instead of "always" Commit c12a78b35dc910fa97df888960ef2b9a64557254 has set the "always" flag to be deprecated in liberty. This moves to using "True" instead. Change-Id: Idecf7966968369d2f372abffcab85fbf9aa097c7 --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 7fbade0713..502bb35f05 100644 --- a/lib/nova +++ b/lib/nova @@ -81,7 +81,7 @@ NOVA_ROOTWRAP=$(get_rootwrap_location nova) # Option to enable/disable config drive # NOTE: Set FORCE_CONFIG_DRIVE="False" to turn OFF config drive -FORCE_CONFIG_DRIVE=${FORCE_CONFIG_DRIVE:-"always"} +FORCE_CONFIG_DRIVE=${FORCE_CONFIG_DRIVE:-"True"} # Nova supports pluggable schedulers. The default ``FilterScheduler`` # should work in most cases. From 435cd4dc6e4e5bbd0201f85524e21d83a7407719 Mon Sep 17 00:00:00 2001 From: Sergey Reshetnyak Date: Sun, 1 Mar 2015 12:44:02 +0300 Subject: [PATCH 0084/2941] Add support running sahara in distributed mode Sahara supports running in distributed mode. Need to add this ability in devstack. 
Changes: * configure rpc backend for sahara by default * added sahara-api service for running api side * added sahara-eng service for running engine side Change-Id: I4fb9d5746b08c9b1fee0d283bcf448e47a87089b --- lib/sahara | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/lib/sahara b/lib/sahara index a965f55f68..0651b0a633 100644 --- a/lib/sahara +++ b/lib/sahara @@ -113,12 +113,13 @@ function configure_sahara { configure_auth_token_middleware $SAHARA_CONF_FILE sahara $SAHARA_AUTH_CACHE_DIR + iniset_rpc_backend sahara $SAHARA_CONF_FILE DEFAULT + # Set configuration to send notifications if is_service_enabled ceilometer; then iniset $SAHARA_CONF_FILE DEFAULT enable_notifications "true" iniset $SAHARA_CONF_FILE DEFAULT notification_driver "messaging" - iniset_rpc_backend sahara $SAHARA_CONF_FILE fi iniset $SAHARA_CONF_FILE DEFAULT verbose True @@ -203,12 +204,16 @@ function sahara_register_images { # start_sahara() - Start running processes, including screen function start_sahara { run_process sahara "$SAHARA_BIN_DIR/sahara-all --config-file $SAHARA_CONF_FILE" + run_process sahara-api "$SAHARA_BIN_DIR/sahara-api --config-file $SAHARA_CONF_FILE" + run_process sahara-eng "$SAHARA_BIN_DIR/sahara-engine --config-file $SAHARA_CONF_FILE" } # stop_sahara() - Stop running processes function stop_sahara { # Kill the Sahara screen windows stop_process sahara + stop_process sahara-api + stop_process sahara-eng } From 7c57306c33630bd5e8a99b9afbd27b45b1157959 Mon Sep 17 00:00:00 2001 From: Aishwarya Thangappa Date: Wed, 18 Feb 2015 01:51:13 -0800 Subject: [PATCH 0085/2941] Added devstack-with-lbaas-v2 installation documentation This document explains the steps to configure Load-Balancer in kilo. 
Change-Id: Ic8c2f3cca80e331b7275f689051c07d863d918ea Depends-On: I64a94aeeabe6357b5ea7796e34c9306c55c9ae67 --- doc/source/guides/devstack-with-lbaas-v2.rst | 99 ++++++++++++++++++++ doc/source/index.rst | 1 + 2 files changed, 100 insertions(+) create mode 100644 doc/source/guides/devstack-with-lbaas-v2.rst diff --git a/doc/source/guides/devstack-with-lbaas-v2.rst b/doc/source/guides/devstack-with-lbaas-v2.rst new file mode 100644 index 0000000000..f67978310d --- /dev/null +++ b/doc/source/guides/devstack-with-lbaas-v2.rst @@ -0,0 +1,99 @@ +Configure Load-Balancer in Kilo +================================= + +The Kilo release of OpenStack will support Version 2 of the neutron load balancer. Until now, using OpenStack `LBaaS V2 `_ has required a good understanding of neutron and LBaaS architecture and several manual steps. + + +Phase 1: Create DevStack + 2 nova instances +-------------------------------------------- + +First, set up a vm of your choice with at least 8 GB RAM and 16 GB disk space, make sure it is updated. Install git and any other developer tools you find useful. + +Install devstack + + :: + + git clone https://git.openstack.org/openstack-dev/devstack + cd devstack + + +Edit your `local.conf` to look like + + :: + + [[local|localrc]] + # Load the external LBaaS plugin. 
+ enable_plugin neutron-lbaas https://git.openstack.org/openstack/neutron-lbaas + + # ===== BEGIN localrc ===== + DATABASE_PASSWORD=password + ADMIN_PASSWORD=password + SERVICE_PASSWORD=password + SERVICE_TOKEN=password + RABBIT_PASSWORD=password + # Enable Logging + LOGFILE=$DEST/logs/stack.sh.log + VERBOSE=True + LOG_COLOR=True + SCREEN_LOGDIR=$DEST/logs + # Pre-requisite + ENABLED_SERVICES=rabbit,mysql,key + # Horizon + ENABLED_SERVICES+=,horizon + # Nova + ENABLED_SERVICES+=,n-api,n-crt,n-obj,n-cpu,n-cond,n-sch + IMAGE_URLS+=",https://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img" + # Glance + ENABLED_SERVICES+=,g-api,g-reg + # Neutron + ENABLED_SERVICES+=,q-svc,q-agt,q-dhcp,q-l3,q-meta + # Enable LBaaS V2 + ENABLED_SERVICES+=,q-lbaasv2 + # Cinder + ENABLED_SERVICES+=,c-api,c-vol,c-sch + # Tempest + ENABLED_SERVICES+=,tempest + # ===== END localrc ===== + +Run stack.sh and do some sanity checks + + :: + + ./stack.sh + . ./openrc + + neutron net-list # should show public and private networks + +Create two nova instances that we can use as test http servers: + + :: + + #create nova instances on private network + nova boot --image $(nova image-list | awk '/ cirros-0.3.0-x86_64-disk / {print $2}') --flavor 1 --nic net-id=$(neutron net-list | awk '/ private / {print $2}') node1 + nova boot --image $(nova image-list | awk '/ cirros-0.3.0-x86_64-disk / {print $2}') --flavor 1 --nic net-id=$(neutron net-list | awk '/ private / {print $2}') node2 + nova list # should show the nova instances just created + + #add secgroup rule to allow ssh etc.. + neutron security-group-rule-create default --protocol icmp + neutron security-group-rule-create default --protocol tcp --port-range-min 22 --port-range-max 22 + neutron security-group-rule-create default --protocol tcp --port-range-min 80 --port-range-max 80 + +Set up a simple web server on each of these instances. 
ssh into each instance (username 'cirros', password 'cubswin:)') and run + + :: + + MYIP=$(ifconfig eth0|grep 'inet addr'|awk -F: '{print $2}'| awk '{print $1}') + while true; do echo -e "HTTP/1.0 200 OK\r\n\r\nWelcome to $MYIP" | sudo nc -l -p 80 ; done& + +Phase 2: Create your load balancers +------------------------------------ + + :: + + neutron lbaas-loadbalancer-create --name lb1 private-subnet + neutron lbaas-listener-create --loadbalancer lb1 --protocol HTTP --protocol-port 80 --name listener1 + neutron lbaas-pool-create --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --name pool1 + neutron lbaas-member-create --subnet private-subnet --address 10.0.0.3 --protocol-port 80 pool1 + neutron lbaas-member-create --subnet private-subnet --address 10.0.0.5 --protocol-port 80 pool1 + +Please note here that the "10.0.0.3" and "10.0.0.5" in the above commands are the IPs of the nodes (in my test run-thru, they were actually 10.2 and 10.4), and the address of the created LB will be reported as "vip_address" from the lbaas-loadbalancer-create, and a quick test of that LB is "curl that-lb-ip", which should alternate between showing the IPs of the two nodes. diff --git a/doc/source/index.rst b/doc/source/index.rst index 10f4355c07..84ef6fd638 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -68,6 +68,7 @@ Walk through various setups used by stackers guides/neutron guides/devstack-with-nested-kvm guides/nova + guides/devstack-with-lbaas-v2 All-In-One Single VM -------------------- From d01ff96e3f330684f3f1041ce6e08f729cf4006c Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Mon, 23 Mar 2015 15:05:39 -0700 Subject: [PATCH 0086/2941] Stop using deprecated rabbit related config options Stop services from throwing the following warning: Option "rabbit_password" from group "DEFAULT" is deprecated. Use option "rabbit_password" from group "oslo_messaging_rabbit". 
Same for rabbit_hosts and rabbit_userid Change-Id: I7da503ef50b3653b888cb243caa74b4253a495e2 --- lib/rpc_backend | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/rpc_backend b/lib/rpc_backend index 3d4ef762a7..3033cbe08e 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -272,9 +272,9 @@ function iniset_rpc_backend { fi elif is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then iniset $file $section rpc_backend "rabbit" - iniset $file $section rabbit_hosts $RABBIT_HOST - iniset $file $section rabbit_password $RABBIT_PASSWORD - iniset $file $section rabbit_userid $RABBIT_USERID + iniset $file oslo_messaging_rabbit rabbit_hosts $RABBIT_HOST + iniset $file oslo_messaging_rabbit rabbit_password $RABBIT_PASSWORD + iniset $file oslo_messaging_rabbit rabbit_userid $RABBIT_USERID fi } From 0f20ad41f3bb7f674a85b341556386cea492830e Mon Sep 17 00:00:00 2001 From: Li Ma Date: Mon, 23 Mar 2015 23:05:15 -0700 Subject: [PATCH 0087/2941] Add pluggability for matchmaker-redis This commit introduces stevedore to matchmaker-redis: https://review.openstack.org/#/c/161615/ Change-Id: I547157c02c9e0536add6949910a911540f27fb2d --- lib/rpc_backend | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/rpc_backend b/lib/rpc_backend index 3d4ef762a7..8a4a0908cd 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -251,8 +251,7 @@ function iniset_rpc_backend { iniset $file $section rpc_backend "zmq" iniset $file $section rpc_zmq_host `hostname` if [ "$ZEROMQ_MATCHMAKER" == "redis" ]; then - iniset $file $section rpc_zmq_matchmaker \ - oslo.messaging._drivers.matchmaker_redis.MatchMakerRedis + iniset $file $section rpc_zmq_matchmaker "redis" MATCHMAKER_REDIS_HOST=${MATCHMAKER_REDIS_HOST:-127.0.0.1} iniset $file matchmaker_redis host $MATCHMAKER_REDIS_HOST else From 16e0656bd0def2ea37b9020109aa9cdf8146e89b Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Thu, 19 Mar 2015 15:32:20 -0700 Subject: [PATCH 
0088/2941] Until we prebuild wheels don't build wheels for deb installed libs Until we have the ability to prebuild wheels so we don't spend time compiling them during devstack runs, stop building wheels for libraries that we still install from deb packages. Long term we want to move away from using deb packages to install python packages and use wheels. But until the wheel building logic is in place so we don't have to compile wheels on each devstack run, don't waste time compiling python libraries that we just use the packaged version of anyway. Change-Id: I962e2cfff223f7ab8efd5766ee0ef22229ab27bf --- files/venv-requirements.txt | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/files/venv-requirements.txt b/files/venv-requirements.txt index e473a2fe02..73d05793ce 100644 --- a/files/venv-requirements.txt +++ b/files/venv-requirements.txt @@ -1,10 +1,11 @@ +# Once we can prebuild wheels before a devstack run, uncomment the skipped libraries cryptography -lxml +# lxml # still install from from packages MySQL-python -netifaces +# netifaces # still install from packages #numpy # slowest wheel by far, stop building until we are actually using the output posix-ipc -psycopg2 +# psycopg # still install from packages pycrypto pyOpenSSL PyYAML From 668749ae8582bd19e22bb1d0ec46cf3a18cc1bf0 Mon Sep 17 00:00:00 2001 From: Peter Stachowski Date: Tue, 24 Mar 2015 18:00:29 +0000 Subject: [PATCH 0089/2941] Allow external ENV setup for conf files In order to keep redstack in synch with devstack, the conf files referenced within devstack need to be able to be declared externally. This change allows *_CONF values to be specified, and uses the original values as defaults. 
Change-Id: Ic67f6347b92b05619103a77e9f7ea80a299a6869 --- lib/trove | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/lib/trove b/lib/trove index 4c5a438fb8..5dd4f23611 100644 --- a/lib/trove +++ b/lib/trove @@ -33,12 +33,12 @@ fi GITDIR["python-troveclient"]=$DEST/python-troveclient TROVE_DIR=$DEST/trove -TROVE_CONF_DIR=/etc/trove -TROVE_CONF=$TROVE_CONF_DIR/trove.conf -TROVE_TASKMANAGER_CONF=$TROVE_CONF_DIR/trove-taskmanager.conf -TROVE_CONDUCTOR_CONF=$TROVE_CONF_DIR/trove-conductor.conf -TROVE_GUESTAGENT_CONF=$TROVE_CONF_DIR/trove-guestagent.conf -TROVE_API_PASTE_INI=$TROVE_CONF_DIR/api-paste.ini +TROVE_CONF_DIR=${TROVE_CONF_DIR:-/etc/trove} +TROVE_CONF=${TROVE_CONF:-$TROVE_CONF_DIR/trove.conf} +TROVE_TASKMANAGER_CONF=${TROVE_TASKMANAGER_CONF:-$TROVE_CONF_DIR/trove-taskmanager.conf} +TROVE_CONDUCTOR_CONF=${TROVE_CONDUCTOR_CONF:-$TROVE_CONF_DIR/trove-conductor.conf} +TROVE_GUESTAGENT_CONF=${TROVE_GUESTAGENT_CONF:-$TROVE_CONF_DIR/trove-guestagent.conf} +TROVE_API_PASTE_INI=${TROVE_API_PASTE_INI:-$TROVE_CONF_DIR/api-paste.ini} TROVE_LOCAL_CONF_DIR=$TROVE_DIR/etc/trove TROVE_LOCAL_API_PASTE_INI=$TROVE_LOCAL_CONF_DIR/api-paste.ini From 89983b6dfe15e8e83f390e9870cc3ddfbf2b8243 Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Wed, 18 Mar 2015 11:12:15 +1300 Subject: [PATCH 0090/2941] Do not download Fedora cloud image for heat Tempest can now do all heat tests with cirros, and heat functional tests now load a custom test image from tarballs.o.o, so devstack no longer needs to register the fedora cloud image. 
Depends-On: I6041b8d6e7e9422f6e220d7aef0ca38857085e4b Change-Id: I9b3ea2c157b96dee139a24f0fa6f68f6764a7d67 --- stackrc | 23 ----------------------- 1 file changed, 23 deletions(-) diff --git a/stackrc b/stackrc index f8d9c43c38..5b2ed70584 100644 --- a/stackrc +++ b/stackrc @@ -560,18 +560,6 @@ case "$VIRT_DRIVER" in IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec.tar.gz"};; esac -# Use 64bit fedora image if heat is enabled -if [[ "$ENABLED_SERVICES" =~ 'h-api' ]]; then - case "$VIRT_DRIVER" in - libvirt|ironic) - HEAT_CFN_IMAGE_URL=${HEAT_CFN_IMAGE_URL:-"https://download.fedoraproject.org/pub/alt/openstack/20/x86_64/Fedora-x86_64-20-20140618-sda.qcow2"} - IMAGE_URLS+=",$HEAT_CFN_IMAGE_URL" - ;; - *) - ;; - esac -fi - # Trove needs a custom image for its work if [[ "$ENABLED_SERVICES" =~ 'tr-api' ]]; then case "$VIRT_DRIVER" in @@ -584,17 +572,6 @@ if [[ "$ENABLED_SERVICES" =~ 'tr-api' ]]; then esac fi -# Staging Area for New Images, have them here for at least 24hrs for nodepool -# to cache them otherwise the failure rates in the gate are too high -PRECACHE_IMAGES=$(trueorfalse False PRECACHE_IMAGES) -if [[ "$PRECACHE_IMAGES" == "True" ]]; then - # staging in update for nodepool - IMAGE_URL="https://download.fedoraproject.org/pub/alt/openstack/20/x86_64/Fedora-x86_64-20-20140618-sda.qcow2" - if ! 
[[ "$IMAGE_URLS" =~ "$IMAGE_URL" ]]; then - IMAGE_URLS+=",$IMAGE_URL" - fi -fi - # 10Gb default volume backing file size VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-10250M} From 51c48d4c801fecce9d2486ce956a2602eb8a0ea9 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 25 Mar 2015 06:26:03 +1100 Subject: [PATCH 0091/2941] Add a note on default values of globals Add a note on default values of globals in plugin settings Change-Id: I0d5d3a7e0597abe7e2401f8bae30ccc5682eab03 --- doc/source/plugins.rst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst index 5a610634b4..c4ed2285cb 100644 --- a/doc/source/plugins.rst +++ b/doc/source/plugins.rst @@ -113,6 +113,11 @@ directory. Inside this directory there can be 2 files. services using ``run_process`` as it only works with enabled services. + Be careful to allow users to override global-variables for + customizing their environment. Usually it is best to provide a + default value only if the variable is unset or empty; e.g. in bash + syntax ``FOO=${FOO:-default}``. + - ``plugin.sh`` - the actual plugin. It will be executed by devstack during it's run. The run order will be done in the registration order for these plugins, and will occur immediately after all in From 93b2100c983e1c271a8d51aa7f4755a6445be6a8 Mon Sep 17 00:00:00 2001 From: "Sean M. Collins" Date: Fri, 20 Feb 2015 11:45:21 -0500 Subject: [PATCH 0092/2941] Support for single interface Neutron networking with OVS When running Neutron on a single node that only has a single interface, the following operations are required: * Remove the IP address from the physical interface * Add the interface to the OVS physical bridge * Add the IP address from the physical interface to the OVS bridge * Update the routing table The reverse is done on cleanup. In order run Neutron on a single interface, the $PUBLIC_INTERFACE and $OVS_PHYSICAL_BRIDGE variables must be set. 
Co-Authored-By: Brian Haley Change-Id: Ie35cb537bb670c4773598b8db29877fb8a12ff50 --- lib/neutron | 40 ++++++++++++++++++++++++++++++++++++++-- 1 file changed, 38 insertions(+), 2 deletions(-) diff --git a/lib/neutron b/lib/neutron index 411c6961ce..dc9a339f5b 100755 --- a/lib/neutron +++ b/lib/neutron @@ -780,9 +780,41 @@ function stop_neutron { fi } +# _move_neutron_addresses_route() - Move the primary IP to the OVS bridge +# on startup, or back to the public interface on cleanup +function _move_neutron_addresses_route { + local from_intf=$1 + local to_intf=$2 + local add_ovs_port=$3 + + if [[ -n "$from_intf" && -n "$to_intf" ]]; then + # Remove the primary IP address from $from_intf and add it to $to_intf, + # along with the default route, if it exists. Also, when called + # on configure we will also add $from_intf as a port on $to_intf, + # assuming it is an OVS bridge. + + local IP_BRD=$(ip -4 a s dev $from_intf | awk '/inet/ { print $2, $3, $4; exit }') + local DEFAULT_ROUTE_GW=$(ip r | awk "/default.+$from_intf/ { print \$3; exit }") + local ADD_OVS_PORT="" + + if [ "$DEFAULT_ROUTE_GW" != "" ]; then + ADD_DEFAULT_ROUTE="sudo ip r replace default via $DEFAULT_ROUTE_GW dev $to_intf" + fi + + if [[ "$add_ovs_port" == "True" ]]; then + ADD_OVS_PORT="sudo ovs-vsctl add-port $to_intf $from_intf" + fi + + sudo ip addr del $IP_BRD dev $from_intf; sudo ip addr add $IP_BRD dev $to_intf; $ADD_OVS_PORT; $ADD_DEFAULT_ROUTE + fi +} + # cleanup_neutron() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_neutron { + + _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False + if is_provider_network && is_ironic_hardware; then for IP in $(ip addr show dev $OVS_PHYSICAL_BRIDGE | grep ' inet ' | awk '{print $2}'); do sudo ip addr del $IP dev $OVS_PHYSICAL_BRIDGE @@ -960,6 +992,8 @@ function _configure_neutron_l3_agent { _neutron_setup_interface_driver $Q_L3_CONF_FILE 
neutron_plugin_configure_l3_agent + + _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True } function _configure_neutron_metadata_agent { @@ -1235,8 +1269,10 @@ function _neutron_configure_router_v4 { if is_neutron_ovs_base_plugin && [[ "$Q_USE_NAMESPACE" = "True" ]]; then local ext_gw_interface=$(_neutron_get_ext_gw_interface) local cidr_len=${FLOATING_RANGE#*/} - sudo ip addr add $ext_gw_ip/$cidr_len dev $ext_gw_interface - sudo ip link set $ext_gw_interface up + if [[ $(ip addr show dev $ext_gw_interface | grep -c $ext_gw_ip) == 0 && $Q_USE_PROVIDERNET_FOR_PUBLIC == "False" ]]; then + sudo ip addr add $ext_gw_ip/$cidr_len dev $ext_gw_interface + sudo ip link set $ext_gw_interface up + fi ROUTER_GW_IP=`neutron port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F '"' -v subnet_id=$PUB_SUBNET_ID '$4 == subnet_id { print $8; }'` die_if_not_set $LINENO ROUTER_GW_IP "Failure retrieving ROUTER_GW_IP" sudo route add -net $FIXED_RANGE gw $ROUTER_GW_IP From eeb7bda510ad29dce7bfc5eb8aed9b6fe25efea1 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 25 Mar 2015 11:55:32 -0400 Subject: [PATCH 0093/2941] eliminate TEST_ONLY differentiation devstack is a development and test environment, but by default we were only installing the runtime dependencies. We should install all the testing required packages as well. 
Change-Id: I7c95927b9daad15766aac9d1276b10ca62efb24c --- files/debs/general | 2 +- files/debs/glance | 10 +++++----- files/debs/neutron | 6 +++--- files/debs/nova | 2 +- files/debs/trove | 2 +- files/rpms-suse/general | 2 +- files/rpms-suse/neutron | 4 ++-- files/rpms-suse/trove | 2 +- files/rpms/general | 2 +- files/rpms/glance | 12 ++++++------ files/rpms/neutron | 6 +++--- files/rpms/nova | 2 +- files/rpms/trove | 2 +- functions-common | 10 ---------- inc/python | 23 +++++++++++------------ stackrc | 3 --- 16 files changed, 38 insertions(+), 52 deletions(-) diff --git a/files/debs/general b/files/debs/general index 84d43029ff..5f10a20fc3 100644 --- a/files/debs/general +++ b/files/debs/general @@ -6,7 +6,7 @@ psmisc gcc g++ git -graphviz # testonly - docs +graphviz # needed for docs lsof # useful when debugging openssh-server openssl diff --git a/files/debs/glance b/files/debs/glance index 9fda6a63d9..37877a85c2 100644 --- a/files/debs/glance +++ b/files/debs/glance @@ -1,6 +1,6 @@ -libmysqlclient-dev # testonly -libpq-dev # testonly -libssl-dev # testonly +libmysqlclient-dev +libpq-dev +libssl-dev libxml2-dev -libxslt1-dev # testonly -zlib1g-dev # testonly +libxslt1-dev +zlib1g-dev diff --git a/files/debs/neutron b/files/debs/neutron index aa3d7095ca..2d69a71c3a 100644 --- a/files/debs/neutron +++ b/files/debs/neutron @@ -1,12 +1,12 @@ -acl # testonly +acl ebtables iptables iputils-ping iputils-arping -libmysqlclient-dev # testonly +libmysqlclient-dev mysql-server #NOPRIME sudo -postgresql-server-dev-all # testonly +postgresql-server-dev-all python-mysqldb python-mysql.connector python-qpid # NOPRIME diff --git a/files/debs/nova b/files/debs/nova index 0c31385ddd..9d9acde3e9 100644 --- a/files/debs/nova +++ b/files/debs/nova @@ -4,7 +4,7 @@ conntrack kpartx parted iputils-arping -libmysqlclient-dev # testonly +libmysqlclient-dev mysql-server # NOPRIME python-mysqldb python-mysql.connector diff --git a/files/debs/trove b/files/debs/trove index 
09dcee8104..96f8f29277 100644 --- a/files/debs/trove +++ b/files/debs/trove @@ -1 +1 @@ -libxslt1-dev # testonly +libxslt1-dev diff --git a/files/rpms-suse/general b/files/rpms-suse/general index 63cf14bd5b..2219426141 100644 --- a/files/rpms-suse/general +++ b/files/rpms-suse/general @@ -6,7 +6,7 @@ euca2ools gcc gcc-c++ git-core -graphviz # testonly - docs +graphviz # docs iputils libopenssl-devel # to rebuild pyOpenSSL if needed lsof # useful when debugging diff --git a/files/rpms-suse/neutron b/files/rpms-suse/neutron index 66d6e4cad0..d278363e98 100644 --- a/files/rpms-suse/neutron +++ b/files/rpms-suse/neutron @@ -1,11 +1,11 @@ -acl # testonly +acl dnsmasq dnsmasq-utils # dist:opensuse-12.3,opensuse-13.1 ebtables iptables iputils mariadb # NOPRIME -postgresql-devel # testonly +postgresql-devel python-eventlet python-greenlet python-iso8601 diff --git a/files/rpms-suse/trove b/files/rpms-suse/trove index 09dcee8104..96f8f29277 100644 --- a/files/rpms-suse/trove +++ b/files/rpms-suse/trove @@ -1 +1 @@ -libxslt1-dev # testonly +libxslt1-dev diff --git a/files/rpms/general b/files/rpms/general index eac4ec36a7..d74ecc6e98 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -5,7 +5,7 @@ euca2ools # only for testing client gcc gcc-c++ git-core -graphviz # testonly - docs +graphviz # needed only for docs openssh-server openssl openssl-devel # to rebuild pyOpenSSL if needed diff --git a/files/rpms/glance b/files/rpms/glance index 119492a3f8..479194f918 100644 --- a/files/rpms/glance +++ b/files/rpms/glance @@ -1,6 +1,6 @@ -libxml2-devel # testonly -libxslt-devel # testonly -mysql-devel # testonly -openssl-devel # testonly -postgresql-devel # testonly -zlib-devel # testonly +libxml2-devel +libxslt-devel +mysql-devel +openssl-devel +postgresql-devel +zlib-devel diff --git a/files/rpms/neutron b/files/rpms/neutron index c0dee78a48..8292e7bffe 100644 --- a/files/rpms/neutron +++ b/files/rpms/neutron @@ -1,15 +1,15 @@ MySQL-python -acl # testonly +acl dnsmasq # 
for q-dhcp dnsmasq-utils # for dhcp_release ebtables iptables iputils mysql-connector-python -mysql-devel # testonly +mysql-devel mysql-server # NOPRIME openvswitch # NOPRIME -postgresql-devel # testonly +postgresql-devel rabbitmq-server # NOPRIME qpid-cpp-server # NOPRIME sqlite diff --git a/files/rpms/nova b/files/rpms/nova index 527928a581..ebd667454a 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -17,7 +17,7 @@ libxml2-python numpy # needed by websockify for spice console m2crypto mysql-connector-python -mysql-devel # testonly +mysql-devel mysql-server # NOPRIME parted polkit diff --git a/files/rpms/trove b/files/rpms/trove index c5cbdea012..e7bbd43cd6 100644 --- a/files/rpms/trove +++ b/files/rpms/trove @@ -1 +1 @@ -libxslt-devel # testonly +libxslt-devel diff --git a/functions-common b/functions-common index f96da5b799..48e400dfb1 100644 --- a/functions-common +++ b/functions-common @@ -883,16 +883,6 @@ function _parse_package_files { fi fi - # Look for # testonly in comment - if [[ $line =~ (.*)#.*testonly.* ]]; then - package=${BASH_REMATCH[1]} - # Are we installing test packages? 
(test for the default value) - if [[ $INSTALL_TESTONLY_PACKAGES = "False" ]]; then - # If not installing test packages the skip this package - inst_pkg=0 - fi - fi - if [[ $inst_pkg = 1 ]]; then echo $package fi diff --git a/inc/python b/inc/python index 229c54009d..2d76081a52 100644 --- a/inc/python +++ b/inc/python @@ -101,18 +101,17 @@ function pip_install { $cmd_pip install \ $@ - INSTALL_TESTONLY_PACKAGES=$(trueorfalse False INSTALL_TESTONLY_PACKAGES) - if [[ "$INSTALL_TESTONLY_PACKAGES" == "True" ]]; then - local test_req="$@/test-requirements.txt" - if [[ -e "$test_req" ]]; then - $sudo_pip \ - http_proxy=${http_proxy:-} \ - https_proxy=${https_proxy:-} \ - no_proxy=${no_proxy:-} \ - PIP_FIND_LINKS=$PIP_FIND_LINKS \ - $cmd_pip install \ - -r $test_req - fi + # Also install test requirements + local test_req="$@/test-requirements.txt" + if [[ -e "$test_req" ]]; then + echo "Installing test-requirements for $test_req" + $sudo_pip \ + http_proxy=${http_proxy:-} \ + https_proxy=${https_proxy:-} \ + no_proxy=${no_proxy:-} \ + PIP_FIND_LINKS=$PIP_FIND_LINKS \ + $cmd_pip install \ + -r $test_req fi } diff --git a/stackrc b/stackrc index f8d9c43c38..a13f82a889 100644 --- a/stackrc +++ b/stackrc @@ -615,9 +615,6 @@ USE_SCREEN=${SCREEN_DEV:-$USE_SCREEN} # Set default screen name SCREEN_NAME=${SCREEN_NAME:-stack} -# Do not install packages tagged with 'testonly' by default -INSTALL_TESTONLY_PACKAGES=${INSTALL_TESTONLY_PACKAGES:-False} - # Undo requirements changes by global requirements UNDO_REQUIREMENTS=${UNDO_REQUIREMENTS:-True} From 72f026b60d350ede39e22e08b8f7f286fd0d2633 Mon Sep 17 00:00:00 2001 From: "Andrea Frittoli (andreaf)" Date: Wed, 25 Mar 2015 17:24:24 -0400 Subject: [PATCH 0094/2941] Always defines tempest_roles as Member Because tests might force the auth version to v3, we always need to have Member in the list of roles. 
Change-Id: I06fd043e1b31ae0e5e33f4dcf898fb58f2907267 --- lib/tempest | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/lib/tempest b/lib/tempest index fcb0e59937..7535fa6596 100644 --- a/lib/tempest +++ b/lib/tempest @@ -315,9 +315,7 @@ function configure_tempest { # Auth iniset $TEMPEST_CONFIG auth allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True} - if [[ "$TEMPEST_AUTH_VERSION" == "v3" ]]; then - iniset $TEMPEST_CONFIG auth tempest_roles "Member" - fi + iniset $TEMPEST_CONFIG auth tempest_roles "Member" # Compute iniset $TEMPEST_CONFIG compute ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # DEPRECATED From 85c98b3e18b132d99c569626e1b747eafa59f7c6 Mon Sep 17 00:00:00 2001 From: Alessandro Pilotti Date: Thu, 26 Mar 2015 00:14:05 +0100 Subject: [PATCH 0095/2941] Revert "Support for single interface Neutron networking with OVS" This patch is causing blocking failures in some 3rd party CIs. The issue can be tracked to the fact that the PUBLIC_INTERFACE interface might have no address assigned. This reverts commit 93b2100c983e1c271a8d51aa7f4755a6445be6a8. Partial-Bug: #1436607 Change-Id: I0943aa542b911fbcebb100543e0adbb38159b233 --- lib/neutron | 40 ++-------------------------------------- 1 file changed, 2 insertions(+), 38 deletions(-) diff --git a/lib/neutron b/lib/neutron index 89ed3ccea0..5ff39212fc 100755 --- a/lib/neutron +++ b/lib/neutron @@ -779,41 +779,9 @@ function stop_neutron { fi } -# _move_neutron_addresses_route() - Move the primary IP to the OVS bridge -# on startup, or back to the public interface on cleanup -function _move_neutron_addresses_route { - local from_intf=$1 - local to_intf=$2 - local add_ovs_port=$3 - - if [[ -n "$from_intf" && -n "$to_intf" ]]; then - # Remove the primary IP address from $from_intf and add it to $to_intf, - # along with the default route, if it exists. Also, when called - # on configure we will also add $from_intf as a port on $to_intf, - # assuming it is an OVS bridge. 
- - local IP_BRD=$(ip -4 a s dev $from_intf | awk '/inet/ { print $2, $3, $4; exit }') - local DEFAULT_ROUTE_GW=$(ip r | awk "/default.+$from_intf/ { print \$3; exit }") - local ADD_OVS_PORT="" - - if [ "$DEFAULT_ROUTE_GW" != "" ]; then - ADD_DEFAULT_ROUTE="sudo ip r replace default via $DEFAULT_ROUTE_GW dev $to_intf" - fi - - if [[ "$add_ovs_port" == "True" ]]; then - ADD_OVS_PORT="sudo ovs-vsctl add-port $to_intf $from_intf" - fi - - sudo ip addr del $IP_BRD dev $from_intf; sudo ip addr add $IP_BRD dev $to_intf; $ADD_OVS_PORT; $ADD_DEFAULT_ROUTE - fi -} - # cleanup_neutron() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_neutron { - - _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False - if is_provider_network && is_ironic_hardware; then for IP in $(ip addr show dev $OVS_PHYSICAL_BRIDGE | grep ' inet ' | awk '{print $2}'); do sudo ip addr del $IP dev $OVS_PHYSICAL_BRIDGE @@ -988,8 +956,6 @@ function _configure_neutron_l3_agent { _neutron_setup_interface_driver $Q_L3_CONF_FILE neutron_plugin_configure_l3_agent - - _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True } function _configure_neutron_metadata_agent { @@ -1261,10 +1227,8 @@ function _neutron_configure_router_v4 { if is_neutron_ovs_base_plugin && [[ "$Q_USE_NAMESPACE" = "True" ]]; then local ext_gw_interface=$(_neutron_get_ext_gw_interface) local cidr_len=${FLOATING_RANGE#*/} - if [[ $(ip addr show dev $ext_gw_interface | grep -c $ext_gw_ip) == 0 && $Q_USE_PROVIDERNET_FOR_PUBLIC == "False" ]]; then - sudo ip addr add $ext_gw_ip/$cidr_len dev $ext_gw_interface - sudo ip link set $ext_gw_interface up - fi + sudo ip addr add $ext_gw_ip/$cidr_len dev $ext_gw_interface + sudo ip link set $ext_gw_interface up ROUTER_GW_IP=`neutron port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F '"' -v subnet_id=$PUB_SUBNET_ID '$4 == subnet_id { print $8; }'` die_if_not_set 
$LINENO ROUTER_GW_IP "Failure retrieving ROUTER_GW_IP" sudo route add -net $FIXED_RANGE gw $ROUTER_GW_IP From 5a9739a4cae7957a24898fb11562559be2916121 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 25 Mar 2015 11:33:51 -0500 Subject: [PATCH 0096/2941] Rename lib/neutron to lib/neutron-legacy Preparing to refactor lib/neutron to support Neutron as the default network config. lib/neutron will be renamed internally and refined to support a couple of specific configurations. Change-Id: I0d3773d14c4c636a4b915734784e7241f4d15474 --- clean.sh | 2 +- doc/source/index.rst | 2 +- exercises/boot_from_volume.sh | 2 +- exercises/euca.sh | 2 +- exercises/floating_ips.sh | 2 +- exercises/neutron-adv-test.sh | 2 +- exercises/volumes.sh | 2 +- lib/neutron | 1468 +----------------------------- lib/neutron-legacy | 1467 +++++++++++++++++++++++++++++ lib/neutron_plugins/README.md | 2 +- lib/neutron_thirdparty/README.md | 2 +- stack.sh | 2 +- unstack.sh | 2 +- 13 files changed, 1479 insertions(+), 1478 deletions(-) mode change 100755 => 120000 lib/neutron create mode 100755 lib/neutron-legacy diff --git a/clean.sh b/clean.sh index ad4525ba62..035489c045 100755 --- a/clean.sh +++ b/clean.sh @@ -49,7 +49,7 @@ source $TOP_DIR/lib/cinder source $TOP_DIR/lib/swift source $TOP_DIR/lib/ceilometer source $TOP_DIR/lib/heat -source $TOP_DIR/lib/neutron +source $TOP_DIR/lib/neutron-legacy source $TOP_DIR/lib/ironic source $TOP_DIR/lib/trove diff --git a/doc/source/index.rst b/doc/source/index.rst index 537f6570a2..b7012379cb 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -166,7 +166,7 @@ Scripts * `lib/ironic `__ * `lib/keystone `__ * `lib/ldap `__ -* `lib/neutron `__ +* `lib/neutron-legacy `__ * `lib/nova `__ * `lib/oslo `__ * `lib/rpc\_backend `__ diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index a2ae275b04..aa348307af 100755 --- a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -32,7 +32,7 @@ source $TOP_DIR/functions 
# Import project functions source $TOP_DIR/lib/cinder -source $TOP_DIR/lib/neutron +source $TOP_DIR/lib/neutron-legacy # Import configuration source $TOP_DIR/openrc diff --git a/exercises/euca.sh b/exercises/euca.sh index f9c47523e6..df5e233b8d 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -37,7 +37,7 @@ source $TOP_DIR/eucarc source $TOP_DIR/exerciserc # Import project functions -source $TOP_DIR/lib/neutron +source $TOP_DIR/lib/neutron-legacy # If nova api is not enabled we exit with exitcode 55 so that # the exercise is skipped diff --git a/exercises/floating_ips.sh b/exercises/floating_ips.sh index 57f48e08b2..59444e1ebd 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -31,7 +31,7 @@ source $TOP_DIR/functions source $TOP_DIR/openrc # Import project functions -source $TOP_DIR/lib/neutron +source $TOP_DIR/lib/neutron-legacy # Import exercise configuration source $TOP_DIR/exerciserc diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh index 5b3281b16a..9230587817 100755 --- a/exercises/neutron-adv-test.sh +++ b/exercises/neutron-adv-test.sh @@ -49,7 +49,7 @@ source $TOP_DIR/functions source $TOP_DIR/openrc # Import neutron functions -source $TOP_DIR/lib/neutron +source $TOP_DIR/lib/neutron-legacy # If neutron is not enabled we exit with exitcode 55, which means exercise is skipped. 
neutron_plugin_check_adv_test_requirements || exit 55 diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 504fba1a04..3ac2016254 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -32,7 +32,7 @@ source $TOP_DIR/openrc # Import project functions source $TOP_DIR/lib/cinder -source $TOP_DIR/lib/neutron +source $TOP_DIR/lib/neutron-legacy # Import exercise configuration source $TOP_DIR/exerciserc diff --git a/lib/neutron b/lib/neutron deleted file mode 100755 index 5ff39212fc..0000000000 --- a/lib/neutron +++ /dev/null @@ -1,1467 +0,0 @@ -#!/bin/bash -# -# lib/neutron -# functions - functions specific to neutron - -# Dependencies: -# ``functions`` file -# ``DEST`` must be defined -# ``STACK_USER`` must be defined - -# ``stack.sh`` calls the entry points in this order: -# -# - install_neutron_agent_packages -# - install_neutronclient -# - install_neutron -# - install_neutron_third_party -# - configure_neutron -# - init_neutron -# - configure_neutron_third_party -# - init_neutron_third_party -# - start_neutron_third_party -# - create_nova_conf_neutron -# - start_neutron_service_and_check -# - check_neutron_third_party_integration -# - start_neutron_agents -# - create_neutron_initial_network -# - setup_neutron_debug -# -# ``unstack.sh`` calls the entry points in this order: -# -# - teardown_neutron_debug -# - stop_neutron -# - stop_neutron_third_party -# - cleanup_neutron - -# Functions in lib/neutron are classified into the following categories: -# -# - entry points (called from stack.sh or unstack.sh) -# - internal functions -# - neutron exercises -# - 3rd party programs - - -# Neutron Networking -# ------------------ - -# Make sure that neutron is enabled in ``ENABLED_SERVICES``. If you want -# to run Neutron on this host, make sure that q-svc is also in -# ``ENABLED_SERVICES``. -# -# See "Neutron Network Configuration" below for additional variables -# that must be set in localrc for connectivity across hosts with -# Neutron. 
-# -# With Neutron networking the NETWORK_MANAGER variable is ignored. - -# Settings -# -------- - -# Timeout value in seconds to wait for IPv6 gateway configuration -GATEWAY_TIMEOUT=30 - - -# Neutron Network Configuration -# ----------------------------- - -# Subnet IP version -IP_VERSION=${IP_VERSION:-4} -# Validate IP_VERSION -if [[ $IP_VERSION != "4" ]] && [[ $IP_VERSION != "6" ]] && [[ $IP_VERSION != "4+6" ]]; then - die $LINENO "IP_VERSION must be either 4, 6, or 4+6" -fi -# Gateway and subnet defaults, in case they are not customized in localrc -NETWORK_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1} -PUBLIC_NETWORK_GATEWAY=${PUBLIC_NETWORK_GATEWAY:-172.24.4.1} -PRIVATE_SUBNET_NAME=${PRIVATE_SUBNET_NAME:-"private-subnet"} -PUBLIC_SUBNET_NAME=${PUBLIC_SUBNET_NAME:-"public-subnet"} - -if is_ssl_enabled_service "neutron" || is_service_enabled tls-proxy; then - Q_PROTOCOL="https" -fi - -# Generate 40-bit IPv6 Global ID to comply with RFC 4193 -IPV6_GLOBAL_ID=`uuidgen | sed s/-//g | cut -c 23- | sed -e "s/\(..\)\(....\)\(....\)/\1:\2:\3/"` - -# IPv6 gateway and subnet defaults, in case they are not customized in localrc -IPV6_RA_MODE=${IPV6_RA_MODE:-slaac} -IPV6_ADDRESS_MODE=${IPV6_ADDRESS_MODE:-slaac} -IPV6_PUBLIC_SUBNET_NAME=${IPV6_PUBLIC_SUBNET_NAME:-ipv6-public-subnet} -IPV6_PRIVATE_SUBNET_NAME=${IPV6_PRIVATE_SUBNET_NAME:-ipv6-private-subnet} -FIXED_RANGE_V6=${FIXED_RANGE_V6:-fd$IPV6_GLOBAL_ID::/64} -IPV6_PRIVATE_NETWORK_GATEWAY=${IPV6_PRIVATE_NETWORK_GATEWAY:-fd$IPV6_GLOBAL_ID::1} -IPV6_PUBLIC_RANGE=${IPV6_PUBLIC_RANGE:-fe80:cafe:cafe::/64} -IPV6_PUBLIC_NETWORK_GATEWAY=${IPV6_PUBLIC_NETWORK_GATEWAY:-fe80:cafe:cafe::2} -# IPV6_ROUTER_GW_IP must be defined when IP_VERSION=4+6 as it cannot be -# obtained conventionally until the l3-agent has support for dual-stack -# TODO (john-davidge) Remove once l3-agent supports dual-stack -IPV6_ROUTER_GW_IP=${IPV6_ROUTER_GW_IP:-fe80:cafe:cafe::1} - -# Set up default directories 
-GITDIR["python-neutronclient"]=$DEST/python-neutronclient - - -NEUTRON_DIR=$DEST/neutron -NEUTRON_FWAAS_DIR=$DEST/neutron-fwaas -NEUTRON_LBAAS_DIR=$DEST/neutron-lbaas -NEUTRON_VPNAAS_DIR=$DEST/neutron-vpnaas -NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron} - -# Support entry points installation of console scripts -if [[ -d $NEUTRON_DIR/bin/neutron-server ]]; then - NEUTRON_BIN_DIR=$NEUTRON_DIR/bin -else - NEUTRON_BIN_DIR=$(get_python_exec_prefix) -fi - -NEUTRON_CONF_DIR=/etc/neutron -NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf -export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"} - -# Agent binaries. Note, binary paths for other agents are set in per-service -# scripts in lib/neutron_plugins/services/ -AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent" -AGENT_L3_BINARY=${AGENT_L3_BINARY:-"$NEUTRON_BIN_DIR/neutron-l3-agent"} -AGENT_META_BINARY="$NEUTRON_BIN_DIR/neutron-metadata-agent" - -# Agent config files. Note, plugin-specific Q_PLUGIN_CONF_FILE is set and -# loaded from per-plugin scripts in lib/neutron_plugins/ -Q_DHCP_CONF_FILE=$NEUTRON_CONF_DIR/dhcp_agent.ini -Q_L3_CONF_FILE=$NEUTRON_CONF_DIR/l3_agent.ini -Q_FWAAS_CONF_FILE=$NEUTRON_CONF_DIR/fwaas_driver.ini -Q_VPN_CONF_FILE=$NEUTRON_CONF_DIR/vpn_agent.ini -Q_META_CONF_FILE=$NEUTRON_CONF_DIR/metadata_agent.ini - -# Default name for Neutron database -Q_DB_NAME=${Q_DB_NAME:-neutron} -# Default Neutron Plugin -Q_PLUGIN=${Q_PLUGIN:-ml2} -# Default Neutron Port -Q_PORT=${Q_PORT:-9696} -# Default Neutron Internal Port when using TLS proxy -Q_PORT_INT=${Q_PORT_INT:-19696} -# Default Neutron Host -Q_HOST=${Q_HOST:-$SERVICE_HOST} -# Default protocol -Q_PROTOCOL=${Q_PROTOCOL:-$SERVICE_PROTOCOL} -# Default admin username -Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-neutron} -# Default auth strategy -Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone} -# Use namespace or not -Q_USE_NAMESPACE=${Q_USE_NAMESPACE:-True} -# RHEL's support for namespaces requires using veths 
with ovs -Q_OVS_USE_VETH=${Q_OVS_USE_VETH:-False} -Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True} -Q_USE_ROOTWRAP_DAEMON=$(trueorfalse True Q_USE_ROOTWRAP_DAEMON) -# Meta data IP -Q_META_DATA_IP=${Q_META_DATA_IP:-$SERVICE_HOST} -# Allow Overlapping IP among subnets -Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True} -# Use neutron-debug command -Q_USE_DEBUG_COMMAND=${Q_USE_DEBUG_COMMAND:-False} -# The name of the default q-l3 router -Q_ROUTER_NAME=${Q_ROUTER_NAME:-router1} -# nova vif driver that all plugins should use -NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} -Q_NOTIFY_NOVA_PORT_STATUS_CHANGES=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGES:-True} -Q_NOTIFY_NOVA_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_PORT_DATA_CHANGES:-True} -VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True} -VIF_PLUGGING_TIMEOUT=${VIF_PLUGGING_TIMEOUT:-300} -# Specify if the initial private and external networks should be created -NEUTRON_CREATE_INITIAL_NETWORKS=${NEUTRON_CREATE_INITIAL_NETWORKS:-True} - -## Provider Network Information -PROVIDER_SUBNET_NAME=${PROVIDER_SUBNET_NAME:-"provider_net"} - -# Use flat providernet for public network -# -# If Q_USE_PROVIDERNET_FOR_PUBLIC=True, use a flat provider network -# for external interface of neutron l3-agent. In that case, -# PUBLIC_PHYSICAL_NETWORK specifies provider:physical_network value -# used for the network. In case of ofagent, you should add the -# corresponding entry to your OFAGENT_PHYSICAL_INTERFACE_MAPPINGS. -# For openvswitch agent, you should add the corresponding entry to -# your OVS_BRIDGE_MAPPINGS. -# -# eg. (ofagent) -# Q_USE_PROVIDERNET_FOR_PUBLIC=True -# Q_USE_PUBLIC_VETH=True -# PUBLIC_PHYSICAL_NETWORK=public -# OFAGENT_PHYSICAL_INTERFACE_MAPPINGS=public:veth-pub-int -# -# eg. 
(openvswitch agent) -# Q_USE_PROVIDERNET_FOR_PUBLIC=True -# PUBLIC_PHYSICAL_NETWORK=public -# OVS_BRIDGE_MAPPINGS=public:br-ex -Q_USE_PROVIDERNET_FOR_PUBLIC=${Q_USE_PROVIDERNET_FOR_PUBLIC:-False} -PUBLIC_PHYSICAL_NETWORK=${PUBLIC_PHYSICAL_NETWORK:-public} - -# If Q_USE_PUBLIC_VETH=True, create and use a veth pair instead of -# PUBLIC_BRIDGE. This is intended to be used with -# Q_USE_PROVIDERNET_FOR_PUBLIC=True. -Q_USE_PUBLIC_VETH=${Q_USE_PUBLIC_VETH:-False} -Q_PUBLIC_VETH_EX=${Q_PUBLIC_VETH_EX:-veth-pub-ex} -Q_PUBLIC_VETH_INT=${Q_PUBLIC_VETH_INT:-veth-pub-int} - -# The next two variables are configured by plugin -# e.g. _configure_neutron_l3_agent or lib/neutron_plugins/* -# -# The plugin supports L3. -Q_L3_ENABLED=${Q_L3_ENABLED:-False} -# L3 routers exist per tenant -Q_L3_ROUTER_PER_TENANT=${Q_L3_ROUTER_PER_TENANT:-False} - -# List of config file names in addition to the main plugin config file -# See _configure_neutron_common() for details about setting it up -declare -a Q_PLUGIN_EXTRA_CONF_FILES - -# List of (optional) config files for VPN device drivers to use with -# the neutron-q-vpn agent -declare -a Q_VPN_EXTRA_CONF_FILES - - -Q_RR_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf -if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then - Q_RR_COMMAND="sudo" -else - NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron) - Q_RR_COMMAND="sudo $NEUTRON_ROOTWRAP $Q_RR_CONF_FILE" - if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - Q_RR_DAEMON_COMMAND="sudo $NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE" - fi -fi - - -# Distributed Virtual Router (DVR) configuration -# Can be: -# - ``legacy`` - No DVR functionality -# - ``dvr_snat`` - Controller or single node DVR -# - ``dvr`` - Compute node in multi-node DVR -# -Q_DVR_MODE=${Q_DVR_MODE:-legacy} -if [[ "$Q_DVR_MODE" != "legacy" ]]; then - Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,linuxbridge,l2population -fi - -# Provider Network Configurations -# -------------------------------- - -# The following variables control the Neutron ML2 
plugins' allocation -# of tenant networks and availability of provider networks. If these -# are not configured in ``localrc``, tenant networks will be local to -# the host (with no remote connectivity), and no physical resources -# will be available for the allocation of provider networks. - -# To disable tunnels (GRE or VXLAN) for tenant networks, -# set to False in ``local.conf``. -# GRE tunnels are only supported by the openvswitch. -ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-True} - -# If using GRE tunnels for tenant networks, specify the range of -# tunnel IDs from which tenant networks are allocated. Can be -# overriden in ``localrc`` in necesssary. -TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGES:-1:1000} - -# To use VLANs for tenant networks, set to True in localrc. VLANs -# are supported by the ML2 plugins, requiring additional configuration -# described below. -ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False} - -# If using VLANs for tenant networks, set in ``localrc`` to specify -# the range of VLAN VIDs from which tenant networks are -# allocated. An external network switch must be configured to -# trunk these VLANs between hosts for multi-host connectivity. -# -# Example: ``TENANT_VLAN_RANGE=1000:1999`` -TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-} - -# If using VLANs for tenant networks, or if using flat or VLAN -# provider networks, set in ``localrc`` to the name of the physical -# network, and also configure ``OVS_PHYSICAL_BRIDGE`` for the -# openvswitch agent or ``LB_PHYSICAL_INTERFACE`` for the linuxbridge -# agent, as described below. -# -# Example: ``PHYSICAL_NETWORK=default`` -PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-} - -# With the openvswitch agent, if using VLANs for tenant networks, -# or if using flat or VLAN provider networks, set in ``localrc`` to -# the name of the OVS bridge to use for the physical network. 
The -# bridge will be created if it does not already exist, but a -# physical interface must be manually added to the bridge as a -# port for external connectivity. -# -# Example: ``OVS_PHYSICAL_BRIDGE=br-eth1`` -OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-} - -# With the linuxbridge agent, if using VLANs for tenant networks, -# or if using flat or VLAN provider networks, set in ``localrc`` to -# the name of the network interface to use for the physical -# network. -# -# Example: ``LB_PHYSICAL_INTERFACE=eth1`` -LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-} - -# When Neutron tunnels are enabled it is needed to specify the -# IP address of the end point in the local server. This IP is set -# by default to the same IP address that the HOST IP. -# This variable can be used to specify a different end point IP address -# Example: ``TUNNEL_ENDPOINT_IP=1.1.1.1`` -TUNNEL_ENDPOINT_IP=${TUNNEL_ENDPOINT_IP:-$HOST_IP} - -# With the openvswitch plugin, set to True in ``localrc`` to enable -# provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False. -# -# Example: ``OVS_ENABLE_TUNNELING=True`` -OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS} - -# Use DHCP agent for providing metadata service in the case of -# without L3 agent (No Route Agent), set to True in localrc. -ENABLE_ISOLATED_METADATA=${ENABLE_ISOLATED_METADATA:-False} - -# Add a static route as dhcp option, so the request to 169.254.169.254 -# will be able to reach through a route(DHCP agent) -# This option require ENABLE_ISOLATED_METADATA = True -ENABLE_METADATA_NETWORK=${ENABLE_METADATA_NETWORK:-False} -# Neutron plugin specific functions -# --------------------------------- - -# Please refer to ``lib/neutron_plugins/README.md`` for details. 
-source $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN - -# Agent loadbalancer service plugin functions -# ------------------------------------------- - -# Hardcoding for 1 service plugin for now -source $TOP_DIR/lib/neutron_plugins/services/loadbalancer - -# Agent metering service plugin functions -# ------------------------------------------- - -# Hardcoding for 1 service plugin for now -source $TOP_DIR/lib/neutron_plugins/services/metering - -# VPN service plugin functions -# ------------------------------------------- -# Hardcoding for 1 service plugin for now -source $TOP_DIR/lib/neutron_plugins/services/vpn - -# Firewall Service Plugin functions -# --------------------------------- -source $TOP_DIR/lib/neutron_plugins/services/firewall - -# Use security group or not -if has_neutron_plugin_security_group; then - Q_USE_SECGROUP=${Q_USE_SECGROUP:-True} -else - Q_USE_SECGROUP=False -fi - -# Tell Tempest this project is present -TEMPEST_SERVICES+=,neutron - - -# Save trace setting -XTRACE=$(set +o | grep xtrace) -set +o xtrace - - -# Functions -# --------- - -function _determine_config_server { - local cfg_file - local opts="--config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" - for cfg_file in ${Q_PLUGIN_EXTRA_CONF_FILES[@]}; do - opts+=" --config-file /$cfg_file" - done - echo "$opts" -} - -function _determine_config_vpn { - local cfg_file - local opts="--config-file $NEUTRON_CONF --config-file=$Q_L3_CONF_FILE --config-file=$Q_VPN_CONF_FILE" - if is_service_enabled q-fwaas; then - opts+=" --config-file $Q_FWAAS_CONF_FILE" - fi - for cfg_file in ${Q_VPN_EXTRA_CONF_FILES[@]}; do - opts+=" --config-file $cfg_file" - done - echo "$opts" - -} - -function _determine_config_l3 { - local opts="--config-file $NEUTRON_CONF --config-file=$Q_L3_CONF_FILE" - if is_service_enabled q-fwaas; then - opts+=" --config-file $Q_FWAAS_CONF_FILE" - fi - echo "$opts" -} - -# For services and agents that require it, dynamically construct a list of -# --config-file arguments that are 
passed to the binary. -function determine_config_files { - local opts="" - case "$1" in - "neutron-server") opts="$(_determine_config_server)" ;; - "neutron-vpn-agent") opts="$(_determine_config_vpn)" ;; - "neutron-l3-agent") opts="$(_determine_config_l3)" ;; - esac - if [ -z "$opts" ] ; then - die $LINENO "Could not determine config files for $1." - fi - echo "$opts" -} - -# Test if any Neutron services are enabled -# is_neutron_enabled -function is_neutron_enabled { - [[ ,${ENABLED_SERVICES} =~ ,"q-" ]] && return 0 - return 1 -} - -# configure_neutron() -# Set common config for all neutron server and agents. -function configure_neutron { - _configure_neutron_common - iniset_rpc_backend neutron $NEUTRON_CONF - - # goes before q-svc to init Q_SERVICE_PLUGIN_CLASSES - if is_service_enabled q-lbaas; then - _configure_neutron_lbaas - fi - if is_service_enabled q-metering; then - _configure_neutron_metering - fi - if is_service_enabled q-vpn; then - _configure_neutron_vpn - fi - if is_service_enabled q-fwaas; then - _configure_neutron_fwaas - fi - if is_service_enabled q-agt q-svc; then - _configure_neutron_service - fi - if is_service_enabled q-agt; then - _configure_neutron_plugin_agent - fi - if is_service_enabled q-dhcp; then - _configure_neutron_dhcp_agent - fi - if is_service_enabled q-l3; then - _configure_neutron_l3_agent - fi - if is_service_enabled q-meta; then - _configure_neutron_metadata_agent - fi - - if [[ "$Q_DVR_MODE" != "legacy" ]]; then - _configure_dvr - fi - if is_service_enabled ceilometer; then - _configure_neutron_ceilometer_notifications - fi - - _configure_neutron_debug_command -} - -function create_nova_conf_neutron { - iniset $NOVA_CONF DEFAULT network_api_class "nova.network.neutronv2.api.API" - iniset $NOVA_CONF neutron admin_username "$Q_ADMIN_USERNAME" - iniset $NOVA_CONF neutron admin_password "$SERVICE_PASSWORD" - iniset $NOVA_CONF neutron admin_auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0" - 
iniset $NOVA_CONF neutron auth_strategy "$Q_AUTH_STRATEGY" - iniset $NOVA_CONF neutron admin_tenant_name "$SERVICE_TENANT_NAME" - iniset $NOVA_CONF neutron region_name "$REGION_NAME" - iniset $NOVA_CONF neutron url "${Q_PROTOCOL}://$Q_HOST:$Q_PORT" - - if [[ "$Q_USE_SECGROUP" == "True" ]]; then - LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver - iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER - iniset $NOVA_CONF DEFAULT security_group_api neutron - fi - - # set NOVA_VIF_DRIVER and optionally set options in nova_conf - neutron_plugin_create_nova_conf - - iniset $NOVA_CONF libvirt vif_driver "$NOVA_VIF_DRIVER" - iniset $NOVA_CONF DEFAULT linuxnet_interface_driver "$LINUXNET_VIF_DRIVER" - if is_service_enabled q-meta; then - iniset $NOVA_CONF neutron service_metadata_proxy "True" - fi - - iniset $NOVA_CONF DEFAULT vif_plugging_is_fatal "$VIF_PLUGGING_IS_FATAL" - iniset $NOVA_CONF DEFAULT vif_plugging_timeout "$VIF_PLUGGING_TIMEOUT" -} - -# create_neutron_cache_dir() - Part of the _neutron_setup_keystone() process -function create_neutron_cache_dir { - # Create cache dir - sudo install -d -o $STACK_USER $NEUTRON_AUTH_CACHE_DIR - rm -f $NEUTRON_AUTH_CACHE_DIR/* -} - -# create_neutron_accounts() - Set up common required neutron accounts - -# Tenant User Roles -# ------------------------------------------------------------------ -# service neutron admin # if enabled - -# Migrated from keystone_data.sh -function create_neutron_accounts { - if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then - - create_service_user "neutron" - - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - - local neutron_service=$(get_or_create_service "neutron" \ - "network" "Neutron Service") - get_or_create_endpoint $neutron_service \ - "$REGION_NAME" \ - "$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/" \ - "$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/" \ - "$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/" - fi - fi -} - -function create_neutron_initial_network { - TENANT_ID=$(openstack project 
list | grep " demo " | get_field 1) - die_if_not_set $LINENO TENANT_ID "Failure retrieving TENANT_ID for demo" - - # Allow drivers that need to create an initial network to do so here - if type -p neutron_plugin_create_initial_network_profile > /dev/null; then - neutron_plugin_create_initial_network_profile $PHYSICAL_NETWORK - fi - - if is_provider_network; then - die_if_not_set $LINENO PHYSICAL_NETWORK "You must specify the PHYSICAL_NETWORK" - die_if_not_set $LINENO PROVIDER_NETWORK_TYPE "You must specifiy the PROVIDER_NETWORK_TYPE" - NET_ID=$(neutron net-create $PHYSICAL_NETWORK --tenant_id $TENANT_ID --provider:network_type $PROVIDER_NETWORK_TYPE --provider:physical_network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider:segmentation_id $SEGMENTATION_ID} --shared | grep ' id ' | get_field 2) - die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $TENANT_ID" - - if [[ "$IP_VERSION" =~ 4.* ]]; then - SUBNET_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --name $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) - die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $PROVIDER_SUBNET_NAME $TENANT_ID" - fi - - if [[ "$IP_VERSION" =~ .*6 ]]; then - SUBNET_V6_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 6 --ipv6-address-mode slaac --gateway $V6_NETWORK_GATEWAY --name $PROVIDER_SUBNET_NAME_V6 $NET_ID $FIXED_RANGE_V6 | grep 'id' | get_field 2) - die_if_not_set $LINENO SUBNET_V6_ID "Failure creating SUBNET_V6_ID for $PROVIDER_SUBNET_NAME_V6 $TENANT_ID" - fi - - sudo ip link set $OVS_PHYSICAL_BRIDGE up - sudo ip link set br-int up - sudo ip link set $PUBLIC_INTERFACE up - else - NET_ID=$(neutron net-create --tenant-id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) - die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PRIVATE_NETWORK_NAME $TENANT_ID" - - if [[ "$IP_VERSION" =~ 
4.* ]]; then - # Create IPv4 private subnet - SUBNET_ID=$(_neutron_create_private_subnet_v4) - fi - - if [[ "$IP_VERSION" =~ .*6 ]]; then - # Create IPv6 private subnet - IPV6_SUBNET_ID=$(_neutron_create_private_subnet_v6) - fi - fi - - if [[ "$Q_L3_ENABLED" == "True" ]]; then - # Create a router, and add the private subnet as one of its interfaces - if [[ "$Q_L3_ROUTER_PER_TENANT" == "True" ]]; then - # create a tenant-owned router. - ROUTER_ID=$(neutron router-create --tenant-id $TENANT_ID $Q_ROUTER_NAME | grep ' id ' | get_field 2) - die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $TENANT_ID $Q_ROUTER_NAME" - else - # Plugin only supports creating a single router, which should be admin owned. - ROUTER_ID=$(neutron router-create $Q_ROUTER_NAME | grep ' id ' | get_field 2) - die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $Q_ROUTER_NAME" - fi - - # Create an external network, and a subnet. Configure the external network as router gw - if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" = "True" ]; then - EXT_NET_ID=$(neutron net-create "$PUBLIC_NETWORK_NAME" -- --router:external=True --provider:network_type=flat --provider:physical_network=${PUBLIC_PHYSICAL_NETWORK} | grep ' id ' | get_field 2) - else - EXT_NET_ID=$(neutron net-create "$PUBLIC_NETWORK_NAME" -- --router:external=True | grep ' id ' | get_field 2) - fi - die_if_not_set $LINENO EXT_NET_ID "Failure creating EXT_NET_ID for $PUBLIC_NETWORK_NAME" - - if [[ "$IP_VERSION" =~ 4.* ]]; then - # Configure router for IPv4 public access - _neutron_configure_router_v4 - fi - - if [[ "$IP_VERSION" =~ .*6 ]]; then - # Configure router for IPv6 public access - _neutron_configure_router_v6 - fi - fi -} - -# init_neutron() - Initialize databases, etc. 
-function init_neutron { - recreate_database $Q_DB_NAME - # Run Neutron db migrations - $NEUTRON_BIN_DIR/neutron-db-manage --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE upgrade head - for svc in fwaas lbaas vpnaas; do - if [ "$svc" = "vpnaas" ]; then - q_svc="q-vpn" - else - q_svc="q-$svc" - fi - if is_service_enabled $q_svc; then - $NEUTRON_BIN_DIR/neutron-db-manage --service $svc --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE upgrade head - fi - done -} - -# install_neutron() - Collect source and prepare -function install_neutron { - git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH - setup_develop $NEUTRON_DIR - if is_service_enabled q-fwaas; then - git_clone $NEUTRON_FWAAS_REPO $NEUTRON_FWAAS_DIR $NEUTRON_FWAAS_BRANCH - setup_develop $NEUTRON_FWAAS_DIR - fi - if is_service_enabled q-lbaas; then - git_clone $NEUTRON_LBAAS_REPO $NEUTRON_LBAAS_DIR $NEUTRON_LBAAS_BRANCH - setup_develop $NEUTRON_LBAAS_DIR - fi - if is_service_enabled q-vpn; then - git_clone $NEUTRON_VPNAAS_REPO $NEUTRON_VPNAAS_DIR $NEUTRON_VPNAAS_BRANCH - setup_develop $NEUTRON_VPNAAS_DIR - fi - - if [ "$VIRT_DRIVER" == 'xenserver' ]; then - local dom0_ip - dom0_ip=$(echo "$XENAPI_CONNECTION_URL" | cut -d "/" -f 3-) - - local ssh_dom0 - ssh_dom0="sudo -u $DOMZERO_USER ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null root@$dom0_ip" - - # Find where the plugins should go in dom0 - local xen_functions - xen_functions=$(cat $TOP_DIR/tools/xen/functions) - local plugin_dir - plugin_dir=$($ssh_dom0 "$xen_functions; set -eux; xapi_plugin_location") - - # install neutron plugins to dom0 - tar -czf - -C $NEUTRON_DIR/neutron/plugins/openvswitch/agent/xenapi/etc/xapi.d/plugins/ ./ | - $ssh_dom0 "tar -xzf - -C $plugin_dir && chmod a+x $plugin_dir/*" - fi -} - -# install_neutronclient() - Collect source and prepare -function install_neutronclient { - if use_library_from_git "python-neutronclient"; then - git_clone_by_name "python-neutronclient" - setup_dev_lib 
"python-neutronclient" - sudo install -D -m 0644 -o $STACK_USER {${GITDIR["python-neutronclient"]}/tools/,/etc/bash_completion.d/}neutron.bash_completion - fi -} - -# install_neutron_agent_packages() - Collect source and prepare -function install_neutron_agent_packages { - # radvd doesn't come with the OS. Install it if the l3 service is enabled. - if is_service_enabled q-l3; then - install_package radvd - fi - # install packages that are specific to plugin agent(s) - if is_service_enabled q-agt q-dhcp q-l3; then - neutron_plugin_install_agent_packages - fi - - if is_service_enabled q-lbaas; then - neutron_agent_lbaas_install_agent_packages - fi -} - -# Start running processes, including screen -function start_neutron_service_and_check { - local cfg_file_options="$(determine_config_files neutron-server)" - local service_port=$Q_PORT - local service_protocol=$Q_PROTOCOL - if is_service_enabled tls-proxy; then - service_port=$Q_PORT_INT - service_protocol="http" - fi - # Start the Neutron service - run_process q-svc "python $NEUTRON_BIN_DIR/neutron-server $cfg_file_options" - echo "Waiting for Neutron to start..." - if is_ssl_enabled_service "neutron"; then - ssl_ca="--ca-certificate=${SSL_BUNDLE_FILE}" - fi - if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
wget ${ssl_ca} --no-proxy -q -O- $service_protocol://$Q_HOST:$service_port; do sleep 1; done"; then - die $LINENO "Neutron did not start" - fi - # Start proxy if enabled - if is_service_enabled tls-proxy; then - start_tls_proxy '*' $Q_PORT $Q_HOST $Q_PORT_INT & - fi -} - -# Start running processes, including screen -function start_neutron_agents { - # Start up the neutron agents if enabled - run_process q-agt "python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" - run_process q-dhcp "python $AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file=$Q_DHCP_CONF_FILE" - - if is_provider_network; then - sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_PHYSICAL_BRIDGE $PUBLIC_INTERFACE - sudo ip link set $OVS_PHYSICAL_BRIDGE up - sudo ip link set br-int up - sudo ip link set $PUBLIC_INTERFACE up - if is_ironic_hardware; then - for IP in $(ip addr show dev $PUBLIC_INTERFACE | grep ' inet ' | awk '{print $2}'); do - sudo ip addr del $IP dev $PUBLIC_INTERFACE - sudo ip addr add $IP dev $OVS_PHYSICAL_BRIDGE - done - sudo route add -net $FIXED_RANGE gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE - fi - fi - - if is_service_enabled q-vpn; then - run_process q-vpn "$AGENT_VPN_BINARY $(determine_config_files neutron-vpn-agent)" - else - run_process q-l3 "python $AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)" - fi - - run_process q-meta "python $AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file=$Q_META_CONF_FILE" - - if [ "$VIRT_DRIVER" = 'xenserver' ]; then - # For XenServer, start an agent for the domU openvswitch - run_process q-domua "python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE.domU" - fi - - if is_service_enabled q-lbaas; then - run_process q-lbaas "python $AGENT_LBAAS_BINARY --config-file $NEUTRON_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME" - fi - - if is_service_enabled q-metering; then - run_process q-metering "python $AGENT_METERING_BINARY --config-file $NEUTRON_CONF 
--config-file $METERING_AGENT_CONF_FILENAME" - fi -} - -# stop_neutron() - Stop running processes (non-screen) -function stop_neutron { - if is_service_enabled q-dhcp; then - stop_process q-dhcp - pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }') - [ ! -z "$pid" ] && sudo kill -9 $pid - fi - - stop_process q-svc - stop_process q-l3 - - if is_service_enabled q-meta; then - sudo pkill -9 -f neutron-ns-metadata-proxy || : - stop_process q-meta - fi - - stop_process q-agt - - if is_service_enabled q-lbaas; then - neutron_lbaas_stop - fi - if is_service_enabled q-fwaas; then - neutron_fwaas_stop - fi - if is_service_enabled q-vpn; then - neutron_vpn_stop - fi - if is_service_enabled q-metering; then - neutron_metering_stop - fi -} - -# cleanup_neutron() - Remove residual data files, anything left over from previous -# runs that a clean run would need to clean up -function cleanup_neutron { - if is_provider_network && is_ironic_hardware; then - for IP in $(ip addr show dev $OVS_PHYSICAL_BRIDGE | grep ' inet ' | awk '{print $2}'); do - sudo ip addr del $IP dev $OVS_PHYSICAL_BRIDGE - sudo ip addr add $IP dev $PUBLIC_INTERFACE - done - sudo route del -net $FIXED_RANGE gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE - fi - - if is_neutron_ovs_base_plugin; then - neutron_ovs_base_cleanup - fi - - # delete all namespaces created by neutron - for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|qlbaas|fip|snat)-[0-9a-f-]*'); do - sudo ip netns delete ${ns} - done -} - - -function _create_neutron_conf_dir { - # Put config files in ``NEUTRON_CONF_DIR`` for everyone to find - sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR -} - -# _configure_neutron_common() -# Set common config for all neutron server and agents. -# This MUST be called before other ``_configure_neutron_*`` functions. 
-function _configure_neutron_common { - _create_neutron_conf_dir - - cp $NEUTRON_DIR/etc/neutron.conf $NEUTRON_CONF - - # Set plugin-specific variables ``Q_DB_NAME``, ``Q_PLUGIN_CLASS``. - # For main plugin config file, set ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``. - # For addition plugin config files, set ``Q_PLUGIN_EXTRA_CONF_PATH``, - # ``Q_PLUGIN_EXTRA_CONF_FILES``. For example: - # - # ``Q_PLUGIN_EXTRA_CONF_FILES=(file1, file2)`` - neutron_plugin_configure_common - - if [[ "$Q_PLUGIN_CONF_PATH" == '' || "$Q_PLUGIN_CONF_FILENAME" == '' || "$Q_PLUGIN_CLASS" == '' ]]; then - die $LINENO "Neutron plugin not set.. exiting" - fi - - # If needed, move config file from ``$NEUTRON_DIR/etc/neutron`` to ``NEUTRON_CONF_DIR`` - mkdir -p /$Q_PLUGIN_CONF_PATH - Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME - cp $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE - - iniset $NEUTRON_CONF database connection `database_connection_url $Q_DB_NAME` - iniset $NEUTRON_CONF DEFAULT state_path $DATA_DIR/neutron - iniset $NEUTRON_CONF DEFAULT use_syslog $SYSLOG - # If addition config files are set, make sure their path name is set as well - if [[ ${#Q_PLUGIN_EXTRA_CONF_FILES[@]} > 0 && $Q_PLUGIN_EXTRA_CONF_PATH == '' ]]; then - die $LINENO "Neutron additional plugin config not set.. 
exiting" - fi - - # If additional config files exist, copy them over to neutron configuration - # directory - if [[ $Q_PLUGIN_EXTRA_CONF_PATH != '' ]]; then - local f - for (( f=0; $f < ${#Q_PLUGIN_EXTRA_CONF_FILES[@]}; f+=1 )); do - Q_PLUGIN_EXTRA_CONF_FILES[$f]=$Q_PLUGIN_EXTRA_CONF_PATH/${Q_PLUGIN_EXTRA_CONF_FILES[$f]} - done - fi - - if [ "$VIRT_DRIVER" = 'fake' ]; then - # Disable arbitrary limits - iniset $NEUTRON_CONF quotas quota_network -1 - iniset $NEUTRON_CONF quotas quota_subnet -1 - iniset $NEUTRON_CONF quotas quota_port -1 - iniset $NEUTRON_CONF quotas quota_security_group -1 - iniset $NEUTRON_CONF quotas quota_security_group_rule -1 - fi - - # Format logging - if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then - setup_colorized_logging $NEUTRON_CONF DEFAULT project_id - else - # Show user_name and project_name by default like in nova - iniset $NEUTRON_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s" - fi - - if is_service_enabled tls-proxy; then - # Set the service port for a proxy to take the original - iniset $NEUTRON_CONF DEFAULT bind_port "$Q_PORT_INT" - fi - - if is_ssl_enabled_service "nova"; then - iniset $NEUTRON_CONF nova cafile $SSL_BUNDLE_FILE - fi - - if is_ssl_enabled_service "neutron"; then - ensure_certificates NEUTRON - - iniset $NEUTRON_CONF DEFAULT use_ssl True - iniset $NEUTRON_CONF DEFAULT ssl_cert_file "$NEUTRON_SSL_CERT" - iniset $NEUTRON_CONF DEFAULT ssl_key_file "$NEUTRON_SSL_KEY" - fi - - _neutron_setup_rootwrap -} - -function _configure_neutron_debug_command { - if [[ "$Q_USE_DEBUG_COMMAND" != "True" ]]; then - return - fi - - cp $NEUTRON_DIR/etc/l3_agent.ini $NEUTRON_TEST_CONFIG_FILE - - iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT verbose False - iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT debug False - iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE - iniset 
$NEUTRON_TEST_CONFIG_FILE agent root_helper "$Q_RR_COMMAND" - if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - iniset $NEUTRON_TEST_CONFIG_FILE agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" - fi - - _neutron_setup_interface_driver $NEUTRON_TEST_CONFIG_FILE - - neutron_plugin_configure_debug_command -} - -function _configure_neutron_dhcp_agent { - - cp $NEUTRON_DIR/etc/dhcp_agent.ini $Q_DHCP_CONF_FILE - - iniset $Q_DHCP_CONF_FILE DEFAULT verbose True - iniset $Q_DHCP_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE - iniset $Q_DHCP_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" - if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - iniset $NEUTRON_TEST_CONFIG_FILE agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" - fi - - if ! is_service_enabled q-l3; then - if [[ "$ENABLE_ISOLATED_METADATA" = "True" ]]; then - iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata $ENABLE_ISOLATED_METADATA - iniset $Q_DHCP_CONF_FILE DEFAULT enable_metadata_network $ENABLE_METADATA_NETWORK - else - if [[ "$ENABLE_METADATA_NETWORK" = "True" ]]; then - die "$LINENO" "Enable isolated metadata is a must for metadata network" - fi - fi - fi - - _neutron_setup_interface_driver $Q_DHCP_CONF_FILE - - neutron_plugin_configure_dhcp_agent -} - -function _configure_neutron_l3_agent { - local cfg_file - Q_L3_ENABLED=True - # for l3-agent, only use per tenant router if we have namespaces - Q_L3_ROUTER_PER_TENANT=$Q_USE_NAMESPACE - - if is_service_enabled q-vpn; then - neutron_vpn_configure_agent - fi - - cp $NEUTRON_DIR/etc/l3_agent.ini $Q_L3_CONF_FILE - - iniset $Q_L3_CONF_FILE DEFAULT verbose True - iniset $Q_L3_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $Q_L3_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE - iniset $Q_L3_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" - if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - iniset $Q_L3_CONF_FILE agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" - fi - - 
_neutron_setup_interface_driver $Q_L3_CONF_FILE - - neutron_plugin_configure_l3_agent -} - -function _configure_neutron_metadata_agent { - cp $NEUTRON_DIR/etc/metadata_agent.ini $Q_META_CONF_FILE - - iniset $Q_META_CONF_FILE DEFAULT verbose True - iniset $Q_META_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $Q_META_CONF_FILE DEFAULT nova_metadata_ip $Q_META_DATA_IP - iniset $Q_META_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" - if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - iniset $Q_META_CONF_FILE agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" - fi - - # Configures keystone for metadata_agent - # The third argument "True" sets auth_url needed to communicate with keystone - _neutron_setup_keystone $Q_META_CONF_FILE DEFAULT True - -} - -function _configure_neutron_ceilometer_notifications { - iniset $NEUTRON_CONF DEFAULT notification_driver messaging -} - -function _configure_neutron_lbaas { - if [ -f $NEUTRON_LBAAS_DIR/etc/neutron_lbaas.conf ]; then - cp $NEUTRON_LBAAS_DIR/etc/neutron_lbaas.conf $NEUTRON_CONF_DIR - fi - neutron_agent_lbaas_configure_common - neutron_agent_lbaas_configure_agent -} - -function _configure_neutron_metering { - neutron_agent_metering_configure_common - neutron_agent_metering_configure_agent -} - -function _configure_neutron_fwaas { - if [ -f $NEUTRON_FWAAS_DIR/etc/neutron_fwaas.conf ]; then - cp $NEUTRON_FWAAS_DIR/etc/neutron_fwaas.conf $NEUTRON_CONF_DIR - fi - neutron_fwaas_configure_common - neutron_fwaas_configure_driver -} - -function _configure_neutron_vpn { - if [ -f $NEUTRON_VPNAAS_DIR/etc/neutron_vpnaas.conf ]; then - cp $NEUTRON_VPNAAS_DIR/etc/neutron_vpnaas.conf $NEUTRON_CONF_DIR - fi - neutron_vpn_install_agent_packages - neutron_vpn_configure_common -} - -function _configure_dvr { - iniset $NEUTRON_CONF DEFAULT router_distributed True - iniset $Q_L3_CONF_FILE DEFAULT agent_mode $Q_DVR_MODE -} - - -# _configure_neutron_plugin_agent() - Set config files for neutron plugin agent -# It is called when q-agt is 
enabled. -function _configure_neutron_plugin_agent { - # Specify the default root helper prior to agent configuration to - # ensure that an agent's configuration can override the default - iniset /$Q_PLUGIN_CONF_FILE agent root_helper "$Q_RR_COMMAND" - if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - iniset /$Q_PLUGIN_CONF_FILE agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" - fi - iniset $NEUTRON_CONF DEFAULT verbose True - iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - - # Configure agent for plugin - neutron_plugin_configure_plugin_agent -} - -# _configure_neutron_service() - Set config files for neutron service -# It is called when q-svc is enabled. -function _configure_neutron_service { - Q_API_PASTE_FILE=$NEUTRON_CONF_DIR/api-paste.ini - Q_POLICY_FILE=$NEUTRON_CONF_DIR/policy.json - - cp $NEUTRON_DIR/etc/api-paste.ini $Q_API_PASTE_FILE - cp $NEUTRON_DIR/etc/policy.json $Q_POLICY_FILE - - # allow neutron user to administer neutron to match neutron account - sed -i 's/"context_is_admin": "role:admin"/"context_is_admin": "role:admin or user_name:neutron"/g' $Q_POLICY_FILE - - # Update either configuration file with plugin - iniset $NEUTRON_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS - - if [[ $Q_SERVICE_PLUGIN_CLASSES != '' ]]; then - iniset $NEUTRON_CONF DEFAULT service_plugins $Q_SERVICE_PLUGIN_CLASSES - fi - - iniset $NEUTRON_CONF DEFAULT verbose True - iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $NEUTRON_CONF DEFAULT policy_file $Q_POLICY_FILE - iniset $NEUTRON_CONF DEFAULT allow_overlapping_ips $Q_ALLOW_OVERLAPPING_IP - - iniset $NEUTRON_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY - _neutron_setup_keystone $NEUTRON_CONF keystone_authtoken - - # Configuration for neutron notifations to nova. 
- iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_status_changes $Q_NOTIFY_NOVA_PORT_STATUS_CHANGES - iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_PORT_DATA_CHANGES - - iniset $NEUTRON_CONF nova auth_plugin password - iniset $NEUTRON_CONF nova auth_url $KEYSTONE_AUTH_URI - iniset $NEUTRON_CONF nova username nova - iniset $NEUTRON_CONF nova password $SERVICE_PASSWORD - iniset $NEUTRON_CONF nova user_domain_id default - iniset $NEUTRON_CONF nova project_name $SERVICE_TENANT_NAME - iniset $NEUTRON_CONF nova project_domain_id default - iniset $NEUTRON_CONF nova region_name $REGION_NAME - - # Configure plugin - neutron_plugin_configure_service -} - -# Utility Functions -#------------------ - -# _neutron_service_plugin_class_add() - add service plugin class -function _neutron_service_plugin_class_add { - local service_plugin_class=$1 - if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then - Q_SERVICE_PLUGIN_CLASSES=$service_plugin_class - elif [[ ! ,${Q_SERVICE_PLUGIN_CLASSES}, =~ ,${service_plugin_class}, ]]; then - Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$service_plugin_class" - fi -} - -# _neutron_deploy_rootwrap_filters() - deploy rootwrap filters to $Q_CONF_ROOTWRAP_D (owned by root). 
-function _neutron_deploy_rootwrap_filters { - local srcdir=$1 - sudo install -d -o root -m 755 $Q_CONF_ROOTWRAP_D - sudo install -o root -m 644 $srcdir/etc/neutron/rootwrap.d/* $Q_CONF_ROOTWRAP_D/ -} - -# _neutron_setup_rootwrap() - configure Neutron's rootwrap -function _neutron_setup_rootwrap { - if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then - return - fi - # Wipe any existing ``rootwrap.d`` files first - Q_CONF_ROOTWRAP_D=$NEUTRON_CONF_DIR/rootwrap.d - if [[ -d $Q_CONF_ROOTWRAP_D ]]; then - sudo rm -rf $Q_CONF_ROOTWRAP_D - fi - - _neutron_deploy_rootwrap_filters $NEUTRON_DIR - - # Set up ``rootwrap.conf``, pointing to ``$NEUTRON_CONF_DIR/rootwrap.d`` - # location moved in newer versions, prefer new location - if test -r $NEUTRON_DIR/etc/neutron/rootwrap.conf; then - sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/neutron/rootwrap.conf $Q_RR_CONF_FILE - else - sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE - fi - sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE - # Specify ``rootwrap.conf`` as first parameter to neutron-rootwrap - ROOTWRAP_SUDOER_CMD="$NEUTRON_ROOTWRAP $Q_RR_CONF_FILE *" - ROOTWRAP_DAEMON_SUDOER_CMD="$NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE" - - # Set up the rootwrap sudoers for neutron - TEMPFILE=`mktemp` - echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE - echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_DAEMON_SUDOER_CMD" >>$TEMPFILE - chmod 0440 $TEMPFILE - sudo chown root:root $TEMPFILE - sudo mv $TEMPFILE /etc/sudoers.d/neutron-rootwrap - - # Update the root_helper - iniset $NEUTRON_CONF agent root_helper "$Q_RR_COMMAND" - if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - iniset $NEUTRON_CONF agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" - fi -} - -# Configures keystone integration for neutron service and agents -function _neutron_setup_keystone { - local conf_file=$1 - local section=$2 - local use_auth_url=$3 - - # Configures keystone 
for metadata_agent - # metadata_agent needs auth_url to communicate with keystone - if [[ "$use_auth_url" == "True" ]]; then - iniset $conf_file $section auth_url $KEYSTONE_SERVICE_URI/v2.0 - fi - - create_neutron_cache_dir - configure_auth_token_middleware $conf_file $Q_ADMIN_USERNAME $NEUTRON_AUTH_CACHE_DIR $section -} - -function _neutron_setup_interface_driver { - - # ovs_use_veth needs to be set before the plugin configuration - # occurs to allow plugins to override the setting. - iniset $1 DEFAULT ovs_use_veth $Q_OVS_USE_VETH - - neutron_plugin_setup_interface_driver $1 -} - -# Create private IPv4 subnet -function _neutron_create_private_subnet_v4 { - local subnet_params="--tenant-id $TENANT_ID " - subnet_params+="--ip_version 4 " - subnet_params+="--gateway $NETWORK_GATEWAY " - subnet_params+="--name $PRIVATE_SUBNET_NAME " - subnet_params+="$NET_ID $FIXED_RANGE" - local subnet_id=$(neutron subnet-create $subnet_params | grep ' id ' | get_field 2) - die_if_not_set $LINENO subnet_id "Failure creating private IPv4 subnet for $TENANT_ID" - echo $subnet_id -} - -# Create private IPv6 subnet -function _neutron_create_private_subnet_v6 { - die_if_not_set $LINENO IPV6_RA_MODE "IPV6 RA Mode not set" - die_if_not_set $LINENO IPV6_ADDRESS_MODE "IPV6 Address Mode not set" - local ipv6_modes="--ipv6-ra-mode $IPV6_RA_MODE --ipv6-address-mode $IPV6_ADDRESS_MODE" - local subnet_params="--tenant-id $TENANT_ID " - subnet_params+="--ip_version 6 " - subnet_params+="--gateway $IPV6_PRIVATE_NETWORK_GATEWAY " - subnet_params+="--name $IPV6_PRIVATE_SUBNET_NAME " - subnet_params+="$NET_ID $FIXED_RANGE_V6 $ipv6_modes" - local ipv6_subnet_id=$(neutron subnet-create $subnet_params | grep ' id ' | get_field 2) - die_if_not_set $LINENO ipv6_subnet_id "Failure creating private IPv6 subnet for $TENANT_ID" - echo $ipv6_subnet_id -} - -# Create public IPv4 subnet -function _neutron_create_public_subnet_v4 { - local subnet_params+="--ip_version 4 " - 
subnet_params+="${Q_FLOATING_ALLOCATION_POOL:+--allocation-pool $Q_FLOATING_ALLOCATION_POOL} " - subnet_params+="--gateway $PUBLIC_NETWORK_GATEWAY " - subnet_params+="--name $PUBLIC_SUBNET_NAME " - subnet_params+="$EXT_NET_ID $FLOATING_RANGE " - subnet_params+="-- --enable_dhcp=False" - local id_and_ext_gw_ip=$(neutron subnet-create $subnet_params | grep -e 'gateway_ip' -e ' id ') - die_if_not_set $LINENO id_and_ext_gw_ip "Failure creating public IPv4 subnet" - echo $id_and_ext_gw_ip -} - -# Create public IPv6 subnet -function _neutron_create_public_subnet_v6 { - local subnet_params="--ip_version 6 " - subnet_params+="--gateway $IPV6_PUBLIC_NETWORK_GATEWAY " - subnet_params+="--name $IPV6_PUBLIC_SUBNET_NAME " - subnet_params+="$EXT_NET_ID $IPV6_PUBLIC_RANGE " - subnet_params+="-- --enable_dhcp=False" - local ipv6_id_and_ext_gw_ip=$(neutron subnet-create $subnet_params | grep -e 'gateway_ip' -e ' id ') - die_if_not_set $LINENO ipv6_id_and_ext_gw_ip "Failure creating an IPv6 public subnet" - echo $ipv6_id_and_ext_gw_ip -} - -# Configure neutron router for IPv4 public access -function _neutron_configure_router_v4 { - neutron router-interface-add $ROUTER_ID $SUBNET_ID - # Create a public subnet on the external network - local id_and_ext_gw_ip=$(_neutron_create_public_subnet_v4 $EXT_NET_ID) - local ext_gw_ip=$(echo $id_and_ext_gw_ip | get_field 2) - PUB_SUBNET_ID=$(echo $id_and_ext_gw_ip | get_field 5) - # Configure the external network as the default router gateway - neutron router-gateway-set $ROUTER_ID $EXT_NET_ID - - # This logic is specific to using the l3-agent for layer 3 - if is_service_enabled q-l3; then - # Configure and enable public bridge - if is_neutron_ovs_base_plugin && [[ "$Q_USE_NAMESPACE" = "True" ]]; then - local ext_gw_interface=$(_neutron_get_ext_gw_interface) - local cidr_len=${FLOATING_RANGE#*/} - sudo ip addr add $ext_gw_ip/$cidr_len dev $ext_gw_interface - sudo ip link set $ext_gw_interface up - ROUTER_GW_IP=`neutron port-list -c fixed_ips -c 
device_owner | grep router_gateway | awk -F '"' -v subnet_id=$PUB_SUBNET_ID '$4 == subnet_id { print $8; }'` - die_if_not_set $LINENO ROUTER_GW_IP "Failure retrieving ROUTER_GW_IP" - sudo route add -net $FIXED_RANGE gw $ROUTER_GW_IP - fi - _neutron_set_router_id - fi -} - -# Configure neutron router for IPv6 public access -function _neutron_configure_router_v6 { - neutron router-interface-add $ROUTER_ID $IPV6_SUBNET_ID - # Create a public subnet on the external network - local ipv6_id_and_ext_gw_ip=$(_neutron_create_public_subnet_v6 $EXT_NET_ID) - local ipv6_ext_gw_ip=$(echo $ipv6_id_and_ext_gw_ip | get_field 2) - local ipv6_pub_subnet_id=$(echo $ipv6_id_and_ext_gw_ip | get_field 5) - - # If the external network has not already been set as the default router - # gateway when configuring an IPv4 public subnet, do so now - if [[ "$IP_VERSION" == "6" ]]; then - neutron router-gateway-set $ROUTER_ID $EXT_NET_ID - fi - - # This logic is specific to using the l3-agent for layer 3 - if is_service_enabled q-l3; then - local ipv6_router_gw_port - # Ensure IPv6 forwarding is enabled on the host - sudo sysctl -w net.ipv6.conf.all.forwarding=1 - # Configure and enable public bridge - if [[ "$IP_VERSION" = "6" ]]; then - # Override global IPV6_ROUTER_GW_IP with the true value from neutron - IPV6_ROUTER_GW_IP=`neutron port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F '"' -v subnet_id=$ipv6_pub_subnet_id '$4 == subnet_id { print $8; }'` - die_if_not_set $LINENO IPV6_ROUTER_GW_IP "Failure retrieving IPV6_ROUTER_GW_IP" - ipv6_router_gw_port=`neutron port-list -c id -c fixed_ips -c device_owner | grep router_gateway | awk -F '"' -v subnet_id=$ipv6_pub_subnet_id '$4 == subnet_id { print $1; }' | awk -F ' | ' '{ print $2; }'` - die_if_not_set $LINENO ipv6_router_gw_port "Failure retrieving ipv6_router_gw_port" - else - ipv6_router_gw_port=`neutron port-list -c id -c fixed_ips -c device_owner | grep router_gateway | awk -F '"' -v subnet_id=$PUB_SUBNET_ID '$4 == 
subnet_id { print $1; }' | awk -F ' | ' '{ print $2; }'` - die_if_not_set $LINENO ipv6_router_gw_port "Failure retrieving ipv6_router_gw_port" - fi - - # The ovs_base_configure_l3_agent function flushes the public - # bridge's ip addresses, so turn IPv6 support in the host off - # and then on to recover the public bridge's link local address - sudo sysctl -w net.ipv6.conf.${PUBLIC_BRIDGE}.disable_ipv6=1 - sudo sysctl -w net.ipv6.conf.${PUBLIC_BRIDGE}.disable_ipv6=0 - if ! ip -6 addr show dev $PUBLIC_BRIDGE | grep 'scope global'; then - # Create an IPv6 ULA address for PUBLIC_BRIDGE if one is not present - IPV6_BRIDGE_ULA=`uuidgen | sed s/-//g | cut -c 23- | sed -e "s/\(..\)\(....\)\(....\)/\1:\2:\3/"` - sudo ip -6 addr add fd$IPV6_BRIDGE_ULA::1 dev $PUBLIC_BRIDGE - fi - - if is_neutron_ovs_base_plugin && [[ "$Q_USE_NAMESPACE" = "True" ]]; then - local ext_gw_interface=$(_neutron_get_ext_gw_interface) - local ipv6_cidr_len=${IPV6_PUBLIC_RANGE#*/} - - # Define router_ns based on whether DVR is enabled - local router_ns=qrouter - if [[ "$Q_DVR_MODE" == "dvr_snat" ]]; then - router_ns=snat - fi - - # Configure interface for public bridge - sudo ip -6 addr add $ipv6_ext_gw_ip/$ipv6_cidr_len dev $ext_gw_interface - - # Wait until layer 3 agent has configured the gateway port on - # the public bridge, then add gateway address to the interface - # TODO (john-davidge) Remove once l3-agent supports dual-stack - if [[ "$IP_VERSION" == "4+6" ]]; then - if ! 
timeout $GATEWAY_TIMEOUT sh -c "until sudo ip netns exec $router_ns-$ROUTER_ID ip addr show qg-${ipv6_router_gw_port:0:11} | grep $ROUTER_GW_IP; do sleep 1; done"; then - die $LINENO "Timeout retrieving ROUTER_GW_IP" - fi - # Configure the gateway port with the public IPv6 adress - sudo ip netns exec $router_ns-$ROUTER_ID ip -6 addr add $IPV6_ROUTER_GW_IP/$ipv6_cidr_len dev qg-${ipv6_router_gw_port:0:11} - # Add a default IPv6 route to the neutron router as the - # l3-agent does not add one in the dual-stack case - sudo ip netns exec $router_ns-$ROUTER_ID ip -6 route replace default via $ipv6_ext_gw_ip dev qg-${ipv6_router_gw_port:0:11} - fi - sudo ip -6 route add $FIXED_RANGE_V6 via $IPV6_ROUTER_GW_IP dev $ext_gw_interface - fi - _neutron_set_router_id - fi -} - -# Explicitly set router id in l3 agent configuration -function _neutron_set_router_id { - if [[ "$Q_USE_NAMESPACE" == "False" ]]; then - iniset $Q_L3_CONF_FILE DEFAULT router_id $ROUTER_ID - fi -} - -# Get ext_gw_interface depending on value of Q_USE_PUBLIC_VETH -function _neutron_get_ext_gw_interface { - if [[ "$Q_USE_PUBLIC_VETH" == "True" ]]; then - echo $Q_PUBLIC_VETH_EX - else - # Disable in-band as we are going to use local port - # to communicate with VMs - sudo ovs-vsctl set Bridge $PUBLIC_BRIDGE \ - other_config:disable-in-band=true - echo $PUBLIC_BRIDGE - fi -} - -# Functions for Neutron Exercises -#-------------------------------- - -function delete_probe { - local from_net="$1" - net_id=`_get_net_id $from_net` - probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}'` - neutron-debug --os-tenant-name admin --os-username admin probe-delete $probe_id -} - -function setup_neutron_debug { - if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then - public_net_id=`_get_net_id $PUBLIC_NETWORK_NAME` - neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create 
--device-owner compute $public_net_id - private_net_id=`_get_net_id $PRIVATE_NETWORK_NAME` - neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create --device-owner compute $private_net_id - fi -} - -function teardown_neutron_debug { - delete_probe $PUBLIC_NETWORK_NAME - delete_probe $PRIVATE_NETWORK_NAME -} - -function _get_net_id { - neutron --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD net-list | grep $1 | awk '{print $2}' -} - -function _get_probe_cmd_prefix { - local from_net="$1" - net_id=`_get_net_id $from_net` - probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}' | head -n 1` - echo "$Q_RR_COMMAND ip netns exec qprobe-$probe_id" -} - -function _ping_check_neutron { - local from_net=$1 - local ip=$2 - local timeout_sec=$3 - local expected=${4:-"True"} - local check_command="" - probe_cmd=`_get_probe_cmd_prefix $from_net` - if [[ "$expected" = "True" ]]; then - check_command="while ! $probe_cmd ping -w 1 -c 1 $ip; do sleep 1; done" - else - check_command="while $probe_cmd ping -w 1 -c 1 $ip; do sleep 1; done" - fi - if ! timeout $timeout_sec sh -c "$check_command"; then - if [[ "$expected" = "True" ]]; then - die $LINENO "[Fail] Couldn't ping server" - else - die $LINENO "[Fail] Could ping server" - fi - fi -} - -# ssh check -function _ssh_check_neutron { - local from_net=$1 - local key_file=$2 - local ip=$3 - local user=$4 - local timeout_sec=$5 - local probe_cmd = "" - probe_cmd=`_get_probe_cmd_prefix $from_net` - if ! timeout $timeout_sec sh -c "while ! $probe_cmd ssh -o StrictHostKeyChecking=no -i $key_file ${user}@$ip echo success; do sleep 1; done"; then - die $LINENO "server didn't become ssh-able!" 
- fi -} - -# Neutron 3rd party programs -#--------------------------- - -# please refer to ``lib/neutron_thirdparty/README.md`` for details -NEUTRON_THIRD_PARTIES="" -for f in $TOP_DIR/lib/neutron_thirdparty/*; do - third_party=$(basename $f) - if is_service_enabled $third_party; then - source $TOP_DIR/lib/neutron_thirdparty/$third_party - NEUTRON_THIRD_PARTIES="$NEUTRON_THIRD_PARTIES,$third_party" - fi -done - -function _neutron_third_party_do { - for third_party in ${NEUTRON_THIRD_PARTIES//,/ }; do - ${1}_${third_party} - done -} - -# configure_neutron_third_party() - Set config files, create data dirs, etc -function configure_neutron_third_party { - _neutron_third_party_do configure -} - -# init_neutron_third_party() - Initialize databases, etc. -function init_neutron_third_party { - _neutron_third_party_do init -} - -# install_neutron_third_party() - Collect source and prepare -function install_neutron_third_party { - _neutron_third_party_do install -} - -# start_neutron_third_party() - Start running processes, including screen -function start_neutron_third_party { - _neutron_third_party_do start -} - -# stop_neutron_third_party - Stop running processes (non-screen) -function stop_neutron_third_party { - _neutron_third_party_do stop -} - -# check_neutron_third_party_integration() - Check that third party integration is sane -function check_neutron_third_party_integration { - _neutron_third_party_do check -} - -function is_provider_network { - if [ "$Q_USE_PROVIDER_NETWORKING" == "True" ] && [ "$Q_L3_ENABLED" == "False" ]; then - return 0 - fi - return 1 -} - - -# Restore xtrace -$XTRACE - -# Tell emacs to use shell-script-mode -## Local variables: -## mode: shell-script -## End: diff --git a/lib/neutron b/lib/neutron new file mode 120000 index 0000000000..00cd72273e --- /dev/null +++ b/lib/neutron @@ -0,0 +1 @@ +neutron-legacy \ No newline at end of file diff --git a/lib/neutron-legacy b/lib/neutron-legacy new file mode 100755 index 0000000000..5ff39212fc --- 
/dev/null +++ b/lib/neutron-legacy @@ -0,0 +1,1467 @@ +#!/bin/bash +# +# lib/neutron +# functions - functions specific to neutron + +# Dependencies: +# ``functions`` file +# ``DEST`` must be defined +# ``STACK_USER`` must be defined + +# ``stack.sh`` calls the entry points in this order: +# +# - install_neutron_agent_packages +# - install_neutronclient +# - install_neutron +# - install_neutron_third_party +# - configure_neutron +# - init_neutron +# - configure_neutron_third_party +# - init_neutron_third_party +# - start_neutron_third_party +# - create_nova_conf_neutron +# - start_neutron_service_and_check +# - check_neutron_third_party_integration +# - start_neutron_agents +# - create_neutron_initial_network +# - setup_neutron_debug +# +# ``unstack.sh`` calls the entry points in this order: +# +# - teardown_neutron_debug +# - stop_neutron +# - stop_neutron_third_party +# - cleanup_neutron + +# Functions in lib/neutron are classified into the following categories: +# +# - entry points (called from stack.sh or unstack.sh) +# - internal functions +# - neutron exercises +# - 3rd party programs + + +# Neutron Networking +# ------------------ + +# Make sure that neutron is enabled in ``ENABLED_SERVICES``. If you want +# to run Neutron on this host, make sure that q-svc is also in +# ``ENABLED_SERVICES``. +# +# See "Neutron Network Configuration" below for additional variables +# that must be set in localrc for connectivity across hosts with +# Neutron. +# +# With Neutron networking the NETWORK_MANAGER variable is ignored. 
+ +# Settings +# -------- + +# Timeout value in seconds to wait for IPv6 gateway configuration +GATEWAY_TIMEOUT=30 + + +# Neutron Network Configuration +# ----------------------------- + +# Subnet IP version +IP_VERSION=${IP_VERSION:-4} +# Validate IP_VERSION +if [[ $IP_VERSION != "4" ]] && [[ $IP_VERSION != "6" ]] && [[ $IP_VERSION != "4+6" ]]; then + die $LINENO "IP_VERSION must be either 4, 6, or 4+6" +fi +# Gateway and subnet defaults, in case they are not customized in localrc +NETWORK_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1} +PUBLIC_NETWORK_GATEWAY=${PUBLIC_NETWORK_GATEWAY:-172.24.4.1} +PRIVATE_SUBNET_NAME=${PRIVATE_SUBNET_NAME:-"private-subnet"} +PUBLIC_SUBNET_NAME=${PUBLIC_SUBNET_NAME:-"public-subnet"} + +if is_ssl_enabled_service "neutron" || is_service_enabled tls-proxy; then + Q_PROTOCOL="https" +fi + +# Generate 40-bit IPv6 Global ID to comply with RFC 4193 +IPV6_GLOBAL_ID=`uuidgen | sed s/-//g | cut -c 23- | sed -e "s/\(..\)\(....\)\(....\)/\1:\2:\3/"` + +# IPv6 gateway and subnet defaults, in case they are not customized in localrc +IPV6_RA_MODE=${IPV6_RA_MODE:-slaac} +IPV6_ADDRESS_MODE=${IPV6_ADDRESS_MODE:-slaac} +IPV6_PUBLIC_SUBNET_NAME=${IPV6_PUBLIC_SUBNET_NAME:-ipv6-public-subnet} +IPV6_PRIVATE_SUBNET_NAME=${IPV6_PRIVATE_SUBNET_NAME:-ipv6-private-subnet} +FIXED_RANGE_V6=${FIXED_RANGE_V6:-fd$IPV6_GLOBAL_ID::/64} +IPV6_PRIVATE_NETWORK_GATEWAY=${IPV6_PRIVATE_NETWORK_GATEWAY:-fd$IPV6_GLOBAL_ID::1} +IPV6_PUBLIC_RANGE=${IPV6_PUBLIC_RANGE:-fe80:cafe:cafe::/64} +IPV6_PUBLIC_NETWORK_GATEWAY=${IPV6_PUBLIC_NETWORK_GATEWAY:-fe80:cafe:cafe::2} +# IPV6_ROUTER_GW_IP must be defined when IP_VERSION=4+6 as it cannot be +# obtained conventionally until the l3-agent has support for dual-stack +# TODO (john-davidge) Remove once l3-agent supports dual-stack +IPV6_ROUTER_GW_IP=${IPV6_ROUTER_GW_IP:-fe80:cafe:cafe::1} + +# Set up default directories +GITDIR["python-neutronclient"]=$DEST/python-neutronclient + + +NEUTRON_DIR=$DEST/neutron 
+NEUTRON_FWAAS_DIR=$DEST/neutron-fwaas +NEUTRON_LBAAS_DIR=$DEST/neutron-lbaas +NEUTRON_VPNAAS_DIR=$DEST/neutron-vpnaas +NEUTRON_AUTH_CACHE_DIR=${NEUTRON_AUTH_CACHE_DIR:-/var/cache/neutron} + +# Support entry points installation of console scripts +if [[ -d $NEUTRON_DIR/bin/neutron-server ]]; then + NEUTRON_BIN_DIR=$NEUTRON_DIR/bin +else + NEUTRON_BIN_DIR=$(get_python_exec_prefix) +fi + +NEUTRON_CONF_DIR=/etc/neutron +NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf +export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"} + +# Agent binaries. Note, binary paths for other agents are set in per-service +# scripts in lib/neutron_plugins/services/ +AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent" +AGENT_L3_BINARY=${AGENT_L3_BINARY:-"$NEUTRON_BIN_DIR/neutron-l3-agent"} +AGENT_META_BINARY="$NEUTRON_BIN_DIR/neutron-metadata-agent" + +# Agent config files. Note, plugin-specific Q_PLUGIN_CONF_FILE is set and +# loaded from per-plugin scripts in lib/neutron_plugins/ +Q_DHCP_CONF_FILE=$NEUTRON_CONF_DIR/dhcp_agent.ini +Q_L3_CONF_FILE=$NEUTRON_CONF_DIR/l3_agent.ini +Q_FWAAS_CONF_FILE=$NEUTRON_CONF_DIR/fwaas_driver.ini +Q_VPN_CONF_FILE=$NEUTRON_CONF_DIR/vpn_agent.ini +Q_META_CONF_FILE=$NEUTRON_CONF_DIR/metadata_agent.ini + +# Default name for Neutron database +Q_DB_NAME=${Q_DB_NAME:-neutron} +# Default Neutron Plugin +Q_PLUGIN=${Q_PLUGIN:-ml2} +# Default Neutron Port +Q_PORT=${Q_PORT:-9696} +# Default Neutron Internal Port when using TLS proxy +Q_PORT_INT=${Q_PORT_INT:-19696} +# Default Neutron Host +Q_HOST=${Q_HOST:-$SERVICE_HOST} +# Default protocol +Q_PROTOCOL=${Q_PROTOCOL:-$SERVICE_PROTOCOL} +# Default admin username +Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-neutron} +# Default auth strategy +Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone} +# Use namespace or not +Q_USE_NAMESPACE=${Q_USE_NAMESPACE:-True} +# RHEL's support for namespaces requires using veths with ovs +Q_OVS_USE_VETH=${Q_OVS_USE_VETH:-False} +Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True} 
+Q_USE_ROOTWRAP_DAEMON=$(trueorfalse True Q_USE_ROOTWRAP_DAEMON) +# Meta data IP +Q_META_DATA_IP=${Q_META_DATA_IP:-$SERVICE_HOST} +# Allow Overlapping IP among subnets +Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True} +# Use neutron-debug command +Q_USE_DEBUG_COMMAND=${Q_USE_DEBUG_COMMAND:-False} +# The name of the default q-l3 router +Q_ROUTER_NAME=${Q_ROUTER_NAME:-router1} +# nova vif driver that all plugins should use +NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} +Q_NOTIFY_NOVA_PORT_STATUS_CHANGES=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGES:-True} +Q_NOTIFY_NOVA_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_PORT_DATA_CHANGES:-True} +VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True} +VIF_PLUGGING_TIMEOUT=${VIF_PLUGGING_TIMEOUT:-300} +# Specify if the initial private and external networks should be created +NEUTRON_CREATE_INITIAL_NETWORKS=${NEUTRON_CREATE_INITIAL_NETWORKS:-True} + +## Provider Network Information +PROVIDER_SUBNET_NAME=${PROVIDER_SUBNET_NAME:-"provider_net"} + +# Use flat providernet for public network +# +# If Q_USE_PROVIDERNET_FOR_PUBLIC=True, use a flat provider network +# for external interface of neutron l3-agent. In that case, +# PUBLIC_PHYSICAL_NETWORK specifies provider:physical_network value +# used for the network. In case of ofagent, you should add the +# corresponding entry to your OFAGENT_PHYSICAL_INTERFACE_MAPPINGS. +# For openvswitch agent, you should add the corresponding entry to +# your OVS_BRIDGE_MAPPINGS. +# +# eg. (ofagent) +# Q_USE_PROVIDERNET_FOR_PUBLIC=True +# Q_USE_PUBLIC_VETH=True +# PUBLIC_PHYSICAL_NETWORK=public +# OFAGENT_PHYSICAL_INTERFACE_MAPPINGS=public:veth-pub-int +# +# eg. 
(openvswitch agent) +# Q_USE_PROVIDERNET_FOR_PUBLIC=True +# PUBLIC_PHYSICAL_NETWORK=public +# OVS_BRIDGE_MAPPINGS=public:br-ex +Q_USE_PROVIDERNET_FOR_PUBLIC=${Q_USE_PROVIDERNET_FOR_PUBLIC:-False} +PUBLIC_PHYSICAL_NETWORK=${PUBLIC_PHYSICAL_NETWORK:-public} + +# If Q_USE_PUBLIC_VETH=True, create and use a veth pair instead of +# PUBLIC_BRIDGE. This is intended to be used with +# Q_USE_PROVIDERNET_FOR_PUBLIC=True. +Q_USE_PUBLIC_VETH=${Q_USE_PUBLIC_VETH:-False} +Q_PUBLIC_VETH_EX=${Q_PUBLIC_VETH_EX:-veth-pub-ex} +Q_PUBLIC_VETH_INT=${Q_PUBLIC_VETH_INT:-veth-pub-int} + +# The next two variables are configured by plugin +# e.g. _configure_neutron_l3_agent or lib/neutron_plugins/* +# +# The plugin supports L3. +Q_L3_ENABLED=${Q_L3_ENABLED:-False} +# L3 routers exist per tenant +Q_L3_ROUTER_PER_TENANT=${Q_L3_ROUTER_PER_TENANT:-False} + +# List of config file names in addition to the main plugin config file +# See _configure_neutron_common() for details about setting it up +declare -a Q_PLUGIN_EXTRA_CONF_FILES + +# List of (optional) config files for VPN device drivers to use with +# the neutron-q-vpn agent +declare -a Q_VPN_EXTRA_CONF_FILES + + +Q_RR_CONF_FILE=$NEUTRON_CONF_DIR/rootwrap.conf +if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then + Q_RR_COMMAND="sudo" +else + NEUTRON_ROOTWRAP=$(get_rootwrap_location neutron) + Q_RR_COMMAND="sudo $NEUTRON_ROOTWRAP $Q_RR_CONF_FILE" + if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then + Q_RR_DAEMON_COMMAND="sudo $NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE" + fi +fi + + +# Distributed Virtual Router (DVR) configuration +# Can be: +# - ``legacy`` - No DVR functionality +# - ``dvr_snat`` - Controller or single node DVR +# - ``dvr`` - Compute node in multi-node DVR +# +Q_DVR_MODE=${Q_DVR_MODE:-legacy} +if [[ "$Q_DVR_MODE" != "legacy" ]]; then + Q_ML2_PLUGIN_MECHANISM_DRIVERS=openvswitch,linuxbridge,l2population +fi + +# Provider Network Configurations +# -------------------------------- + +# The following variables control the Neutron ML2 
plugins' allocation
+# of tenant networks and availability of provider networks. If these
+# are not configured in ``localrc``, tenant networks will be local to
+# the host (with no remote connectivity), and no physical resources
+# will be available for the allocation of provider networks.
+
+# To disable tunnels (GRE or VXLAN) for tenant networks,
+# set to False in ``local.conf``.
+# GRE tunnels are only supported by the openvswitch.
+ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-True}
+
+# If using GRE tunnels for tenant networks, specify the range of
+# tunnel IDs from which tenant networks are allocated. Can be
+# overridden in ``localrc`` if necessary.
+TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGES:-1:1000}
+
+# To use VLANs for tenant networks, set to True in localrc. VLANs
+# are supported by the ML2 plugins, requiring additional configuration
+# described below.
+ENABLE_TENANT_VLANS=${ENABLE_TENANT_VLANS:-False}
+
+# If using VLANs for tenant networks, set in ``localrc`` to specify
+# the range of VLAN VIDs from which tenant networks are
+# allocated. An external network switch must be configured to
+# trunk these VLANs between hosts for multi-host connectivity.
+#
+# Example: ``TENANT_VLAN_RANGE=1000:1999``
+TENANT_VLAN_RANGE=${TENANT_VLAN_RANGE:-}
+
+# If using VLANs for tenant networks, or if using flat or VLAN
+# provider networks, set in ``localrc`` to the name of the physical
+# network, and also configure ``OVS_PHYSICAL_BRIDGE`` for the
+# openvswitch agent or ``LB_PHYSICAL_INTERFACE`` for the linuxbridge
+# agent, as described below.
+#
+# Example: ``PHYSICAL_NETWORK=default``
+PHYSICAL_NETWORK=${PHYSICAL_NETWORK:-}
+
+# With the openvswitch agent, if using VLANs for tenant networks,
+# or if using flat or VLAN provider networks, set in ``localrc`` to
+# the name of the OVS bridge to use for the physical network.
The +# bridge will be created if it does not already exist, but a +# physical interface must be manually added to the bridge as a +# port for external connectivity. +# +# Example: ``OVS_PHYSICAL_BRIDGE=br-eth1`` +OVS_PHYSICAL_BRIDGE=${OVS_PHYSICAL_BRIDGE:-} + +# With the linuxbridge agent, if using VLANs for tenant networks, +# or if using flat or VLAN provider networks, set in ``localrc`` to +# the name of the network interface to use for the physical +# network. +# +# Example: ``LB_PHYSICAL_INTERFACE=eth1`` +LB_PHYSICAL_INTERFACE=${LB_PHYSICAL_INTERFACE:-} + +# When Neutron tunnels are enabled it is needed to specify the +# IP address of the end point in the local server. This IP is set +# by default to the same IP address that the HOST IP. +# This variable can be used to specify a different end point IP address +# Example: ``TUNNEL_ENDPOINT_IP=1.1.1.1`` +TUNNEL_ENDPOINT_IP=${TUNNEL_ENDPOINT_IP:-$HOST_IP} + +# With the openvswitch plugin, set to True in ``localrc`` to enable +# provider GRE tunnels when ``ENABLE_TENANT_TUNNELS`` is False. +# +# Example: ``OVS_ENABLE_TUNNELING=True`` +OVS_ENABLE_TUNNELING=${OVS_ENABLE_TUNNELING:-$ENABLE_TENANT_TUNNELS} + +# Use DHCP agent for providing metadata service in the case of +# without L3 agent (No Route Agent), set to True in localrc. +ENABLE_ISOLATED_METADATA=${ENABLE_ISOLATED_METADATA:-False} + +# Add a static route as dhcp option, so the request to 169.254.169.254 +# will be able to reach through a route(DHCP agent) +# This option require ENABLE_ISOLATED_METADATA = True +ENABLE_METADATA_NETWORK=${ENABLE_METADATA_NETWORK:-False} +# Neutron plugin specific functions +# --------------------------------- + +# Please refer to ``lib/neutron_plugins/README.md`` for details. 
+source $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN + +# Agent loadbalancer service plugin functions +# ------------------------------------------- + +# Hardcoding for 1 service plugin for now +source $TOP_DIR/lib/neutron_plugins/services/loadbalancer + +# Agent metering service plugin functions +# ------------------------------------------- + +# Hardcoding for 1 service plugin for now +source $TOP_DIR/lib/neutron_plugins/services/metering + +# VPN service plugin functions +# ------------------------------------------- +# Hardcoding for 1 service plugin for now +source $TOP_DIR/lib/neutron_plugins/services/vpn + +# Firewall Service Plugin functions +# --------------------------------- +source $TOP_DIR/lib/neutron_plugins/services/firewall + +# Use security group or not +if has_neutron_plugin_security_group; then + Q_USE_SECGROUP=${Q_USE_SECGROUP:-True} +else + Q_USE_SECGROUP=False +fi + +# Tell Tempest this project is present +TEMPEST_SERVICES+=,neutron + + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Functions +# --------- + +function _determine_config_server { + local cfg_file + local opts="--config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" + for cfg_file in ${Q_PLUGIN_EXTRA_CONF_FILES[@]}; do + opts+=" --config-file /$cfg_file" + done + echo "$opts" +} + +function _determine_config_vpn { + local cfg_file + local opts="--config-file $NEUTRON_CONF --config-file=$Q_L3_CONF_FILE --config-file=$Q_VPN_CONF_FILE" + if is_service_enabled q-fwaas; then + opts+=" --config-file $Q_FWAAS_CONF_FILE" + fi + for cfg_file in ${Q_VPN_EXTRA_CONF_FILES[@]}; do + opts+=" --config-file $cfg_file" + done + echo "$opts" + +} + +function _determine_config_l3 { + local opts="--config-file $NEUTRON_CONF --config-file=$Q_L3_CONF_FILE" + if is_service_enabled q-fwaas; then + opts+=" --config-file $Q_FWAAS_CONF_FILE" + fi + echo "$opts" +} + +# For services and agents that require it, dynamically construct a list of +# --config-file arguments that are 
passed to the binary. +function determine_config_files { + local opts="" + case "$1" in + "neutron-server") opts="$(_determine_config_server)" ;; + "neutron-vpn-agent") opts="$(_determine_config_vpn)" ;; + "neutron-l3-agent") opts="$(_determine_config_l3)" ;; + esac + if [ -z "$opts" ] ; then + die $LINENO "Could not determine config files for $1." + fi + echo "$opts" +} + +# Test if any Neutron services are enabled +# is_neutron_enabled +function is_neutron_enabled { + [[ ,${ENABLED_SERVICES} =~ ,"q-" ]] && return 0 + return 1 +} + +# configure_neutron() +# Set common config for all neutron server and agents. +function configure_neutron { + _configure_neutron_common + iniset_rpc_backend neutron $NEUTRON_CONF + + # goes before q-svc to init Q_SERVICE_PLUGIN_CLASSES + if is_service_enabled q-lbaas; then + _configure_neutron_lbaas + fi + if is_service_enabled q-metering; then + _configure_neutron_metering + fi + if is_service_enabled q-vpn; then + _configure_neutron_vpn + fi + if is_service_enabled q-fwaas; then + _configure_neutron_fwaas + fi + if is_service_enabled q-agt q-svc; then + _configure_neutron_service + fi + if is_service_enabled q-agt; then + _configure_neutron_plugin_agent + fi + if is_service_enabled q-dhcp; then + _configure_neutron_dhcp_agent + fi + if is_service_enabled q-l3; then + _configure_neutron_l3_agent + fi + if is_service_enabled q-meta; then + _configure_neutron_metadata_agent + fi + + if [[ "$Q_DVR_MODE" != "legacy" ]]; then + _configure_dvr + fi + if is_service_enabled ceilometer; then + _configure_neutron_ceilometer_notifications + fi + + _configure_neutron_debug_command +} + +function create_nova_conf_neutron { + iniset $NOVA_CONF DEFAULT network_api_class "nova.network.neutronv2.api.API" + iniset $NOVA_CONF neutron admin_username "$Q_ADMIN_USERNAME" + iniset $NOVA_CONF neutron admin_password "$SERVICE_PASSWORD" + iniset $NOVA_CONF neutron admin_auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0" + 
iniset $NOVA_CONF neutron auth_strategy "$Q_AUTH_STRATEGY" + iniset $NOVA_CONF neutron admin_tenant_name "$SERVICE_TENANT_NAME" + iniset $NOVA_CONF neutron region_name "$REGION_NAME" + iniset $NOVA_CONF neutron url "${Q_PROTOCOL}://$Q_HOST:$Q_PORT" + + if [[ "$Q_USE_SECGROUP" == "True" ]]; then + LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver + iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER + iniset $NOVA_CONF DEFAULT security_group_api neutron + fi + + # set NOVA_VIF_DRIVER and optionally set options in nova_conf + neutron_plugin_create_nova_conf + + iniset $NOVA_CONF libvirt vif_driver "$NOVA_VIF_DRIVER" + iniset $NOVA_CONF DEFAULT linuxnet_interface_driver "$LINUXNET_VIF_DRIVER" + if is_service_enabled q-meta; then + iniset $NOVA_CONF neutron service_metadata_proxy "True" + fi + + iniset $NOVA_CONF DEFAULT vif_plugging_is_fatal "$VIF_PLUGGING_IS_FATAL" + iniset $NOVA_CONF DEFAULT vif_plugging_timeout "$VIF_PLUGGING_TIMEOUT" +} + +# create_neutron_cache_dir() - Part of the _neutron_setup_keystone() process +function create_neutron_cache_dir { + # Create cache dir + sudo install -d -o $STACK_USER $NEUTRON_AUTH_CACHE_DIR + rm -f $NEUTRON_AUTH_CACHE_DIR/* +} + +# create_neutron_accounts() - Set up common required neutron accounts + +# Tenant User Roles +# ------------------------------------------------------------------ +# service neutron admin # if enabled + +# Migrated from keystone_data.sh +function create_neutron_accounts { + if [[ "$ENABLED_SERVICES" =~ "q-svc" ]]; then + + create_service_user "neutron" + + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + + local neutron_service=$(get_or_create_service "neutron" \ + "network" "Neutron Service") + get_or_create_endpoint $neutron_service \ + "$REGION_NAME" \ + "$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/" \ + "$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/" \ + "$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/" + fi + fi +} + +function create_neutron_initial_network { + TENANT_ID=$(openstack project 
list | grep " demo " | get_field 1) + die_if_not_set $LINENO TENANT_ID "Failure retrieving TENANT_ID for demo" + + # Allow drivers that need to create an initial network to do so here + if type -p neutron_plugin_create_initial_network_profile > /dev/null; then + neutron_plugin_create_initial_network_profile $PHYSICAL_NETWORK + fi + + if is_provider_network; then + die_if_not_set $LINENO PHYSICAL_NETWORK "You must specify the PHYSICAL_NETWORK" + die_if_not_set $LINENO PROVIDER_NETWORK_TYPE "You must specify the PROVIDER_NETWORK_TYPE" + NET_ID=$(neutron net-create $PHYSICAL_NETWORK --tenant_id $TENANT_ID --provider:network_type $PROVIDER_NETWORK_TYPE --provider:physical_network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider:segmentation_id $SEGMENTATION_ID} --shared | grep ' id ' | get_field 2) + die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $TENANT_ID" + + if [[ "$IP_VERSION" =~ 4.* ]]; then + SUBNET_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --name $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) + die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $PROVIDER_SUBNET_NAME $TENANT_ID" + fi + + if [[ "$IP_VERSION" =~ .*6 ]]; then + SUBNET_V6_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 6 --ipv6-address-mode slaac --gateway $V6_NETWORK_GATEWAY --name $PROVIDER_SUBNET_NAME_V6 $NET_ID $FIXED_RANGE_V6 | grep 'id' | get_field 2) + die_if_not_set $LINENO SUBNET_V6_ID "Failure creating SUBNET_V6_ID for $PROVIDER_SUBNET_NAME_V6 $TENANT_ID" + fi + + sudo ip link set $OVS_PHYSICAL_BRIDGE up + sudo ip link set br-int up + sudo ip link set $PUBLIC_INTERFACE up + else + NET_ID=$(neutron net-create --tenant-id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) + die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PRIVATE_NETWORK_NAME $TENANT_ID" + + if [[ "$IP_VERSION" =~ 
4.* ]]; then + # Create IPv4 private subnet + SUBNET_ID=$(_neutron_create_private_subnet_v4) + fi + + if [[ "$IP_VERSION" =~ .*6 ]]; then + # Create IPv6 private subnet + IPV6_SUBNET_ID=$(_neutron_create_private_subnet_v6) + fi + fi + + if [[ "$Q_L3_ENABLED" == "True" ]]; then + # Create a router, and add the private subnet as one of its interfaces + if [[ "$Q_L3_ROUTER_PER_TENANT" == "True" ]]; then + # create a tenant-owned router. + ROUTER_ID=$(neutron router-create --tenant-id $TENANT_ID $Q_ROUTER_NAME | grep ' id ' | get_field 2) + die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $TENANT_ID $Q_ROUTER_NAME" + else + # Plugin only supports creating a single router, which should be admin owned. + ROUTER_ID=$(neutron router-create $Q_ROUTER_NAME | grep ' id ' | get_field 2) + die_if_not_set $LINENO ROUTER_ID "Failure creating ROUTER_ID for $Q_ROUTER_NAME" + fi + + # Create an external network, and a subnet. Configure the external network as router gw + if [ "$Q_USE_PROVIDERNET_FOR_PUBLIC" = "True" ]; then + EXT_NET_ID=$(neutron net-create "$PUBLIC_NETWORK_NAME" -- --router:external=True --provider:network_type=flat --provider:physical_network=${PUBLIC_PHYSICAL_NETWORK} | grep ' id ' | get_field 2) + else + EXT_NET_ID=$(neutron net-create "$PUBLIC_NETWORK_NAME" -- --router:external=True | grep ' id ' | get_field 2) + fi + die_if_not_set $LINENO EXT_NET_ID "Failure creating EXT_NET_ID for $PUBLIC_NETWORK_NAME" + + if [[ "$IP_VERSION" =~ 4.* ]]; then + # Configure router for IPv4 public access + _neutron_configure_router_v4 + fi + + if [[ "$IP_VERSION" =~ .*6 ]]; then + # Configure router for IPv6 public access + _neutron_configure_router_v6 + fi + fi +} + +# init_neutron() - Initialize databases, etc. 
+function init_neutron { + recreate_database $Q_DB_NAME + # Run Neutron db migrations + $NEUTRON_BIN_DIR/neutron-db-manage --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE upgrade head + for svc in fwaas lbaas vpnaas; do + if [ "$svc" = "vpnaas" ]; then + q_svc="q-vpn" + else + q_svc="q-$svc" + fi + if is_service_enabled $q_svc; then + $NEUTRON_BIN_DIR/neutron-db-manage --service $svc --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE upgrade head + fi + done +} + +# install_neutron() - Collect source and prepare +function install_neutron { + git_clone $NEUTRON_REPO $NEUTRON_DIR $NEUTRON_BRANCH + setup_develop $NEUTRON_DIR + if is_service_enabled q-fwaas; then + git_clone $NEUTRON_FWAAS_REPO $NEUTRON_FWAAS_DIR $NEUTRON_FWAAS_BRANCH + setup_develop $NEUTRON_FWAAS_DIR + fi + if is_service_enabled q-lbaas; then + git_clone $NEUTRON_LBAAS_REPO $NEUTRON_LBAAS_DIR $NEUTRON_LBAAS_BRANCH + setup_develop $NEUTRON_LBAAS_DIR + fi + if is_service_enabled q-vpn; then + git_clone $NEUTRON_VPNAAS_REPO $NEUTRON_VPNAAS_DIR $NEUTRON_VPNAAS_BRANCH + setup_develop $NEUTRON_VPNAAS_DIR + fi + + if [ "$VIRT_DRIVER" == 'xenserver' ]; then + local dom0_ip + dom0_ip=$(echo "$XENAPI_CONNECTION_URL" | cut -d "/" -f 3-) + + local ssh_dom0 + ssh_dom0="sudo -u $DOMZERO_USER ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null root@$dom0_ip" + + # Find where the plugins should go in dom0 + local xen_functions + xen_functions=$(cat $TOP_DIR/tools/xen/functions) + local plugin_dir + plugin_dir=$($ssh_dom0 "$xen_functions; set -eux; xapi_plugin_location") + + # install neutron plugins to dom0 + tar -czf - -C $NEUTRON_DIR/neutron/plugins/openvswitch/agent/xenapi/etc/xapi.d/plugins/ ./ | + $ssh_dom0 "tar -xzf - -C $plugin_dir && chmod a+x $plugin_dir/*" + fi +} + +# install_neutronclient() - Collect source and prepare +function install_neutronclient { + if use_library_from_git "python-neutronclient"; then + git_clone_by_name "python-neutronclient" + setup_dev_lib 
"python-neutronclient" + sudo install -D -m 0644 -o $STACK_USER {${GITDIR["python-neutronclient"]}/tools/,/etc/bash_completion.d/}neutron.bash_completion + fi +} + +# install_neutron_agent_packages() - Collect source and prepare +function install_neutron_agent_packages { + # radvd doesn't come with the OS. Install it if the l3 service is enabled. + if is_service_enabled q-l3; then + install_package radvd + fi + # install packages that are specific to plugin agent(s) + if is_service_enabled q-agt q-dhcp q-l3; then + neutron_plugin_install_agent_packages + fi + + if is_service_enabled q-lbaas; then + neutron_agent_lbaas_install_agent_packages + fi +} + +# Start running processes, including screen +function start_neutron_service_and_check { + local cfg_file_options="$(determine_config_files neutron-server)" + local service_port=$Q_PORT + local service_protocol=$Q_PROTOCOL + if is_service_enabled tls-proxy; then + service_port=$Q_PORT_INT + service_protocol="http" + fi + # Start the Neutron service + run_process q-svc "python $NEUTRON_BIN_DIR/neutron-server $cfg_file_options" + echo "Waiting for Neutron to start..." + if is_ssl_enabled_service "neutron"; then + ssl_ca="--ca-certificate=${SSL_BUNDLE_FILE}" + fi + if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
wget ${ssl_ca} --no-proxy -q -O- $service_protocol://$Q_HOST:$service_port; do sleep 1; done"; then + die $LINENO "Neutron did not start" + fi + # Start proxy if enabled + if is_service_enabled tls-proxy; then + start_tls_proxy '*' $Q_PORT $Q_HOST $Q_PORT_INT & + fi +} + +# Start running processes, including screen +function start_neutron_agents { + # Start up the neutron agents if enabled + run_process q-agt "python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" + run_process q-dhcp "python $AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file=$Q_DHCP_CONF_FILE" + + if is_provider_network; then + sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_PHYSICAL_BRIDGE $PUBLIC_INTERFACE + sudo ip link set $OVS_PHYSICAL_BRIDGE up + sudo ip link set br-int up + sudo ip link set $PUBLIC_INTERFACE up + if is_ironic_hardware; then + for IP in $(ip addr show dev $PUBLIC_INTERFACE | grep ' inet ' | awk '{print $2}'); do + sudo ip addr del $IP dev $PUBLIC_INTERFACE + sudo ip addr add $IP dev $OVS_PHYSICAL_BRIDGE + done + sudo route add -net $FIXED_RANGE gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE + fi + fi + + if is_service_enabled q-vpn; then + run_process q-vpn "$AGENT_VPN_BINARY $(determine_config_files neutron-vpn-agent)" + else + run_process q-l3 "python $AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)" + fi + + run_process q-meta "python $AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file=$Q_META_CONF_FILE" + + if [ "$VIRT_DRIVER" = 'xenserver' ]; then + # For XenServer, start an agent for the domU openvswitch + run_process q-domua "python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE.domU" + fi + + if is_service_enabled q-lbaas; then + run_process q-lbaas "python $AGENT_LBAAS_BINARY --config-file $NEUTRON_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME" + fi + + if is_service_enabled q-metering; then + run_process q-metering "python $AGENT_METERING_BINARY --config-file $NEUTRON_CONF 
--config-file $METERING_AGENT_CONF_FILENAME" + fi +} + +# stop_neutron() - Stop running processes (non-screen) +function stop_neutron { + if is_service_enabled q-dhcp; then + stop_process q-dhcp + pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }') + [ ! -z "$pid" ] && sudo kill -9 $pid + fi + + stop_process q-svc + stop_process q-l3 + + if is_service_enabled q-meta; then + sudo pkill -9 -f neutron-ns-metadata-proxy || : + stop_process q-meta + fi + + stop_process q-agt + + if is_service_enabled q-lbaas; then + neutron_lbaas_stop + fi + if is_service_enabled q-fwaas; then + neutron_fwaas_stop + fi + if is_service_enabled q-vpn; then + neutron_vpn_stop + fi + if is_service_enabled q-metering; then + neutron_metering_stop + fi +} + +# cleanup_neutron() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_neutron { + if is_provider_network && is_ironic_hardware; then + for IP in $(ip addr show dev $OVS_PHYSICAL_BRIDGE | grep ' inet ' | awk '{print $2}'); do + sudo ip addr del $IP dev $OVS_PHYSICAL_BRIDGE + sudo ip addr add $IP dev $PUBLIC_INTERFACE + done + sudo route del -net $FIXED_RANGE gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE + fi + + if is_neutron_ovs_base_plugin; then + neutron_ovs_base_cleanup + fi + + # delete all namespaces created by neutron + for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|qlbaas|fip|snat)-[0-9a-f-]*'); do + sudo ip netns delete ${ns} + done +} + + +function _create_neutron_conf_dir { + # Put config files in ``NEUTRON_CONF_DIR`` for everyone to find + sudo install -d -o $STACK_USER $NEUTRON_CONF_DIR +} + +# _configure_neutron_common() +# Set common config for all neutron server and agents. +# This MUST be called before other ``_configure_neutron_*`` functions. 
+function _configure_neutron_common { + _create_neutron_conf_dir + + cp $NEUTRON_DIR/etc/neutron.conf $NEUTRON_CONF + + # Set plugin-specific variables ``Q_DB_NAME``, ``Q_PLUGIN_CLASS``. + # For main plugin config file, set ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``. + # For addition plugin config files, set ``Q_PLUGIN_EXTRA_CONF_PATH``, + # ``Q_PLUGIN_EXTRA_CONF_FILES``. For example: + # + # ``Q_PLUGIN_EXTRA_CONF_FILES=(file1, file2)`` + neutron_plugin_configure_common + + if [[ "$Q_PLUGIN_CONF_PATH" == '' || "$Q_PLUGIN_CONF_FILENAME" == '' || "$Q_PLUGIN_CLASS" == '' ]]; then + die $LINENO "Neutron plugin not set.. exiting" + fi + + # If needed, move config file from ``$NEUTRON_DIR/etc/neutron`` to ``NEUTRON_CONF_DIR`` + mkdir -p /$Q_PLUGIN_CONF_PATH + Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME + cp $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE + + iniset $NEUTRON_CONF database connection `database_connection_url $Q_DB_NAME` + iniset $NEUTRON_CONF DEFAULT state_path $DATA_DIR/neutron + iniset $NEUTRON_CONF DEFAULT use_syslog $SYSLOG + # If addition config files are set, make sure their path name is set as well + if [[ ${#Q_PLUGIN_EXTRA_CONF_FILES[@]} > 0 && $Q_PLUGIN_EXTRA_CONF_PATH == '' ]]; then + die $LINENO "Neutron additional plugin config not set.. 
exiting" + fi + + # If additional config files exist, copy them over to neutron configuration + # directory + if [[ $Q_PLUGIN_EXTRA_CONF_PATH != '' ]]; then + local f + for (( f=0; $f < ${#Q_PLUGIN_EXTRA_CONF_FILES[@]}; f+=1 )); do + Q_PLUGIN_EXTRA_CONF_FILES[$f]=$Q_PLUGIN_EXTRA_CONF_PATH/${Q_PLUGIN_EXTRA_CONF_FILES[$f]} + done + fi + + if [ "$VIRT_DRIVER" = 'fake' ]; then + # Disable arbitrary limits + iniset $NEUTRON_CONF quotas quota_network -1 + iniset $NEUTRON_CONF quotas quota_subnet -1 + iniset $NEUTRON_CONF quotas quota_port -1 + iniset $NEUTRON_CONF quotas quota_security_group -1 + iniset $NEUTRON_CONF quotas quota_security_group_rule -1 + fi + + # Format logging + if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then + setup_colorized_logging $NEUTRON_CONF DEFAULT project_id + else + # Show user_name and project_name by default like in nova + iniset $NEUTRON_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s" + fi + + if is_service_enabled tls-proxy; then + # Set the service port for a proxy to take the original + iniset $NEUTRON_CONF DEFAULT bind_port "$Q_PORT_INT" + fi + + if is_ssl_enabled_service "nova"; then + iniset $NEUTRON_CONF nova cafile $SSL_BUNDLE_FILE + fi + + if is_ssl_enabled_service "neutron"; then + ensure_certificates NEUTRON + + iniset $NEUTRON_CONF DEFAULT use_ssl True + iniset $NEUTRON_CONF DEFAULT ssl_cert_file "$NEUTRON_SSL_CERT" + iniset $NEUTRON_CONF DEFAULT ssl_key_file "$NEUTRON_SSL_KEY" + fi + + _neutron_setup_rootwrap +} + +function _configure_neutron_debug_command { + if [[ "$Q_USE_DEBUG_COMMAND" != "True" ]]; then + return + fi + + cp $NEUTRON_DIR/etc/l3_agent.ini $NEUTRON_TEST_CONFIG_FILE + + iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT verbose False + iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT debug False + iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE + iniset 
$NEUTRON_TEST_CONFIG_FILE agent root_helper "$Q_RR_COMMAND" + if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then + iniset $NEUTRON_TEST_CONFIG_FILE agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" + fi + + _neutron_setup_interface_driver $NEUTRON_TEST_CONFIG_FILE + + neutron_plugin_configure_debug_command +} + +function _configure_neutron_dhcp_agent { + + cp $NEUTRON_DIR/etc/dhcp_agent.ini $Q_DHCP_CONF_FILE + + iniset $Q_DHCP_CONF_FILE DEFAULT verbose True + iniset $Q_DHCP_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE + iniset $Q_DHCP_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" + if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then + iniset $NEUTRON_TEST_CONFIG_FILE agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" + fi + + if ! is_service_enabled q-l3; then + if [[ "$ENABLE_ISOLATED_METADATA" = "True" ]]; then + iniset $Q_DHCP_CONF_FILE DEFAULT enable_isolated_metadata $ENABLE_ISOLATED_METADATA + iniset $Q_DHCP_CONF_FILE DEFAULT enable_metadata_network $ENABLE_METADATA_NETWORK + else + if [[ "$ENABLE_METADATA_NETWORK" = "True" ]]; then + die "$LINENO" "Enable isolated metadata is a must for metadata network" + fi + fi + fi + + _neutron_setup_interface_driver $Q_DHCP_CONF_FILE + + neutron_plugin_configure_dhcp_agent +} + +function _configure_neutron_l3_agent { + local cfg_file + Q_L3_ENABLED=True + # for l3-agent, only use per tenant router if we have namespaces + Q_L3_ROUTER_PER_TENANT=$Q_USE_NAMESPACE + + if is_service_enabled q-vpn; then + neutron_vpn_configure_agent + fi + + cp $NEUTRON_DIR/etc/l3_agent.ini $Q_L3_CONF_FILE + + iniset $Q_L3_CONF_FILE DEFAULT verbose True + iniset $Q_L3_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + iniset $Q_L3_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE + iniset $Q_L3_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" + if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then + iniset $Q_L3_CONF_FILE agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" + fi + + 
_neutron_setup_interface_driver $Q_L3_CONF_FILE + + neutron_plugin_configure_l3_agent +} + +function _configure_neutron_metadata_agent { + cp $NEUTRON_DIR/etc/metadata_agent.ini $Q_META_CONF_FILE + + iniset $Q_META_CONF_FILE DEFAULT verbose True + iniset $Q_META_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + iniset $Q_META_CONF_FILE DEFAULT nova_metadata_ip $Q_META_DATA_IP + iniset $Q_META_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" + if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then + iniset $Q_META_CONF_FILE agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" + fi + + # Configures keystone for metadata_agent + # The third argument "True" sets auth_url needed to communicate with keystone + _neutron_setup_keystone $Q_META_CONF_FILE DEFAULT True + +} + +function _configure_neutron_ceilometer_notifications { + iniset $NEUTRON_CONF DEFAULT notification_driver messaging +} + +function _configure_neutron_lbaas { + if [ -f $NEUTRON_LBAAS_DIR/etc/neutron_lbaas.conf ]; then + cp $NEUTRON_LBAAS_DIR/etc/neutron_lbaas.conf $NEUTRON_CONF_DIR + fi + neutron_agent_lbaas_configure_common + neutron_agent_lbaas_configure_agent +} + +function _configure_neutron_metering { + neutron_agent_metering_configure_common + neutron_agent_metering_configure_agent +} + +function _configure_neutron_fwaas { + if [ -f $NEUTRON_FWAAS_DIR/etc/neutron_fwaas.conf ]; then + cp $NEUTRON_FWAAS_DIR/etc/neutron_fwaas.conf $NEUTRON_CONF_DIR + fi + neutron_fwaas_configure_common + neutron_fwaas_configure_driver +} + +function _configure_neutron_vpn { + if [ -f $NEUTRON_VPNAAS_DIR/etc/neutron_vpnaas.conf ]; then + cp $NEUTRON_VPNAAS_DIR/etc/neutron_vpnaas.conf $NEUTRON_CONF_DIR + fi + neutron_vpn_install_agent_packages + neutron_vpn_configure_common +} + +function _configure_dvr { + iniset $NEUTRON_CONF DEFAULT router_distributed True + iniset $Q_L3_CONF_FILE DEFAULT agent_mode $Q_DVR_MODE +} + + +# _configure_neutron_plugin_agent() - Set config files for neutron plugin agent +# It is called when q-agt is 
enabled. +function _configure_neutron_plugin_agent { + # Specify the default root helper prior to agent configuration to + # ensure that an agent's configuration can override the default + iniset /$Q_PLUGIN_CONF_FILE agent root_helper "$Q_RR_COMMAND" + if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then + iniset /$Q_PLUGIN_CONF_FILE agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" + fi + iniset $NEUTRON_CONF DEFAULT verbose True + iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + + # Configure agent for plugin + neutron_plugin_configure_plugin_agent +} + +# _configure_neutron_service() - Set config files for neutron service +# It is called when q-svc is enabled. +function _configure_neutron_service { + Q_API_PASTE_FILE=$NEUTRON_CONF_DIR/api-paste.ini + Q_POLICY_FILE=$NEUTRON_CONF_DIR/policy.json + + cp $NEUTRON_DIR/etc/api-paste.ini $Q_API_PASTE_FILE + cp $NEUTRON_DIR/etc/policy.json $Q_POLICY_FILE + + # allow neutron user to administer neutron to match neutron account + sed -i 's/"context_is_admin": "role:admin"/"context_is_admin": "role:admin or user_name:neutron"/g' $Q_POLICY_FILE + + # Update either configuration file with plugin + iniset $NEUTRON_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS + + if [[ $Q_SERVICE_PLUGIN_CLASSES != '' ]]; then + iniset $NEUTRON_CONF DEFAULT service_plugins $Q_SERVICE_PLUGIN_CLASSES + fi + + iniset $NEUTRON_CONF DEFAULT verbose True + iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + iniset $NEUTRON_CONF DEFAULT policy_file $Q_POLICY_FILE + iniset $NEUTRON_CONF DEFAULT allow_overlapping_ips $Q_ALLOW_OVERLAPPING_IP + + iniset $NEUTRON_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY + _neutron_setup_keystone $NEUTRON_CONF keystone_authtoken + + # Configuration for neutron notifications to nova. 
+ iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_status_changes $Q_NOTIFY_NOVA_PORT_STATUS_CHANGES + iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_PORT_DATA_CHANGES + + iniset $NEUTRON_CONF nova auth_plugin password + iniset $NEUTRON_CONF nova auth_url $KEYSTONE_AUTH_URI + iniset $NEUTRON_CONF nova username nova + iniset $NEUTRON_CONF nova password $SERVICE_PASSWORD + iniset $NEUTRON_CONF nova user_domain_id default + iniset $NEUTRON_CONF nova project_name $SERVICE_TENANT_NAME + iniset $NEUTRON_CONF nova project_domain_id default + iniset $NEUTRON_CONF nova region_name $REGION_NAME + + # Configure plugin + neutron_plugin_configure_service +} + +# Utility Functions +#------------------ + +# _neutron_service_plugin_class_add() - add service plugin class +function _neutron_service_plugin_class_add { + local service_plugin_class=$1 + if [[ $Q_SERVICE_PLUGIN_CLASSES == '' ]]; then + Q_SERVICE_PLUGIN_CLASSES=$service_plugin_class + elif [[ ! ,${Q_SERVICE_PLUGIN_CLASSES}, =~ ,${service_plugin_class}, ]]; then + Q_SERVICE_PLUGIN_CLASSES="$Q_SERVICE_PLUGIN_CLASSES,$service_plugin_class" + fi +} + +# _neutron_deploy_rootwrap_filters() - deploy rootwrap filters to $Q_CONF_ROOTWRAP_D (owned by root). 
+function _neutron_deploy_rootwrap_filters { + local srcdir=$1 + sudo install -d -o root -m 755 $Q_CONF_ROOTWRAP_D + sudo install -o root -m 644 $srcdir/etc/neutron/rootwrap.d/* $Q_CONF_ROOTWRAP_D/ +} + +# _neutron_setup_rootwrap() - configure Neutron's rootwrap +function _neutron_setup_rootwrap { + if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then + return + fi + # Wipe any existing ``rootwrap.d`` files first + Q_CONF_ROOTWRAP_D=$NEUTRON_CONF_DIR/rootwrap.d + if [[ -d $Q_CONF_ROOTWRAP_D ]]; then + sudo rm -rf $Q_CONF_ROOTWRAP_D + fi + + _neutron_deploy_rootwrap_filters $NEUTRON_DIR + + # Set up ``rootwrap.conf``, pointing to ``$NEUTRON_CONF_DIR/rootwrap.d`` + # location moved in newer versions, prefer new location + if test -r $NEUTRON_DIR/etc/neutron/rootwrap.conf; then + sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/neutron/rootwrap.conf $Q_RR_CONF_FILE + else + sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE + fi + sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE + # Specify ``rootwrap.conf`` as first parameter to neutron-rootwrap + ROOTWRAP_SUDOER_CMD="$NEUTRON_ROOTWRAP $Q_RR_CONF_FILE *" + ROOTWRAP_DAEMON_SUDOER_CMD="$NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE" + + # Set up the rootwrap sudoers for neutron + TEMPFILE=`mktemp` + echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_SUDOER_CMD" >$TEMPFILE + echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_DAEMON_SUDOER_CMD" >>$TEMPFILE + chmod 0440 $TEMPFILE + sudo chown root:root $TEMPFILE + sudo mv $TEMPFILE /etc/sudoers.d/neutron-rootwrap + + # Update the root_helper + iniset $NEUTRON_CONF agent root_helper "$Q_RR_COMMAND" + if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then + iniset $NEUTRON_CONF agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" + fi +} + +# Configures keystone integration for neutron service and agents +function _neutron_setup_keystone { + local conf_file=$1 + local section=$2 + local use_auth_url=$3 + + # Configures keystone 
for metadata_agent + # metadata_agent needs auth_url to communicate with keystone + if [[ "$use_auth_url" == "True" ]]; then + iniset $conf_file $section auth_url $KEYSTONE_SERVICE_URI/v2.0 + fi + + create_neutron_cache_dir + configure_auth_token_middleware $conf_file $Q_ADMIN_USERNAME $NEUTRON_AUTH_CACHE_DIR $section +} + +function _neutron_setup_interface_driver { + + # ovs_use_veth needs to be set before the plugin configuration + # occurs to allow plugins to override the setting. + iniset $1 DEFAULT ovs_use_veth $Q_OVS_USE_VETH + + neutron_plugin_setup_interface_driver $1 +} + +# Create private IPv4 subnet +function _neutron_create_private_subnet_v4 { + local subnet_params="--tenant-id $TENANT_ID " + subnet_params+="--ip_version 4 " + subnet_params+="--gateway $NETWORK_GATEWAY " + subnet_params+="--name $PRIVATE_SUBNET_NAME " + subnet_params+="$NET_ID $FIXED_RANGE" + local subnet_id=$(neutron subnet-create $subnet_params | grep ' id ' | get_field 2) + die_if_not_set $LINENO subnet_id "Failure creating private IPv4 subnet for $TENANT_ID" + echo $subnet_id +} + +# Create private IPv6 subnet +function _neutron_create_private_subnet_v6 { + die_if_not_set $LINENO IPV6_RA_MODE "IPV6 RA Mode not set" + die_if_not_set $LINENO IPV6_ADDRESS_MODE "IPV6 Address Mode not set" + local ipv6_modes="--ipv6-ra-mode $IPV6_RA_MODE --ipv6-address-mode $IPV6_ADDRESS_MODE" + local subnet_params="--tenant-id $TENANT_ID " + subnet_params+="--ip_version 6 " + subnet_params+="--gateway $IPV6_PRIVATE_NETWORK_GATEWAY " + subnet_params+="--name $IPV6_PRIVATE_SUBNET_NAME " + subnet_params+="$NET_ID $FIXED_RANGE_V6 $ipv6_modes" + local ipv6_subnet_id=$(neutron subnet-create $subnet_params | grep ' id ' | get_field 2) + die_if_not_set $LINENO ipv6_subnet_id "Failure creating private IPv6 subnet for $TENANT_ID" + echo $ipv6_subnet_id +} + +# Create public IPv4 subnet +function _neutron_create_public_subnet_v4 { + local subnet_params+="--ip_version 4 " + 
subnet_params+="${Q_FLOATING_ALLOCATION_POOL:+--allocation-pool $Q_FLOATING_ALLOCATION_POOL} " + subnet_params+="--gateway $PUBLIC_NETWORK_GATEWAY " + subnet_params+="--name $PUBLIC_SUBNET_NAME " + subnet_params+="$EXT_NET_ID $FLOATING_RANGE " + subnet_params+="-- --enable_dhcp=False" + local id_and_ext_gw_ip=$(neutron subnet-create $subnet_params | grep -e 'gateway_ip' -e ' id ') + die_if_not_set $LINENO id_and_ext_gw_ip "Failure creating public IPv4 subnet" + echo $id_and_ext_gw_ip +} + +# Create public IPv6 subnet +function _neutron_create_public_subnet_v6 { + local subnet_params="--ip_version 6 " + subnet_params+="--gateway $IPV6_PUBLIC_NETWORK_GATEWAY " + subnet_params+="--name $IPV6_PUBLIC_SUBNET_NAME " + subnet_params+="$EXT_NET_ID $IPV6_PUBLIC_RANGE " + subnet_params+="-- --enable_dhcp=False" + local ipv6_id_and_ext_gw_ip=$(neutron subnet-create $subnet_params | grep -e 'gateway_ip' -e ' id ') + die_if_not_set $LINENO ipv6_id_and_ext_gw_ip "Failure creating an IPv6 public subnet" + echo $ipv6_id_and_ext_gw_ip +} + +# Configure neutron router for IPv4 public access +function _neutron_configure_router_v4 { + neutron router-interface-add $ROUTER_ID $SUBNET_ID + # Create a public subnet on the external network + local id_and_ext_gw_ip=$(_neutron_create_public_subnet_v4 $EXT_NET_ID) + local ext_gw_ip=$(echo $id_and_ext_gw_ip | get_field 2) + PUB_SUBNET_ID=$(echo $id_and_ext_gw_ip | get_field 5) + # Configure the external network as the default router gateway + neutron router-gateway-set $ROUTER_ID $EXT_NET_ID + + # This logic is specific to using the l3-agent for layer 3 + if is_service_enabled q-l3; then + # Configure and enable public bridge + if is_neutron_ovs_base_plugin && [[ "$Q_USE_NAMESPACE" = "True" ]]; then + local ext_gw_interface=$(_neutron_get_ext_gw_interface) + local cidr_len=${FLOATING_RANGE#*/} + sudo ip addr add $ext_gw_ip/$cidr_len dev $ext_gw_interface + sudo ip link set $ext_gw_interface up + ROUTER_GW_IP=`neutron port-list -c fixed_ips -c 
device_owner | grep router_gateway | awk -F '"' -v subnet_id=$PUB_SUBNET_ID '$4 == subnet_id { print $8; }'` + die_if_not_set $LINENO ROUTER_GW_IP "Failure retrieving ROUTER_GW_IP" + sudo route add -net $FIXED_RANGE gw $ROUTER_GW_IP + fi + _neutron_set_router_id + fi +} + +# Configure neutron router for IPv6 public access +function _neutron_configure_router_v6 { + neutron router-interface-add $ROUTER_ID $IPV6_SUBNET_ID + # Create a public subnet on the external network + local ipv6_id_and_ext_gw_ip=$(_neutron_create_public_subnet_v6 $EXT_NET_ID) + local ipv6_ext_gw_ip=$(echo $ipv6_id_and_ext_gw_ip | get_field 2) + local ipv6_pub_subnet_id=$(echo $ipv6_id_and_ext_gw_ip | get_field 5) + + # If the external network has not already been set as the default router + # gateway when configuring an IPv4 public subnet, do so now + if [[ "$IP_VERSION" == "6" ]]; then + neutron router-gateway-set $ROUTER_ID $EXT_NET_ID + fi + + # This logic is specific to using the l3-agent for layer 3 + if is_service_enabled q-l3; then + local ipv6_router_gw_port + # Ensure IPv6 forwarding is enabled on the host + sudo sysctl -w net.ipv6.conf.all.forwarding=1 + # Configure and enable public bridge + if [[ "$IP_VERSION" = "6" ]]; then + # Override global IPV6_ROUTER_GW_IP with the true value from neutron + IPV6_ROUTER_GW_IP=`neutron port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F '"' -v subnet_id=$ipv6_pub_subnet_id '$4 == subnet_id { print $8; }'` + die_if_not_set $LINENO IPV6_ROUTER_GW_IP "Failure retrieving IPV6_ROUTER_GW_IP" + ipv6_router_gw_port=`neutron port-list -c id -c fixed_ips -c device_owner | grep router_gateway | awk -F '"' -v subnet_id=$ipv6_pub_subnet_id '$4 == subnet_id { print $1; }' | awk -F ' | ' '{ print $2; }'` + die_if_not_set $LINENO ipv6_router_gw_port "Failure retrieving ipv6_router_gw_port" + else + ipv6_router_gw_port=`neutron port-list -c id -c fixed_ips -c device_owner | grep router_gateway | awk -F '"' -v subnet_id=$PUB_SUBNET_ID '$4 == 
subnet_id { print $1; }' | awk -F ' | ' '{ print $2; }'` + die_if_not_set $LINENO ipv6_router_gw_port "Failure retrieving ipv6_router_gw_port" + fi + + # The ovs_base_configure_l3_agent function flushes the public + # bridge's ip addresses, so turn IPv6 support in the host off + # and then on to recover the public bridge's link local address + sudo sysctl -w net.ipv6.conf.${PUBLIC_BRIDGE}.disable_ipv6=1 + sudo sysctl -w net.ipv6.conf.${PUBLIC_BRIDGE}.disable_ipv6=0 + if ! ip -6 addr show dev $PUBLIC_BRIDGE | grep 'scope global'; then + # Create an IPv6 ULA address for PUBLIC_BRIDGE if one is not present + IPV6_BRIDGE_ULA=`uuidgen | sed s/-//g | cut -c 23- | sed -e "s/\(..\)\(....\)\(....\)/\1:\2:\3/"` + sudo ip -6 addr add fd$IPV6_BRIDGE_ULA::1 dev $PUBLIC_BRIDGE + fi + + if is_neutron_ovs_base_plugin && [[ "$Q_USE_NAMESPACE" = "True" ]]; then + local ext_gw_interface=$(_neutron_get_ext_gw_interface) + local ipv6_cidr_len=${IPV6_PUBLIC_RANGE#*/} + + # Define router_ns based on whether DVR is enabled + local router_ns=qrouter + if [[ "$Q_DVR_MODE" == "dvr_snat" ]]; then + router_ns=snat + fi + + # Configure interface for public bridge + sudo ip -6 addr add $ipv6_ext_gw_ip/$ipv6_cidr_len dev $ext_gw_interface + + # Wait until layer 3 agent has configured the gateway port on + # the public bridge, then add gateway address to the interface + # TODO (john-davidge) Remove once l3-agent supports dual-stack + if [[ "$IP_VERSION" == "4+6" ]]; then + if ! 
timeout $GATEWAY_TIMEOUT sh -c "until sudo ip netns exec $router_ns-$ROUTER_ID ip addr show qg-${ipv6_router_gw_port:0:11} | grep $ROUTER_GW_IP; do sleep 1; done"; then + die $LINENO "Timeout retrieving ROUTER_GW_IP" + fi + # Configure the gateway port with the public IPv6 adress + sudo ip netns exec $router_ns-$ROUTER_ID ip -6 addr add $IPV6_ROUTER_GW_IP/$ipv6_cidr_len dev qg-${ipv6_router_gw_port:0:11} + # Add a default IPv6 route to the neutron router as the + # l3-agent does not add one in the dual-stack case + sudo ip netns exec $router_ns-$ROUTER_ID ip -6 route replace default via $ipv6_ext_gw_ip dev qg-${ipv6_router_gw_port:0:11} + fi + sudo ip -6 route add $FIXED_RANGE_V6 via $IPV6_ROUTER_GW_IP dev $ext_gw_interface + fi + _neutron_set_router_id + fi +} + +# Explicitly set router id in l3 agent configuration +function _neutron_set_router_id { + if [[ "$Q_USE_NAMESPACE" == "False" ]]; then + iniset $Q_L3_CONF_FILE DEFAULT router_id $ROUTER_ID + fi +} + +# Get ext_gw_interface depending on value of Q_USE_PUBLIC_VETH +function _neutron_get_ext_gw_interface { + if [[ "$Q_USE_PUBLIC_VETH" == "True" ]]; then + echo $Q_PUBLIC_VETH_EX + else + # Disable in-band as we are going to use local port + # to communicate with VMs + sudo ovs-vsctl set Bridge $PUBLIC_BRIDGE \ + other_config:disable-in-band=true + echo $PUBLIC_BRIDGE + fi +} + +# Functions for Neutron Exercises +#-------------------------------- + +function delete_probe { + local from_net="$1" + net_id=`_get_net_id $from_net` + probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}'` + neutron-debug --os-tenant-name admin --os-username admin probe-delete $probe_id +} + +function setup_neutron_debug { + if [[ "$Q_USE_DEBUG_COMMAND" == "True" ]]; then + public_net_id=`_get_net_id $PUBLIC_NETWORK_NAME` + neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create 
--device-owner compute $public_net_id + private_net_id=`_get_net_id $PRIVATE_NETWORK_NAME` + neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-create --device-owner compute $private_net_id + fi +} + +function teardown_neutron_debug { + delete_probe $PUBLIC_NETWORK_NAME + delete_probe $PRIVATE_NETWORK_NAME +} + +function _get_net_id { + neutron --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD net-list | grep $1 | awk '{print $2}' +} + +function _get_probe_cmd_prefix { + local from_net="$1" + net_id=`_get_net_id $from_net` + probe_id=`neutron-debug --os-tenant-name admin --os-username admin --os-password $ADMIN_PASSWORD probe-list -c id -c network_id | grep $net_id | awk '{print $2}' | head -n 1` + echo "$Q_RR_COMMAND ip netns exec qprobe-$probe_id" +} + +function _ping_check_neutron { + local from_net=$1 + local ip=$2 + local timeout_sec=$3 + local expected=${4:-"True"} + local check_command="" + probe_cmd=`_get_probe_cmd_prefix $from_net` + if [[ "$expected" = "True" ]]; then + check_command="while ! $probe_cmd ping -w 1 -c 1 $ip; do sleep 1; done" + else + check_command="while $probe_cmd ping -w 1 -c 1 $ip; do sleep 1; done" + fi + if ! timeout $timeout_sec sh -c "$check_command"; then + if [[ "$expected" = "True" ]]; then + die $LINENO "[Fail] Couldn't ping server" + else + die $LINENO "[Fail] Could ping server" + fi + fi +} + +# ssh check +function _ssh_check_neutron { + local from_net=$1 + local key_file=$2 + local ip=$3 + local user=$4 + local timeout_sec=$5 + local probe_cmd="" + probe_cmd=`_get_probe_cmd_prefix $from_net` + if ! timeout $timeout_sec sh -c "while ! $probe_cmd ssh -o StrictHostKeyChecking=no -i $key_file ${user}@$ip echo success; do sleep 1; done"; then + die $LINENO "server didn't become ssh-able!" 
+ fi +} + +# Neutron 3rd party programs +#--------------------------- + +# please refer to ``lib/neutron_thirdparty/README.md`` for details +NEUTRON_THIRD_PARTIES="" +for f in $TOP_DIR/lib/neutron_thirdparty/*; do + third_party=$(basename $f) + if is_service_enabled $third_party; then + source $TOP_DIR/lib/neutron_thirdparty/$third_party + NEUTRON_THIRD_PARTIES="$NEUTRON_THIRD_PARTIES,$third_party" + fi +done + +function _neutron_third_party_do { + for third_party in ${NEUTRON_THIRD_PARTIES//,/ }; do + ${1}_${third_party} + done +} + +# configure_neutron_third_party() - Set config files, create data dirs, etc +function configure_neutron_third_party { + _neutron_third_party_do configure +} + +# init_neutron_third_party() - Initialize databases, etc. +function init_neutron_third_party { + _neutron_third_party_do init +} + +# install_neutron_third_party() - Collect source and prepare +function install_neutron_third_party { + _neutron_third_party_do install +} + +# start_neutron_third_party() - Start running processes, including screen +function start_neutron_third_party { + _neutron_third_party_do start +} + +# stop_neutron_third_party - Stop running processes (non-screen) +function stop_neutron_third_party { + _neutron_third_party_do stop +} + +# check_neutron_third_party_integration() - Check that third party integration is sane +function check_neutron_third_party_integration { + _neutron_third_party_do check +} + +function is_provider_network { + if [ "$Q_USE_PROVIDER_NETWORKING" == "True" ] && [ "$Q_L3_ENABLED" == "False" ]; then + return 0 + fi + return 1 +} + + +# Restore xtrace +$XTRACE + +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/neutron_plugins/README.md b/lib/neutron_plugins/README.md index 7192a051b4..4b220d3377 100644 --- a/lib/neutron_plugins/README.md +++ b/lib/neutron_plugins/README.md @@ -13,7 +13,7 @@ Plugin specific configuration variables should be in this file. 
functions --------- -``lib/neutron`` calls the following functions when the ``$Q_PLUGIN`` is enabled +``lib/neutron-legacy`` calls the following functions when the ``$Q_PLUGIN`` is enabled * ``neutron_plugin_create_nova_conf`` : set ``NOVA_VIF_DRIVER`` and optionally set options in nova_conf diff --git a/lib/neutron_thirdparty/README.md b/lib/neutron_thirdparty/README.md index 5655e0bf60..905ae776a8 100644 --- a/lib/neutron_thirdparty/README.md +++ b/lib/neutron_thirdparty/README.md @@ -10,7 +10,7 @@ Third party program specific configuration variables should be in this file. functions --------- -``lib/neutron`` calls the following functions when the ```` is enabled +``lib/neutron-legacy`` calls the following functions when the ```` is enabled functions to be implemented * ``configure_``: diff --git a/stack.sh b/stack.sh index 9d6230332e..9069367fa0 100755 --- a/stack.sh +++ b/stack.sh @@ -525,7 +525,7 @@ source $TOP_DIR/lib/cinder source $TOP_DIR/lib/swift source $TOP_DIR/lib/ceilometer source $TOP_DIR/lib/heat -source $TOP_DIR/lib/neutron +source $TOP_DIR/lib/neutron-legacy source $TOP_DIR/lib/ldap source $TOP_DIR/lib/dstat diff --git a/unstack.sh b/unstack.sh index fdd63fbcd0..a66370b1e9 100755 --- a/unstack.sh +++ b/unstack.sh @@ -63,7 +63,7 @@ source $TOP_DIR/lib/cinder source $TOP_DIR/lib/swift source $TOP_DIR/lib/ceilometer source $TOP_DIR/lib/heat -source $TOP_DIR/lib/neutron +source $TOP_DIR/lib/neutron-legacy source $TOP_DIR/lib/ldap source $TOP_DIR/lib/dstat From 93b906d53ee457b3768c755615fc7c2395d26c85 Mon Sep 17 00:00:00 2001 From: Angus Salkeld Date: Thu, 26 Mar 2015 12:53:51 +1000 Subject: [PATCH 0097/2941] Always set the Heat deferred auth method Otherwise in standalone mode we use the new default of "trusts" which won't work. 
Change-Id: If18db711faf7810217af0a89d1e38590a94d8e5b Closes-bug: 1436631 --- lib/heat | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/lib/heat b/lib/heat index c7abd3bb1c..0930776a58 100644 --- a/lib/heat +++ b/lib/heat @@ -124,6 +124,8 @@ function configure_heat { setup_colorized_logging $HEAT_CONF DEFAULT tenant user fi + iniset $HEAT_CONF DEFAULT deferred_auth_method $HEAT_DEFERRED_AUTH + # NOTE(jamielennox): heat re-uses specific values from the # keystone_authtoken middleware group and so currently fails when using the # auth plugin setup. This should be fixed in heat. Heat is also the only @@ -269,10 +271,6 @@ function create_heat_accounts { get_or_create_role "heat_stack_user" fi - if [[ $HEAT_DEFERRED_AUTH == trusts ]]; then - iniset $HEAT_CONF DEFAULT deferred_auth_method trusts - fi - if [[ "$HEAT_STACK_DOMAIN" == "True" ]]; then # Note we have to pass token/endpoint here because the current endpoint and # version negotiation in OSC means just --os-identity-api-version=3 won't work From d9de1199bd3f79fc7b71e933970c6f14afa8d310 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Thu, 26 Mar 2015 09:25:02 +0100 Subject: [PATCH 0098/2941] Print kernel version Adding `uname -a` to stack.sh to make it easy to see from the devstack logs what the actual running kernel version was. 
Change-Id: I0068504bf055a588b155b0a60215440d365bf53e --- stack.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/stack.sh b/stack.sh index 9069367fa0..79f8fa3a7f 100755 --- a/stack.sh +++ b/stack.sh @@ -89,6 +89,9 @@ if [[ $EUID -eq 0 ]]; then exit 1 fi +# Print the kernel version +uname -a + # Prepare the environment # ----------------------- From 7ced150f8c70e1acaa4b83c25d3c9271cdb512c3 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Mon, 23 Mar 2015 15:51:54 -0400 Subject: [PATCH 0099/2941] Add variable to indicated if Tempest should have admin This commit adds a new flag, TEMPEST_HAS_ADMIN, to enable or disable setting admin credentials in the tempest config file. This allows for devstack / tempest configurations where tempest doesn't have admin to ensure it would work in public cloud scenarios. Change-Id: Id983417801e4b276334fb9e700f2c8e6ab78f9ba --- lib/tempest | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/lib/tempest b/lib/tempest index 4ece349159..2ec353bce8 100644 --- a/lib/tempest +++ b/lib/tempest @@ -175,6 +175,10 @@ function configure_tempest { password=${ADMIN_PASSWORD:-secrete} + # Do we want to make a configuration where Tempest has admin on + # the cloud. We don't always want to so that we can ensure Tempest + # would work on a public cloud. + TEMPEST_HAS_ADMIN=$(trueorfalse True TEMPEST_HAS_ADMIN) # See files/keystone_data.sh and stack.sh where admin, demo and alt_demo # user and tenant are set up... 
ADMIN_USERNAME=${ADMIN_USERNAME:-admin} @@ -292,11 +296,13 @@ function configure_tempest { iniset $TEMPEST_CONFIG identity alt_username $ALT_USERNAME iniset $TEMPEST_CONFIG identity alt_password "$password" iniset $TEMPEST_CONFIG identity alt_tenant_name $ALT_TENANT_NAME - iniset $TEMPEST_CONFIG identity admin_username $ADMIN_USERNAME - iniset $TEMPEST_CONFIG identity admin_password "$password" - iniset $TEMPEST_CONFIG identity admin_tenant_name $ADMIN_TENANT_NAME - iniset $TEMPEST_CONFIG identity admin_tenant_id $ADMIN_TENANT_ID - iniset $TEMPEST_CONFIG identity admin_domain_name $ADMIN_DOMAIN_NAME + if [[ "$TEMPEST_HAS_ADMIN" == "True" ]]; then + iniset $TEMPEST_CONFIG identity admin_username $ADMIN_USERNAME + iniset $TEMPEST_CONFIG identity admin_password "$password" + iniset $TEMPEST_CONFIG identity admin_tenant_name $ADMIN_TENANT_NAME + iniset $TEMPEST_CONFIG identity admin_tenant_id $ADMIN_TENANT_ID + iniset $TEMPEST_CONFIG identity admin_domain_name $ADMIN_DOMAIN_NAME + fi iniset $TEMPEST_CONFIG identity auth_version ${TEMPEST_AUTH_VERSION:-v2} if is_ssl_enabled_service "key" || is_service_enabled tls-proxy; then iniset $TEMPEST_CONFIG identity ca_certificates_file $SSL_BUNDLE_FILE @@ -310,6 +316,9 @@ function configure_tempest { fi # Auth + # + # + TEMPEST_ALLOW_TENANT_ISOLATION=${TEMPEST_ALLOW_TENANT_ISOLATION:-$TEMPEST_HAS_ADMIN} iniset $TEMPEST_CONFIG auth allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True} iniset $TEMPEST_CONFIG auth tempest_roles "Member" From 1bd79596c3c5f62cbbef92558156401447a9b5d3 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Tue, 24 Feb 2015 14:06:56 +0100 Subject: [PATCH 0100/2941] Move back isset to the functions-common isset function was moved to config file related functions by accident, this change also simplifies the isset in a bash >=4.2 way. All supported distros have at least bash 4.2. 
(RHEL6 used 4.1) Change-Id: Id644b46ff9cdbe18cde46e96aa72764e1c8653ac --- functions-common | 3 +++ inc/ini-config | 10 ---------- tests/functions.sh | 17 +++++++++++++++++ 3 files changed, 20 insertions(+), 10 deletions(-) diff --git a/functions-common b/functions-common index 48e400dfb1..56fa64a990 100644 --- a/functions-common +++ b/functions-common @@ -62,6 +62,9 @@ function trueorfalse { $xtrace } +function isset { + [[ -v "$1" ]] +} # Control Functions # ================= diff --git a/inc/ini-config b/inc/ini-config index 0d6d169f8b..26401f3917 100644 --- a/inc/ini-config +++ b/inc/ini-config @@ -205,16 +205,6 @@ function iniuncomment { $xtrace } -function isset { - nounset=$(set +o | grep nounset) - set +o nounset - [[ -n "${!1+x}" ]] - result=$? - $nounset - return $result -} - - # Restore xtrace $INC_CONF_TRACE diff --git a/tests/functions.sh b/tests/functions.sh index 874d02230d..126080f1e3 100755 --- a/tests/functions.sh +++ b/tests/functions.sh @@ -196,3 +196,20 @@ if is_ubuntu; then echo "is_package_installed() on deleted package failed" fi fi + +# test isset function +echo "Testing isset()" +you_should_not_have_this_variable=42 + +if isset "you_should_not_have_this_variable"; then + echo "OK" +else + echo "\"you_should_not_have_this_variable\" not declared. failed" +fi + +unset you_should_not_have_this_variable +if isset "you_should_not_have_this_variable"; then + echo "\"you_should_not_have_this_variable\" looks like declared variable. failed" +else + echo "OK" +fi From 0fc946ddc805989adb68c1e836e86b51d1f011cf Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Mon, 23 Mar 2015 16:38:30 -0400 Subject: [PATCH 0101/2941] Remove the compute-admin section from tempest config This commit removes the compute-admin section from the tempest config file that devstack generates. These options have been removed from the tempest config and aren't being used, so there is no reason to keep them around. 
Change-Id: I7500fe3b329b913c60fa505a5230db4a5d35d7f1 --- lib/tempest | 5 ----- 1 file changed, 5 deletions(-) diff --git a/lib/tempest b/lib/tempest index 2ec353bce8..11e9ac4fa1 100644 --- a/lib/tempest +++ b/lib/tempest @@ -358,11 +358,6 @@ function configure_tempest { # TODO(mriedem): Remove the preserve_ports flag when Juno is end of life. iniset $TEMPEST_CONFIG compute-feature-enabled preserve_ports True - # Compute admin - iniset $TEMPEST_CONFIG "compute-admin" username $ADMIN_USERNAME - iniset $TEMPEST_CONFIG "compute-admin" password "$password" - iniset $TEMPEST_CONFIG "compute-admin" tenant_name $ADMIN_TENANT_NAME - # Network iniset $TEMPEST_CONFIG network api_version 2.0 iniset $TEMPEST_CONFIG network tenant_networks_reachable "$tenant_networks_reachable" From 00e16a9d53905f309655172d8a2b1cbcfc1cbfa5 Mon Sep 17 00:00:00 2001 From: "Sean M. Collins" Date: Fri, 20 Feb 2015 11:45:21 -0500 Subject: [PATCH 0102/2941] Support for single interface Neutron networking with OVS When running Neutron on a single node that only has a single interface, the following operations are required: * Remove the IP address from the physical interface * Add the interface to the OVS physical bridge * Add the IP address from the physical interface to the OVS bridge * Update the routing table The reverse is done on cleanup. In order run Neutron on a single interface, the $PUBLIC_INTERFACE and $OVS_PHYSICAL_BRIDGE variables must be set. 
Co-Authored-By: Brian Haley Change-Id: I71e2594288bae1a71dc2c8c3fb350b913dbd5e2c --- lib/neutron-legacy | 42 ++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 40 insertions(+), 2 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 5ff39212fc..e9a3926b28 100755 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -779,9 +779,41 @@ function stop_neutron { fi } +# _move_neutron_addresses_route() - Move the primary IP to the OVS bridge +# on startup, or back to the public interface on cleanup +function _move_neutron_addresses_route { + local from_intf=$1 + local to_intf=$2 + local add_ovs_port=$3 + + if [[ -n "$from_intf" && -n "$to_intf" ]]; then + # Remove the primary IP address from $from_intf and add it to $to_intf, + # along with the default route, if it exists. Also, when called + # on configure we will also add $from_intf as a port on $to_intf, + # assuming it is an OVS bridge. + + local IP_BRD=$(ip -4 a s dev $from_intf | awk '/inet/ { print $2, $3, $4; exit }') + local DEFAULT_ROUTE_GW=$(ip r | awk "/default.+$from_intf/ { print \$3; exit }") + local ADD_OVS_PORT="" + + if [ "$DEFAULT_ROUTE_GW" != "" ]; then + ADD_DEFAULT_ROUTE="sudo ip r replace default via $DEFAULT_ROUTE_GW dev $to_intf" + fi + + if [[ "$add_ovs_port" == "True" ]]; then + ADD_OVS_PORT="sudo ovs-vsctl add-port $to_intf $from_intf" + fi + + sudo ip addr del $IP_BRD dev $from_intf; sudo ip addr add $IP_BRD dev $to_intf; $ADD_OVS_PORT; $ADD_DEFAULT_ROUTE + fi +} + # cleanup_neutron() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_neutron { + + _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False + if is_provider_network && is_ironic_hardware; then for IP in $(ip addr show dev $OVS_PHYSICAL_BRIDGE | grep ' inet ' | awk '{print $2}'); do sudo ip addr del $IP dev $OVS_PHYSICAL_BRIDGE @@ -956,6 +988,10 @@ function _configure_neutron_l3_agent { 
_neutron_setup_interface_driver $Q_L3_CONF_FILE neutron_plugin_configure_l3_agent + + if [[ $(ip -4 a s dev "$PUBLIC_INTERFACE" | grep -c 'inet') != 0 ]]; then + _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True + fi } function _configure_neutron_metadata_agent { @@ -1227,8 +1263,10 @@ function _neutron_configure_router_v4 { if is_neutron_ovs_base_plugin && [[ "$Q_USE_NAMESPACE" = "True" ]]; then local ext_gw_interface=$(_neutron_get_ext_gw_interface) local cidr_len=${FLOATING_RANGE#*/} - sudo ip addr add $ext_gw_ip/$cidr_len dev $ext_gw_interface - sudo ip link set $ext_gw_interface up + if [[ $(ip addr show dev $ext_gw_interface | grep -c $ext_gw_ip) == 0 && $Q_USE_PROVIDERNET_FOR_PUBLIC == "False" ]]; then + sudo ip addr add $ext_gw_ip/$cidr_len dev $ext_gw_interface + sudo ip link set $ext_gw_interface up + fi ROUTER_GW_IP=`neutron port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F '"' -v subnet_id=$PUB_SUBNET_ID '$4 == subnet_id { print $8; }'` die_if_not_set $LINENO ROUTER_GW_IP "Failure retrieving ROUTER_GW_IP" sudo route add -net $FIXED_RANGE gw $ROUTER_GW_IP From ebdd9ac5b41da372c0276a507451ea9878be7dda Mon Sep 17 00:00:00 2001 From: Chris Dent Date: Wed, 4 Mar 2015 12:35:14 +0000 Subject: [PATCH 0103/2941] Provide an option to force pip --upgrade Make it possible for someone to config PIP_UPGRADE=True in local.conf and thus force pip_install calls to upgrade. In automated testing this is probably a bad idea, but in manual testing or situations where devstack is being used to spin up proof of concepts having the option to use the latest and greatest Python modules is a useful way of exploring the health of the ecosystem. To help with visibility of the setting, and section has been added in configuration.rst near other similar settings. 
Change-Id: I484c954f1e1f05ed02c0b08e8e4a9c18558c05ef --- doc/source/configuration.rst | 15 +++++++++++++++ inc/python | 12 +++++++++--- 2 files changed, 24 insertions(+), 3 deletions(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 7d06658ee2..79d911c91d 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -247,6 +247,21 @@ A clean install every time RECLONE=yes +Upgrade packages installed by pip +--------------------------------- + + | *Default: ``PIP_UPGRADE=""``* + | By default ``stack.sh`` only installs Python packages if no version + is currently installed or the current version does not match a specified + requirement. If ``PIP_UPGRADE`` is set to ``True`` then existing required + Python packages will be upgraded to the most recent version that + matches requirements. + | + + :: + + PIP_UPGRADE=True + Swift ----- diff --git a/inc/python b/inc/python index 2d76081a52..d00eb0cd1f 100644 --- a/inc/python +++ b/inc/python @@ -54,17 +54,23 @@ function get_python_exec_prefix { # Wrapper for ``pip install`` to set cache and proxy environment variables # Uses globals ``INSTALL_TESTONLY_PACKAGES``, ``OFFLINE``, ``PIP_VIRTUAL_ENV``, -# ``TRACK_DEPENDS``, ``*_proxy`` +# ``PIP_UPGRADE``, ``TRACK_DEPENDS``, ``*_proxy`` # pip_install package [package ...] 
function pip_install { local xtrace=$(set +o | grep xtrace) set +o xtrace + local upgrade="" local offline=${OFFLINE:-False} if [[ "$offline" == "True" || -z "$@" ]]; then $xtrace return fi + PIP_UPGRADE=$(trueorfalse False PIP_UPGRADE) + if [[ "$PIP_UPGRADE" = "True" ]] ; then + upgrade="--upgrade" + fi + if [[ -z "$os_PACKAGE" ]]; then GetOSVersion fi @@ -98,7 +104,7 @@ function pip_install { https_proxy="${https_proxy:-}" \ no_proxy="${no_proxy:-}" \ PIP_FIND_LINKS=$PIP_FIND_LINKS \ - $cmd_pip install \ + $cmd_pip install $upgrade \ $@ # Also install test requirements @@ -110,7 +116,7 @@ function pip_install { https_proxy=${https_proxy:-} \ no_proxy=${no_proxy:-} \ PIP_FIND_LINKS=$PIP_FIND_LINKS \ - $cmd_pip install \ + $cmd_pip install $upgrade \ -r $test_req fi } From 6e275e170c042794560c9b2c442a32c3de55566e Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 26 Mar 2015 05:54:28 -0400 Subject: [PATCH 0104/2941] provide an override-defaults phase during the glusterfs integration it was seen that plugins might need to set new defaults on projects before the project files load. Create a new override-defaults phase for that. Intentionally not adding to the documentation yet until we're sure this works right in the glusterfs case. Reported-By: Deepak C Shetty Change-Id: I13c961b19bdcc1a99e9a7068fe91bbaac787e948 --- functions-common | 29 +++++++++++++++++++++++++++++ stack.sh | 4 ++++ unstack.sh | 4 ++++ 3 files changed, 37 insertions(+) diff --git a/functions-common b/functions-common index 48e400dfb1..9bad981ab1 100644 --- a/functions-common +++ b/functions-common @@ -1501,6 +1501,33 @@ function load_plugin_settings { done } +# plugin_override_defaults +# +# Run an extremely early setting phase for plugins that allows default +# overriding of services. 
+function plugin_override_defaults { + local plugins="${DEVSTACK_PLUGINS}" + local plugin + + # short circuit if nothing to do + if [[ -z $plugins ]]; then + return + fi + + echo "Overriding Configuration Defaults" + for plugin in ${plugins//,/ }; do + local dir=${GITDIR[$plugin]} + # source any overrides + if [[ -f $dir/devstack/override-defaults ]]; then + # be really verbose that an override is happening, as it + # may not be obvious if things fail later. + echo "$plugin has overriden the following defaults" + cat $dir/devstack/override-defaults + source $dir/devstack/override-defaults + fi + done +} + # run_plugins # # Run the devstack/plugin.sh in all the plugin directories. These are @@ -1530,6 +1557,8 @@ function run_phase { # the source phase corresponds to settings loading in plugins if [[ "$mode" == "source" ]]; then load_plugin_settings + elif [[ "$mode" == "override_defaults" ]]; then + plugin_override_defaults else run_plugins $mode $phase fi diff --git a/stack.sh b/stack.sh index 9069367fa0..a9deba7ff6 100755 --- a/stack.sh +++ b/stack.sh @@ -507,6 +507,10 @@ fi # Configure Projects # ================== +# Plugin Phase 0: override_defaults - allow plugins to override +# defaults before other services are run +run_phase override_defaults + # Import apache functions source $TOP_DIR/lib/apache diff --git a/unstack.sh b/unstack.sh index a66370b1e9..c45af7400c 100755 --- a/unstack.sh +++ b/unstack.sh @@ -45,6 +45,10 @@ fi # Configure Projects # ================== +# Plugin Phase 0: override_defaults - allow plugins to override +# defaults before other services are run +run_phase override_defaults + # Import apache functions source $TOP_DIR/lib/apache From c70605d1013296d0127ad38d8c53a69ed982e647 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Mon, 26 Jan 2015 15:44:47 +0100 Subject: [PATCH 0105/2941] Add lioadm cinder iscsi helper support The Linux-IO is a modern way of handling targets. 
Per the IRC discussions lioadm as default seams like a better default for everyone, for now it will be optional, but the tgtadm admin support expected to be removed when lioadm works well with all CI (including third party). Change-Id: Ia54c59914c1d3ff2ef5f00ecf819426bc448d0a9 --- files/debs/cinder | 2 +- files/rpms-suse/cinder | 2 +- files/rpms/cinder | 2 +- lib/cinder | 95 ++++++++++++++++++++++------------------- lib/cinder_backends/lvm | 2 +- lib/lvm | 11 +++-- 6 files changed, 63 insertions(+), 51 deletions(-) diff --git a/files/debs/cinder b/files/debs/cinder index 7819c31655..51908eb27b 100644 --- a/files/debs/cinder +++ b/files/debs/cinder @@ -1,4 +1,4 @@ -tgt +tgt # NOPRIME lvm2 qemu-utils libpq-dev diff --git a/files/rpms-suse/cinder b/files/rpms-suse/cinder index 55078da27c..3fd03cc9be 100644 --- a/files/rpms-suse/cinder +++ b/files/rpms-suse/cinder @@ -1,5 +1,5 @@ lvm2 -tgt +tgt # NOPRIME qemu-tools python-devel postgresql-devel diff --git a/files/rpms/cinder b/files/rpms/cinder index 9f1359f988..a88503b8bc 100644 --- a/files/rpms/cinder +++ b/files/rpms/cinder @@ -1,5 +1,5 @@ lvm2 -scsi-target-utils +scsi-target-utils # NOPRIME qemu-img postgresql-devel iscsi-initiator-utils diff --git a/lib/cinder b/lib/cinder index ef68d8d643..f257afcdb6 100644 --- a/lib/cinder +++ b/lib/cinder @@ -88,6 +88,8 @@ CINDER_SECURE_DELETE=$(trueorfalse True CINDER_SECURE_DELETE) # https://bugs.launchpad.net/cinder/+bug/1180976 CINDER_PERIODIC_INTERVAL=${CINDER_PERIODIC_INTERVAL:-60} +CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-tgtadm} + # Tell Tempest this project is present TEMPEST_SERVICES+=,cinder @@ -125,31 +127,35 @@ function is_cinder_enabled { function cleanup_cinder { # ensure the volume group is cleared up because fails might # leave dead volumes in the group - local targets=$(sudo tgtadm --op show --mode target) - if [ $? 
-ne 0 ]; then - # If tgt driver isn't running this won't work obviously - # So check the response and restart if need be - echo "tgtd seems to be in a bad state, restarting..." - if is_ubuntu; then - restart_service tgt - else - restart_service tgtd + if [ "$CINDER_ISCSI_HELPER" = "tgtadm" ]; then + local targets=$(sudo tgtadm --op show --mode target) + if [ $? -ne 0 ]; then + # If tgt driver isn't running this won't work obviously + # So check the response and restart if need be + echo "tgtd seems to be in a bad state, restarting..." + if is_ubuntu; then + restart_service tgt + else + restart_service tgtd + fi + targets=$(sudo tgtadm --op show --mode target) fi - targets=$(sudo tgtadm --op show --mode target) - fi - if [[ -n "$targets" ]]; then - local iqn_list=( $(grep --no-filename -r iqn $SCSI_PERSIST_DIR | sed 's///') ) - for i in "${iqn_list[@]}"; do - echo removing iSCSI target: $i - sudo tgt-admin --delete $i - done - fi + if [[ -n "$targets" ]]; then + local iqn_list=( $(grep --no-filename -r iqn $SCSI_PERSIST_DIR | sed 's///') ) + for i in "${iqn_list[@]}"; do + echo removing iSCSI target: $i + sudo tgt-admin --delete $i + done + fi - if is_ubuntu; then - stop_service tgt + if is_ubuntu; then + stop_service tgt + else + stop_service tgtd + fi else - stop_service tgtd + sudo cinder-rtstool get-targets | sudo xargs -rn 1 cinder-rtstool delete fi if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then @@ -224,7 +230,7 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $CINDER_CONF DEFAULT verbose True - iniset $CINDER_CONF DEFAULT iscsi_helper tgtadm + iniset $CINDER_CONF DEFAULT iscsi_helper "$CINDER_ISCSI_HELPER" iniset $CINDER_CONF database connection `database_connection_url cinder` iniset $CINDER_CONF DEFAULT api_paste_config $CINDER_API_PASTE_INI iniset $CINDER_CONF DEFAULT rootwrap_config "$CINDER_CONF_DIR/rootwrap.conf" @@ -388,6 +394,13 @@ function init_cinder { function install_cinder { 
git_clone $CINDER_REPO $CINDER_DIR $CINDER_BRANCH setup_develop $CINDER_DIR + if [ "$CINDER_ISCSI_HELPER" = "tgtadm" ]; then + if is_fedora; then + install_package scsi-target-utils + else + install_package tgt + fi + fi } # install_cinderclient() - Collect source and prepare @@ -415,21 +428,23 @@ function start_cinder { service_port=$CINDER_SERVICE_PORT_INT service_protocol="http" fi - if is_service_enabled c-vol; then - # Delete any old stack.conf - sudo rm -f /etc/tgt/conf.d/stack.conf - _configure_tgt_for_config_d - if is_ubuntu; then - sudo service tgt restart - elif is_fedora || is_suse; then - restart_service tgtd - else - # note for other distros: unstack.sh also uses the tgt/tgtd service - # name, and would need to be adjusted too - exit_distro_not_supported "restarting tgt" + if [ "$CINDER_ISCSI_HELPER" = "tgtadm" ]; then + if is_service_enabled c-vol; then + # Delete any old stack.conf + sudo rm -f /etc/tgt/conf.d/stack.conf + _configure_tgt_for_config_d + if is_ubuntu; then + sudo service tgt restart + elif is_fedora || is_suse; then + restart_service tgtd + else + # note for other distros: unstack.sh also uses the tgt/tgtd service + # name, and would need to be adjusted too + exit_distro_not_supported "restarting tgt" + fi + # NOTE(gfidente): ensure tgtd is running in debug mode + sudo tgtadm --mode system --op update --name debug --value on fi - # NOTE(gfidente): ensure tgtd is running in debug mode - sudo tgtadm --mode system --op update --name debug --value on fi run_process c-api "$CINDER_BIN_DIR/cinder-api --config-file $CINDER_CONF" @@ -459,14 +474,6 @@ function stop_cinder { for serv in c-api c-bak c-sch c-vol; do stop_process $serv done - - if is_service_enabled c-vol; then - if is_ubuntu; then - stop_service tgt - else - stop_service tgtd - fi - fi } # create_volume_types() - Create Cinder's configured volume types diff --git a/lib/cinder_backends/lvm b/lib/cinder_backends/lvm index f210578339..d369c0c840 100644 --- a/lib/cinder_backends/lvm 
+++ b/lib/cinder_backends/lvm @@ -49,7 +49,7 @@ function configure_cinder_backend_lvm { iniset $CINDER_CONF $be_name volume_backend_name $be_name iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.lvm.LVMVolumeDriver" iniset $CINDER_CONF $be_name volume_group $VOLUME_GROUP_NAME-$be_name - iniset $CINDER_CONF $be_name iscsi_helper "tgtadm" + iniset $CINDER_CONF $be_name iscsi_helper "$CINDER_ISCSI_HELPER" if [[ "$CINDER_SECURE_DELETE" == "False" ]]; then iniset $CINDER_CONF $be_name volume_clear none diff --git a/lib/lvm b/lib/lvm index d0322c76b3..519e82c806 100644 --- a/lib/lvm +++ b/lib/lvm @@ -108,15 +108,20 @@ function init_lvm_volume_group { if is_fedora || is_suse; then # services is not started by default start_service lvm2-lvmetad - start_service tgtd + if [ "$CINDER_ISCSI_HELPER" = "tgtadm" ]; then + start_service tgtd + fi fi # Start with a clean volume group _create_lvm_volume_group $vg $size # Remove iscsi targets - sudo tgtadm --op show --mode target | grep Target | cut -f3 -d ' ' | sudo xargs -n1 tgt-admin --delete || true - + if [ "$CINDER_ISCSI_HELPER" = "lioadm" ]; then + sudo cinder-rtstool get-targets | sudo xargs -rn 1 cinder-rtstool delete + else + sudo tgtadm --op show --mode target | grep Target | cut -f3 -d ' ' | sudo xargs -n1 tgt-admin --delete || true + fi _clean_lvm_volume_group $vg } From 4533eeec1fe4834ced0996fc8f9c8487dcd31d45 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Tue, 17 Feb 2015 16:25:38 -0600 Subject: [PATCH 0106/2941] Install Nova into its own venv Install a couple of optional feature prereqs in hypervisor plugins. rootwrap is horribly called indirectly via PATH. The choice, other than fixing such nonsense, is to force the path in sudo. 
TODO: * python guestfs isn't in pypi, need to specifically install it to not use the distro package Change-Id: Iad9a66d8a937fd0b0d1874005588c702e3d75e04 --- files/debs/general | 1 + files/rpms/general | 1 + lib/nova | 52 ++++++++++++++++++++++-------- lib/nova_plugins/functions-libvirt | 14 ++++---- 4 files changed, 48 insertions(+), 20 deletions(-) diff --git a/files/debs/general b/files/debs/general index 84d43029ff..87a6c7cb31 100644 --- a/files/debs/general +++ b/files/debs/general @@ -23,3 +23,4 @@ libffi-dev libssl-dev # for pyOpenSSL gettext # used for compiling message catalogs openjdk-7-jre-headless # NOPRIME +pkg-config diff --git a/files/rpms/general b/files/rpms/general index eac4ec36a7..81cc7891be 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -12,6 +12,7 @@ openssl-devel # to rebuild pyOpenSSL if needed libffi-devel libxml2-devel libxslt-devel +pkgconfig psmisc pylint python-devel diff --git a/lib/nova b/lib/nova index 32dea77fbf..d3bf2a8ec1 100644 --- a/lib/nova +++ b/lib/nova @@ -32,9 +32,16 @@ set +o xtrace # Set up default directories GITDIR["python-novaclient"]=$DEST/python-novaclient +NOVA_DIR=$DEST/nova +# Nova virtual environment +if [[ ${USE_VENV} = True ]]; then + PROJECT_VENV["nova"]=${NOVA_DIR}.venv + NOVA_BIN_DIR=${PROJECT_VENV["nova"]}/bin +else + NOVA_BIN_DIR=$(get_python_exec_prefix) +fi -NOVA_DIR=$DEST/nova NOVA_STATE_PATH=${NOVA_STATE_PATH:=$DATA_DIR/nova} # INSTANCES_PATH is the previous name for this NOVA_INSTANCES_PATH=${NOVA_INSTANCES_PATH:=${INSTANCES_PATH:=$NOVA_STATE_PATH/instances}} @@ -69,16 +76,6 @@ NOVA_SERVICE_PROTOCOL=${NOVA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} EC2_SERVICE_PORT=${EC2_SERVICE_PORT:-8773} EC2_SERVICE_PORT_INT=${EC2_SERVICE_PORT_INT:-18773} -# Support entry points installation of console scripts -if [[ -d $NOVA_DIR/bin ]]; then - NOVA_BIN_DIR=$NOVA_DIR/bin -else - NOVA_BIN_DIR=$(get_python_exec_prefix) -fi - -# Set the paths of certain binaries -NOVA_ROOTWRAP=$(get_rootwrap_location nova) - # 
Option to enable/disable config drive # NOTE: Set FORCE_CONFIG_DRIVE="False" to turn OFF config drive FORCE_CONFIG_DRIVE=${FORCE_CONFIG_DRIVE:-"always"} @@ -225,9 +222,11 @@ function cleanup_nova { #fi } +# Deploy new rootwrap filters files and configure sudo # configure_nova_rootwrap() - configure Nova's rootwrap function configure_nova_rootwrap { - # Deploy new rootwrap filters files (owned by root). + nova_rootwrap=$NOVA_BIN_DIR/nova-rootwrap + # Wipe any existing rootwrap.d files first if [[ -d $NOVA_CONF_DIR/rootwrap.d ]]; then sudo rm -rf $NOVA_CONF_DIR/rootwrap.d @@ -242,14 +241,21 @@ function configure_nova_rootwrap { sudo sed -e "s:^filters_path=.*$:filters_path=$NOVA_CONF_DIR/rootwrap.d:" -i $NOVA_CONF_DIR/rootwrap.conf # Specify rootwrap.conf as first parameter to nova-rootwrap - local rootwrap_sudoer_cmd="$NOVA_ROOTWRAP $NOVA_CONF_DIR/rootwrap.conf *" + local rootwrap_sudoer_cmd="$nova_rootwrap $NOVA_CONF_DIR/rootwrap.conf *" # Set up the rootwrap sudoers for nova local tempfile=`mktemp` - echo "$STACK_USER ALL=(root) NOPASSWD: $rootwrap_sudoer_cmd" >$tempfile + echo "Defaults:$STACK_USER secure_path=$NOVA_BIN_DIR:/sbin:/usr/sbin:/usr/bin:/bin:/usr/local/sbin:/usr/local/bin" >$tempfile + echo "$STACK_USER ALL=(root) NOPASSWD: $rootwrap_sudoer_cmd" >>$tempfile chmod 0440 $tempfile sudo chown root:root $tempfile sudo mv $tempfile /etc/sudoers.d/nova-rootwrap + + # So rootwrap and PATH are broken beyond belief. WTF relies on a SECURE operation + # to blindly follow PATH??? We learned that was a bad idea in the 80's! + # So to fix this in a venv, we must exploit the very hole we want to close by dropping + # a copy of the venv rootwrap binary into /usr/local/bin. 
+ #sudo cp -p $nova_rootwrap /usr/local/bin } # configure_nova() - Set config files, create data dirs, etc @@ -696,6 +702,10 @@ function start_nova_api { service_protocol="http" fi + # Hack to set the path for rootwrap + local old_path=$PATH + export PATH=$NOVA_BIN_DIR:$PATH + run_process n-api "$NOVA_BIN_DIR/nova-api" echo "Waiting for nova-api to start..." if ! wait_for_service $SERVICE_TIMEOUT $service_protocol://$SERVICE_HOST:$service_port; then @@ -707,10 +717,16 @@ function start_nova_api { start_tls_proxy '*' $NOVA_SERVICE_PORT $NOVA_SERVICE_HOST $NOVA_SERVICE_PORT_INT & start_tls_proxy '*' $EC2_SERVICE_PORT $NOVA_SERVICE_HOST $EC2_SERVICE_PORT_INT & fi + + export PATH=$old_path } # start_nova_compute() - Start the compute process function start_nova_compute { + # Hack to set the path for rootwrap + local old_path=$PATH + export PATH=$NOVA_BIN_DIR:$PATH + if is_service_enabled n-cell; then local compute_cell_conf=$NOVA_CELLS_CONF else @@ -738,10 +754,16 @@ function start_nova_compute { fi run_process n-cpu "$NOVA_BIN_DIR/nova-compute --config-file $compute_cell_conf" fi + + export PATH=$old_path } # start_nova() - Start running processes, including screen function start_nova_rest { + # Hack to set the path for rootwrap + local old_path=$PATH + export PATH=$NOVA_BIN_DIR:$PATH + local api_cell_conf=$NOVA_CONF if is_service_enabled n-cell; then local compute_cell_conf=$NOVA_CELLS_CONF @@ -769,6 +791,8 @@ function start_nova_rest { # Swift will act as s3 objectstore. 
is_service_enabled swift3 || \ run_process n-obj "$NOVA_BIN_DIR/nova-objectstore --config-file $api_cell_conf" + + export PATH=$old_path } function start_nova { diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index 4d617e8b5e..60707cf859 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -21,14 +21,16 @@ DEBUG_LIBVIRT=$(trueorfalse False DEBUG_LIBVIRT) function install_libvirt { if is_ubuntu; then install_package qemu-kvm - install_package libvirt-bin - install_package python-libvirt - install_package python-guestfs + install_package libvirt-bin libvirt-dev + pip_install libvirt-python + install_package libguestfs0 + #install_package python-guestfs + #pip_install elif is_fedora || is_suse; then install_package kvm - install_package libvirt - install_package libvirt-python - install_package python-libguestfs + install_package libvirt libvirt-devel + pip_install libvirt-python + #install_package python-libguestfs fi # Restart firewalld after install of libvirt to avoid a problem From fab7880bcd98ff8d64e1389aac073b79728e77e5 Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Thu, 26 Mar 2015 13:03:49 -0700 Subject: [PATCH 0107/2941] lib/ironic: Do not check for database or message queue Only check for OpenStack services, not specific database or messaging backends. 
Change-Id: I7960718defa3f521d1c2128d8523e8ee9328da64 --- lib/ironic | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ironic b/lib/ironic index b99e3255d5..a7738bc14e 100644 --- a/lib/ironic +++ b/lib/ironic @@ -181,7 +181,7 @@ function is_deployed_with_ipa_ramdisk { # install_ironic() - Collect source and prepare function install_ironic { # make sure all needed service were enabled - local req_services="mysql rabbit key" + local req_services="key" if [[ "$VIRT_DRIVER" == "ironic" ]]; then req_services+=" nova glance neutron" fi From 11cf7b64a78d225d7ba84b86597a934d0417ad3c Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Fri, 27 Mar 2015 09:08:53 +1300 Subject: [PATCH 0108/2941] iptables rule for heat pip mirror This is required for guest VMs to reach http://$HOST_IP:8899 Change-Id: I814c682fb02974ae05dfbe5e212409cdd11d16ac --- lib/heat | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/heat b/lib/heat index c7abd3bb1c..a27691fab2 100644 --- a/lib/heat +++ b/lib/heat @@ -337,6 +337,7 @@ function build_heat_pip_mirror { " -i $heat_pip_repo_apache_conf enable_apache_site heat_pip_repo restart_apache_server + sudo iptables -I INPUT -d $HOST_IP -p tcp --dport $HEAT_PIP_REPO_PORT -j ACCEPT || true } # Restore xtrace From 41d6f858be8431975762e65db470929c72b8f2a8 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 25 Mar 2015 22:42:46 -0500 Subject: [PATCH 0109/2941] Clean up additional INSTALL_TESTONLY_PACKAGES bits The original removal is in https://review.openstack.org/#/c/167669/ Change-Id: I3c59f040523d2cd1453465e80280955218880634 --- doc/source/configuration.rst | 12 ------------ functions-common | 2 -- inc/python | 2 +- 3 files changed, 1 insertion(+), 15 deletions(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 2af69c6b5e..a0d0840263 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -417,18 +417,6 @@ IP Version can be configured with any valid IPv6 prefix. 
The default values make use of an auto-generated ``IPV6_GLOBAL_ID`` to comply with RFC 4193.* -Unit tests dependencies install -------------------------------- - - | *Default: ``INSTALL_TESTONLY_PACKAGES=False``* - | In order to be able to run unit tests with script ``run_test.sh``, - the required package dependencies need to be installed. - Setting this option as below does the work. - - :: - - INSTALL_TESTONLY_PACKAGES=True - Examples ======== diff --git a/functions-common b/functions-common index 9bad981ab1..0f80e98f43 100644 --- a/functions-common +++ b/functions-common @@ -909,8 +909,6 @@ function get_packages { local file_to_parse="" local service="" - INSTALL_TESTONLY_PACKAGES=$(trueorfalse False INSTALL_TESTONLY_PACKAGES) - if [[ -z "$package_dir" ]]; then echo "No package directory supplied" return 1 diff --git a/inc/python b/inc/python index d00eb0cd1f..39684b6fc5 100644 --- a/inc/python +++ b/inc/python @@ -53,7 +53,7 @@ function get_python_exec_prefix { } # Wrapper for ``pip install`` to set cache and proxy environment variables -# Uses globals ``INSTALL_TESTONLY_PACKAGES``, ``OFFLINE``, ``PIP_VIRTUAL_ENV``, +# Uses globals ``OFFLINE``, ``PIP_VIRTUAL_ENV``, # ``PIP_UPGRADE``, ``TRACK_DEPENDS``, ``*_proxy`` # pip_install package [package ...] function pip_install { From 909fa8f49e0e253009be1299a4a067a22d80bb9b Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Fri, 27 Mar 2015 10:56:16 +0900 Subject: [PATCH 0110/2941] Fix Q_USE_PUBLIC_VETH regression A recently merged change Ie35cb537bb670c4773598b8db29877fb8a12ff50 and I71e2594288bae1a71dc2c8c3fb350b913dbd5e2c broke Q_USE_PUBLIC_VETH. This commit fixes the regression. 
Closes-Bug: #1436637 Change-Id: I1447bf98607143ba4954ce5ec3ed94010320baa5 --- lib/neutron-legacy | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index e9a3926b28..d3dd8dd33e 100755 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -1263,7 +1263,7 @@ function _neutron_configure_router_v4 { if is_neutron_ovs_base_plugin && [[ "$Q_USE_NAMESPACE" = "True" ]]; then local ext_gw_interface=$(_neutron_get_ext_gw_interface) local cidr_len=${FLOATING_RANGE#*/} - if [[ $(ip addr show dev $ext_gw_interface | grep -c $ext_gw_ip) == 0 && $Q_USE_PROVIDERNET_FOR_PUBLIC == "False" ]]; then + if [[ $(ip addr show dev $ext_gw_interface | grep -c $ext_gw_ip) == 0 && ( $Q_USE_PROVIDERNET_FOR_PUBLIC == "False" || $Q_USE_PUBLIC_VETH == "True" ) ]]; then sudo ip addr add $ext_gw_ip/$cidr_len dev $ext_gw_interface sudo ip link set $ext_gw_interface up fi From 9e98f9435ec36f2fffed0ac368befd520f07e0e1 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 27 Mar 2015 14:43:14 +1100 Subject: [PATCH 0111/2941] Install packaged pyOpenSSL pyOpenSSL has done a rewrite of itself in Python. This may be good for many reasons, but memory usage is not one of them. It just about doubles the size of at least swift, which usually consumes about 6% of a CI testing vm's 8gb RAM. This is enough to push centos hosts into OOM conditions and then everything falls apart. The distro packages of pyOpenSSL are the older C-based versions, which doesn't bring in the kitchen sink of cffi & pycparser. 
Change-Id: Icd4100da1d5cbdb82017da046b00b9397813c2f2 --- files/rpms/general | 1 + 1 file changed, 1 insertion(+) diff --git a/files/rpms/general b/files/rpms/general index d74ecc6e98..64329a4086 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -27,3 +27,4 @@ gettext # used for compiling message catalogs net-tools java-1.7.0-openjdk-headless # NOPRIME rhel7,f20 java-1.8.0-openjdk-headless # NOPRIME f21,f22 +pyOpenSSL # version in pip uses too much memory \ No newline at end of file From bba4742e8cbdc577121bf1010f5fe307c958cd15 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Sat, 28 Mar 2015 13:37:26 -0500 Subject: [PATCH 0112/2941] Add parent id to worlddump output Helpful in tracking down some process-doesn't-die problems. Change-Id: I146910403879c9a85d644bd07a53830ea17ca77d --- tools/worlddump.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/worlddump.py b/tools/worlddump.py index 9a62c0dfbb..8dd455c274 100755 --- a/tools/worlddump.py +++ b/tools/worlddump.py @@ -66,7 +66,7 @@ def process_list(): Process Listing =============== """ - psraw = os.popen("ps auxw").read() + psraw = os.popen("ps axo user,ppid,pid,pcpu,pmem,vsz,rss,tty,stat,start,time,args").read() print psraw From 3bb5a6f445f4938f1edca3c649aa22ff4ef8e5c3 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Sat, 28 Mar 2015 10:27:43 -0500 Subject: [PATCH 0113/2941] Remove symlink for lib/neutron compat Depends-On: I146910403879c9a85d644bd07a53830ea17ca77d Change-Id: Ia25331fc74fd26df347024a8314bc4c6ed54428e --- lib/neutron | 1 - 1 file changed, 1 deletion(-) delete mode 120000 lib/neutron diff --git a/lib/neutron b/lib/neutron deleted file mode 120000 index 00cd72273e..0000000000 --- a/lib/neutron +++ /dev/null @@ -1 +0,0 @@ -neutron-legacy \ No newline at end of file From dc97cb71e85fc807d2cce6f054c785922d322eb9 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Sat, 28 Mar 2015 08:20:50 -0500 Subject: [PATCH 0114/2941] Mostly docs cleanups Fix documentation build errors 
and RST formatting Change-Id: Id93153400c5b069dd9d772381558c7085f64c207 --- exercise.sh | 10 +- functions | 6 +- functions-common | 4 +- gate/updown.sh | 2 +- lib/ceilometer | 33 +++-- lib/dstat | 2 +- lib/horizon | 2 +- lib/ironic | 6 +- lib/lvm | 8 +- lib/nova | 21 ++-- lib/oslo | 7 +- lib/rpc_backend | 5 +- lib/stack | 9 +- lib/swift | 47 ++++---- lib/tempest | 60 +++++----- lib/tls | 15 +-- lib/trove | 4 +- run_tests.sh | 5 +- samples/local.conf | 3 +- samples/local.sh | 7 +- stack.sh | 185 +++++++++++++++-------------- stackrc | 39 +++--- tools/build_docs.sh | 6 +- tools/build_venv.sh | 9 +- tools/build_wheels.sh | 11 +- tools/create-stack-user.sh | 2 +- tools/fixup_stuff.sh | 12 +- tools/image_list.sh | 2 +- tools/info.sh | 4 +- tools/install_pip.sh | 10 +- tools/install_prereqs.sh | 6 +- tools/ironic/scripts/create-node | 4 +- tools/ironic/scripts/setup-network | 4 +- tools/outfilter.py | 14 +-- unstack.sh | 2 +- 35 files changed, 284 insertions(+), 282 deletions(-) diff --git a/exercise.sh b/exercise.sh index ce694fba66..19c9d80451 100755 --- a/exercise.sh +++ b/exercise.sh @@ -2,7 +2,7 @@ # **exercise.sh** -# Keep track of the current devstack directory. +# Keep track of the current DevStack directory. 
TOP_DIR=$(cd $(dirname "$0") && pwd) # Import common functions @@ -14,11 +14,11 @@ source $TOP_DIR/stackrc # Run everything in the exercises/ directory that isn't explicitly disabled # comma separated list of script basenames to skip -# to refrain from exercising euca.sh use SKIP_EXERCISES=euca +# to refrain from exercising euca.sh use ``SKIP_EXERCISES=euca`` SKIP_EXERCISES=${SKIP_EXERCISES:-""} # comma separated list of script basenames to run -# to run only euca.sh use RUN_EXERCISES=euca +# to run only euca.sh use ``RUN_EXERCISES=euca`` basenames=${RUN_EXERCISES:-""} EXERCISE_DIR=$TOP_DIR/exercises @@ -27,7 +27,7 @@ if [[ -z "${basenames}" ]]; then # Locate the scripts we should run basenames=$(for b in `ls $EXERCISE_DIR/*.sh`; do basename $b .sh; done) else - # If RUN_EXERCISES was specified, ignore SKIP_EXERCISES. + # If ``RUN_EXERCISES`` was specified, ignore ``SKIP_EXERCISES``. SKIP_EXERCISES= fi @@ -56,7 +56,7 @@ for script in $basenames; do fi done -# output status of exercise run +# Output status of exercise run echo "=====================================================================" for script in $skips; do echo SKIP $script diff --git a/functions b/functions index 9adbfe7cf6..5bc8456281 100644 --- a/functions +++ b/functions @@ -439,7 +439,7 @@ function check_path_perm_sanity { echo "*** DEST path element" echo "*** ${rebuilt_path}" echo "*** appears to have 0700 permissions." - echo "*** This is very likely to cause fatal issues for devstack daemons." + echo "*** This is very likely to cause fatal issues for DevStack daemons." if [[ -n "$SKIP_PATH_SANITY" ]]; then return @@ -526,8 +526,8 @@ function setup_colorized_logging { } # These functions are provided for basic fall-back functionality for -# projects that include parts of devstack (grenade). stack.sh will -# override these with more specific versions for devstack (with fancy +# projects that include parts of DevStack (Grenade). 
stack.sh will +# override these with more specific versions for DevStack (with fancy # spinners, etc). We never override an existing version if ! function_exists echo_summary; then function echo_summary { diff --git a/functions-common b/functions-common index 0f80e98f43..f1aca29b30 100644 --- a/functions-common +++ b/functions-common @@ -971,7 +971,7 @@ function get_packages { # # Only packages required for enabled and collected plugins will included. # -# The same metadata used in the main devstack prerequisite files may be used +# The same metadata used in the main DevStack prerequisite files may be used # in these prerequisite files, see get_packages() for more info. function get_plugin_packages { local xtrace=$(set +o | grep xtrace) @@ -1471,7 +1471,7 @@ function fetch_plugins { return fi - echo "Fetching devstack plugins" + echo "Fetching DevStack plugins" for plugin in ${plugins//,/ }; do git_clone_by_name $plugin done diff --git a/gate/updown.sh b/gate/updown.sh index d2d7351a2f..f46385cfe1 100755 --- a/gate/updown.sh +++ b/gate/updown.sh @@ -4,7 +4,7 @@ # # Note: this is expected to start running as jenkins -# Step 1: give back sudoers permissions to devstack +# Step 1: give back sudoers permissions to DevStack TEMPFILE=`mktemp` echo "stack ALL=(root) NOPASSWD:ALL" >$TEMPFILE chmod 0440 $TEMPFILE diff --git a/lib/ceilometer b/lib/ceilometer index 7b2215c3d3..81353093b2 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -4,7 +4,7 @@ # Install and start **Ceilometer** service # To enable a minimal set of Ceilometer services, add the following to the -# localrc section of local.conf: +# ``localrc`` section of ``local.conf``: # # enable_service ceilometer-acompute ceilometer-acentral ceilometer-anotification ceilometer-collector ceilometer-api # @@ -17,14 +17,11 @@ # of Ceilometer (see within for additional settings): # # CEILOMETER_USE_MOD_WSGI: When True, run the api under mod_wsgi. 
-# CEILOMETER_PIPELINE_INTERVAL: The number of seconds between pipeline processing -# runs. Default 600. -# CEILOMETER_BACKEND: The database backend (e.g. 'mysql', 'mongodb', 'es') -# CEILOMETER_COORDINATION_URL: The URL for a group membership service provided -# by tooz. +# CEILOMETER_PIPELINE_INTERVAL: Seconds between pipeline processing runs. Default 600. +# CEILOMETER_BACKEND: Database backend (e.g. 'mysql', 'mongodb', 'es') +# CEILOMETER_COORDINATION_URL: URL for group membership service provided by tooz. # CEILOMETER_EVENTS: Enable event collection - # Dependencies: # # - functions @@ -94,7 +91,7 @@ function is_ceilometer_enabled { return 1 } -# create_ceilometer_accounts() - Set up common required ceilometer accounts +# create_ceilometer_accounts() - Set up common required Ceilometer accounts # # Project User Roles # ------------------------------------------------------------------ @@ -117,14 +114,14 @@ function create_ceilometer_accounts { "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" fi if is_service_enabled swift; then - # Ceilometer needs ResellerAdmin role to access swift account stats. + # Ceilometer needs ResellerAdmin role to access Swift account stats. 
get_or_add_user_project_role "ResellerAdmin" "ceilometer" $SERVICE_TENANT_NAME fi fi } -# _cleanup_keystone_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file +# _cleanup_keystone_apache_wsgi() - Remove WSGI files, disable and remove Apache vhost file function _cleanup_ceilometer_apache_wsgi { sudo rm -f $CEILOMETER_WSGI_DIR/* sudo rm -f $(apache_site_config_for ceilometer) @@ -149,7 +146,7 @@ function _config_ceilometer_apache_wsgi { local ceilometer_apache_conf=$(apache_site_config_for ceilometer) local apache_version=$(get_apache_version) - # copy proxy vhost and wsgi file + # Copy proxy vhost and wsgi file sudo cp $CEILOMETER_DIR/ceilometer/api/app.wsgi $CEILOMETER_WSGI_DIR/app sudo cp $FILES/apache-ceilometer.template $ceilometer_apache_conf @@ -189,9 +186,9 @@ function configure_ceilometer { sed -i "s/interval:.*/interval: ${CEILOMETER_PIPELINE_INTERVAL}/" $CEILOMETER_CONF_DIR/pipeline.yaml fi - # the compute and central agents need these credentials in order to - # call out to other services' public APIs - # the alarm evaluator needs these options to call ceilometer APIs + # The compute and central agents need these credentials in order to + # call out to other services' public APIs. 
+ # The alarm evaluator needs these options to call ceilometer APIs iniset $CEILOMETER_CONF service_credentials os_username ceilometer iniset $CEILOMETER_CONF service_credentials os_password $SERVICE_PASSWORD iniset $CEILOMETER_CONF service_credentials os_tenant_name $SERVICE_TENANT_NAME @@ -237,7 +234,7 @@ function configure_ceilometer { } function configure_mongodb { - # server package is the same on all + # Server package is the same on all local packages=mongodb-server if is_fedora; then @@ -250,13 +247,13 @@ function configure_mongodb { install_package ${packages} if is_fedora; then - # ensure smallfiles selected to minimize freespace requirements + # Ensure smallfiles is selected to minimize freespace requirements sudo sed -i '/--smallfiles/!s/OPTIONS=\"/OPTIONS=\"--smallfiles /' /etc/sysconfig/mongod restart_service mongod fi - # give mongodb time to start-up + # Give mongodb time to start-up sleep 5 } @@ -347,7 +344,7 @@ function start_ceilometer { run_process ceilometer-acompute "ceilometer-agent-compute --config-file $CEILOMETER_CONF" fi - # only die on API if it was actually intended to be turned on + # Only die on API if it was actually intended to be turned on if is_service_enabled ceilometer-api; then echo "Waiting for ceilometer-api to start..." if ! wait_for_service $SERVICE_TIMEOUT $CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/v2/; then diff --git a/lib/dstat b/lib/dstat index 740e48f9e0..c8faa6578c 100644 --- a/lib/dstat +++ b/lib/dstat @@ -41,7 +41,7 @@ function start_dstat { # stop_dstat() stop dstat process function stop_dstat { # dstat runs as a console, not as a service, and isn't trackable - # via the normal mechanisms for devstack. So lets just do a + # via the normal mechanisms for DevStack. So lets just do a # killall and move on. 
killall dstat || /bin/true } diff --git a/lib/horizon b/lib/horizon index c6e3692d47..63a9d0fe46 100644 --- a/lib/horizon +++ b/lib/horizon @@ -129,7 +129,7 @@ function init_horizon { fi enable_apache_site horizon - # Remove old log files that could mess with how devstack detects whether Horizon + # Remove old log files that could mess with how DevStack detects whether Horizon # has been successfully started (see start_horizon() and functions::screen_it()) # and run_process sudo rm -f /var/log/$APACHE_NAME/horizon_* diff --git a/lib/ironic b/lib/ironic index a7738bc14e..fcf1a543a9 100644 --- a/lib/ironic +++ b/lib/ironic @@ -53,7 +53,7 @@ IRONIC_HW_EPHEMERAL_DISK=${IRONIC_HW_EPHEMERAL_DISK:-0} # The file is composed of multiple lines, each line includes four field # separated by white space: IPMI address, MAC address, IPMI username # and IPMI password. -# An example: +# # 192.168.110.107 00:1e:67:57:50:4c root otc123 IRONIC_IPMIINFO_FILE=${IRONIC_IPMIINFO_FILE:-$IRONIC_DATA_DIR/hardware_info} @@ -99,10 +99,10 @@ IRONIC_AGENT_KERNEL_URL=${IRONIC_AGENT_KERNEL_URL:-http://tarballs.openstack.org IRONIC_AGENT_RAMDISK_URL=${IRONIC_AGENT_RAMDISK_URL:-http://tarballs.openstack.org/ironic-python-agent/coreos/files/coreos_production_pxe_image-oem.cpio.gz} # Which deploy driver to use - valid choices right now -# are 'pxe_ssh', 'pxe_ipmitool', 'agent_ssh' and 'agent_ipmitool'. +# are ``pxe_ssh``, ``pxe_ipmitool``, ``agent_ssh`` and ``agent_ipmitool``. 
IRONIC_DEPLOY_DRIVER=${IRONIC_DEPLOY_DRIVER:-pxe_ssh} -#TODO(agordeev): replace 'ubuntu' with host distro name getting +# TODO(agordeev): replace 'ubuntu' with host distro name getting IRONIC_DEPLOY_FLAVOR=${IRONIC_DEPLOY_FLAVOR:-ubuntu $IRONIC_DEPLOY_ELEMENT} # Support entry points installation of console scripts diff --git a/lib/lvm b/lib/lvm index d0322c76b3..6c59937b0c 100644 --- a/lib/lvm +++ b/lib/lvm @@ -1,3 +1,5 @@ +#!/bin/bash +# # lib/lvm # Configure the default LVM volume group used by Cinder and Nova @@ -32,8 +34,8 @@ DEFAULT_VOLUME_GROUP_NAME=$VOLUME_GROUP_NAME-default BACKING_FILE_SUFFIX=-backing-file -# Entry Points -# ------------ +# Functions +# --------- # _clean_lvm_volume_group removes all default LVM volumes # @@ -52,7 +54,7 @@ function _clean_lvm_volume_group { function _clean_lvm_backing_file { local backing_file=$1 - # if the backing physical device is a loop device, it was probably setup by devstack + # If the backing physical device is a loop device, it was probably setup by DevStack if [[ -n "$backing_file" ]] && [[ -e "$backing_file" ]]; then local vg_dev=$(sudo losetup -j $backing_file | awk -F':' '/'$BACKING_FILE_SUFFIX'/ { print $1}') sudo losetup -d $vg_dev diff --git a/lib/nova b/lib/nova index 8e1b2f7b9d..385da4e44b 100644 --- a/lib/nova +++ b/lib/nova @@ -55,8 +55,9 @@ NOVA_CELLS_DB=${NOVA_CELLS_DB:-nova_cell} NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini} # NOVA_API_VERSION valid options -# - default - setup API end points as nova does out of the box -# - v21default - make v21 the default on /v2 +# - default - setup API end points as nova does out of the box +# - v21default - make v21 the default on /v2 +# # NOTE(sdague): this is for transitional testing of the Nova v21 API. # Expect to remove in L or M. 
NOVA_API_VERSION=${NOVA_API_VERSION-default} @@ -77,7 +78,7 @@ EC2_SERVICE_PORT=${EC2_SERVICE_PORT:-8773} EC2_SERVICE_PORT_INT=${EC2_SERVICE_PORT_INT:-18773} # Option to enable/disable config drive -# NOTE: Set FORCE_CONFIG_DRIVE="False" to turn OFF config drive +# NOTE: Set ``FORCE_CONFIG_DRIVE="False"`` to turn OFF config drive FORCE_CONFIG_DRIVE=${FORCE_CONFIG_DRIVE:-"True"} # Nova supports pluggable schedulers. The default ``FilterScheduler`` @@ -89,11 +90,11 @@ QEMU_CONF=/etc/libvirt/qemu.conf # Set default defaults here as some hypervisor drivers override these PUBLIC_INTERFACE_DEFAULT=br100 FLAT_NETWORK_BRIDGE_DEFAULT=br100 -# set the GUEST_INTERFACE_DEFAULT to some interface on the box so that -# the default isn't completely crazy. This will match eth*, em*, or -# the new p* interfaces, then basically picks the first +# Set ``GUEST_INTERFACE_DEFAULT`` to some interface on the box so that +# the default isn't completely crazy. This will match ``eth*``, ``em*``, or +# the new ``p*`` interfaces, then basically picks the first # alphabetically. It's probably wrong, however it's less wrong than -# always using 'eth0' which doesn't exist on new Linux distros at all. +# always using ``eth0`` which doesn't exist on new Linux distros at all. GUEST_INTERFACE_DEFAULT=$(ip link \ | grep 'state UP' \ | awk '{print $2}' \ @@ -101,8 +102,8 @@ GUEST_INTERFACE_DEFAULT=$(ip link \ | grep ^[ep] \ | head -1) -# $NOVA_VNC_ENABLED can be used to forcibly enable vnc configuration. -# In multi-node setups allows compute hosts to not run n-novnc. +# ``NOVA_VNC_ENABLED`` can be used to forcibly enable VNC configuration. +# In multi-node setups allows compute hosts to not run ``n-novnc``. NOVA_VNC_ENABLED=$(trueorfalse False NOVA_VNC_ENABLED) # Get hypervisor configuration @@ -144,7 +145,7 @@ FLAT_INTERFACE=${FLAT_INTERFACE:-$GUEST_INTERFACE_DEFAULT} # running the VM - removing a SPOF and bandwidth bottleneck. 
MULTI_HOST=$(trueorfalse False MULTI_HOST) -# ``NOVA_ALLOW_MOVE_TO_SAME_HOST` can be set to False in multi node devstack, +# ``NOVA_ALLOW_MOVE_TO_SAME_HOST`` can be set to False in multi node DevStack, # where there are at least two nova-computes. NOVA_ALLOW_MOVE_TO_SAME_HOST=$(trueorfalse True NOVA_ALLOW_MOVE_TO_SAME_HOST) diff --git a/lib/oslo b/lib/oslo index 86efb60a4e..d9688a01cd 100644 --- a/lib/oslo +++ b/lib/oslo @@ -2,7 +2,7 @@ # # lib/oslo # -# Functions to install oslo libraries from git +# Functions to install **Oslo** libraries from git # # We need this to handle the fact that projects would like to use # pre-released versions of oslo libraries. @@ -46,8 +46,9 @@ GITDIR["tooz"]=$DEST/tooz # Support entry points installation of console scripts OSLO_BIN_DIR=$(get_python_exec_prefix) -# Entry Points -# ------------ + +# Functions +# --------- function _do_install_oslo_lib { local name=$1 diff --git a/lib/rpc_backend b/lib/rpc_backend index 3033cbe08e..d82af6de6e 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -1,8 +1,7 @@ #!/bin/bash # # lib/rpc_backend -# Interface for interactig with different rpc backend -# rpc backend settings +# Interface for interacting with different RPC backends # Dependencies: # @@ -27,10 +26,10 @@ RPC_MESSAGING_PROTOCOL=${RPC_MESSAGING_PROTOCOL:-0.9} # messaging server as a service, which it really isn't for multi host QPID_HOST=${QPID_HOST:-} + # Functions # --------- - # Make sure we only have one rpc backend enabled. # Also check the specified rpc backend is available on your platform. function check_rpc_backend { diff --git a/lib/stack b/lib/stack index 11dd87ca28..47e8ce2a22 100644 --- a/lib/stack +++ b/lib/stack @@ -2,15 +2,18 @@ # # lib/stack # -# These functions are code snippets pulled out of stack.sh for easier +# These functions are code snippets pulled out of ``stack.sh`` for easier # re-use by Grenade.
They can assume the same environment is available -# as in the lower part of stack.sh, namely a valid stackrc has been sourced -# as well as all of the lib/* files for the services have been sourced. +# as in the lower part of ``stack.sh``, namely a valid stackrc has been sourced +# as well as all of the ``lib/*`` files for the services have been sourced. # # For clarity, all functions declared here that came from ``stack.sh`` # shall be named with the prefix ``stack_``. +# Functions +# --------- + # Generic service install handles venv creation if confgured for service # stack_install_service service function stack_install_service { diff --git a/lib/swift b/lib/swift index 28ef7de1f1..07068bb104 100644 --- a/lib/swift +++ b/lib/swift @@ -38,7 +38,6 @@ fi # Set up default directories GITDIR["python-swiftclient"]=$DEST/python-swiftclient - SWIFT_DIR=$DEST/swift SWIFT_AUTH_CACHE_DIR=${SWIFT_AUTH_CACHE_DIR:-/var/cache/swift} SWIFT_APACHE_WSGI_DIR=${SWIFT_APACHE_WSGI_DIR:-/var/www/swift} @@ -59,7 +58,7 @@ SWIFT_DISK_IMAGE=${SWIFT_DATA_DIR}/drives/images/swift.img SWIFT_CONF_DIR=${SWIFT_CONF_DIR:-/etc/swift} if is_service_enabled s-proxy && is_service_enabled swift3; then - # If we are using swift3, we can default the s3 port to swift instead + # If we are using ``swift3``, we can default the S3 port to swift instead # of nova-objectstore S3_SERVICE_PORT=${S3_SERVICE_PORT:-8080} fi @@ -137,11 +136,12 @@ ACCOUNT_PORT_BASE=${ACCOUNT_PORT_BASE:-6012} SWIFT_ENABLE_TEMPURLS=${SWIFT_ENABLE_TEMPURLS:-False} SWIFT_TEMPURL_KEY=${SWIFT_TEMPURL_KEY:-} +# Toggle for deploying Swift under HTTPD + mod_wsgi +SWIFT_USE_MOD_WSGI=${SWIFT_USE_MOD_WSGI:-False} + # Tell Tempest this project is present TEMPEST_SERVICES+=,swift -# Toggle for deploying Swift under HTTPD + mod_wsgi -SWIFT_USE_MOD_WSGI=${SWIFT_USE_MOD_WSGI:-False} # Functions # --------- @@ -303,7 +303,6 @@ function generate_swift_config_services { sed -i -e "s,#[ ]*recon_cache_path .*,recon_cache_path = 
${SWIFT_DATA_DIR}/cache," ${swift_node_config} } - # configure_swift() - Set config files, create data dirs and loop image function configure_swift { local swift_pipeline="${SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH}" @@ -374,12 +373,9 @@ function configure_swift { iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT key_file "$SWIFT_SSL_KEY" fi - # Devstack is commonly run in a small slow environment, so bump the - # timeouts up. - # node_timeout is how long between read operations a node takes to - # respond to the proxy server - # conn_timeout is all about how long it takes a connect() system call to - # return + # DevStack is commonly run in a small slow environment, so bump the timeouts up. + # ``node_timeout`` is the node read operation response time to the proxy server + # ``conn_timeout`` is how long it takes a connect() system call to return iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server node_timeout 120 iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server conn_timeout 20 @@ -394,10 +390,10 @@ function configure_swift { SWIFT_EXTRAS_MIDDLEWARE_LAST="${SWIFT_EXTRAS_MIDDLEWARE_LAST} ceilometer" fi - # Restrict the length of auth tokens in the swift proxy-server logs. + # Restrict the length of auth tokens in the Swift ``proxy-server`` logs. iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:proxy-logging reveal_sensitive_prefix ${SWIFT_LOG_TOKEN_LENGTH} - # By default Swift will be installed with keystone and tempauth middleware + # By default Swift will be installed with Keystone and tempauth middleware # and add the swift3 middleware if its configured for it. 
The token for # tempauth would be prefixed with the reseller_prefix setting `TEMPAUTH_` the # token for keystoneauth would have the standard reseller_prefix `AUTH_` @@ -413,17 +409,13 @@ function configure_swift { sed -i "/^pipeline/ { s/tempauth/${swift_pipeline} ${SWIFT_EXTRAS_MIDDLEWARE}/ ;}" ${SWIFT_CONFIG_PROXY_SERVER} sed -i "/^pipeline/ { s/proxy-server/${SWIFT_EXTRAS_MIDDLEWARE_LAST} proxy-server/ ; }" ${SWIFT_CONFIG_PROXY_SERVER} - iniset ${SWIFT_CONFIG_PROXY_SERVER} app:proxy-server account_autocreate true - - # Configure Crossdomain iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:crossdomain use "egg:swift#crossdomain" - - # This causes the authtoken middleware to use the same python logging - # adapter provided by the swift proxy-server, so that request transaction + # Configure authtoken middleware to use the same Python logging + # adapter provided by the Swift ``proxy-server``, so that request transaction # IDs will included in all of its log messages. iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:authtoken log_name swift @@ -436,7 +428,7 @@ function configure_swift { iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth use "egg:swift#keystoneauth" iniset ${SWIFT_CONFIG_PROXY_SERVER} filter:keystoneauth operator_roles "Member, admin" - # Configure Tempauth. In the sample config file, Keystoneauth is commented + # Configure Tempauth. In the sample config file Keystoneauth is commented # out. Make sure we uncomment Tempauth after we uncomment Keystoneauth # otherwise, this code also sets the reseller_prefix for Keystoneauth. 
iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} filter:tempauth account_autocreate @@ -579,7 +571,8 @@ function create_swift_disk { sudo chown -R ${STACK_USER}: ${node} done } -# create_swift_accounts() - Set up standard swift accounts and extra + +# create_swift_accounts() - Set up standard Swift accounts and extra # one for tests we do this by attaching all words in the account name # since we want to make it compatible with tempauth which use # underscores for separators. @@ -593,9 +586,9 @@ function create_swift_disk { # swifttenanttest4 swiftusertest4 admin swift_test function create_swift_accounts { - # Defines specific passwords used by tools/create_userrc.sh - # As these variables are used by create_userrc.sh, they must be exported - # The _password suffix is expected by create_userrc.sh + # Defines specific passwords used by ``tools/create_userrc.sh`` + # As these variables are used by ``create_userrc.sh``, they must be exported + # The _password suffix is expected by ``create_userrc.sh``. export swiftusertest1_password=testing export swiftusertest2_password=testing2 export swiftusertest3_password=testing3 @@ -725,8 +718,8 @@ function start_swift { # By default with only one replica we are launching the proxy, # container, account and object server in screen in foreground and - # other services in background. If we have SWIFT_REPLICAS set to something - # greater than one we first spawn all the swift services then kill the proxy + # other services in background. If we have ``SWIFT_REPLICAS`` set to something + # greater than one we first spawn all the Swift services then kill the proxy # service so we can run it in foreground in screen. ``swift-init ...
# {stop|restart}`` exits with '1' if no servers are running, ignore it just # in case @@ -762,7 +755,7 @@ function stop_swift { swift-init --run-dir=${SWIFT_DATA_DIR}/run rest stop && return 0 fi - # screen normally killed by unstack.sh + # screen normally killed by ``unstack.sh`` if type -p swift-init >/dev/null; then swift-init --run-dir=${SWIFT_DATA_DIR}/run all stop || true fi diff --git a/lib/tempest b/lib/tempest index 8672a14338..d86ee27fd0 100644 --- a/lib/tempest +++ b/lib/tempest @@ -62,13 +62,11 @@ BUILD_INTERVAL=1 # The default is set to 196 seconds. BUILD_TIMEOUT=${BUILD_TIMEOUT:-196} - # This must be False on stable branches, as master tempest # deps do not match stable branch deps. Set this to True to -# have tempest installed in devstack by default. +# have tempest installed in DevStack by default. INSTALL_TEMPEST=${INSTALL_TEMPEST:-"True"} - BOTO_MATERIALS_PATH="$FILES/images/s3-materials/cirros-${CIRROS_VERSION}" BOTO_CONF=/etc/boto.cfg @@ -83,6 +81,7 @@ TEMPEST_STORAGE_PROTOCOL=${TEMPEST_STORAGE_PROTOCOL:-$TEMPEST_DEFAULT_STORAGE_PR IPV6_ENABLED=$(trueorfalse True IPV6_ENABLED) IPV6_SUBNET_ATTRIBUTES_ENABLED=$(trueorfalse True IPV6_SUBNET_ATTRIBUTES_ENABLED) + # Functions # --------- @@ -168,8 +167,8 @@ function configure_tempest { esac fi - # Create tempest.conf from tempest.conf.sample - # copy every time, because the image UUIDS are going to change + # Create ``tempest.conf`` from ``tempest.conf.sample`` + # Copy every time because the image UUIDS are going to change sudo install -d -o $STACK_USER $TEMPEST_CONFIG_DIR install -m 644 $TEMPEST_DIR/etc/tempest.conf.sample $TEMPEST_CONFIG @@ -179,8 +178,8 @@ function configure_tempest { # the cloud. We don't always want to so that we can ensure Tempest # would work on a public cloud. TEMPEST_HAS_ADMIN=$(trueorfalse True TEMPEST_HAS_ADMIN) - # See files/keystone_data.sh and stack.sh where admin, demo and alt_demo - # user and tenant are set up... 
+ + # See ``lib/keystone`` where these users and tenants are set up ADMIN_USERNAME=${ADMIN_USERNAME:-admin} ADMIN_TENANT_NAME=${ADMIN_TENANT_NAME:-admin} ADMIN_DOMAIN_NAME=${ADMIN_DOMAIN_NAME:-Default} @@ -191,13 +190,13 @@ function configure_tempest { ADMIN_TENANT_ID=$(openstack project list | awk "/ admin / { print \$2 }") if is_service_enabled nova; then - # If the ``DEFAULT_INSTANCE_TYPE`` not declared, use the new behavior - # Tempest creates instane types for himself + # If ``DEFAULT_INSTANCE_TYPE`` is not declared, use the new behavior + # Tempest creates its own instance types if [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then available_flavors=$(nova flavor-list) if [[ ! ( $available_flavors =~ 'm1.nano' ) ]]; then if is_arch "ppc64"; then - # qemu needs at least 128MB of memory to boot on ppc64 + # Qemu needs at least 128MB of memory to boot on ppc64 nova flavor-create m1.nano 42 128 0 1 else nova flavor-create m1.nano 42 64 0 1 @@ -214,8 +213,7 @@ function configure_tempest { fi flavor_ref_alt=84 else - # Check Nova for existing flavors and, if set, look for the - # ``DEFAULT_INSTANCE_TYPE`` and use that. + # Check Nova for existing flavors, if ``DEFAULT_INSTANCE_TYPE`` is set use it. boto_instance_type=$DEFAULT_INSTANCE_TYPE flavor_lines=`nova flavor-list` IFS=$'\r\n' @@ -240,8 +238,8 @@ function configure_tempest { flavor_ref=${flavors[0]} flavor_ref_alt=$flavor_ref - # ensure flavor_ref and flavor_ref_alt have different values - # some resize instance in tempest tests depends on this. + # Ensure ``flavor_ref`` and ``flavor_ref_alt`` have different values. + # Some resize instance in tempest tests depends on this. 
for f in ${flavors[@]:1}; do if [[ $f -ne $flavor_ref ]]; then flavor_ref_alt=$f @@ -266,7 +264,7 @@ function configure_tempest { public_network_id=$(neutron net-list | grep $PUBLIC_NETWORK_NAME | \ awk '{print $2}') if [ "$Q_USE_NAMESPACE" == "False" ]; then - # If namespaces are disabled, devstack will create a single + # If namespaces are disabled, DevStack will create a single # public router that tempest should be configured to use. public_router_id=$(neutron router-list | awk "/ $Q_ROUTER_NAME / \ { print \$2 }") @@ -274,6 +272,7 @@ function configure_tempest { fi iniset $TEMPEST_CONFIG DEFAULT use_syslog $SYSLOG + # Oslo iniset $TEMPEST_CONFIG oslo_concurrency lock_path $TEMPEST_STATE_PATH mkdir -p $TEMPEST_STATE_PATH @@ -309,15 +308,13 @@ function configure_tempest { fi # Image - # for the gate we want to be able to override this variable so we aren't - # doing an HTTP fetch over the wide internet for this test + # We want to be able to override this variable in the gate to avoid + # doing an external HTTP fetch for this test. if [[ ! 
-z "$TEMPEST_HTTP_IMAGE" ]]; then iniset $TEMPEST_CONFIG image http_image $TEMPEST_HTTP_IMAGE fi # Auth - # - # TEMPEST_ALLOW_TENANT_ISOLATION=${TEMPEST_ALLOW_TENANT_ISOLATION:-$TEMPEST_HAS_ADMIN} iniset $TEMPEST_CONFIG auth allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True} iniset $TEMPEST_CONFIG auth tempest_roles "Member" @@ -336,7 +333,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute ssh_connect_method $ssh_connect_method # Compute Features - # Run verify_tempest_config -ur to retrieve enabled extensions on API endpoints + # Run ``verify_tempest_config -ur`` to retrieve enabled extensions on API endpoints # NOTE(mtreinish): This must be done after auth settings are added to the tempest config local tmp_cfg_file=$(mktemp) cd $TEMPEST_DIR @@ -417,11 +414,11 @@ function configure_tempest { iniset $TEMPEST_CONFIG scenario large_ops_number ${TEMPEST_LARGE_OPS_NUMBER:-0} # Telemetry - # Ceilometer API optimization happened in juno that allows to run more tests in tempest. + # Ceilometer API optimization happened in Juno that allows to run more tests in tempest. # Once Tempest retires support for icehouse this flag can be removed. iniset $TEMPEST_CONFIG telemetry too_slow_to_test "False" - # Object storage + # Object Store local object_storage_api_extensions=${OBJECT_STORAGE_API_EXTENSIONS:-"all"} if [[ ! 
-z "$DISABLE_OBJECT_STORAGE_API_EXTENSIONS" ]]; then # Enabled extensions are either the ones explicitly specified or those available on the API endpoint @@ -445,7 +442,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG volume-feature-enabled backup False fi - # Using CINDER_ENABLED_BACKENDS + # Using ``CINDER_ENABLED_BACKENDS`` if [[ -n "$CINDER_ENABLED_BACKENDS" ]] && [[ $CINDER_ENABLED_BACKENDS =~ .*,.* ]]; then iniset $TEMPEST_CONFIG volume-feature-enabled multi_backend "True" local i=1 @@ -470,7 +467,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG dashboard dashboard_url "http://$SERVICE_HOST/" iniset $TEMPEST_CONFIG dashboard login_url "http://$SERVICE_HOST/auth/login/" - # cli + # CLI iniset $TEMPEST_CONFIG cli cli_dir $NOVA_BIN_DIR # Baremetal @@ -495,7 +492,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute-feature-enabled suspend False fi - # service_available + # ``service_available`` for service in ${TEMPEST_SERVICES//,/ }; do if is_service_enabled $service ; then iniset $TEMPEST_CONFIG service_available $service "True" @@ -505,7 +502,7 @@ function configure_tempest { done if is_ssl_enabled_service "key" || is_service_enabled tls-proxy; then - # Use the BOTO_CONFIG environment variable to point to this file + # Use the ``BOTO_CONFIG`` environment variable to point to this file iniset $BOTO_CONF Boto ca_certificates_file $SSL_BUNDLE_FILE sudo chown $STACK_USER $BOTO_CONF fi @@ -520,7 +517,6 @@ function configure_tempest { # ------------------------------------------------------------------ # alt_demo alt_demo Member -# Migrated from keystone_data.sh function create_tempest_accounts { if is_service_enabled tempest; then # Tempest has some tests that validate various authorization checks @@ -531,13 +527,13 @@ function create_tempest_accounts { fi } -# install_tempest_lib() - Collect source, prepare, and install tempest-lib +# install_tempest_lib() - Collect source, prepare, and install ``tempest-lib`` function 
install_tempest_lib { if use_library_from_git "tempest-lib"; then git_clone_by_name "tempest-lib" setup_dev_lib "tempest-lib" - # NOTE(mtreinish) For testing tempest-lib from git with tempest we need - # put the git version of tempest-lib in the tempest job's tox venv + # NOTE(mtreinish) For testing ``tempest-lib`` from git with Tempest we need to + # put the git version of ``tempest-lib`` in the Tempest job's tox venv export PIP_VIRTUAL_ENV=${PROJECT_VENV["tempest"]} setup_dev_lib "tempest-lib" unset PIP_VIRTUAL_ENV @@ -555,7 +551,7 @@ function install_tempest { popd } -# init_tempest() - Initialize ec2 images +# init_tempest() - Initialize EC2 images function init_tempest { local base_image_name=cirros-${CIRROS_VERSION}-${CIRROS_ARCH} # /opt/stack/devstack/files/images/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec @@ -564,7 +560,7 @@ function init_tempest { local ramdisk="$image_dir/${base_image_name}-initrd" local disk_image="$image_dir/${base_image_name}-blank.img" if is_service_enabled nova; then - # if the cirros uec downloaded and the system is uec capable + # If the CirrOS uec downloaded and the system is UEC capable if [ -f "$kernel" -a -f "$ramdisk" -a -f "$disk_image" -a "$VIRT_DRIVER" != "openvz" \ -a \( "$LIBVIRT_TYPE" != "lxc" -o "$VIRT_DRIVER" != "libvirt" \) ]; then echo "Prepare aki/ari/ami Images" diff --git a/lib/tls b/lib/tls index 677895b9b2..09f1c2dfdd 100644 --- a/lib/tls +++ b/lib/tls @@ -32,6 +32,7 @@ # - is_ssl_enabled_service # - enable_mod_ssl + # Defaults # -------- @@ -92,7 +93,6 @@ function create_CA_base { cp /dev/null $ca_dir/index.txt } - # Create a new CA configuration file # create_CA_config ca-dir common-name function create_CA_config { @@ -248,7 +248,6 @@ function init_cert { fi } - # make_cert creates and signs a new certificate with the given commonName and CA # make_cert ca-dir cert-name "common-name" ["alt-name" ...] 
function make_cert { @@ -287,7 +286,6 @@ function make_cert { fi } - # Make an intermediate CA to sign everything else # make_int_CA ca-dir signing-ca-dir function make_int_CA { @@ -362,17 +360,16 @@ function is_ssl_enabled_service { return 1 } - # Ensure that the certificates for a service are in place. This function does # not check that a service is SSL enabled, this should already have been # completed. # # The function expects to find a certificate, key and CA certificate in the -# variables {service}_SSL_CERT, {service}_SSL_KEY and {service}_SSL_CA. For -# example for keystone this would be KEYSTONE_SSL_CERT, KEYSTONE_SSL_KEY and -# KEYSTONE_SSL_CA. +# variables ``{service}_SSL_CERT``, ``{service}_SSL_KEY`` and ``{service}_SSL_CA``. For +# example for keystone this would be ``KEYSTONE_SSL_CERT``, ``KEYSTONE_SSL_KEY`` and +# ``KEYSTONE_SSL_CA``. # -# If it does not find these certificates then the devstack-issued server +# If it does not find these certificates then the DevStack-issued server # certificate, key and CA certificate will be associated with the service. # # If only some of the variables are provided then the function will quit. @@ -437,14 +434,12 @@ function start_tls_proxy { # Cleanup Functions # ================= - # Stops all stud processes. This should be done only after all services # using tls configuration are down. 
function stop_tls_proxy { killall stud } - # Remove CA along with configuration, as well as the local server certificate function cleanup_CA { rm -rf "$DATA_DIR/CA" "$DEVSTACK_CERT" diff --git a/lib/trove b/lib/trove index 5dd4f23611..b0a96100c2 100644 --- a/lib/trove +++ b/lib/trove @@ -21,6 +21,7 @@ XTRACE=$(set +o | grep xtrace) set +o xtrace + # Defaults # -------- if is_service_enabled neutron; then @@ -80,7 +81,7 @@ function setup_trove_logging { fi } -# create_trove_accounts() - Set up common required trove accounts +# create_trove_accounts() - Set up common required Trove accounts # Tenant User Roles # ------------------------------------------------------------------ @@ -115,7 +116,6 @@ function cleanup_trove { rm -fr $TROVE_CONF_DIR/* } - # configure_trove() - Set config files, create data dirs, etc function configure_trove { setup_develop $TROVE_DIR diff --git a/run_tests.sh b/run_tests.sh index 3ba7e1023d..c6b7da64c0 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -11,9 +11,8 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -# -# -# this runs a series of unit tests for devstack to ensure it's functioning + +# This runs a series of unit tests for DevStack to ensure it's functioning PASSES="" FAILURES="" diff --git a/samples/local.conf b/samples/local.conf index 63000b65ba..bd0cd9c0db 100644 --- a/samples/local.conf +++ b/samples/local.conf @@ -1,7 +1,6 @@ # Sample ``local.conf`` for user-configurable variables in ``stack.sh`` -# NOTE: Copy this file to the root ``devstack`` directory for it to -# work properly. +# NOTE: Copy this file to the root DevStack directory for it to work properly. # ``local.conf`` is a user-maintained settings file that is sourced from ``stackrc``. # This gives it the ability to override any variables set in ``stackrc``. 
diff --git a/samples/local.sh b/samples/local.sh index 664cb663fe..634f6ddb17 100755 --- a/samples/local.sh +++ b/samples/local.sh @@ -3,15 +3,14 @@ # Sample ``local.sh`` for user-configurable tasks to run automatically # at the successful conclusion of ``stack.sh``. -# NOTE: Copy this file to the root ``devstack`` directory for it to -# work properly. +# NOTE: Copy this file to the root DevStack directory for it to work properly. # This is a collection of some of the things we have found to be useful to run # after ``stack.sh`` to tweak the OpenStack configuration that DevStack produces. # These should be considered as samples and are unsupported DevStack code. -# Keep track of the devstack directory +# Keep track of the DevStack directory TOP_DIR=$(cd $(dirname "$0") && pwd) # Import common functions @@ -50,7 +49,7 @@ if is_service_enabled nova; then source $TOP_DIR/openrc admin admin # Name of new flavor - # set in ``localrc`` with ``DEFAULT_INSTANCE_TYPE=m1.micro`` + # set in ``local.conf`` with ``DEFAULT_INSTANCE_TYPE=m1.micro`` MI_NAME=m1.micro # Create micro flavor if not present diff --git a/stack.sh b/stack.sh index 090d527328..8ab82348f3 100755 --- a/stack.sh +++ b/stack.sh @@ -16,18 +16,11 @@ # (14.04 Trusty or newer), **Fedora** (F20 or newer), or **CentOS/RHEL** # (7 or newer) machine. (It may work on other platforms but support for those # platforms is left to those who added them to DevStack.) It should work in -# a VM or physical server. Additionally, we maintain a list of ``apt`` and +# a VM or physical server. Additionally, we maintain a list of ``deb`` and # ``rpm`` dependencies and other configuration files in this repo. # Learn more and get the most recent version at http://devstack.org -# check if someone has invoked with "sh" -if [[ "${POSIXLY_CORRECT}" == "y" ]]; then - echo "You appear to be running bash in POSIX compatibility mode." - echo "devstack uses bash features. 
\"./stack.sh\" should do the right thing" - exit 1 -fi - # Make sure custom grep options don't get in the way unset GREP_OPTIONS @@ -44,7 +37,7 @@ umask 022 # Not all distros have sbin in PATH for regular users. PATH=$PATH:/usr/local/sbin:/usr/sbin:/sbin -# Keep track of the devstack directory +# Keep track of the DevStack directory TOP_DIR=$(cd $(dirname "$0") && pwd) # Check for uninitialized variables, a big cause of bugs @@ -53,6 +46,10 @@ if [[ -n "$NOUNSET" ]]; then set -o nounset fi + +# Configuration +# ============= + # Sanity Checks # ------------- @@ -61,7 +58,7 @@ if [[ -r $TOP_DIR/.stackenv ]]; then rm $TOP_DIR/.stackenv fi -# ``stack.sh`` keeps the list of ``apt`` and ``rpm`` dependencies and config +# ``stack.sh`` keeps the list of ``deb`` and ``rpm`` dependencies, config # templates and other useful files in the ``files`` subdirectory FILES=$TOP_DIR/files if [ ! -d $FILES ]; then @@ -69,12 +66,23 @@ if [ ! -d $FILES ]; then fi # ``stack.sh`` keeps function libraries here +# Make sure ``$TOP_DIR/inc`` directory is present +if [ ! -d $TOP_DIR/inc ]; then + die $LINENO "missing devstack/inc" +fi + +# ``stack.sh`` keeps project libraries here # Make sure ``$TOP_DIR/lib`` directory is present if [ ! -d $TOP_DIR/lib ]; then die $LINENO "missing devstack/lib" fi -# Check if run as root +# Check if run in POSIX shell +if [[ "${POSIXLY_CORRECT}" == "y" ]]; then + echo "You are running POSIX compatibility mode, DevStack requires bash 4.2 or newer." + exit 1 +fi + # OpenStack is designed to be run as a non-root user; Horizon will fail to run # as **root** since Apache will not serve content from **root** user). # ``stack.sh`` must not be run as **root**. 
It aborts and suggests one course of @@ -89,8 +97,6 @@ if [[ $EUID -eq 0 ]]; then exit 1 fi -# Print the kernel version -uname -a # Prepare the environment # ----------------------- @@ -112,6 +118,7 @@ source $TOP_DIR/lib/stack # and ``DISTRO`` GetDistro + # Global Settings # --------------- @@ -134,7 +141,6 @@ if [[ -r $TOP_DIR/local.conf ]]; then done fi - # ``stack.sh`` is customizable by setting environment variables. Override a # default setting via export:: # @@ -145,18 +151,20 @@ fi # # DATABASE_PASSWORD=simple ./stack.sh # -# Persistent variables can be placed in a ``localrc`` file:: +# Persistent variables can be placed in a ``local.conf`` file:: # +# [[local|localrc]] # DATABASE_PASSWORD=anothersecret # DATABASE_USER=hellaroot # # We try to have sensible defaults, so you should be able to run ``./stack.sh`` -# in most cases. ``localrc`` is not distributed with DevStack and will never +# in most cases. ``local.conf`` is not distributed with DevStack and will never # be overwritten by a DevStack update. # # DevStack distributes ``stackrc`` which contains locations for the OpenStack # repositories, branches to configure, and other configuration defaults. -# ``stackrc`` sources ``localrc`` to allow you to safely override those settings. +# ``stackrc`` sources the ``localrc`` section of ``local.conf`` to allow you to +# safely override those settings. if [[ ! -r $TOP_DIR/stackrc ]]; then die $LINENO "missing $TOP_DIR/stackrc - did you grab more than just stack.sh?" @@ -188,34 +196,27 @@ fi # Make sure the proxy config is visible to sub-processes export_proxy_variables -# Remove services which were negated in ENABLED_SERVICES +# Remove services which were negated in ``ENABLED_SERVICES`` # using the "-" prefix (e.g., "-rabbit") instead of # calling disable_service(). 
disable_negated_services -# Look for obsolete stuff -# if [[ ,${ENABLED_SERVICES}, =~ ,"swift", ]]; then -# echo "FATAL: 'swift' is not supported as a service name" -# echo "FATAL: Use the actual swift service names to enable them as required:" -# echo "FATAL: s-proxy s-object s-container s-account" -# exit 1 -# fi # Configure sudo # -------------- -# We're not **root**, make sure ``sudo`` is available +# We're not **root** so make sure ``sudo`` is available is_package_installed sudo || install_package sudo # UEC images ``/etc/sudoers`` does not have a ``#includedir``, add one sudo grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || echo "#includedir /etc/sudoers.d" | sudo tee -a /etc/sudoers -# Set up devstack sudoers +# Set up DevStack sudoers TEMPFILE=`mktemp` echo "$STACK_USER ALL=(root) NOPASSWD:ALL" >$TEMPFILE -# Some binaries might be under /sbin or /usr/sbin, so make sure sudo will -# see them by forcing PATH +# Some binaries might be under ``/sbin`` or ``/usr/sbin``, so make sure sudo will +# see them by forcing ``PATH`` echo "Defaults:$STACK_USER secure_path=/sbin:/usr/sbin:/usr/bin:/bin:/usr/local/sbin:/usr/local/bin" >> $TEMPFILE echo "Defaults:$STACK_USER !requiretty" >> $TEMPFILE chmod 0440 $TEMPFILE @@ -226,7 +227,7 @@ sudo mv $TEMPFILE /etc/sudoers.d/50_stack_sh # Configure Distro Repositories # ----------------------------- -# For debian/ubuntu make apt attempt to retry network ops on it's own +# For Debian/Ubuntu make apt attempt to retry network ops on its own if is_ubuntu; then echo 'APT::Acquire::Retries "20";' | sudo tee /etc/apt/apt.conf.d/80retry >/dev/null fi @@ -237,7 +238,7 @@ fi if is_fedora && [[ $DISTRO == "rhel7" ]]; then # RHEL requires EPEL for many Open Stack dependencies - # note we always remove and install latest -- some environments + # NOTE: We always remove and install latest -- some environments # use snapshot images, and if EPEL version updates they break # unless we update them to latest version.
if sudo yum repolist enabled epel | grep -q 'epel'; then @@ -248,7 +249,7 @@ if is_fedora && [[ $DISTRO == "rhel7" ]]; then # repo, then removes itself (as epel-release installed the # "real" repo). # - # you would think that rather than this, you could use + # You would think that rather than this, you could use # $releasever directly in .repo file we create below. However # RHEL gives a $releasever of "6Server" which breaks the path; # see https://bugzilla.redhat.com/show_bug.cgi?id=1150759 @@ -265,7 +266,7 @@ EOF sudo yum-config-manager --enable epel-bootstrap yum_install epel-release || \ die $LINENO "Error installing EPEL repo, cannot continue" - # epel rpm has installed it's version + # EPEL rpm has installed its version sudo rm -f /etc/yum.repos.d/epel-bootstrap.repo # ... and also optional to be enabled @@ -300,7 +301,7 @@ sudo mkdir -p $DEST safe_chown -R $STACK_USER $DEST safe_chmod 0755 $DEST -# a basic test for $DEST path permissions (fatal on error unless skipped) +# Basic test for ``$DEST`` path permissions (fatal on error unless skipped) check_path_perm_sanity ${DEST} # Destination path for service data @@ -488,6 +489,9 @@ set -o errexit # an error. It is also useful for following along as the install occurs. set -o xtrace +# Print the kernel version +uname -a + # Reset the bundle of CA certificates SSL_BUNDLE_FILE="$DATA_DIR/ca-bundle.pem" rm -f $SSL_BUNDLE_FILE @@ -500,7 +504,7 @@ source $TOP_DIR/lib/rpc_backend # and the specified rpc backend is available on your platform.
check_rpc_backend -# Service to enable with SSL if USE_SSL is True +# Service to enable with SSL if ``USE_SSL`` is True SSL_ENABLED_SERVICES="key,nova,cinder,glance,s-proxy,neutron" if is_service_enabled tls-proxy && [ "$USE_SSL" == "True" ]; then @@ -514,7 +518,7 @@ fi # defaults before other services are run run_phase override_defaults -# Import apache functions +# Import Apache functions source $TOP_DIR/lib/apache # Import TLS functions @@ -598,8 +602,9 @@ function read_password { # Database Configuration +# ---------------------- -# To select between database backends, add the following to ``localrc``: +# To select between database backends, add the following to ``local.conf``: # # disable_service mysql # enable_service postgresql @@ -611,9 +616,10 @@ initialize_database_backends && echo "Using $DATABASE_TYPE database backend" || # Queue Configuration +# ------------------- # Rabbit connection info -# In multi node devstack, second node needs RABBIT_USERID, but rabbit +# In multi node DevStack, second node needs ``RABBIT_USERID``, but rabbit # isn't enabled. RABBIT_USERID=${RABBIT_USERID:-stackrabbit} if is_service_enabled rabbit; then @@ -623,6 +629,7 @@ fi # Keystone +# -------- if is_service_enabled keystone; then # The ``SERVICE_TOKEN`` is used to bootstrap the Keystone database. It is @@ -634,14 +641,14 @@ if is_service_enabled keystone; then read_password ADMIN_PASSWORD "ENTER A PASSWORD TO USE FOR HORIZON AND KEYSTONE (20 CHARS OR LESS)." # Keystone can now optionally install OpenLDAP by enabling the ``ldap`` - # service in ``localrc`` (e.g. ``enable_service ldap``). + # service in ``local.conf`` (e.g. ``enable_service ldap``). # To clean out the Keystone contents in OpenLDAP set ``KEYSTONE_CLEAR_LDAP`` - # to ``yes`` (e.g. ``KEYSTONE_CLEAR_LDAP=yes``) in ``localrc``. To enable the + # to ``yes`` (e.g. ``KEYSTONE_CLEAR_LDAP=yes``) in ``local.conf``. 
To enable the # Keystone Identity Driver (``keystone.identity.backends.ldap.Identity``) # set ``KEYSTONE_IDENTITY_BACKEND`` to ``ldap`` (e.g. - # ``KEYSTONE_IDENTITY_BACKEND=ldap``) in ``localrc``. + # ``KEYSTONE_IDENTITY_BACKEND=ldap``) in ``local.conf``. - # only request ldap password if the service is enabled + # Only request LDAP password if the service is enabled if is_service_enabled ldap; then read_password LDAP_PASSWORD "ENTER A PASSWORD TO USE FOR LDAP" fi @@ -649,6 +656,7 @@ fi # Swift +# ----- if is_service_enabled s-proxy; then # We only ask for Swift Hash if we have enabled swift service. @@ -672,14 +680,14 @@ fi echo_summary "Installing package prerequisites" source $TOP_DIR/tools/install_prereqs.sh -# Configure an appropriate python environment +# Configure an appropriate Python environment if [[ "$OFFLINE" != "True" ]]; then PYPI_ALTERNATIVE_URL=${PYPI_ALTERNATIVE_URL:-""} $TOP_DIR/tools/install_pip.sh fi TRACK_DEPENDS=${TRACK_DEPENDS:-False} -# Install python packages into a virtualenv so that we can track them +# Install Python packages into a virtualenv so that we can track them if [[ $TRACK_DEPENDS = True ]]; then echo_summary "Installing Python packages into a virtualenv $DEST/.venv" pip_install -U virtualenv @@ -728,10 +736,10 @@ echo_summary "Installing OpenStack project source" # Install required infra support libraries install_infra -# Install oslo libraries that have graduated +# Install Oslo libraries install_oslo -# Install clients libraries +# Install client libraries install_keystoneclient install_glanceclient install_cinderclient @@ -749,7 +757,6 @@ fi # Install middleware install_keystonemiddleware - if is_service_enabled keystone; then if [ "$KEYSTONE_AUTH_HOST" == "$SERVICE_HOST" ]; then stack_install_service keystone @@ -766,7 +773,7 @@ if is_service_enabled s-proxy; then # swift3 middleware to provide S3 emulation to Swift if is_service_enabled swift3; then - # replace the nova-objectstore port by the swift port + # Replace the 
nova-objectstore port by the swift port S3_SERVICE_PORT=8080 git_clone $SWIFT3_REPO $SWIFT3_DIR $SWIFT3_BRANCH setup_develop $SWIFT3_DIR @@ -774,23 +781,25 @@ if is_service_enabled s-proxy; then fi if is_service_enabled g-api n-api; then - # image catalog service + # Image catalog service stack_install_service glance configure_glance fi if is_service_enabled cinder; then + # Block volume service stack_install_service cinder configure_cinder fi if is_service_enabled neutron; then + # Network service stack_install_service neutron install_neutron_third_party fi if is_service_enabled nova; then - # compute service + # Compute service stack_install_service nova cleanup_nova configure_nova @@ -822,18 +831,18 @@ if is_service_enabled tls-proxy || [ "$USE_SSL" == "True" ]; then configure_CA init_CA init_cert - # Add name to /etc/hosts - # don't be naive and add to existing line! + # Add name to ``/etc/hosts``. + # Don't be naive and add to existing line! fi + # Extras Install # -------------- # Phase: install run_phase stack install - -# install the OpenStack client, needed for most setup commands +# Install the OpenStack client, needed for most setup commands if use_library_from_git "python-openstackclient"; then git_clone_by_name "python-openstackclient" setup_dev_lib "python-openstackclient" @@ -841,7 +850,6 @@ else pip_install 'python-openstackclient>=1.0.2' fi - if [[ $TRACK_DEPENDS = True ]]; then $DEST/.venv/bin/pip freeze > $DEST/requires-post-pip if ! 
diff -Nru $DEST/requires-pre-pip $DEST/requires-post-pip > $DEST/requires.diff; then @@ -934,7 +942,7 @@ if [[ "$USE_SCREEN" == "True" ]]; then screen -r $SCREEN_NAME -X setenv PROMPT_COMMAND /bin/true fi -# Clear screen rc file +# Clear ``screenrc`` file SCREENRC=$TOP_DIR/$SCREEN_NAME-screenrc if [[ -e $SCREENRC ]]; then rm -f $SCREENRC @@ -943,14 +951,16 @@ fi # Initialize the directory for service status check init_service_check + +# Start Services +# ============== + # Dstat -# ------- +# ----- # A better kind of sysstat, with the top process per time slice start_dstat -# Start Services -# ============== # Keystone # -------- @@ -972,7 +982,7 @@ if is_service_enabled keystone; then SERVICE_ENDPOINT=http://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT_INT/v2.0 fi - # Setup OpenStackclient token-flow auth + # Setup OpenStackClient token-endpoint auth export OS_TOKEN=$SERVICE_TOKEN export OS_URL=$SERVICE_ENDPOINT @@ -994,10 +1004,10 @@ if is_service_enabled keystone; then create_heat_accounts fi - # Begone token-flow auth + # Begone token auth unset OS_TOKEN OS_URL - # Set up password-flow auth creds now that keystone is bootstrapped + # Set up password auth credentials now that Keystone is bootstrapped export OS_AUTH_URL=$SERVICE_ENDPOINT export OS_TENANT_NAME=admin export OS_USERNAME=admin @@ -1042,7 +1052,7 @@ if is_service_enabled neutron; then echo_summary "Configuring Neutron" configure_neutron - # Run init_neutron only on the node hosting the neutron API server + # Run init_neutron only on the node hosting the Neutron API server if is_service_enabled $DATABASE_BACKENDS && is_service_enabled q-svc; then init_neutron fi @@ -1118,6 +1128,7 @@ if is_service_enabled nova; then init_nova_cells fi + # Extras Configuration # ==================== @@ -1128,7 +1139,7 @@ run_phase stack post-config # Local Configuration # =================== -# Apply configuration from local.conf if it exists for layer 2 services +# Apply configuration from ``local.conf`` if it exists for 
layer 2 services # Phase: post-config merge_config_group $TOP_DIR/local.conf post-config @@ -1150,18 +1161,16 @@ if is_service_enabled glance; then start_glance fi + # Install Images # ============== -# Upload an image to glance. +# Upload an image to Glance. # -# The default image is cirros, a small testing image which lets you login as **root** -# cirros has a ``cloud-init`` analog supporting login via keypair and sending +# The default image is CirrOS, a small testing image which lets you login as **root** +# CirrOS has a ``cloud-init`` analog supporting login via keypair and sending # scripts as userdata. -# See https://help.ubuntu.com/community/CloudInit for more on cloud-init -# -# Override ``IMAGE_URLS`` with a comma-separated list of UEC images. -# * **precise**: http://uec-images.ubuntu.com/precise/current/precise-server-cloudimg-amd64.tar.gz +# See https://help.ubuntu.com/community/CloudInit for more on ``cloud-init`` if is_service_enabled g-reg; then TOKEN=$(keystone token-get | grep ' id ' | get_field 2) @@ -1179,7 +1188,7 @@ if is_service_enabled g-reg; then done fi -# Create an access key and secret key for nova ec2 register image +# Create an access key and secret key for Nova EC2 register image if is_service_enabled keystone && is_service_enabled swift3 && is_service_enabled nova; then eval $(openstack ec2 credentials create --user nova --project $SERVICE_TENANT_NAME -f shell -c access -c secret) iniset $NOVA_CONF DEFAULT s3_access_key "$access" @@ -1242,7 +1251,7 @@ if is_service_enabled ceilometer; then start_ceilometer fi -# Configure and launch heat engine, api and metadata +# Configure and launch Heat engine, api and metadata if is_service_enabled heat; then # Initialize heat echo_summary "Configuring Heat" @@ -1287,30 +1296,34 @@ for i in BASE_SQL_CONN ENABLED_SERVICES HOST_IP LOGFILE \ done -# Local Configuration -# =================== +# Wrapup configuration +# ==================== + +# local.conf extra +# ---------------- -# Apply 
configuration from local.conf if it exists for layer 2 services +# Apply configuration from ``local.conf`` if it exists for layer 2 services # Phase: extra merge_config_group $TOP_DIR/local.conf extra # Run extras -# ========== +# ---------- # Phase: extra run_phase stack extra -# Local Configuration -# =================== -# Apply configuration from local.conf if it exists for layer 2 services +# local.conf post-extra +# --------------------- + +# Apply late configuration from ``local.conf`` if it exists for layer 2 services # Phase: post-extra merge_config_group $TOP_DIR/local.conf post-extra # Run local script -# ================ +# ---------------- # Run ``local.sh`` if it exists to perform user-managed tasks if [[ -x $TOP_DIR/local.sh ]]; then @@ -1338,6 +1351,7 @@ if is_service_enabled cinder; then fi fi + # Fin # === @@ -1354,11 +1368,12 @@ fi # Using the cloud -# --------------- +# =============== echo "" echo "" echo "" +echo "This is your host ip: $HOST_IP" # If you installed Horizon on this server you should be able # to access the site using your browser. 
@@ -1368,15 +1383,11 @@ fi # If Keystone is present you can point ``nova`` cli to this server if is_service_enabled keystone; then - echo "Keystone is serving at $KEYSTONE_SERVICE_URI/v2.0/" - echo "Examples on using novaclient command line is in exercise.sh" + echo "Keystone is serving at $KEYSTONE_SERVICE_URI/" echo "The default users are: admin and demo" echo "The password: $ADMIN_PASSWORD" fi -# Echo ``HOST_IP`` - useful for ``build_uec.sh``, which uses dhcp to give the instance an address -echo "This is your host ip: $HOST_IP" - # Warn that a deprecated feature was used if [[ -n "$DEPRECATED_TEXT" ]]; then echo_summary "WARNING: $DEPRECATED_TEXT" diff --git a/stackrc b/stackrc index 143298c347..c27ead3c24 100644 --- a/stackrc +++ b/stackrc @@ -5,7 +5,7 @@ # Find the other rc files RC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd) -# Source required devstack functions and globals +# Source required DevStack functions and globals source $RC_DIR/functions # Destination path for installation @@ -41,20 +41,20 @@ REGION_NAME=${REGION_NAME:-RegionOne} # enable_service q-dhcp # enable_service q-l3 # enable_service q-meta -# # Optional, to enable tempest configuration as part of devstack +# # Optional, to enable tempest configuration as part of DevStack # enable_service tempest -# this allows us to pass ENABLED_SERVICES +# This allows us to pass ``ENABLED_SERVICES`` if ! 
isset ENABLED_SERVICES ; then - # core compute (glance / keystone / nova (+ nova-network)) + # Compute (Glance / Keystone / Nova (+ nova-network)) ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,n-sch,n-novnc,n-xvnc,n-cauth - # cinder + # Cinder ENABLED_SERVICES+=,c-sch,c-api,c-vol - # heat + # Heat ENABLED_SERVICES+=,h-eng,h-api,h-api-cfn,h-api-cw - # dashboard + # Dashboard ENABLED_SERVICES+=,horizon - # additional services + # Additional services ENABLED_SERVICES+=,rabbit,tempest,mysql fi @@ -79,7 +79,7 @@ ENABLE_HTTPD_MOD_WSGI_SERVICES=True # Tell Tempest which services are available. The default is set here as # Tempest falls late in the configuration sequence. This differs from # ``ENABLED_SERVICES`` in that the project names are used here rather than -# the service names, i.e.: TEMPEST_SERVICES="key,glance,nova" +# the service names, i.e.: ``TEMPEST_SERVICES="key,glance,nova"`` TEMPEST_SERVICES="" # Set the default Nova APIs to enable @@ -145,6 +145,7 @@ GIT_TIMEOUT=${GIT_TIMEOUT:-0} # but pass through any extras) REQUIREMENTS_MODE=${REQUIREMENTS_MODE:-strict} + # Repositories # ------------ @@ -155,16 +156,17 @@ GIT_BASE=${GIT_BASE:-git://git.openstack.org} # Which libraries should we install from git instead of using released # versions on pypi? # -# By default devstack is now installing libraries from pypi instead of +# By default DevStack is now installing libraries from pypi instead of # from git repositories by default. This works great if you are # developing server components, but if you want to develop libraries -# and see them live in devstack you need to tell devstack it should +# and see them live in DevStack you need to tell DevStack it should # install them from git. # # ex: LIBS_FROM_GIT=python-keystoneclient,oslo.config # # Will install those 2 libraries from git, the rest from pypi. 
+ ############## # # OpenStack Server Components @@ -231,6 +233,7 @@ SWIFT_BRANCH=${SWIFT_BRANCH:-master} TROVE_REPO=${TROVE_REPO:-${GIT_BASE}/openstack/trove.git} TROVE_BRANCH=${TROVE_BRANCH:-master} + ############## # # Testing Components @@ -306,6 +309,7 @@ GITBRANCH["python-openstackclient"]=${OPENSTACKCLIENT_BRANCH:-master} # this doesn't exist in a lib file, so set it here GITDIR["python-openstackclient"]=$DEST/python-openstackclient + ################### # # Oslo Libraries @@ -396,6 +400,7 @@ GITBRANCH["tooz"]=${TOOZ_BRANCH:-master} GITREPO["pbr"]=${PBR_REPO:-${GIT_BASE}/openstack-dev/pbr.git} GITBRANCH["pbr"]=${PBR_BRANCH:-master} + ################## # # Libraries managed by OpenStack programs (non oslo) @@ -453,6 +458,7 @@ OCC_BRANCH=${OCC_BRANCH:-master} ORC_REPO=${ORC_REPO:-${GIT_BASE}/openstack/os-refresh-config.git} ORC_BRANCH=${ORC_BRANCH:-master} + ################# # # 3rd Party Components (non pip installable) @@ -474,7 +480,6 @@ SPICE_REPO=${SPICE_REPO:-http://anongit.freedesktop.org/git/spice/spice-html5.gi SPICE_BRANCH=${SPICE_BRANCH:-master} - # Nova hypervisor configuration. We default to libvirt with **kvm** but will # drop back to **qemu** if we are unable to load the kvm module. ``stack.sh`` can # also install an **LXC**, **OpenVZ** or **XenAPI** based system. If xenserver-core @@ -641,7 +646,7 @@ ENABLE_DEBUG_LOG_LEVEL=$(trueorfalse True ENABLE_DEBUG_LOG_LEVEL) # Set fixed and floating range here so we can make sure not to use addresses # from either range when attempting to guess the IP to use for the host. -# Note that setting FIXED_RANGE may be necessary when running DevStack +# Note that setting ``FIXED_RANGE`` may be necessary when running DevStack # in an OpenStack cloud that uses either of these address ranges internally. 
FLOATING_RANGE=${FLOATING_RANGE:-172.24.4.0/24} FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24} @@ -669,9 +674,10 @@ LOG_COLOR=$(trueorfalse True LOG_COLOR) # Set to 0 to disable shallow cloning GIT_DEPTH=${GIT_DEPTH:-0} -# Use native SSL for servers in SSL_ENABLED_SERVICES +# Use native SSL for servers in ``SSL_ENABLED_SERVICES`` USE_SSL=$(trueorfalse False USE_SSL) + # Following entries need to be last items in file # Compatibility bits required by other callers like Grenade @@ -693,7 +699,6 @@ USE_SSL=$(trueorfalse False USE_SSL) # For compat, if SCREEN_LOGDIR is set, it will be used to create back-compat symlinks to the LOGDIR # symlinks to SCREEN_LOGDIR (compat) - # Set up new logging defaults if [[ -z "${LOGDIR:-}" ]]; then default_logdir=$DEST/logs @@ -718,8 +723,8 @@ if [[ -z "${LOGDIR:-}" ]]; then unset default_logdir logfile fi -# LOGDIR is always set at this point so it is not useful as a 'enable' for service logs -# SCREEN_LOGDIR may be set, it is useful to enable the compat symlinks +# ``LOGDIR`` is always set at this point so it is not useful as a 'enable' for service logs +# ``SCREEN_LOGDIR`` may be set, it is useful to enable the compat symlinks # Local variables: # mode: shell-script diff --git a/tools/build_docs.sh b/tools/build_docs.sh index 2aa0a0ac04..fda86c05cd 100755 --- a/tools/build_docs.sh +++ b/tools/build_docs.sh @@ -2,8 +2,8 @@ # **build_docs.sh** - Build the docs for DevStack # -# - Install shocco if not found on PATH and INSTALL_SHOCCO is set -# - Clone MASTER_REPO branch MASTER_BRANCH +# - Install shocco if not found on ``PATH`` and ``INSTALL_SHOCCO`` is set +# - Clone ``MASTER_REPO`` branch ``MASTER_BRANCH`` # - Re-creates ``doc/build/html`` directory from existing repo + new generated script docs # Usage: @@ -16,7 +16,7 @@ HTML_BUILD=doc/build/html -# Keep track of the devstack directory +# Keep track of the DevStack directory TOP_DIR=$(cd $(dirname "$0")/.. 
&& pwd) # Uses this shocco branch: https://github.com/dtroyer/shocco/tree/rst_support diff --git a/tools/build_venv.sh b/tools/build_venv.sh index 11d1d35208..cfa39a82e0 100755 --- a/tools/build_venv.sh +++ b/tools/build_venv.sh @@ -4,11 +4,12 @@ # # build_venv.sh venv-path [package [...]] # +# Installs basic common prereq packages that require compilation +# to allow quick copying of resulting venv as a baseline +# # Assumes: # - a useful pip is installed # - virtualenv will be installed by pip -# - installs basic common prereq packages that require compilation -# to allow quick copying of resulting venv as a baseline VENV_DEST=${1:-.venv} @@ -16,14 +17,14 @@ shift MORE_PACKAGES="$@" -# If TOP_DIR is set we're being sourced rather than running stand-alone +# If ``TOP_DIR`` is set we're being sourced rather than running stand-alone # or in a sub-shell if [[ -z "$TOP_DIR" ]]; then set -o errexit set -o nounset - # Keep track of the devstack directory + # Keep track of the DevStack directory TOP_DIR=$(cd $(dirname "$0")/.. && pwd) FILES=$TOP_DIR/files diff --git a/tools/build_wheels.sh b/tools/build_wheels.sh index f1740dfbd6..c57568fa64 100755 --- a/tools/build_wheels.sh +++ b/tools/build_wheels.sh @@ -4,21 +4,22 @@ # # build_wheels.sh [package [...]] # -# System package prerequisites listed in files/*/devlibs will be installed +# System package prerequisites listed in ``files/*/devlibs`` will be installed # # Builds wheels for all virtual env requirements listed in # ``venv-requirements.txt`` plus any supplied on the command line. # -# Assumes ``tools/install_pip.sh`` has been run and a suitable pip/setuptools is available. +# Assumes: +# - ``tools/install_pip.sh`` has been run and a suitable ``pip/setuptools`` is available. 
-# If TOP_DIR is set we're being sourced rather than running stand-alone +# If ``TOP_DIR`` is set we're being sourced rather than running stand-alone # or in a sub-shell if [[ -z "$TOP_DIR" ]]; then set -o errexit set -o nounset - # Keep track of the devstack directory + # Keep track of the DevStack directory TOP_DIR=$(cd $(dirname "$0")/.. && pwd) FILES=$TOP_DIR/files @@ -59,7 +60,7 @@ virtualenv $TMP_VENV_PATH # Install modern pip and wheel PIP_VIRTUAL_ENV=$TMP_VENV_PATH pip_install -U pip wheel -# VENV_PACKAGES is a list of packages we want to pre-install +# ``VENV_PACKAGES`` is a list of packages we want to pre-install VENV_PACKAGE_FILE=$FILES/venv-requirements.txt if [[ -r $VENV_PACKAGE_FILE ]]; then VENV_PACKAGES=$(grep -v '^#' $VENV_PACKAGE_FILE) diff --git a/tools/create-stack-user.sh b/tools/create-stack-user.sh index 9c29ecd901..b49164b22a 100755 --- a/tools/create-stack-user.sh +++ b/tools/create-stack-user.sh @@ -17,7 +17,7 @@ set -o errexit -# Keep track of the devstack directory +# Keep track of the DevStack directory TOP_DIR=$(cd $(dirname "$0")/.. 
&& pwd) # Import common functions diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index f8edd16ecd..2efb4e0987 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -17,7 +17,7 @@ # - uninstall firewalld (f20 only) -# If TOP_DIR is set we're being sourced rather than running stand-alone +# If ``TOP_DIR`` is set we're being sourced rather than running stand-alone # or in a sub-shell if [[ -z "$TOP_DIR" ]]; then set -o errexit @@ -27,7 +27,7 @@ if [[ -z "$TOP_DIR" ]]; then TOOLS_DIR=$(cd $(dirname "$0") && pwd) TOP_DIR=$(cd $TOOLS_DIR/..; pwd) - # Change dir to top of devstack + # Change dir to top of DevStack cd $TOP_DIR # Import common functions @@ -38,7 +38,7 @@ fi # Keystone Port Reservation # ------------------------- -# Reserve and prevent $KEYSTONE_AUTH_PORT and $KEYSTONE_AUTH_PORT_INT from +# Reserve and prevent ``KEYSTONE_AUTH_PORT`` and ``KEYSTONE_AUTH_PORT_INT`` from # being used as ephemeral ports by the system. The default(s) are 35357 and # 35358 which are in the Linux defined ephemeral port range (in disagreement # with the IANA ephemeral port range). This is a workaround for bug #1253482 @@ -47,9 +47,9 @@ fi # exception into the Kernel for the Keystone AUTH ports. keystone_ports=${KEYSTONE_AUTH_PORT:-35357},${KEYSTONE_AUTH_PORT_INT:-35358} -# only do the reserved ports when available, on some system (like containers) +# Only do the reserved ports when available, on some system (like containers) # where it's not exposed we are almost pretty sure these ports would be -# exclusive for our devstack. +# exclusive for our DevStack. 
if sysctl net.ipv4.ip_local_reserved_ports >/dev/null 2>&1; then # Get any currently reserved ports, strip off leading whitespace reserved_ports=$(sysctl net.ipv4.ip_local_reserved_ports | awk -F'=' '{print $2;}' | sed 's/^ //') @@ -59,7 +59,7 @@ if sysctl net.ipv4.ip_local_reserved_ports >/dev/null 2>&1; then sudo sysctl -w net.ipv4.ip_local_reserved_ports=${keystone_ports} else # If there are currently reserved ports, keep those and also reserve the - # keystone specific ports. Duplicate reservations are merged into a single + # Keystone specific ports. Duplicate reservations are merged into a single # reservation (or range) automatically by the kernel. sudo sysctl -w net.ipv4.ip_local_reserved_ports=${keystone_ports},${reserved_ports} fi diff --git a/tools/image_list.sh b/tools/image_list.sh index 88c1d09379..204280704e 100755 --- a/tools/image_list.sh +++ b/tools/image_list.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Keep track of the devstack directory +# Keep track of the DevStack directory TOP_DIR=$(cd $(dirname "$0")/.. 
&& pwd) source $TOP_DIR/functions diff --git a/tools/info.sh b/tools/info.sh index a8f9544073..433206e8ae 100755 --- a/tools/info.sh +++ b/tools/info.sh @@ -2,7 +2,7 @@ # **info.sh** -# Produce a report on the state of devstack installs +# Produce a report on the state of DevStack installs # # Output fields are separated with '|' chars # Output types are git,localrc,os,pip,pkg: @@ -14,7 +14,7 @@ # pkg|| function usage { - echo "$0 - Report on the devstack configuration" + echo "$0 - Report on the DevStack configuration" echo "" echo "Usage: $0" exit 1 diff --git a/tools/install_pip.sh b/tools/install_pip.sh index b7b40c7486..0f7c962b2b 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -16,7 +16,7 @@ set -o xtrace TOOLS_DIR=$(cd $(dirname "$0") && pwd) TOP_DIR=`cd $TOOLS_DIR/..; pwd` -# Change dir to top of devstack +# Change dir to top of DevStack cd $TOP_DIR # Import common functions @@ -42,11 +42,11 @@ function get_versions { function install_get_pip { - # the openstack gate and others put a cached version of get-pip.py + # The OpenStack gate and others put a cached version of get-pip.py # for this to find, explicitly to avoid download issues. # - # However, if devstack *did* download the file, we want to check - # for updates; people can leave thier stacks around for a long + # However, if DevStack *did* download the file, we want to check + # for updates; people can leave their stacks around for a long # time and in the mean-time pip might get upgraded. # # Thus we use curl's "-z" feature to always check the modified @@ -74,7 +74,7 @@ function configure_pypi_alternative_url { touch $PIP_CONFIG_FILE fi if ! 
ini_has_option "$PIP_CONFIG_FILE" "global" "index-url"; then - #it means that the index-url does not exist + # It means that the index-url does not exist iniset "$PIP_CONFIG_FILE" "global" "index-url" "$PYPI_OVERRIDE" fi diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh index 917980ccc5..a07e58d3e6 100755 --- a/tools/install_prereqs.sh +++ b/tools/install_prereqs.sh @@ -18,10 +18,10 @@ while getopts ":f" opt; do esac done -# If TOP_DIR is set we're being sourced rather than running stand-alone +# If ``TOP_DIR`` is set we're being sourced rather than running stand-alone # or in a sub-shell if [[ -z "$TOP_DIR" ]]; then - # Keep track of the devstack directory + # Keep track of the DevStack directory TOP_DIR=$(cd $(dirname "$0")/.. && pwd) # Import common functions @@ -65,7 +65,7 @@ PACKAGES=$(get_packages general $ENABLED_SERVICES) PACKAGES="$PACKAGES $(get_plugin_packages)" if is_ubuntu && echo $PACKAGES | grep -q dkms ; then - # ensure headers for the running kernel are installed for any DKMS builds + # Ensure headers for the running kernel are installed for any DKMS builds PACKAGES="$PACKAGES linux-headers-$(uname -r)" fi diff --git a/tools/ironic/scripts/create-node b/tools/ironic/scripts/create-node index 25b53d47f3..b018acddc9 100755 --- a/tools/ironic/scripts/create-node +++ b/tools/ironic/scripts/create-node @@ -6,13 +6,13 @@ set -ex -# Keep track of the devstack directory +# Keep track of the DevStack directory TOP_DIR=$(cd $(dirname "$0")/.. && pwd) NAME=$1 CPU=$2 MEM=$(( 1024 * $3 )) -# extra G to allow fuzz for partition table : flavor size and registered size +# Extra G to allow fuzz for partition table : flavor size and registered size # need to be different to actual size. 
DISK=$(( $4 + 1)) diff --git a/tools/ironic/scripts/setup-network b/tools/ironic/scripts/setup-network index e326bf8ccd..83308ed416 100755 --- a/tools/ironic/scripts/setup-network +++ b/tools/ironic/scripts/setup-network @@ -9,7 +9,7 @@ set -exu LIBVIRT_CONNECT_URI=${LIBVIRT_CONNECT_URI:-"qemu:///system"} -# Keep track of the devstack directory +# Keep track of the DevStack directory TOP_DIR=$(cd $(dirname "$0")/.. && pwd) BRIDGE_SUFFIX=${1:-''} BRIDGE_NAME=brbm$BRIDGE_SUFFIX @@ -19,7 +19,7 @@ export VIRSH_DEFAULT_CONNECT_URI="$LIBVIRT_CONNECT_URI" # Only add bridge if missing (sudo ovs-vsctl list-br | grep ${BRIDGE_NAME}$) || sudo ovs-vsctl add-br ${BRIDGE_NAME} -# remove bridge before replacing it. +# Remove bridge before replacing it. (virsh net-list | grep "${BRIDGE_NAME} ") && virsh net-destroy ${BRIDGE_NAME} (virsh net-list --inactive | grep "${BRIDGE_NAME} ") && virsh net-undefine ${BRIDGE_NAME} diff --git a/tools/outfilter.py b/tools/outfilter.py index 9686a387c2..f82939be1d 100755 --- a/tools/outfilter.py +++ b/tools/outfilter.py @@ -14,8 +14,8 @@ # License for the specific language governing permissions and limitations # under the License. -# This is an output filter to filter and timestamp the logs from grenade and -# devstack. Largely our awk filters got beyond the complexity level which were +# This is an output filter to filter and timestamp the logs from Grenade and +# DevStack. Largely our awk filters got beyond the complexity level which were # sustainable, so this provides us much more control in a single place. 
# # The overhead of running python should be less than execing `date` a million @@ -32,7 +32,7 @@ def get_options(): parser = argparse.ArgumentParser( - description='Filter output by devstack and friends') + description='Filter output by DevStack and friends') parser.add_argument('-o', '--outfile', help='Output file for content', default=None) @@ -52,7 +52,7 @@ def main(): if opts.outfile: outfile = open(opts.outfile, 'a', 0) - # otherwise fileinput reprocess args as files + # Otherwise fileinput reprocess args as files sys.argv = [] while True: line = sys.stdin.readline() @@ -63,9 +63,9 @@ def main(): if skip_line(line): continue - # this prevents us from nesting date lines, because - # we'd like to pull this in directly in grenade and not double - # up on devstack lines + # This prevents us from nesting date lines, because + # we'd like to pull this in directly in Grenade and not double + # up on DevStack lines if HAS_DATE.search(line) is None: now = datetime.datetime.utcnow() line = ("%s | %s" % ( diff --git a/unstack.sh b/unstack.sh index c45af7400c..30981fd3c6 100755 --- a/unstack.sh +++ b/unstack.sh @@ -19,7 +19,7 @@ while getopts ":a" opt; do esac done -# Keep track of the current devstack directory. +# Keep track of the current DevStack directory. TOP_DIR=$(cd $(dirname "$0") && pwd) FILES=$TOP_DIR/files From 32d6bc6ad1f5d857c8e34e15001f8eb8666c601c Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Sun, 29 Mar 2015 14:16:44 -0500 Subject: [PATCH 0115/2941] Add inc/rootwrap Rootwrap shouldn't be a unique snowflake. Plus the binaries tend to be called assuming PATH will find them. Not so with venvs so we need to work around that brokenness. Configure Cinder and Nova to use configure_rootwrap(). 
Change-Id: I8ee1f66014875caf20a2d14ff6ef3672673ba85a --- functions | 1 + inc/rootwrap | 77 ++++++++++++++++++++++++++++++++++++++++++++++++++++ lib/cinder | 38 +------------------------- lib/nova | 38 +------------------------- 4 files changed, 80 insertions(+), 74 deletions(-) create mode 100644 inc/rootwrap diff --git a/functions b/functions index 5bc8456281..4dc20e7f23 100644 --- a/functions +++ b/functions @@ -15,6 +15,7 @@ FUNC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd) source ${FUNC_DIR}/functions-common source ${FUNC_DIR}/inc/ini-config source ${FUNC_DIR}/inc/python +source ${FUNC_DIR}/inc/rootwrap # Save trace setting XTRACE=$(set +o | grep xtrace) diff --git a/inc/rootwrap b/inc/rootwrap new file mode 100644 index 0000000000..bac8e1e86c --- /dev/null +++ b/inc/rootwrap @@ -0,0 +1,77 @@ +#!/bin/bash +# +# **inc/rootwrap** - Rootwrap functions +# +# Handle rootwrap's foibles + +# Uses: ``STACK_USER`` +# Defines: ``SUDO_SECURE_PATH_FILE`` + +# Save trace setting +INC_ROOT_TRACE=$(set +o | grep xtrace) +set +o xtrace + +# Accumulate all additions to sudo's ``secure_path`` in one file read last +# so they all work in a venv configuration +SUDO_SECURE_PATH_FILE=${SUDO_SECURE_PATH_FILE:-/etc/sudoers.d/zz-secure-path} + +# Add a directory to the common sudo ``secure_path`` +# add_sudo_secure_path dir +function add_sudo_secure_path { + local dir=$1 + local line + + # This is pretty simplistic for now - assume only the first line is used + if [[ -r SUDO_SECURE_PATH_FILE ]]; then + line=$(head -1 $SUDO_SECURE_PATH_FILE) + else + line="Defaults:$STACK_USER secure_path=/usr/local/sbin:/usr/local/bin:/usr/sbin:/sbin:/usr/bin:/bin" + fi + + # Only add ``dir`` if it is not already present + if [[ $line =~ $dir ]]; then + echo "${line}:$dir" | sudo tee $SUDO_SECURE_PATH_FILE + sudo chmod 400 $SUDO_SECURE_PATH_FILE + sudo chown root:root $SUDO_SECURE_PATH_FILE + fi +} + +# Configure rootwrap +# Make a load of assumptions otherwise we'll have 6 arguments +# 
configure_rootwrap project bin conf-src-dir +function configure_rootwrap { + local project=$1 # xx + local rootwrap_bin=$2 # /opt/stack/xx.venv/bin/xx-rootwrap + local rootwrap_conf_src_dir=$3 # /opt/stack/xx/etc/xx + + # Start fresh with rootwrap filters + sudo rm -rf /etc/${project}/rootwrap.d + sudo install -d -o root -g root -m 755 /etc/${project}/rootwrap.d + sudo install -o root -g root -m 644 $rootwrap_conf_src_dir/rootwrap.d/*.filters /etc/${project}/rootwrap.d + + # Set up rootwrap.conf, pointing to /etc/*/rootwrap.d + sudo install -o root -g root -m 644 $rootwrap_conf_src_dir/rootwrap.conf /etc/${project}/rootwrap.conf + sudo sed -e "s:^filters_path=.*$:filters_path=/etc/${project}/rootwrap.d:" -i /etc/${project}/rootwrap.conf + + # Specify rootwrap.conf as first parameter to rootwrap + rootwrap_sudo_cmd="$rootwrap_bin /etc/${project}/rootwrap.conf *" + + # Set up the rootwrap sudoers + local tempfile=$(mktemp) + echo "$STACK_USER ALL=(root) NOPASSWD: $rootwrap_sudo_cmd" >$tempfile + chmod 0440 $tempfile + sudo chown root:root $tempfile + sudo mv $tempfile /etc/sudoers.d/${project}-rootwrap + + # Add bin dir to sudo's secure_path because rootwrap is being called + # without a path because BROKEN. 
+ add_sudo_secure_path $(dirname $rootwrap_bin) +} + + +# Restore xtrace +$INC_ROOT_TRACE + +# Local variables: +# mode: shell-script +# End: diff --git a/lib/cinder b/lib/cinder index 27fd692292..de41bc5f79 100644 --- a/lib/cinder +++ b/lib/cinder @@ -171,42 +171,6 @@ function cleanup_cinder { fi } -# Deploy new rootwrap filters files and configure sudo -# configure_cinder_rootwrap() - configure Cinder's rootwrap -function configure_cinder_rootwrap { - local cinder_rootwrap=$CINDER_BIN_DIR/cinder-rootwrap - - # Wipe any existing rootwrap.d files first - if [[ -d $CINDER_CONF_DIR/rootwrap.d ]]; then - sudo rm -rf $CINDER_CONF_DIR/rootwrap.d - fi - - # Deploy filters to /etc/cinder/rootwrap.d - sudo install -d -o root -g root -m 755 $CINDER_CONF_DIR/rootwrap.d - sudo install -o root -g root -m 644 $CINDER_DIR/etc/cinder/rootwrap.d/*.filters $CINDER_CONF_DIR/rootwrap.d - - # Set up rootwrap.conf, pointing to /etc/cinder/rootwrap.d - sudo install -o root -g root -m 644 $CINDER_DIR/etc/cinder/rootwrap.conf $CINDER_CONF_DIR - sudo sed -e "s:^filters_path=.*$:filters_path=$CINDER_CONF_DIR/rootwrap.d:" -i $CINDER_CONF_DIR/rootwrap.conf - - # Specify rootwrap.conf as first parameter to rootwrap - ROOTWRAP_CSUDOER_CMD="$cinder_rootwrap $CINDER_CONF_DIR/rootwrap.conf *" - - # Set up the rootwrap sudoers for cinder - local tempfile=`mktemp` - echo "Defaults:$STACK_USER secure_path=$CINDER_BIN_DIR:/sbin:/usr/sbin:/usr/bin:/bin:/usr/local/sbin:/usr/local/bin" >$tempfile - echo "$STACK_USER ALL=(root) NOPASSWD: $ROOTWRAP_CSUDOER_CMD" >>$tempfile - chmod 0440 $tempfile - sudo chown root:root $tempfile - sudo mv $tempfile /etc/sudoers.d/cinder-rootwrap - - # So rootwrap and PATH are broken beyond belief. WTF relies on a SECURE operation - # to blindly follow PATH??? We learned that was a bad idea in the 80's! - # So to fix this in a venv, we must exploit the very hole we want to close by dropping - # a copy of the venv rootwrap binary into /usr/local/bin. 
- #sudo cp -p $cinder_rootwrap /usr/local/bin -} - # configure_cinder() - Set config files, create data dirs, etc function configure_cinder { sudo install -d -o $STACK_USER -m 755 $CINDER_CONF_DIR @@ -215,7 +179,7 @@ function configure_cinder { rm -f $CINDER_CONF - configure_cinder_rootwrap + configure_rootwrap cinder $CINDER_BIN_DIR/cinder-rootwrap $CINDER_DIR/etc/cinder cp $CINDER_DIR/etc/cinder/api-paste.ini $CINDER_API_PASTE_INI diff --git a/lib/nova b/lib/nova index 385da4e44b..807dfceeae 100644 --- a/lib/nova +++ b/lib/nova @@ -223,42 +223,6 @@ function cleanup_nova { #fi } -# Deploy new rootwrap filters files and configure sudo -# configure_nova_rootwrap() - configure Nova's rootwrap -function configure_nova_rootwrap { - nova_rootwrap=$NOVA_BIN_DIR/nova-rootwrap - - # Wipe any existing rootwrap.d files first - if [[ -d $NOVA_CONF_DIR/rootwrap.d ]]; then - sudo rm -rf $NOVA_CONF_DIR/rootwrap.d - fi - - # Deploy filters to /etc/nova/rootwrap.d - sudo install -d -o root -g root -m 755 $NOVA_CONF_DIR/rootwrap.d - sudo install -o root -g root -m 644 $NOVA_DIR/etc/nova/rootwrap.d/*.filters $NOVA_CONF_DIR/rootwrap.d - - # Set up rootwrap.conf, pointing to /etc/nova/rootwrap.d - sudo install -o root -g root -m 644 $NOVA_DIR/etc/nova/rootwrap.conf $NOVA_CONF_DIR - sudo sed -e "s:^filters_path=.*$:filters_path=$NOVA_CONF_DIR/rootwrap.d:" -i $NOVA_CONF_DIR/rootwrap.conf - - # Specify rootwrap.conf as first parameter to nova-rootwrap - local rootwrap_sudoer_cmd="$nova_rootwrap $NOVA_CONF_DIR/rootwrap.conf *" - - # Set up the rootwrap sudoers for nova - local tempfile=`mktemp` - echo "Defaults:$STACK_USER secure_path=$NOVA_BIN_DIR:/sbin:/usr/sbin:/usr/bin:/bin:/usr/local/sbin:/usr/local/bin" >$tempfile - echo "$STACK_USER ALL=(root) NOPASSWD: $rootwrap_sudoer_cmd" >>$tempfile - chmod 0440 $tempfile - sudo chown root:root $tempfile - sudo mv $tempfile /etc/sudoers.d/nova-rootwrap - - # So rootwrap and PATH are broken beyond belief. 
WTF relies on a SECURE operation - # to blindly follow PATH??? We learned that was a bad idea in the 80's! - # So to fix this in a venv, we must exploit the very hole we want to close by dropping - # a copy of the venv rootwrap binary into /usr/local/bin. - #sudo cp -p $nova_rootwrap /usr/local/bin -} - # configure_nova() - Set config files, create data dirs, etc function configure_nova { # Put config files in ``/etc/nova`` for everyone to find @@ -266,7 +230,7 @@ function configure_nova { install_default_policy nova - configure_nova_rootwrap + configure_rootwrap nova $NOVA_BIN_DIR/nova-rootwrap $NOVA_DIR/etc/nova if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then # Get the sample configuration file in place From e929fdd47e31919bb8e30d2300e3c8e43b1bb9cc Mon Sep 17 00:00:00 2001 From: Jamie Lennox Date: Mon, 30 Mar 2015 15:11:11 +1100 Subject: [PATCH 0116/2941] Remove keystone public/admin_endpoint options The public_endpoint and admin_endpoint options are used to set the base hostname when listing versions. If they are not provided then the response will use the same hostname as the request was made with - which is almost always what you actually want. This means devstack will respond correctly to a version list when behind a floating IP for example. 
Change-Id: Idc48b9d7bee9751deb24d730fdc560b163f39dfe --- lib/keystone | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/keystone b/lib/keystone index 1e39ab6bc2..1d12583b42 100644 --- a/lib/keystone +++ b/lib/keystone @@ -237,9 +237,6 @@ function configure_keystone { iniset_rpc_backend keystone $KEYSTONE_CONF - # Set the URL advertised in the ``versions`` structure returned by the '/' route - iniset $KEYSTONE_CONF DEFAULT public_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/" - iniset $KEYSTONE_CONF DEFAULT admin_endpoint "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/" iniset $KEYSTONE_CONF eventlet_server admin_bind_host "$KEYSTONE_ADMIN_BIND_HOST" # Register SSL certificates if provided From 5f2eb6dade5f3b19b8905b97d8e4018b5e068321 Mon Sep 17 00:00:00 2001 From: James Polley Date: Mon, 30 Mar 2015 17:36:26 +1100 Subject: [PATCH 0117/2941] Add a target for, and link to, minimal config docs Reading through the docs for the first time, the reader encounters an instruction to provide a minimal configuration, with a link that they'd expect to tell them how to do this. At present the link actually takes them to the top of configuration.html, where they read some history about how devstack's configuration has changed over time. This is interesting and important and should be in the docs - but in my opinion a link about setting up a minimal configuration would be more useful if it takes me to a place that tells them about a minimal configuration. To get this, I've had to add an explicit link target into configuration.rst. I'm not hugely keen on this approach, as I don't think it scales well. I'd be open to suggestions about other approaches. 
The only idea I've had so far though is to simply move the minimal configuration section right to the top of the page, so that a link to the doc is a link to the minimal config - the historical information could be moved to its own topic somewhere further down the doc. Change-Id: I231ca1b7f17b55f09a4e058dab8ee433893f737e --- doc/source/configuration.rst | 2 ++ doc/source/index.rst | 3 +-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index a0d0840263..1cc7083bb4 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -95,6 +95,8 @@ Also note that the ``localrc`` section is sourced as a shell script fragment and MUST conform to the shell requirements, specifically no whitespace around ``=`` (equals). +.. _minimal-configuration: + Minimal Configuration ===================== diff --git a/doc/source/index.rst b/doc/source/index.rst index c31287cd57..4435b495ae 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -41,8 +41,7 @@ Quick Start #. Configure - We recommend at least a :doc:`minimal - configuration ` be set up. + We recommend at least a :ref:`minimal-configuration` be set up. #. Start the install From 0c04c12b3f91638560544e2b56a2994ac8015557 Mon Sep 17 00:00:00 2001 From: Jordan Pittier Date: Mon, 30 Mar 2015 18:15:19 +0200 Subject: [PATCH 0118/2941] Swift PyEClib build requires 'make' PyEClib was introduced recently to swift-master. It tries to build liberasurecode which requires the `make` binary. 
Change-Id: I8acfed4f7b46a29eac36f6acbe1d66e7fff800db --- files/debs/swift | 1 + 1 file changed, 1 insertion(+) diff --git a/files/debs/swift b/files/debs/swift index b32b43985a..0089d27fee 100644 --- a/files/debs/swift +++ b/files/debs/swift @@ -1,4 +1,5 @@ curl +make memcached # NOTE python-nose only exists because of swift functional job, we should probably # figure out a more consistent way of installing this from test-requirements.txt instead From 206c596d693d429cbbfa738e4e0a397c646d77c1 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 30 Mar 2015 13:56:11 -0400 Subject: [PATCH 0119/2941] add back python-guestfs This is needed otherwise we fall back to the nbd code path which is super hacky. This shouldn't have been deleted for the venv path. Change-Id: If5cb6cb4944bd0ed3548d53c98443b76725d1c0c --- lib/nova_plugins/functions-libvirt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index 60707cf859..edcf73ebe2 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -24,13 +24,13 @@ function install_libvirt { install_package libvirt-bin libvirt-dev pip_install libvirt-python install_package libguestfs0 - #install_package python-guestfs + install_package python-guestfs #pip_install elif is_fedora || is_suse; then install_package kvm install_package libvirt libvirt-devel pip_install libvirt-python - #install_package python-libguestfs + install_package python-libguestfs fi # Restart firewalld after install of libvirt to avoid a problem From 84ee55b3ee3630945792fa849d793a6d71d2364d Mon Sep 17 00:00:00 2001 From: Adam Gandelman Date: Mon, 30 Mar 2015 14:25:27 -0700 Subject: [PATCH 0120/2941] Set fixed_network_name in Tempest config This previously defaulted to 'private' and aligned with devstack's defaults but it has since been updated to 'None'. This sets the config value according to devstack's. 
Change-Id: I3f480d5480521a93992bedfe602eb20a4999263d Closes-bug: #1438415 --- lib/tempest | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/tempest b/lib/tempest index b3a4c7b6e3..3f33512cf6 100644 --- a/lib/tempest +++ b/lib/tempest @@ -331,6 +331,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute flavor_ref $flavor_ref iniset $TEMPEST_CONFIG compute flavor_ref_alt $flavor_ref_alt iniset $TEMPEST_CONFIG compute ssh_connect_method $ssh_connect_method + iniset $TEMPEST_CONFIG compute fixed_network_name $PRIVATE_NETWORK_NAME # Compute Features # Run ``verify_tempest_config -ur`` to retrieve enabled extensions on API endpoints From f3d52335e728995209feec1c9813d341c2690cb7 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Sat, 28 Mar 2015 10:14:47 -0500 Subject: [PATCH 0121/2941] Add basic Makefile There are a couple of targets in here that some might find useful in doing DevStack testing in multiple remote VMs. There are some of the usual boring targets too, like stack and unstack, that do exactly what you would expect. Change-Id: I7974cac4cc527bacf6f183ac1f344428b05f2fdc --- Makefile | 104 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 104 insertions(+) create mode 100644 Makefile diff --git a/Makefile b/Makefile new file mode 100644 index 0000000000..082aff21d2 --- /dev/null +++ b/Makefile @@ -0,0 +1,104 @@ +# DevStack Makefile of Sanity + +# Interesting targets: +# ds-remote - Create a Git remote for use by ds-push and ds-pull targets +# DS_REMOTE_URL must be set on the command line +# +# ds-push - Merge a list of branches taken from .ds-test and push them +# to the ds-remote repo in ds-test branch +# +# ds-pull - Pull the remote ds-test branch into a fresh local branch +# +# refresh - Performs a sequence of unstack, refresh and stack + +# Duplicated from stackrc for now +DEST=/opt/stack +WHEELHOUSE=$(DEST)/.wheelhouse + +all: + echo "This just saved you from a terrible mistake!" 
+ +# Do Some Work +stack: + ./stack.sh + +unstack: + ./unstack.sh + +wheels: + WHEELHOUSE=$(WHEELHOUSE) tools/build-wheels.sh + +docs: + tox -edocs + +# Just run the shocco source formatting build +docs-build: + INSTALL_SHOCCO=True tools/build_docs.sh + +# Just run the Sphinx docs build +docs-rst: + python setup.py build_sphinx + +# Run the bashate test +bashate: + tox -ebashate + +# Run the function tests +test: + tests/test_ini_config.sh + tests/test_meta_config.sh + tests/test_ip.sh + tests/test_refs.sh + +# Spiff up the place a bit +clean: + ./clean.sh + rm -rf accrc doc/build test*-e *.egg-info + +# Clean out the cache too +realclean: clean + rm -rf files/cirros*.tar.gz files/Fedora*.qcow2 $(WHEELHOUSE) + +# Repo stuffs + +pull: + git pull + + +# These repo targets are used to maintain a branch in a remote repo that +# consists of one or more local branches merged and pushed to the remote. +# This is most useful for iterative testing on multiple or remote servers +# while keeping the working repo local. 
+# +# It requires: +# * a remote pointing to a remote repo, often GitHub is used for this +# * a branch name to be used on the remote +# * a local file containing the list of local branches to be merged into +# the remote branch + +GIT_REMOTE_NAME=ds-test +GIT_REMOTE_BRANCH=ds-test + +# Push the current branch to a remote named ds-test +ds-push: + git checkout master + git branch -D $(GIT_REMOTE_BRANCH) || true + git checkout -b $(GIT_REMOTE_BRANCH) + for i in $(shell cat .$(GIT_REMOTE_BRANCH) | grep -v "^#" | grep "[^ ]"); do \ + git merge --no-edit $$i; \ + done + git push -f $(GIT_REMOTE_NAME) HEAD:$(GIT_REMOTE_BRANCH) + +# Pull the ds-test branch +ds-pull: + git checkout master + git branch -D $(GIT_REMOTE_BRANCH) || true + git pull $(GIT_REMOTE_NAME) $(GIT_REMOTE_BRANCH) + git checkout $(GIT_REMOTE_BRANCH) + +# Add the remote - set DS_REMOTE_URL=https://example.com/ on the command line +ds-remote: + git remote add $(GIT_REMOTE_NAME) $(DS_REMOTE_URL) + +# Refresh the current DevStack checkout and re-initialize +refresh: unstack ds-pull stack From ae7b4f9b9e811f2c0abfb4f7f4e85dd91ca1c2b3 Mon Sep 17 00:00:00 2001 From: Kashyap Chamarthy Date: Tue, 31 Mar 2015 20:49:15 +0200 Subject: [PATCH 0122/2941] functions-libvirt: Enable 'qemu_monitor' logging filter for libvirt A lot of libvirt interactions with QEMU are via the QEMU monitor console, which allows you to either query or modify the state of a virtual machine. Specific examples include: querying the status of live block operations, live snapshot operations, live migration, etc. Enabling the 'qemu_monitor' log filter allows us to capture precisely what commands libvirt is sending to QEMU. Note that the log level was intentionally set to '1' (i.e. debug) for this specific filter, because (a) it's not extremely verbose, (b) when something breaks, it's helpful to have the exact sequence of interactions between libvirt and QEMU. 
Change-Id: Iba95b6bd7c9f197c8d48c7d978f538e50d4e31fa --- lib/nova_plugins/functions-libvirt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index edcf73ebe2..05ef605b04 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -99,9 +99,9 @@ EOF # source file paths, not relative paths. This screws with the matching # of '1:libvirt' making everything turn on. So use libvirt.c for now. # This will have to be re-visited when Ubuntu ships libvirt >= 1.2.3 - local log_filters="1:libvirt.c 1:qemu 1:conf 1:security 3:object 3:event 3:json 3:file 1:util" + local log_filters="1:libvirt.c 1:qemu 1:conf 1:security 3:object 3:event 3:json 3:file 1:util 1:qemu_monitor" else - local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:object 3:event 3:json 3:file 1:util" + local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:object 3:event 3:json 3:file 1:util 1:qemu_monitor" fi local log_outputs="1:file:/var/log/libvirt/libvirtd.log" if ! grep -q "log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then From edd60481682bf2cca061f94f113835922cd79709 Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Fri, 27 Mar 2015 09:19:57 -0700 Subject: [PATCH 0123/2941] Update libvirt cpu map before starting nova We are trying to get a working 64bit qemu cpu model in the gate for nova live migration testing. It appears that we need to make this change prior to nova starting. Make the change in configure_libvirt() to handle this along with the other libvirt config updates. This allows us to restart the libvirt service once. This function calls a python tool which parses and updates the XML if necessary. 
Change-Id: I00667713bfba67ab8cedbcb1660ff94d4f4bcc07 --- lib/nova_plugins/functions-libvirt | 12 +++- tools/cpu_map_update.py | 89 ++++++++++++++++++++++++++++++ 2 files changed, 99 insertions(+), 2 deletions(-) create mode 100755 tools/cpu_map_update.py diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index 4d617e8b5e..763afaf596 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -110,10 +110,18 @@ EOF fi fi + # Update the libvirt cpu map with a gate64 cpu model. This enables nova + # live migration for 64bit guest OSes on heterogenous cloud "hardware". + if [[ -f /usr/share/libvirt/cpu_map.xml ]] ; then + sudo $TOP_DIR/tools/cpu_map_update.py /usr/share/libvirt/cpu_map.xml + fi + # libvirt detects various settings on startup, as we potentially changed # the system configuration (modules, filesystems), we need to restart - # libvirt to detect those changes. - restart_service $LIBVIRT_DAEMON + # libvirt to detect those changes. Use a stop start as otherwise the new + # cpu_map is not loaded properly on some systems (Ubuntu). + stop_service $LIBVIRT_DAEMON + start_service $LIBVIRT_DAEMON } diff --git a/tools/cpu_map_update.py b/tools/cpu_map_update.py new file mode 100755 index 0000000000..1938793114 --- /dev/null +++ b/tools/cpu_map_update.py @@ -0,0 +1,89 @@ +#!/usr/bin/env python +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +# This small script updates the libvirt CPU map to add a gate64 cpu model +# that can be used to enable a common 64bit capable feature set across +# devstack nodes so that features like nova live migration work. + +import sys +import xml.etree.ElementTree as ET +from xml.dom import minidom + + +def update_cpu_map(tree): + root = tree.getroot() + cpus = root#.find("cpus") + x86 = None + for arch in cpus.findall("arch"): + if arch.get("name") == "x86": + x86 = arch + break + if x86 is not None: + # Create a gate64 cpu model that is core2duo less monitor and pse36 + gate64 = ET.SubElement(x86, "model") + gate64.set("name", "gate64") + ET.SubElement(gate64, "vendor").set("name", "Intel") + ET.SubElement(gate64, "feature").set("name", "fpu") + ET.SubElement(gate64, "feature").set("name", "de") + ET.SubElement(gate64, "feature").set("name", "pse") + ET.SubElement(gate64, "feature").set("name", "tsc") + ET.SubElement(gate64, "feature").set("name", "msr") + ET.SubElement(gate64, "feature").set("name", "pae") + ET.SubElement(gate64, "feature").set("name", "mce") + ET.SubElement(gate64, "feature").set("name", "cx8") + ET.SubElement(gate64, "feature").set("name", "apic") + ET.SubElement(gate64, "feature").set("name", "sep") + ET.SubElement(gate64, "feature").set("name", "pge") + ET.SubElement(gate64, "feature").set("name", "cmov") + ET.SubElement(gate64, "feature").set("name", "pat") + ET.SubElement(gate64, "feature").set("name", "mmx") + ET.SubElement(gate64, "feature").set("name", "fxsr") + ET.SubElement(gate64, "feature").set("name", "sse") + ET.SubElement(gate64, "feature").set("name", "sse2") + ET.SubElement(gate64, "feature").set("name", "vme") + ET.SubElement(gate64, "feature").set("name", "mtrr") + ET.SubElement(gate64, "feature").set("name", "mca") + ET.SubElement(gate64, "feature").set("name", "clflush") + ET.SubElement(gate64, "feature").set("name", "pni") + ET.SubElement(gate64, "feature").set("name", "nx") + ET.SubElement(gate64, "feature").set("name", 
"ssse3") + ET.SubElement(gate64, "feature").set("name", "syscall") + ET.SubElement(gate64, "feature").set("name", "lm") + + +def format_xml(root): + # Adapted from http://pymotw.com/2/xml/etree/ElementTree/create.html + # thank you dhellmann + rough_string = ET.tostring(root, encoding="UTF-8") + dom_parsed = minidom.parseString(rough_string) + return dom_parsed.toprettyxml(" ", encoding="UTF-8") + + +def main(): + if len(sys.argv) != 2: + raise Exception("Must pass path to cpu_map.xml to update") + cpu_map = sys.argv[1] + tree = ET.parse(cpu_map) + for model in tree.getroot().iter("model"): + if model.get("name") == "gate64": + # gate64 model is already present + return + update_cpu_map(tree) + pretty_xml = format_xml(tree.getroot()) + with open(cpu_map, 'w') as f: + f.write(pretty_xml) + + +if __name__ == "__main__": + main() From d3cfb82c65ae7f85b9d92e1245b1d81792a5641b Mon Sep 17 00:00:00 2001 From: Kashyap Chamarthy Date: Wed, 1 Apr 2015 11:30:57 +0200 Subject: [PATCH 0124/2941] functions-libvirt: Enable DEBUG_LIBVIRT config attribute by default Enabling it by default because: - This allows you to get the relevant logs right away when something in the libvirt code path fails, without having to submit another change and keep doing a 'recheck' to re-run the CI check/gate jobs until you hit the bug. - The libvirt log filters specified in the function 'configure_libvirt' are much more _selective_ and not a catch-all debug option where you end up with the unhelpful situation of having to find a "specific piece of hay in a haystack"[1]. FWIW, I always have it enabled in local test environments, and I don't see the resulting libvirtd.log growing beyond a couple of MB for three-four days of usage. 
[1] http://lists.openstack.org/pipermail/openstack-dev/2014-January/024414.html Change-Id: I5e0b35446075b419fe473e1db8d0bfedd7009741 --- lib/nova_plugins/functions-libvirt | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index 05ef605b04..1a8e0e4e75 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -14,8 +14,11 @@ set +o xtrace # Defaults # -------- -# if we should turn on massive libvirt debugging -DEBUG_LIBVIRT=$(trueorfalse False DEBUG_LIBVIRT) +# Turn on selective debug log filters for libvirt. +# (NOTE: Enabling this by default, because the log filters enabled in +# 'configure_libvirt' function further below are _selective_ and not +# extremely verbose.) +DEBUG_LIBVIRT=$(trueorfalse True DEBUG_LIBVIRT) # Installs required distro-specific libvirt packages. function install_libvirt { From 279cfe75198c723519f1fb361b2bff3c641c6cef Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 1 Apr 2015 07:33:55 -0400 Subject: [PATCH 0125/2941] minimize the default services This changes the default service list in devstack to minimize what is running out of the box, so that it's likelihood of working in a 4G vm is much higher. This removes heat from the default enabled service list. It drops the ec2 only needed n-obj and n-crt services. It drops all the alternative consoles (xvnc, consoleauth). novnc is fine for libvirt which is the default. It adds dstat, because that's turned out to be so useful in debugging things. Change-Id: I84457260dff6f42a5c6ebcc2c60fb6e01aec9567 --- stackrc | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/stackrc b/stackrc index c27ead3c24..0d8f059c1b 100644 --- a/stackrc +++ b/stackrc @@ -46,16 +46,18 @@ REGION_NAME=${REGION_NAME:-RegionOne} # This allows us to pass ``ENABLED_SERVICES`` if ! 
isset ENABLED_SERVICES ; then - # Compute (Glance / Keystone / Nova (+ nova-network)) - ENABLED_SERVICES=g-api,g-reg,key,n-api,n-crt,n-obj,n-cpu,n-net,n-cond,n-sch,n-novnc,n-xvnc,n-cauth + # Keystone - nothing works without keystone + ENABLED_SERVICES=key + # Nova - services to support libvirt based openstack clouds + ENABLED_SERVICES=,n-api,n-cpu,n-net,n-cond,n-sch,n-novnc + # Glance services needed for Nova + ENABLED_SERVICES=,g-api,g-reg # Cinder ENABLED_SERVICES+=,c-sch,c-api,c-vol - # Heat - ENABLED_SERVICES+=,h-eng,h-api,h-api-cfn,h-api-cw # Dashboard ENABLED_SERVICES+=,horizon # Additional services - ENABLED_SERVICES+=,rabbit,tempest,mysql + ENABLED_SERVICES+=,rabbit,tempest,mysql,dstat fi # SQLAlchemy supports multiple database drivers for each database server From ba1c56bf5e292df74b17eebdc998c74428c925a7 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 1 Apr 2015 07:40:10 -0400 Subject: [PATCH 0126/2941] remove extraneous +x bits from lib files 2 files had execute bits set on them, fix as a cleanup. These files should not be directly executed. Change-Id: Ic0fdb85d77a3b47ef777524faf4fcdb0d8cedece --- lib/glance | 0 lib/neutron-legacy | 0 2 files changed, 0 insertions(+), 0 deletions(-) mode change 100755 => 100644 lib/glance mode change 100755 => 100644 lib/neutron-legacy diff --git a/lib/glance b/lib/glance old mode 100755 new mode 100644 diff --git a/lib/neutron-legacy b/lib/neutron-legacy old mode 100755 new mode 100644 From 6c0da09b00fcfa8c4a9d34fb2ed08f6c704ed06e Mon Sep 17 00:00:00 2001 From: Jay Faulkner Date: Thu, 26 Mar 2015 15:19:32 -0700 Subject: [PATCH 0127/2941] Prepare devstack for Ironic cleaning testing This patch changes the two config options required for Ironic to successfully test cleaning in devstack. First, we disable erase_devices clean step. Erase devices in VMs ends up running shred on the drives for the agent driver, which is incredibly slow and completely unneeded in devstack. 
Additionally, we allow Ironic more time to complete the unprovision after the nova instance is deleted. This time is spent in the CLEANING state to clean up the node. This is related to the Ironic blueprint "implement-cleaning-states". Change-Id: I77081165a80491da3e66d8a4554b6d71fc3d9353 --- lib/ironic | 1 + lib/tempest | 1 + 2 files changed, 2 insertions(+) diff --git a/lib/ironic b/lib/ironic index b99e3255d5..9651ec3189 100644 --- a/lib/ironic +++ b/lib/ironic @@ -370,6 +370,7 @@ function configure_ironic_conductor { iniset $IRONIC_CONF_FILE glance swift_container glance iniset $IRONIC_CONF_FILE glance swift_temp_url_duration 3600 iniset $IRONIC_CONF_FILE agent heartbeat_timeout 30 + iniset $IRONIC_CONF_FILE agent agent_erase_devices_priority 0 fi if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then diff --git a/lib/tempest b/lib/tempest index 8672a14338..c981c2ba74 100644 --- a/lib/tempest +++ b/lib/tempest @@ -476,6 +476,7 @@ function configure_tempest { # Baremetal if [ "$VIRT_DRIVER" = "ironic" ] ; then iniset $TEMPEST_CONFIG baremetal driver_enabled True + iniset $TEMPEST_CONFIG baremetal unprovision_timeout 300 iniset $TEMPEST_CONFIG compute-feature-enabled change_password False iniset $TEMPEST_CONFIG compute-feature-enabled console_output False iniset $TEMPEST_CONFIG compute-feature-enabled interface_attach False From d2cb234be4b5e6d70635fc7578d951a42a41cc4a Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Thu, 2 Apr 2015 11:08:24 -0400 Subject: [PATCH 0128/2941] Set qcow image file location with right cirros version This commit ensures we also set the qcow image location in the tempest config when we update qcow version. The tempest config has a default value for img_file (which is incorrect) but before we can remove the defaults in tempest we need to ensure devstack is using it properly first. 
The only reason the tests weren't failing here is because tempest falls back to using uec images (which devstack was correctly setting config for) if qcow isn't found. The img_dir was also hardcoded assuming a uec image, however if qcow is intended to be used you'll need to be able to override that, which is added as part of this commit. Change-Id: I05af346b3c9be9560dc8846dd1f437cfbb2d5005 --- lib/tempest | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 3f33512cf6..7d7cf11469 100644 --- a/lib/tempest +++ b/lib/tempest @@ -401,10 +401,12 @@ function configure_tempest { fi # Scenario - iniset $TEMPEST_CONFIG scenario img_dir "$FILES/images/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec" + SCENARIO_IMAGE_DIR=${SCENARIO_IMAGE_DIR:-$FILES/images/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec} + iniset $TEMPEST_CONFIG scenario img_dir $SCENARIO_IMAGE_DIR iniset $TEMPEST_CONFIG scenario ami_img_file "cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-blank.img" iniset $TEMPEST_CONFIG scenario ari_img_file "cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-initrd" iniset $TEMPEST_CONFIG scenario aki_img_file "cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-vmlinuz" + iniset $TEMPEST_CONFIG scenario img_file "cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # Large Ops Number iniset $TEMPEST_CONFIG scenario large_ops_number ${TEMPEST_LARGE_OPS_NUMBER:-0} From 7cc3907ff1070858af2fcc8dd2d5b480f83022b6 Mon Sep 17 00:00:00 2001 From: Andrey Pavlov Date: Mon, 30 Mar 2015 20:49:22 +0300 Subject: [PATCH 0129/2941] install euca2ools for tempest preparation euca-bundle-image can be run only if euca2ools is installed. but now it doesn't installed and several tests for EC2 doesn't run. 
Change-Id: Ib3824052d5f4155d3cb5c0ef6fe334d44de5153c --- lib/tempest | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/tempest b/lib/tempest index 3f33512cf6..d8692dcce9 100644 --- a/lib/tempest +++ b/lib/tempest @@ -562,6 +562,8 @@ function init_tempest { echo "Prepare aki/ari/ami Images" mkdir -p $BOTO_MATERIALS_PATH ( #new namespace + # euca2ools should be installed to call euca-* commands + is_package_installed euca2ools || install_package euca2ools # tenant:demo ; user: demo source $TOP_DIR/accrc/demo/demo euca-bundle-image -r ${CIRROS_ARCH} -i "$kernel" --kernel true -d "$BOTO_MATERIALS_PATH" From 40e652af0e40203e870d6d4c3b8d77176ef8785d Mon Sep 17 00:00:00 2001 From: Andrey Pavlov Date: Thu, 2 Apr 2015 22:39:59 +0300 Subject: [PATCH 0130/2941] Use ec2 and s3 urls from keystone catalog In current implemetation these two urls are defined from Nova definitions. And urls point to nova. But standalone EC2API project has another urls that are defined in keystone catalog in plugin setup. I suggest to use urls from catalog to be able to test stackforge/ec2-api by current tempest. 
Change-Id: Ibec8c36a8c2fc7ea3d8fab57819adae5f7378045 --- lib/tempest | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/lib/tempest b/lib/tempest index 3f33512cf6..9f7e62608c 100644 --- a/lib/tempest +++ b/lib/tempest @@ -271,6 +271,15 @@ function configure_tempest { fi fi + EC2_URL=$(openstack endpoint show -f value -c publicurl ec2 || true) + if [[ -z $EC2_URL ]]; then + EC2_URL="$EC2_SERVICE_PROTOCOL://$SERVICE_HOST:8773/" + fi + S3_URL=$(openstack endpoint show -f value -c publicurl s3 || true) + if [[ -z $S3_URL ]]; then + S3_URL="http://$SERVICE_HOST:${S3_SERVICE_PORT:-3333}" + fi + iniset $TEMPEST_CONFIG DEFAULT use_syslog $SYSLOG # Oslo @@ -375,8 +384,8 @@ function configure_tempest { iniset $TEMPEST_CONFIG network-feature-enabled api_extensions $network_api_extensions # boto - iniset $TEMPEST_CONFIG boto ec2_url "$EC2_SERVICE_PROTOCOL://$SERVICE_HOST:8773/" - iniset $TEMPEST_CONFIG boto s3_url "http://$SERVICE_HOST:${S3_SERVICE_PORT:-3333}" + iniset $TEMPEST_CONFIG boto ec2_url "$EC2_URL" + iniset $TEMPEST_CONFIG boto s3_url "$S3_URL" iniset $TEMPEST_CONFIG boto s3_materials_path "$BOTO_MATERIALS_PATH" iniset $TEMPEST_CONFIG boto ari_manifest cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-initrd.manifest.xml iniset $TEMPEST_CONFIG boto ami_manifest cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-blank.img.manifest.xml From b58e22d170cc01467adf9cbb191aeb9351317f37 Mon Sep 17 00:00:00 2001 From: Andrew Laski Date: Thu, 2 Apr 2015 17:56:43 -0400 Subject: [PATCH 0131/2941] If cells enabled create a fixed network in the API cell Now that tempest is querying for a network before making server create calls the fixed network needs to be known by the API cell. Server creates should work for networks defined in both databases, but defining a new network via the API will continue to not work. 
Change-Id: I32461add0d20940a55385c8b34cd493e2561615e --- stack.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/stack.sh b/stack.sh index 8ab82348f3..adcaa219cc 100755 --- a/stack.sh +++ b/stack.sh @@ -1214,6 +1214,9 @@ if is_service_enabled q-svc; then elif is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-net; then NM_CONF=${NOVA_CONF} if is_service_enabled n-cell; then + # Create a small network in the API cell + $NOVA_BIN_DIR/nova-manage --config-file $NM_CONF network create "$PRIVATE_NETWORK_NAME" $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS + # Everything else should go in the child cell NM_CONF=${NOVA_CELLS_CONF} fi From db02bbf107ff0a9f0ffcf3dc8a9b9219ccd7a2c0 Mon Sep 17 00:00:00 2001 From: Raman Budny Date: Tue, 31 Mar 2015 13:09:09 +0300 Subject: [PATCH 0132/2941] Fixes ovs-vsctl add port issue Setup of OVS may fail if the ports being added already exist. Add "--may-exist" directive to OVS add-port command to prevent this behaviour. Change-Id: I2280be9a63a4a6fbc747b5e32b602697b555ffa8 Closes-Bug: #1394162 --- lib/neutron-legacy | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index d3dd8dd33e..c6d9296fba 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -801,7 +801,7 @@ function _move_neutron_addresses_route { fi if [[ "$add_ovs_port" == "True" ]]; then - ADD_OVS_PORT="sudo ovs-vsctl add-port $to_intf $from_intf" + ADD_OVS_PORT="sudo ovs-vsctl --may-exist add-port $to_intf $from_intf" fi sudo ip addr del $IP_BRD dev $from_intf; sudo ip addr add $IP_BRD dev $to_intf; $ADD_OVS_PORT; $ADD_DEFAULT_ROUTE From 72a3312904d9a97a99929bf1001a6fef4d8bfa00 Mon Sep 17 00:00:00 2001 From: Sushil Kumar Date: Fri, 3 Apr 2015 09:28:50 +0000 Subject: [PATCH 0133/2941] Fixes devstack stackrc One of the earlier patchsets, while updating stackrc, missed adding "+=" while adding multiple services. 
Closes-Bug: #1439983 Change-Id: I74f788e15b7da05a93fc8d99c562e51386b65053 --- stackrc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stackrc b/stackrc index 0d8f059c1b..abedb001d1 100644 --- a/stackrc +++ b/stackrc @@ -49,9 +49,9 @@ if ! isset ENABLED_SERVICES ; then # Keystone - nothing works without keystone ENABLED_SERVICES=key # Nova - services to support libvirt based openstack clouds - ENABLED_SERVICES=,n-api,n-cpu,n-net,n-cond,n-sch,n-novnc + ENABLED_SERVICES+=,n-api,n-cpu,n-net,n-cond,n-sch,n-novnc # Glance services needed for Nova - ENABLED_SERVICES=,g-api,g-reg + ENABLED_SERVICES+=,g-api,g-reg # Cinder ENABLED_SERVICES+=,c-sch,c-api,c-vol # Dashboard From 5e5a29cc8c83b3f9032de2482987bb9989422b7b Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 3 Apr 2015 08:52:29 -0400 Subject: [PATCH 0134/2941] fix bashate target there was a missing -or which meant we weren't running this on most of the files in the repository. Change-Id: If926afc8f12f6beb80d7a9af7c787b3dcc360a89 --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index bc84928d95..279dcd4f66 100644 --- a/tox.ini +++ b/tox.ini @@ -20,7 +20,7 @@ commands = bash -c "find {toxinidir} \ -name \*.sh -or \ -name \*rc -or \ -name functions\* -or \ - -wholename \*/inc/\* \ # /inc files and + -wholename \*/inc/\* -or \ # /inc files and -wholename \*/lib/\* \ # /lib files are shell, but \) \ # have no extension -print0 | xargs -0 bashate -v" From a79e1011ca15bee9d1f326270e516edeab6a5257 Mon Sep 17 00:00:00 2001 From: Dane LeBlanc Date: Thu, 26 Mar 2015 14:48:07 -0400 Subject: [PATCH 0135/2941] Remove unused IPv6 setup from DevStack neutron script With the implementation of dual-stack gateway support as part of the Neutron multiple-ipv6-prefixes blueprint, some of the code in the IPv6 setup in the DevStack neutron legacy script must be removed. 
This code had been added temporarily, with a TODO note indicating that this code should be removed when the Neutron L3 agent is modified to support dual-stack. Without this change, DevStack will fail to configure the Neutron router gateway interface (there will be no external connectivity) whenever IP_VERSION is set to '4+6' in localrc/local.conf, since first DevStack and later the Neutron L3 agent will be trying to ADD the IPv6 address to the router gateway interface. This change also includes a modification of the default prefix to be used for the public IPv6 subnet. The new value (2001:df8::/64) is a special reserved prefix that will be treated as non-routable external to the OpenStack instance. Change-Id: I85fe68782bc54f28f3e14aa4a1d042cb15959dac Partially-implements: blueprint multiple-ipv6-prefixes --- lib/neutron-legacy | 46 ++++++---------------------------------------- 1 file changed, 6 insertions(+), 40 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index c6d9296fba..8eb9e62a90 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -57,9 +57,6 @@ # Settings # -------- -# Timeout value in seconds to wait for IPv6 gateway configuration -GATEWAY_TIMEOUT=30 - # Neutron Network Configuration # ----------------------------- @@ -90,12 +87,9 @@ IPV6_PUBLIC_SUBNET_NAME=${IPV6_PUBLIC_SUBNET_NAME:-ipv6-public-subnet} IPV6_PRIVATE_SUBNET_NAME=${IPV6_PRIVATE_SUBNET_NAME:-ipv6-private-subnet} FIXED_RANGE_V6=${FIXED_RANGE_V6:-fd$IPV6_GLOBAL_ID::/64} IPV6_PRIVATE_NETWORK_GATEWAY=${IPV6_PRIVATE_NETWORK_GATEWAY:-fd$IPV6_GLOBAL_ID::1} -IPV6_PUBLIC_RANGE=${IPV6_PUBLIC_RANGE:-fe80:cafe:cafe::/64} -IPV6_PUBLIC_NETWORK_GATEWAY=${IPV6_PUBLIC_NETWORK_GATEWAY:-fe80:cafe:cafe::2} -# IPV6_ROUTER_GW_IP must be defined when IP_VERSION=4+6 as it cannot be -# obtained conventionally until the l3-agent has support for dual-stack -# TODO (john-davidge) Remove once l3-agent supports dual-stack -IPV6_ROUTER_GW_IP=${IPV6_ROUTER_GW_IP:-fe80:cafe:cafe::1} 
+IPV6_PUBLIC_RANGE=${IPV6_PUBLIC_RANGE:-2001:db8::/64} +IPV6_PUBLIC_NETWORK_GATEWAY=${IPV6_PUBLIC_NETWORK_GATEWAY:-2001:db8::2} +IPV6_ROUTER_GW_IP=${IPV6_ROUTER_GW_IP:-2001:db8::1} # Set up default directories GITDIR["python-neutronclient"]=$DEST/python-neutronclient @@ -1291,20 +1285,12 @@ function _neutron_configure_router_v6 { # This logic is specific to using the l3-agent for layer 3 if is_service_enabled q-l3; then - local ipv6_router_gw_port # Ensure IPv6 forwarding is enabled on the host sudo sysctl -w net.ipv6.conf.all.forwarding=1 # Configure and enable public bridge - if [[ "$IP_VERSION" = "6" ]]; then - # Override global IPV6_ROUTER_GW_IP with the true value from neutron - IPV6_ROUTER_GW_IP=`neutron port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F '"' -v subnet_id=$ipv6_pub_subnet_id '$4 == subnet_id { print $8; }'` - die_if_not_set $LINENO IPV6_ROUTER_GW_IP "Failure retrieving IPV6_ROUTER_GW_IP" - ipv6_router_gw_port=`neutron port-list -c id -c fixed_ips -c device_owner | grep router_gateway | awk -F '"' -v subnet_id=$ipv6_pub_subnet_id '$4 == subnet_id { print $1; }' | awk -F ' | ' '{ print $2; }'` - die_if_not_set $LINENO ipv6_router_gw_port "Failure retrieving ipv6_router_gw_port" - else - ipv6_router_gw_port=`neutron port-list -c id -c fixed_ips -c device_owner | grep router_gateway | awk -F '"' -v subnet_id=$PUB_SUBNET_ID '$4 == subnet_id { print $1; }' | awk -F ' | ' '{ print $2; }'` - die_if_not_set $LINENO ipv6_router_gw_port "Failure retrieving ipv6_router_gw_port" - fi + # Override global IPV6_ROUTER_GW_IP with the true value from neutron + IPV6_ROUTER_GW_IP=`neutron port-list -c fixed_ips | grep $ipv6_pub_subnet_id | awk -F '"' -v subnet_id=$ipv6_pub_subnet_id '$4 == subnet_id { print $8; }'` + die_if_not_set $LINENO IPV6_ROUTER_GW_IP "Failure retrieving IPV6_ROUTER_GW_IP" # The ovs_base_configure_l3_agent function flushes the public # bridge's ip addresses, so turn IPv6 support in the host off @@ -1321,28 +1307,8 @@ 
function _neutron_configure_router_v6 { local ext_gw_interface=$(_neutron_get_ext_gw_interface) local ipv6_cidr_len=${IPV6_PUBLIC_RANGE#*/} - # Define router_ns based on whether DVR is enabled - local router_ns=qrouter - if [[ "$Q_DVR_MODE" == "dvr_snat" ]]; then - router_ns=snat - fi - # Configure interface for public bridge sudo ip -6 addr add $ipv6_ext_gw_ip/$ipv6_cidr_len dev $ext_gw_interface - - # Wait until layer 3 agent has configured the gateway port on - # the public bridge, then add gateway address to the interface - # TODO (john-davidge) Remove once l3-agent supports dual-stack - if [[ "$IP_VERSION" == "4+6" ]]; then - if ! timeout $GATEWAY_TIMEOUT sh -c "until sudo ip netns exec $router_ns-$ROUTER_ID ip addr show qg-${ipv6_router_gw_port:0:11} | grep $ROUTER_GW_IP; do sleep 1; done"; then - die $LINENO "Timeout retrieving ROUTER_GW_IP" - fi - # Configure the gateway port with the public IPv6 adress - sudo ip netns exec $router_ns-$ROUTER_ID ip -6 addr add $IPV6_ROUTER_GW_IP/$ipv6_cidr_len dev qg-${ipv6_router_gw_port:0:11} - # Add a default IPv6 route to the neutron router as the - # l3-agent does not add one in the dual-stack case - sudo ip netns exec $router_ns-$ROUTER_ID ip -6 route replace default via $ipv6_ext_gw_ip dev qg-${ipv6_router_gw_port:0:11} - fi sudo ip -6 route add $FIXED_RANGE_V6 via $IPV6_ROUTER_GW_IP dev $ext_gw_interface fi _neutron_set_router_id From bcef63ed4306ac8cbfac5c68c828b10e129c3475 Mon Sep 17 00:00:00 2001 From: Andrew McDermott Date: Wed, 1 Apr 2015 15:47:36 +0000 Subject: [PATCH 0136/2941] Update qemu package name for Ubuntu aarch64 The qemu-system package, and not qemu-kvm, should be installed on either trusty- or utopic-based ARMv8 (aarch64) Ubuntu releases. Additionally, libguestfs is not available so that is not installed. No changes are required for vivid. 
Change-Id: Id9dc1fc465bd7acab17c991c292fb531016758ad Signed-off-by: Andrew McDermott --- lib/nova_plugins/functions-libvirt | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) mode change 100644 => 100755 lib/nova_plugins/functions-libvirt diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt old mode 100644 new mode 100755 index fa3666eff7..d4a07688cd --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -23,11 +23,15 @@ DEBUG_LIBVIRT=$(trueorfalse True DEBUG_LIBVIRT) # Installs required distro-specific libvirt packages. function install_libvirt { if is_ubuntu; then - install_package qemu-kvm + if is_arch "aarch64" && [[ ${DISTRO} =~ (trusty|utopic) ]]; then + install_package qemu-system + else + install_package qemu-kvm + install_package libguestfs0 + install_package python-guestfs + fi install_package libvirt-bin libvirt-dev pip_install libvirt-python - install_package libguestfs0 - install_package python-guestfs #pip_install elif is_fedora || is_suse; then install_package kvm From 33ba738b052cd642f7ea2e6c2196e193be14122f Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Mon, 6 Apr 2015 10:25:54 -0700 Subject: [PATCH 0137/2941] Revert "List all CIRROS ARCH images in image_list.sh" This reverts commit 71e82f52bde99b4bf791ea1558f1abf86019a384. aioppcu now uses x86_64 so no need to list the i386 images for caching. Change-Id: If500367c8bf3fdb4590c866e007ecd7de1ab5781 Depends-On: I839e1c724821ba2624beddb5233eda24b50c149f --- tools/image_list.sh | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/tools/image_list.sh b/tools/image_list.sh index 204280704e..a27635effd 100755 --- a/tools/image_list.sh +++ b/tools/image_list.sh @@ -9,8 +9,6 @@ source $TOP_DIR/functions # dummy in the end position to trigger the fall through case. 
DRIVERS="openvz ironic libvirt vsphere xenserver dummy" -CIRROS_ARCHS="x86_64 i386" - # Extra variables to trigger getting additional images. export ENABLED_SERVICES="h-api,tr-api" HEAT_FETCHED_TEST_IMAGE="Fedora-i386-20-20131211.1-sda" @@ -19,15 +17,12 @@ PRECACHE_IMAGES=True # Loop over all the virt drivers and collect all the possible images ALL_IMAGES="" for driver in $DRIVERS; do - for arch in $CIRROS_ARCHS; do - CIRROS_ARCH=$arch - VIRT_DRIVER=$driver - URLS=$(source $TOP_DIR/stackrc && echo $IMAGE_URLS) - if [[ ! -z "$ALL_IMAGES" ]]; then - ALL_IMAGES+=, - fi - ALL_IMAGES+=$URLS - done + VIRT_DRIVER=$driver + URLS=$(source $TOP_DIR/stackrc && echo $IMAGE_URLS) + if [[ ! -z "$ALL_IMAGES" ]]; then + ALL_IMAGES+=, + fi + ALL_IMAGES+=$URLS done # Make a nice list From 8c6276ea0a2332f5210fc1f16399281c083520cd Mon Sep 17 00:00:00 2001 From: Raseel Bhagat Date: Tue, 7 Apr 2015 03:15:45 +0530 Subject: [PATCH 0138/2941] Added libxml2-dev package as a pre-requisite when installing tempest. This is required so that devstack can be installed on vanilla Ubuntu systems. Closes-Bug: 1225395 Change-Id: Id9116e00e18c23e8e6391d8aa652c04d8bdb86c3 --- files/debs/tempest | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/files/debs/tempest b/files/debs/tempest index f244e4e783..bb095297e0 100644 --- a/files/debs/tempest +++ b/files/debs/tempest @@ -1 +1,2 @@ -libxslt1-dev \ No newline at end of file +libxml2-dev +libxslt1-dev From 0089035504a97fca58cb2383f62ccbb4e6108820 Mon Sep 17 00:00:00 2001 From: Yuki Nishiwaki Date: Sun, 29 Mar 2015 23:35:39 +0900 Subject: [PATCH 0139/2941] Assurance status of rabbitmq is running I changed it so that rabbitmq always restart. Current devstack don't restart rabbitmq in case of ubuntu. Because rabbitmq is running at default. But this approach have the following bug. If rabbitmq is already installed and not running , stack.sh will fail. So I change it so that rabbitmq always restart. 
Closes-bug: #1030798 Change-Id: Ie45446d3817b2f15631f03b2af84749fe936c67b --- lib/rpc_backend | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/lib/rpc_backend b/lib/rpc_backend index 3033cbe08e..3d8fd2bd3e 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -201,10 +201,7 @@ function restart_rpc_backend { [[ $i -eq "10" ]] && die $LINENO "Failed to set rabbitmq password" - if is_fedora || is_suse; then - # service is not started by default - restart_service rabbitmq-server - fi + restart_service rabbitmq-server rabbit_setuser "$RABBIT_USERID" "$RABBIT_PASSWORD" || rc=$? if [ $rc -ne 0 ]; then From 7b9341e1789b31786d10f27a3a3c825fe44bb506 Mon Sep 17 00:00:00 2001 From: Bharat Kumar Kobagana Date: Mon, 30 Mar 2015 11:58:10 +0530 Subject: [PATCH 0140/2941] Clone external plugins before overriding defaults This patch clones external plugin repositories before overriding default configuration parameters. Closes-Bug: 1441058 Change-Id: Ie14fcb897cb40b1604bfb5869baa0dec58e51bce --- stack.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index adcaa219cc..5331beb9b7 100755 --- a/stack.sh +++ b/stack.sh @@ -514,6 +514,9 @@ fi # Configure Projects # ================== +# Clone all external plugins +fetch_plugins + # Plugin Phase 0: override_defaults - allow pluggins to override # defaults before other services are run run_phase override_defaults @@ -540,9 +543,6 @@ source $TOP_DIR/lib/neutron-legacy source $TOP_DIR/lib/ldap source $TOP_DIR/lib/dstat -# Clone all external plugins -fetch_plugins - # Extras Source # -------------- From 97aa81d905ce73054747a56a4ba0d93c308d23a5 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Tue, 7 Apr 2015 16:23:53 -0400 Subject: [PATCH 0141/2941] Fix docs about heat in devtack In 279cfe75198c723519f1fb361b2bff3c641c6cef, we disabled heat by default. So fixing the README to reflect that. 
Change-Id: I3da257158b37e235eed1c78e0c4df432caeefa1d --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 53de970763..039ce3e548 100644 --- a/README.md +++ b/README.md @@ -264,10 +264,10 @@ To change this, set the `Q_AGENT` variable to the agent you want to run # Heat -Heat is enabled by default (see `stackrc` file). To disable it explicitly +Heat is disabled by default (see `stackrc` file). To enable it explicitly you'll need the following settings in your `localrc` section: - disable_service heat h-api h-api-cfn h-api-cw h-eng + enable_service heat h-api h-api-cfn h-api-cw h-eng Heat can also run in standalone mode, and be configured to orchestrate on an external OpenStack cloud. To launch only Heat in standalone mode From 60996b1b60c3efb1376b9f0d659acebd05c47f09 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 8 Apr 2015 09:06:49 -0400 Subject: [PATCH 0142/2941] introduce pip_install_gr This creates a new pip_install_gr that installs from global requirements allowed versions. Now that stable branches are getting capped all of devstack needs to be fixed to do things like this. Change-Id: I8fd0ef2bfc544ca2576fab09d3018f760b8848fe --- inc/python | 14 +++++++++++++- lib/ceilometer | 3 +++ lib/databases/mysql | 2 +- lib/databases/postgresql | 2 +- lib/horizon | 2 +- lib/ironic | 4 ++-- lib/keystone | 2 +- lib/ldap | 2 +- lib/nova_plugins/functions-libvirt | 6 +++--- lib/nova_plugins/hypervisor-xenserver | 2 +- lib/rpc_backend | 2 +- lib/tempest | 2 +- lib/zaqar | 4 ++-- pkg/elasticsearch.sh | 2 +- stack.sh | 2 +- 15 files changed, 33 insertions(+), 18 deletions(-) diff --git a/inc/python b/inc/python index 39684b6fc5..c7cbb52884 100644 --- a/inc/python +++ b/inc/python @@ -52,6 +52,18 @@ function get_python_exec_prefix { fi } +# Wrapper for ``pip install`` that only installs versions of libraries +# from the global-requirements specification. 
+# +# Uses globals ``REQUIREMENTS_DIR`` +# +# pip_install_gr packagename +function pip_install_gr { + local name=$1 + local clean_name=$(get_from_global_requirements $name) + pip_install $clean_name +} + # Wrapper for ``pip install`` to set cache and proxy environment variables # Uses globals ``OFFLINE``, ``PIP_VIRTUAL_ENV``, # ``PIP_UPGRADE``, ``TRACK_DEPENDS``, ``*_proxy`` @@ -125,7 +137,7 @@ function pip_install { # get_from_global_requirements function get_from_global_requirements { local package=$1 - local required_pkg=$(grep -h ${package} $REQUIREMENTS_DIR/global-requirements.txt | cut -d\# -f1) + local required_pkg=$(grep -h ^${package} $REQUIREMENTS_DIR/global-requirements.txt | cut -d\# -f1) if [[ $required_pkg == "" ]]; then die $LINENO "Can't find package $package in requirements" fi diff --git a/lib/ceilometer b/lib/ceilometer index 81353093b2..dba92ba542 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -315,6 +315,9 @@ function install_ceilometermiddleware { git_clone_by_name "ceilometermiddleware" setup_dev_lib "ceilometermiddleware" else + # BUG: this should be a pip_install_gr except it was never + # included in global-requirements. 
Needs to be fixed by + # https://bugs.launchpad.net/ceilometer/+bug/1441655 pip_install ceilometermiddleware fi } diff --git a/lib/databases/mysql b/lib/databases/mysql index 310817b69a..1b9a08199f 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -155,7 +155,7 @@ EOF function install_database_python_mysql { # Install Python client module - pip_install MySQL-python + pip_install_gr MySQL-python ADDITIONAL_VENV_PACKAGES+=",MySQL-python" } diff --git a/lib/databases/postgresql b/lib/databases/postgresql index a6bcf8c0a2..e087a1e0d4 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -104,7 +104,7 @@ EOF function install_database_python_postgresql { # Install Python client module - pip_install psycopg2 + pip_install_gr psycopg2 ADDITIONAL_VENV_PACKAGES+=",psycopg2" } diff --git a/lib/horizon b/lib/horizon index 63a9d0fe46..f953f5cc01 100644 --- a/lib/horizon +++ b/lib/horizon @@ -183,7 +183,7 @@ function stop_horizon { # NOTE: It can be moved to common functions, but it is only used by compilation # of django_openstack_auth catalogs at the moment. 
function _prepare_message_catalog_compilation { - pip_install $(get_from_global_requirements Babel) + pip_install_gr Babel } diff --git a/lib/ironic b/lib/ironic index c8481ab992..7afed052e9 100644 --- a/lib/ironic +++ b/lib/ironic @@ -206,7 +206,7 @@ function install_ironicclient { sudo install -D -m 0644 -o $STACK_USER {${GITDIR["python-ironicclient"]}/tools/,/etc/bash_completion.d/}ironic.bash_completion else # nothing actually "requires" ironicclient, so force instally from pypi - pip_install python-ironicclient + pip_install_gr python-ironicclient fi } @@ -729,7 +729,7 @@ function upload_baremetal_ironic_deploy { # install diskimage-builder if [[ $(type -P ramdisk-image-create) == "" ]]; then - pip_install diskimage_builder + pip_install_gr "diskimage-builder" fi if [ -z "$IRONIC_DEPLOY_KERNEL" -o -z "$IRONIC_DEPLOY_RAMDISK" ]; then diff --git a/lib/keystone b/lib/keystone index 7b41812fca..31659f4338 100644 --- a/lib/keystone +++ b/lib/keystone @@ -524,7 +524,7 @@ function install_keystonemiddleware { setup_dev_lib "keystonemiddleware" else # When not installing from repo, keystonemiddleware is still needed... 
- pip_install keystonemiddleware + pip_install_gr keystonemiddleware fi } diff --git a/lib/ldap b/lib/ldap index d69d3f84a5..d2dbc3b728 100644 --- a/lib/ldap +++ b/lib/ldap @@ -142,7 +142,7 @@ function install_ldap { sudo ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/schema/inetorgperson.ldif fi - pip_install ldappool + pip_install_gr ldappool rm -rf $tmp_ldap_dir } diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index d4a07688cd..04da5e2b60 100755 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -31,12 +31,12 @@ function install_libvirt { install_package python-guestfs fi install_package libvirt-bin libvirt-dev - pip_install libvirt-python - #pip_install + pip_install_gr libvirt-python + #pip_install_gr elif is_fedora || is_suse; then install_package kvm install_package libvirt libvirt-devel - pip_install libvirt-python + pip_install_gr libvirt-python install_package python-libguestfs fi diff --git a/lib/nova_plugins/hypervisor-xenserver b/lib/nova_plugins/hypervisor-xenserver index 4d0ec898ad..efce383222 100644 --- a/lib/nova_plugins/hypervisor-xenserver +++ b/lib/nova_plugins/hypervisor-xenserver @@ -94,7 +94,7 @@ CRONTAB # install_nova_hypervisor() - Install external components function install_nova_hypervisor { - pip_install xenapi + pip_install_gr xenapi } # start_nova_hypervisor - Start any required external services diff --git a/lib/rpc_backend b/lib/rpc_backend index cc083de449..2b7c6cbc64 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -141,7 +141,7 @@ function install_rpc_backend { # TODO(kgiusti) can remove once python qpid bindings are # available on all supported platforms _and_ pyngus is added # to the requirements.txt file in oslo.messaging - pip_install pyngus + pip_install_gr pyngus fi if is_service_enabled rabbit; then diff --git a/lib/tempest b/lib/tempest index e8e9e0b239..cd8fbd725f 100644 --- a/lib/tempest +++ b/lib/tempest @@ -103,7 +103,7 @@ function 
configure_tempest { setup_develop $TEMPEST_DIR else # install testr since its used to process tempest logs - pip_install $(get_from_global_requirements testrepository) + pip_install_gr testrepository fi local image_lines diff --git a/lib/zaqar b/lib/zaqar index 34f1915025..8d51910896 100644 --- a/lib/zaqar +++ b/lib/zaqar @@ -140,10 +140,10 @@ function configure_zaqar { function configure_redis { if is_ubuntu; then install_package redis-server - pip_install redis + pip_install_gr redis elif is_fedora; then install_package redis - pip_install redis + pip_install_gr redis else exit_distro_not_supported "redis installation" fi diff --git a/pkg/elasticsearch.sh b/pkg/elasticsearch.sh index f53c7f2ff2..29dc22fb88 100755 --- a/pkg/elasticsearch.sh +++ b/pkg/elasticsearch.sh @@ -77,7 +77,7 @@ function stop_elasticsearch { } function install_elasticsearch { - pip_install elasticsearch + pip_install_gr elasticsearch if is_package_installed elasticsearch; then echo "Note: elasticsearch was already installed." return diff --git a/stack.sh b/stack.sh index adcaa219cc..69b76a9e19 100755 --- a/stack.sh +++ b/stack.sh @@ -847,7 +847,7 @@ if use_library_from_git "python-openstackclient"; then git_clone_by_name "python-openstackclient" setup_dev_lib "python-openstackclient" else - pip_install 'python-openstackclient>=1.0.2' + pip_install_gr python-openstackclient fi if [[ $TRACK_DEPENDS = True ]]; then From 9860876f5dbb07826b680143a4e111ad580053fe Mon Sep 17 00:00:00 2001 From: Amrith Kumar Date: Wed, 8 Apr 2015 15:37:58 -0400 Subject: [PATCH 0143/2941] perform install_infra sooner in stack.sh The install_infra() call needs to be done earlier since pip_install_gr() depends on it. Also the fact that python module names are supposed to be lower case but some use camel case is a problem (for example with XenAPI). 
Change-Id: I7012d77134fa0d9c746d87e837934e7dcb337e5c Closes-Bug: #1441820 --- inc/python | 2 +- stack.sh | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/inc/python b/inc/python index c7cbb52884..3d329b59a9 100644 --- a/inc/python +++ b/inc/python @@ -137,7 +137,7 @@ function pip_install { # get_from_global_requirements function get_from_global_requirements { local package=$1 - local required_pkg=$(grep -h ^${package} $REQUIREMENTS_DIR/global-requirements.txt | cut -d\# -f1) + local required_pkg=$(grep -i -h ^${package} $REQUIREMENTS_DIR/global-requirements.txt | cut -d\# -f1) if [[ $required_pkg == "" ]]; then die $LINENO "Can't find package $package in requirements" fi diff --git a/stack.sh b/stack.sh index 69b76a9e19..9ecc49c7ca 100755 --- a/stack.sh +++ b/stack.sh @@ -714,6 +714,9 @@ fi # Extras Pre-install # ------------------ +# Install required infra support libraries +install_infra + # Phase: pre-install run_phase stack pre-install @@ -733,9 +736,6 @@ fi echo_summary "Installing OpenStack project source" -# Install required infra support libraries -install_infra - # Install Oslo libraries install_oslo From fcc3f6ee986c1166c001774052c05b5d974593ea Mon Sep 17 00:00:00 2001 From: Clark Laughlin Date: Tue, 7 Apr 2015 16:31:47 +0000 Subject: [PATCH 0144/2941] Add support for arm64 images This patch enables proper support for arm64 images by disabling VNC support and adding several properties to the image in glance that are necessary to boot correctly: hw_cdrom_bus=virtio hw_machine_type=virt os_command_line='console=ttyAMA0' Change-Id: I68c9a5e0e083af2f92875c3bdf70df750f6e4d8f --- functions | 4 ++++ lib/nova_plugins/hypervisor-libvirt | 6 ++++++ 2 files changed, 10 insertions(+) diff --git a/functions b/functions index 4dc20e7f23..2078db1e7c 100644 --- a/functions +++ b/functions @@ -287,6 +287,10 @@ function upload_image { img_property="--property hw_cdrom_bus=scsi" fi + if is_arch "aarch64"; then + img_property="--property 
hw_machine_type=virt --property hw_cdrom_bus=virtio --property os_command_line='console=ttyAMA0'" + fi + if [ "$container_format" = "bare" ]; then if [ "$unpack" = "zcat" ]; then openstack --os-token $token --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT image create "$image_name" $img_property --public --container-format=$container_format --disk-format $disk_format < <(zcat --force "${image}") diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index 4d1eb6cdcb..a6a87f9164 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -54,6 +54,12 @@ function configure_nova_hypervisor { iniset $NOVA_CONF DEFAULT vnc_enabled "false" fi + # arm64-specific configuration + if is_arch "aarch64"; then + # arm64 architecture currently does not support graphical consoles. + iniset $NOVA_CONF DEFAULT vnc_enabled "false" + fi + ENABLE_FILE_INJECTION=$(trueorfalse False ENABLE_FILE_INJECTION) if [[ "$ENABLE_FILE_INJECTION" = "True" ]] ; then # When libguestfs is available for file injection, enable using From 4d7ee095a18af9e834202e92534d2ba7a0b371c5 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 7 Apr 2015 10:40:49 -0400 Subject: [PATCH 0145/2941] Make screen sleep time configurable the sleep 3 in screen_it was added to make devstack pass in the gate with exceptionally slow test cloud nodes. In the gate we now bypass the screen path entirely. However the sleep 3 remains and can add a couple minutes delay into local development runs. We're not sure yet how low this can safely be tuned, so step 1 is to make it configurable, then get devstack team members to try various options to see what works. 
Change-Id: I0e6476176fc8589efc4e40e78c2231f704d14e45 --- functions-common | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/functions-common b/functions-common index f442211a61..f8543c1f98 100644 --- a/functions-common +++ b/functions-common @@ -1256,8 +1256,13 @@ function screen_process { # sleep to allow bash to be ready to be send the command - we are # creating a new window in screen and then sends characters, so if - # bash isn't running by the time we send the command, nothing happens - sleep 3 + # bash isn't running by the time we send the command, nothing + # happens. This sleep was added originally to handle gate runs + # where we needed this to be at least 3 seconds to pass + # consistently on slow clouds. Now this is configurable so that we + # can determine a reasonable value for the local case which should + # be much smaller. + sleep ${SCREEN_SLEEP:-3} NL=`echo -ne '\015'` # This fun command does the following: From 7cf7a8f88f05a6e6994dfb2ff3a6643d21c3048e Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Thu, 9 Apr 2015 11:46:56 +0200 Subject: [PATCH 0146/2941] rpc: Allow to configure the rabbitmq heartbeat For testing we can need to disable or change the rate of the heartbeat Currently we have to set the value manually in each componments or to write multiple [[post-config|$_CONF]] section in local.conf. This change will allow to configure all componments at once with only two lines. Also, we don't set default values to continue to use oslo.messaging defaults. 
Change-Id: Ieaca60ca1cd6d7455b66ce490a9b023df431e9c3 --- lib/rpc_backend | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/rpc_backend b/lib/rpc_backend index 2b7c6cbc64..288987cdf0 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -273,6 +273,12 @@ function iniset_rpc_backend { iniset $file oslo_messaging_rabbit rabbit_hosts $RABBIT_HOST iniset $file oslo_messaging_rabbit rabbit_password $RABBIT_PASSWORD iniset $file oslo_messaging_rabbit rabbit_userid $RABBIT_USERID + if [ -n "$RABBIT_HEARTBEAT_TIMEOUT_THRESHOLD" ]; then + iniset $file oslo_messaging_rabbit heartbeat_timeout_threshold $RABBIT_HEARTBEAT_TIMEOUT_THRESHOLD + fi + if [ -n "$RABBIT_HEARTBEAT_RATE" ]; then + iniset $file oslo_messaging_rabbit heartbeat_rate $RABBIT_HEARTBEAT_RATE + fi fi } From d394e59b5c43d6dc7107c3ee31cbb0bb07300d13 Mon Sep 17 00:00:00 2001 From: melanie witt Date: Wed, 8 Apr 2015 23:02:59 +0000 Subject: [PATCH 0147/2941] Specify network UUID for network create with cells When the n-cell service is enabled, we create networks in both the API cell and the child cell. Recent changes to tempest have tests querying networks from the API and passing them for a server create. In order for this to work in cells, the UUIDs for the network in the API cell and the child cell must match, else the network won't be found in the child. This change adds the --uuid option to the nova-manage network create command for cells only. Related-Bug: #1441931 Depends-On: Ib29e632b09905f557a7a6910d58207ed91cdc047 Change-Id: Ib5933b1405c0761ff727e04cda0c502a826c8eaf --- stack.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/stack.sh b/stack.sh index 69b76a9e19..c795dfcf90 100755 --- a/stack.sh +++ b/stack.sh @@ -1214,6 +1214,10 @@ if is_service_enabled q-svc; then elif is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-net; then NM_CONF=${NOVA_CONF} if is_service_enabled n-cell; then + # Both cells should have the same network uuid for server create + if [[ ! 
"$NETWORK_CREATE_ARGS" =~ "--uuid" ]]; then + NETWORK_CREATE_ARGS="$NETWORK_CREATE_ARGS --uuid $(uuidgen)" + fi # Create a small network in the API cell $NOVA_BIN_DIR/nova-manage --config-file $NM_CONF network create "$PRIVATE_NETWORK_NAME" $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS # Everything else should go in the child cell From c2dc95add6e46829f1705041c1d9dddab9b360d3 Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Wed, 8 Apr 2015 23:32:17 -0700 Subject: [PATCH 0148/2941] Avoid flushing br-ex during stacking This operation seems vestigial, as it was added to the code when stack.sh did not have a robust cleanup procedure. These days, unstack.sh does destroy all bridges, therefore during subsequent stack.sh runs (or even initially, from a clean environment), the flush operation has become superfluous. Its removal has also been deemeded necessary to enable certain multi-node cloud deployments, like the one available in OpenStack infra [1]. [1] https://review.openstack.org/#/c/158525/ Change-Id: I6b4e5b82958e6d29dd450f1c4c9513f6a9e5053a --- lib/neutron_plugins/ovs_base | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base index 2997c6c25a..51999c60e4 100644 --- a/lib/neutron_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -93,11 +93,8 @@ function _neutron_ovs_base_configure_l3_agent { sudo ip link set $Q_PUBLIC_VETH_EX up sudo ip addr flush dev $Q_PUBLIC_VETH_EX else - # --no-wait causes a race condition if $PUBLIC_BRIDGE is not up when ip addr flush is called sudo ovs-vsctl -- --may-exist add-br $PUBLIC_BRIDGE sudo ovs-vsctl br-set-external-id $PUBLIC_BRIDGE bridge-id $PUBLIC_BRIDGE - # ensure no IP is configured on the public bridge - sudo ip addr flush dev $PUBLIC_BRIDGE fi } From 13a95a2dc1b429e5aa11148612b0f867fb75f1b0 Mon Sep 17 00:00:00 2001 From: Ghe Rivero Date: Thu, 9 Apr 2015 10:23:58 +0200 Subject: [PATCH 0149/2941] Add ironic files to .gitignore Add pxe booting 
related files to the .gitignore list Change-Id: I08cfc98bcdd89f6a9a922f86c86551b661d69fff --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index c6900c881c..76ad830a5e 100644 --- a/.gitignore +++ b/.gitignore @@ -15,6 +15,7 @@ files/*.qcow2 files/images files/pip-* files/get-pip.py* +files/ir-deploy* local.conf local.sh localrc From f5b550ee2959a30f7e07271e596cee1d7346aa50 Mon Sep 17 00:00:00 2001 From: melanie witt Date: Fri, 10 Apr 2015 22:20:07 +0000 Subject: [PATCH 0150/2941] Prevent setting tempest fixed_network_name for cells Instead of creating a network in both the API cell and child cell, let tempest use the old behavior of not querying networks from the API for testing server create. Change-Id: I9809d2b2e796ff1a5ea7e4f25bbeb21bd4817a72 --- lib/tempest | 4 +++- stack.sh | 7 ------- 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/lib/tempest b/lib/tempest index cd8fbd725f..dc5fb5130a 100644 --- a/lib/tempest +++ b/lib/tempest @@ -340,7 +340,9 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute flavor_ref $flavor_ref iniset $TEMPEST_CONFIG compute flavor_ref_alt $flavor_ref_alt iniset $TEMPEST_CONFIG compute ssh_connect_method $ssh_connect_method - iniset $TEMPEST_CONFIG compute fixed_network_name $PRIVATE_NETWORK_NAME + if ! is_service_enabled n-cell; then + iniset $TEMPEST_CONFIG compute fixed_network_name $PRIVATE_NETWORK_NAME + fi # Compute Features # Run ``verify_tempest_config -ur`` to retrieve enabled extensions on API endpoints diff --git a/stack.sh b/stack.sh index 5cdcbdf9d8..7a5cbcc1fc 100755 --- a/stack.sh +++ b/stack.sh @@ -1214,13 +1214,6 @@ if is_service_enabled q-svc; then elif is_service_enabled $DATABASE_BACKENDS && is_service_enabled n-net; then NM_CONF=${NOVA_CONF} if is_service_enabled n-cell; then - # Both cells should have the same network uuid for server create - if [[ ! 
"$NETWORK_CREATE_ARGS" =~ "--uuid" ]]; then - NETWORK_CREATE_ARGS="$NETWORK_CREATE_ARGS --uuid $(uuidgen)" - fi - # Create a small network in the API cell - $NOVA_BIN_DIR/nova-manage --config-file $NM_CONF network create "$PRIVATE_NETWORK_NAME" $FIXED_RANGE 1 $FIXED_NETWORK_SIZE $NETWORK_CREATE_ARGS - # Everything else should go in the child cell NM_CONF=${NOVA_CELLS_CONF} fi From 93d09c24e35611cc7fc1ef8e6796d177d460fecc Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Sat, 11 Apr 2015 18:45:09 -0400 Subject: [PATCH 0151/2941] Setting LOGFILE to a file in root directory wipes everything clean.sh picks the parent of LOGFILE and wipes it clean! So if you set it to a log file in the users root directory, you lose everything We should delete just the LOGFILE and cleanup LOGDIR and SCREEN_LOGDIR if they are explicitly set. Change-Id: I45745427dcaed3dcf0b78cc9ed680833d9d555e8 --- clean.sh | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/clean.sh b/clean.sh index 035489c045..fa7d56fbac 100755 --- a/clean.sh +++ b/clean.sh @@ -114,9 +114,16 @@ sudo rm -f /etc/tgt/conf.d/* cleanup_rpc_backend cleanup_database -# Clean out data, logs and status -LOGDIR=$(dirname "$LOGFILE") -sudo rm -rf $DATA_DIR $LOGDIR $DEST/status +# Clean out data and status +sudo rm -rf $DATA_DIR $DEST/status + +# Clean out the log file and log directories +if [[ -n "$LOGFILE" ]] && [[ -f "$LOGFILE" ]]; then + sudo rm -f $LOGFILE +fi +if [[ -n "$LOGDIR" ]] && [[ -d "$LOGDIR" ]]; then + sudo rm -rf $LOGDIR +fi if [[ -n "$SCREEN_LOGDIR" ]] && [[ -d "$SCREEN_LOGDIR" ]]; then sudo rm -rf $SCREEN_LOGDIR fi From 8768ee39c4775b656304169e18acce3c3c0dd095 Mon Sep 17 00:00:00 2001 From: Pavlo Shchelokovskyy Date: Sun, 12 Apr 2015 10:23:08 +0000 Subject: [PATCH 0152/2941] Ignore *.img images in files folder Some (qcow) images have .img file name extension (e.g. Cirros). Ignore such files in files/ folder too (as we already do with .qcow2 and .gz images). 
Change-Id: Iac8593b65205e25fd3f94244a136c584d9af8eab --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index c6900c881c..afc221ffa6 100644 --- a/.gitignore +++ b/.gitignore @@ -12,6 +12,7 @@ doc/files doc/build files/*.gz files/*.qcow2 +files/*.img files/images files/pip-* files/get-pip.py* From e53e15845144533debc48b32620d420d9f0ece4e Mon Sep 17 00:00:00 2001 From: Daniel Gonzalez Date: Tue, 7 Apr 2015 16:44:54 +0200 Subject: [PATCH 0153/2941] Fix readme for multi-node setup The readme currently states that a compute node in a multi-node setup requires the glance-api service to be enabled. But actually the glance-api service is only required on the controller node where glance-registry is running. Running the glance-api service on a node without glance-registry will even lead to a failure of glance-api, as the glance cache directory will not be created without enabling glance-registry. Change-Id: Ie92533f3333f3fe3e2d747762e60f2f42a233e79 Closes-bug: #1441198 --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 53de970763..faf2d93de7 100644 --- a/README.md +++ b/README.md @@ -333,7 +333,7 @@ will balance VMs across hosts: You can then run many compute nodes, each of which should have a `stackrc` which includes the following, with the IP address of the above controller node: - ENABLED_SERVICES=n-cpu,rabbit,g-api,neutron,q-agt + ENABLED_SERVICES=n-cpu,rabbit,neutron,q-agt SERVICE_HOST=[IP of controller node] MYSQL_HOST=$SERVICE_HOST RABBIT_HOST=$SERVICE_HOST From 9a808922c194929bff88bdc7dca4f8e2431de1a4 Mon Sep 17 00:00:00 2001 From: Peter Stachowski Date: Wed, 8 Apr 2015 19:48:09 +0000 Subject: [PATCH 0154/2941] Use openstack CLI instead of keystone When running './stack.sh' messages are output stating that the keystone CLI has been deprecated. These calls should be replaced to ones utilizing the openstack CLI program instead. 
Documentation examples were also updated to reflect the new syntax. Change-Id: Ib20b8940e317d150e5f6febb618e20bd85d13f8b Closes-Bug: #1441340 --- doc/source/eucarc.rst | 6 +++--- doc/source/guides/multinode-lab.rst | 26 +++++++++++++------------- eucarc | 4 ++-- exercises/client-args.sh | 2 +- lib/ironic | 2 +- stack.sh | 2 +- tools/upload_image.sh | 2 +- 7 files changed, 22 insertions(+), 22 deletions(-) diff --git a/doc/source/eucarc.rst b/doc/source/eucarc.rst index 1284b8883b..c2ecbc6732 100644 --- a/doc/source/eucarc.rst +++ b/doc/source/eucarc.rst @@ -13,7 +13,7 @@ EC2\_URL :: - EC2_URL=$(keystone catalog --service ec2 | awk '/ publicURL / { print $4 }') + EC2_URL=$(openstack catalog show ec2 | awk '/ publicURL: / { print $4 }') S3\_URL Set the S3 endpoint for euca2ools. The endpoint is extracted from @@ -21,14 +21,14 @@ S3\_URL :: - export S3_URL=$(keystone catalog --service s3 | awk '/ publicURL / { print $4 }') + export S3_URL=$(openstack catalog show s3 | awk '/ publicURL: / { print $4 }') EC2\_ACCESS\_KEY, EC2\_SECRET\_KEY Create EC2 credentials for the current tenant:user in Keystone. :: - CREDS=$(keystone ec2-credentials-create) + CREDS=$(openstack ec2 credentials create) export EC2_ACCESS_KEY=$(echo "$CREDS" | awk '/ access / { print $4 }') export EC2_SECRET_KEY=$(echo "$CREDS" | awk '/ secret / { print $4 }') diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst index ff81c93975..d963243fa8 100644 --- a/doc/source/guides/multinode-lab.rst +++ b/doc/source/guides/multinode-lab.rst @@ -229,10 +229,10 @@ Additional Users ---------------- DevStack creates two OpenStack users (``admin`` and ``demo``) and two -tenants (also ``admin`` and ``demo``). ``admin`` is exactly what it +projects (also ``admin`` and ``demo``). ``admin`` is exactly what it sounds like, a privileged administrative account that is a member of -both the ``admin`` and ``demo`` tenants. 
``demo`` is a normal user -account that is only a member of the ``demo`` tenant. Creating +both the ``admin`` and ``demo`` projects. ``demo`` is a normal user +account that is only a member of the ``demo`` project. Creating additional OpenStack users can be done through the dashboard, sometimes it is easier to do them in bulk from a script, especially since they get blown away every time ``stack.sh`` runs. The following steps are ripe @@ -243,21 +243,21 @@ for scripting: # Get admin creds . openrc admin admin - # List existing tenants - keystone tenant-list + # List existing projects + openstack project list # List existing users - keystone user-list + openstack user list - # Add a user and tenant + # Add a user and project NAME=bob PASSWORD=BigSecrete - TENANT=$NAME - keystone tenant-create --name=$NAME - keystone user-create --name=$NAME --pass=$PASSWORD - keystone user-role-add --user-id= --tenant-id= --role-id= - # member-role-id comes from the existing member role created by stack.sh - # keystone role-list + PROJECT=$NAME + openstack project create $PROJECT + openstack user create $NAME --password=$PASSWORD --project $PROJECT + openstack role add Member --user $NAME --project $PROJECT + # The Member role is created by stack.sh + # openstack role list Swift ----- diff --git a/eucarc b/eucarc index 343f4ccde2..1e672bd932 100644 --- a/eucarc +++ b/eucarc @@ -19,7 +19,7 @@ RC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd) source $RC_DIR/openrc # Set the ec2 url so euca2ools works -export EC2_URL=$(keystone catalog --service ec2 | awk '/ publicURL / { print $4 }') +export EC2_URL=$(openstack catalog show ec2 | awk '/ publicURL: / { print $4 }') # Create EC2 credentials for the current user CREDS=$(openstack ec2 credentials create) @@ -29,7 +29,7 @@ export EC2_SECRET_KEY=$(echo "$CREDS" | awk '/ secret / { print $4 }') # Euca2ools Certificate stuff for uploading bundles # See exercises/bundle.sh to see how to get certs using nova cli 
NOVA_KEY_DIR=${NOVA_KEY_DIR:-$RC_DIR} -export S3_URL=$(keystone catalog --service s3 | awk '/ publicURL / { print $4 }') +export S3_URL=$(openstack catalog show s3 | awk '/ publicURL: / { print $4 }') export EC2_USER_ID=42 # nova does not use user id, but bundling requires it export EC2_PRIVATE_KEY=${NOVA_KEY_DIR}/pk.pem export EC2_CERT=${NOVA_KEY_DIR}/cert.pem diff --git a/exercises/client-args.sh b/exercises/client-args.sh index 2f85d98d8f..c33ef44e9a 100755 --- a/exercises/client-args.sh +++ b/exercises/client-args.sh @@ -69,7 +69,7 @@ if [[ "$ENABLED_SERVICES" =~ "key" ]]; then STATUS_KEYSTONE="Skipped" else echo -e "\nTest Keystone" - if keystone $TENANT_ARG $ARGS catalog --service identity; then + if openstack $TENANT_ARG $ARGS catalog show identity; then STATUS_KEYSTONE="Succeeded" else STATUS_KEYSTONE="Failed" diff --git a/lib/ironic b/lib/ironic index 7afed052e9..4ac0100410 100644 --- a/lib/ironic +++ b/lib/ironic @@ -765,7 +765,7 @@ function upload_baremetal_ironic_deploy { fi fi - local token=$(keystone token-get | grep ' id ' | get_field 2) + local token=$(openstack token issue -c id -f value) die_if_not_set $LINENO token "Keystone fail to get token" # load them into glance diff --git a/stack.sh b/stack.sh index 9ecc49c7ca..af44feb901 100755 --- a/stack.sh +++ b/stack.sh @@ -1173,7 +1173,7 @@ fi # See https://help.ubuntu.com/community/CloudInit for more on ``cloud-init`` if is_service_enabled g-reg; then - TOKEN=$(keystone token-get | grep ' id ' | get_field 2) + TOKEN=$(openstack token issue -c id -f value) die_if_not_set $LINENO TOKEN "Keystone fail to get token" echo_summary "Uploading images" diff --git a/tools/upload_image.sh b/tools/upload_image.sh index 5d23f31b9c..19c6b71976 100755 --- a/tools/upload_image.sh +++ b/tools/upload_image.sh @@ -32,7 +32,7 @@ if [[ -z "$1" ]]; then fi # Get a token to authenticate to glance -TOKEN=$(keystone token-get | grep ' id ' | get_field 2) +TOKEN=$(openstack token issue -c id -f value) die_if_not_set $LINENO 
TOKEN "Keystone fail to get token" # Glance connection info. Note the port must be specified. From e6843e5ea1d97c194536d1fc54e909ef9aa3740c Mon Sep 17 00:00:00 2001 From: ajmiller Date: Sat, 11 Apr 2015 09:52:48 -0700 Subject: [PATCH 0155/2941] clean.sh needs to load plugin settings. Change-Id: Id957f585d2aa93075b138d462d6076d2d70d450e --- clean.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/clean.sh b/clean.sh index 035489c045..78f4cbf500 100755 --- a/clean.sh +++ b/clean.sh @@ -76,6 +76,7 @@ fi # ========== # Phase: clean +load_plugin_settings run_phase clean if [[ -d $TOP_DIR/extras.d ]]; then From c00d2a53136d4d37a519829c4c9cad668fa69a44 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 9 Apr 2015 19:57:13 +1000 Subject: [PATCH 0156/2941] run dstat with run_process It is not clear to me why this can't use run_process? Currently we end up with two log-files both with the same thing - dstat.txt.gz which comes from the "tee" and is symlinked into SCREEN_LOGDIR, so gets picked-up by the gate scripts - screen-dstat.txt.gz which comes from screen_it Change-Id: I00b9e09b8d44f72ff14e69dc6e4a4bd5e2a0439e --- lib/dstat | 22 ++-------------------- 1 file changed, 2 insertions(+), 20 deletions(-) diff --git a/lib/dstat b/lib/dstat index c8faa6578c..4b22752ced 100644 --- a/lib/dstat +++ b/lib/dstat @@ -16,34 +16,16 @@ XTRACE=$(set +o | grep xtrace) set +o xtrace - -# Defaults -# -------- -# for DSTAT logging -DSTAT_FILE=${DSTAT_FILE:-"dstat.log"} - - # start_dstat() - Start running processes, including screen function start_dstat { # A better kind of sysstat, with the top process per time slice DSTAT_OPTS="-tcmndrylpg --top-cpu-adv --top-io-adv" - if [[ -n ${LOGDIR} ]]; then - screen_it dstat "cd $TOP_DIR; dstat $DSTAT_OPTS | tee $LOGDIR/$DSTAT_FILE" - if [[ -n ${SCREEN_LOGDIR} && ${SCREEN_LOGDIR} != ${LOGDIR} ]]; then - # Drop the backward-compat symlink - ln -sf $LOGDIR/$DSTAT_FILE ${SCREEN_LOGDIR}/$DSTAT_FILE - fi - else - screen_it dstat "dstat $DSTAT_OPTS" - 
fi + run_process dstat "dstat $DSTAT_OPTS" } # stop_dstat() stop dstat process function stop_dstat { - # dstat runs as a console, not as a service, and isn't trackable - # via the normal mechanisms for DevStack. So lets just do a - # killall and move on. - killall dstat || /bin/true + stop_process dstat } # Restore xtrace From 5ccbd0ae0fd9d8caace3a9e0533b9c7a2f0ff579 Mon Sep 17 00:00:00 2001 From: Andrew Lazarev Date: Fri, 6 Feb 2015 16:22:12 -0800 Subject: [PATCH 0157/2941] Switching Sahara to https in case of USE_SSL=True Sahara will work over https in case if USE_SSL is set. Note, this patch requires https://review.openstack.org/#/c/145383/ which is not merged yet. Change-Id: I9e0069cfe72323a069a4205ca2f882c7a3ad17e0 Closes-Bug: #1419162 --- lib/sahara | 11 +++++++++++ stack.sh | 2 +- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/lib/sahara b/lib/sahara index 0651b0a633..6a3a5180bf 100644 --- a/lib/sahara +++ b/lib/sahara @@ -33,6 +33,9 @@ SAHARA_DIR=$DEST/sahara SAHARA_CONF_DIR=${SAHARA_CONF_DIR:-/etc/sahara} SAHARA_CONF_FILE=${SAHARA_CONF_DIR}/sahara.conf +if is_ssl_enabled_service "sahara"; then + SAHARA_SERVICE_PROTOCOL="https" +fi SAHARA_SERVICE_HOST=${SAHARA_SERVICE_HOST:-$SERVICE_HOST} SAHARA_SERVICE_PORT=${SAHARA_SERVICE_PORT:-8386} SAHARA_SERVICE_PROTOCOL=${SAHARA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} @@ -165,6 +168,14 @@ function configure_sahara { iniset $SAHARA_CONF_FILE keystone ca_file $SSL_BUNDLE_FILE fi + # Register SSL certificates if provided + if is_ssl_enabled_service sahara; then + ensure_certificates SAHARA + + iniset $SAHARA_CONF_FILE ssl cert_file "$SAHARA_SSL_CERT" + iniset $SAHARA_CONF_FILE ssl key_file "$SAHARA_SSL_KEY" + fi + iniset $SAHARA_CONF_FILE DEFAULT use_syslog $SYSLOG # Format logging diff --git a/stack.sh b/stack.sh index 5cdcbdf9d8..a9d958de5e 100755 --- a/stack.sh +++ b/stack.sh @@ -505,7 +505,7 @@ source $TOP_DIR/lib/rpc_backend check_rpc_backend # Service to enable with SSL if ``USE_SSL`` is True 
-SSL_ENABLED_SERVICES="key,nova,cinder,glance,s-proxy,neutron" +SSL_ENABLED_SERVICES="key,nova,cinder,glance,s-proxy,neutron,sahara" if is_service_enabled tls-proxy && [ "$USE_SSL" == "True" ]; then die $LINENO "tls-proxy and SSL are mutually exclusive" From 0479d37878ee96a1a4f3fc37dce341d782bb6cfd Mon Sep 17 00:00:00 2001 From: Andrew Lazarev Date: Mon, 9 Feb 2015 16:51:25 -0800 Subject: [PATCH 0158/2941] Added support of sahara with tls-proxy service Now devstack will configure tls-proxy for sahara as well as for other openstack services. Change-Id: I7b0f2f0773cd3619a33cac66d40f3d0ce0f5432c Closes-Bug: #1419163 --- lib/sahara | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/lib/sahara b/lib/sahara index 6a3a5180bf..6d4e8648bf 100644 --- a/lib/sahara +++ b/lib/sahara @@ -33,11 +33,12 @@ SAHARA_DIR=$DEST/sahara SAHARA_CONF_DIR=${SAHARA_CONF_DIR:-/etc/sahara} SAHARA_CONF_FILE=${SAHARA_CONF_DIR}/sahara.conf -if is_ssl_enabled_service "sahara"; then +if is_ssl_enabled_service "sahara" || is_service_enabled tls-proxy; then SAHARA_SERVICE_PROTOCOL="https" fi SAHARA_SERVICE_HOST=${SAHARA_SERVICE_HOST:-$SERVICE_HOST} SAHARA_SERVICE_PORT=${SAHARA_SERVICE_PORT:-8386} +SAHARA_SERVICE_PORT_INT=${SAHARA_SERVICE_PORT_INT:-18386} SAHARA_SERVICE_PROTOCOL=${SAHARA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} SAHARA_AUTH_CACHE_DIR=${SAHARA_AUTH_CACHE_DIR:-/var/cache/sahara} @@ -183,6 +184,11 @@ function configure_sahara { setup_colorized_logging $SAHARA_CONF_FILE DEFAULT fi + if is_service_enabled tls-proxy; then + # Set the service port for a proxy to take the original + iniset $SAHARA_CONF DEFAULT port $SAHARA_SERVICE_PORT_INT + fi + recreate_database sahara $SAHARA_BIN_DIR/sahara-db-manage --config-file $SAHARA_CONF_FILE upgrade head } @@ -214,9 +220,26 @@ function sahara_register_images { # start_sahara() - Start running processes, including screen function start_sahara { + local service_port=$SAHARA_SERVICE_PORT + local 
service_protocol=$SAHARA_SERVICE_PROTOCOL + if is_service_enabled tls-proxy; then + service_port=$SAHARA_SERVICE_PORT_INT + service_protocol="http" + fi + run_process sahara "$SAHARA_BIN_DIR/sahara-all --config-file $SAHARA_CONF_FILE" run_process sahara-api "$SAHARA_BIN_DIR/sahara-api --config-file $SAHARA_CONF_FILE" run_process sahara-eng "$SAHARA_BIN_DIR/sahara-engine --config-file $SAHARA_CONF_FILE" + + echo "Waiting for Sahara to start..." + if ! wait_for_service $SERVICE_TIMEOUT $service_protocol://$SAHARA_SERVICE_HOST:$service_port; then + die $LINENO "Sahara did not start" + fi + + # Start proxies if enabled + if is_service_enabled tls-proxy; then + start_tls_proxy '*' $SAHARA_SERVICE_PORT $SAHARA_SERVICE_HOST $SAHARA_SERVICE_PORT_INT & + fi } # stop_sahara() - Stop running processes From 73af846ca064f214828c9833ab83561be53a1be4 Mon Sep 17 00:00:00 2001 From: Thiago Paiva Date: Tue, 14 Apr 2015 16:57:22 -0300 Subject: [PATCH 0159/2941] Fixing n-crt removal from stackrc The commit 279cfe75198c723519f1fb361b2bff3c641c6cef removed the n-crt service from the default devstack setup. As such, the stack.sh script begun to thrown the following error when trying to "nova x509-create-cert": ERROR (ClientException): The server has either erred or is incapable of performing the requested operation. (HTTP 500) This patches reintroduces the n-crt as a default service. Change-Id: Id9695a37e1c6df567f2c86baa4475225adcfb0ee Closes-bug: #1441007 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index abedb001d1..0b93d32358 100644 --- a/stackrc +++ b/stackrc @@ -49,7 +49,7 @@ if ! 
isset ENABLED_SERVICES ; then # Keystone - nothing works without keystone ENABLED_SERVICES=key # Nova - services to support libvirt based openstack clouds - ENABLED_SERVICES+=,n-api,n-cpu,n-net,n-cond,n-sch,n-novnc + ENABLED_SERVICES+=,n-api,n-cpu,n-net,n-cond,n-sch,n-novnc,n-crt # Glance services needed for Nova ENABLED_SERVICES+=,g-api,g-reg # Cinder From c39f6405254b100fbfc0f2471bf85b74aafa3282 Mon Sep 17 00:00:00 2001 From: Lianhao Lu Date: Tue, 24 Mar 2015 12:36:00 +0800 Subject: [PATCH 0160/2941] Added ceilometer-agent-ipmi support Enable devstack to start ceilometer-agent-ipmi. Change-Id: Ia5f4c78760415a50f329fc2f1cf2f20be2e3c221 Closes-Bug: #1410614 --- lib/ceilometer | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/lib/ceilometer b/lib/ceilometer index dba92ba542..3a4a4fb167 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -13,6 +13,26 @@ # # enable_service ceilometer-alarm-notifier ceilometer-alarm-evaluator # +# To enable Ceilometer to collect the IPMI based meters, further add to the +# localrc section of local.conf: +# +# enable_service ceilometer-aipmi +# +# NOTE: Currently, there are two ways to get the IPMI based meters in +# OpenStack. One way is to configure Ironic conductor to report those meters +# for the nodes managed by Ironic and to have Ceilometer notification +# agent to collect them. Ironic by default does NOT enable that reporting +# functionality. So in order to do so, users need to set the option of +# conductor.send_sensor_data to true in the ironic.conf configuration file +# for the Ironic conductor service, and also enable the +# ceilometer-anotification service. +# +# The other way is to use Ceilometer ipmi agent only to get the IPMI based +# meters. To avoid duplicated meters, users need to make sure to set the +# option of conductor.send_sensor_data to false in the ironic.conf +# configuration file if the node on which Ceilometer ipmi agent is running +# is also managed by Ironic. 
+# # Several variables set in the localrc section adjust common behaviors # of Ceilometer (see within for additional settings): # @@ -231,6 +251,11 @@ function configure_ceilometer { iniset $CEILOMETER_CONF api pecan_debug "False" _config_ceilometer_apache_wsgi fi + + if is_service_enabled ceilometer-aipmi; then + # Configure rootwrap for the ipmi agent + configure_rootwrap ceilometer $CEILOMETER_BIN_DIR/ceilometer-rootwrap $CEILOMETER_DIR/etc/ceilometer + fi } function configure_mongodb { @@ -327,6 +352,7 @@ function start_ceilometer { run_process ceilometer-acentral "ceilometer-agent-central --config-file $CEILOMETER_CONF" run_process ceilometer-anotification "ceilometer-agent-notification --config-file $CEILOMETER_CONF" run_process ceilometer-collector "ceilometer-collector --config-file $CEILOMETER_CONF" + run_process ceilometer-aipmi "ceilometer-agent-ipmi --config-file $CEILOMETER_CONF" if [[ "$CEILOMETER_USE_MOD_WSGI" == "False" ]]; then run_process ceilometer-api "ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF" @@ -366,7 +392,7 @@ function stop_ceilometer { restart_apache_server fi # Kill the ceilometer screen windows - for serv in ceilometer-acompute ceilometer-acentral ceilometer-anotification ceilometer-collector ceilometer-api ceilometer-alarm-notifier ceilometer-alarm-evaluator; do + for serv in ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification ceilometer-collector ceilometer-api ceilometer-alarm-notifier ceilometer-alarm-evaluator; do stop_process $serv done } From 37b779c3bc40bbd72cb2f55d15d5b2d43a3bb03d Mon Sep 17 00:00:00 2001 From: Lianhao Lu Date: Wed, 15 Apr 2015 10:27:06 +0800 Subject: [PATCH 0161/2941] Acknowledge API_WORKERS in glance-registry Change-Id: Ifaf671439480719255c07673b54dc49c0c2ca4f6 Closes-Bug: #1444231 --- lib/glance | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/glance b/lib/glance index 578c88ab22..a15e1e722a 100644 --- a/lib/glance +++ b/lib/glance 
@@ -100,6 +100,7 @@ function configure_glance { local dburl=`database_connection_url glance` iniset $GLANCE_REGISTRY_CONF DEFAULT sql_connection $dburl iniset $GLANCE_REGISTRY_CONF DEFAULT use_syslog $SYSLOG + iniset $GLANCE_REGISTRY_CONF DEFAULT workers "$API_WORKERS" iniset $GLANCE_REGISTRY_CONF paste_deploy flavor keystone configure_auth_token_middleware $GLANCE_REGISTRY_CONF glance $GLANCE_AUTH_CACHE_DIR/registry if is_service_enabled qpid || [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then From b28b27082c63bc701b4cad8f9c686ba2c1880e6e Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 16 Apr 2015 08:43:43 +1000 Subject: [PATCH 0162/2941] Append command to screenrc after we update it If a group is specified we modify the command to run under "sg". This currently isn't reflected in screenrc so rejoining fails Change-Id: I5c18ba664a6ae9ba9aaa4439a9086bc85085cd75 Closes-Bug: #1444267 --- functions-common | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/functions-common b/functions-common index f8543c1f98..24a462ac21 100644 --- a/functions-common +++ b/functions-common @@ -1235,9 +1235,6 @@ function screen_process { SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} USE_SCREEN=$(trueorfalse True USE_SCREEN) - # Append the process to the screen rc file - screen_rc "$name" "$command" - screen -S $SCREEN_NAME -X screen -t $name local real_logfile="${LOGDIR}/${name}.log.${CURRENT_LOG_TIME}" @@ -1277,6 +1274,10 @@ function screen_process { if [[ -n "$group" ]]; then command="sg $group '$command'" fi + + # Append the process to the screen rc file + screen_rc "$name" "$command" + screen -S $SCREEN_NAME -p $name -X stuff "$command & echo \$! 
>$SERVICE_DIR/$SCREEN_NAME/${name}.pid; fg || echo \"$name failed to start\" | tee \"$SERVICE_DIR/$SCREEN_NAME/${name}.failure\"$NL" } From cae97da9c7786990acb12c43f691483f549a2945 Mon Sep 17 00:00:00 2001 From: Shilla Saebi Date: Thu, 16 Apr 2015 13:58:56 -0400 Subject: [PATCH 0163/2941] doc changes to devstack overview.rst changed to comply with doc conventions When referring to services, use "Compute," "Image service" and "Identity" instead of "nova," "glance," and "keystone." Use the project names like "nova" and "keystone" glance is officially Image service not storage removed extra . Change-Id: I39457c20dc2ede775fe3f3c63077133fbb6c917b --- doc/source/overview.rst | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/doc/source/overview.rst b/doc/source/overview.rst index 23ccf27d0a..d245035a1a 100644 --- a/doc/source/overview.rst +++ b/doc/source/overview.rst @@ -7,7 +7,7 @@ and alternative platforms and support services. That evolution has grown well beyond what was originally intended and the majority of configuration combinations are rarely, if ever, tested. DevStack is not a general OpenStack installer and was never meant to be everything to -everyone.. +everyone. Below is a list of what is specifically is supported (read that as "tested") going forward. 
@@ -58,7 +58,7 @@ Web Server OpenStack Network ----------------- -*Default to Nova Network, optionally use Neutron* +*Defaults to nova network, optionally use neutron* - Nova Network: FlatDHCP - Neutron: A basic configuration approximating the original FlatDHCP @@ -67,10 +67,10 @@ OpenStack Network Services -------- -The default services configured by DevStack are Identity (Keystone), -Object Storage (Swift), Image Storage (Glance), Block Storage (Cinder), -Compute (Nova), Network (Nova), Dashboard (Horizon), Orchestration -(Heat) +The default services configured by DevStack are Identity (keystone), +Object Storage (swift), Image Service (glance), Block Storage (cinder), +Compute (nova), Networking (nova), Dashboard (horizon), Orchestration +(heat) Additional services not included directly in DevStack can be tied in to ``stack.sh`` using the :doc:`plugin mechanism ` to call From dd62293591fd1e822f59754cece645639a4d2d2c Mon Sep 17 00:00:00 2001 From: Wayne Okuma Date: Tue, 31 Mar 2015 00:28:39 -0700 Subject: [PATCH 0164/2941] Catalog Index Service - glance devstack Implements: blueprint catalog-index-service The changes to lib/glance incorporate the new g-search service. The g-search service is optional. To enable it add the following line to devstack/local.conf: enable_service g-search In addition to deploying g-search, the changes will also populate a search type of keystone service and adds in appropriate endpoints. 
Change-Id: I0272d56bc2e50e8174db78bd449f65f60f7f4000 --- lib/glance | 92 ++++++++++++++++++++++++++++++++++++++------ pkg/elasticsearch.sh | 2 + 2 files changed, 83 insertions(+), 11 deletions(-) diff --git a/lib/glance b/lib/glance index 578c88ab22..5a96a61d18 100644 --- a/lib/glance +++ b/lib/glance @@ -49,8 +49,10 @@ GLANCE_CONF_DIR=${GLANCE_CONF_DIR:-/etc/glance} GLANCE_METADEF_DIR=$GLANCE_CONF_DIR/metadefs GLANCE_REGISTRY_CONF=$GLANCE_CONF_DIR/glance-registry.conf GLANCE_API_CONF=$GLANCE_CONF_DIR/glance-api.conf +GLANCE_SEARCH_CONF=$GLANCE_CONF_DIR/glance-search.conf GLANCE_REGISTRY_PASTE_INI=$GLANCE_CONF_DIR/glance-registry-paste.ini GLANCE_API_PASTE_INI=$GLANCE_CONF_DIR/glance-api-paste.ini +GLANCE_SEARCH_PASTE_INI=$GLANCE_CONF_DIR/glance-search-paste.ini GLANCE_CACHE_CONF=$GLANCE_CONF_DIR/glance-cache.conf GLANCE_POLICY_JSON=$GLANCE_CONF_DIR/policy.json GLANCE_SCHEMA_JSON=$GLANCE_CONF_DIR/schema-image.json @@ -67,6 +69,9 @@ GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$GLANCE_SERVICE_HOST:$GLANCE_SERVICE_PORT} GLANCE_SERVICE_PROTOCOL=${GLANCE_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} GLANCE_REGISTRY_PORT=${GLANCE_REGISTRY_PORT:-9191} GLANCE_REGISTRY_PORT_INT=${GLANCE_REGISTRY_PORT_INT:-19191} +GLANCE_SEARCH_PORT=${GLANCE_SEARCH_PORT:-9393} +GLANCE_SEARCH_PORT_INT=${GLANCE_SEARCH_PORT_INT:-19393} +GLANCE_SEARCH_HOSTPORT=${GLANCE_SEARCH_HOSTPORT:-$GLANCE_SERVICE_HOST:$GLANCE_SEARCH_PORT} # Tell Tempest this project is present TEMPEST_SERVICES+=,glance @@ -87,6 +92,10 @@ function cleanup_glance { # kill instances (nova) # delete image files (glance) sudo rm -rf $GLANCE_CACHE_DIR $GLANCE_IMAGE_DIR $GLANCE_AUTH_CACHE_DIR + + if is_service_enabled g-search; then + ${TOP_DIR}/pkg/elasticsearch.sh stop + fi } # configure_glance() - Set config files, create data dirs, etc @@ -218,14 +227,38 @@ function configure_glance { iniset $GLANCE_API_CONF DEFAULT cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/%(project_id)s" iniset $GLANCE_CACHE_CONF DEFAULT 
cinder_endpoint_template "https://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/%(project_id)s" fi + + # Configure search + if is_service_enabled g-search; then + cp $GLANCE_DIR/etc/glance-search.conf $GLANCE_SEARCH_CONF + iniset $GLANCE_SEARCH_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + inicomment $GLANCE_SEARCH_CONF DEFAULT log_file + iniset $GLANCE_SEARCH_CONF DEFAULT use_syslog $SYSLOG + iniset $GLANCE_SEARCH_CONF DEFAULT sql_connection $dburl + iniset $GLANCE_SEARCH_CONF paste_deploy flavor keystone + configure_auth_token_middleware $GLANCE_SEARCH_CONF glance $GLANCE_AUTH_CACHE_DIR/search + + if is_service_enabled tls-proxy; then + iniset $GLANCE_SEARCH_CONF DEFAULT bind_port $GLANCE_SEARCH_PORT_INT + fi + # Register SSL certificates if provided + if is_ssl_enabled_service glance; then + ensure_certificates GLANCE + iniset $GLANCE_SEARCH_CONF DEFAULT cert_file "$GLANCE_SSL_CERT" + iniset $GLANCE_SEARCH_CONF DEFAULT key_file "$GLANCE_SSL_KEY" + fi + + cp $GLANCE_DIR/etc/glance-search-paste.ini $GLANCE_SEARCH_PASTE_INI + fi } # create_glance_accounts() - Set up common required glance accounts -# Project User Roles -# ------------------------------------------------------------------ -# SERVICE_TENANT_NAME glance service -# SERVICE_TENANT_NAME glance-swift ResellerAdmin (if Swift is enabled) +# Project User Roles +# --------------------------------------------------------------------- +# SERVICE_TENANT_NAME glance service +# SERVICE_TENANT_NAME glance-swift ResellerAdmin (if Swift is enabled) +# SERVICE_TENANT_NAME glance-search search (if Search is enabled) function create_glance_accounts { if is_service_enabled g-api; then @@ -251,13 +284,27 @@ function create_glance_accounts { "$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT" fi fi + + # Add glance-search service and endpoints + if is_service_enabled g-search; then + if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then + local glance_search_service=$(get_or_create_service "glance-search" \ + "search" "EXPERIMENTAL - 
Glance Graffiti Search Service") + + get_or_create_endpoint $glance_search_service \ + "$REGION_NAME" \ + "$GLANCE_SERVICE_PROTOCOL://$GLANCE_SEARCH_HOSTPORT" \ + "$GLANCE_SERVICE_PROTOCOL://$GLANCE_SEARCH_HOSTPORT" \ + "$GLANCE_SERVICE_PROTOCOL://$GLANCE_SEARCH_HOSTPORT" + fi + fi } # create_glance_cache_dir() - Part of the init_glance() process function create_glance_cache_dir { # Create cache dir - sudo install -d -o $STACK_USER $GLANCE_AUTH_CACHE_DIR/api $GLANCE_AUTH_CACHE_DIR/registry - rm -f $GLANCE_AUTH_CACHE_DIR/api/* $GLANCE_AUTH_CACHE_DIR/registry/* + sudo install -d -o $STACK_USER $GLANCE_AUTH_CACHE_DIR/api $GLANCE_AUTH_CACHE_DIR/registry $GLANCE_AUTH_CACHE_DIR/search + rm -f $GLANCE_AUTH_CACHE_DIR/api/* $GLANCE_AUTH_CACHE_DIR/registry/* $GLANCE_AUTH_CACHE_DIR/search/* } # init_glance() - Initialize databases, etc. @@ -280,6 +327,12 @@ function init_glance { $GLANCE_BIN_DIR/glance-manage db_load_metadefs create_glance_cache_dir + + # Init glance search by exporting found metadefs/images to elasticsearch + if is_service_enabled g-search; then + ${TOP_DIR}/pkg/elasticsearch.sh start + $GLANCE_BIN_DIR/glance-index + fi } # install_glanceclient() - Collect source and prepare @@ -301,11 +354,13 @@ function install_glance { fi git_clone $GLANCE_REPO $GLANCE_DIR $GLANCE_BRANCH - setup_develop $GLANCE_DIR - if is_service_enabled g-graffiti; then + + if is_service_enabled g-search; then ${TOP_DIR}/pkg/elasticsearch.sh download ${TOP_DIR}/pkg/elasticsearch.sh install fi + + setup_develop $GLANCE_DIR } # start_glance() - Start running processes, including screen @@ -314,18 +369,29 @@ function start_glance { if is_service_enabled tls-proxy; then start_tls_proxy '*' $GLANCE_SERVICE_PORT $GLANCE_SERVICE_HOST $GLANCE_SERVICE_PORT_INT & start_tls_proxy '*' $GLANCE_REGISTRY_PORT $GLANCE_SERVICE_HOST $GLANCE_REGISTRY_PORT_INT & + + # Handle g-search + if is_service_enabled g-search; then + start_tls_proxy '*' $GLANCE_SEARCH_PORT $GLANCE_SERVICE_HOST 
$GLANCE_SEARCH_PORT_INT & + fi fi run_process g-reg "$GLANCE_BIN_DIR/glance-registry --config-file=$GLANCE_CONF_DIR/glance-registry.conf" run_process g-api "$GLANCE_BIN_DIR/glance-api --config-file=$GLANCE_CONF_DIR/glance-api.conf" - if is_service_enabled g-graffiti; then - ${TOP_DIR}/pkg/elasticsearch.sh start - fi echo "Waiting for g-api ($GLANCE_HOSTPORT) to start..." if ! wait_for_service $SERVICE_TIMEOUT $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT; then die $LINENO "g-api did not start" fi + + # Start g-search after g-reg/g-api + if is_service_enabled g-search; then + run_process g-search "$GLANCE_BIN_DIR/glance-search --config-file=$GLANCE_CONF_DIR/glance-search.conf" + echo "Waiting for g-search ($GLANCE_SEARCH_HOSTPORT) to start..." + if ! wait_for_service $SERVICE_TIMEOUT $GLANCE_SERVICE_PROTOCOL://$GLANCE_SEARCH_HOSTPORT; then + die $LINENO "g-search did not start" + fi + fi } # stop_glance() - Stop running processes @@ -333,6 +399,10 @@ function stop_glance { # Kill the Glance screen windows stop_process g-api stop_process g-reg + + if is_service_enabled g-search; then + stop_process g-search + fi } # Restore xtrace diff --git a/pkg/elasticsearch.sh b/pkg/elasticsearch.sh index 29dc22fb88..79f67a0179 100755 --- a/pkg/elasticsearch.sh +++ b/pkg/elasticsearch.sh @@ -7,6 +7,8 @@ TOP_DIR=$(cd $(dirname "$0")/.. && pwd) FILES=$TOP_DIR/files source $TOP_DIR/functions +DEST=${DEST:-/opt/stack} +source $TOP_DIR/lib/infra # Package source and version, all pkg files are expected to have # something like this, as well as a way to override them. From 1cb809d8ef81931ea0b1f15619b7e830281f2556 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 17 Apr 2015 12:55:38 +1000 Subject: [PATCH 0165/2941] Add "passed" and "failed" functions Add two generic "passed" and "failed" functions to the unittest helper. Also keep a count of passed and failed tests. Later changes will use these functions to ensure they exit with a correct return code. 
Change-Id: I8574dcb1447b04fcda3d72df0bf8605cf7488d3c --- tests/unittest.sh | 34 ++++++++++++++++++++++++++++++---- 1 file changed, 30 insertions(+), 4 deletions(-) diff --git a/tests/unittest.sh b/tests/unittest.sh index 435cc3a58b..69f19b7dae 100644 --- a/tests/unittest.sh +++ b/tests/unittest.sh @@ -14,8 +14,30 @@ # we always start with no errors ERROR=0 +PASS=0 FAILED_FUNCS="" +function passed { + local lineno=$(caller 0 | awk '{print $1}') + local function=$(caller 0 | awk '{print $2}') + local msg="$1" + if [ -z "$msg" ]; then + msg="OK" + fi + PASS=$((PASS+1)) + echo $function:L$lineno $msg +} + +function failed { + local lineno=$(caller 0 | awk '{print $1}') + local function=$(caller 0 | awk '{print $2}') + local msg="$1" + FAILED_FUNCS+="$function:L$lineno\n" + echo "ERROR: $function:L$lineno!" + echo " $msg" + ERROR=$((ERROR+1)) +} + function assert_equal { local lineno=`caller 0 | awk '{print $1}'` local function=`caller 0 | awk '{print $2}'` @@ -24,16 +46,20 @@ function assert_equal { FAILED_FUNCS+="$function:L$lineno\n" echo "ERROR: $1 != $2 in $function:L$lineno!" 
echo " $msg" - ERROR=1 + ERROR=$((ERROR+1)) else + PASS=$((PASS+1)) echo "$function:L$lineno - ok" fi } function report_results { - if [[ $ERROR -eq 1 ]]; then - echo "Tests FAILED" - echo $FAILED_FUNCS + echo "$PASS Tests PASSED" + if [[ $ERROR -gt 1 ]]; then + echo + echo "The following $ERROR tests FAILED" + echo -e "$FAILED_FUNCS" + echo "---" exit 1 fi } From f56348bcb2c736b9e66ebfe20c1f118cfc96b9f6 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 17 Apr 2015 12:58:56 +1000 Subject: [PATCH 0166/2941] Use unittest help in test_ip.sh Use the unittest helper to track test runs and correctly exit with a failure code if there is a problem Change-Id: Ie62f354a8cd3b8fd5986e6943a073f7955fb55ba --- tests/test_ip.sh | 79 +++++++++++++++++++++++++----------------------- 1 file changed, 41 insertions(+), 38 deletions(-) diff --git a/tests/test_ip.sh b/tests/test_ip.sh index add8d1ae65..c53e80de36 100755 --- a/tests/test_ip.sh +++ b/tests/test_ip.sh @@ -8,108 +8,111 @@ TOP=$(cd $(dirname "$0")/.. 
&& pwd) # Import common functions source $TOP/functions +source $TOP/tests/unittest.sh echo "Testing IP addr functions" if [[ $(cidr2netmask 4) == 240.0.0.0 ]]; then - echo "cidr2netmask(): /4...OK" + passed "cidr2netmask(): /4...OK" else - echo "cidr2netmask(): /4...failed" + failed "cidr2netmask(): /4...failed" fi if [[ $(cidr2netmask 8) == 255.0.0.0 ]]; then - echo "cidr2netmask(): /8...OK" + passed "cidr2netmask(): /8...OK" else - echo "cidr2netmask(): /8...failed" + failed "cidr2netmask(): /8...failed" fi if [[ $(cidr2netmask 12) == 255.240.0.0 ]]; then - echo "cidr2netmask(): /12...OK" + passed "cidr2netmask(): /12...OK" else - echo "cidr2netmask(): /12...failed" + failed "cidr2netmask(): /12...failed" fi if [[ $(cidr2netmask 16) == 255.255.0.0 ]]; then - echo "cidr2netmask(): /16...OK" + passed "cidr2netmask(): /16...OK" else - echo "cidr2netmask(): /16...failed" + failed "cidr2netmask(): /16...failed" fi if [[ $(cidr2netmask 20) == 255.255.240.0 ]]; then - echo "cidr2netmask(): /20...OK" + passed "cidr2netmask(): /20...OK" else - echo "cidr2netmask(): /20...failed" + failed "cidr2netmask(): /20...failed" fi if [[ $(cidr2netmask 24) == 255.255.255.0 ]]; then - echo "cidr2netmask(): /24...OK" + passed "cidr2netmask(): /24...OK" else - echo "cidr2netmask(): /24...failed" + failed "cidr2netmask(): /24...failed" fi if [[ $(cidr2netmask 28) == 255.255.255.240 ]]; then - echo "cidr2netmask(): /28...OK" + passed "cidr2netmask(): /28...OK" else - echo "cidr2netmask(): /28...failed" + failed "cidr2netmask(): /28...failed" fi if [[ $(cidr2netmask 30) == 255.255.255.252 ]]; then - echo "cidr2netmask(): /30...OK" + passed "cidr2netmask(): /30...OK" else - echo "cidr2netmask(): /30...failed" + failed "cidr2netmask(): /30...failed" fi if [[ $(cidr2netmask 32) == 255.255.255.255 ]]; then - echo "cidr2netmask(): /32...OK" + passed "cidr2netmask(): /32...OK" else - echo "cidr2netmask(): /32...failed" + failed "cidr2netmask(): /32...failed" fi if [[ $(maskip 169.254.169.254 
240.0.0.0) == 160.0.0.0 ]]; then - echo "maskip(): /4...OK" + passed "maskip(): /4...OK" else - echo "maskip(): /4...failed" + failed "maskip(): /4...failed" fi if [[ $(maskip 169.254.169.254 255.0.0.0) == 169.0.0.0 ]]; then - echo "maskip(): /8...OK" + passed "maskip(): /8...OK" else - echo "maskip(): /8...failed" + failed "maskip(): /8...failed" fi if [[ $(maskip 169.254.169.254 255.240.0.0) == 169.240.0.0 ]]; then - echo "maskip(): /12...OK" + passed "maskip(): /12...OK" else - echo "maskip(): /12...failed" + failed "maskip(): /12...failed" fi if [[ $(maskip 169.254.169.254 255.255.0.0) == 169.254.0.0 ]]; then - echo "maskip(): /16...OK" + passed "maskip(): /16...OK" else - echo "maskip(): /16...failed" + failed "maskip(): /16...failed" fi if [[ $(maskip 169.254.169.254 255.255.240.0) == 169.254.160.0 ]]; then - echo "maskip(): /20...OK" + passed "maskip(): /20...OK" else - echo "maskip(): /20...failed" + failed "maskip(): /20...failed" fi if [[ $(maskip 169.254.169.254 255.255.255.0) == 169.254.169.0 ]]; then - echo "maskip(): /24...OK" + passed "maskip(): /24...OK" else - echo "maskip(): /24...failed" + failed "maskip(): /24...failed" fi if [[ $(maskip 169.254.169.254 255.255.255.240) == 169.254.169.240 ]]; then - echo "maskip(): /28...OK" + passed "maskip(): /28...OK" else - echo "maskip(): /28...failed" + failed "maskip(): /28...failed" fi if [[ $(maskip 169.254.169.254 255.255.255.255) == 169.254.169.254 ]]; then - echo "maskip(): /32...OK" + passed "maskip(): /32...OK" else - echo "maskip(): /32...failed" + failed "maskip(): /32...failed" fi for mask in 8 12 16 20 24 26 28; do echo -n "address_in_net(): in /$mask..." if address_in_net 10.10.10.1 10.10.10.0/$mask; then - echo "OK" + passed "OK" else - echo "address_in_net() failed on /$mask" + failed "address_in_net() failed on /$mask" fi echo -n "address_in_net(): not in /$mask..." if ! 
address_in_net 10.10.10.1 11.11.11.0/$mask; then - echo "OK" + passed "OK" else - echo "address_in_net() failed on /$mask" + failed "address_in_net() failed on /$mask" fi done + +report_results From fcdca05de55b9ecec2b66f0cccb88ee01beebbd0 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 17 Apr 2015 13:02:49 +1000 Subject: [PATCH 0167/2941] Fix return of test_ini_config & test_meta_config Convert test_ini_config to use the "passed / failed" functions in unittest.sh. test_meta_config wraps everything into a function; it's not worth unrolling this so just make sure it exits with non-zero if a test fails. Change-Id: I9e9883fdad42358255383eede9121b1d361799c8 --- tests/test_ini_config.sh | 99 ++++++++++++++++++++------------------- tests/test_meta_config.sh | 1 + 2 files changed, 53 insertions(+), 47 deletions(-) diff --git a/tests/test_ini_config.sh b/tests/test_ini_config.sh index 4a0ae33aeb..b2529ac4c9 100755 --- a/tests/test_ini_config.sh +++ b/tests/test_ini_config.sh @@ -7,6 +7,9 @@ TOP=$(cd $(dirname "$0")/.. 
&& pwd) # Import config functions source $TOP/inc/ini-config +source $TOP/tests/unittest.sh + +set -e echo "Testing INI functions" @@ -70,86 +73,86 @@ echo -n "iniset: test missing attribute argument: " iniset test.ini aaa NO_ATTRIBUTE=$(cat test.ini) if [[ "$BEFORE" == "$NO_ATTRIBUTE" ]]; then - echo "OK" + passed else - echo "failed" + failed "failed" fi echo -n "iniset: test missing section argument: " iniset test.ini NO_SECTION=$(cat test.ini) if [[ "$BEFORE" == "$NO_SECTION" ]]; then - echo "OK" + passed else - echo "failed" + failed "failed" fi # Test with spaces VAL=$(iniget test.ini aaa handlers) if [[ "$VAL" == "aa, bb" ]]; then - echo "OK: $VAL" + passed "OK: $VAL" else - echo "iniget failed: $VAL" + failed "iniget failed: $VAL" fi iniset test.ini aaa handlers "11, 22" VAL=$(iniget test.ini aaa handlers) if [[ "$VAL" == "11, 22" ]]; then - echo "OK: $VAL" + passed "OK: $VAL" else - echo "iniget failed: $VAL" + failed "iniget failed: $VAL" fi # Test with spaces in section header VAL=$(iniget test.ini " ccc " spaces) if [[ "$VAL" == "yes" ]]; then - echo "OK: $VAL" + passed "OK: $VAL" else - echo "iniget failed: $VAL" + failed "iniget failed: $VAL" fi iniset test.ini "b b" opt_ion 42 VAL=$(iniget test.ini "b b" opt_ion) if [[ "$VAL" == "42" ]]; then - echo "OK: $VAL" + passed "OK: $VAL" else - echo "iniget failed: $VAL" + failed "iniget failed: $VAL" fi # Test without spaces, end of file VAL=$(iniget test.ini bbb handlers) if [[ "$VAL" == "ee,ff" ]]; then - echo "OK: $VAL" + passed "OK: $VAL" else - echo "iniget failed: $VAL" + failed "iniget failed: $VAL" fi iniset test.ini bbb handlers "33,44" VAL=$(iniget test.ini bbb handlers) if [[ "$VAL" == "33,44" ]]; then - echo "OK: $VAL" + passed "OK: $VAL" else - echo "iniget failed: $VAL" + failed "iniget failed: $VAL" fi # test empty option if ini_has_option test.ini ddd empty; then - echo "OK: ddd.empty present" + passed "OK: ddd.empty present" else - echo "ini_has_option failed: ddd.empty not found" + failed 
"ini_has_option failed: ddd.empty not found" fi # test non-empty option if ini_has_option test.ini bbb handlers; then - echo "OK: bbb.handlers present" + passed "OK: bbb.handlers present" else - echo "ini_has_option failed: bbb.handlers not found" + failed "ini_has_option failed: bbb.handlers not found" fi # test changing empty option @@ -157,9 +160,9 @@ iniset test.ini ddd empty "42" VAL=$(iniget test.ini ddd empty) if [[ "$VAL" == "42" ]]; then - echo "OK: $VAL" + passed "OK: $VAL" else - echo "iniget failed: $VAL" + failed "iniget failed: $VAL" fi # test pipe in option @@ -167,9 +170,9 @@ iniset test.ini aaa handlers "a|b" VAL=$(iniget test.ini aaa handlers) if [[ "$VAL" == "a|b" ]]; then - echo "OK: $VAL" + passed "OK: $VAL" else - echo "iniget failed: $VAL" + failed "iniget failed: $VAL" fi # test space in option @@ -177,51 +180,51 @@ iniset test.ini aaa handlers "a b" VAL="$(iniget test.ini aaa handlers)" if [[ "$VAL" == "a b" ]]; then - echo "OK: $VAL" + passed "OK: $VAL" else - echo "iniget failed: $VAL" + failed "iniget failed: $VAL" fi # Test section not exist VAL=$(iniget test.ini zzz handlers) if [[ -z "$VAL" ]]; then - echo "OK: zzz not present" + passed "OK: zzz not present" else - echo "iniget failed: $VAL" + failed "iniget failed: $VAL" fi iniset test.ini zzz handlers "999" VAL=$(iniget test.ini zzz handlers) if [[ -n "$VAL" ]]; then - echo "OK: zzz not present" + passed "OK: zzz not present" else - echo "iniget failed: $VAL" + failed "iniget failed: $VAL" fi # Test option not exist VAL=$(iniget test.ini aaa debug) if [[ -z "$VAL" ]]; then - echo "OK aaa.debug not present" + passed "OK aaa.debug not present" else - echo "iniget failed: $VAL" + failed "iniget failed: $VAL" fi if ! 
ini_has_option test.ini aaa debug; then - echo "OK aaa.debug not present" + passed "OK aaa.debug not present" else - echo "ini_has_option failed: aaa.debug" + failed "ini_has_option failed: aaa.debug" fi iniset test.ini aaa debug "999" VAL=$(iniget test.ini aaa debug) if [[ -n "$VAL" ]]; then - echo "OK aaa.debug present" + passed "OK aaa.debug present" else - echo "iniget failed: $VAL" + failed "iniget failed: $VAL" fi # Test comments @@ -230,9 +233,9 @@ inicomment test.ini aaa handlers VAL=$(iniget test.ini aaa handlers) if [[ -z "$VAL" ]]; then - echo "OK" + passed "OK" else - echo "inicomment failed: $VAL" + failed "inicomment failed: $VAL" fi # Test multiple line iniset/iniget @@ -242,25 +245,25 @@ VAL=$(iniget_multiline test.ini eee multi) if [[ "$VAL" == "bar1 bar2" ]]; then echo "OK: iniset_multiline" else - echo "iniset_multiline failed: $VAL" + failed "iniset_multiline failed: $VAL" fi # Test iniadd with exiting values iniadd test.ini eee multi bar3 VAL=$(iniget_multiline test.ini eee multi) if [[ "$VAL" == "bar1 bar2 bar3" ]]; then - echo "OK: iniadd" + passed "OK: iniadd" else - echo "iniadd failed: $VAL" + failed "iniadd failed: $VAL" fi # Test iniadd with non-exiting values iniadd test.ini eee non-multi foobar1 foobar2 VAL=$(iniget_multiline test.ini eee non-multi) if [[ "$VAL" == "foobar1 foobar2" ]]; then - echo "OK: iniadd with non-exiting value" + passed "OK: iniadd with non-exiting value" else - echo "iniadd with non-exsting failed: $VAL" + failed "iniadd with non-exsting failed: $VAL" fi # Test inidelete @@ -276,20 +279,22 @@ for x in $del_cases; do inidelete test.ini $x a VAL=$(iniget_multiline test.ini $x a) if [ -z "$VAL" ]; then - echo "OK: inidelete $x" + passed "OK: inidelete $x" else - echo "inidelete $x failed: $VAL" + failed "inidelete $x failed: $VAL" fi if [ "$x" = "del_separate_options" -o \ "$x" = "del_missing_option" -o \ "$x" = "del_missing_option_multi" ]; then VAL=$(iniget_multiline test.ini $x b) if [ "$VAL" = "c" -o "$VAL" = 
"c d" ]; then - echo "OK: inidelete other_options $x" + passed "OK: inidelete other_options $x" else - echo "inidelete other_option $x failed: $VAL" + failed "inidelete other_option $x failed: $VAL" fi fi done rm test.ini + +report_results diff --git a/tests/test_meta_config.sh b/tests/test_meta_config.sh index 9d65280b8c..3ec65bf9b0 100755 --- a/tests/test_meta_config.sh +++ b/tests/test_meta_config.sh @@ -17,6 +17,7 @@ function check_result { echo "OK" else echo -e "failed: $actual != $expected\n" + exit 1 fi } From 9b0ebc44f413edac87e52d23e8852ca7c52cb091 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 17 Apr 2015 13:06:47 +1000 Subject: [PATCH 0168/2941] Move function.sh to test_functions.sh run_tests.sh runs tests starting with test_* The existing test_functions.sh is really testing true/false. Move that to test_truefalse.sh Then move functions.sh to test_functions.sh. This will ensure it is run during unit testing from run-tests.sh Change-Id: I959ac38c946da1fb47458b8c4f09157f74f0e644 --- tests/functions.sh | 215 ------------------------------------- tests/test_functions.sh | 229 +++++++++++++++++++++++++++++++++++----- tests/test_truefalse.sh | 34 ++++++ 3 files changed, 239 insertions(+), 239 deletions(-) delete mode 100755 tests/functions.sh create mode 100755 tests/test_truefalse.sh diff --git a/tests/functions.sh b/tests/functions.sh deleted file mode 100755 index 126080f1e3..0000000000 --- a/tests/functions.sh +++ /dev/null @@ -1,215 +0,0 @@ -#!/usr/bin/env bash - -# Tests for DevStack functions - -TOP=$(cd $(dirname "$0")/.. && pwd) - -# Import common functions -source $TOP/functions - -# Import configuration -source $TOP/openrc - - -echo "Testing die_if_not_set()" - -bash -cx "source $TOP/functions; X=`echo Y && true`; die_if_not_set X 'not OK'" -if [[ $? != 0 ]]; then - echo "die_if_not_set [X='Y' true] Failed" -else - echo 'OK' -fi - -bash -cx "source $TOP/functions; X=`true`; die_if_not_set X 'OK'" -if [[ $? 
= 0 ]]; then - echo "die_if_not_set [X='' true] Failed" -fi - -bash -cx "source $TOP/functions; X=`echo Y && false`; die_if_not_set X 'not OK'" -if [[ $? != 0 ]]; then - echo "die_if_not_set [X='Y' false] Failed" -else - echo 'OK' -fi - -bash -cx "source $TOP/functions; X=`false`; die_if_not_set X 'OK'" -if [[ $? = 0 ]]; then - echo "die_if_not_set [X='' false] Failed" -fi - - -# Enabling/disabling services - -echo "Testing enable_service()" - -function test_enable_service { - local start="$1" - local add="$2" - local finish="$3" - - ENABLED_SERVICES="$start" - enable_service $add - if [ "$ENABLED_SERVICES" = "$finish" ]; then - echo "OK: $start + $add -> $ENABLED_SERVICES" - else - echo "changing $start to $finish with $add failed: $ENABLED_SERVICES" - fi -} - -test_enable_service '' a 'a' -test_enable_service 'a' b 'a,b' -test_enable_service 'a,b' c 'a,b,c' -test_enable_service 'a,b' c 'a,b,c' -test_enable_service 'a,b,' c 'a,b,c' -test_enable_service 'a,b' c,d 'a,b,c,d' -test_enable_service 'a,b' "c d" 'a,b,c,d' -test_enable_service 'a,b,c' c 'a,b,c' - -test_enable_service 'a,b,-c' c 'a,b' -test_enable_service 'a,b,c' -c 'a,b' - -function test_disable_service { - local start="$1" - local del="$2" - local finish="$3" - - ENABLED_SERVICES="$start" - disable_service "$del" - if [ "$ENABLED_SERVICES" = "$finish" ]; then - echo "OK: $start - $del -> $ENABLED_SERVICES" - else - echo "changing $start to $finish with $del failed: $ENABLED_SERVICES" - fi -} - -echo "Testing disable_service()" -test_disable_service 'a,b,c' a 'b,c' -test_disable_service 'a,b,c' b 'a,c' -test_disable_service 'a,b,c' c 'a,b' - -test_disable_service 'a,b,c' a 'b,c' -test_disable_service 'b,c' b 'c' -test_disable_service 'c' c '' -test_disable_service '' d '' - -test_disable_service 'a,b,c,' c 'a,b' -test_disable_service 'a,b' c 'a,b' - - -echo "Testing disable_all_services()" -ENABLED_SERVICES=a,b,c -disable_all_services - -if [[ -z "$ENABLED_SERVICES" ]]; then - echo "OK" -else - echo 
"disabling all services FAILED: $ENABLED_SERVICES" -fi - -echo "Testing disable_negated_services()" - - -function test_disable_negated_services { - local start="$1" - local finish="$2" - - ENABLED_SERVICES="$start" - disable_negated_services - if [ "$ENABLED_SERVICES" = "$finish" ]; then - echo "OK: $start + $add -> $ENABLED_SERVICES" - else - echo "changing $start to $finish failed: $ENABLED_SERVICES" - fi -} - -test_disable_negated_services '-a' '' -test_disable_negated_services '-a,a' '' -test_disable_negated_services '-a,-a' '' -test_disable_negated_services 'a,-a' '' -test_disable_negated_services 'b,a,-a' 'b' -test_disable_negated_services 'a,b,-a' 'b' -test_disable_negated_services 'a,-a,b' 'b' - - -echo "Testing is_package_installed()" - -if [[ -z "$os_PACKAGE" ]]; then - GetOSVersion -fi - -if [[ "$os_PACKAGE" = "deb" ]]; then - is_package_installed dpkg - VAL=$? -elif [[ "$os_PACKAGE" = "rpm" ]]; then - is_package_installed rpm - VAL=$? -else - VAL=1 -fi -if [[ "$VAL" -eq 0 ]]; then - echo "OK" -else - echo "is_package_installed() on existing package failed" -fi - -if [[ "$os_PACKAGE" = "deb" ]]; then - is_package_installed dpkg bash - VAL=$? -elif [[ "$os_PACKAGE" = "rpm" ]]; then - is_package_installed rpm bash - VAL=$? -else - VAL=1 -fi -if [[ "$VAL" -eq 0 ]]; then - echo "OK" -else - echo "is_package_installed() on more than one existing package failed" -fi - -is_package_installed zzzZZZzzz -VAL=$? -if [[ "$VAL" -ne 0 ]]; then - echo "OK" -else - echo "is_package_installed() on non-existing package failed" -fi - -# test against removed package...was a bug on Ubuntu -if is_ubuntu; then - PKG=cowsay - if ! 
(dpkg -s $PKG >/dev/null 2>&1); then - # it was never installed...set up the condition - sudo apt-get install -y cowsay >/dev/null 2>&1 - fi - if (dpkg -s $PKG >/dev/null 2>&1); then - # remove it to create the 'un' status - sudo dpkg -P $PKG >/dev/null 2>&1 - fi - - # now test the installed check on a deleted package - is_package_installed $PKG - VAL=$? - if [[ "$VAL" -ne 0 ]]; then - echo "OK" - else - echo "is_package_installed() on deleted package failed" - fi -fi - -# test isset function -echo "Testing isset()" -you_should_not_have_this_variable=42 - -if isset "you_should_not_have_this_variable"; then - echo "OK" -else - echo "\"you_should_not_have_this_variable\" not declared. failed" -fi - -unset you_should_not_have_this_variable -if isset "you_should_not_have_this_variable"; then - echo "\"you_should_not_have_this_variable\" looks like declared variable. failed" -else - echo "OK" -fi diff --git a/tests/test_functions.sh b/tests/test_functions.sh index e57948a407..126080f1e3 100755 --- a/tests/test_functions.sh +++ b/tests/test_functions.sh @@ -1,34 +1,215 @@ #!/usr/bin/env bash -# Tests for DevStack meta-config functions +# Tests for DevStack functions TOP=$(cd $(dirname "$0")/.. 
&& pwd) # Import common functions source $TOP/functions -source $TOP/tests/unittest.sh - -function test_truefalse { - local one=1 - local captrue=True - local lowtrue=true - local abrevtrue=t - local zero=0 - local capfalse=False - local lowfalse=false - local abrevfalse=f - for against in True False; do - for name in one captrue lowtrue abrevtrue; do - assert_equal "True" $(trueorfalse $against $name) "\$(trueorfalse $against $name)" - done - done - for against in True False; do - for name in zero capfalse lowfalse abrevfalse; do - assert_equal "False" $(trueorfalse $against $name) "\$(trueorfalse $against $name)" - done - done + +# Import configuration +source $TOP/openrc + + +echo "Testing die_if_not_set()" + +bash -cx "source $TOP/functions; X=`echo Y && true`; die_if_not_set X 'not OK'" +if [[ $? != 0 ]]; then + echo "die_if_not_set [X='Y' true] Failed" +else + echo 'OK' +fi + +bash -cx "source $TOP/functions; X=`true`; die_if_not_set X 'OK'" +if [[ $? = 0 ]]; then + echo "die_if_not_set [X='' true] Failed" +fi + +bash -cx "source $TOP/functions; X=`echo Y && false`; die_if_not_set X 'not OK'" +if [[ $? != 0 ]]; then + echo "die_if_not_set [X='Y' false] Failed" +else + echo 'OK' +fi + +bash -cx "source $TOP/functions; X=`false`; die_if_not_set X 'OK'" +if [[ $? 
= 0 ]]; then + echo "die_if_not_set [X='' false] Failed" +fi + + +# Enabling/disabling services + +echo "Testing enable_service()" + +function test_enable_service { + local start="$1" + local add="$2" + local finish="$3" + + ENABLED_SERVICES="$start" + enable_service $add + if [ "$ENABLED_SERVICES" = "$finish" ]; then + echo "OK: $start + $add -> $ENABLED_SERVICES" + else + echo "changing $start to $finish with $add failed: $ENABLED_SERVICES" + fi +} + +test_enable_service '' a 'a' +test_enable_service 'a' b 'a,b' +test_enable_service 'a,b' c 'a,b,c' +test_enable_service 'a,b' c 'a,b,c' +test_enable_service 'a,b,' c 'a,b,c' +test_enable_service 'a,b' c,d 'a,b,c,d' +test_enable_service 'a,b' "c d" 'a,b,c,d' +test_enable_service 'a,b,c' c 'a,b,c' + +test_enable_service 'a,b,-c' c 'a,b' +test_enable_service 'a,b,c' -c 'a,b' + +function test_disable_service { + local start="$1" + local del="$2" + local finish="$3" + + ENABLED_SERVICES="$start" + disable_service "$del" + if [ "$ENABLED_SERVICES" = "$finish" ]; then + echo "OK: $start - $del -> $ENABLED_SERVICES" + else + echo "changing $start to $finish with $del failed: $ENABLED_SERVICES" + fi +} + +echo "Testing disable_service()" +test_disable_service 'a,b,c' a 'b,c' +test_disable_service 'a,b,c' b 'a,c' +test_disable_service 'a,b,c' c 'a,b' + +test_disable_service 'a,b,c' a 'b,c' +test_disable_service 'b,c' b 'c' +test_disable_service 'c' c '' +test_disable_service '' d '' + +test_disable_service 'a,b,c,' c 'a,b' +test_disable_service 'a,b' c 'a,b' + + +echo "Testing disable_all_services()" +ENABLED_SERVICES=a,b,c +disable_all_services + +if [[ -z "$ENABLED_SERVICES" ]]; then + echo "OK" +else + echo "disabling all services FAILED: $ENABLED_SERVICES" +fi + +echo "Testing disable_negated_services()" + + +function test_disable_negated_services { + local start="$1" + local finish="$2" + + ENABLED_SERVICES="$start" + disable_negated_services + if [ "$ENABLED_SERVICES" = "$finish" ]; then + echo "OK: $start + $add -> 
$ENABLED_SERVICES" + else + echo "changing $start to $finish failed: $ENABLED_SERVICES" + fi } -test_truefalse +test_disable_negated_services '-a' '' +test_disable_negated_services '-a,a' '' +test_disable_negated_services '-a,-a' '' +test_disable_negated_services 'a,-a' '' +test_disable_negated_services 'b,a,-a' 'b' +test_disable_negated_services 'a,b,-a' 'b' +test_disable_negated_services 'a,-a,b' 'b' + + +echo "Testing is_package_installed()" + +if [[ -z "$os_PACKAGE" ]]; then + GetOSVersion +fi + +if [[ "$os_PACKAGE" = "deb" ]]; then + is_package_installed dpkg + VAL=$? +elif [[ "$os_PACKAGE" = "rpm" ]]; then + is_package_installed rpm + VAL=$? +else + VAL=1 +fi +if [[ "$VAL" -eq 0 ]]; then + echo "OK" +else + echo "is_package_installed() on existing package failed" +fi + +if [[ "$os_PACKAGE" = "deb" ]]; then + is_package_installed dpkg bash + VAL=$? +elif [[ "$os_PACKAGE" = "rpm" ]]; then + is_package_installed rpm bash + VAL=$? +else + VAL=1 +fi +if [[ "$VAL" -eq 0 ]]; then + echo "OK" +else + echo "is_package_installed() on more than one existing package failed" +fi + +is_package_installed zzzZZZzzz +VAL=$? +if [[ "$VAL" -ne 0 ]]; then + echo "OK" +else + echo "is_package_installed() on non-existing package failed" +fi + +# test against removed package...was a bug on Ubuntu +if is_ubuntu; then + PKG=cowsay + if ! (dpkg -s $PKG >/dev/null 2>&1); then + # it was never installed...set up the condition + sudo apt-get install -y cowsay >/dev/null 2>&1 + fi + if (dpkg -s $PKG >/dev/null 2>&1); then + # remove it to create the 'un' status + sudo dpkg -P $PKG >/dev/null 2>&1 + fi + + # now test the installed check on a deleted package + is_package_installed $PKG + VAL=$? 
+ if [[ "$VAL" -ne 0 ]]; then + echo "OK" + else + echo "is_package_installed() on deleted package failed" + fi +fi + +# test isset function +echo "Testing isset()" +you_should_not_have_this_variable=42 + +if isset "you_should_not_have_this_variable"; then + echo "OK" +else + echo "\"you_should_not_have_this_variable\" not declared. failed" +fi -report_results +unset you_should_not_have_this_variable +if isset "you_should_not_have_this_variable"; then + echo "\"you_should_not_have_this_variable\" looks like declared variable. failed" +else + echo "OK" +fi diff --git a/tests/test_truefalse.sh b/tests/test_truefalse.sh new file mode 100755 index 0000000000..e57948a407 --- /dev/null +++ b/tests/test_truefalse.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash + +# Tests for DevStack meta-config functions + +TOP=$(cd $(dirname "$0")/.. && pwd) + +# Import common functions +source $TOP/functions +source $TOP/tests/unittest.sh + +function test_truefalse { + local one=1 + local captrue=True + local lowtrue=true + local abrevtrue=t + local zero=0 + local capfalse=False + local lowfalse=false + local abrevfalse=f + for against in True False; do + for name in one captrue lowtrue abrevtrue; do + assert_equal "True" $(trueorfalse $against $name) "\$(trueorfalse $against $name)" + done + done + for against in True False; do + for name in zero capfalse lowfalse abrevfalse; do + assert_equal "False" $(trueorfalse $against $name) "\$(trueorfalse $against $name)" + done + done +} + +test_truefalse + +report_results From 9b845da478ae2fb65ac63de95f6005ecacbb52ce Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 17 Apr 2015 13:10:33 +1000 Subject: [PATCH 0169/2941] Fix die_if_not_set tests The "die_if_not_set" test has the LINENO as a positional argument. The existing tests are not passing this in, so they are failing. Along with this, remove the "-x" from the invocation and hide the output of the tests that are expected to fail to avoid confusion. 
Change-Id: Ibf6b9d7bb72b9f92831e1a90292ff8b0bec7faea --- tests/test_functions.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/test_functions.sh b/tests/test_functions.sh index 126080f1e3..4d0237afb7 100755 --- a/tests/test_functions.sh +++ b/tests/test_functions.sh @@ -13,26 +13,26 @@ source $TOP/openrc echo "Testing die_if_not_set()" -bash -cx "source $TOP/functions; X=`echo Y && true`; die_if_not_set X 'not OK'" +bash -c "source $TOP/functions; X=`echo Y && true`; die_if_not_set $LINENO X 'not OK'" if [[ $? != 0 ]]; then echo "die_if_not_set [X='Y' true] Failed" else echo 'OK' fi -bash -cx "source $TOP/functions; X=`true`; die_if_not_set X 'OK'" +bash -c "source $TOP/functions; X=`true`; die_if_not_set $LINENO X 'OK'" > /dev/null 2>&1 if [[ $? = 0 ]]; then echo "die_if_not_set [X='' true] Failed" fi -bash -cx "source $TOP/functions; X=`echo Y && false`; die_if_not_set X 'not OK'" +bash -c "source $TOP/functions; X=`echo Y && false`; die_if_not_set $LINENO X 'not OK'" if [[ $? != 0 ]]; then echo "die_if_not_set [X='Y' false] Failed" else echo 'OK' fi -bash -cx "source $TOP/functions; X=`false`; die_if_not_set X 'OK'" +bash -c "source $TOP/functions; X=`false`; die_if_not_set $LINENO X 'OK'" > /dev/null 2>&1 if [[ $? = 0 ]]; then echo "die_if_not_set [X='' false] Failed" fi From 09f4ad227976eb95d70045b67d0f724294cf7e22 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 17 Apr 2015 13:13:04 +1000 Subject: [PATCH 0170/2941] Convert test_functions.sh to use unittest helpers This currently does not exit with any failure code when tests are failing. Convert it to use the helper functions from unittest.sh so it correctly reports failures. 
Change-Id: I2062d9c00ebffcc98ba75a12f480e4dd728ee080 --- tests/test_functions.sh | 55 ++++++++++++++++++++++------------------- 1 file changed, 29 insertions(+), 26 deletions(-) diff --git a/tests/test_functions.sh b/tests/test_functions.sh index 4d0237afb7..a7914f7a1b 100755 --- a/tests/test_functions.sh +++ b/tests/test_functions.sh @@ -10,31 +10,32 @@ source $TOP/functions # Import configuration source $TOP/openrc +source $TOP/tests/unittest.sh echo "Testing die_if_not_set()" bash -c "source $TOP/functions; X=`echo Y && true`; die_if_not_set $LINENO X 'not OK'" if [[ $? != 0 ]]; then - echo "die_if_not_set [X='Y' true] Failed" + failed "die_if_not_set [X='Y' true] Failed" else - echo 'OK' + passed 'OK' fi bash -c "source $TOP/functions; X=`true`; die_if_not_set $LINENO X 'OK'" > /dev/null 2>&1 if [[ $? = 0 ]]; then - echo "die_if_not_set [X='' true] Failed" + failed "die_if_not_set [X='' true] Failed" fi bash -c "source $TOP/functions; X=`echo Y && false`; die_if_not_set $LINENO X 'not OK'" if [[ $? != 0 ]]; then - echo "die_if_not_set [X='Y' false] Failed" + failed "die_if_not_set [X='Y' false] Failed" else - echo 'OK' + passed 'OK' fi bash -c "source $TOP/functions; X=`false`; die_if_not_set $LINENO X 'OK'" > /dev/null 2>&1 if [[ $? 
= 0 ]]; then - echo "die_if_not_set [X='' false] Failed" + failed "die_if_not_set [X='' false] Failed" fi @@ -50,9 +51,9 @@ function test_enable_service { ENABLED_SERVICES="$start" enable_service $add if [ "$ENABLED_SERVICES" = "$finish" ]; then - echo "OK: $start + $add -> $ENABLED_SERVICES" + passed "OK: $start + $add -> $ENABLED_SERVICES" else - echo "changing $start to $finish with $add failed: $ENABLED_SERVICES" + failed "changing $start to $finish with $add failed: $ENABLED_SERVICES" fi } @@ -76,9 +77,9 @@ function test_disable_service { ENABLED_SERVICES="$start" disable_service "$del" if [ "$ENABLED_SERVICES" = "$finish" ]; then - echo "OK: $start - $del -> $ENABLED_SERVICES" + passed "OK: $start - $del -> $ENABLED_SERVICES" else - echo "changing $start to $finish with $del failed: $ENABLED_SERVICES" + failed "changing $start to $finish with $del failed: $ENABLED_SERVICES" fi } @@ -101,9 +102,9 @@ ENABLED_SERVICES=a,b,c disable_all_services if [[ -z "$ENABLED_SERVICES" ]]; then - echo "OK" + passed "OK" else - echo "disabling all services FAILED: $ENABLED_SERVICES" + failed "disabling all services FAILED: $ENABLED_SERVICES" fi echo "Testing disable_negated_services()" @@ -116,9 +117,9 @@ function test_disable_negated_services { ENABLED_SERVICES="$start" disable_negated_services if [ "$ENABLED_SERVICES" = "$finish" ]; then - echo "OK: $start + $add -> $ENABLED_SERVICES" + passed "OK: $start + $add -> $ENABLED_SERVICES" else - echo "changing $start to $finish failed: $ENABLED_SERVICES" + failed "changing $start to $finish failed: $ENABLED_SERVICES" fi } @@ -147,9 +148,9 @@ else VAL=1 fi if [[ "$VAL" -eq 0 ]]; then - echo "OK" + passed "OK" else - echo "is_package_installed() on existing package failed" + failed "is_package_installed() on existing package failed" fi if [[ "$os_PACKAGE" = "deb" ]]; then @@ -162,17 +163,17 @@ else VAL=1 fi if [[ "$VAL" -eq 0 ]]; then - echo "OK" + passed "OK" else - echo "is_package_installed() on more than one existing package 
failed" + failed "is_package_installed() on more than one existing package failed" fi is_package_installed zzzZZZzzz VAL=$? if [[ "$VAL" -ne 0 ]]; then - echo "OK" + passed "OK" else - echo "is_package_installed() on non-existing package failed" + failed "is_package_installed() on non-existing package failed" fi # test against removed package...was a bug on Ubuntu @@ -191,9 +192,9 @@ if is_ubuntu; then is_package_installed $PKG VAL=$? if [[ "$VAL" -ne 0 ]]; then - echo "OK" + passed "OK" else - echo "is_package_installed() on deleted package failed" + failed "is_package_installed() on deleted package failed" fi fi @@ -202,14 +203,16 @@ echo "Testing isset()" you_should_not_have_this_variable=42 if isset "you_should_not_have_this_variable"; then - echo "OK" + passed "OK" else - echo "\"you_should_not_have_this_variable\" not declared. failed" + failed "\"you_should_not_have_this_variable\" not declared. failed" fi unset you_should_not_have_this_variable if isset "you_should_not_have_this_variable"; then - echo "\"you_should_not_have_this_variable\" looks like declared variable. failed" + failed "\"you_should_not_have_this_variable\" looks like declared variable." else - echo "OK" + passed "OK" fi + +report_results From 9b64bbf06eab19534e58a1b7af1757e427e6b3b6 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 17 Apr 2015 13:16:24 +1000 Subject: [PATCH 0171/2941] Remove old comment in run_tests.sh The scope of this has expanded to run everything in ./tests Change-Id: I640b0a8b7aa578ddd24dd3e58d5b2a1e09fe0284 --- run_tests.sh | 2 -- 1 file changed, 2 deletions(-) diff --git a/run_tests.sh b/run_tests.sh index c6b7da64c0..a9a3d0bb48 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -17,8 +17,6 @@ PASSES="" FAILURES="" -# Test that no one is trying to land crazy refs as branches - for testfile in tests/test_*.sh; do $testfile if [[ $? 
-eq 0 ]]; then From fa3e8412864a92715c296c6ed5e3828dd4bb2205 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 17 Apr 2015 11:53:40 +1000 Subject: [PATCH 0172/2941] Create config file in merge_config_file Change If132a94e53545d9134859aa508da7b9819ede2f8 introduced a small regression; it added an "inidelete" which looks in the config file to delete rows. However, at least for the test-case, the config file isn't created yet. The end result is that the test fails but we don't notice. 2015-04-17 00:55:03.169 | merge_config_file test-multiline: sed: can't read test-multiline.conf: No such file or directory 2015-04-17 00:55:03.195 | OK So fix this up by creating the config-file if it isn't there. Also, add "-e" to the test file so we catch things like this in the future. Change-Id: I43a4ecc247f19cccf51d5931dfb687adbd23d6b1 --- inc/meta-config | 8 ++++++++ tests/test_meta_config.sh | 2 ++ 2 files changed, 10 insertions(+) diff --git a/inc/meta-config b/inc/meta-config index c8789bf816..e5f902d1dd 100644 --- a/inc/meta-config +++ b/inc/meta-config @@ -86,6 +86,14 @@ function merge_config_file { local matchgroup=$2 local configfile=$3 + # note, configfile might be a variable (note the iniset, etc + # created in the mega-awk below is "eval"ed too, so we just leave + # it alone. + local real_configfile=$(eval echo $configfile) + if [ ! -f $real_configfile ]; then + touch $real_configfile + fi + get_meta_section $file $matchgroup $configfile | \ $CONFIG_AWK_CMD -v configfile=$configfile ' BEGIN { diff --git a/tests/test_meta_config.sh b/tests/test_meta_config.sh index 3ec65bf9b0..a04c081854 100755 --- a/tests/test_meta_config.sh +++ b/tests/test_meta_config.sh @@ -8,6 +8,8 @@ TOP=$(cd $(dirname "$0")/.. 
&& pwd) source $TOP/inc/ini-config source $TOP/inc/meta-config +set -e + # check_result() tests and reports the result values # check_result "actual" "expected" function check_result { From af9bf8663b43a2cc80f2c3adb09b8aa3641f99ab Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 16 Apr 2015 08:58:32 -0400 Subject: [PATCH 0173/2941] refactor ping_check Encapsulate all the neutron specific things you have to do ping a neutron guest into a separate script. Refactor the main ping_check so all logic is contained within it. Change-Id: Ic79d8e3a2473b978551a5635a11dba07e1020bb2 --- exercises/boot_from_volume.sh | 2 +- exercises/euca.sh | 2 +- exercises/floating_ips.sh | 6 ++-- exercises/neutron-adv-test.sh | 2 +- exercises/volumes.sh | 2 +- functions | 61 ++++++++++++++++---------------- lib/neutron-legacy | 21 ----------- tools/ping_neutron.sh | 65 +++++++++++++++++++++++++++++++++++ 8 files changed, 104 insertions(+), 57 deletions(-) create mode 100755 tools/ping_neutron.sh diff --git a/exercises/boot_from_volume.sh b/exercises/boot_from_volume.sh index aa348307af..d520b9bbbf 100755 --- a/exercises/boot_from_volume.sh +++ b/exercises/boot_from_volume.sh @@ -182,7 +182,7 @@ IP=$(get_instance_ip $VM_UUID $PRIVATE_NETWORK_NAME) die_if_not_set $LINENO IP "Failure retrieving IP address" # Private IPs can be pinged in single node deployments -ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT +ping_check $IP $BOOT_TIMEOUT "$PRIVATE_NETWORK_NAME" # Clean up # -------- diff --git a/exercises/euca.sh b/exercises/euca.sh index df5e233b8d..c2957e222b 100755 --- a/exercises/euca.sh +++ b/exercises/euca.sh @@ -142,7 +142,7 @@ else die $LINENO "Failure authorizing rule in $SECGROUP" # Test we can ping our floating ip within ASSOCIATE_TIMEOUT seconds - ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT + ping_check $FLOATING_IP $ASSOCIATE_TIMEOUT "$PUBLIC_NETWORK_NAME" # Revoke pinging euca-revoke -P icmp -s 0.0.0.0/0 -t -1:-1 $SECGROUP || \ diff --git 
a/exercises/floating_ips.sh b/exercises/floating_ips.sh index 59444e1ebd..4b72a0073f 100755 --- a/exercises/floating_ips.sh +++ b/exercises/floating_ips.sh @@ -139,7 +139,7 @@ IP=$(get_instance_ip $VM_UUID $PRIVATE_NETWORK_NAME) die_if_not_set $LINENO IP "Failure retrieving IP address" # Private IPs can be pinged in single node deployments -ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT +ping_check $IP $BOOT_TIMEOUT "$PRIVATE_NETWORK_NAME" # Floating IPs # ------------ @@ -158,7 +158,7 @@ nova add-floating-ip $VM_UUID $FLOATING_IP || \ die $LINENO "Failure adding floating IP $FLOATING_IP to $VM_NAME" # Test we can ping our floating IP within ASSOCIATE_TIMEOUT seconds -ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT +ping_check $FLOATING_IP $ASSOCIATE_TIMEOUT "$PUBLIC_NETWORK_NAME" if ! is_service_enabled neutron; then # Allocate an IP from second floating pool @@ -182,7 +182,7 @@ fi # FIXME (anthony): make xs support security groups if [ "$VIRT_DRIVER" != "ironic" -a "$VIRT_DRIVER" != "xenserver" -a "$VIRT_DRIVER" != "openvz" ]; then # Test we can aren't able to ping our floating ip within ASSOCIATE_TIMEOUT seconds - ping_check "$PUBLIC_NETWORK_NAME" $FLOATING_IP $ASSOCIATE_TIMEOUT Fail + ping_check $FLOATING_IP $ASSOCIATE_TIMEOUT "$PUBLIC_NETWORK_NAME" Fail fi # Clean up diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh index 9230587817..04892b0e93 100755 --- a/exercises/neutron-adv-test.sh +++ b/exercises/neutron-adv-test.sh @@ -281,7 +281,7 @@ function ping_ip { local VM_NAME=$1 local NET_NAME=$2 IP=$(get_instance_ip $VM_NAME $NET_NAME) - ping_check $NET_NAME $IP $BOOT_TIMEOUT + ping_check $IP $BOOT_TIMEOUT $NET_NAME } function check_vm { diff --git a/exercises/volumes.sh b/exercises/volumes.sh index 3ac2016254..f95c81fdf1 100755 --- a/exercises/volumes.sh +++ b/exercises/volumes.sh @@ -143,7 +143,7 @@ IP=$(get_instance_ip $VM_UUID $PRIVATE_NETWORK_NAME) die_if_not_set $LINENO IP "Failure retrieving IP address" # 
Private IPs can be pinged in single node deployments -ping_check "$PRIVATE_NETWORK_NAME" $IP $BOOT_TIMEOUT +ping_check $IP $BOOT_TIMEOUT "$PRIVATE_NETWORK_NAME" # Volumes # ------- diff --git a/functions b/functions index 4dc20e7f23..339779c338 100644 --- a/functions +++ b/functions @@ -340,39 +340,42 @@ function wait_for_service { # ping check -# Uses globals ``ENABLED_SERVICES`` -# ping_check from-net ip boot-timeout expected +# Uses globals ``ENABLED_SERVICES``, ``TOP_DIR``, ``MULTI_HOST``, ``PRIVATE_NETWORK`` +# ping_check [boot-timeout] [from_net] [expected] function ping_check { - if is_service_enabled neutron; then - _ping_check_neutron "$1" $2 $3 $4 - return + local ip=$1 + local timeout=${2:-30} + local from_net=${3:-""} + local expected=${4:-True} + local op="!" + local failmsg="[Fail] Couldn't ping server" + local ping_cmd="ping" + + # if we don't specify a from_net we're expecting things to work + # fine from our local box. + if [[ -n "$from_net" ]]; then + if is_service_enabled neutron; then + ping_cmd="$TOP_DIR/tools/ping_neutron.sh $from_net" + elif [[ "$MULTI_HOST" = "True" && "$from_net" = "$PRIVATE_NETWORK_NAME" ]]; then + # there is no way to address the multihost / private case, bail here for compatibility. + # TODO: remove this cruft and redo code to handle this at the caller level. + return + fi fi - _ping_check_novanet "$1" $2 $3 $4 -} -# ping check for nova -# Uses globals ``MULTI_HOST``, ``PRIVATE_NETWORK`` -function _ping_check_novanet { - local from_net=$1 - local ip=$2 - local boot_timeout=$3 - local expected=${4:-"True"} - local check_command="" - MULTI_HOST=$(trueorfalse False MULTI_HOST) - if [[ "$MULTI_HOST" = "True" && "$from_net" = "$PRIVATE_NETWORK_NAME" ]]; then - return + # inverse the logic if we're testing no connectivity + if [[ "$expected" != "True" ]]; then + op="" + failmsg="[Fail] Could ping server" fi - if [[ "$expected" = "True" ]]; then - check_command="while ! 
ping -c1 -w1 $ip; do sleep 1; done" - else - check_command="while ping -c1 -w1 $ip; do sleep 1; done" - fi - if ! timeout $boot_timeout sh -c "$check_command"; then - if [[ "$expected" = "True" ]]; then - die $LINENO "[Fail] Couldn't ping server" - else - die $LINENO "[Fail] Could ping server" - fi + + # Because we've transformed this command so many times, print it + # out at the end. + local check_command="while $op $ping_cmd -c1 -w1 $ip; do sleep 1; done" + echo "Checking connectivity with $check_command" + + if ! timeout $timeout sh -c "$check_command"; then + die $LINENO $failmsg fi } diff --git a/lib/neutron-legacy b/lib/neutron-legacy index c6d9296fba..80e78d5db6 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -1404,27 +1404,6 @@ function _get_probe_cmd_prefix { echo "$Q_RR_COMMAND ip netns exec qprobe-$probe_id" } -function _ping_check_neutron { - local from_net=$1 - local ip=$2 - local timeout_sec=$3 - local expected=${4:-"True"} - local check_command="" - probe_cmd=`_get_probe_cmd_prefix $from_net` - if [[ "$expected" = "True" ]]; then - check_command="while ! $probe_cmd ping -w 1 -c 1 $ip; do sleep 1; done" - else - check_command="while $probe_cmd ping -w 1 -c 1 $ip; do sleep 1; done" - fi - if ! timeout $timeout_sec sh -c "$check_command"; then - if [[ "$expected" = "True" ]]; then - die $LINENO "[Fail] Couldn't ping server" - else - die $LINENO "[Fail] Could ping server" - fi - fi -} - # ssh check function _ssh_check_neutron { local from_net=$1 diff --git a/tools/ping_neutron.sh b/tools/ping_neutron.sh new file mode 100755 index 0000000000..d36b7f60c8 --- /dev/null +++ b/tools/ping_neutron.sh @@ -0,0 +1,65 @@ +#!/bin/bash +# +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# Ping a neutron guest using a network namespace probe + +set -o errexit +set -o pipefail + +TOP_DIR=$(cd $(dirname "$0")/.. && pwd) + +# This *must* be run as the admin tenant +source $TOP_DIR/openrc admin admin + +function usage { + cat - < [ping args] + +This provides a wrapper to ping neutron guests that are on isolated +tenant networks that the caller can't normally reach. It does so by +creating a network namespace probe. + +It takes arguments like ping, except the first arg must be the network +name. + +Note: in environments with duplicate network names, the results are +non deterministic. + +This should *really* be in the neutron cli. + +EOF + exit 1 +} + +NET_NAME=$1 + +if [[ -z "$NET_NAME" ]]; then + echo "Error: net_name is required" + usage +fi + +REMANING_ARGS="${@:2}" + +# BUG: with duplicate network names, this fails pretty hard. +NET_ID=$(neutron net-list $NET_NAME | grep "$NET_NAME" | awk '{print $2}') +PROBE_ID=$(neutron-debug probe-list -c id -c network_id | grep "$NET_ID" | awk '{print $2}' | head -n 1) + +# This runs a command inside the specific netns +NET_NS_CMD="ip netns exec qprobe-$PROBE_ID" + +PING_CMD="sudo $NET_NS_CMD ping $REMAING_ARGS" +echo "Running $PING_CMD" +$PING_CMD From 645114b7133bc70fb52f9f0c3f841766595358c8 Mon Sep 17 00:00:00 2001 From: "Sean M. Collins" Date: Tue, 3 Mar 2015 10:56:03 -0500 Subject: [PATCH 0174/2941] Set DevStack to dual stack by default in Kilo+ This patch sets DevStack to run in dual stack networking, with both IPv4 and IPv6 networking configured. 
This change is required for dual stack testing at the gate. A different patch was created against devstack-gate to make this the default, but the Juno branch of Neutron is missing required fixes to the L3 agent that are present in Kilo. This was the suggested alternative. Related-change: I3d416275f77913769b98e77f7e47bed17fc4d1cc Co-Authored-By: Henry Gessau Co-Authored-By: Andrew Boik Depends-On: Ib66a9109cc1c7999474daca5970d0af1f70886e4 Depends-On: I0f9ea98cb84aa72cb1505fb9ff8ac61561cc1376 Depends-On: I85fe68782bc54f28f3e14aa4a1d042cb15959dac Depends-On: I9395834f673038dc23b25eaeefe14895fe154e0e Change-Id: If0e0b818355e4cb1338f7fa72af5e81e24361574 --- lib/neutron-legacy | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index c6d9296fba..bad2001608 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -65,7 +65,7 @@ GATEWAY_TIMEOUT=30 # ----------------------------- # Subnet IP version -IP_VERSION=${IP_VERSION:-4} +IP_VERSION=${IP_VERSION:-"4+6"} # Validate IP_VERSION if [[ $IP_VERSION != "4" ]] && [[ $IP_VERSION != "6" ]] && [[ $IP_VERSION != "4+6" ]]; then die $LINENO "IP_VERSION must be either 4, 6, or 4+6" From 95b994d54815027904504cf173451cd87fd99c66 Mon Sep 17 00:00:00 2001 From: John Griffith Date: Sat, 18 Apr 2015 11:20:15 -0600 Subject: [PATCH 0175/2941] Add logging config to cinder.conf Part of the effort to clean up the Cinder logs is to use the resource tag in the log format. We also want to have some consistency with other projects in how we do logging. This change adds the logging format to cinder.conf similar to what Nova and others use, and most importantly turns on the use of the resource tag that's in olso_log. We're slowly cleaning up the logging in Cinder by doing things like replacing "Delete volume %(volume_id)s compoleted" with ("Delete volume completed successfully.", resource=volume) It woudl be good to have these picked up as we transition so we're not missing info. 
Also, there's sure to be cases where "volume" isn't a valid dbref and we find issues that need fixed. Change-Id: I193637fea14d97183f6a9782f37d8edcf929e0c4 --- lib/cinder | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/cinder b/lib/cinder index de41bc5f79..6439903953 100644 --- a/lib/cinder +++ b/lib/cinder @@ -264,6 +264,9 @@ function configure_cinder { # Format logging if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then setup_colorized_logging $CINDER_CONF DEFAULT "project_id" "user_id" + else + # Set req-id, project-name and resource in log format + iniset $CINDER_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(project_name)s] %(resource)s%(message)s" fi if [[ -r $CINDER_PLUGINS/$CINDER_DRIVER ]]; then From 71e4e6f601381494e640f424876bad0f92b6dc9b Mon Sep 17 00:00:00 2001 From: gordon chung Date: Mon, 16 Mar 2015 16:33:01 -0400 Subject: [PATCH 0176/2941] ceilometer: add tempest option to test events event support in Ceilometer was implemented in Kilo. to enable tests in tempest, we add an option to run tests only on Kilo+ branch. Change-Id: Ia4a73b7df343e31e6301f8314490fd42a01b7cd0 --- lib/tempest | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/tempest b/lib/tempest index f856ce05f9..06413f7110 100644 --- a/lib/tempest +++ b/lib/tempest @@ -411,6 +411,7 @@ function configure_tempest { # Ceilometer API optimization happened in juno that allows to run more tests in tempest. # Once Tempest retires support for icehouse this flag can be removed. 
iniset $TEMPEST_CONFIG telemetry too_slow_to_test "False" + iniset $TEMPEST_CONFIG telemetry-feature-enabled events "True" # Object storage local object_storage_api_extensions=${OBJECT_STORAGE_API_EXTENSIONS:-"all"} From 72a8be60cd6b6efd32ebe2d81346ece48434510f Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 9 Apr 2015 13:51:23 +1000 Subject: [PATCH 0177/2941] Add a peak memory tracker to dstat We can see at-a-glance memory usage during the run with dstat but we have no way to break that down into an overview of where memory is going. This adds a peer-service to dstat that records snapshots of the system during peak memory usage. It checks periodically if there is less memory available than before and, if so, records the running processes and vm overview. The intent is to add logic into the verify-pipeline jobs to use this report and send statistics on peak memory usage to statsd [1]. We can then build a picture of memory-usage growth over time. This type of report would have allowed better insight into issues such as introduced by Idf3a3a914b54779172776822710b3e52e751b1d1 where memory-usage jumped dramatically after switching to pip versions of libraries. Tracking details of memory usage is going to be an important part of future development. 
[1] http://graphite.openstack.org/ Change-Id: I4b0a8f382dcaa09331987ab84a68546ec29cbc18 --- lib/dstat | 6 +++ tools/peakmem_tracker.sh | 96 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 102 insertions(+) create mode 100755 tools/peakmem_tracker.sh diff --git a/lib/dstat b/lib/dstat index 4b22752ced..f11bfa55c0 100644 --- a/lib/dstat +++ b/lib/dstat @@ -21,11 +21,17 @@ function start_dstat { # A better kind of sysstat, with the top process per time slice DSTAT_OPTS="-tcmndrylpg --top-cpu-adv --top-io-adv" run_process dstat "dstat $DSTAT_OPTS" + + # To enable peakmem_tracker add: + # enable_service peakmem_tracker + # to your localrc + run_process peakmem_tracker "$TOP_DIR/tools/peakmem_tracker.sh" } # stop_dstat() stop dstat process function stop_dstat { stop_process dstat + stop_process peakmem_tracker } # Restore xtrace diff --git a/tools/peakmem_tracker.sh b/tools/peakmem_tracker.sh new file mode 100755 index 0000000000..0d5728a538 --- /dev/null +++ b/tools/peakmem_tracker.sh @@ -0,0 +1,96 @@ +#!/bin/bash +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +set -o errexit + +# time to sleep between checks +SLEEP_TIME=20 + +# MemAvailable is the best estimation and has built-in heuristics +# around reclaimable memory. However, it is not available until 3.14 +# kernel (i.e. Ubuntu LTS Trusty misses it). In that case, we fall +# back to free+buffers+cache as the available memory. 
+USE_MEM_AVAILBLE=0 +if grep -q '^MemAvailable:' /proc/meminfo; then + USE_MEM_AVAILABLE=1 +fi + +function get_mem_available { + if [[ $USE_MEM_AVAILABLE -eq 1 ]]; then + awk '/^MemAvailable:/ {print $2}' /proc/meminfo + else + awk '/^MemFree:/ {free=$2} + /^Buffers:/ {buffers=$2} + /^Cached:/ {cached=$2} + END { print free+buffers+cached }' /proc/meminfo + fi +} + +# whenever we see less memory available than last time, dump the +# snapshot of current usage; i.e. checking the latest entry in the +# file will give the peak-memory usage +function tracker { + local low_point=$(get_mem_available) + while [ 1 ]; do + + local mem_available=$(get_mem_available) + + if [[ $mem_available -lt $low_point ]]; then + low_point=$mem_available + echo "[[[" + date + echo "---" + # always available greppable output; given difference in + # meminfo output as described above... + echo "peakmem_tracker low_point: $mem_available" + echo "---" + cat /proc/meminfo + echo "---" + # would hierarchial view be more useful (-H)? output is + # not sorted by usage then, however, and the first + # question is "what's using up the memory" + # + # there are a lot of kernel threads, especially on a 8-cpu + # system. do a best-effort removal to improve + # signal/noise ratio of output. + ps --sort=-pmem -eo pid:10,pmem:6,rss:15,ppid:10,cputime:10,nlwp:8,wchan:25,args:100 | + grep -v ']$' + echo "]]]" + fi + + sleep $SLEEP_TIME + done +} + +function usage { + echo "Usage: $0 [-x] [-s N]" 1>&2 + exit 1 +} + +while getopts ":s:x" opt; do + case $opt in + s) + SLEEP_TIME=$OPTARG + ;; + x) + set -o xtrace + ;; + *) + usage + ;; + esac +done +shift $((OPTIND-1)) + +tracker From a80cb815fff0b625718550a2f19a0be08c1af6a1 Mon Sep 17 00:00:00 2001 From: Morgan Fainberg Date: Thu, 12 Mar 2015 17:55:51 -0700 Subject: [PATCH 0178/2941] Add response time to keystone access log Add the response time to keystone's access log for each request. 
This will be the last element in the log-line and will be represented in microseconds. Change-Id: I19204369af5cdf06df2237550c350dfb3ffc995d --- files/apache-keystone.template | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/files/apache-keystone.template b/files/apache-keystone.template index 1d20af7f90..0b914e2b8f 100644 --- a/files/apache-keystone.template +++ b/files/apache-keystone.template @@ -1,5 +1,6 @@ Listen %PUBLICPORT% Listen %ADMINPORT% +LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" keystone_combined WSGIDaemonProcess keystone-public processes=5 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% @@ -11,7 +12,7 @@ Listen %ADMINPORT% ErrorLogFormat "%{cu}t %M" ErrorLog /var/log/%APACHE_NAME%/keystone.log - CustomLog /var/log/%APACHE_NAME%/keystone_access.log combined + CustomLog /var/log/%APACHE_NAME%/keystone_access.log keystone_combined %SSLENGINE% %SSLCERTFILE% %SSLKEYFILE% @@ -27,7 +28,7 @@ Listen %ADMINPORT% ErrorLogFormat "%{cu}t %M" ErrorLog /var/log/%APACHE_NAME%/keystone.log - CustomLog /var/log/%APACHE_NAME%/keystone_access.log combined + CustomLog /var/log/%APACHE_NAME%/keystone_access.log keystone_combined %SSLENGINE% %SSLCERTFILE% %SSLKEYFILE% From 8f5fe871a45585fdbc72aacca6f8528b1f4d328d Mon Sep 17 00:00:00 2001 From: Ramakrishnan G Date: Fri, 17 Apr 2015 12:48:39 +0000 Subject: [PATCH 0179/2941] Fix issue with ml2 plugin on using provider network This commit fixes the issue that ml2 plugin sets 'flat_networks' in ml2 configuration file as empty. The value of 'flat_networks' need to be set as the name of the physical network that was specified in the localrc file (or it's default value). 
Change-Id: Ib4c31f6576da57534b36aefebd1ca8cd397c6c1a --- lib/neutron_plugins/ml2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index e3b2c4dd28..abe6ea70f5 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -89,7 +89,7 @@ function neutron_plugin_configure_service { # Allow for setup the flat type network if [[ -z "$Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS" && -n "$PHYSICAL_NETWORK" ]]; then - Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS="flat_networks=$Q_ML2_FLAT_PHYSNET_OPTIONS" + Q_ML2_PLUGIN_FLAT_TYPE_OPTIONS="flat_networks=$PHYSICAL_NETWORK" fi # REVISIT(rkukura): Setting firewall_driver here for # neutron.agent.securitygroups_rpc.is_firewall_enabled() which is From 2ed09d88fb2847fe8ec813bf518dd945d8d813fa Mon Sep 17 00:00:00 2001 From: Shilla Saebi Date: Tue, 21 Apr 2015 15:02:13 -0400 Subject: [PATCH 0180/2941] made several changes to guides to comply to doc conventions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit “Speed not required” is not a sentence Gb should be GB added a , after floating IPs fixed sentence around “To implement a true multi-node test of Swift since it did not make sense removed extra underline line after Machines removed capitalization of service names to comply with docs conventions https://wiki.openstack.org/wiki/Documentation/Conventions changed to DevStack for consistency throughout Change-Id: I531bf6b2bad62fbf9d1417b2b1ce06de3715e0f0 --- .../guides/devstack-with-nested-kvm.rst | 4 +-- doc/source/guides/multinode-lab.rst | 16 ++++++------ doc/source/guides/neutron.rst | 26 +++++++++---------- doc/source/guides/nova.rst | 8 +++--- doc/source/guides/single-vm.rst | 8 +++--- 5 files changed, 31 insertions(+), 31 deletions(-) diff --git a/doc/source/guides/devstack-with-nested-kvm.rst b/doc/source/guides/devstack-with-nested-kvm.rst index 610300be52..b35492ea17 100644 --- a/doc/source/guides/devstack-with-nested-kvm.rst +++ 
b/doc/source/guides/devstack-with-nested-kvm.rst @@ -54,7 +54,7 @@ that by ensuring `/dev/kvm` character device is present. Configure Nested KVM for AMD-based Machines --------------------------------------------- +------------------------------------------- Procedure to enable nested KVM virtualization on AMD-based machines. @@ -121,7 +121,7 @@ attribute `virt_type = kvm` in `/etc/nova.conf`; otherwise, it'll fall back to `virt_type=qemu`, i.e. plain QEMU emulation. Optionally, to explicitly set the type of virtualization, to KVM, by the -libvirt driver in Nova, the below config attribute can be used in +libvirt driver in nova, the below config attribute can be used in DevStack's ``local.conf``: :: diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst index d963243fa8..b2617c9f17 100644 --- a/doc/source/guides/multinode-lab.rst +++ b/doc/source/guides/multinode-lab.rst @@ -262,17 +262,17 @@ for scripting: Swift ----- -Swift requires a significant amount of resources and is disabled by -default in DevStack. The support in DevStack is geared toward a minimal -installation but can be used for testing. To implement a true multi-node -test of Swift required more than DevStack provides. Enabling it is as +Swift, OpenStack Object Storage, requires a significant amount of resources +and is disabled by default in DevStack. The support in DevStack is geared +toward a minimal installation but can be used for testing. To implement a +true multi-node test of swift, additional steps will be required. Enabling it is as simple as enabling the ``swift`` service in ``local.conf``: :: enable_service s-proxy s-object s-container s-account -Swift will put its data files in ``SWIFT_DATA_DIR`` (default +Swift, OpenStack Object Storage, will put its data files in ``SWIFT_DATA_DIR`` (default ``/opt/stack/data/swift``). The size of the data 'partition' created (really a loop-mounted file) is set by ``SWIFT_LOOPBACK_DISK_SIZE``. 
The Swift config files are located in ``SWIFT_CONF_DIR`` (default @@ -334,14 +334,14 @@ After making changes to the repository or branch, if ``RECLONE`` is not set in ``localrc`` it may be necessary to remove the corresponding directory from ``/opt/stack`` to force git to re-clone the repository. -For example, to pull Nova from a proposed release candidate in the -primary Nova repository: +For example, to pull nova, OpenStack Compute, from a proposed release candidate +in the primary nova repository: :: NOVA_BRANCH=rc-proposed -To pull Glance from an experimental fork: +To pull glance, OpenStack Image service, from an experimental fork: :: diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst index 95cde9699a..3030c7b5f2 100644 --- a/doc/source/guides/neutron.rst +++ b/doc/source/guides/neutron.rst @@ -1,14 +1,14 @@ ====================================== -Using DevStack with Neutron Networking +Using DevStack with neutron Networking ====================================== -This guide will walk you through using OpenStack Neutron with the ML2 +This guide will walk you through using OpenStack neutron with the ML2 plugin and the Open vSwitch mechanism driver. Network Interface Configuration =============================== -To use Neutron, it is suggested that two network interfaces be present +To use neutron, it is suggested that two network interfaces be present in the host operating system. The first interface, eth0 is used for the OpenStack management (API, @@ -62,7 +62,7 @@ connectivity. Disabling Next Generation Firewall Tools ======================================== -Devstack does not properly operate with modern firewall tools. Specifically +DevStack does not properly operate with modern firewall tools. Specifically it will appear as if the guest VM can access the external network via ICMP, but UDP and TCP packets will not be delivered to the guest VM. 
The root cause of the issue is that both ufw (Uncomplicated Firewall) and firewalld (Fedora's @@ -96,13 +96,13 @@ disable ufw if it was enabled, do the following: Neutron Networking with Open vSwitch ==================================== -Configuring Neutron networking in DevStack is very similar to +Configuring neutron, OpenStack Networking in DevStack is very similar to configuring `nova-network` - many of the same configuration variables (like `FIXED_RANGE` and `FLOATING_RANGE`) used by `nova-network` are -used by Neutron, which is intentional. +used by neutron, which is intentional. The only difference is the disabling of `nova-network` in your -local.conf, and the enabling of the Neutron components. +local.conf, and the enabling of the neutron components. Configuration @@ -134,16 +134,16 @@ in a real setup FLOATING_RANGE would be a public IP address range. Neutron Networking with Open vSwitch and Provider Networks ========================================================== -In some instances, it is desirable to use Neutron's provider +In some instances, it is desirable to use neutron's provider networking extension, so that networks that are configured on an -external router can be utilized by Neutron, and instances created via +external router can be utilized by neutron, and instances created via Nova can attach to the network managed by the external router. For example, in some lab environments, a hardware router has been pre-configured by another party, and an OpenStack developer has been given a VLAN tag and IP address range, so that instances created via DevStack will use the external router for L3 connectivity, as opposed -to the Neutron L3 service. +to the neutron L3 service. Service Configuration @@ -152,8 +152,8 @@ Service Configuration **Control Node** In this example, the control node will run the majority of the -OpenStack API and management services (Keystone, Glance, -Nova, Neutron, etc..) 
+OpenStack API and management services (keystone, glance, +nova, neutron) **Compute Nodes** @@ -226,4 +226,4 @@ DevStack will automatically add the network interface defined in For example, with the above configuration, a bridge is created, named `br-ex` which is managed by Open vSwitch, and the second interface on the compute node, `eth1` is attached to the -bridge, to forward traffic sent by guest vms. +bridge, to forward traffic sent by guest VMs. diff --git a/doc/source/guides/nova.rst b/doc/source/guides/nova.rst index 0d98f4abad..a91e0d194c 100644 --- a/doc/source/guides/nova.rst +++ b/doc/source/guides/nova.rst @@ -1,15 +1,15 @@ ================= -Nova and devstack +Nova and DevStack ================= This is a rough guide to various configuration parameters for nova -running with devstack. +running with DevStack. nova-serialproxy ================ -In Juno nova implemented a `spec +In Juno, nova implemented a `spec `_ to allow read/write access to the serial console of an instance via `nova-serialproxy @@ -60,7 +60,7 @@ The service can be enabled by adding ``n-sproxy`` to #proxyclient_address=127.0.0.1 -Enabling the service is enough to be functional for a single machine devstack. +Enabling the service is enough to be functional for a single machine DevStack. These config options are defined in `nova.console.serial `_ diff --git a/doc/source/guides/single-vm.rst b/doc/source/guides/single-vm.rst index ab46d91b83..c2ce1a34a0 100644 --- a/doc/source/guides/single-vm.rst +++ b/doc/source/guides/single-vm.rst @@ -3,10 +3,10 @@ All-In-One Single VM ==================== Use the cloud to build the cloud! Use your cloud to launch new versions -of OpenStack in about 5 minutes. When you break it, start over! The VMs +of OpenStack in about 5 minutes. If you break it, start over! The VMs launched in the cloud will be slow as they are running in QEMU (emulation), but their primary use is testing OpenStack development and -operation. Speed not required. +operation. 
Prerequisites Cloud & Image =========================== @@ -15,7 +15,7 @@ Virtual Machine --------------- DevStack should run in any virtual machine running a supported Linux -release. It will perform best with 4Gb or more of RAM. +release. It will perform best with 4GB or more of RAM. OpenStack Deployment & cloud-init --------------------------------- @@ -88,7 +88,7 @@ Using OpenStack --------------- At this point you should be able to access the dashboard. Launch VMs and -if you give them floating IPs access those VMs from other machines on +if you give them floating IPs, access those VMs from other machines on your network. One interesting use case is for developers working on a VM on their From fad7b43abece71ccee09bf0a3b729c72e81d9465 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 21 Apr 2015 13:32:11 -0400 Subject: [PATCH 0181/2941] testr requires python's gdbm Debian/Ubuntu have *helpfully* removed gdbm from the base python package and put it into another package, which is not dragged in by installing python. Testr doesn't function without this. We should ensure this always gets installed. 
Depends-On: If48a8444b02ee1e105bc1d9ce78a0489ea0c405b Change-Id: I85a0ffe5ee384e055e78fd3164c27d42a86bc39d --- files/debs/general | 1 + 1 file changed, 1 insertion(+) diff --git a/files/debs/general b/files/debs/general index c27b77de18..146052643c 100644 --- a/files/debs/general +++ b/files/debs/general @@ -17,6 +17,7 @@ tcpdump tar python-dev python2.7 +python-gdbm # needed for testr bc libyaml-dev libffi-dev From 74a85b0f2954f96eeda876ec8fc8f43017aa8a82 Mon Sep 17 00:00:00 2001 From: Chris Dent Date: Wed, 22 Apr 2015 18:02:39 +0000 Subject: [PATCH 0182/2941] Set policy_file in the oslo_policy group policy_file in DEFAULT is deprecated Change-Id: I6698a810d5e6c395a18aed8066e61f8c4bae2408 --- lib/ceilometer | 2 +- lib/ironic | 2 +- lib/neutron-legacy | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index 3a4a4fb167..9abdbfe286 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -195,7 +195,7 @@ function configure_ceilometer { # Install the policy file for the API server cp $CEILOMETER_DIR/etc/ceilometer/policy.json $CEILOMETER_CONF_DIR - iniset $CEILOMETER_CONF DEFAULT policy_file $CEILOMETER_CONF_DIR/policy.json + iniset $CEILOMETER_CONF oslo_policy policy_file $CEILOMETER_CONF_DIR/policy.json cp $CEILOMETER_DIR/etc/ceilometer/pipeline.yaml $CEILOMETER_CONF_DIR cp $CEILOMETER_DIR/etc/ceilometer/event_pipeline.yaml $CEILOMETER_CONF_DIR diff --git a/lib/ironic b/lib/ironic index 4ac0100410..4a37f0aafa 100644 --- a/lib/ironic +++ b/lib/ironic @@ -296,7 +296,7 @@ function configure_ironic { # API specific configuration. function configure_ironic_api { iniset $IRONIC_CONF_FILE DEFAULT auth_strategy keystone - iniset $IRONIC_CONF_FILE DEFAULT policy_file $IRONIC_POLICY_JSON + iniset $IRONIC_CONF_FILE oslo_policy policy_file $IRONIC_POLICY_JSON # TODO(Yuki Nishiwaki): This is a temporary work-around until Ironic is fixed(bug#1422632). 
# These codes need to be changed to use the function of configure_auth_token_middleware diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 3072d0a0ed..ac1cd0ffc2 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -1081,7 +1081,7 @@ function _configure_neutron_service { iniset $NEUTRON_CONF DEFAULT verbose True iniset $NEUTRON_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $NEUTRON_CONF DEFAULT policy_file $Q_POLICY_FILE + iniset $NEUTRON_CONF oslo_policy policy_file $Q_POLICY_FILE iniset $NEUTRON_CONF DEFAULT allow_overlapping_ips $Q_ALLOW_OVERLAPPING_IP iniset $NEUTRON_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY From 2796a82ab48107d4445c03938e037e60dd1bbfa9 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 15 Apr 2015 08:59:04 +1000 Subject: [PATCH 0183/2941] Fix negated services with common prefix The current sed matching mixes up common-prefix matching; e.g. "-q-lbaas,q-lbaasv2" is changed into just "v2" This is more verbose, but I think more reliable. See also Ib50f782824f89ae4eb9787f11d42416704babd90. Change-Id: I3faad0841834e24acc811c05015625cf7f848b19 --- functions-common | 32 ++++++++++++++++++++++++++++---- tests/test_functions.sh | 10 +++++++++- 2 files changed, 37 insertions(+), 5 deletions(-) diff --git a/functions-common b/functions-common index 24a462ac21..e6af662069 100644 --- a/functions-common +++ b/functions-common @@ -1621,14 +1621,38 @@ function disable_all_services { # Uses global ``ENABLED_SERVICES`` # disable_negated_services function disable_negated_services { - local tmpsvcs="${ENABLED_SERVICES}" + local to_remove="" + local remaining="" + local enabled="" local service - for service in ${tmpsvcs//,/ }; do + + # build up list of services that should be removed; i.e. 
they + # begin with "-" + for service in ${ENABLED_SERVICES//,/ }; do if [[ ${service} == -* ]]; then - tmpsvcs=$(echo ${tmpsvcs}|sed -r "s/(,)?(-)?${service#-}(,)?/,/g") + to_remove+=",${service#-}" + else + remaining+=",${service}" fi done - ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs") + + # go through the service list. if this service appears in the "to + # be removed" list, drop it + for service in ${remaining//,/ }; do + local remove + local add=1 + for remove in ${to_remove//,/ }; do + if [[ ${remove} == ${service} ]]; then + add=0 + break + fi + done + if [[ $add == 1 ]]; then + enabled="${enabled},$service" + fi + done + + ENABLED_SERVICES=$(_cleanup_service_list "$enabled") } # disable_service() removes the services passed as argument to the diff --git a/tests/test_functions.sh b/tests/test_functions.sh index a7914f7a1b..4ebb00085e 100755 --- a/tests/test_functions.sh +++ b/tests/test_functions.sh @@ -130,7 +130,15 @@ test_disable_negated_services 'a,-a' '' test_disable_negated_services 'b,a,-a' 'b' test_disable_negated_services 'a,b,-a' 'b' test_disable_negated_services 'a,-a,b' 'b' - +test_disable_negated_services 'a,aa,-a' 'aa' +test_disable_negated_services 'aa,-a' 'aa' +test_disable_negated_services 'a_a, -a_a' '' +test_disable_negated_services 'a-b, -a-b' '' +test_disable_negated_services 'a-b, b, -a-b' 'b' +test_disable_negated_services 'a,-a,av2,b' 'av2,b' +test_disable_negated_services 'a,aa,-a' 'aa' +test_disable_negated_services 'a,av2,-a,a' 'av2' +test_disable_negated_services 'a,-a,av2' 'av2' echo "Testing is_package_installed()" From 2c5d462d910ba505df44d884f8cf9d6df9252b37 Mon Sep 17 00:00:00 2001 From: Robert Li Date: Tue, 21 Apr 2015 15:48:22 -0400 Subject: [PATCH 0184/2941] Add /usr/local/bin to exec_dirs in rootwrap.conf devstack installs neutron utilities into /usr/local/bin such as neutron-keepalived-state-change and neutron-ns-metadata-proxy. 
In stead of adding individual filters to allow them to run from that directory, this patch adds /usr/local/bin into exec_dirs. Please also refer to I3abd1c173121dc8abb5738d1879db8ac9a98b690 for discussion on the approach to fix the bug. Change-Id: Iade8b5b09bb53018485c85f8372fb94dbc2ad2da Closes-Bug: 1435971 --- lib/neutron-legacy | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 3072d0a0ed..9f26239010 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -1145,6 +1145,8 @@ function _neutron_setup_rootwrap { sudo install -o root -g root -m 644 $NEUTRON_DIR/etc/rootwrap.conf $Q_RR_CONF_FILE fi sudo sed -e "s:^filters_path=.*$:filters_path=$Q_CONF_ROOTWRAP_D:" -i $Q_RR_CONF_FILE + sudo sed -e 's:^exec_dirs=\(.*\)$:exec_dirs=\1,/usr/local/bin:' -i $Q_RR_CONF_FILE + # Specify ``rootwrap.conf`` as first parameter to neutron-rootwrap ROOTWRAP_SUDOER_CMD="$NEUTRON_ROOTWRAP $Q_RR_CONF_FILE *" ROOTWRAP_DAEMON_SUDOER_CMD="$NEUTRON_ROOTWRAP-daemon $Q_RR_CONF_FILE" From 6cdb2e0f1a2c2429587f1e9187344cb26eb31812 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Thu, 23 Apr 2015 09:12:59 -0700 Subject: [PATCH 0185/2941] Remove the lvm.conf filter during cleanup This avoids us leaving the filter in the global lvm config. Without cleaning this up, we can hit some failures to run stack.sh because devices are excluded that we need to be able to see. This resets it to what it was before when we do a cleanup. Also, do this before we add the line, so we don't add multiple lines on successive runs. 
Closes-bug: #1437998 Change-Id: Idbf8a06b723f79ef16a7c175ee77a8c25f813244 --- lib/cinder_backends/lvm | 1 + lib/lvm | 10 +++++++++- unstack.sh | 1 + 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/lib/cinder_backends/lvm b/lib/cinder_backends/lvm index d369c0c840..35ad209db7 100644 --- a/lib/cinder_backends/lvm +++ b/lib/cinder_backends/lvm @@ -39,6 +39,7 @@ function cleanup_cinder_backend_lvm { # Campsite rule: leave behind a volume group at least as clean as we found it clean_lvm_volume_group $VOLUME_GROUP_NAME-$be_name + clean_lvm_filter } # configure_cinder_backend_lvm - Set config files, create data dirs, etc diff --git a/lib/lvm b/lib/lvm index 54976a36cd..1fe2683e65 100644 --- a/lib/lvm +++ b/lib/lvm @@ -145,6 +145,13 @@ function init_default_lvm_volume_group { fi } +# clean_lvm_filter() Remove the filter rule set in set_lvm_filter() +# +# Usage: clean_lvm_filter() +function clean_lvm_filter { + sudo sed -i "s/^.*# from devstack$//" /etc/lvm/lvm.conf +} + # set_lvm_filter() Gather all devices configured for LVM and # use them to build a global device filter # set_lvm_filter() Create a device filter @@ -154,7 +161,7 @@ function init_default_lvm_volume_group { # # Usage: set_lvm_filter() function set_lvm_filter { - local filter_suffix='"r|.*|" ]' + local filter_suffix='"r|.*|" ] # from devstack' local filter_string="global_filter = [ " local pv local vg @@ -167,6 +174,7 @@ function set_lvm_filter { done filter_string=$filter_string$filter_suffix + clean_lvm_filter sudo sed -i "/# global_filter = \[*\]/a\ $global_filter$filter_string" /etc/lvm/lvm.conf echo_summary "set lvm.conf device global_filter to: $filter_string" } diff --git a/unstack.sh b/unstack.sh index 30981fd3c6..ed7e6175ca 100755 --- a/unstack.sh +++ b/unstack.sh @@ -192,3 +192,4 @@ fi # BUG: maybe it doesn't exist? We should isolate this further down. 
clean_lvm_volume_group $DEFAULT_VOLUME_GROUP_NAME || /bin/true +clean_lvm_filter From 2443c37c1dc2b62bb4c423357f3cae33b6e92762 Mon Sep 17 00:00:00 2001 From: Yalei Wang Date: Fri, 24 Apr 2015 10:58:52 +0800 Subject: [PATCH 0186/2941] Add n-cauth into defalut services n-cauth work with n-novnc to provide the vnc service for VMs. Change-Id: Ia5c53aaaf7fe4f881d525a31b097b167fdb8e5c8 Closes-Bug: #1447774 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index 0b93d32358..2a49ea5ef5 100644 --- a/stackrc +++ b/stackrc @@ -49,7 +49,7 @@ if ! isset ENABLED_SERVICES ; then # Keystone - nothing works without keystone ENABLED_SERVICES=key # Nova - services to support libvirt based openstack clouds - ENABLED_SERVICES+=,n-api,n-cpu,n-net,n-cond,n-sch,n-novnc,n-crt + ENABLED_SERVICES+=,n-api,n-cpu,n-net,n-cond,n-sch,n-novnc,n-crt,n-cauth # Glance services needed for Nova ENABLED_SERVICES+=,g-api,g-reg # Cinder From e4af92987a882dc2f7bb48527d0bcdbaa2427d4a Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 28 Apr 2015 08:57:57 -0400 Subject: [PATCH 0187/2941] fix warn function The warn function was putting content into a side log file which made it kind of hard to keep an eye on when warnings were actually being issued. Let's just get this into the main output stream. The calling of the warn function in git_timed was also incorrect, so the output would not have been what we expected. This solves that as well. This will hopefully give us trackable data about how often we need to recover from git clone errors. 
Change-Id: Iee0d2df7fb788a4d34044d29ab10afdcafb9bb5a --- functions-common | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/functions-common b/functions-common index 24a462ac21..a3b8b92f5f 100644 --- a/functions-common +++ b/functions-common @@ -174,10 +174,7 @@ function warn { local xtrace=$(set +o | grep xtrace) set +o xtrace local msg="[WARNING] ${BASH_SOURCE[2]}:$1 $2" - echo $msg 1>&2; - if [[ -n ${LOGDIR} ]]; then - echo $msg >> "${LOGDIR}/error.log" - fi + echo $msg $xtrace return $exitcode } @@ -509,7 +506,7 @@ function git_timed { fi count=$(($count + 1)) - warn "timeout ${count} for git call: [git $@]" + warn $LINENO "timeout ${count} for git call: [git $@]" if [ $count -eq 3 ]; then die $LINENO "Maximum of 3 git retries reached" fi From 7efba991f78667c19d13431524c95f77660781c5 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 28 Apr 2015 13:15:22 -0400 Subject: [PATCH 0188/2941] don't source openrc There is actually no reason why we need openrc for these tests, don't source it as it prevents some ip math errors from randomly killing tests. Change-Id: Iface7c21898d92e14e840379938b25844cd85565 --- tests/test_functions.sh | 3 --- 1 file changed, 3 deletions(-) diff --git a/tests/test_functions.sh b/tests/test_functions.sh index a7914f7a1b..f8e2c9e2e4 100755 --- a/tests/test_functions.sh +++ b/tests/test_functions.sh @@ -7,9 +7,6 @@ TOP=$(cd $(dirname "$0")/.. && pwd) # Import common functions source $TOP/functions -# Import configuration -source $TOP/openrc - source $TOP/tests/unittest.sh echo "Testing die_if_not_set()" From 6e137abbfe66837bc7456425472b53d067591d24 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 29 Apr 2015 08:22:24 -0400 Subject: [PATCH 0189/2941] clean up logging around run_process We do a bunch of exec magic unwind in run_process that leads to a lot of confusing lines in the logs under xtrace. Instead, disable xtrace through these parts to ensure that the flow at the end of the day makes more sense. 
Change-Id: I91e02465240e704a1a0c0036f5073c0295be018e --- functions-common | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/functions-common b/functions-common index a3b8b92f5f..f2e7076ccf 100644 --- a/functions-common +++ b/functions-common @@ -1137,6 +1137,10 @@ function zypper_install { # the command. # _run_process service "command-line" [group] function _run_process { + # disable tracing through the exec redirects, it's just confusing in the logs. + xtrace=$(set +o | grep xtrace) + set +o xtrace + local service=$1 local command="$2" local group=$3 @@ -1160,6 +1164,9 @@ function _run_process { export PYTHONUNBUFFERED=1 fi + # reenable xtrace before we do *real* work + $xtrace + # Run under ``setsid`` to force the process to become a session and group leader. # The pid saved can be used with pkill -g to get the entire process group. if [[ -n "$group" ]]; then From 8d558c8c270c36a78aeb23f16da084508916a89e Mon Sep 17 00:00:00 2001 From: sridhargaddam Date: Mon, 4 May 2015 14:04:16 +0000 Subject: [PATCH 0190/2941] Set local_ip only when TENANT_TUNNELS are enabled In an installation with VLAN tenant networks, devstack should not configure the local_ip (which is applicable only when tenant_tunnels are used). This is causing failures in Neutron for an IPv6 only setup. This patch addresses this issue, but configuring the local_ip only when TENANT_TUNNELS are enabled. 
Related-Bug: #1447693 Change-Id: I0e2a2d8b6ce0ad87f6c0d318ac522dbab50d44ee --- lib/neutron_plugins/ml2 | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index abe6ea70f5..88537774b7 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -104,8 +104,10 @@ function neutron_plugin_configure_service { iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver fi - # Since we enable the tunnel TypeDrivers, also enable a local_ip - iniset /$Q_PLUGIN_CONF_FILE ovs local_ip $TUNNEL_ENDPOINT_IP + if [[ "$ENABLE_TENANT_TUNNELS" == "True" ]]; then + # Set local_ip if TENANT_TUNNELS are enabled. + iniset /$Q_PLUGIN_CONF_FILE ovs local_ip $TUNNEL_ENDPOINT_IP + fi populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 mechanism_drivers=$Q_ML2_PLUGIN_MECHANISM_DRIVERS From dfcc3871c022516330b6afb6c74751ac42a87480 Mon Sep 17 00:00:00 2001 From: Joe D'Andrea Date: Wed, 29 Apr 2015 15:39:17 -0400 Subject: [PATCH 0191/2941] cinder setup now refers to CINDER_VOLUME_CLEAR and volume_clear CINDER_SECURE_DELETE previously iniset volume_clear to none as a side effect, however secure_delete is not documented in cinder. Now using CINDER_VOLUME_CLEAR outright. CINDER_SECURE_DELETE is supported but now deprecated. Change-Id: Ic8694cf16654c23b27d23853a9f06ddf1050fa93 Closes-Bug: #1450159 --- lib/cinder | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/lib/cinder b/lib/cinder index 6439903953..eb0e1d7600 100644 --- a/lib/cinder +++ b/lib/cinder @@ -77,9 +77,20 @@ CINDER_ENABLED_BACKENDS=${CINDER_ENABLED_BACKENDS:-lvm:lvmdriver-1} # Should cinder perform secure deletion of volumes? -# Defaults to true, can be set to False to avoid this bug when testing: +# Defaults to zero. Can also be set to none or shred. +# This was previously CINDER_SECURE_DELETE (True or False). +# Equivalents using CINDER_VOLUME_CLEAR are zero and none, respectively. 
+# Set to none to avoid this bug when testing: # https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1023755 -CINDER_SECURE_DELETE=$(trueorfalse True CINDER_SECURE_DELETE) +if [[ -n $CINDER_SECURE_DELETE ]]; then + CINDER_SECURE_DELETE=$(trueorfalse True CINDER_SECURE_DELETE) + if [[ $CINDER_SECURE_DELETE == "False" ]]; then + CINDER_VOLUME_CLEAR_DEFAULT="none" + fi + DEPRECATED_TEXT="$DEPRECATED_TEXT\nConfigure secure Cinder volume deletion using CINDER_VOLUME_CLEAR instead of CINDER_SECURE_DELETE.\n" +fi +CINDER_VOLUME_CLEAR=${CINDER_VOLUME_CLEAR:-${CINDER_VOLUME_CLEAR_DEFAULT:-zero}} +CINDER_VOLUME_CLEAR=$(echo ${CINDER_VOLUME_CLEAR} | tr '[:upper:]' '[:lower:]') # Cinder reports allocations back to the scheduler on periodic intervals # it turns out we can get an "out of space" issue when we run tests too @@ -256,9 +267,8 @@ function configure_cinder { iniset_rpc_backend cinder $CINDER_CONF - if [[ "$CINDER_SECURE_DELETE" == "False" ]]; then - iniset $CINDER_CONF DEFAULT secure_delete False - iniset $CINDER_CONF DEFAULT volume_clear none + if [[ "$CINDER_VOLUME_CLEAR" == "none" ]] || [[ "$CINDER_VOLUME_CLEAR" == "zero" ]] || [[ "$CINDER_VOLUME_CLEAR" == "shred" ]]; then + iniset $CINDER_CONF DEFAULT volume_clear $CINDER_VOLUME_CLEAR fi # Format logging From d5537c1dc835413f1911ab797e3007d85322eace Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Thu, 30 Apr 2015 21:10:48 -0400 Subject: [PATCH 0192/2941] Add toggle to run Nova API and EC2-API under Apache2 Inspired by keystone and rcbops-cookbooks's nova scripts, this review adds apache2 templates for two of the Nova services. Also add code in lib/nova to switch between the old and new ways to these two services. The patch depends on the Nova review mentioned below as the two scripts that are needed will be in Nova's repository. TODO for later would be to switch on NOVA_USE_MOD_WSGI when ENABLE_HTTPD_MOD_WSGI_SERVICES is switched on. 
Related Nova blueprint: https://blueprints.launchpad.net/nova/+spec/run-nova-services-under-apache2 Depends-On: Idd7d3d1b3cc5770cdecea7afe6db3c89d5b2c0d0 Change-Id: I9fc0c601db2776d3e9084be84065e728e3f5d414 --- README.md | 4 ++ files/apache-nova-api.template | 16 +++++ files/apache-nova-ec2-api.template | 16 +++++ lib/nova | 101 ++++++++++++++++++++++++++++- 4 files changed, 135 insertions(+), 2 deletions(-) create mode 100644 files/apache-nova-api.template create mode 100644 files/apache-nova-ec2-api.template diff --git a/README.md b/README.md index 04f5fd9711..9853c3d88d 100644 --- a/README.md +++ b/README.md @@ -149,6 +149,10 @@ Example (Keystone): KEYSTONE_USE_MOD_WSGI="True" +Example (Nova): + + NOVA_USE_MOD_WSGI="True" + Example (Swift): SWIFT_USE_MOD_WSGI="True" diff --git a/files/apache-nova-api.template b/files/apache-nova-api.template new file mode 100644 index 0000000000..70ccedddc8 --- /dev/null +++ b/files/apache-nova-api.template @@ -0,0 +1,16 @@ +Listen %PUBLICPORT% + + + WSGIDaemonProcess nova-api processes=5 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% + WSGIProcessGroup nova-api + WSGIScriptAlias / %PUBLICWSGI% + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + = 2.4> + ErrorLogFormat "%{cu}t %M" + + ErrorLog /var/log/%APACHE_NAME%/nova-api.log + %SSLENGINE% + %SSLCERTFILE% + %SSLKEYFILE% + \ No newline at end of file diff --git a/files/apache-nova-ec2-api.template b/files/apache-nova-ec2-api.template new file mode 100644 index 0000000000..ae4cf94a38 --- /dev/null +++ b/files/apache-nova-ec2-api.template @@ -0,0 +1,16 @@ +Listen %PUBLICPORT% + + + WSGIDaemonProcess nova-ec2-api processes=5 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% + WSGIProcessGroup nova-ec2-api + WSGIScriptAlias / %PUBLICWSGI% + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + = 2.4> + ErrorLogFormat "%{cu}t %M" + + ErrorLog /var/log/%APACHE_NAME%/nova-ec2-api.log + %SSLENGINE% + %SSLCERTFILE% + %SSLKEYFILE% + \ No newline at end 
of file diff --git a/lib/nova b/lib/nova index 807dfceeae..768346a983 100644 --- a/lib/nova +++ b/lib/nova @@ -16,6 +16,7 @@ # # - install_nova # - configure_nova +# - _config_nova_apache_wsgi # - create_nova_conf # - init_nova # - start_nova @@ -62,6 +63,15 @@ NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini} # Expect to remove in L or M. NOVA_API_VERSION=${NOVA_API_VERSION-default} +if is_suse; then + NOVA_WSGI_DIR=${NOVA_WSGI_DIR:-/srv/www/htdocs/nova} +else + NOVA_WSGI_DIR=${NOVA_WSGI_DIR:-/var/www/nova} +fi + +# Toggle for deploying Nova-API under HTTPD + mod_wsgi +NOVA_USE_MOD_WSGI=${NOVA_USE_MOD_WSGI:-False} + if is_ssl_enabled_service "nova" || is_service_enabled tls-proxy; then NOVA_SERVICE_PROTOCOL="https" EC2_SERVICE_PROTOCOL="https" @@ -223,6 +233,64 @@ function cleanup_nova { #fi } +# _cleanup_nova_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file +function _cleanup_nova_apache_wsgi { + sudo rm -f $NOVA_WSGI_DIR/* + sudo rm -f $(apache_site_config_for nova-api) + sudo rm -f $(apache_site_config_for nova-ec2-api) +} + +# _config_nova_apache_wsgi() - Set WSGI config files of Keystone +function _config_nova_apache_wsgi { + sudo mkdir -p $NOVA_WSGI_DIR + + local nova_apache_conf=$(apache_site_config_for nova-api) + local nova_ec2_apache_conf=$(apache_site_config_for nova-ec2-api) + local nova_ssl="" + local nova_certfile="" + local nova_keyfile="" + local nova_api_port=$NOVA_SERVICE_PORT + local nova_ec2_api_port=$EC2_SERVICE_PORT + local venv_path="" + + if is_ssl_enabled_service nova-api; then + nova_ssl="SSLEngine On" + nova_certfile="SSLCertificateFile $NOVA_SSL_CERT" + nova_keyfile="SSLCertificateKeyFile $NOVA_SSL_KEY" + fi + if [[ ${USE_VENV} = True ]]; then + venv_path="python-path=${PROJECT_VENV["nova"]}/lib/python2.7/site-packages" + fi + + # copy proxy vhost and wsgi helper files + sudo cp $NOVA_DIR/nova/wsgi/nova-api.py $NOVA_WSGI_DIR/nova-api + sudo cp $NOVA_DIR/nova/wsgi/nova-ec2-api.py 
$NOVA_WSGI_DIR/nova-ec2-api + + sudo cp $FILES/apache-nova-api.template $nova_apache_conf + sudo sed -e " + s|%PUBLICPORT%|$nova_api_port|g; + s|%APACHE_NAME%|$APACHE_NAME|g; + s|%PUBLICWSGI%|$NOVA_WSGI_DIR/nova-api|g; + s|%SSLENGINE%|$nova_ssl|g; + s|%SSLCERTFILE%|$nova_certfile|g; + s|%SSLKEYFILE%|$nova_keyfile|g; + s|%USER%|$STACK_USER|g; + s|%VIRTUALENV%|$venv_path|g + " -i $nova_apache_conf + + sudo cp $FILES/apache-nova-ec2-api.template $nova_ec2_apache_conf + sudo sed -e " + s|%PUBLICPORT%|$nova_ec2_api_port|g; + s|%APACHE_NAME%|$APACHE_NAME|g; + s|%PUBLICWSGI%|$NOVA_WSGI_DIR/nova-ec2-api|g; + s|%SSLENGINE%|$nova_ssl|g; + s|%SSLCERTFILE%|$nova_certfile|g; + s|%SSLKEYFILE%|$nova_keyfile|g; + s|%USER%|$STACK_USER|g; + s|%VIRTUALENV%|$venv_path|g + " -i $nova_ec2_apache_conf +} + # configure_nova() - Set config files, create data dirs, etc function configure_nova { # Put config files in ``/etc/nova`` for everyone to find @@ -453,12 +521,16 @@ function create_nova_conf { iniset $NOVA_CONF DEFAULT force_config_drive "$FORCE_CONFIG_DRIVE" fi # Format logging - if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then + if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ] && [ "$NOVA_USE_MOD_WSGI" == "False" ] ; then setup_colorized_logging $NOVA_CONF DEFAULT else # Show user_name and project_name instead of user_id and project_id iniset $NOVA_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(user_name)s %(project_name)s] %(instance)s%(message)s" fi + if [ "$NOVA_USE_MOD_WSGI" == "True" ]; then + _config_nova_apache_wsgi + fi + if is_service_enabled ceilometer; then iniset $NOVA_CONF DEFAULT instance_usage_audit "True" iniset $NOVA_CONF DEFAULT instance_usage_audit_period "hour" @@ -655,6 +727,13 @@ function install_nova { git_clone $NOVA_REPO $NOVA_DIR $NOVA_BRANCH setup_develop $NOVA_DIR sudo install -D -m 0644 -o $STACK_USER {$NOVA_DIR/tools/,/etc/bash_completion.d/}nova-manage.bash_completion 
+ + if [ "$NOVA_USE_MOD_WSGI" == "True" ]; then + install_apache_wsgi + if is_ssl_enabled_service "nova-api"; then + enable_mod_ssl + fi + fi } # start_nova_api() - Start the API process ahead of other things @@ -671,7 +750,18 @@ function start_nova_api { local old_path=$PATH export PATH=$NOVA_BIN_DIR:$PATH - run_process n-api "$NOVA_BIN_DIR/nova-api" + # If the site is not enabled then we are in a grenade scenario + local enabled_site_file=$(apache_site_config_for nova-api) + if [ -f ${enabled_site_file} ] && [ "$NOVA_USE_MOD_WSGI" == "True" ]; then + enable_apache_site nova-api + enable_apache_site nova-ec2-api + restart_apache_server + tail_log nova /var/log/$APACHE_NAME/nova-api.log + tail_log nova /var/log/$APACHE_NAME/nova-ec2-api.log + else + run_process n-api "$NOVA_BIN_DIR/nova-api" + fi + echo "Waiting for nova-api to start..." if ! wait_for_service $SERVICE_TIMEOUT $service_protocol://$SERVICE_HOST:$service_port; then die $LINENO "nova-api did not start" @@ -780,6 +870,13 @@ function stop_nova_compute { } function stop_nova_rest { + if [ "$NOVA_USE_MOD_WSGI" == "True" ]; then + disable_apache_site nova-api + disable_apache_site nova-ec2-api + restart_apache_server + else + stop_process n-api + fi # Kill the nova screen windows # Some services are listed here twice since more than one instance # of a service may be running in certain configs. From ee3d2a8ece24efe8ee8b0304c133574967eb60d3 Mon Sep 17 00:00:00 2001 From: Gregory Haynes Date: Tue, 5 May 2015 22:14:24 +0000 Subject: [PATCH 0193/2941] Import xattr with sudo early on xattr fails to import due to being unable to build cffi bindings unless it is imported as root beforehand. 
Depends-On: I6a9d64277974933ae9b7bbe2a40b8a0eb0fa8c6a Change-Id: I835e55bbafc7e0640987e6f3c8ee0c873f875ee0 Closes-Bug: #1451992 --- stack.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/stack.sh b/stack.sh index e5ee0dc03f..3925bb0ccf 100755 --- a/stack.sh +++ b/stack.sh @@ -708,6 +708,12 @@ source $TOP_DIR/tools/fixup_stuff.sh # Pre-build some problematic wheels if [[ -n ${WHEELHOUSE:-} && ! -d ${WHEELHOUSE:-} ]]; then source $TOP_DIR/tools/build_wheels.sh + + # Due to https://bugs.launchpad.net/swift/+bug/1451992 we have to import + # this package with root once so the CFFI bindings can be built. We have + # to therefore install it so we can import it. + pip_install xattr + sudo python -c "import xattr" fi From e8a2fa431b4b432c5a05da0cab6c4af5999e5aee Mon Sep 17 00:00:00 2001 From: Cyril Roelandt Date: Wed, 6 May 2015 17:30:48 +0200 Subject: [PATCH 0194/2941] lib/swift: the s3_token middleware should be provided by keystonemiddleware Recently, keystoneclient.middleware has been moved from keystoneclient to keystonemiddleware. The latter should be used. Change-Id: Ib9489a21b988b32fc17399c08eeb60862efae034 Closes-Bug: #1452315 --- lib/swift | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/swift b/lib/swift index 456dde4c8d..820042d972 100644 --- a/lib/swift +++ b/lib/swift @@ -439,7 +439,7 @@ function configure_swift { if is_service_enabled swift3; then cat <>${SWIFT_CONFIG_PROXY_SERVER} [filter:s3token] -paste.filter_factory = keystoneclient.middleware.s3_token:filter_factory +paste.filter_factory = keystonemiddleware.s3_token:filter_factory auth_port = ${KEYSTONE_AUTH_PORT} auth_host = ${KEYSTONE_AUTH_HOST} auth_protocol = ${KEYSTONE_AUTH_PROTOCOL} From 5a59ac7d43bb10a5bbc912b94edea19e1009b675 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 6 May 2015 09:48:54 -0400 Subject: [PATCH 0195/2941] create a more generic work around for cffi & wheels This is an attempt to fix the cffi vs. 
wheels bug in a more generic way by just ensuring that pip has installed cffi with a pip understood version into the venv before we try to do any builds. Related-Bug: #1451992 Change-Id: Ibc58668c53933033405b40f79b0e9ffc73a01a6f --- stack.sh | 6 ------ tools/build_wheels.sh | 12 ++++++++++++ 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/stack.sh b/stack.sh index 3925bb0ccf..e5ee0dc03f 100755 --- a/stack.sh +++ b/stack.sh @@ -708,12 +708,6 @@ source $TOP_DIR/tools/fixup_stuff.sh # Pre-build some problematic wheels if [[ -n ${WHEELHOUSE:-} && ! -d ${WHEELHOUSE:-} ]]; then source $TOP_DIR/tools/build_wheels.sh - - # Due to https://bugs.launchpad.net/swift/+bug/1451992 we have to import - # this package with root once so the CFFI bindings can be built. We have - # to therefore install it so we can import it. - pip_install xattr - sudo python -c "import xattr" fi diff --git a/tools/build_wheels.sh b/tools/build_wheels.sh index c57568fa64..14c2999c8f 100755 --- a/tools/build_wheels.sh +++ b/tools/build_wheels.sh @@ -60,6 +60,18 @@ virtualenv $TMP_VENV_PATH # Install modern pip and wheel PIP_VIRTUAL_ENV=$TMP_VENV_PATH pip_install -U pip wheel +# BUG: cffi has a lot of issues. It has no stable ABI, if installed +# code is built with a different ABI than the one that's detected at +# load time, it tries to compile on the fly for the new ABI in the +# install location (which will probably be /usr and not +# writable). Also cffi is often included via setup_requires by +# packages, which have different install rules (allowing betas) than +# pip has. +# +# Because of this we must pip install cffi into the venv to build +# wheels. 
+PIP_VIRTUAL_ENV=$TMP_VENV_PATH pip_install_gr cffi + # ``VENV_PACKAGES`` is a list of packages we want to pre-install VENV_PACKAGE_FILE=$FILES/venv-requirements.txt if [[ -r $VENV_PACKAGE_FILE ]]; then From 52a3bebcfcb09ec2b78d0357f1a074458ab04053 Mon Sep 17 00:00:00 2001 From: Clint Byrum Date: Tue, 5 May 2015 15:00:03 -0700 Subject: [PATCH 0196/2941] Do not set OS_CACERT if there is no CA cert In openrc, if we set OS_CACERT, some things will expect it to be there in pre-flight checks. But it may very well be missing. This "fails closed" because if we find the file, we try to use it, but if we don't find the file, and the user thought we should be using it, we'll just not be able to verify the server's name, and the libs will fail on that. Change-Id: Ia5d06afa74bc645c2f19711cfa37e57a377c329b Closes-Bug: #1452036 --- openrc | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/openrc b/openrc index aec8a2a642..64faa58a3a 100644 --- a/openrc +++ b/openrc @@ -78,8 +78,14 @@ export OS_IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-2.0} # export OS_AUTH_URL=$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:5000/v${OS_IDENTITY_API_VERSION} -# Set the pointer to our CA certificate chain. Harmless if TLS is not used. -export OS_CACERT=${OS_CACERT:-$INT_CA_DIR/ca-chain.pem} +# Set OS_CACERT to a default CA certificate chain if it exists. +if [[ ! -v OS_CACERT ]] ; then + DEFAULT_OS_CACERT=$INT_CA_DIR/ca-chain.pem + # If the file does not exist, this may confuse preflight sanity checks + if [ -e $DEFAULT_OS_CACERT ] ; then + export OS_CACERT=$DEFAULT_OS_CACERT + fi +fi # Currently novaclient needs you to specify the *compute api* version. This # needs to match the config of your catalog returned by Keystone. 
From 168b7c226cd17fa75eecc0e6ce4c81d001747f78 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 7 May 2015 08:57:28 -0400 Subject: [PATCH 0197/2941] dump iptables in the worlddump If we fail during devstack / grenade runs, it would be nice to have the map of iptables that are currently active as well. This makes it handy to start figuring out what's going on when test servers don't ping. Change-Id: Ia31736ef2cb0221586d30c089473dfdc1db90e23 --- tools/worlddump.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/tools/worlddump.py b/tools/worlddump.py index 8dd455c274..cb32510526 100755 --- a/tools/worlddump.py +++ b/tools/worlddump.py @@ -61,6 +61,17 @@ def disk_space(): print dfraw +def iptables_dump(): + tables = ['filter', 'nat', 'mangle'] + print """ +IP Tables Dump +=============== +""" + for table in tables: + print os.popen("sudo iptables --line-numbers -L -nv -t %s" + % table).read() + + def process_list(): print """ Process Listing @@ -79,6 +90,7 @@ def main(): os.dup2(f.fileno(), sys.stdout.fileno()) disk_space() process_list() + iptables_dump() if __name__ == '__main__': From 9fd75f57fd8bdbd1926b1942462d439f9e496204 Mon Sep 17 00:00:00 2001 From: Jens Rosenboom Date: Mon, 23 Mar 2015 11:45:00 +0100 Subject: [PATCH 0198/2941] Update default cirros version Update the default CIRROS_VERSION to 0.3.4, which has better support for IPv6 and some other bugfixes. 
Co-Authored-By: Scott Moser Change-Id: I03ee6e1403680fb6c421225a7cadaf8a82edf702 Depends-On: Iac9f108d947ff4a51f99c6e8ad9d1ac5b32c000a --- stackrc | 2 +- tools/xen/README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/stackrc b/stackrc index abedb001d1..076efc17b4 100644 --- a/stackrc +++ b/stackrc @@ -536,7 +536,7 @@ esac #IMAGE_URLS="http://smoser.brickies.net/ubuntu/ttylinux-uec/ttylinux-uec-amd64-11.2_2.6.35-15_1.tar.gz" # old ttylinux-uec image #IMAGE_URLS="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-disk.img" # cirros full disk image -CIRROS_VERSION=${CIRROS_VERSION:-"0.3.2"} +CIRROS_VERSION=${CIRROS_VERSION:-"0.3.4"} CIRROS_ARCH=${CIRROS_ARCH:-"x86_64"} # Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of diff --git a/tools/xen/README.md b/tools/xen/README.md index c8f47be393..61694e9616 100644 --- a/tools/xen/README.md +++ b/tools/xen/README.md @@ -97,7 +97,7 @@ Of course, use real passwords if this machine is exposed. # Download a vhd and a uec image IMAGE_URLS="\ https://github.com/downloads/citrix-openstack/warehouse/cirros-0.3.0-x86_64-disk.vhd.tgz,\ - http://download.cirros-cloud.net/0.3.2/cirros-0.3.2-x86_64-uec.tar.gz" + http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-uec.tar.gz" # Explicitly set virt driver VIRT_DRIVER=xenserver From d4c89289f9d4bf88c065dca85a46c9b08464b56c Mon Sep 17 00:00:00 2001 From: Waldemar Znoinski Date: Thu, 7 May 2015 17:14:21 +0100 Subject: [PATCH 0199/2941] Set datapath to $OVS_DATAPATH_TYPE for bridges This change extends devstack to configure the br-ex, br- and Xenserver's br-$GUEST_INTERFACE_DEFAULT datapaths when OVS_DATAPATH_TYPE is set. 
Change-Id: I71e590de86e7526e8423140463752d6b3ad14214 Closes-Bug: #1416444 --- lib/neutron_plugins/openvswitch_agent | 4 ++-- lib/neutron_plugins/ovs_base | 18 +++++++++++++----- 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent index 1d24f3b837..2a05e2dcfa 100644 --- a/lib/neutron_plugins/openvswitch_agent +++ b/lib/neutron_plugins/openvswitch_agent @@ -59,7 +59,7 @@ function neutron_plugin_configure_plugin_agent { OVS_BRIDGE_MAPPINGS=$PHYSICAL_NETWORK:$OVS_PHYSICAL_BRIDGE # Configure bridge manually with physical interface as port for multi-node - sudo ovs-vsctl --no-wait -- --may-exist add-br $OVS_PHYSICAL_BRIDGE + _neutron_ovs_base_add_bridge $OVS_PHYSICAL_BRIDGE fi if [[ "$OVS_BRIDGE_MAPPINGS" != "" ]]; then iniset /$Q_PLUGIN_CONF_FILE ovs bridge_mappings $OVS_BRIDGE_MAPPINGS @@ -92,7 +92,7 @@ function neutron_plugin_configure_plugin_agent { # Set up domU's L2 agent: # Create a bridge "br-$GUEST_INTERFACE_DEFAULT" - sudo ovs-vsctl --no-wait -- --may-exist add-br "br-$GUEST_INTERFACE_DEFAULT" + _neutron_ovs_base_add_bridge "br-$GUEST_INTERFACE_DEFAULT" # Add $GUEST_INTERFACE_DEFAULT to that bridge sudo ovs-vsctl add-port "br-$GUEST_INTERFACE_DEFAULT" $GUEST_INTERFACE_DEFAULT diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base index 51999c60e4..5ecca81ce9 100644 --- a/lib/neutron_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -16,13 +16,21 @@ function is_neutron_ovs_base_plugin { return 0 } +function _neutron_ovs_base_add_bridge { + local bridge=$1 + local addbr_cmd="sudo ovs-vsctl --no-wait -- --may-exist add-br $bridge" + + if [ "$OVS_DATAPATH_TYPE" != "" ] ; then + addbr_cmd="$addbr_cmd -- set Bridge $bridge datapath_type=${OVS_DATAPATH_TYPE}" + fi + + $addbr_cmd +} + function _neutron_ovs_base_setup_bridge { local bridge=$1 neutron-ovs-cleanup - sudo ovs-vsctl --no-wait -- --may-exist add-br $bridge - if [[ $OVS_DATAPATH_TYPE != "" ]]; then 
- sudo ovs-vsctl set Bridge $bridge datapath_type=${OVS_DATAPATH_TYPE} - fi + _neutron_ovs_base_add_bridge $bridge sudo ovs-vsctl --no-wait br-set-external-id $bridge bridge-id $bridge } @@ -93,7 +101,7 @@ function _neutron_ovs_base_configure_l3_agent { sudo ip link set $Q_PUBLIC_VETH_EX up sudo ip addr flush dev $Q_PUBLIC_VETH_EX else - sudo ovs-vsctl -- --may-exist add-br $PUBLIC_BRIDGE + _neutron_ovs_base_add_bridge $PUBLIC_BRIDGE sudo ovs-vsctl br-set-external-id $PUBLIC_BRIDGE bridge-id $PUBLIC_BRIDGE fi } From 1b5a49829eebea87b2cd1fd057f808612f72e7cf Mon Sep 17 00:00:00 2001 From: Matthew Gilliard Date: Fri, 10 Apr 2015 08:42:22 +0100 Subject: [PATCH 0200/2941] Set live_migrate_paused_instances=True in tempest.conf Live migration of paused instances is a new Nova feature in Kilo, and will not be backported. The compute_feature_enabled.live_migrate_paused_instances flag defaults to False for this reason, but can be set to True here. The tempest config option and this change can both be removed at Juno-EOL. The related Tempest change: I5c6fd3de7ea45d1851bb40037c64ad7fb5e6dc48 Change-Id: I3a83e43d252b88c234438a224e2fbebc0a81eaff Related-Bug: #1305062 --- lib/tempest | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/tempest b/lib/tempest index cd8fbd725f..6ce245aef1 100644 --- a/lib/tempest +++ b/lib/tempest @@ -364,6 +364,8 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute-feature-enabled api_extensions $compute_api_extensions # TODO(mriedem): Remove the preserve_ports flag when Juno is end of life. iniset $TEMPEST_CONFIG compute-feature-enabled preserve_ports True + # TODO(gilliard): Remove the live_migrate_paused_instances flag when Juno is end of life. 
+ iniset $TEMPEST_CONFIG compute-feature-enabled live_migrate_paused_instances True # Network iniset $TEMPEST_CONFIG network api_version 2.0 From 0a9d03d5059356a9f494ad331b548cc74d85d75f Mon Sep 17 00:00:00 2001 From: Yuki Nishiwaki Date: Fri, 8 May 2015 16:29:55 +0900 Subject: [PATCH 0201/2941] Move install_infra before execute build_wheels.sh The pip_install_gr function in build_wheels.sh use requirements project. So requirements project must exist before execute build_wheels.sh. Then we moved install_infra function which install requirements project. Change-Id: I8f80ecafff0f7e1942731379b70bccac338ea3b3 Closes-Bug: 1453012 --- stack.sh | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/stack.sh b/stack.sh index e5ee0dc03f..f0aafafb3a 100755 --- a/stack.sh +++ b/stack.sh @@ -705,6 +705,9 @@ source $TOP_DIR/tools/fixup_stuff.sh # Virtual Environment # ------------------- +# Install required infra support libraries +install_infra + # Pre-build some problematic wheels if [[ -n ${WHEELHOUSE:-} && ! -d ${WHEELHOUSE:-} ]]; then source $TOP_DIR/tools/build_wheels.sh @@ -713,10 +716,6 @@ fi # Extras Pre-install # ------------------ - -# Install required infra support libraries -install_infra - # Phase: pre-install run_phase stack pre-install From 41309002fa1a1c00f8485ef71acdec93fbfbd014 Mon Sep 17 00:00:00 2001 From: Vladyslav Drok Date: Wed, 29 Apr 2015 13:36:52 +0300 Subject: [PATCH 0202/2941] Add new options to baremetal config section This change adds setting of deploy_img_dir and node_uuid baremetal config options during tempest configuration to enable ironic w/o glance scenario testing. 
Needed for change I171e85cb8a21fae4da45028f1f798988a36f6c95 Change-Id: I6fd393390389c4c643b93198fa461fc2adc415ae --- lib/ironic | 8 +++++++- lib/tempest | 2 ++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/lib/ironic b/lib/ironic index 4a37f0aafa..7493c3cab2 100644 --- a/lib/ironic +++ b/lib/ironic @@ -58,6 +58,7 @@ IRONIC_HW_EPHEMERAL_DISK=${IRONIC_HW_EPHEMERAL_DISK:-0} IRONIC_IPMIINFO_FILE=${IRONIC_IPMIINFO_FILE:-$IRONIC_DATA_DIR/hardware_info} # Set up defaults for functional / integration testing +IRONIC_NODE_UUID=${IRONIC_NODE_UUID:-`uuidgen`} IRONIC_SCRIPTS_DIR=${IRONIC_SCRIPTS_DIR:-$TOP_DIR/tools/ironic/scripts} IRONIC_TEMPLATES_DIR=${IRONIC_TEMPLATES_DIR:-$TOP_DIR/tools/ironic/templates} IRONIC_BAREMETAL_BASIC_OPS=$(trueorfalse False IRONIC_BAREMETAL_BASIC_OPS) @@ -619,7 +620,12 @@ function enroll_nodes { node_options+=" -i $_IRONIC_DEPLOY_RAMDISK_KEY=$IRONIC_DEPLOY_RAMDISK_ID" fi - local node_id=$(ironic node-create --chassis_uuid $chassis_id \ + # First node created will be used for testing in ironic w/o glance + # scenario, so we need to know its UUID. 
+ local standalone_node_uuid=$([ $total_nodes -eq 0 ] && echo "--uuid $IRONIC_NODE_UUID") + + local node_id=$(ironic node-create $standalone_node_uuid\ + --chassis_uuid $chassis_id \ --driver $IRONIC_DEPLOY_DRIVER \ -p cpus=$ironic_node_cpu\ -p memory_mb=$ironic_node_ram\ diff --git a/lib/tempest b/lib/tempest index cd8fbd725f..44b2f96101 100644 --- a/lib/tempest +++ b/lib/tempest @@ -481,6 +481,8 @@ function configure_tempest { if [ "$VIRT_DRIVER" = "ironic" ] ; then iniset $TEMPEST_CONFIG baremetal driver_enabled True iniset $TEMPEST_CONFIG baremetal unprovision_timeout 300 + iniset $TEMPEST_CONFIG baremetal deploy_img_dir $FILES + iniset $TEMPEST_CONFIG baremetal node_uuid $IRONIC_NODE_UUID iniset $TEMPEST_CONFIG compute-feature-enabled change_password False iniset $TEMPEST_CONFIG compute-feature-enabled console_output False iniset $TEMPEST_CONFIG compute-feature-enabled interface_attach False From b3a8f6032a47fd78fcaeb46bca6572a700c775ce Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Fri, 8 May 2015 06:59:39 -0700 Subject: [PATCH 0203/2941] nova: remove allow_migrate_to_same_host config usage Nova commit 9b224641295af3763d011816d6399565ac7b98de removed the option in Liberty so we can remove it's usage in devstack. 
Related-Bug: #1364851 Change-Id: If051f43fb75d57c118db4e8e97895ff06fbb54e2 --- lib/nova | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/nova b/lib/nova index 807dfceeae..75e585d9d2 100644 --- a/lib/nova +++ b/lib/nova @@ -392,7 +392,6 @@ function create_nova_conf { iniset $NOVA_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL" if [ "$NOVA_ALLOW_MOVE_TO_SAME_HOST" == "True" ]; then iniset $NOVA_CONF DEFAULT allow_resize_to_same_host "True" - iniset $NOVA_CONF DEFAULT allow_migrate_to_same_host "True" fi iniset $NOVA_CONF DEFAULT api_paste_config "$NOVA_API_PASTE_INI" iniset $NOVA_CONF DEFAULT rootwrap_config "$NOVA_CONF_DIR/rootwrap.conf" From ad0a518ca92f86a9f1361d717413f8d1d65d2994 Mon Sep 17 00:00:00 2001 From: Accela Zhao Date: Fri, 8 May 2015 23:55:31 +0800 Subject: [PATCH 0204/2941] Use an actual existing nova scheduler in README.md The Multi-Node Setup guide in README.md https://github.com/openstack-dev/devstack/tree/master#multi-node-setup guides users to use SCHEDULER=nova.scheduler.simple.SimpleScheduler where the SimpleScheduler doesn't actually exist in nova. Even though this is just an example, it is misleading enough for a beginner to put SimpleScheduler into local.conf. The resulting error message where n-sch fails to start ImportError: No module named simple Isn't intuitive enough and may takes the beginner long time to locate what's wrong. This patch replaces SimpleScheduler with a real existing FilterScheduler in nova. 
Change-Id: I14a2a5c0604ce08a498accfc3a795c1c9aa3e642 Closes-bug: #1453186 --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 04f5fd9711..7ba4e7461c 100644 --- a/README.md +++ b/README.md @@ -328,7 +328,7 @@ that includes at least: You likely want to change your `localrc` section to run a scheduler that will balance VMs across hosts: - SCHEDULER=nova.scheduler.simple.SimpleScheduler + SCHEDULER=nova.scheduler.filter_scheduler.FilterScheduler You can then run many compute nodes, each of which should have a `stackrc` which includes the following, with the IP address of the above controller node: From 99de7cc1782ed00905068d0ec894ac08db5aa06f Mon Sep 17 00:00:00 2001 From: Accela Zhao Date: Fri, 8 May 2015 18:14:11 +0800 Subject: [PATCH 0205/2941] Fix wrong `sudo ceph -c` command in lib/ceph The `sudo -c ${CEPH_CONF_FILE} ceph ...` in lib/ceph misplaced `ceph`. The correct syntax is `sudo ceph -c ${CEPH_CONF_FILE} ...`, see lib/ceph:308. While installing ./stack.sh with ceph enabled, the above malformed command raises a `usage: sudo -h | -K | -k | -V ...` error and stops the installation. This patch fixes `sudo -c ${CEPH_CONF_FILE} ceph ...` by moving `ceph` to the right place. 
Change-Id: I3da943d5a353d99b09787f804b79c1d006a09d96 Closes-bug: #1453055 --- lib/ceph | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ceph b/lib/ceph index 76747ccd77..4068e26222 100644 --- a/lib/ceph +++ b/lib/ceph @@ -279,7 +279,7 @@ function configure_ceph_embedded_nova { # configure Nova service options, ceph pool, ceph user and ceph key sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} size ${CEPH_REPLICAS} if [[ $CEPH_REPLICAS -ne 1 ]]; then - sudo -c ${CEPH_CONF_FILE} ceph osd pool set ${NOVA_CEPH_POOL} crush_ruleset ${RULE_ID} + sudo ceph -c ${CEPH_CONF_FILE} osd pool set ${NOVA_CEPH_POOL} crush_ruleset ${RULE_ID} fi } From 091b42b7da7650d528bb5f88ec411a04af3da828 Mon Sep 17 00:00:00 2001 From: Jeremy Stanley Date: Fri, 8 May 2015 17:43:08 +0000 Subject: [PATCH 0206/2941] Stop installing nose and pylint from distros The distro packages of nose and pylint depend on python-setuptools on some platforms, and on some of those platforms (at least CentOS 6.x) you can't resolve dependencies on python-setuptools properly if you've forcibly removed it already (as we do on our CI workers). It appears that any current upstream use of these tools in relation to DevStack-based testing is now relying on tox and pip to obtain them instead. 
Change-Id: Ibd16ac550c90364115caf57fae4f5f4cb5d5f238 --- files/debs/swift | 3 --- files/rpms-suse/general | 1 - files/rpms-suse/horizon | 2 -- files/rpms-suse/swift | 1 - files/rpms/general | 1 - files/rpms/horizon | 1 - 6 files changed, 9 deletions(-) diff --git a/files/debs/swift b/files/debs/swift index 0089d27fee..726786ee18 100644 --- a/files/debs/swift +++ b/files/debs/swift @@ -1,8 +1,5 @@ curl make memcached -# NOTE python-nose only exists because of swift functional job, we should probably -# figure out a more consistent way of installing this from test-requirements.txt instead -python-nose sqlite3 xfsprogs diff --git a/files/rpms-suse/general b/files/rpms-suse/general index 2219426141..42756d8fcc 100644 --- a/files/rpms-suse/general +++ b/files/rpms-suse/general @@ -15,7 +15,6 @@ openssh openssl psmisc python-cmd2 # dist:opensuse-12.3 -python-pylint screen tar tcpdump diff --git a/files/rpms-suse/horizon b/files/rpms-suse/horizon index d1f378a70d..c45eae6153 100644 --- a/files/rpms-suse/horizon +++ b/files/rpms-suse/horizon @@ -12,7 +12,5 @@ python-coverage python-dateutil python-eventlet python-mox -python-nose -python-pylint python-sqlalchemy-migrate python-xattr diff --git a/files/rpms-suse/swift b/files/rpms-suse/swift index 4b14098064..9c0d188fe2 100644 --- a/files/rpms-suse/swift +++ b/files/rpms-suse/swift @@ -8,7 +8,6 @@ python-devel python-eventlet python-greenlet python-netifaces -python-nose python-simplejson python-xattr sqlite3 diff --git a/files/rpms/general b/files/rpms/general index e17d6d6458..7b2c00ad5c 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -14,7 +14,6 @@ libxml2-devel libxslt-devel pkgconfig psmisc -pylint python-devel screen tar diff --git a/files/rpms/horizon b/files/rpms/horizon index 8d7f0371ef..b2cf0ded6f 100644 --- a/files/rpms/horizon +++ b/files/rpms/horizon @@ -1,6 +1,5 @@ Django httpd # NOPRIME mod_wsgi # NOPRIME -pylint pyxattr pcre-devel # pyScss From 98f59aafaf88328f6aee98efa0f563fb8bf91ebd Mon Sep 17 
00:00:00 2001 From: Mahito OGURA Date: Mon, 11 May 2015 18:02:34 +0900 Subject: [PATCH 0207/2941] Fix function and test for 'trueorfalse'. The function's comment is written as follow, however the function accepts other values (ex. "e", "t", "T", "f", "F", etc...). --- Accepts as False: 0 no No NO false False FALSE Accepts as True: 1 yes Yes YES true True TRUE --- Moreover if testval mach True or False, the function exits without resetting xtrace. This patch fixes the issue and add test patterns. Change-Id: Ie48a859476faff22a4dfef466516e2d7d62ef0c0 Closes-bug: #1453687 --- functions-common | 14 ++++++++------ tests/test_truefalse.sh | 33 +++++++++++++++++++++------------ 2 files changed, 29 insertions(+), 18 deletions(-) diff --git a/functions-common b/functions-common index 4d07c03cce..6d9a0fa6b0 100644 --- a/functions-common +++ b/functions-common @@ -51,14 +51,16 @@ TRACK_DEPENDS=${TRACK_DEPENDS:-False} function trueorfalse { local xtrace=$(set +o | grep xtrace) set +o xtrace + local default=$1 - local literal=$2 - local testval=${!literal:-} + local testval=${!2:-} + + case "$testval" in + "1" | [yY]es | "YES" | [tT]rue | "TRUE" ) echo "True" ;; + "0" | [nN]o | "NO" | [fF]alse | "FALSE" ) echo "False" ;; + * ) echo "$default" ;; + esac - [[ -z "$testval" ]] && { echo "$default"; return; } - [[ "0 no No NO false False FALSE" =~ "$testval" ]] && { echo "False"; return; } - [[ "1 yes Yes YES true True TRUE" =~ "$testval" ]] && { echo "True"; return; } - echo "$default" $xtrace } diff --git a/tests/test_truefalse.sh b/tests/test_truefalse.sh index e57948a407..ebd9650649 100755 --- a/tests/test_truefalse.sh +++ b/tests/test_truefalse.sh @@ -8,27 +8,36 @@ TOP=$(cd $(dirname "$0")/.. 
&& pwd) source $TOP/functions source $TOP/tests/unittest.sh -function test_truefalse { +function test_trueorfalse { local one=1 local captrue=True local lowtrue=true - local abrevtrue=t + local uppertrue=TRUE + local capyes=Yes + local lowyes=yes + local upperyes=YES + + for default in True False; do + for name in one captrue lowtrue uppertrue capyes lowyes upperyes; do + assert_equal "True" $(trueorfalse $default $name) "\$(trueorfalse $default $name)" + done + done + local zero=0 local capfalse=False local lowfalse=false - local abrevfalse=f - for against in True False; do - for name in one captrue lowtrue abrevtrue; do - assert_equal "True" $(trueorfalse $against $name) "\$(trueorfalse $against $name)" - done - done - for against in True False; do - for name in zero capfalse lowfalse abrevfalse; do - assert_equal "False" $(trueorfalse $against $name) "\$(trueorfalse $against $name)" + local upperfalse=FALSE + local capno=No + local lowno=no + local upperno=NO + + for default in True False; do + for name in zero capfalse lowfalse upperfalse capno lowno upperno; do + assert_equal "False" $(trueorfalse $default $name) "\$(trueorfalse $default $name)" done done } -test_truefalse +test_trueorfalse report_results From 331a64f9d087692cba10f3dd15c6b01595e1c127 Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Mon, 11 May 2015 10:02:24 -0500 Subject: [PATCH 0208/2941] Use stevedore for keystone backends With bp stevedore, keystone will load backend drivers using stevedore entrypoints. Using the qualified class name is deprecated. Since stevedore is going to validate that the entrypoint is found, there's no need to list the valid backends, so backend validation was removed. This change will cause the server to fail to start if the backends are misconfigured rather than using the default one. 
The names of the stevedore endpoints are "sql", "ldap", etc., rather than the qualified class name, so the way that these are specified in KEYSTONE_IDENTITY_BACKEND, etc., is the same as the stevedore entrypoint and there's no need to translate. Change-Id: I81e4e3a6c97b0057610e6b256aff5df4da884e33 --- lib/keystone | 42 +++++++++++------------------------------- 1 file changed, 11 insertions(+), 31 deletions(-) diff --git a/lib/keystone b/lib/keystone index 997bb14967..976aad0c6f 100644 --- a/lib/keystone +++ b/lib/keystone @@ -64,21 +64,21 @@ KEYSTONE_EXTENSIONS=${KEYSTONE_EXTENSIONS:-} # Toggle for deploying Keystone under HTTPD + mod_wsgi KEYSTONE_USE_MOD_WSGI=${KEYSTONE_USE_MOD_WSGI:-${ENABLE_HTTPD_MOD_WSGI_SERVICES}} -# Select the backend for Keystone's service catalog +# Select the Catalog backend driver KEYSTONE_CATALOG_BACKEND=${KEYSTONE_CATALOG_BACKEND:-sql} KEYSTONE_CATALOG=$KEYSTONE_CONF_DIR/default_catalog.templates -# Select the backend for Tokens +# Select the token persistence backend driver KEYSTONE_TOKEN_BACKEND=${KEYSTONE_TOKEN_BACKEND:-sql} -# Select the backend for Identity +# Select the Identity backend driver KEYSTONE_IDENTITY_BACKEND=${KEYSTONE_IDENTITY_BACKEND:-sql} -# Select the backend for Assignment +# Select the Assignment backend driver KEYSTONE_ASSIGNMENT_BACKEND=${KEYSTONE_ASSIGNMENT_BACKEND:-sql} -# Select Keystone's token format -# Choose from 'UUID', 'PKI', or 'PKIZ' +# Select Keystone's token provider (and format) +# Choose from 'uuid', 'pki', 'pkiz', or 'fernet' KEYSTONE_TOKEN_FORMAT=${KEYSTONE_TOKEN_FORMAT:-} KEYSTONE_TOKEN_FORMAT=$(echo ${KEYSTONE_TOKEN_FORMAT} | tr '[:upper:]' '[:lower:]') @@ -99,12 +99,6 @@ KEYSTONE_ADMIN_BIND_HOST=${KEYSTONE_ADMIN_BIND_HOST:-$KEYSTONE_SERVICE_HOST} # Set the tenant for service accounts in Keystone SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service} -# valid identity backends as per dir keystone/identity/backends -KEYSTONE_VALID_IDENTITY_BACKENDS=kvs,ldap,pam,sql - -# valid assignment backends 
as per dir keystone/identity/backends -KEYSTONE_VALID_ASSIGNMENT_BACKENDS=kvs,ldap,sql - # if we are running with SSL use https protocols if is_ssl_enabled_service "key" || is_service_enabled tls-proxy; then KEYSTONE_AUTH_PROTOCOL="https" @@ -225,15 +219,8 @@ function configure_keystone { iniset $KEYSTONE_CONF DEFAULT member_role_name "_member_" fi - # check if identity backend is valid - if [[ "$KEYSTONE_VALID_IDENTITY_BACKENDS" =~ "$KEYSTONE_IDENTITY_BACKEND" ]]; then - iniset $KEYSTONE_CONF identity driver "keystone.identity.backends.$KEYSTONE_IDENTITY_BACKEND.Identity" - fi - - # check if assignment backend is valid - if [[ "$KEYSTONE_VALID_ASSIGNMENT_BACKENDS" =~ "$KEYSTONE_ASSIGNMENT_BACKEND" ]]; then - iniset $KEYSTONE_CONF assignment driver "keystone.assignment.backends.$KEYSTONE_ASSIGNMENT_BACKEND.Assignment" - fi + iniset $KEYSTONE_CONF identity driver "$KEYSTONE_IDENTITY_BACKEND" + iniset $KEYSTONE_CONF assignment driver "$KEYSTONE_ASSIGNMENT_BACKEND" iniset_rpc_backend keystone $KEYSTONE_CONF @@ -257,23 +244,17 @@ function configure_keystone { iniset $KEYSTONE_CONF DEFAULT admin_token "$SERVICE_TOKEN" if [[ "$KEYSTONE_TOKEN_FORMAT" != "" ]]; then - iniset $KEYSTONE_CONF token provider keystone.token.providers.$KEYSTONE_TOKEN_FORMAT.Provider + iniset $KEYSTONE_CONF token provider $KEYSTONE_TOKEN_FORMAT fi iniset $KEYSTONE_CONF database connection `database_connection_url keystone` iniset $KEYSTONE_CONF ec2 driver "keystone.contrib.ec2.backends.sql.Ec2" - if [[ "$KEYSTONE_TOKEN_BACKEND" = "sql" ]]; then - iniset $KEYSTONE_CONF token driver keystone.token.persistence.backends.sql.Token - elif [[ "$KEYSTONE_TOKEN_BACKEND" = "memcache" ]]; then - iniset $KEYSTONE_CONF token driver keystone.token.persistence.backends.memcache.Token - else - iniset $KEYSTONE_CONF token driver keystone.token.persistence.backends.kvs.Token - fi + iniset $KEYSTONE_CONF token driver "$KEYSTONE_TOKEN_BACKEND" + iniset $KEYSTONE_CONF catalog driver "$KEYSTONE_CATALOG_BACKEND" if [[ 
"$KEYSTONE_CATALOG_BACKEND" = "sql" ]]; then # Configure ``keystone.conf`` to use sql - iniset $KEYSTONE_CONF catalog driver keystone.catalog.backends.sql.Catalog inicomment $KEYSTONE_CONF catalog template_file else cp -p $FILES/default_catalog.templates $KEYSTONE_CATALOG @@ -300,7 +281,6 @@ function configure_keystone { " -i $KEYSTONE_CATALOG # Configure ``keystone.conf`` to use templates - iniset $KEYSTONE_CONF catalog driver "keystone.catalog.backends.templated.Catalog" iniset $KEYSTONE_CONF catalog template_file "$KEYSTONE_CATALOG" fi From 71a8eccdc3594b2e0395d7df75e69eb877269e81 Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Mon, 11 May 2015 10:37:18 -0500 Subject: [PATCH 0209/2941] Remove setting nonexistant [ec2] driver option in keystone There's no [ec2] driver option in keystone. Change-Id: Ifee92127f32db85d4d55f665471c8da1c9a970e7 --- lib/keystone | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/keystone b/lib/keystone index 976aad0c6f..a1d832c5ae 100644 --- a/lib/keystone +++ b/lib/keystone @@ -248,7 +248,6 @@ function configure_keystone { fi iniset $KEYSTONE_CONF database connection `database_connection_url keystone` - iniset $KEYSTONE_CONF ec2 driver "keystone.contrib.ec2.backends.sql.Ec2" iniset $KEYSTONE_CONF token driver "$KEYSTONE_TOKEN_BACKEND" From eb7a0d9b2d22da3d1e0fbc3f581c597a1a510666 Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Mon, 11 May 2015 12:54:33 -0500 Subject: [PATCH 0210/2941] Remove KEYSTONE_AUTH_CACHE_DIR Keystone doesn't use a cache directory. 
Change-Id: I569b406db46cf6bdabcbfd8c5eb6f3cbdbc3cff7 --- lib/keystone | 5 ----- 1 file changed, 5 deletions(-) diff --git a/lib/keystone b/lib/keystone index a1d832c5ae..63c3f7973a 100644 --- a/lib/keystone +++ b/lib/keystone @@ -50,7 +50,6 @@ fi KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone} KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf KEYSTONE_PASTE_INI=${KEYSTONE_PASTE_INI:-$KEYSTONE_CONF_DIR/keystone-paste.ini} -KEYSTONE_AUTH_CACHE_DIR=${KEYSTONE_AUTH_CACHE_DIR:-/var/cache/keystone} if is_suse; then KEYSTONE_WSGI_DIR=${KEYSTONE_WSGI_DIR:-/srv/www/htdocs/keystone} else @@ -475,10 +474,6 @@ function init_keystone { # Set up certificates rm -rf $KEYSTONE_CONF_DIR/ssl $KEYSTONE_BIN_DIR/keystone-manage pki_setup - - # Create cache dir - sudo install -d -o $STACK_USER $KEYSTONE_AUTH_CACHE_DIR - rm -f $KEYSTONE_AUTH_CACHE_DIR/* fi } From 60a140571ea3a4ad07772f1eedae6d4d1a6e4c67 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 11 May 2015 14:53:39 -0400 Subject: [PATCH 0211/2941] add network info to the worlddump This adds potentially helpful networking info to the world dump. It also refactors some of the output mechanisms into reusable functions for cleanliness in the code. 
Change-Id: I39f95bd487c152925f8fadd1799149db35cffd52 --- tools/worlddump.py | 49 +++++++++++++++++++++++++++++++--------------- 1 file changed, 33 insertions(+), 16 deletions(-) diff --git a/tools/worlddump.py b/tools/worlddump.py index cb32510526..7f2614dcaa 100755 --- a/tools/worlddump.py +++ b/tools/worlddump.py @@ -41,12 +41,24 @@ def warn(msg): print "WARN: %s" % msg +def _dump_cmd(cmd): + print cmd + print "-" * len(cmd) + print + print os.popen(cmd).read() + + +def _header(name): + print + print name + print "=" * len(name) + print + + def disk_space(): # the df output - print """ -File System Summary -=================== -""" + _header("File System Summary") + dfraw = os.popen("df -Ph").read() df = [s.split() for s in dfraw.splitlines()] for fs in df: @@ -63,22 +75,26 @@ def disk_space(): def iptables_dump(): tables = ['filter', 'nat', 'mangle'] - print """ -IP Tables Dump -=============== -""" + _header("IP Tables Dump") + for table in tables: - print os.popen("sudo iptables --line-numbers -L -nv -t %s" - % table).read() + _dump_cmd("sudo iptables --line-numbers -L -nv -t %s" % table) + + +def network_dump(): + _header("Network Dump") + + _dump_cmd("brctl show") + _dump_cmd("arp -n") + _dump_cmd("ip addr") + _dump_cmd("ip link") + _dump_cmd("ip route") def process_list(): - print """ -Process Listing -=============== -""" - psraw = os.popen("ps axo user,ppid,pid,pcpu,pmem,vsz,rss,tty,stat,start,time,args").read() - print psraw + _header("Process Listing") + _dump_cmd("ps axo " + "user,ppid,pid,pcpu,pmem,vsz,rss,tty,stat,start,time,args") def main(): @@ -90,6 +106,7 @@ def main(): os.dup2(f.fileno(), sys.stdout.fileno()) disk_space() process_list() + network_dump() iptables_dump() From cbe12eb72eaf70001b80b0a6357cde3048cbc81f Mon Sep 17 00:00:00 2001 From: Louis Taylor Date: Tue, 12 May 2015 16:49:49 +0000 Subject: [PATCH 0212/2941] glance: remove deprecated store options glance_store has now been fully migrated, so we can remove these from the config 
files. Change-Id: I987ab6338b235f0beeed7c7fe74b0f5b6526f70d --- lib/glance | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/lib/glance b/lib/glance index f543e54c7a..4e1bd24ef5 100644 --- a/lib/glance +++ b/lib/glance @@ -138,26 +138,12 @@ function configure_glance { fi # Store specific configs - iniset $GLANCE_API_CONF DEFAULT filesystem_store_datadir $GLANCE_IMAGE_DIR/ - - # NOTE(flaper87): Until Glance is fully migrated, set these configs in both - # sections. iniset $GLANCE_API_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/ iniset $GLANCE_API_CONF DEFAULT workers "$API_WORKERS" # Store the images in swift if enabled. if is_service_enabled s-proxy; then - iniset $GLANCE_API_CONF DEFAULT default_store swift - iniset $GLANCE_API_CONF DEFAULT swift_store_auth_address $KEYSTONE_SERVICE_URI/v2.0/ - iniset $GLANCE_API_CONF DEFAULT swift_store_user $SERVICE_TENANT_NAME:glance-swift - iniset $GLANCE_API_CONF DEFAULT swift_store_key $SERVICE_PASSWORD - iniset $GLANCE_API_CONF DEFAULT swift_store_create_container_on_put True - - iniset $GLANCE_API_CONF DEFAULT known_stores "glance.store.filesystem.Store, glance.store.http.Store, glance.store.swift.Store" - - # NOTE(flaper87): Until Glance is fully migrated, set these configs in both - # sections. iniset $GLANCE_API_CONF glance_store default_store swift iniset $GLANCE_API_CONF glance_store swift_store_auth_address $KEYSTONE_SERVICE_URI/v2.0/ iniset $GLANCE_API_CONF glance_store swift_store_user $SERVICE_TENANT_NAME:glance-swift @@ -211,9 +197,6 @@ function configure_glance { iniset $GLANCE_CACHE_CONF DEFAULT admin_password $SERVICE_PASSWORD # Store specific confs - # NOTE(flaper87): Until Glance is fully migrated, set these configs in both - # sections. 
- iniset $GLANCE_CACHE_CONF DEFAULT filesystem_store_datadir $GLANCE_IMAGE_DIR/ iniset $GLANCE_CACHE_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/ cp -p $GLANCE_DIR/etc/policy.json $GLANCE_POLICY_JSON From 3a2c86aabfa985dbdc998f02201649f49f3adab7 Mon Sep 17 00:00:00 2001 From: Chris Dent Date: Tue, 12 May 2015 13:41:25 +0000 Subject: [PATCH 0213/2941] Add python_version function to functions-common This makes it possible to list virtual site-package directories without statically stating the python version, which is a bit ugly. Change-Id: I3e7ac39eb43cdc4f656e0c90f3bfb23545722aef --- functions-common | 6 ++++++ lib/keystone | 2 +- lib/nova | 2 +- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/functions-common b/functions-common index 4d07c03cce..d28ef85cc1 100644 --- a/functions-common +++ b/functions-common @@ -1897,6 +1897,12 @@ function maskip { echo $subnet } +# Return the current python as "python." +function python_version { + local python_version=$(python -c 'import sys; print("%s.%s" % sys.version_info[0:2])') + echo "python${python_version}" +} + # Service wrapper to restart services # restart_service service-name function restart_service { diff --git a/lib/keystone b/lib/keystone index 997bb14967..0f369af71d 100644 --- a/lib/keystone +++ b/lib/keystone @@ -164,7 +164,7 @@ function _config_keystone_apache_wsgi { keystone_auth_port=$KEYSTONE_AUTH_PORT_INT fi if [[ ${USE_VENV} = True ]]; then - venv_path="python-path=${PROJECT_VENV["keystone"]}/lib/python2.7/site-packages" + venv_path="python-path=${PROJECT_VENV["keystone"]}/lib/$(python_version)/site-packages" fi # copy proxy vhost and wsgi file diff --git a/lib/nova b/lib/nova index 6ac9da3b41..7d2145b170 100644 --- a/lib/nova +++ b/lib/nova @@ -259,7 +259,7 @@ function _config_nova_apache_wsgi { nova_keyfile="SSLCertificateKeyFile $NOVA_SSL_KEY" fi if [[ ${USE_VENV} = True ]]; then - venv_path="python-path=${PROJECT_VENV["nova"]}/lib/python2.7/site-packages" + 
venv_path="python-path=${PROJECT_VENV["nova"]}/lib/$(python_version)/site-packages" fi # copy proxy vhost and wsgi helper files From ce8e6f6aa61c267df7892e7e8748963db8c83bc3 Mon Sep 17 00:00:00 2001 From: John Griffith Date: Tue, 12 May 2015 17:28:59 -0600 Subject: [PATCH 0214/2941] Add ability to specify cinder lvm_type option Cinder has had an lvm_type option for quite a while that allows the LVM driver to be configured to use thin provisioning. This required an additional PPA in Precise, but is available in the default Trusty packages. This patch adds the lvm_type option, and we'll use it to do gate testing against the lvm_thin configuration. Change-Id: I99c7fea131f3d79747ec75052adf8b24f41ba483 --- lib/cinder | 4 ++++ lib/cinder_backends/lvm | 1 + 2 files changed, 5 insertions(+) diff --git a/lib/cinder b/lib/cinder index eb0e1d7600..93285bf99c 100644 --- a/lib/cinder +++ b/lib/cinder @@ -65,6 +65,10 @@ CINDER_SERVICE_PORT=${CINDER_SERVICE_PORT:-8776} CINDER_SERVICE_PORT_INT=${CINDER_SERVICE_PORT_INT:-18776} CINDER_SERVICE_PROTOCOL=${CINDER_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} +# What type of LVM device should Cinder use for LVM backend +# Defaults to default, which is thick, the other valid choice +# is thin, which as the name implies utilizes lvm thin provisioning. 
+CINDER_LVM_TYPE=${CINDER_LVM_TYPE:-default} # Default backends # The backend format is type:name where type is one of the supported backend diff --git a/lib/cinder_backends/lvm b/lib/cinder_backends/lvm index 35ad209db7..411b82c190 100644 --- a/lib/cinder_backends/lvm +++ b/lib/cinder_backends/lvm @@ -51,6 +51,7 @@ function configure_cinder_backend_lvm { iniset $CINDER_CONF $be_name volume_driver "cinder.volume.drivers.lvm.LVMVolumeDriver" iniset $CINDER_CONF $be_name volume_group $VOLUME_GROUP_NAME-$be_name iniset $CINDER_CONF $be_name iscsi_helper "$CINDER_ISCSI_HELPER" + iniset $CINDER_CONF $be_name lvm_type "$CINDER_LVM_TYPE" if [[ "$CINDER_SECURE_DELETE" == "False" ]]; then iniset $CINDER_CONF $be_name volume_clear none From 1fa82aab6634bf815d162978e33b211e1fdef343 Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Tue, 12 May 2015 20:04:49 -0700 Subject: [PATCH 0215/2941] Install g-r version of OSC in configure_tempest configure_tempest uses python-openstackclient (OSC) and we call configure_tempest in grenade on the new side. So we need to make sure the version of OSC is installed matches global-requirements on new. Change-Id: I6fae9b8b081355b45e7c8d622d8db2482d41b464 Closes-Bug: #1454467 --- lib/tempest | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/tempest b/lib/tempest index 6ce245aef1..1af5c7a795 100644 --- a/lib/tempest +++ b/lib/tempest @@ -106,6 +106,10 @@ function configure_tempest { pip_install_gr testrepository fi + # Used during configuration so make sure we have the correct + # version installed + pip_install_gr python-openstackclient + local image_lines local images local num_images From 38bee18a2c440b01bd1c2187cb2a62a841e3b7ae Mon Sep 17 00:00:00 2001 From: Jordan Pittier Date: Mon, 11 May 2015 16:51:10 +0200 Subject: [PATCH 0216/2941] Cinder: Set os_privileged_user credentials (for os-assisted-snapshots) When calling os-assisted-snapshots APIs, Cinder often (by default) needs to pass an admin token to Nova. 
Currently it uses the credentials of the current user. This will cause calls to Nova APIs for assisted volume snapshots to fail. Configuration options should be added to specify different credentials for talking to Nova. Change-Id: I9e3ed53f4e1349d57a0c33518445f54ac63e36ec Related-Bug: #1308736 --- lib/cinder | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/cinder b/lib/cinder index eb0e1d7600..7ad7ef9b0c 100644 --- a/lib/cinder +++ b/lib/cinder @@ -299,6 +299,11 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT ssl_key_file "$CINDER_SSL_KEY" fi + # Set os_privileged_user credentials (used for os-assisted-snapshots) + iniset $CINDER_CONF DEFAULT os_privileged_user_name nova + iniset $CINDER_CONF DEFAULT os_privileged_user_password "$SERVICE_PASSWORD" + iniset $CINDER_CONF DEFAULT os_privileged_user_tenant "$SERVICE_TENANT_NAME" + } # create_cinder_accounts() - Set up common required cinder accounts From a747cd25f76440a2320bd5e1c65252d31eb9b189 Mon Sep 17 00:00:00 2001 From: Swapnil Kulkarni Date: Wed, 13 May 2015 09:26:15 +0000 Subject: [PATCH 0217/2941] Update the glance image-list with openstack image list Change-Id: I0f0f15cb204daf12fbc12384f04a2cd9618c4bef --- lib/tempest | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/lib/tempest b/lib/tempest index cd8fbd725f..df986728c1 100644 --- a/lib/tempest +++ b/lib/tempest @@ -143,9 +143,7 @@ function configure_tempest { image_uuid_alt="$IMAGE_UUID" fi images+=($IMAGE_UUID) - # TODO(stevemar): update this command to use openstackclient's `openstack image list` - # when it supports listing by status. 
- done < <(glance image-list --status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2 }') + done < <(openstack image list --property status=active | awk -F'|' '!/^(+--)|ID|aki|ari/ { print $3,$2 }') case "${#images[*]}" in 0) From 737e94202fe635b7bd9ad59195352bb5dfe54817 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 12 May 2015 19:51:39 -0400 Subject: [PATCH 0218/2941] dump compute consoles on fail as well This provides a dump of the compute consoles as well on failure. Change-Id: Ib253537a54a1b9d83a930bbefa4512e039575fd1 --- tools/worlddump.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tools/worlddump.py b/tools/worlddump.py index 7f2614dcaa..d846f10025 100755 --- a/tools/worlddump.py +++ b/tools/worlddump.py @@ -18,6 +18,7 @@ import argparse import datetime +import fnmatch import os import os.path import sys @@ -97,6 +98,14 @@ def process_list(): "user,ppid,pid,pcpu,pmem,vsz,rss,tty,stat,start,time,args") +def compute_consoles(): + _header("Compute consoles") + for root, dirnames, filenames in os.walk('/opt/stack'): + for filename in fnmatch.filter(filenames, 'console.log'): + fullpath = os.path.join(root, filename) + _dump_cmd("sudo cat %s" % fullpath) + + def main(): opts = get_options() fname = filename(opts.dir) @@ -108,6 +117,7 @@ def main(): process_list() network_dump() iptables_dump() + compute_consoles() if __name__ == '__main__': From 6816234dc84b3e81a3de8745e84691d09123ba7f Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 13 May 2015 15:41:03 -0500 Subject: [PATCH 0219/2941] Save stackenv values more often Having these for debugging can be handy Change-Id: I18c2658eec83a0f20f697a3c4c36aa1cf46b7a92 --- functions-common | 20 ++++++++++++++++++++ stack.sh | 13 +++++++------ 2 files changed, 27 insertions(+), 6 deletions(-) diff --git a/functions-common b/functions-common index 52d80fb95b..974f5934e4 100644 --- a/functions-common +++ b/functions-common @@ -43,6 +43,25 @@ declare -A GITDIR 
TRACK_DEPENDS=${TRACK_DEPENDS:-False} +# Save these variables to .stackenv +STACK_ENV_VARS="BASE_SQL_CONN DATA_DIR DEST ENABLED_SERVICES HOST_IP \ + KEYSTONE_AUTH_PROTOCOL KEYSTONE_AUTH_URI KEYSTONE_SERVICE_URI \ + LOGFILE OS_CACERT SERVICE_HOST SERVICE_PROTOCOL STACK_USER TLS_IP" + + +# Saves significant environment variables to .stackenv for later use +# Refers to a lot of globals, only TOP_DIR and STACK_ENV_VARS are required to +# function, the rest are simply saved and do not cause problems if they are undefined. +# save_stackenv [tag] +function save_stackenv { + local tag=${1:-""} + # Save some values we generated for later use + time_stamp=$(date "+$TIMESTAMP_FORMAT") + echo "# $time_stamp $tag" >$TOP_DIR/.stackenv + for i in $STACK_ENV_VARS; do + echo $i=${!i} >>$TOP_DIR/.stackenv + done +} # Normalize config values to True or False # Accepts as False: 0 no No NO false False FALSE @@ -68,6 +87,7 @@ function isset { [[ -v "$1" ]] } + # Control Functions # ================= diff --git a/stack.sh b/stack.sh index f0aafafb3a..4f7a805f3f 100755 --- a/stack.sh +++ b/stack.sh @@ -669,6 +669,9 @@ if is_service_enabled s-proxy; then fi fi +# Save configuration values +save_stackenv $LINENO + # Install Packages # ================ @@ -950,6 +953,9 @@ fi # Initialize the directory for service status check init_service_check +# Save configuration values +save_stackenv $LINENO + # Start Services # ============== @@ -1294,12 +1300,7 @@ fi # Save some values we generated for later use -CURRENT_RUN_TIME=$(date "+$TIMESTAMP_FORMAT") -echo "# $CURRENT_RUN_TIME" >$TOP_DIR/.stackenv -for i in BASE_SQL_CONN ENABLED_SERVICES HOST_IP LOGFILE \ - SERVICE_HOST SERVICE_PROTOCOL STACK_USER TLS_IP KEYSTONE_AUTH_PROTOCOL OS_CACERT; do - echo $i=${!i} >>$TOP_DIR/.stackenv -done +save_stackenv # Wrapup configuration From 9e220b9b2b560b160c93058b255b3d69e49c0cbc Mon Sep 17 00:00:00 2001 From: "James E. 
Blair" Date: Tue, 24 Mar 2015 16:32:03 -0700 Subject: [PATCH 0220/2941] Move trove into in-tree plugin Once the trove code is copied into the trove repo and it is used as a devstack-plugin, we can remove trove-specific code from devstack. Change-Id: I8f9f1a015edb7ec1033e2eaf0b29ab15d89384ce Depends-On: I3506dec0e6097f9c2e9267110fdfb768faa23c85 --- MAINTAINERS.rst | 6 - clean.sh | 1 - doc/source/index.rst | 3 - extras.d/70-trove.sh | 32 ----- lib/trove | 252 ----------------------------------- stackrc | 28 ++-- tests/test_libs_from_pypi.sh | 2 +- unstack.sh | 4 - 8 files changed, 10 insertions(+), 318 deletions(-) delete mode 100644 extras.d/70-trove.sh delete mode 100644 lib/trove diff --git a/MAINTAINERS.rst b/MAINTAINERS.rst index 20e8655d69..d3e8c67487 100644 --- a/MAINTAINERS.rst +++ b/MAINTAINERS.rst @@ -75,12 +75,6 @@ SUSE Tempest ~~~~~~~ -Trove -~~~~~ - -* Nikhil Manchanda -* Michael Basnight - Xen ~~~ * Bob Ball diff --git a/clean.sh b/clean.sh index 7db519b0dc..c31a65fd40 100755 --- a/clean.sh +++ b/clean.sh @@ -51,7 +51,6 @@ source $TOP_DIR/lib/ceilometer source $TOP_DIR/lib/heat source $TOP_DIR/lib/neutron-legacy source $TOP_DIR/lib/ironic -source $TOP_DIR/lib/trove # Extras Source diff --git a/doc/source/index.rst b/doc/source/index.rst index 4435b495ae..e0c3f3a5d6 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -173,7 +173,6 @@ Scripts * `lib/swift `__ * `lib/tempest `__ * `lib/tls `__ -* `lib/trove `__ * `lib/zaqar `__ * `unstack.sh `__ * `clean.sh `__ @@ -182,7 +181,6 @@ Scripts * `extras.d/50-ironic.sh `__ * `extras.d/60-ceph.sh `__ * `extras.d/70-sahara.sh `__ -* `extras.d/70-trove.sh `__ * `extras.d/70-tuskar.sh `__ * `extras.d/70-zaqar.sh `__ * `extras.d/80-tempest.sh `__ @@ -242,6 +240,5 @@ Exercises * `exercises/sahara.sh `__ * `exercises/sec\_groups.sh `__ * `exercises/swift.sh `__ -* `exercises/trove.sh `__ * `exercises/volumes.sh `__ * `exercises/zaqar.sh `__ diff --git a/extras.d/70-trove.sh b/extras.d/70-trove.sh deleted 
file mode 100644 index f284354e1f..0000000000 --- a/extras.d/70-trove.sh +++ /dev/null @@ -1,32 +0,0 @@ -# trove.sh - Devstack extras script to install Trove - -if is_service_enabled trove; then - if [[ "$1" == "source" ]]; then - # Initial source - source $TOP_DIR/lib/trove - elif [[ "$1" == "stack" && "$2" == "install" ]]; then - echo_summary "Installing Trove" - install_trove - install_troveclient - cleanup_trove - elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then - echo_summary "Configuring Trove" - configure_trove - - if is_service_enabled key; then - create_trove_accounts - fi - - elif [[ "$1" == "stack" && "$2" == "extra" ]]; then - # Initialize trove - init_trove - - # Start the trove API and trove taskmgr components - echo_summary "Starting Trove" - start_trove - fi - - if [[ "$1" == "unstack" ]]; then - stop_trove - fi -fi diff --git a/lib/trove b/lib/trove deleted file mode 100644 index b0a96100c2..0000000000 --- a/lib/trove +++ /dev/null @@ -1,252 +0,0 @@ -#!/bin/bash -# -# lib/trove -# Functions to control the configuration and operation of the **Trove** service - -# Dependencies: -# ``functions`` file -# ``DEST``, ``STACK_USER`` must be defined -# ``SERVICE_{HOST|PROTOCOL|TOKEN}`` must be defined - -# ``stack.sh`` calls the entry points in this order: -# -# install_trove -# configure_trove -# init_trove -# start_trove -# stop_trove -# cleanup_trove - -# Save trace setting -XTRACE=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- -if is_service_enabled neutron; then - TROVE_HOST_GATEWAY=${PUBLIC_NETWORK_GATEWAY:-172.24.4.1} -else - TROVE_HOST_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1} -fi - -# Set up default configuration -GITDIR["python-troveclient"]=$DEST/python-troveclient - -TROVE_DIR=$DEST/trove -TROVE_CONF_DIR=${TROVE_CONF_DIR:-/etc/trove} -TROVE_CONF=${TROVE_CONF:-$TROVE_CONF_DIR/trove.conf} -TROVE_TASKMANAGER_CONF=${TROVE_TASKMANAGER_CONF:-$TROVE_CONF_DIR/trove-taskmanager.conf} 
-TROVE_CONDUCTOR_CONF=${TROVE_CONDUCTOR_CONF:-$TROVE_CONF_DIR/trove-conductor.conf} -TROVE_GUESTAGENT_CONF=${TROVE_GUESTAGENT_CONF:-$TROVE_CONF_DIR/trove-guestagent.conf} -TROVE_API_PASTE_INI=${TROVE_API_PASTE_INI:-$TROVE_CONF_DIR/api-paste.ini} - -TROVE_LOCAL_CONF_DIR=$TROVE_DIR/etc/trove -TROVE_LOCAL_API_PASTE_INI=$TROVE_LOCAL_CONF_DIR/api-paste.ini -TROVE_AUTH_CACHE_DIR=${TROVE_AUTH_CACHE_DIR:-/var/cache/trove} -TROVE_DATASTORE_TYPE=${TROVE_DATASTORE_TYPE:-"mysql"} -TROVE_DATASTORE_VERSION=${TROVE_DATASTORE_VERSION:-"5.6"} -TROVE_DATASTORE_PACKAGE=${TROVE_DATASTORE_PACKAGE:-"mysql-server-5.6"} - -# Support entry points installation of console scripts -if [[ -d $TROVE_DIR/bin ]]; then - TROVE_BIN_DIR=$TROVE_DIR/bin -else - TROVE_BIN_DIR=$(get_python_exec_prefix) -fi -TROVE_MANAGE=$TROVE_BIN_DIR/trove-manage - -# Tell Tempest this project is present -TEMPEST_SERVICES+=,trove - - -# Functions -# --------- - -# Test if any Trove services are enabled -# is_trove_enabled -function is_trove_enabled { - [[ ,${ENABLED_SERVICES} =~ ,"tr-" ]] && return 0 - return 1 -} - -# setup_trove_logging() - Adds logging configuration to conf files -function setup_trove_logging { - local CONF=$1 - iniset $CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $CONF DEFAULT use_syslog $SYSLOG - if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then - # Add color to logging output - setup_colorized_logging $CONF DEFAULT tenant user - fi -} - -# create_trove_accounts() - Set up common required Trove accounts - -# Tenant User Roles -# ------------------------------------------------------------------ -# service trove admin # if enabled - -function create_trove_accounts { - if [[ "$ENABLED_SERVICES" =~ "trove" ]]; then - - create_service_user "trove" - - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - - local trove_service=$(get_or_create_service "trove" \ - "database" "Trove Service") - get_or_create_endpoint $trove_service \ - "$REGION_NAME" \ - 
"http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" \ - "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" \ - "http://$SERVICE_HOST:8779/v1.0/\$(tenant_id)s" - fi - fi -} - -# stack.sh entry points -# --------------------- - -# cleanup_trove() - Remove residual data files, anything left over from previous -# runs that a clean run would need to clean up -function cleanup_trove { - #Clean up dirs - rm -fr $TROVE_AUTH_CACHE_DIR/* - rm -fr $TROVE_CONF_DIR/* -} - -# configure_trove() - Set config files, create data dirs, etc -function configure_trove { - setup_develop $TROVE_DIR - - # Create the trove conf dir and cache dirs if they don't exist - sudo install -d -o $STACK_USER ${TROVE_CONF_DIR} ${TROVE_AUTH_CACHE_DIR} - - # Copy api-paste file over to the trove conf dir - cp $TROVE_LOCAL_API_PASTE_INI $TROVE_API_PASTE_INI - - # (Re)create trove conf files - rm -f $TROVE_CONF - rm -f $TROVE_TASKMANAGER_CONF - rm -f $TROVE_CONDUCTOR_CONF - - iniset $TROVE_CONF DEFAULT rabbit_userid $RABBIT_USERID - iniset $TROVE_CONF DEFAULT rabbit_password $RABBIT_PASSWORD - iniset $TROVE_CONF database connection `database_connection_url trove` - iniset $TROVE_CONF DEFAULT default_datastore $TROVE_DATASTORE_TYPE - setup_trove_logging $TROVE_CONF - iniset $TROVE_CONF DEFAULT trove_api_workers "$API_WORKERS" - - configure_auth_token_middleware $TROVE_CONF trove $TROVE_AUTH_CACHE_DIR - - # (Re)create trove taskmanager conf file if needed - if is_service_enabled tr-tmgr; then - TROVE_AUTH_ENDPOINT=$KEYSTONE_AUTH_URI/v$IDENTITY_API_VERSION - - iniset $TROVE_TASKMANAGER_CONF DEFAULT rabbit_userid $RABBIT_USERID - iniset $TROVE_TASKMANAGER_CONF DEFAULT rabbit_password $RABBIT_PASSWORD - iniset $TROVE_TASKMANAGER_CONF database connection `database_connection_url trove` - iniset $TROVE_TASKMANAGER_CONF DEFAULT taskmanager_manager trove.taskmanager.manager.Manager - iniset $TROVE_TASKMANAGER_CONF DEFAULT nova_proxy_admin_user radmin - iniset $TROVE_TASKMANAGER_CONF DEFAULT nova_proxy_admin_tenant_name 
trove - iniset $TROVE_TASKMANAGER_CONF DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS - iniset $TROVE_TASKMANAGER_CONF DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT - setup_trove_logging $TROVE_TASKMANAGER_CONF - fi - - # (Re)create trove conductor conf file if needed - if is_service_enabled tr-cond; then - iniset $TROVE_CONDUCTOR_CONF DEFAULT rabbit_userid $RABBIT_USERID - iniset $TROVE_CONDUCTOR_CONF DEFAULT rabbit_password $RABBIT_PASSWORD - iniset $TROVE_CONDUCTOR_CONF database connection `database_connection_url trove` - iniset $TROVE_CONDUCTOR_CONF DEFAULT nova_proxy_admin_user radmin - iniset $TROVE_CONDUCTOR_CONF DEFAULT nova_proxy_admin_tenant_name trove - iniset $TROVE_CONDUCTOR_CONF DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS - iniset $TROVE_CONDUCTOR_CONF DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT - iniset $TROVE_CONDUCTOR_CONF DEFAULT control_exchange trove - setup_trove_logging $TROVE_CONDUCTOR_CONF - fi - - # Set up Guest Agent conf - iniset $TROVE_GUESTAGENT_CONF DEFAULT rabbit_userid $RABBIT_USERID - iniset $TROVE_GUESTAGENT_CONF DEFAULT rabbit_host $TROVE_HOST_GATEWAY - iniset $TROVE_GUESTAGENT_CONF DEFAULT rabbit_password $RABBIT_PASSWORD - iniset $TROVE_GUESTAGENT_CONF DEFAULT nova_proxy_admin_user radmin - iniset $TROVE_GUESTAGENT_CONF DEFAULT nova_proxy_admin_tenant_name trove - iniset $TROVE_GUESTAGENT_CONF DEFAULT nova_proxy_admin_pass $RADMIN_USER_PASS - iniset $TROVE_GUESTAGENT_CONF DEFAULT trove_auth_url $TROVE_AUTH_ENDPOINT - iniset $TROVE_GUESTAGENT_CONF DEFAULT control_exchange trove - iniset $TROVE_GUESTAGENT_CONF DEFAULT ignore_users os_admin - iniset $TROVE_GUESTAGENT_CONF DEFAULT log_dir /var/log/trove/ - iniset $TROVE_GUESTAGENT_CONF DEFAULT log_file trove-guestagent.log - setup_trove_logging $TROVE_GUESTAGENT_CONF -} - -# install_troveclient() - Collect source and prepare -function install_troveclient { - if use_library_from_git "python-troveclient"; then - git_clone_by_name "python-troveclient" - setup_dev_lib 
"python-troveclient" - fi -} - -# install_trove() - Collect source and prepare -function install_trove { - git_clone $TROVE_REPO $TROVE_DIR $TROVE_BRANCH -} - -# init_trove() - Initializes Trove Database as a Service -function init_trove { - # (Re)Create trove db - recreate_database trove - - # Initialize the trove database - $TROVE_MANAGE db_sync - - # If no guest image is specified, skip remaining setup - [ -z "$TROVE_GUEST_IMAGE_URL" ] && return 0 - - # Find the glance id for the trove guest image - # The image is uploaded by stack.sh -- see $IMAGE_URLS handling - GUEST_IMAGE_NAME=$(basename "$TROVE_GUEST_IMAGE_URL") - GUEST_IMAGE_NAME=${GUEST_IMAGE_NAME%.*} - TROVE_GUEST_IMAGE_ID=$(openstack --os-token $TOKEN --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT image list | grep "${GUEST_IMAGE_NAME}" | get_field 1) - if [ -z "$TROVE_GUEST_IMAGE_ID" ]; then - # If no glance id is found, skip remaining setup - echo "Datastore ${TROVE_DATASTORE_TYPE} will not be created: guest image ${GUEST_IMAGE_NAME} not found." 
- return 1 - fi - - # Now that we have the guest image id, initialize appropriate datastores / datastore versions - $TROVE_MANAGE datastore_update "$TROVE_DATASTORE_TYPE" "" - $TROVE_MANAGE datastore_version_update "$TROVE_DATASTORE_TYPE" "$TROVE_DATASTORE_VERSION" "$TROVE_DATASTORE_TYPE" \ - "$TROVE_GUEST_IMAGE_ID" "$TROVE_DATASTORE_PACKAGE" 1 - $TROVE_MANAGE datastore_version_update "$TROVE_DATASTORE_TYPE" "inactive_version" "inactive_manager" "$TROVE_GUEST_IMAGE_ID" "" 0 - $TROVE_MANAGE datastore_update "$TROVE_DATASTORE_TYPE" "$TROVE_DATASTORE_VERSION" - $TROVE_MANAGE datastore_update "Inactive_Datastore" "" -} - -# start_trove() - Start running processes, including screen -function start_trove { - run_process tr-api "$TROVE_BIN_DIR/trove-api --config-file=$TROVE_CONF --debug" - run_process tr-tmgr "$TROVE_BIN_DIR/trove-taskmanager --config-file=$TROVE_TASKMANAGER_CONF --debug" - run_process tr-cond "$TROVE_BIN_DIR/trove-conductor --config-file=$TROVE_CONDUCTOR_CONF --debug" -} - -# stop_trove() - Stop running processes -function stop_trove { - # Kill the trove screen windows - local serv - for serv in tr-api tr-tmgr tr-cond; do - stop_process $serv - done -} - -# Restore xtrace -$XTRACE - -# Tell emacs to use shell-script-mode -## Local variables: -## mode: shell-script -## End: diff --git a/stackrc b/stackrc index 3c08b15b03..938a09a56d 100644 --- a/stackrc +++ b/stackrc @@ -231,11 +231,6 @@ SAHARA_BRANCH=${SAHARA_BRANCH:-master} SWIFT_REPO=${SWIFT_REPO:-${GIT_BASE}/openstack/swift.git} SWIFT_BRANCH=${SWIFT_BRANCH:-master} -# trove service -TROVE_REPO=${TROVE_REPO:-${GIT_BASE}/openstack/trove.git} -TROVE_BRANCH=${TROVE_BRANCH:-master} - - ############## # # Testing Components @@ -301,10 +296,6 @@ GITBRANCH["python-saharaclient"]=${SAHARACLIENT_BRANCH:-master} GITREPO["python-swiftclient"]=${SWIFTCLIENT_REPO:-${GIT_BASE}/openstack/python-swiftclient.git} GITBRANCH["python-swiftclient"]=${SWIFTCLIENT_BRANCH:-master} -# trove client library test 
-GITREPO["python-troveclient"]=${TROVECLIENT_REPO:-${GIT_BASE}/openstack/python-troveclient.git} -GITBRANCH["python-troveclient"]=${TROVECLIENT_BRANCH:-master} - # consolidated openstack python client GITREPO["python-openstackclient"]=${OPENSTACKCLIENT_REPO:-${GIT_BASE}/openstack/python-openstackclient.git} GITBRANCH["python-openstackclient"]=${OPENSTACKCLIENT_BRANCH:-master} @@ -577,16 +568,15 @@ case "$VIRT_DRIVER" in IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec.tar.gz"};; esac -# Trove needs a custom image for its work -if [[ "$ENABLED_SERVICES" =~ 'tr-api' ]]; then - case "$VIRT_DRIVER" in - libvirt|ironic|xenapi) - TROVE_GUEST_IMAGE_URL=${TROVE_GUEST_IMAGE_URL:-"http://tarballs.openstack.org/trove/images/ubuntu/mysql.qcow2"} - IMAGE_URLS+=",${TROVE_GUEST_IMAGE_URL}" - ;; - *) - ;; - esac +# Staging Area for New Images, have them here for at least 24hrs for nodepool +# to cache them otherwise the failure rates in the gate are too high +PRECACHE_IMAGES=$(trueorfalse False PRECACHE_IMAGES) +if [[ "$PRECACHE_IMAGES" == "True" ]]; then + + IMAGE_URL="http://tarballs.openstack.org/trove/images/ubuntu/mysql.qcow2" + if ! 
[[ "$IMAGE_URLS" =~ "$IMAGE_URL" ]]; then + IMAGE_URLS+=",$IMAGE_URL" + fi fi # 10Gb default volume backing file size diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh index 0bec584aad..8210d0a466 100755 --- a/tests/test_libs_from_pypi.sh +++ b/tests/test_libs_from_pypi.sh @@ -29,7 +29,7 @@ for i in $TOP/lib/*; do fi done -ALL_LIBS="python-novaclient oslo.config pbr oslo.context python-troveclient" +ALL_LIBS="python-novaclient oslo.config pbr oslo.context" ALL_LIBS+=" python-keystoneclient taskflow oslo.middleware pycadf" ALL_LIBS+=" python-glanceclient python-ironicclient tempest-lib" ALL_LIBS+=" oslo.messaging oslo.log cliff python-heatclient stevedore" diff --git a/unstack.sh b/unstack.sh index ed7e6175ca..f0da9710a2 100755 --- a/unstack.sh +++ b/unstack.sh @@ -173,10 +173,6 @@ if is_service_enabled neutron; then cleanup_neutron fi -if is_service_enabled trove; then - cleanup_trove -fi - if is_service_enabled dstat; then stop_dstat fi From 61045ca58a89f9ce3a2c905450885700119a8a6f Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Thu, 14 May 2015 11:20:39 -0400 Subject: [PATCH 0221/2941] Write out a clouds.yaml file os-client-config consumes clouds.yaml files, which is now supported in python-openstackclient and shade. It also makes for a non-envvar way of getting config info into functional tests. 
Change-Id: I1150b943f52f10d19f8434b27e8dde73a14d7843 --- clean.sh | 2 +- stack.sh | 23 +++++++++++++++++++++++ 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/clean.sh b/clean.sh index 7db519b0dc..86e3d4bec8 100755 --- a/clean.sh +++ b/clean.sh @@ -130,7 +130,7 @@ if [[ -n "$SCREEN_LOGDIR" ]] && [[ -d "$SCREEN_LOGDIR" ]]; then fi # Clean up venvs -DIRS_TO_CLEAN="$WHEELHOUSE ${PROJECT_VENV[@]}" +DIRS_TO_CLEAN="$WHEELHOUSE ${PROJECT_VENV[@]} .config/openstack" rm -rf $DIRS_TO_CLEAN # Clean up files diff --git a/stack.sh b/stack.sh index f20af219a6..dea56437dd 100755 --- a/stack.sh +++ b/stack.sh @@ -1294,6 +1294,29 @@ for i in BASE_SQL_CONN ENABLED_SERVICES HOST_IP LOGFILE \ echo $i=${!i} >>$TOP_DIR/.stackenv done +# Write out a clouds.yaml file +# putting the location into a variable to allow for easier refactoring later +# to make it overridable. There is current no usecase where doing so makes +# sense, so I'm not actually doing it now. +CLOUDS_YAML=~/.config/openstack/clouds.yaml +if [ ! -e $CLOUDS_YAML ]; then + mkdir -p $(dirname $CLOUDS_YAML) + cat >"$CLOUDS_YAML" <>"$CLOUDS_YAML" + fi +fi + # Wrapup configuration # ==================== From c6782413081cbdc72c7b24e34acec383a1cf2f46 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 14 May 2015 10:01:53 +1000 Subject: [PATCH 0222/2941] Infer rootwrap arguments from project We can infer the binary and configuration paths just from the project name and expanding this to the known *_DIR & *_BIN_DIR variables. 
A similar thing is done for policyd settings Change-Id: I7c6a9fa106948ae5cbcf52555ade6154623798f1 --- inc/rootwrap | 14 ++++++++++---- lib/ceilometer | 2 +- lib/cinder | 2 +- lib/nova | 2 +- 4 files changed, 13 insertions(+), 7 deletions(-) diff --git a/inc/rootwrap b/inc/rootwrap index bac8e1e86c..411e5f7b73 100644 --- a/inc/rootwrap +++ b/inc/rootwrap @@ -38,11 +38,17 @@ function add_sudo_secure_path { # Configure rootwrap # Make a load of assumptions otherwise we'll have 6 arguments -# configure_rootwrap project bin conf-src-dir +# configure_rootwrap project function configure_rootwrap { - local project=$1 # xx - local rootwrap_bin=$2 # /opt/stack/xx.venv/bin/xx-rootwrap - local rootwrap_conf_src_dir=$3 # /opt/stack/xx/etc/xx + local project=$1 + local project_uc=$(echo $1|tr a-z A-Z) + local bin_dir="${project_uc}_BIN_DIR" + bin_dir="${!bin_dir}" + local project_dir="${project_uc}_DIR" + project_dir="${!project_dir}" + + local rootwrap_conf_src_dir="${project_dir}/etc/${project}" + local rootwrap_bin="${bin_dir}/${project}-rootwrap" # Start fresh with rootwrap filters sudo rm -rf /etc/${project}/rootwrap.d diff --git a/lib/ceilometer b/lib/ceilometer index 9abdbfe286..1f72187ed6 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -254,7 +254,7 @@ function configure_ceilometer { if is_service_enabled ceilometer-aipmi; then # Configure rootwrap for the ipmi agent - configure_rootwrap ceilometer $CEILOMETER_BIN_DIR/ceilometer-rootwrap $CEILOMETER_DIR/etc/ceilometer + configure_rootwrap ceilometer fi } diff --git a/lib/cinder b/lib/cinder index eb0e1d7600..3c5425af08 100644 --- a/lib/cinder +++ b/lib/cinder @@ -190,7 +190,7 @@ function configure_cinder { rm -f $CINDER_CONF - configure_rootwrap cinder $CINDER_BIN_DIR/cinder-rootwrap $CINDER_DIR/etc/cinder + configure_rootwrap cinder cp $CINDER_DIR/etc/cinder/api-paste.ini $CINDER_API_PASTE_INI diff --git a/lib/nova b/lib/nova index 7d2145b170..da288d3e4d 100644 --- a/lib/nova +++ b/lib/nova @@ -298,7 +298,7 @@ 
function configure_nova { install_default_policy nova - configure_rootwrap nova $NOVA_BIN_DIR/nova-rootwrap $NOVA_DIR/etc/nova + configure_rootwrap nova if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then # Get the sample configuration file in place From 8afbaa1c80d54d7f6591f8f2c1a26c34f60c77e1 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Wed, 13 May 2015 20:53:08 -0400 Subject: [PATCH 0223/2941] Support for running Nova with oslo.rootwrap daemon Nova is being enhanced to use rootwrap as a daemon. For this effort, we need an additional entry for nova-rootwrap-daemon in the sudoers.d/ directory. Needed by: I57dc2efa39b86fa1fa20730ad70d056e87617c96 Change-Id: I80c7b9dd8e9e0f940aa4e54a95b241dfc40d3574 --- inc/rootwrap | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/inc/rootwrap b/inc/rootwrap index 411e5f7b73..f91e557e68 100644 --- a/inc/rootwrap +++ b/inc/rootwrap @@ -59,12 +59,16 @@ function configure_rootwrap { sudo install -o root -g root -m 644 $rootwrap_conf_src_dir/rootwrap.conf /etc/${project}/rootwrap.conf sudo sed -e "s:^filters_path=.*$:filters_path=/etc/${project}/rootwrap.d:" -i /etc/${project}/rootwrap.conf - # Specify rootwrap.conf as first parameter to rootwrap - rootwrap_sudo_cmd="$rootwrap_bin /etc/${project}/rootwrap.conf *" - # Set up the rootwrap sudoers local tempfile=$(mktemp) + # Specify rootwrap.conf as first parameter to rootwrap + rootwrap_sudo_cmd="${rootwrap_bin} /etc/${project}/rootwrap.conf *" echo "$STACK_USER ALL=(root) NOPASSWD: $rootwrap_sudo_cmd" >$tempfile + if [ -f ${bin_dir}/${project}-rootwrap-daemon ]; then + # rootwrap daemon does not need any parameters + rootwrap_sudo_cmd="${rootwrap_bin}-daemon /etc/${project}/rootwrap.conf" + echo "$STACK_USER ALL=(root) NOPASSWD: $rootwrap_sudo_cmd" >>$tempfile + fi chmod 0440 $tempfile sudo chown root:root $tempfile sudo mv $tempfile /etc/sudoers.d/${project}-rootwrap From 3380a16974defc62db65fbc8e30e2510b57b84b6 Mon Sep 17 00:00:00 2001 From: Ian Wienand 
Date: Fri, 15 May 2015 13:12:02 +1000 Subject: [PATCH 0224/2941] Disable firewalld always We've bike-sheded over this before (I5252a12223a35f7fb7a4ac3c58aa4a3cd1bc4799) but I have just traced down further issues to firewalld with neutron+ipv6 (see the bug). In fact, as mentioned in the comments, RDO disables firewalld and the neutron guide says to disable it [1]. The force flag is left if anyone really wants this; but nobody is testing (or, as far as I can tell, working on) this so bring devstack back into line and disable it always. Note we do not remove the package; as has been found in the puppet scripts this can lead to dependency issues. [1] http://docs.openstack.org/developer/devstack/guides/neutron.html Change-Id: Ief7cb33d926a9538f4eb39c74d906ee0c879de35 Partial-Bug: 1455303 --- lib/nova_plugins/functions-libvirt | 11 ----------- tools/fixup_stuff.sh | 27 ++++++++++++++++++--------- 2 files changed, 18 insertions(+), 20 deletions(-) diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index 04da5e2b60..96d8a44b05 100755 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -39,17 +39,6 @@ function install_libvirt { pip_install_gr libvirt-python install_package python-libguestfs fi - - # Restart firewalld after install of libvirt to avoid a problem - # with polkit, which libvirtd brings in. See - # https://bugzilla.redhat.com/show_bug.cgi?id=1099031 - - # Note there is a difference between F20 rackspace cloud images - # and HP images used in the gate; rackspace has firewalld but hp - # cloud doesn't. 
- if is_fedora && is_package_installed firewalld; then - sudo service firewalld restart || true - fi } # Configures the installed libvirt system so that is accessible by diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 2efb4e0987..31258d13f7 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -109,19 +109,28 @@ if is_fedora; then fi FORCE_FIREWALLD=$(trueorfalse False $FORCE_FIREWALLD) - if [[ ${DISTRO} =~ (f20) && $FORCE_FIREWALLD == "False" ]]; then + if [[ $FORCE_FIREWALLD == "False" ]]; then # On Fedora 20 firewalld interacts badly with libvirt and - # slows things down significantly. However, for those cases - # where that combination is desired, allow this fix to be skipped. - - # There was also an additional issue with firewalld hanging - # after install of libvirt with polkit. See - # https://bugzilla.redhat.com/show_bug.cgi?id=1099031 + # slows things down significantly (this issue was fixed in + # later fedoras). There was also an additional issue with + # firewalld hanging after install of libvirt with polkit [1]. + # firewalld also causes problems with neturon+ipv6 [2] + # + # Note we do the same as the RDO packages and stop & disable, + # rather than remove. This is because other packages might + # have the dependency [3][4]. 
+ # + # [1] https://bugzilla.redhat.com/show_bug.cgi?id=1099031 + # [2] https://bugs.launchpad.net/neutron/+bug/1455303 + # [3] https://github.com/redhat-openstack/openstack-puppet-modules/blob/master/firewall/manifests/linux/redhat.pp + # [4] http://docs.openstack.org/developer/devstack/guides/neutron.html if is_package_installed firewalld; then - uninstall_package firewalld + sudo systemctl disable firewalld + sudo systemctl enable iptables + sudo systemctl stop firewalld + sudo systemctl start iptables fi fi - fi # The version of pip(1.5.4) supported by python-virtualenv(1.11.4) has From 4b684aed316a89d4bc0d365e594ed345fe99d6b4 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Fri, 15 May 2015 12:38:09 -0400 Subject: [PATCH 0225/2941] Don't set tempest fixed_network_name with neutron If neutron is enabled then there isn't a shared private network between all tenants which is what is required for the fixed_network_name config option. This commit adds a conditional to not set that option when neutron is enabled. While not necessarily fatal to tempest it does emit a warning on almost every server create call if it is set with a non-existent network name. Change-Id: I1a42fa6b0b5a93b411c08ec35df043d6ea69d453 --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 9c227167d8..f02b0d13dd 100644 --- a/lib/tempest +++ b/lib/tempest @@ -344,7 +344,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute flavor_ref $flavor_ref iniset $TEMPEST_CONFIG compute flavor_ref_alt $flavor_ref_alt iniset $TEMPEST_CONFIG compute ssh_connect_method $ssh_connect_method - if ! is_service_enabled n-cell; then + if [[ ! $(is_service_enabled n-cell) && ! 
$(is_service_enabled neutron) ]]; then iniset $TEMPEST_CONFIG compute fixed_network_name $PRIVATE_NETWORK_NAME fi From a48e5dc4bd3514cc24cd75c72ea998ad9afe5321 Mon Sep 17 00:00:00 2001 From: Yalei Wang Date: Fri, 6 Mar 2015 17:05:11 +0800 Subject: [PATCH 0226/2941] add the port_sec as default neutron/ml2 extension driver Neutron ML2 plugin introduces the first extension driver port_security, this patch add it to be a default extension driver as a example. And also, if not set it by default, networks like public/private which are created after the neutron-db-manage's update, will not include the port-sec value. Change-Id: I3035317c83d22804855517434bd8578719ce0436 Partially Implements: blueprint ml2-ovs-portsecurity --- doc/source/guides/neutron.rst | 5 +++++ lib/neutron_plugins/ml2 | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst index 3030c7b5f2..b0a89070fb 100644 --- a/doc/source/guides/neutron.rst +++ b/doc/source/guides/neutron.rst @@ -131,6 +131,11 @@ In this configuration we are defining FLOATING_RANGE to be a subnet that exists in the private RFC1918 address space - however in in a real setup FLOATING_RANGE would be a public IP address range. +Note that extension drivers for the ML2 plugin is set by +`Q_ML2_PLUGIN_EXT_DRIVERS`, and it includes 'port_security' by default. If you +want to remove all the extension drivers (even 'port_security'), set +`Q_ML2_PLUGIN_EXT_DRIVERS` to blank. 
+ Neutron Networking with Open vSwitch and Provider Networks ========================================================== diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index 88537774b7..2733f1f513 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -31,6 +31,9 @@ Q_ML2_PLUGIN_GRE_TYPE_OPTIONS=${Q_ML2_PLUGIN_GRE_TYPE_OPTIONS:-tunnel_id_ranges= Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS=${Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS:-vni_ranges=1001:2000} # Default VLAN TypeDriver options Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS=${Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS:-} +# List of extension drivers to load, use '-' instead of ':-' to allow people to +# explicitly override this to blank +Q_ML2_PLUGIN_EXT_DRIVERS=${Q_ML2_PLUGIN_EXT_DRIVERS-port_security} # L3 Plugin to load for ML2 ML2_L3_PLUGIN=${ML2_L3_PLUGIN:-neutron.services.l3_router.l3_router_plugin.L3RouterPlugin} @@ -113,6 +116,8 @@ function neutron_plugin_configure_service { populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 type_drivers=$Q_ML2_PLUGIN_TYPE_DRIVERS + populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 extension_drivers=$Q_ML2_PLUGIN_EXT_DRIVERS + populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 $Q_SRV_EXTRA_OPTS populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2_type_gre $Q_ML2_PLUGIN_GRE_TYPE_OPTIONS From 37421991b446f2077a9fb4e9a6d580b1c08044a3 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 20 May 2015 06:37:11 -0700 Subject: [PATCH 0227/2941] optional pymysql support This allows you to specify MYSQL_DRIVER=PyMySQL and get it in the environment. 
Change-Id: Ic9d75266640b7aa6d7efb6e882d3027e81414059 --- files/venv-requirements.txt | 1 - lib/databases/mysql | 13 +++++++++++-- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/files/venv-requirements.txt b/files/venv-requirements.txt index 73d05793ce..b9a55b423d 100644 --- a/files/venv-requirements.txt +++ b/files/venv-requirements.txt @@ -1,7 +1,6 @@ # Once we can prebuild wheels before a devstack run, uncomment the skipped libraries cryptography # lxml # still install from from packages -MySQL-python # netifaces # still install from packages #numpy # slowest wheel by far, stop building until we are actually using the output posix-ipc diff --git a/lib/databases/mysql b/lib/databases/mysql index 1b9a08199f..7cd2856ae9 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -11,6 +11,13 @@ MY_XTRACE=$(set +o | grep xtrace) set +o xtrace +MYSQL_DRIVER=${MYSQL_DRIVER:-MySQL-python} +# Force over to pymysql driver by default if we are using it. +if is_service_enabled mysql; then + if [[ "$MYSQL_DRIVER" == "PyMySQL" ]]; then + SQLALCHEMY_DATABASE_DRIVER=${SQLALCHEMY_DATABASE_DRIVER:-"pymysql"} + fi +fi register_database mysql @@ -155,8 +162,10 @@ EOF function install_database_python_mysql { # Install Python client module - pip_install_gr MySQL-python - ADDITIONAL_VENV_PACKAGES+=",MySQL-python" + pip_install_gr $MYSQL_DRIVER + if [[ "$MYSQL_DRIVER" == "MySQL-python" ]]; then + ADDITIONAL_VENV_PACKAGES+=",MySQL-python" + fi } function database_connection_url_mysql { From 165afa2377ee8eb6bad1b6cfb454a7de525a4498 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Mon, 25 May 2015 11:29:48 +1000 Subject: [PATCH 0228/2941] Fix msg argument to assert_equal I noticed this was taking an argument but not dealing with it. In general the functions were undocumented, so I added some terse usage. Also, the intent of the test-case was to expand the values before using them as the message; make sure this happens by using a temp variable. 
Change-Id: Ib317ad1e9dd2a5d2232b9c64541fe4a601a2b8da --- tests/test_truefalse.sh | 6 ++++-- tests/unittest.sh | 17 +++++++++++++++-- 2 files changed, 19 insertions(+), 4 deletions(-) diff --git a/tests/test_truefalse.sh b/tests/test_truefalse.sh index ebd9650649..2689589dc9 100755 --- a/tests/test_truefalse.sh +++ b/tests/test_truefalse.sh @@ -19,7 +19,8 @@ function test_trueorfalse { for default in True False; do for name in one captrue lowtrue uppertrue capyes lowyes upperyes; do - assert_equal "True" $(trueorfalse $default $name) "\$(trueorfalse $default $name)" + local msg="trueorfalse($default $name)" + assert_equal "True" $(trueorfalse $default $name) "$msg" done done @@ -33,7 +34,8 @@ function test_trueorfalse { for default in True False; do for name in zero capfalse lowfalse upperfalse capno lowno upperno; do - assert_equal "False" $(trueorfalse $default $name) "\$(trueorfalse $default $name)" + local msg="trueorfalse($default $name)" + assert_equal "False" $(trueorfalse $default $name) "$msg" done done } diff --git a/tests/unittest.sh b/tests/unittest.sh index 69f19b7dae..93aa5fc571 100644 --- a/tests/unittest.sh +++ b/tests/unittest.sh @@ -17,6 +17,8 @@ ERROR=0 PASS=0 FAILED_FUNCS="" +# pass a test, printing out MSG +# usage: passed message function passed { local lineno=$(caller 0 | awk '{print $1}') local function=$(caller 0 | awk '{print $2}') @@ -25,9 +27,11 @@ function passed { msg="OK" fi PASS=$((PASS+1)) - echo $function:L$lineno $msg + echo "PASS: $function:L$lineno $msg" } +# fail a test, printing out MSG +# usage: failed message function failed { local lineno=$(caller 0 | awk '{print $1}') local function=$(caller 0 | awk '{print $2}') @@ -38,10 +42,16 @@ function failed { ERROR=$((ERROR+1)) } +# assert string comparision of val1 equal val2, printing out msg +# usage: assert_equal val1 val2 msg function assert_equal { local lineno=`caller 0 | awk '{print $1}'` local function=`caller 0 | awk '{print $2}'` local msg=$3 + + if [ -z "$msg" ]; then + 
msg="OK" + fi if [[ "$1" != "$2" ]]; then FAILED_FUNCS+="$function:L$lineno\n" echo "ERROR: $1 != $2 in $function:L$lineno!" @@ -49,10 +59,13 @@ function assert_equal { ERROR=$((ERROR+1)) else PASS=$((PASS+1)) - echo "$function:L$lineno - ok" + echo "PASS: $function:L$lineno - $msg" fi } +# print a summary of passing and failing tests, exiting +# with an error if we have failed tests +# usage: report_results function report_results { echo "$PASS Tests PASSED" if [[ $ERROR -gt 1 ]]; then From fcefb0a910f78f36b329d8eb74d3849678a7a2b7 Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Sun, 26 Apr 2015 08:50:49 -0700 Subject: [PATCH 0229/2941] VMware: add support for simple DVS Add the file vmware_dvs that will enable the external CI to be run. The patch in the VMware repo for the DVS support is: https://review.openstack.org/#/c/177597/ Change-Id: I6dee978fd2be3818c5ce57b1dcb2917234ab61e2 --- lib/neutron_plugins/vmware_dvs | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 lib/neutron_plugins/vmware_dvs diff --git a/lib/neutron_plugins/vmware_dvs b/lib/neutron_plugins/vmware_dvs new file mode 100644 index 0000000000..587d5a6b11 --- /dev/null +++ b/lib/neutron_plugins/vmware_dvs @@ -0,0 +1,10 @@ +#!/bin/bash + +# This file is needed so Q_PLUGIN=vmware_dvs will work. 
+ +# FIXME(salv-orlando): This function should not be here, but unfortunately +# devstack calls it before the external plugins are fetched +function has_neutron_plugin_security_group { + # 0 means True here + return 0 +} From 93ee8c876ca2a8cdea98b6685538f85f1a7979ef Mon Sep 17 00:00:00 2001 From: Guillaume Giamarchi Date: Tue, 26 May 2015 02:08:44 +0200 Subject: [PATCH 0230/2941] Set IP_VERSION default value to 4+6 This is actually the default value since 645114b Change-Id: Ib6603b4f6ea0b4079f9a4ea46e723ecbb2ea371d --- doc/source/configuration.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 1cc7083bb4..8e2e7ffa31 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -391,7 +391,7 @@ Multi-host DevStack ENABLED_SERVICES=n-vol,n-cpu,n-net,n-api IP Version - | Default: ``IP_VERSION=4`` + | Default: ``IP_VERSION=4+6`` | This setting can be used to configure DevStack to create either an IPv4, IPv6, or dual stack tenant data network by setting ``IP_VERSION`` to either ``IP_VERSION=4``, ``IP_VERSION=6``, or ``IP_VERSION=4+6`` From 8606c98c53722f425525fa06eee554b7d30f62f6 Mon Sep 17 00:00:00 2001 From: fumihiko kakuma Date: Mon, 13 Apr 2015 09:55:06 +0900 Subject: [PATCH 0231/2941] Fix remove_disabled_extensions to remove an extension at the last position remove_disabled_extensions do matching by '$ext_to_remove","'. So it doesn't match an extension at the last position in extensions_list. This patch fixes that. 
Closes-Bug: #1443254 Change-Id: I194b483de797697ba06b320cf33f1bac67fc0cc7 --- functions-common | 41 +++++++++++++++++++++++++---------------- lib/tempest | 5 +---- tests/test_functions.sh | 25 +++++++++++++++++++++++++ 3 files changed, 51 insertions(+), 20 deletions(-) diff --git a/functions-common b/functions-common index 52d80fb95b..ff9261161c 100644 --- a/functions-common +++ b/functions-common @@ -1629,7 +1629,6 @@ function disable_all_services { function disable_negated_services { local to_remove="" local remaining="" - local enabled="" local service # build up list of services that should be removed; i.e. they @@ -1644,21 +1643,7 @@ function disable_negated_services { # go through the service list. if this service appears in the "to # be removed" list, drop it - for service in ${remaining//,/ }; do - local remove - local add=1 - for remove in ${to_remove//,/ }; do - if [[ ${remove} == ${service} ]]; then - add=0 - break - fi - done - if [[ $add == 1 ]]; then - enabled="${enabled},$service" - fi - done - - ENABLED_SERVICES=$(_cleanup_service_list "$enabled") + ENABLED_SERVICES=$(remove_disabled_services "$remaining" "$to_remove") } # disable_service() removes the services passed as argument to the @@ -1762,6 +1747,30 @@ function is_service_enabled { return $enabled } +# remove specified list from the input string +# remove_disabled_services service-list remove-list +function remove_disabled_services { + local service_list=$1 + local remove_list=$2 + local service + local enabled="" + + for service in ${service_list//,/ }; do + local remove + local add=1 + for remove in ${remove_list//,/ }; do + if [[ ${remove} == ${service} ]]; then + add=0 + break + fi + done + if [[ $add == 1 ]]; then + enabled="${enabled},$service" + fi + done + _cleanup_service_list "$enabled" +} + # Toggle enable/disable_service for services that must run exclusive of each other # $1 The name of a variable containing a space-separated list of services # $2 The name of a variable in which 
to store the enabled service's name diff --git a/lib/tempest b/lib/tempest index 6c34323802..ffb59ff59b 100644 --- a/lib/tempest +++ b/lib/tempest @@ -91,10 +91,7 @@ function remove_disabled_extensions { local extensions_list=$1 shift local disabled_exts=$* - for ext_to_remove in ${disabled_exts//,/ } ; do - extensions_list=${extensions_list/$ext_to_remove","} - done - echo $extensions_list + remove_disabled_services "$extensions_list" "$disabled_exts" } # configure_tempest() - Set config files, create data dirs, etc diff --git a/tests/test_functions.sh b/tests/test_functions.sh index 1d82792ca7..f555de8dff 100755 --- a/tests/test_functions.sh +++ b/tests/test_functions.sh @@ -137,6 +137,31 @@ test_disable_negated_services 'a,aa,-a' 'aa' test_disable_negated_services 'a,av2,-a,a' 'av2' test_disable_negated_services 'a,-a,av2' 'av2' +echo "Testing remove_disabled_services()" + +function test_remove_disabled_services { + local service_list="$1" + local remove_list="$2" + local expected="$3" + + results=$(remove_disabled_services "$service_list" "$remove_list") + if [ "$results" = "$expected" ]; then + passed "OK: '$service_list' - '$remove_list' -> '$results'" + else + failed "getting '$expected' from '$service_list' - '$remove_list' failed: '$results'" + fi +} + +test_remove_disabled_services 'a,b,c' 'a,c' 'b' +test_remove_disabled_services 'a,b,c' 'b' 'a,c' +test_remove_disabled_services 'a,b,c,d' 'a,c d' 'b' +test_remove_disabled_services 'a,b c,d' 'a d' 'b,c' +test_remove_disabled_services 'a,b,c' 'a,b,c' '' +test_remove_disabled_services 'a,b,c' 'd' 'a,b,c' +test_remove_disabled_services 'a,b,c' '' 'a,b,c' +test_remove_disabled_services '' 'a,b,c' '' +test_remove_disabled_services '' '' '' + echo "Testing is_package_installed()" if [[ -z "$os_PACKAGE" ]]; then From d82d3f13401320ec47757aff0457a307203b28fe Mon Sep 17 00:00:00 2001 From: Mahito Date: Fri, 22 May 2015 10:37:23 -0700 Subject: [PATCH 0232/2941] Add test case to 'cidr2netmask' 'cidr2netmask' of 
function doesn't have enough test case and test code isn't smart. This patch adds test case and refactors test code. Change-Id: Iab20ef06fe78316a78198ab75c0afe738a577dd6 Closes-bug: #1457989 --- tests/test_ip.sh | 80 +++++++++++++++++++++--------------------------- 1 file changed, 35 insertions(+), 45 deletions(-) diff --git a/tests/test_ip.sh b/tests/test_ip.sh index c53e80de36..f8c2058967 100755 --- a/tests/test_ip.sh +++ b/tests/test_ip.sh @@ -12,51 +12,41 @@ source $TOP/tests/unittest.sh echo "Testing IP addr functions" -if [[ $(cidr2netmask 4) == 240.0.0.0 ]]; then - passed "cidr2netmask(): /4...OK" -else - failed "cidr2netmask(): /4...failed" -fi -if [[ $(cidr2netmask 8) == 255.0.0.0 ]]; then - passed "cidr2netmask(): /8...OK" -else - failed "cidr2netmask(): /8...failed" -fi -if [[ $(cidr2netmask 12) == 255.240.0.0 ]]; then - passed "cidr2netmask(): /12...OK" -else - failed "cidr2netmask(): /12...failed" -fi -if [[ $(cidr2netmask 16) == 255.255.0.0 ]]; then - passed "cidr2netmask(): /16...OK" -else - failed "cidr2netmask(): /16...failed" -fi -if [[ $(cidr2netmask 20) == 255.255.240.0 ]]; then - passed "cidr2netmask(): /20...OK" -else - failed "cidr2netmask(): /20...failed" -fi -if [[ $(cidr2netmask 24) == 255.255.255.0 ]]; then - passed "cidr2netmask(): /24...OK" -else - failed "cidr2netmask(): /24...failed" -fi -if [[ $(cidr2netmask 28) == 255.255.255.240 ]]; then - passed "cidr2netmask(): /28...OK" -else - failed "cidr2netmask(): /28...failed" -fi -if [[ $(cidr2netmask 30) == 255.255.255.252 ]]; then - passed "cidr2netmask(): /30...OK" -else - failed "cidr2netmask(): /30...failed" -fi -if [[ $(cidr2netmask 32) == 255.255.255.255 ]]; then - passed "cidr2netmask(): /32...OK" -else - failed "cidr2netmask(): /32...failed" -fi +function test_cidr2netmask { + local mask=0 + local ips="128 192 224 240 248 252 254 255" + local ip + local msg + + msg="cidr2netmask(/0) == 0.0.0.0" + assert_equal "0.0.0.0" $(cidr2netmask $mask) "$msg" + + for ip in $ips; do + mask=$(( 
mask + 1 )) + msg="cidr2netmask(/$mask) == $ip.0.0.0" + assert_equal "$ip.0.0.0" $(cidr2netmask $mask) "$msg" + done + + for ip in $ips; do + mask=$(( mask + 1 )) + msg="cidr2netmask(/$mask) == 255.$ip.0.0" + assert_equal "255.$ip.0.0" $(cidr2netmask $mask) "$msg" + done + + for ip in $ips; do + mask=$(( mask + 1 )) + msg="cidr2netmask(/$mask) == 255.255.$ip.0" + assert_equal "255.255.$ip.0" $(cidr2netmask $mask) "$msg" + done + + for ip in $ips; do + mask=$(( mask + 1 )) + msg="cidr2netmask(/$mask) == 255.255.255.$ip" + assert_equal "255.255.255.$ip" $(cidr2netmask $mask) "$msg" + done +} + +test_cidr2netmask if [[ $(maskip 169.254.169.254 240.0.0.0) == 160.0.0.0 ]]; then passed "maskip(): /4...OK" From b3798af474955368211a297ba85332fde5491993 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 20 May 2015 06:48:02 -0700 Subject: [PATCH 0233/2941] change the default to PyMYSQL As discussed in the Liberty Design Summit "Moving apps to Python 3" cross-project workshop, the way forward in the near future is to switch to the pure-python PyMySQL library as a default. https://etherpad.openstack.org/p/liberty-cross-project-python3 Change-Id: Ic609ce136061b753ca692b37509a0b29c60bb8b5 --- lib/databases/mysql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/databases/mysql b/lib/databases/mysql index 7cd2856ae9..832c2ca6d7 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -11,7 +11,7 @@ MY_XTRACE=$(set +o | grep xtrace) set +o xtrace -MYSQL_DRIVER=${MYSQL_DRIVER:-MySQL-python} +MYSQL_DRIVER=${MYSQL_DRIVER:-PyMySQL} # Force over to pymysql driver by default if we are using it. 
if is_service_enabled mysql; then if [[ "$MYSQL_DRIVER" == "PyMySQL" ]]; then From aece9ff9eff94fcdd2bdac14d64536e16207139d Mon Sep 17 00:00:00 2001 From: Rob Crittenden Date: Tue, 26 May 2015 15:24:38 -0400 Subject: [PATCH 0234/2941] Use correct conf file variable name in sahara When the tls-proxy service is enabled then a separate set of ports is used internally vs externally. The services listen on the internal port and a proxy (stud) listen on the "standard" port and forward requests to the internal port. An incorrect environment variable was being used to set the internal port in the sahara configuration so it wasn't listening on the correct port, causing stack.sh to fail because it thought the service wasn't up (at least not on the right port). Change-Id: I3384039392be786d3c189f3e4f84e069ddaf4339 Closes-Bug: #1458984 --- lib/sahara | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/sahara b/lib/sahara index 6d4e8648bf..51e431afc7 100644 --- a/lib/sahara +++ b/lib/sahara @@ -186,7 +186,7 @@ function configure_sahara { if is_service_enabled tls-proxy; then # Set the service port for a proxy to take the original - iniset $SAHARA_CONF DEFAULT port $SAHARA_SERVICE_PORT_INT + iniset $SAHARA_CONF_FILE DEFAULT port $SAHARA_SERVICE_PORT_INT fi recreate_database sahara From 73d24b2c1c1795a1d8b7f6dcdd608ad387d125b9 Mon Sep 17 00:00:00 2001 From: Lucas Alvares Gomes Date: Wed, 27 May 2015 11:41:33 +0100 Subject: [PATCH 0235/2941] Ironic: Remove deprecated parameters Ironic have updated some parameters to have a consistent name across drivers. This patch is updating devstack to stop using the pxe_deploy_{kernel, ramdisk} parameters which have been deprecated since early Kilo eliminating the deprecation warnings in the logs. WARNING ironic.drivers.modules.pxe [-] The "pxe_deploy_kernel" parameter is deprecated. Please update the node 267e42c8-df07-49f5-bc7f-48b566acb109 to use "deploy_kernel" instead. 
WARNING ironic.drivers.modules.pxe [-] The "pxe_deploy_ramdisk" parameter is deprecated. Please update the node 267e42c8-df07-49f5-bc7f-48b566acb109 to use "deploy_ramdisk" instead. Change-Id: I3dcf8df130efc0c2ea35695018bedba31bf0570c --- lib/ironic | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/lib/ironic b/lib/ironic index 7493c3cab2..4984be1861 100644 --- a/lib/ironic +++ b/lib/ironic @@ -569,14 +569,6 @@ function wait_for_nova_resources { function enroll_nodes { local chassis_id=$(ironic chassis-create -d "ironic test chassis" | grep " uuid " | get_field 2) - if [[ "$IRONIC_DEPLOY_DRIVER" == "pxe_ssh" ]] ; then - local _IRONIC_DEPLOY_KERNEL_KEY=pxe_deploy_kernel - local _IRONIC_DEPLOY_RAMDISK_KEY=pxe_deploy_ramdisk - elif is_deployed_by_agent; then - local _IRONIC_DEPLOY_KERNEL_KEY=deploy_kernel - local _IRONIC_DEPLOY_RAMDISK_KEY=deploy_ramdisk - fi - if ! is_ironic_hardware; then local ironic_node_cpu=$IRONIC_VM_SPECS_CPU local ironic_node_ram=$IRONIC_VM_SPECS_RAM @@ -584,8 +576,8 @@ function enroll_nodes { local ironic_ephemeral_disk=$IRONIC_VM_EPHEMERAL_DISK local ironic_hwinfo_file=$IRONIC_VM_MACS_CSV_FILE local node_options="\ - -i $_IRONIC_DEPLOY_KERNEL_KEY=$IRONIC_DEPLOY_KERNEL_ID \ - -i $_IRONIC_DEPLOY_RAMDISK_KEY=$IRONIC_DEPLOY_RAMDISK_ID \ + -i deploy_kernel=$IRONIC_DEPLOY_KERNEL_ID \ + -i deploy_ramdisk=$IRONIC_DEPLOY_RAMDISK_ID \ -i ssh_virt_type=$IRONIC_SSH_VIRT_TYPE \ -i ssh_address=$IRONIC_VM_SSH_ADDRESS \ -i ssh_port=$IRONIC_VM_SSH_PORT \ @@ -616,8 +608,8 @@ function enroll_nodes { # we create the bare metal flavor with minimum value local node_options="-i ipmi_address=$ipmi_address -i ipmi_password=$ironic_ipmi_passwd\ -i ipmi_username=$ironic_ipmi_username" - node_options+=" -i $_IRONIC_DEPLOY_KERNEL_KEY=$IRONIC_DEPLOY_KERNEL_ID" - node_options+=" -i $_IRONIC_DEPLOY_RAMDISK_KEY=$IRONIC_DEPLOY_RAMDISK_ID" + node_options+=" -i deploy_kernel=$IRONIC_DEPLOY_KERNEL_ID" + node_options+=" -i 
deploy_ramdisk=$IRONIC_DEPLOY_RAMDISK_ID" fi # First node created will be used for testing in ironic w/o glance From 3fd71d68933f2c4e38ff7fa58416ec0263325a9f Mon Sep 17 00:00:00 2001 From: Samuel de Medeiros Queiroz Date: Sun, 3 May 2015 14:54:45 -0300 Subject: [PATCH 0236/2941] Honor the flag for Identity v3 API only jobs When the property ENABLE_IDENTITY_V2 is set to False in the local.conf file, devstack will: * Disable the v2 API in Keystone paste config; * Set Tempest to skip Identity v2 tests and use v3 auth tokens to run all the other tests; * Set Horizon to use v3 API and v3 auth tokens; * Register the Identity endpoint as v3. Change-Id: I2575a516244b848e5ed461e7f488c59edc41068d --- lib/horizon | 9 ++++++++- lib/keystone | 6 ++++++ lib/tempest | 10 +++++++++- stackrc | 19 ++++++++++++++++--- 4 files changed, 39 insertions(+), 5 deletions(-) diff --git a/lib/horizon b/lib/horizon index f953f5cc01..ab6e758409 100644 --- a/lib/horizon +++ b/lib/horizon @@ -97,7 +97,14 @@ function init_horizon { _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_DEFAULT_ROLE \"Member\" _horizon_config_set $local_settings "" OPENSTACK_HOST \"${KEYSTONE_SERVICE_HOST}\" - _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_URL "\"${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}:${KEYSTONE_SERVICE_PORT}/v2.0\"" + + if [ "$ENABLE_IDENTITY_V2" == "False" ]; then + # Only Identity v3 API is available; then use it with v3 auth tokens + _horizon_config_set $local_settings "" OPENSTACK_API_VERSIONS {\"identity\":\"v3\"} + _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_URL "\"${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}:${KEYSTONE_SERVICE_PORT}/v3\"" + else + _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_URL "\"${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}:${KEYSTONE_SERVICE_PORT}/v2.0\"" + fi if [ -f $SSL_BUNDLE_FILE ]; then _horizon_config_set $local_settings "" OPENSTACK_SSL_CACERT \"${SSL_BUNDLE_FILE}\" diff --git 
a/lib/keystone b/lib/keystone index 997bb14967..de2d2ca53f 100644 --- a/lib/keystone +++ b/lib/keystone @@ -204,6 +204,12 @@ function configure_keystone { KEYSTONE_PASTE_INI="$KEYSTONE_CONF" fi + if [ "$ENABLE_IDENTITY_V2" == "False" ]; then + # Only Identity v3 API should be available; then disable v2 pipelines + inidelete $KEYSTONE_PASTE_INI composite:main \\/v2.0 + inidelete $KEYSTONE_PASTE_INI composite:admin \\/v2.0 + fi + configure_keystone_extensions # Rewrite stock ``keystone.conf`` diff --git a/lib/tempest b/lib/tempest index cd8fbd725f..18e3703110 100644 --- a/lib/tempest +++ b/lib/tempest @@ -311,7 +311,15 @@ function configure_tempest { iniset $TEMPEST_CONFIG identity admin_tenant_id $ADMIN_TENANT_ID iniset $TEMPEST_CONFIG identity admin_domain_name $ADMIN_DOMAIN_NAME fi - iniset $TEMPEST_CONFIG identity auth_version ${TEMPEST_AUTH_VERSION:-v2} + if [ "$ENABLE_IDENTITY_V2" == "False" ]; then + # Only Identity v3 is available; then skip Identity API v2 tests + iniset $TEMPEST_CONFIG identity-feature-enabled v2_api False + # In addition, use v3 auth tokens for running all Tempest tests + iniset $TEMPEST_CONFIG identity auth_version v3 + else + iniset $TEMPEST_CONFIG identity auth_version ${TEMPEST_AUTH_VERSION:-v2} + fi + if is_ssl_enabled_service "key" || is_service_enabled tls-proxy; then iniset $TEMPEST_CONFIG identity ca_certificates_file $SSL_BUNDLE_FILE fi diff --git a/stackrc b/stackrc index 2a49ea5ef5..af5ed6e16d 100644 --- a/stackrc +++ b/stackrc @@ -87,9 +87,6 @@ TEMPEST_SERVICES="" # Set the default Nova APIs to enable NOVA_ENABLED_APIS=ec2,osapi_compute,metadata -# Configure Identity API version: 2.0, 3 -IDENTITY_API_VERSION=2.0 - # Whether to use 'dev mode' for screen windows. Dev mode works by # stuffing text into the screen windows so that a developer can use # ctrl-c, up-arrow, enter to restart the service. 
Starting services @@ -106,6 +103,22 @@ elif [[ -f $RC_DIR/.localrc.auto ]]; then source $RC_DIR/.localrc.auto fi +# Configure Identity API version: 2.0, 3 +IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-2.0} + +# Set the option ENABLE_IDENTITY_V2 to True. It defines whether the DevStack +# deployment will be deploying the Identity v2 pipelines. If this option is set +# to ``False``, DevStack will: i) disable Identity v2; ii) configure Tempest to +# skip Identity v2 specific tests; and iii) configure Horizon to use Identity +# v3. When this option is set to ``False``, the option IDENTITY_API_VERSION +# will to be set to ``3`` in order to make DevStack register the Identity +# endpoint as v3. This flag is experimental and will be used as basis to +# identify the projects which still have issues to operate with Identity v3. +ENABLE_IDENTITY_V2=$(trueorfalse True ENABLE_IDENTITY_V2) +if [ "$ENABLE_IDENTITY_V2" == "False" ]; then + IDENTITY_API_VERSION=3 +fi + # Enable use of Python virtual environments. Individual project use of # venvs are controlled by the PROJECT_VENV array; every project with # an entry in the array will be installed into the named venv. From dd363a182fb1f8472bc163c82ea5f48e8f8fd29e Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Mon, 25 May 2015 11:50:32 +1000 Subject: [PATCH 0237/2941] Cleanup test_ip.sh to give more consistent output Minor changes to give more consistent output; no functional changes to tests. 
Change-Id: I6c4ef74587c59b786761735c7bd528f3d7f94905 --- tests/test_ip.sh | 76 +++++++++++++++++++----------------------------- 1 file changed, 30 insertions(+), 46 deletions(-) diff --git a/tests/test_ip.sh b/tests/test_ip.sh index f8c2058967..da939f41d1 100755 --- a/tests/test_ip.sh +++ b/tests/test_ip.sh @@ -48,60 +48,44 @@ function test_cidr2netmask { test_cidr2netmask -if [[ $(maskip 169.254.169.254 240.0.0.0) == 160.0.0.0 ]]; then - passed "maskip(): /4...OK" -else - failed "maskip(): /4...failed" -fi -if [[ $(maskip 169.254.169.254 255.0.0.0) == 169.0.0.0 ]]; then - passed "maskip(): /8...OK" -else - failed "maskip(): /8...failed" -fi -if [[ $(maskip 169.254.169.254 255.240.0.0) == 169.240.0.0 ]]; then - passed "maskip(): /12...OK" -else - failed "maskip(): /12...failed" -fi -if [[ $(maskip 169.254.169.254 255.255.0.0) == 169.254.0.0 ]]; then - passed "maskip(): /16...OK" -else - failed "maskip(): /16...failed" -fi -if [[ $(maskip 169.254.169.254 255.255.240.0) == 169.254.160.0 ]]; then - passed "maskip(): /20...OK" -else - failed "maskip(): /20...failed" -fi -if [[ $(maskip 169.254.169.254 255.255.255.0) == 169.254.169.0 ]]; then - passed "maskip(): /24...OK" -else - failed "maskip(): /24...failed" -fi -if [[ $(maskip 169.254.169.254 255.255.255.240) == 169.254.169.240 ]]; then - passed "maskip(): /28...OK" -else - failed "maskip(): /28...failed" -fi -if [[ $(maskip 169.254.169.254 255.255.255.255) == 169.254.169.254 ]]; then - passed "maskip(): /32...OK" -else - failed "maskip(): /32...failed" -fi +msg="maskip(169.254.169.254 240.0.0.0) == 160.0.0.0" +assert_equal $(maskip 169.254.169.254 240.0.0.0) 160.0.0.0 "$msg" + +msg="maskip(169.254.169.254 255.0.0.0) == 169.0.0.0" +assert_equal $(maskip 169.254.169.254 255.0.0.0) 169.0.0.0 "$msg" + +msg="maskip(169.254.169.254 255.240.0.0) == 169.240.0.0" +assert_equal $(maskip 169.254.169.254 255.240.0.0) 169.240.0.0 "$msg" + +msg="maskip(169.254.169.254 255.255.0.0) == 169.254.0.0" +assert_equal $(maskip 
169.254.169.254 255.255.0.0) 169.254.0.0 "$msg" + +msg="maskip(169.254.169.254 255.255.240.0) == 169.254.160.0" +assert_equal $(maskip 169.254.169.254 255.255.240.0) 169.254.160.0 "$msg" + +msg="maskip(169.254.169.254 255.255.255.0) == 169.254.169.0" +assert_equal $(maskip 169.254.169.254 255.255.255.0) 169.254.169.0 "$msg" + +msg="maskip(169.254.169.254 255.255.255.240) == 169.254.169.240" +assert_equal $(maskip 169.254.169.254 255.255.255.240) 169.254.169.240 "$msg" + +msg="maskip(169.254.169.254 255.255.255.255) == 169.254.169.254" +assert_equal $(maskip 169.254.169.254 255.255.255.255) 169.254.169.254 "$msg" + for mask in 8 12 16 20 24 26 28; do - echo -n "address_in_net(): in /$mask..." + msg="address_in_net($10.10.10.1 10.10.10.0/$mask)" if address_in_net 10.10.10.1 10.10.10.0/$mask; then - passed "OK" + passed "$msg" else - failed "address_in_net() failed on /$mask" + failed "$msg" fi - echo -n "address_in_net(): not in /$mask..." + msg="! address_in_net($10.10.10.1 11.11.11.0/$mask)" if ! 
address_in_net 10.10.10.1 11.11.11.0/$mask; then - passed "OK" + passed "$msg" else - failed "address_in_net() failed on /$mask" + failed "$msg" fi done From 9ee1ef6cb8e06864e2341f4121372028d6d59c64 Mon Sep 17 00:00:00 2001 From: Shin Sato Date: Thu, 28 May 2015 13:56:58 +0900 Subject: [PATCH 0238/2941] Fix typo: _create_volume_group => _create_lvm_volume_group Change-Id: Ifb648c9eb4a57ac0fc97afb842e83286789801dd --- lib/lvm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/lvm b/lib/lvm index 1fe2683e65..8afd543f34 100644 --- a/lib/lvm +++ b/lib/lvm @@ -78,7 +78,7 @@ function clean_lvm_volume_group { } -# _create_volume_group creates default volume group +# _create_lvm_volume_group creates default volume group # # Usage: _create_lvm_volume_group() $vg $size function _create_lvm_volume_group { From 40c5ea67d34168048068d115e5d870a5065d4b0f Mon Sep 17 00:00:00 2001 From: Bob Ball Date: Thu, 28 May 2015 06:42:03 +0100 Subject: [PATCH 0239/2941] XenAPI: Increase OpenStack DomU usage again Devstack has continued to grow in memory requirements and now we cannot reliably fit in 4GB, with several services being unable to start. Increase the minimum for DomU to 6GB to leave room for virtual machines Change-Id: Idbdfa1f36015b6af347d1ce27eb28baa360af5ef --- tools/xen/xenrc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/xen/xenrc b/tools/xen/xenrc index 43a6ce8a77..be6c5ca037 100644 --- a/tools/xen/xenrc +++ b/tools/xen/xenrc @@ -14,12 +14,12 @@ CLEAN_TEMPLATES=${CLEAN_TEMPLATES:-false} # Size of image VDI_MB=${VDI_MB:-5000} -# Devstack now contains many components. 3GB ram is not enough to prevent +# Devstack now contains many components. 4GB ram is not enough to prevent # swapping and memory fragmentation - the latter of which can cause failures # such as blkfront failing to plug a VBD and lead to random test fails. 
# -# Set to 4GB so an 8GB XenServer VM can have a 1GB Dom0 and leave 3GB for VMs -OSDOMU_MEM_MB=4096 +# Set to 6GB so an 8GB XenServer VM can have a 1GB Dom0 and leave 1GB for VMs +OSDOMU_MEM_MB=6144 OSDOMU_VDI_GB=8 # Network mapping. Specify bridge names or network names. Network names may From 75c44737121baea0c56050599350bc7de8f22799 Mon Sep 17 00:00:00 2001 From: Yalei Wang Date: Wed, 13 May 2015 12:43:56 +0800 Subject: [PATCH 0240/2941] Remove the code against flushing public bridge As unstack.sh does destroy all bridges, we don't have to refresh bridge interface any more, as what is done in commit c2dc95add6e46829f1705041c1d9dddab9b360d3. So in this commit we will continue to remove the related statements in lib/neutron-legacy. These statements will also cause undefined PUBLIC_BRIDGE error. Change-Id: I4c7617f6a245ea4e2e08f518d873b1b8adc2b807 Closes-Bug: #1454475 --- lib/neutron-legacy | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 18b0942ae1..a08778ca32 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -1294,17 +1294,6 @@ function _neutron_configure_router_v6 { IPV6_ROUTER_GW_IP=`neutron port-list -c fixed_ips | grep $ipv6_pub_subnet_id | awk -F '"' -v subnet_id=$ipv6_pub_subnet_id '$4 == subnet_id { print $8; }'` die_if_not_set $LINENO IPV6_ROUTER_GW_IP "Failure retrieving IPV6_ROUTER_GW_IP" - # The ovs_base_configure_l3_agent function flushes the public - # bridge's ip addresses, so turn IPv6 support in the host off - # and then on to recover the public bridge's link local address - sudo sysctl -w net.ipv6.conf.${PUBLIC_BRIDGE}.disable_ipv6=1 - sudo sysctl -w net.ipv6.conf.${PUBLIC_BRIDGE}.disable_ipv6=0 - if ! 
ip -6 addr show dev $PUBLIC_BRIDGE | grep 'scope global'; then - # Create an IPv6 ULA address for PUBLIC_BRIDGE if one is not present - IPV6_BRIDGE_ULA=`uuidgen | sed s/-//g | cut -c 23- | sed -e "s/\(..\)\(....\)\(....\)/\1:\2:\3/"` - sudo ip -6 addr add fd$IPV6_BRIDGE_ULA::1 dev $PUBLIC_BRIDGE - fi - if is_neutron_ovs_base_plugin && [[ "$Q_USE_NAMESPACE" = "True" ]]; then local ext_gw_interface=$(_neutron_get_ext_gw_interface) local ipv6_cidr_len=${IPV6_PUBLIC_RANGE#*/} From c550f2158970fc222cf01ddccf71d03f96a4651d Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Thu, 28 May 2015 15:38:01 +0200 Subject: [PATCH 0241/2941] Enable F22 without the FORCE flag F22 has a stable release and working with devstack. The change also removes the version flags regarding to the mariadb-devel. NOTE: You may see yum deprecation warnings, unless you set the YUM variable to dnf. Change-Id: I05140765bffc16faef5a29dfaba291c290bfae02 --- files/rpms/devlibs | 3 +-- lib/ceph | 2 +- stack.sh | 2 +- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/files/rpms/devlibs b/files/rpms/devlibs index 834a4b6cf1..385ed3b84c 100644 --- a/files/rpms/devlibs +++ b/files/rpms/devlibs @@ -1,8 +1,7 @@ libffi-devel # pyOpenSSL libxml2-devel # lxml libxslt-devel # lxml -mariadb-devel # MySQL-python f20,f21,rhel7 -mysql-devel # MySQL-python rhel6 +mariadb-devel # MySQL-python openssl-devel # pyOpenSSL postgresql-devel # psycopg2 python-devel # pyOpenSSL diff --git a/lib/ceph b/lib/ceph index 4068e26222..4d6ca4aa68 100644 --- a/lib/ceph +++ b/lib/ceph @@ -110,7 +110,7 @@ function undefine_virsh_secret { # check_os_support_ceph() - Check if the operating system provides a decent version of Ceph function check_os_support_ceph { - if [[ ! ${DISTRO} =~ (trusty|f20|f21) ]]; then + if [[ ! ${DISTRO} =~ (trusty|f20|f21|f22) ]]; then echo "WARNING: your distro $DISTRO does not provide (at least) the Firefly release. 
Please use Ubuntu Trusty or Fedora 20 (and higher)" if [[ "$FORCE_CEPH_INSTALL" != "yes" ]]; then die $LINENO "If you wish to install Ceph on this distribution anyway run with FORCE_CEPH_INSTALL=yes" diff --git a/stack.sh b/stack.sh index dea56437dd..6615b8fef4 100755 --- a/stack.sh +++ b/stack.sh @@ -173,7 +173,7 @@ source $TOP_DIR/stackrc # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (precise|trusty|7.0|wheezy|sid|testing|jessie|f20|f21|rhel7) ]]; then +if [[ ! ${DISTRO} =~ (precise|trusty|7.0|wheezy|sid|testing|jessie|f20|f21|f22|rhel7) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" From 31127a2a74ab851fe4b133e7f413719f370d7c94 Mon Sep 17 00:00:00 2001 From: Tony Breeds Date: Fri, 15 May 2015 13:09:26 +1000 Subject: [PATCH 0242/2941] Skip 'shocco' code when running tox when running tox in a devstack directory where you have previously run tox -edocs the bashate testenv will fail as the shocco code doesn't match the devstack style. eg: --- E003: Indent not multiple of 4: ' 2>/dev/null ||' - /home/stack/projects/openstack/openstack-dev/devstack/shocco/shocco.sh : L352 27 bashate error(s) found --- Take the easy path and avoid running bashate in the shocco dir. 
Change-Id: I5b0155332ec994afaffc5c5961902281864cff61 --- tox.ini | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/tox.ini b/tox.ini index 279dcd4f66..e3d19ce60c 100644 --- a/tox.ini +++ b/tox.ini @@ -10,19 +10,20 @@ install_command = pip install {opts} {packages} [testenv:bashate] deps = bashate whitelist_externals = bash -commands = bash -c "find {toxinidir} \ - -not \( -type d -name .?\* -prune \) \ # prune all 'dot' dirs - -not \( -type d -name doc -prune \) \ # skip documentation - -type f \ # only files - -not -name \*~ \ # skip editors, readme, etc - -not -name \*.md \ - \( \ - -name \*.sh -or \ - -name \*rc -or \ - -name functions\* -or \ - -wholename \*/inc/\* -or \ # /inc files and - -wholename \*/lib/\* \ # /lib files are shell, but - \) \ # have no extension +commands = bash -c "find {toxinidir} \ + -not \( -type d -name .?\* -prune \) \ # prune all 'dot' dirs + -not \( -type d -name doc -prune \) \ # skip documentation + -not \( -type d -name shocco -prune \) \ # skip shocco + -type f \ # only files + -not -name \*~ \ # skip editors, readme, etc + -not -name \*.md \ + \( \ + -name \*.sh -or \ + -name \*rc -or \ + -name functions\* -or \ + -wholename \*/inc/\* -or \ # /inc files and + -wholename \*/lib/\* \ # /lib files are shell, but + \) \ # have no extension -print0 | xargs -0 bashate -v" [testenv:docs] From 6d50d95cae72435330690e518e4b7dbf06c75f84 Mon Sep 17 00:00:00 2001 From: Tony Breeds Date: Fri, 29 May 2015 12:26:31 +1000 Subject: [PATCH 0243/2941] Skip the .tox dir when building docs When running tools/build_docs.sh in a devstack dir that has also run tox build_docs needlessly runs shocco on the .tox files. Just skip them. 
Change-Id: Ia561e49ea2214ac75bd55964f1b86872118b2031 --- tools/build_docs.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/build_docs.sh b/tools/build_docs.sh index fda86c05cd..fa843432b5 100755 --- a/tools/build_docs.sh +++ b/tools/build_docs.sh @@ -75,7 +75,7 @@ rm -f $GLOG # Build list of scripts to process FILES="" -for f in $(find . -name .git -prune -o \( -type f -name \*.sh -not -path \*shocco/\* -print \)); do +for f in $(find . \( -name .git -o -name .tox \) -prune -o \( -type f -name \*.sh -not -path \*shocco/\* -print \)); do echo $f FILES+="$f " mkdir -p $FQ_HTML_BUILD/`dirname $f`; From 02ae50dc995815641c787d821c69ac537ac6527a Mon Sep 17 00:00:00 2001 From: "Sean M. Collins" Date: Fri, 20 Mar 2015 09:58:55 -0700 Subject: [PATCH 0244/2941] Documentation for single interface Neutron networking with OVS Change-Id: I7a72377f55952db629c2ce7ba4ed648635e581ef --- doc/source/conf.py | 2 +- doc/source/guides/neutron.rst | 74 +++++++++++++++++++++++++++++++++-- tox.ini | 4 ++ 3 files changed, 75 insertions(+), 5 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index 3e9aa45911..6e3ec029e9 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -26,7 +26,7 @@ # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = [ 'oslosphinx' ] +extensions = [ 'oslosphinx', 'sphinxcontrib.blockdiag', 'sphinxcontrib.nwdiag' ] todo_include_todos = True diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst index b0a89070fb..bdfd3a4afa 100644 --- a/doc/source/guides/neutron.rst +++ b/doc/source/guides/neutron.rst @@ -5,11 +5,77 @@ Using DevStack with neutron Networking This guide will walk you through using OpenStack neutron with the ML2 plugin and the Open vSwitch mechanism driver. 
-Network Interface Configuration -=============================== -To use neutron, it is suggested that two network interfaces be present -in the host operating system. +Using Neutron with a Single Interface +===================================== + +In some instances, like on a developer laptop, there is only one +network interface that is available. In this scenario, the physical +interface is added to the Open vSwitch bridge, and the IP address of +the laptop is migrated onto the bridge interface. That way, the +physical interface can be used to transmit tenant network traffic, +the OpenStack API traffic, and management traffic. + + +Physical Network Setup +---------------------- + +In most cases where DevStack is being deployed with a single +interface, there is a hardware router that is being used for external +connectivity and DHCP. The developer machine is connected to this +network and is on a shared subnet with other machines. + +.. nwdiag:: + + nwdiag { + inet [ shape = cloud ]; + router; + inet -- router; + + network hardware_network { + address = "172.18.161.0/24" + router [ address = "172.18.161.1" ]; + devstack_laptop [ address = "172.18.161.6" ]; + } + } + + +DevStack Configuration +---------------------- + + +:: + + HOST_IP=172.18.161.6 + SERVICE_HOST=172.18.161.6 + MYSQL_HOST=172.18.161.6 + RABBIT_HOST=172.18.161.6 + GLANCE_HOSTPORT=172.18.161.6:9292 + ADMIN_PASSWORD=secrete + MYSQL_PASSWORD=secrete + RABBIT_PASSWORD=secrete + SERVICE_PASSWORD=secrete + SERVICE_TOKEN=secrete + + ## Neutron options + Q_USE_SECGROUP=True + FLOATING_RANGE="172.18.161.1/24" + FIXED_RANGE="10.0.0.0/24" + Q_FLOATING_ALLOCATION_POOL=start=172.18.161.250,end=172.18.161.254 + PUBLIC_NETWORK_GATEWAY="172.18.161.1" + Q_L3_ENABLED=True + PUBLIC_INTERFACE=eth0 + Q_USE_PROVIDERNET_FOR_PUBLIC=True + OVS_PHYSICAL_BRIDGE=br-ex + PUBLIC_BRIDGE=br-ex + OVS_BRIDGE_MAPPINGS=public:br-ex + + + + + +Using Neutron with Multiple Interfaces +====================================== The first 
interface, eth0 is used for the OpenStack management (API, message bus, etc) as well as for ssh for an administrator to access diff --git a/tox.ini b/tox.ini index 279dcd4f66..f76e2565eb 100644 --- a/tox.ini +++ b/tox.ini @@ -32,6 +32,10 @@ deps = sphinx>=1.1.2,<1.2 pbr>=0.6,!=0.7,<1.0 oslosphinx + nwdiag + blockdiag + sphinxcontrib-blockdiag + sphinxcontrib-nwdiag whitelist_externals = bash setenv = TOP_DIR={toxinidir} From 7ef246492c8613c80b197fcec93ca65c5db47cb1 Mon Sep 17 00:00:00 2001 From: David Lyle Date: Fri, 29 May 2015 13:49:03 -0600 Subject: [PATCH 0245/2941] Fixing keystone v3 version use for horizon The setting for overriding Horizon's OPENSTACK_API_VERSIONS is not the correct format. The version should be a number, not a string. so should be 3, not "v3". Change-Id: I193d21514b196336796eac067417dc2aaec56433 Closes-Bug: #1460190 --- lib/horizon | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/horizon b/lib/horizon index ab6e758409..b0f306b675 100644 --- a/lib/horizon +++ b/lib/horizon @@ -100,7 +100,7 @@ function init_horizon { if [ "$ENABLE_IDENTITY_V2" == "False" ]; then # Only Identity v3 API is available; then use it with v3 auth tokens - _horizon_config_set $local_settings "" OPENSTACK_API_VERSIONS {\"identity\":\"v3\"} + _horizon_config_set $local_settings "" OPENSTACK_API_VERSIONS {\"identity\":3} _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_URL "\"${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}:${KEYSTONE_SERVICE_PORT}/v3\"" else _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_URL "\"${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}:${KEYSTONE_SERVICE_PORT}/v2.0\"" From 4be092da3eb79ffe172d29ed767815fb13d658ca Mon Sep 17 00:00:00 2001 From: Thomas Bechtold Date: Sat, 30 May 2015 23:19:18 +0200 Subject: [PATCH 0246/2941] Fix devlibs for SUSE libmysqld-devel is needed for installing MySQL-python. 
Otherwise the following error occurs: EnvironmentError: mysql_config not found Change-Id: Id84d3116d5987976169d8e2f9aca754ded205880 --- files/rpms-suse/devlibs | 1 + 1 file changed, 1 insertion(+) diff --git a/files/rpms-suse/devlibs b/files/rpms-suse/devlibs index c9238252af..02c5398079 100644 --- a/files/rpms-suse/devlibs +++ b/files/rpms-suse/devlibs @@ -4,3 +4,4 @@ libxml2-devel # lxml libxslt-devel # lxml postgresql-devel # psycopg2 python-devel # pyOpenSSL +libmysqld-devel # MySQL-python \ No newline at end of file From 06efa2ac5df6f64785c2574e653595625dbbffde Mon Sep 17 00:00:00 2001 From: Thomas Bechtold Date: Sat, 30 May 2015 23:55:33 +0200 Subject: [PATCH 0247/2941] Fix horizon files for SUSE python-CherryPy and python-beautifulsoup are not needed so remove them from the list of packages for horizon. Change-Id: I45ddf98b5891a1f1f1da82bb4afa79ea43d156cc --- files/rpms-suse/horizon | 2 -- 1 file changed, 2 deletions(-) diff --git a/files/rpms-suse/horizon b/files/rpms-suse/horizon index c45eae6153..5ba5aafd38 100644 --- a/files/rpms-suse/horizon +++ b/files/rpms-suse/horizon @@ -1,13 +1,11 @@ apache2 # NOPRIME apache2-mod_wsgi # NOPRIME -python-CherryPy # why? (coming from apts) python-Paste python-PasteDeploy python-Routes python-SQLAlchemy python-WebOb python-anyjson -python-beautifulsoup python-coverage python-dateutil python-eventlet From a858085afb46922760d9e89c34feb988ea283a54 Mon Sep 17 00:00:00 2001 From: Thomas Bechtold Date: Sun, 31 May 2015 00:04:33 +0200 Subject: [PATCH 0248/2941] Simplify add_user_to_group function Current SLE12 and openSUSE13.X versions can handle usermod's '-a' and '-G' switches so remove the special case. 
Change-Id: If0f1390a0eb8f41ffffca74525a4648cfe8ea61d --- functions-common | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/functions-common b/functions-common index ff9261161c..3a2f5f7f41 100644 --- a/functions-common +++ b/functions-common @@ -1842,16 +1842,7 @@ function add_user_to_group { local user=$1 local group=$2 - if [[ -z "$os_VENDOR" ]]; then - GetOSVersion - fi - - # SLE11 and openSUSE 12.2 don't have the usual usermod - if ! is_suse || [[ "$os_VENDOR" = "openSUSE" && "$os_RELEASE" != "12.2" ]]; then - sudo usermod -a -G "$group" "$user" - else - sudo usermod -A "$group" "$user" - fi + sudo usermod -a -G "$group" "$user" } # Convert CIDR notation to a IPv4 netmask From 6bc905c3488a93fa87776bcd0af7e362a90b082f Mon Sep 17 00:00:00 2001 From: Tony Breeds Date: Fri, 15 May 2015 12:51:43 +1000 Subject: [PATCH 0249/2941] Change the restart_rpc_backend loop to accomodate async rabbitmq Some distros have converted to systemd for starting RabbitMQ. This has resulted in: --- [Call Trace] ./stack.sh:904:restart_rpc_backend /home/stack/projects/openstack/openstack-dev/devstack/lib/rpc_backend:201:die [ERROR] /home/stack/projects/openstack/openstack-dev/devstack/lib/rpc_backend:201 Failed to set rabbitmq password Error on exit World dumping... see /opt/stack/logs/worlddump-2015-05-29-031618.txt for details --- Because 'restart_service rabbitmq-server' returns before the server is ready to accept connections. Alter the retry loop to only restart the rabbitmq-server every second time through the loop. Allowing time for the slow rabbit to start. 
Closes-Bug: 1449056 Change-Id: Ibb291c1ecfd109f9ed10b5f194933364985cc1ce --- lib/rpc_backend | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/lib/rpc_backend b/lib/rpc_backend index 297ebacc08..33ab03d664 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -194,13 +194,22 @@ function restart_rpc_backend { # NOTE(bnemec): Retry initial rabbitmq configuration to deal with # the fact that sometimes it fails to start properly. # Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1144100 + # NOTE(tonyb): Extend the orginal retry logic to only restart rabbitmq + # every second time around the loop. + # See: https://bugs.launchpad.net/devstack/+bug/1449056 for details on + # why this is needed. This can bee seen on vivid and Debian unstable + # (May 2015) + # TODO(tonyb): Remove this when Debian and Ubuntu have a fixed systemd + # service file. local i - for i in `seq 10`; do + for i in `seq 20`; do local rc=0 - [[ $i -eq "10" ]] && die $LINENO "Failed to set rabbitmq password" + [[ $i -eq "20" ]] && die $LINENO "Failed to set rabbitmq password" - restart_service rabbitmq-server + if [[ $(( i % 2 )) == "0" ]] ; then + restart_service rabbitmq-server + fi rabbit_setuser "$RABBIT_USERID" "$RABBIT_PASSWORD" || rc=$? if [ $rc -ne 0 ]; then From 0bedeb906244b8ecf32cff43d4a73717217801b2 Mon Sep 17 00:00:00 2001 From: Tony Breeds Date: Fri, 15 May 2015 12:51:16 +1000 Subject: [PATCH 0250/2941] Add utopic and vivid as a supported distros Change-Id: I63843335bd70ab9701bbd10dcf61f3eaa45a10e8 --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 6615b8fef4..dc79fa94f7 100755 --- a/stack.sh +++ b/stack.sh @@ -173,7 +173,7 @@ source $TOP_DIR/stackrc # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (precise|trusty|7.0|wheezy|sid|testing|jessie|f20|f21|f22|rhel7) ]]; then +if [[ ! 
${DISTRO} =~ (precise|trusty|utopic|vivid|7.0|wheezy|sid|testing|jessie|f20|f21|f22|rhel7) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" From 1ebe4f1ff0b9056d8015eee021a11ce1bd18b184 Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Fri, 29 May 2015 13:36:09 +0200 Subject: [PATCH 0251/2941] Add missing libmysqlclient-devel dependency for SUSE Currently devstack unconditionally installs MySQL-python, so we need to have its dependencies available. Since this is transitional until the switch to PyMysql happened, lets just add the dependency for now to have devstack working again. Change-Id: I638b5999d35a06eee962679b1cd95950bbf2b1d7 --- files/rpms-suse/devlibs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/rpms-suse/devlibs b/files/rpms-suse/devlibs index 02c5398079..bdb630a16f 100644 --- a/files/rpms-suse/devlibs +++ b/files/rpms-suse/devlibs @@ -3,5 +3,5 @@ libopenssl-devel # pyOpenSSL libxml2-devel # lxml libxslt-devel # lxml postgresql-devel # psycopg2 +libmysqlclient-devel # MySQL-python python-devel # pyOpenSSL -libmysqld-devel # MySQL-python \ No newline at end of file From 643779873acfaca160caac5ef94c1286eb33ae51 Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Mon, 1 Jun 2015 10:43:58 +0200 Subject: [PATCH 0252/2941] Remove packaged rpm python libraries We want to use the pip versions instead, only python-devel should be needed. 
Change-Id: If7720d54c4ad3358f9dc2fceedd6f5897085eb8f --- files/rpms-suse/ceilometer-collector | 1 - files/rpms-suse/glance | 9 --------- files/rpms-suse/horizon | 12 ------------ files/rpms-suse/keystone | 11 ----------- files/rpms-suse/neutron | 12 ------------ files/rpms-suse/nova | 24 +----------------------- files/rpms-suse/swift | 9 --------- 7 files changed, 1 insertion(+), 77 deletions(-) diff --git a/files/rpms-suse/ceilometer-collector b/files/rpms-suse/ceilometer-collector index c76454fded..5e4dfcc35e 100644 --- a/files/rpms-suse/ceilometer-collector +++ b/files/rpms-suse/ceilometer-collector @@ -1,4 +1,3 @@ # Not available in openSUSE main repositories, but can be fetched from OBS # (devel:languages:python and server:database projects) mongodb -python-pymongo diff --git a/files/rpms-suse/glance b/files/rpms-suse/glance index 9b962f9643..0e58425b1f 100644 --- a/files/rpms-suse/glance +++ b/files/rpms-suse/glance @@ -1,11 +1,2 @@ libxml2-devel -python-PasteDeploy -python-Routes -python-SQLAlchemy -python-argparse python-devel -python-eventlet -python-greenlet -python-iso8601 -python-pyOpenSSL -python-xattr diff --git a/files/rpms-suse/horizon b/files/rpms-suse/horizon index 5ba5aafd38..77f7c34b31 100644 --- a/files/rpms-suse/horizon +++ b/files/rpms-suse/horizon @@ -1,14 +1,2 @@ apache2 # NOPRIME apache2-mod_wsgi # NOPRIME -python-Paste -python-PasteDeploy -python-Routes -python-SQLAlchemy -python-WebOb -python-anyjson -python-coverage -python-dateutil -python-eventlet -python-mox -python-sqlalchemy-migrate -python-xattr diff --git a/files/rpms-suse/keystone b/files/rpms-suse/keystone index 4c37adef9b..c838b413c3 100644 --- a/files/rpms-suse/keystone +++ b/files/rpms-suse/keystone @@ -1,15 +1,4 @@ cyrus-sasl-devel openldap2-devel -python-Paste -python-PasteDeploy -python-PasteScript -python-Routes -python-SQLAlchemy -python-WebOb python-devel -python-greenlet -python-lxml -python-mysql -python-mysql-connector-python -python-pysqlite sqlite3 diff --git 
a/files/rpms-suse/neutron b/files/rpms-suse/neutron index d278363e98..e75db89ddf 100644 --- a/files/rpms-suse/neutron +++ b/files/rpms-suse/neutron @@ -6,17 +6,6 @@ iptables iputils mariadb # NOPRIME postgresql-devel -python-eventlet -python-greenlet -python-iso8601 -python-mysql -python-mysql-connector-python -python-Paste -python-PasteDeploy -python-pyudev -python-Routes -python-SQLAlchemy -python-suds rabbitmq-server # NOPRIME sqlite3 sudo @@ -24,5 +13,4 @@ vlan radvd # NOPRIME # FIXME: qpid is not part of openSUSE, those names are tentative -python-qpid # NOPRIME qpidd # NOPRIME diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova index b1c4f6a8d0..6f8aef1027 100644 --- a/files/rpms-suse/nova +++ b/files/rpms-suse/nova @@ -16,29 +16,7 @@ libvirt-python # NOPRIME mariadb # NOPRIME parted polkit -python-M2Crypto -python-m2crypto # dist:sle11sp2 -python-Paste -python-PasteDeploy -python-Routes -python-SQLAlchemy -python-Tempita -python-cheetah -python-eventlet -python-feedparser -python-greenlet -python-iso8601 -python-libxml2 -python-lockfile -python-lxml # needed for glance which is needed for nova --- this shouldn't be here -python-mox -python-mysql -python-mysql-connector-python -python-numpy # needed by websockify for spice console -python-paramiko -python-sqlalchemy-migrate -python-suds -python-xattr # needed for glance which is needed for nova --- this shouldn't be here +python-devel rabbitmq-server # NOPRIME socat sqlite3 diff --git a/files/rpms-suse/swift b/files/rpms-suse/swift index 9c0d188fe2..6a824f944f 100644 --- a/files/rpms-suse/swift +++ b/files/rpms-suse/swift @@ -1,15 +1,6 @@ curl memcached -python-PasteDeploy -python-WebOb -python-configobj -python-coverage python-devel -python-eventlet -python-greenlet -python-netifaces -python-simplejson -python-xattr sqlite3 xfsprogs xinetd From f100e1cfe6860cc3b7d5384ed41d5bdad6af2fd2 Mon Sep 17 00:00:00 2001 From: David Kranz Date: Mon, 1 Jun 2015 10:29:59 -0400 Subject: [PATCH 0253/2941] Enable image 
deactivate feature which was added in kilo Change-Id: Ia1d3d811bd57d3de16d397cfab341e8d0f17cb69 --- lib/tempest | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/tempest b/lib/tempest index 059709dbb8..95715a33da 100644 --- a/lib/tempest +++ b/lib/tempest @@ -330,6 +330,9 @@ function configure_tempest { iniset $TEMPEST_CONFIG image http_image $TEMPEST_HTTP_IMAGE fi + # Image Features + iniset $TEMPEST_CONFIG image-feature-enabled deactivate_image True + # Auth TEMPEST_ALLOW_TENANT_ISOLATION=${TEMPEST_ALLOW_TENANT_ISOLATION:-$TEMPEST_HAS_ADMIN} iniset $TEMPEST_CONFIG auth allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True} From 25cb34606eb25ad2760e4ac50fa4d08036afbc96 Mon Sep 17 00:00:00 2001 From: Sergey Skripnick Date: Mon, 1 Jun 2015 19:06:46 +0300 Subject: [PATCH 0254/2941] Fix sample multinode configuration There should not be c-sch and c-api services on compute node. Change-Id: Ice057eb80e7ab6e917ca972abe7eaae7d635e8a5 Closes-Bug: 1393721 --- doc/source/guides/multinode-lab.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst index b2617c9f17..27d71f1b9c 100644 --- a/doc/source/guides/multinode-lab.rst +++ b/doc/source/guides/multinode-lab.rst @@ -178,7 +178,7 @@ machines, create a ``local.conf`` with: MYSQL_HOST=192.168.42.11 RABBIT_HOST=192.168.42.11 GLANCE_HOSTPORT=192.168.42.11:9292 - ENABLED_SERVICES=n-cpu,n-net,n-api,c-sch,c-api,c-vol + ENABLED_SERVICES=n-cpu,n-net,n-api,c-vol NOVA_VNC_ENABLED=True NOVNCPROXY_URL="http://192.168.42.11:6080/vnc_auto.html" VNCSERVER_LISTEN=$HOST_IP From 64d5ecf3bf7e0bd08762a7fe5f94f7947ae0204c Mon Sep 17 00:00:00 2001 From: "Sean M. 
Collins" Date: Mon, 1 Jun 2015 14:13:41 -0400 Subject: [PATCH 0255/2941] Define PUBLIC_BRIDGE in the main Neutron lib This way, it can be used by both OVS and Linux Bridge Change-Id: Iea5a8bb720d327b69f64791a23d414d4cde2e3ea Closes-Bug: #1460758 --- lib/neutron-legacy | 4 ++++ lib/neutron_plugins/linuxbridge_agent | 5 +++++ lib/neutron_plugins/ovs_base | 1 - 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 18b0942ae1..8879bd91c6 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -168,6 +168,10 @@ NEUTRON_CREATE_INITIAL_NETWORKS=${NEUTRON_CREATE_INITIAL_NETWORKS:-True} ## Provider Network Information PROVIDER_SUBNET_NAME=${PROVIDER_SUBNET_NAME:-"provider_net"} +# Define the public bridge that will transmit traffic from VMs to the +# physical network - used by both the OVS and Linux Bridge drivers. +PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex} + # Use flat providernet for public network # # If Q_USE_PROVIDERNET_FOR_PUBLIC=True, use a flat provider network diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent index c9ea1cac28..b348af9c4f 100644 --- a/lib/neutron_plugins/linuxbridge_agent +++ b/lib/neutron_plugins/linuxbridge_agent @@ -7,6 +7,10 @@ PLUGIN_XTRACE=$(set +o | grep xtrace) set +o xtrace +function neutron_lb_cleanup { + sudo brctl delbr $PUBLIC_BRIDGE +} + function is_neutron_ovs_base_plugin { # linuxbridge doesn't use OVS return 1 @@ -29,6 +33,7 @@ function neutron_plugin_configure_dhcp_agent { } function neutron_plugin_configure_l3_agent { + sudo brctl addbr $PUBLIC_BRIDGE iniset $Q_L3_CONF_FILE DEFAULT external_network_bridge iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport } diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base index 51999c60e4..81561d3114 100644 --- a/lib/neutron_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -8,7 +8,6 @@ OVSB_XTRACE=$(set +o | grep xtrace) set +o 
xtrace OVS_BRIDGE=${OVS_BRIDGE:-br-int} -PUBLIC_BRIDGE=${PUBLIC_BRIDGE:-br-ex} OVS_DATAPATH_TYPE=${OVS_DATAPATH_TYPE:-""} function is_neutron_ovs_base_plugin { From 7d350720fe5d25fece68c5d1625a33a6cad431ef Mon Sep 17 00:00:00 2001 From: Rob Crittenden Date: Thu, 28 May 2015 14:59:31 -0400 Subject: [PATCH 0256/2941] Replace pip-installed requests CA bundle with link If the version of python-requests required is higher than that provided by the operating system, pip will install it from upstream. The upstream version provides its own CA certificate bundle based on the Mozilla bundle, and defaults to that in case a CA certificate file is not specified for a request. The distribution-specific packages point to the system-wide CA bundle that can be managed by tools such as update-ca-trust (Fedora/RHEL) and update-ca-certificates (Debian/Ubuntu). When installing in SSL/TLS mode, either with SSL=True or by adding tls-proxy to ENABLED_SERVICES, if a non-systemwide CA bundle is used, then the CA generated by devstack will not be used causing the installation to fail. Replace the upstream-provided bundle with a link to the system bundle when possible. Change-Id: I349662ff8f851b4a7f879f89b8975a068f2d73dc Closes-Bug: #1459789 --- tools/fixup_stuff.sh | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 31258d13f7..d3a3de2092 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -138,3 +138,24 @@ fi # and installing the latest version using pip. uninstall_package python-virtualenv pip_install -U virtualenv + +# If a non-system python-requests is installed then it will use the +# built-in CA certificate store rather than the distro-specific +# CA certificate store. Detect this and symlink to the correct +# one. If the value for the CA is not rooted in /etc then we know +# we need to change it. 
+capath=$(python -c "from requests import certs; print certs.where()") + +if is_service_enabled tls-proxy || [ "$USE_SSL" == "True" ]; then + if [[ ! $capath =~ ^/etc/.* && ! -L $capath ]]; then + if is_fedora; then + sudo rm -f $capath + sudo ln -s /etc/pki/tls/certs/ca-bundle.crt $capath + elif is_ubuntu; then + sudo rm -f $capath + sudo ln -s /etc/ssl/certs/ca-certificates.crt $capath + else + echo "Don't know how to set the CA bundle, expect the install to fail." + fi + fi +fi From aa54511727614a837992845be416b9bd921be2e4 Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Wed, 3 Jun 2015 17:10:43 +0900 Subject: [PATCH 0257/2941] midonet: Provide has_neutron_plugin_security_group Change-Id: I6ac12022bb8998fbec17cfa503db9277aa2eb8b7 Partial-Bug: #1458871 --- lib/neutron_plugins/midonet | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/lib/neutron_plugins/midonet b/lib/neutron_plugins/midonet index 9e72aa0ce9..ca0b70c76c 100644 --- a/lib/neutron_plugins/midonet +++ b/lib/neutron_plugins/midonet @@ -1,4 +1,10 @@ #!/bin/bash -# REVISIT(devvesa): This file is intentionally left empty -# in order to keep Q_PLUGIN=midonet work. +# REVISIT(devvesa): This file is needed so Q_PLUGIN=midonet will work. + +# FIXME(yamamoto): This function should not be here, but unfortunately +# devstack calls it before the external plugins are fetched +function has_neutron_plugin_security_group { + # 0 means True here + return 0 +} From 3fe4c4e789f266b1367a770e12f3c461e8981a15 Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Wed, 3 Jun 2015 17:29:50 +0900 Subject: [PATCH 0258/2941] MAINTAINERS.rst: Add MidoNet section While the most part of MidoNet code is now externally maintained using the external plugin mechanism, it can be still useful to have a contact list. 
Change-Id: I3e0a0586c07875ca37ce101dd169eaf78f34f7a5 --- MAINTAINERS.rst | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/MAINTAINERS.rst b/MAINTAINERS.rst index d3e8c67487..eeb1f21b61 100644 --- a/MAINTAINERS.rst +++ b/MAINTAINERS.rst @@ -45,6 +45,13 @@ Fedora/CentOS/RHEL Neutron ~~~~~~~ +MidoNet +~~~~~~~ + +* Jaume Devesa +* Ryu Ishimoto +* YAMAMOTO Takashi + OpenDaylight ~~~~~~~~~~~~ From 50a3edf1b77fbe91c8101cfca9c1abad9c756a3d Mon Sep 17 00:00:00 2001 From: afazekas Date: Wed, 27 May 2015 11:50:12 +0200 Subject: [PATCH 0259/2941] Enable deactivate image tests in tempest Deactivate image is new feature, so it cannot be default enabled in tempest. Change-Id: Ic33b3e2f7a9f62543680647312603bdd19b90198 Depends-On: I7880f0e2646ce8660e035ebaa19a60f5bf271b64 --- lib/tempest | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/tempest b/lib/tempest index 059709dbb8..5312a02cfa 100644 --- a/lib/tempest +++ b/lib/tempest @@ -329,6 +329,7 @@ function configure_tempest { if [[ ! -z "$TEMPEST_HTTP_IMAGE" ]]; then iniset $TEMPEST_CONFIG image http_image $TEMPEST_HTTP_IMAGE fi + iniset $TEMPEST_CONFIG image-feature-enabled deactivate_image true # Auth TEMPEST_ALLOW_TENANT_ISOLATION=${TEMPEST_ALLOW_TENANT_ISOLATION:-$TEMPEST_HAS_ADMIN} From a53ae68889746d61ad84cc19d2f2b61eec2c052a Mon Sep 17 00:00:00 2001 From: Kashyap Chamarthy Date: Wed, 27 May 2015 21:59:32 +0200 Subject: [PATCH 0260/2941] Remove Fedora 20 as supported distribution Fedora 20 will reach end of life on 23-JUN-2015[1]; remove it as supported distribution. Add Fedora 22 where applicable. - stack.sh: Remove Fedora 20 from list of supported distributions. - files/rpms/general: Remove 'f20' from NOPRIME. - lib/ceph: Remove 'f20' from the check_os_support_ceph() function. 
- doc/source/index.rst: s/Fedora 20/Fedora 21/ [1] https://lists.fedoraproject.org/pipermail/devel-announce/2015-May/001586.html Change-Id: I8f2e1ddc24c071754b1cceb5bed5bdafdc9d9f79 --- doc/source/index.rst | 6 +++--- files/rpms/general | 2 +- lib/ceph | 2 +- stack.sh | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/doc/source/index.rst b/doc/source/index.rst index e0c3f3a5d6..6d0edb0bfe 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -19,9 +19,9 @@ Quick Start #. Select a Linux Distribution - Only Ubuntu 14.04 (Trusty), Fedora 20 and CentOS/RHEL 7 are - documented here. OpenStack also runs and is packaged on other flavors - of Linux such as OpenSUSE and Debian. + Only Ubuntu 14.04 (Trusty), Fedora 21 (or Fedora 22) and CentOS/RHEL + 7 are documented here. OpenStack also runs and is packaged on other + flavors of Linux such as OpenSUSE and Debian. #. Install Selected OS diff --git a/files/rpms/general b/files/rpms/general index 7b2c00ad5c..67b084ef29 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -25,6 +25,6 @@ bc libyaml-devel gettext # used for compiling message catalogs net-tools -java-1.7.0-openjdk-headless # NOPRIME rhel7,f20 +java-1.7.0-openjdk-headless # NOPRIME rhel7 java-1.8.0-openjdk-headless # NOPRIME f21,f22 pyOpenSSL # version in pip uses too much memory diff --git a/lib/ceph b/lib/ceph index 4d6ca4aa68..cbdc3b87f1 100644 --- a/lib/ceph +++ b/lib/ceph @@ -110,7 +110,7 @@ function undefine_virsh_secret { # check_os_support_ceph() - Check if the operating system provides a decent version of Ceph function check_os_support_ceph { - if [[ ! ${DISTRO} =~ (trusty|f20|f21|f22) ]]; then + if [[ ! ${DISTRO} =~ (trusty|f21|f22) ]]; then echo "WARNING: your distro $DISTRO does not provide (at least) the Firefly release. 
Please use Ubuntu Trusty or Fedora 20 (and higher)" if [[ "$FORCE_CEPH_INSTALL" != "yes" ]]; then die $LINENO "If you wish to install Ceph on this distribution anyway run with FORCE_CEPH_INSTALL=yes" diff --git a/stack.sh b/stack.sh index dc79fa94f7..0c01165642 100755 --- a/stack.sh +++ b/stack.sh @@ -173,7 +173,7 @@ source $TOP_DIR/stackrc # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (precise|trusty|utopic|vivid|7.0|wheezy|sid|testing|jessie|f20|f21|f22|rhel7) ]]; then +if [[ ! ${DISTRO} =~ (precise|trusty|utopic|vivid|7.0|wheezy|sid|testing|jessie|f21|f22|rhel7) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" From 6254d5fd0d87e65aa0a53b9fb30b36145e47d46d Mon Sep 17 00:00:00 2001 From: Rob Crittenden Date: Fri, 5 Jun 2015 11:58:15 -0400 Subject: [PATCH 0261/2941] Fix nova and glance discovery URLs when tls-proxy is enabled. Retrieving the root page in the nova and glance APIs include URLs for the various versions supported. These are by default reported using unsecure URLs. Configure the services to report a SSL-based URL instead. 
Change-Id: I220757e53b94a5f6d19291371407220fdf54c645 --- lib/glance | 1 + lib/nova | 1 + 2 files changed, 2 insertions(+) diff --git a/lib/glance b/lib/glance index 4e1bd24ef5..016ade3479 100644 --- a/lib/glance +++ b/lib/glance @@ -154,6 +154,7 @@ function configure_glance { if is_service_enabled tls-proxy; then iniset $GLANCE_API_CONF DEFAULT bind_port $GLANCE_SERVICE_PORT_INT + iniset $GLANCE_API_CONF DEFAULT public_endpoint $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT iniset $GLANCE_REGISTRY_CONF DEFAULT bind_port $GLANCE_REGISTRY_PORT_INT fi diff --git a/lib/nova b/lib/nova index da288d3e4d..11fa2a0ba4 100644 --- a/lib/nova +++ b/lib/nova @@ -489,6 +489,7 @@ function create_nova_conf { if is_service_enabled tls-proxy; then # Set the service port for a proxy to take the original iniset $NOVA_CONF DEFAULT osapi_compute_listen_port "$NOVA_SERVICE_PORT_INT" + iniset $NOVA_CONF DEFAULT osapi_compute_link_prefix $NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT fi configure_auth_token_middleware $NOVA_CONF nova $NOVA_AUTH_CACHE_DIR From 027e2ea741bdbcb6e1afc3fe527c3fdf045825c3 Mon Sep 17 00:00:00 2001 From: Chris Dent Date: Fri, 5 Jun 2015 18:43:50 +0000 Subject: [PATCH 0262/2941] Allow ceilometer to run in virtualenv if USE_VENV This requires three main changes: * setting CEILOMETER_BIN_DIR appropriately * running the various services with a full path * explicitly installing optional python modules (for mongo and virt drivers, if configured) during the install phase In the process of making this work it was discovered that setting CEILOMETER_BACKEND to something like 'foo' would cause the backend to be configured to use mongodb but for the mongodb system packages and related python modules to not be installed. Fixing this was used to validate the install process under USE_VENV. 
Change-Id: I35fbfa76bdd60a22ba90b13666b06eeb961dddb3 --- files/apache-ceilometer.template | 2 +- lib/ceilometer | 59 +++++++++++++++++++++++--------- 2 files changed, 43 insertions(+), 18 deletions(-) diff --git a/files/apache-ceilometer.template b/files/apache-ceilometer.template index 1c57b328b8..79f14c38ab 100644 --- a/files/apache-ceilometer.template +++ b/files/apache-ceilometer.template @@ -1,7 +1,7 @@ Listen %PORT% - WSGIDaemonProcess ceilometer-api processes=2 threads=10 user=%USER% display-name=%{GROUP} + WSGIDaemonProcess ceilometer-api processes=2 threads=10 user=%USER% display-name=%{GROUP} %VIRTUALENV% WSGIProcessGroup ceilometer-api WSGIScriptAlias / %WSGIAPP% WSGIApplicationGroup %{GLOBAL} diff --git a/lib/ceilometer b/lib/ceilometer index 1f72187ed6..f6f605b686 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -78,8 +78,13 @@ CEILOMETER_API_LOG_DIR=/var/log/ceilometer-api CEILOMETER_AUTH_CACHE_DIR=${CEILOMETER_AUTH_CACHE_DIR:-/var/cache/ceilometer} CEILOMETER_WSGI_DIR=${CEILOMETER_WSGI_DIR:-/var/www/ceilometer} -# Support potential entry-points console scripts -CEILOMETER_BIN_DIR=$(get_python_exec_prefix) +# Support potential entry-points console scripts in VENV or not +if [[ ${USE_VENV} = True ]]; then + PROJECT_VENV["ceilometer"]=${CEILOMETER_DIR}.venv + CEILOMETER_BIN_DIR=${PROJECT_VENV["ceilometer"]}/bin +else + CEILOMETER_BIN_DIR=$(get_python_exec_prefix) +fi # Set up database backend CEILOMETER_BACKEND=${CEILOMETER_BACKEND:-mysql} @@ -151,6 +156,8 @@ function _cleanup_ceilometer_apache_wsgi { # runs that a clean run would need to clean up function cleanup_ceilometer { if [ "$CEILOMETER_BACKEND" = 'mongodb' ] ; then + echo "### cleaning database" + read mongo ceilometer --eval "db.dropDatabase();" elif [ "$CEILOMETER_BACKEND" = 'es' ] ; then curl -XDELETE "localhost:9200/events_*" @@ -165,16 +172,22 @@ function _config_ceilometer_apache_wsgi { local ceilometer_apache_conf=$(apache_site_config_for ceilometer) local 
apache_version=$(get_apache_version) + local venv_path="" # Copy proxy vhost and wsgi file sudo cp $CEILOMETER_DIR/ceilometer/api/app.wsgi $CEILOMETER_WSGI_DIR/app + if [[ ${USE_VENV} = True ]]; then + venv_path="python-path=${PROJECT_VENV["ceilometer"]}/lib/$(python_version)/site-packages" + fi + sudo cp $FILES/apache-ceilometer.template $ceilometer_apache_conf sudo sed -e " s|%PORT%|$CEILOMETER_SERVICE_PORT|g; s|%APACHE_NAME%|$APACHE_NAME|g; s|%WSGIAPP%|$CEILOMETER_WSGI_DIR/app|g; - s|%USER%|$STACK_USER|g + s|%USER%|$STACK_USER|g; + s|%VIRTUALENV%|$venv_path|g " -i $ceilometer_apache_conf } @@ -232,12 +245,14 @@ function configure_ceilometer { iniset $CEILOMETER_CONF DEFAULT collector_workers $API_WORKERS ${TOP_DIR}/pkg/elasticsearch.sh start cleanup_ceilometer - else + elif [ "$CEILOMETER_BACKEND" = 'mongodb' ] ; then iniset $CEILOMETER_CONF database alarm_connection mongodb://localhost:27017/ceilometer iniset $CEILOMETER_CONF database event_connection mongodb://localhost:27017/ceilometer iniset $CEILOMETER_CONF database metering_connection mongodb://localhost:27017/ceilometer configure_mongodb cleanup_ceilometer + else + die $LINENO "Unable to configure unknown CEILOMETER_BACKEND $CEILOMETER_BACKEND" fi if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then @@ -263,10 +278,8 @@ function configure_mongodb { local packages=mongodb-server if is_fedora; then - # mongodb client + python bindings - packages="${packages} mongodb pymongo" - else - packages="${packages} python-pymongo" + # mongodb client + packages="${packages} mongodb" fi install_package ${packages} @@ -319,6 +332,18 @@ function install_ceilometer { install_redis fi + if [ "$CEILOMETER_BACKEND" = 'mongodb' ] ; then + pip_install_gr pymongo + fi + + if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then + pip_install_gr libvirt-python + fi + + if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then + pip_install_gr oslo.vmware + fi + if [ "$CEILOMETER_BACKEND" = 'es' ] ; then ${TOP_DIR}/pkg/elasticsearch.sh download
${TOP_DIR}/pkg/elasticsearch.sh install @@ -349,13 +374,13 @@ function install_ceilometermiddleware { # start_ceilometer() - Start running processes, including screen function start_ceilometer { - run_process ceilometer-acentral "ceilometer-agent-central --config-file $CEILOMETER_CONF" - run_process ceilometer-anotification "ceilometer-agent-notification --config-file $CEILOMETER_CONF" - run_process ceilometer-collector "ceilometer-collector --config-file $CEILOMETER_CONF" - run_process ceilometer-aipmi "ceilometer-agent-ipmi --config-file $CEILOMETER_CONF" + run_process ceilometer-acentral "$CEILOMETER_BIN_DIR/ceilometer-agent-central --config-file $CEILOMETER_CONF" + run_process ceilometer-anotification "$CEILOMETER_BIN_DIR/ceilometer-agent-notification --config-file $CEILOMETER_CONF" + run_process ceilometer-collector "$CEILOMETER_BIN_DIR/ceilometer-collector --config-file $CEILOMETER_CONF" + run_process ceilometer-aipmi "$CEILOMETER_BIN_DIR/ceilometer-agent-ipmi --config-file $CEILOMETER_CONF" if [[ "$CEILOMETER_USE_MOD_WSGI" == "False" ]]; then - run_process ceilometer-api "ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF" + run_process ceilometer-api "$CEILOMETER_BIN_DIR/ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF" else enable_apache_site ceilometer restart_apache_server @@ -367,10 +392,10 @@ function start_ceilometer { # Start the compute agent last to allow time for the collector to # fully wake up and connect to the message bus. 
See bug #1355809 if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then - run_process ceilometer-acompute "ceilometer-agent-compute --config-file $CEILOMETER_CONF" $LIBVIRT_GROUP + run_process ceilometer-acompute "$CEILOMETER_BIN_DIR/ceilometer-agent-compute --config-file $CEILOMETER_CONF" $LIBVIRT_GROUP fi if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then - run_process ceilometer-acompute "ceilometer-agent-compute --config-file $CEILOMETER_CONF" + run_process ceilometer-acompute "$CEILOMETER_BIN_DIR/ceilometer-agent-compute --config-file $CEILOMETER_CONF" fi # Only die on API if it was actually intended to be turned on @@ -381,8 +406,8 @@ function start_ceilometer { fi fi - run_process ceilometer-alarm-notifier "ceilometer-alarm-notifier --config-file $CEILOMETER_CONF" - run_process ceilometer-alarm-evaluator "ceilometer-alarm-evaluator --config-file $CEILOMETER_CONF" + run_process ceilometer-alarm-notifier "$CEILOMETER_BIN_DIR/ceilometer-alarm-notifier --config-file $CEILOMETER_CONF" + run_process ceilometer-alarm-evaluator "$CEILOMETER_BIN_DIR/ceilometer-alarm-evaluator --config-file $CEILOMETER_CONF" } # stop_ceilometer() - Stop running processes From 2ebe993b25462919e8aeeb896c9f91b6be7aa573 Mon Sep 17 00:00:00 2001 From: Joe Gordon Date: Sun, 7 Jun 2015 16:57:34 +0900 Subject: [PATCH 0263/2941] guru meditation report for nova-compute in worlddump Nova-compute is hanging in the multinode test, and its difficult to figure out why. So trigger a guru meditation report for nova-compute in worlddump so we can see what nova-compute is doing when it is hung. Having a hung nova-compute causes tempest to fail and I035fe8e3333034e44b403ed0f986220ab5b0e57a runs worlddump whenever tempest fails. Bug 1462305 is one of the last issues left before the multinode job is stable enough to gate on, and this patch should make it much easier to debug. 
Change-Id: I87d7536b5992c47b8082684cc662f953113fd1a8 Related-Bug: #1462305 --- tools/worlddump.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tools/worlddump.py b/tools/worlddump.py index d846f10025..7acfb5e97d 100755 --- a/tools/worlddump.py +++ b/tools/worlddump.py @@ -106,6 +106,12 @@ def compute_consoles(): _dump_cmd("sudo cat %s" % fullpath) +def guru_meditation_report(): + _header("nova-compute Guru Meditation Report") + _dump_cmd("kill -s USR1 `pgrep nova-compute`") + print "guru meditation report in nova-compute log" + + def main(): opts = get_options() fname = filename(opts.dir) @@ -118,6 +124,7 @@ def main(): network_dump() iptables_dump() compute_consoles() + guru_meditation_report() if __name__ == '__main__': From 10e1fd420dbebec2a5e546266edfc748b8684f9b Mon Sep 17 00:00:00 2001 From: Ramy Asselin Date: Thu, 4 Jun 2015 12:12:15 -0700 Subject: [PATCH 0264/2941] Allow override of os-brick library used by cinder os-brick code was pulled out of cinder and made into its own library https://review.openstack.org/#/c/155552/ added to requirements: https://review.openstack.org/#/c/177372/ Integration tests were added https://review.openstack.org/#/c/188156/ But they still use the version of os-brick from pip. This change updates devstack to pull in the changes from os-brick patch sets instead, when configured to do so. 
Needed-by: Id2bc10782847861fe4bb5e9e46245654450e38fd Change-Id: I5359dd37dfe94bd469d5ca35f9fbaeda61b5fac4 --- lib/cinder | 8 ++++++++ stackrc | 4 ++++ tests/test_libs_from_pypi.sh | 2 +- 3 files changed, 13 insertions(+), 1 deletion(-) diff --git a/lib/cinder b/lib/cinder index da22e29f84..b8cf8090dc 100644 --- a/lib/cinder +++ b/lib/cinder @@ -39,6 +39,7 @@ fi # set up default directories GITDIR["python-cinderclient"]=$DEST/python-cinderclient +GITDIR["os-brick"]=$DEST/os-brick CINDER_DIR=$DEST/cinder # Cinder virtual environment @@ -381,6 +382,13 @@ function init_cinder { # install_cinder() - Collect source and prepare function install_cinder { + # Install os-brick from git so we make sure we're testing + # the latest code. + if use_library_from_git "os-brick"; then + git_clone_by_name "os-brick" + setup_dev_lib "os-brick" + fi + git_clone $CINDER_REPO $CINDER_DIR $CINDER_BRANCH setup_develop $CINDER_DIR if [ "$CINDER_ISCSI_HELPER" = "tgtadm" ]; then diff --git a/stackrc b/stackrc index 09ba3e9807..f8add4b44d 100644 --- a/stackrc +++ b/stackrc @@ -441,6 +441,10 @@ SWIFT3_BRANCH=${SWIFT3_BRANCH:-master} GITREPO["ceilometermiddleware"]=${CEILOMETERMIDDLEWARE_REPO:-${GIT_BASE}/openstack/ceilometermiddleware.git} GITBRANCH["ceilometermiddleware"]=${CEILOMETERMIDDLEWARE_BRANCH:-master} +# os-brick library to manage local volume attaches +GITREPO["os-brick"]=${OS_BRICK_REPO:-${GIT_BASE}/openstack/os-brick.git} +GITBRANCH["os-brick"]=${OS_BRICK_BRANCH:-master} + ################## # diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh index 8210d0a466..336a213bc5 100755 --- a/tests/test_libs_from_pypi.sh +++ b/tests/test_libs_from_pypi.sh @@ -39,7 +39,7 @@ ALL_LIBS+=" oslo.serialization python-saharaclient django_openstack_auth" ALL_LIBS+=" python-openstackclient oslo.rootwrap oslo.i18n" ALL_LIBS+=" python-ceilometerclient oslo.utils python-swiftclient" ALL_LIBS+=" python-neutronclient tooz ceilometermiddleware oslo.policy" -ALL_LIBS+=" 
debtcollector" +ALL_LIBS+=" debtcollector os-brick" # Generate the above list with # echo ${!GITREPO[@]} From 7bc2af7a6b75a0e7f383546c1d61e02b27cf45b5 Mon Sep 17 00:00:00 2001 From: "Sean M. Collins" Date: Mon, 8 Jun 2015 12:36:30 -0400 Subject: [PATCH 0265/2941] Neutron: Add a cleanup function for Linux Bridge Change-Id: Ia1bad5d2fa3b94afc662463b2e072f8482b0ce1f --- lib/neutron-legacy | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 5681743e41..dd67f45a39 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -824,6 +824,10 @@ function cleanup_neutron { neutron_ovs_base_cleanup fi + if [[ $Q_AGENT == "linuxbridge" ]]; then + neutron_lb_cleanup + fi + # delete all namespaces created by neutron for ns in $(sudo ip netns list | grep -o -E '(qdhcp|qrouter|qlbaas|fip|snat)-[0-9a-f-]*'); do sudo ip netns delete ${ns} From 09b94603bb4f903616da3b1b4970ee1e2a666b91 Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Mon, 8 Jun 2015 15:25:43 -0400 Subject: [PATCH 0266/2941] Separate start/stop control of Neutron L2 agent. This patch separates out control of the Neutron L2 agent from starting/stopping the rest of Neutron. This is needed for the same reason that control of nova-compute was separated out for Nova. When doing rolling upgrade testing with Grenade, we need to be able to stop and upgrade everything except the L2 agent, as that is what would be running on a compute node. After this is in place, we can update grenade to support a partial upgrade scenario with Neutron and run it in jenkins to ensure we don't break live upgrade support of Neutron. 
Change-Id: I7eb87fba778aff3e4514813c6232dafa99ee2912 Signed-off-by: Russell Bryant --- lib/neutron-legacy | 33 +++++++++++++++++++++++++-------- 1 file changed, 25 insertions(+), 8 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 5681743e41..519200bbcb 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -703,11 +703,10 @@ function start_neutron_service_and_check { fi } -# Start running processes, including screen -function start_neutron_agents { - # Start up the neutron agents if enabled +# Control of the l2 agent is separated out to make it easier to test partial +# upgrades (everything upgraded except the L2 agent) +function start_neutron_l2_agent { run_process q-agt "python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" - run_process q-dhcp "python $AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file=$Q_DHCP_CONF_FILE" if is_provider_network; then sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_PHYSICAL_BRIDGE $PUBLIC_INTERFACE @@ -722,6 +721,10 @@ function start_neutron_agents { sudo route add -net $FIXED_RANGE gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE fi fi +} + +function start_neutron_other_agents { + run_process q-dhcp "python $AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file=$Q_DHCP_CONF_FILE" if is_service_enabled q-vpn; then run_process q-vpn "$AGENT_VPN_BINARY $(determine_config_files neutron-vpn-agent)" @@ -745,8 +748,18 @@ function start_neutron_agents { fi } -# stop_neutron() - Stop running processes (non-screen) -function stop_neutron { +# Start running processes, including screen +function start_neutron_agents { + # Start up the neutron agents if enabled + start_neutron_l2_agent + start_neutron_other_agents +} + +function stop_neutron_l2_agent { + stop_process q-agt +} + +function stop_neutron_other { if is_service_enabled q-dhcp; then stop_process q-dhcp pid=$(ps aux | awk '/[d]nsmasq.+interface=(tap|ns-)/ { print $2 }') @@ -761,8 +774,6 @@ function 
stop_neutron { stop_process q-meta fi - stop_process q-agt - if is_service_enabled q-lbaas; then neutron_lbaas_stop fi @@ -777,6 +788,12 @@ function stop_neutron { fi } +# stop_neutron() - Stop running processes (non-screen) +function stop_neutron { + stop_neutron_other + stop_neutron_l2_agent +} + # _move_neutron_addresses_route() - Move the primary IP to the OVS bridge # on startup, or back to the public interface on cleanup function _move_neutron_addresses_route { From ce2d75df01071a06ac08c8f5ba73a8fd78002da4 Mon Sep 17 00:00:00 2001 From: Rob Crittenden Date: Thu, 4 Jun 2015 18:01:29 -0400 Subject: [PATCH 0267/2941] Remove hardcoded http in URL in sahara exercise script The script hardcoded http://$SERVICE_HOST/... which failed when SSL or tls-proxy was enabled. Calculate the protocol based on enabled services instead. Change-Id: I192eeeafe7bf4dc5cbd382c505ffb9307651d78a --- exercises/sahara.sh | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/exercises/sahara.sh b/exercises/sahara.sh index 2589e28c0c..8cad94562d 100755 --- a/exercises/sahara.sh +++ b/exercises/sahara.sh @@ -35,7 +35,13 @@ source $TOP_DIR/exerciserc is_service_enabled sahara || exit 55 -$CURL_GET http://$SERVICE_HOST:8386/ 2>/dev/null | grep -q 'Auth' || die $LINENO "Sahara API isn't functioning!" +if is_ssl_enabled_service "sahara" || is_service_enabled tls-proxy; then + SAHARA_SERVICE_PROTOCOL="https" +fi + +SAHARA_SERVICE_PROTOCOL=${SAHARA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} + +$CURL_GET $SAHARA_SERVICE_PROTOCOL://$SERVICE_HOST:8386/ 2>/dev/null | grep -q 'Auth' || die $LINENO "Sahara API isn't functioning!" set +o xtrace echo "*********************************************************************" From a16e46100a2f676457abf884fc2b852d67597807 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Tue, 2 Jun 2015 10:08:04 +1000 Subject: [PATCH 0268/2941] Cleanup FAQ somewhat Remove some old discussions that no longer seem relevant and cleanup a few other points. 
Change-Id: I175ddaf9362bf48d35b0e648904eeb21bdc3c793 --- doc/source/faq.rst | 104 ++++++++++++++++----------------------------- 1 file changed, 37 insertions(+), 67 deletions(-) diff --git a/doc/source/faq.rst b/doc/source/faq.rst index d3b491fdac..0437ec26d5 100644 --- a/doc/source/faq.rst +++ b/doc/source/faq.rst @@ -10,36 +10,21 @@ General Questions ================= Q: Can I use DevStack for production? - A: No. We mean it. Really. DevStack makes some implementation - choices that are not appropriate for production deployments. We - warned you! -Q: Then why selinux in enforcing mode? - A: That is the default on current Fedora and RHEL releases. DevStack - has (rightly so) a bad reputation for its security practices; it has - always been meant as a development tool first and system integration - later. This is changing as the security issues around OpenStack's - use of root (for example) have been tightened and developers need to - be better equipped to work in these environments. ``stack.sh``'s use - of root is primarily to support the activities that would be handled - by packaging in "real" deployments. To remove additional protections - that will be desired/required in production would be a step - backward. -Q: But selinux is disabled in RHEL! - A: Today it is, yes. That is a specific exception that certain - DevStack contributors fought strongly against. The primary reason it - was allowed was to support using RHEL6 as the Python 2.6 test - platform and that took priority time-wise. This will not be the case - with RHEL 7. + + A: DevStack is targeted at developers and CI systems to use the + raw upstream code. It makes many choices that are not appropriate + for production systems. + + Your best choice is probably to choose a `distribution of + OpenStack + `__. + Q: Why a shell script, why not chef/puppet/... A: The script is meant to be read by humans (as well as ran by computers); it is the primary documentation after all. 
Using a recipe system requires everyone to agree and understand chef or puppet. -Q: Why not use Crowbar? - A: DevStack is optimized for documentation & developers. As some of - us use `Crowbar `__ for - production deployments, we hope developers documenting how they - setup systems for new features supports projects like Crowbar. + Q: I'd like to help! A: That isn't a question, but please do! The source for DevStack is at @@ -49,27 +34,23 @@ Q: I'd like to help! follow the usual process as described in the `developer guide `__. This Sphinx documentation is housed in the doc directory. + Q: Why not use packages? A: Unlike packages, DevStack leaves your cloud ready to develop - checkouts of the code and services running in screen. However, many people are doing the hard work of packaging and recipes for - production deployments. We hope this script serves as a way to - communicate configuration changes between developers and packagers. + production deployments. + Q: Why isn't $MY\_FAVORITE\_DISTRO supported? A: DevStack is meant for developers and those who want to see how OpenStack really works. DevStack is known to run on the distro/release combinations listed in ``README.md``. DevStack is only supported on releases other than those documented in ``README.md`` on a best-effort basis. -Q: What about Fedora/RHEL/CentOS? - A: Fedora and CentOS/RHEL are supported via rpm dependency files and - specific checks in ``stack.sh``. Support will follow the pattern set - with the Ubuntu testing, i.e. only a single release of the distro - will receive regular testing, others will be handled on a - best-effort basis. -Q: Are there any differences between Ubuntu and Fedora support? - A: Neutron is not fully supported prior to Fedora 18 due lack of - OpenVSwitch packages. + +Q: Are there any differences between Ubuntu and Centos/Fedora support? + A: Both should work well and are tested by DevStack CI. + Q: Why can't I use another shell? 
A: DevStack now uses some specific bash-ism that require Bash 4, such as associative arrays. Simple compatibility patches have been accepted @@ -77,26 +58,23 @@ Q: Why can't I use another shell? compatibility patches will be considered except for shells matching the array functionality as it is very ingrained in the repo and project management. -Q: But, but, can't I test on OS/X? - A: Yes, even you, core developer who complained about this, needs to - install bash 4 via homebrew to keep running tests on OS/X. Get a Real - Operating System. (For most of you who don't know, I am referring to - myself.) + +Q: Can I test on OS/X? + A: Some people have success with bash 4 installed via + homebrew to keep running tests on OS/X. Operation and Configuration =========================== Q: Can DevStack handle a multi-node installation? - A: Indirectly, yes. You run DevStack on each node with the - appropriate configuration in ``local.conf``. The primary - considerations are turning off the services not required on the - secondary nodes, making sure the passwords match and setting the - various API URLs to the right place. + A: Yes, see :doc:`multinode lab guide ` + Q: How can I document the environment that DevStack is using? A: DevStack includes a script (``tools/info.sh``) that gathers the versions of the relevant installed apt packages, pip packages and git repos. This is a good way to verify what Python modules are installed. + Q: How do I turn off a service that is enabled by default? A: Services can be turned off by adding ``disable_service xxx`` to ``local.conf`` (using ``n-vol`` in this example): @@ -113,31 +91,22 @@ Q: Is enabling a service that defaults to off done with the reverse of the above enable_service qpid Q: How do I run a specific OpenStack milestone? - A: OpenStack milestones have tags set in the git repo. Set the appropriate tag in the ``*_BRANCH`` variables in ``local.conf``. 
Swift is on its own release schedule so pick a tag in the Swift repo that is just before the milestone release. For example: + A: OpenStack milestones have tags set in the git repo. Set the + appropriate tag in the ``*_BRANCH`` variables in ``local.conf``. + Swift is on its own release schedule so pick a tag in the Swift repo + that is just before the milestone release. For example: :: [[local|localrc]] - GLANCE_BRANCH=stable/juno - HORIZON_BRANCH=stable/juno - KEYSTONE_BRANCH=stable/juno - NOVA_BRANCH=stable/juno - GLANCE_BRANCH=stable/juno - NEUTRON_BRANCH=stable/juno - SWIFT_BRANCH=2.2.1 - -Q: Why not use [STRIKEOUT:``tools/pip-requires``]\ ``requirements.txt`` to grab project dependencies? - [STRIKEOUT:The majority of deployments will use packages to install - OpenStack that will have distro-based packages as dependencies. - DevStack installs as many of these Python packages as possible to - mimic the expected production environment.] Certain Linux - distributions have a 'lack of workaround' in their Python - configurations that installs vendor packaged Python modules and - pip-installed modules to the SAME DIRECTORY TREE. This is causing - heartache and moving us in the direction of installing more modules - from PyPI than vendor packages. However, that is only being done as - necessary as the packaging needs to catch up to the development - cycle anyway so this is kept to a minimum. + GLANCE_BRANCH=stable/kilo + HORIZON_BRANCH=stable/kilo + KEYSTONE_BRANCH=stable/kilo + NOVA_BRANCH=stable/kilo + GLANCE_BRANCH=stable/kilo + NEUTRON_BRANCH=stable/kilo + SWIFT_BRANCH=2.3.0 + Q: What can I do about RabbitMQ not wanting to start on my fresh new VM? A: This is often caused by ``erlang`` not being happy with the hostname resolving to a reachable IP address. Make sure your @@ -145,6 +114,7 @@ Q: What can I do about RabbitMQ not wanting to start on my fresh new VM? in ``/etc/hosts`` is often good enough for a single-node installation. 
And in an extreme case, use ``clean.sh`` to eradicate it and try again. + Q: How can I set up Heat in stand-alone configuration? A: Configure ``local.conf`` thusly: From 2f27addf660d768988b1d9ec199e59584f0d4022 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Tue, 2 Jun 2015 10:18:49 +1000 Subject: [PATCH 0269/2941] Move FAQ to a section-based format Move to a section-based format, and add a TOC so we can see an overview of the questions. Change-Id: Ie480f2ab759a5a7081d4dc7d2491b44a85b6503a --- doc/source/faq.rst | 218 +++++++++++++++++++++++++-------------------- 1 file changed, 121 insertions(+), 97 deletions(-) diff --git a/doc/source/faq.rst b/doc/source/faq.rst index 0437ec26d5..87f84693df 100644 --- a/doc/source/faq.rst +++ b/doc/source/faq.rst @@ -2,99 +2,120 @@ FAQ === -- `General Questions <#general>`__ -- `Operation and Configuration <#ops_conf>`__ -- `Miscellaneous <#misc>`__ +.. contents:: + :local: General Questions ================= -Q: Can I use DevStack for production? - - A: DevStack is targeted at developers and CI systems to use the - raw upstream code. It makes many choices that are not appropriate - for production systems. - - Your best choice is probably to choose a `distribution of - OpenStack - `__. - -Q: Why a shell script, why not chef/puppet/... - A: The script is meant to be read by humans (as well as ran by - computers); it is the primary documentation after all. Using a - recipe system requires everyone to agree and understand chef or - puppet. - -Q: I'd like to help! - A: That isn't a question, but please do! The source for DevStack is - at - `git.openstack.org `__ - and bug reports go to - `LaunchPad `__. Contributions - follow the usual process as described in the `developer - guide `__. This Sphinx - documentation is housed in the doc directory. - -Q: Why not use packages? - A: Unlike packages, DevStack leaves your cloud ready to develop - - checkouts of the code and services running in screen. 
However, many - people are doing the hard work of packaging and recipes for - production deployments. - -Q: Why isn't $MY\_FAVORITE\_DISTRO supported? - A: DevStack is meant for developers and those who want to see how - OpenStack really works. DevStack is known to run on the - distro/release combinations listed in ``README.md``. DevStack is - only supported on releases other than those documented in - ``README.md`` on a best-effort basis. - -Q: Are there any differences between Ubuntu and Centos/Fedora support? - A: Both should work well and are tested by DevStack CI. - -Q: Why can't I use another shell? - A: DevStack now uses some specific bash-ism that require Bash 4, such - as associative arrays. Simple compatibility patches have been accepted - in the past when they are not complex, at this point no additional - compatibility patches will be considered except for shells matching - the array functionality as it is very ingrained in the repo and project - management. - -Q: Can I test on OS/X? - A: Some people have success with bash 4 installed via - homebrew to keep running tests on OS/X. +Can I use DevStack for production? +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +DevStack is targeted at developers and CI systems to use the raw +upstream code. It makes many choices that are not appropriate for +production systems. + +Your best choice is probably to choose a `distribution of OpenStack +`__. + +Why a shell script, why not chef/puppet/... +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The script is meant to be read by humans (as well as ran by +computers); it is the primary documentation after all. Using a recipe +system requires everyone to agree and understand chef or puppet. + +I'd like to help! +~~~~~~~~~~~~~~~~~ + +That isn't a question, but please do! The source for DevStack is at +`git.openstack.org +`__ and bug +reports go to `LaunchPad +`__. Contributions follow the +usual process as described in the `developer guide +`__. 
This +Sphinx documentation is housed in the doc directory. + +Why not use packages? +~~~~~~~~~~~~~~~~~~~~~ + +Unlike packages, DevStack leaves your cloud ready to develop - +checkouts of the code and services running in screen. However, many +people are doing the hard work of packaging and recipes for production +deployments. + +Why isn't $MY\_FAVORITE\_DISTRO supported? +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +DevStack is meant for developers and those who want to see how +OpenStack really works. DevStack is known to run on the distro/release +combinations listed in ``README.md``. DevStack is only supported on +releases other than those documented in ``README.md`` on a best-effort +basis. + +Are there any differences between Ubuntu and Centos/Fedora support? +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Both should work well and are tested by DevStack CI. + +Why can't I use another shell? +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +DevStack now uses some specific bash-ism that require Bash 4, such as +associative arrays. Simple compatibility patches have been accepted in +the past when they are not complex, at this point no additional +compatibility patches will be considered except for shells matching +the array functionality as it is very ingrained in the repo and +project management. + +Can I test on OS/X? +~~~~~~~~~~~~~~~~~~~ + +Some people have success with bash 4 installed via homebrew to keep +running tests on OS/X. Operation and Configuration =========================== -Q: Can DevStack handle a multi-node installation? - A: Yes, see :doc:`multinode lab guide ` +Can DevStack handle a multi-node installation? +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Yes, see :doc:`multinode lab guide ` + +How can I document the environment that DevStack is using? +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Q: How can I document the environment that DevStack is using? 
- A: DevStack includes a script (``tools/info.sh``) that gathers the - versions of the relevant installed apt packages, pip packages and - git repos. This is a good way to verify what Python modules are - installed. +DevStack includes a script (``tools/info.sh``) that gathers the +versions of the relevant installed apt packages, pip packages and git +repos. This is a good way to verify what Python modules are +installed. -Q: How do I turn off a service that is enabled by default? - A: Services can be turned off by adding ``disable_service xxx`` to - ``local.conf`` (using ``n-vol`` in this example): +How do I turn off a service that is enabled by default? +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Services can be turned off by adding ``disable_service xxx`` to +``local.conf`` (using ``n-vol`` in this example): :: disable_service n-vol -Q: Is enabling a service that defaults to off done with the reverse of the above? - A: Of course! +Is enabling a service that defaults to off done with the reverse of the above? +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Of course! :: enable_service qpid -Q: How do I run a specific OpenStack milestone? - A: OpenStack milestones have tags set in the git repo. Set the - appropriate tag in the ``*_BRANCH`` variables in ``local.conf``. - Swift is on its own release schedule so pick a tag in the Swift repo - that is just before the milestone release. For example: +How do I run a specific OpenStack milestone? +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +OpenStack milestones have tags set in the git repo. Set the +appropriate tag in the ``*_BRANCH`` variables in ``local.conf``. +Swift is on its own release schedule so pick a tag in the Swift repo +that is just before the milestone release. For example: :: @@ -107,16 +128,16 @@ Q: How do I run a specific OpenStack milestone? 
NEUTRON_BRANCH=stable/kilo SWIFT_BRANCH=2.3.0 -Q: What can I do about RabbitMQ not wanting to start on my fresh new VM? - A: This is often caused by ``erlang`` not being happy with the - hostname resolving to a reachable IP address. Make sure your - hostname resolves to a working IP address; setting it to 127.0.0.1 - in ``/etc/hosts`` is often good enough for a single-node - installation. And in an extreme case, use ``clean.sh`` to eradicate - it and try again. +What can I do about RabbitMQ not wanting to start on my fresh new VM? +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +This is often caused by ``erlang`` not being happy with the hostname +resolving to a reachable IP address. Make sure your hostname resolves +to a working IP address; setting it to 127.0.0.1 in ``/etc/hosts`` is +often good enough for a single-node installation. And in an extreme +case, use ``clean.sh`` to eradicate it and try again. -Q: How can I set up Heat in stand-alone configuration? - A: Configure ``local.conf`` thusly: +Configure ``local.conf`` thusly: :: @@ -126,22 +147,25 @@ Q: How can I set up Heat in stand-alone configuration? KEYSTONE_SERVICE_HOST= KEYSTONE_AUTH_HOST= -Q: Why are my configuration changes ignored? - A: You may have run into the package prerequisite installation - timeout. ``tools/install_prereqs.sh`` has a timer that skips the - package installation checks if it was run within the last - ``PREREQ_RERUN_HOURS`` hours (default is 2). To override this, set - ``FORCE_PREREQ=1`` and the package checks will never be skipped. +Why are my configuration changes ignored? +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +You may have run into the package prerequisite installation +timeout. ``tools/install_prereqs.sh`` has a timer that skips the +package installation checks if it was run within the last +``PREREQ_RERUN_HOURS`` hours (default is 2). To override this, set +``FORCE_PREREQ=1`` and the package checks will never be skipped. 
Miscellaneous ============= -Q: ``tools/fixup_stuff.sh`` is broken and shouldn't 'fix' just one version of packages. - A: [Another not-a-question] No it isn't. Stuff in there is to - correct problems in an environment that need to be fixed elsewhere - or may/will be fixed in a future release. In the case of - ``httplib2`` and ``prettytable`` specific problems with specific - versions are being worked around. If later releases have those - problems than we'll add them to the script. Knowing about the broken - future releases is valuable rather than polling to see if it has - been fixed. +``tools/fixup_stuff.sh`` is broken and shouldn't 'fix' just one version of packages. +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Stuff in there is to correct problems in an environment that need to +be fixed elsewhere or may/will be fixed in a future release. In the +case of ``httplib2`` and ``prettytable`` specific problems with +specific versions are being worked around. If later releases have +those problems than we'll add them to the script. Knowing about the +broken future releases is valuable rather than polling to see if it +has been fixed. From 64cf20440865595a4c6c71859a964ddbea0389e2 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Tue, 2 Jun 2015 10:28:55 +1000 Subject: [PATCH 0270/2941] Add note on openrc and zsh Add a note about running openrc through bash for import into zsh, as shown by Chmouel Boudjnah in I5b6c8cfedcdd36efb4cbc91831501ee5c9c3b1d1 Change-Id: I5f1c9fc3fdc045cf6fb69af13f6264a81bf5f763 Closes-Bug: #1460656 --- doc/source/faq.rst | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/doc/source/faq.rst b/doc/source/faq.rst index 87f84693df..b09d386048 100644 --- a/doc/source/faq.rst +++ b/doc/source/faq.rst @@ -75,6 +75,21 @@ Can I test on OS/X? Some people have success with bash 4 installed via homebrew to keep running tests on OS/X. +Can I at least source ``openrc`` with ``zsh``? 
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +People have reported success with a special function to run ``openrc`` +through bash for this + +.. code-block:: bash + + function sourceopenrc { + pushd ~/devstack >/dev/null + eval $(bash -c ". openrc $1 $2;env|sed -n '/OS_/ { s/^/export /;p}'") + popd >/dev/null + } + + Operation and Configuration =========================== From 64b2ebca15701785af09d2c225d7c81e3c6acdce Mon Sep 17 00:00:00 2001 From: Ben Nemec Date: Fri, 5 Jun 2015 12:22:36 -0500 Subject: [PATCH 0271/2941] Make sure iptables-services is installed The iptables service files are no longer included by default on Fedora. This causes the systemctl calls in fixup_stuff.sh to fail when disabling firewalld in favor of iptables. Change-Id: If37691d03e3d07ca8b53c541717081beeb184c16 Closes-Bug: #1462347 --- files/rpms/general | 1 + tools/fixup_stuff.sh | 3 +++ 2 files changed, 4 insertions(+) diff --git a/files/rpms/general b/files/rpms/general index 7b2c00ad5c..43101f7c87 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -28,3 +28,4 @@ net-tools java-1.7.0-openjdk-headless # NOPRIME rhel7,f20 java-1.8.0-openjdk-headless # NOPRIME f21,f22 pyOpenSSL # version in pip uses too much memory +iptables-services # NOPRIME f21,f22 diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 31258d13f7..4fff57f401 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -126,6 +126,9 @@ if is_fedora; then # [4] http://docs.openstack.org/developer/devstack/guides/neutron.html if is_package_installed firewalld; then sudo systemctl disable firewalld + # The iptables service files are no longer included by default, + # at least on a baremetal Fedora 21 Server install. 
+ install_package iptables-services sudo systemctl enable iptables sudo systemctl stop firewalld sudo systemctl start iptables From d8ed29dcb3c73bd0eec61939164b90b914c6530c Mon Sep 17 00:00:00 2001 From: Aaron Rosen Date: Tue, 9 Jun 2015 13:15:24 -0700 Subject: [PATCH 0272/2941] Add vmware_nsx_v3 support Sadly this is needed. We should refactor this out from all of the plugins so we don't need to have all of these files. Adding this one for now though. Change-Id: Id382443fa7bef6b45237688c7e88d9e9a80a6ba1 --- lib/neutron_plugins/vmware_nsx_v3 | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 lib/neutron_plugins/vmware_nsx_v3 diff --git a/lib/neutron_plugins/vmware_nsx_v3 b/lib/neutron_plugins/vmware_nsx_v3 new file mode 100644 index 0000000000..6d8a6e6b70 --- /dev/null +++ b/lib/neutron_plugins/vmware_nsx_v3 @@ -0,0 +1,10 @@ +#!/bin/bash + +# This file is needed so Q_PLUGIN=vmware_nsx_v3 will work. + +# FIXME(salv-orlando): This function should not be here, but unfortunately +# devstack calls it before the external plugins are fetched +function has_neutron_plugin_security_group { + # 0 means True here + return 0 +} From 7272afdf8bf55580f778530d590afd505394b4ae Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 10 Jun 2015 10:26:57 +0000 Subject: [PATCH 0273/2941] Revert "Replace pip-installed requests CA bundle with link" This does not gracefully handle the situation where requests is not there at the beginning. Needs to be rethought. This reverts commit 7d350720fe5d25fece68c5d1625a33a6cad431ef. Change-Id: I101fac0dc6fdc97b7fb0b2955cffc6b4905152e5 --- tools/fixup_stuff.sh | 21 --------------------- 1 file changed, 21 deletions(-) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index d3a3de2092..31258d13f7 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -138,24 +138,3 @@ fi # and installing the latest version using pip. 
uninstall_package python-virtualenv pip_install -U virtualenv - -# If a non-system python-requests is installed then it will use the -# built-in CA certificate store rather than the distro-specific -# CA certificate store. Detect this and symlink to the correct -# one. If the value for the CA is not rooted in /etc then we know -# we need to change it. -capath=$(python -c "from requests import certs; print certs.where()") - -if is_service_enabled tls-proxy || [ "$USE_SSL" == "True" ]; then - if [[ ! $capath =~ ^/etc/.* && ! -L $capath ]]; then - if is_fedora; then - sudo rm -f $capath - sudo ln -s /etc/pki/tls/certs/ca-bundle.crt $capath - elif is_ubuntu; then - sudo rm -f $capath - sudo ln -s /etc/ssl/certs/ca-certificates.crt $capath - else - echo "Don't know how to set the CA bundle, expect the install to fail." - fi - fi -fi From cfbc7918c5e7720fcfba88d2c18d26dd9d0cf5cf Mon Sep 17 00:00:00 2001 From: Russell Bryant Date: Tue, 9 Jun 2015 09:14:13 -0400 Subject: [PATCH 0274/2941] Simplify start_neutron_other_agents This patch just simplifies the start function a bit by removing some unnecessary is_service_enabled checks that just wrap run_process calls. run_process does this exact check internally so it's not needed here. 
Change-Id: Id12a23f77ea0342854337c7d65821dd4e574dec2 Signed-off-by: Russell Bryant --- lib/neutron-legacy | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 519200bbcb..87674b309a 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -733,19 +733,13 @@ function start_neutron_other_agents { fi run_process q-meta "python $AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file=$Q_META_CONF_FILE" + run_process q-lbaas "python $AGENT_LBAAS_BINARY --config-file $NEUTRON_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME" + run_process q-metering "python $AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME" if [ "$VIRT_DRIVER" = 'xenserver' ]; then # For XenServer, start an agent for the domU openvswitch run_process q-domua "python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE.domU" fi - - if is_service_enabled q-lbaas; then - run_process q-lbaas "python $AGENT_LBAAS_BINARY --config-file $NEUTRON_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME" - fi - - if is_service_enabled q-metering; then - run_process q-metering "python $AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME" - fi } # Start running processes, including screen From 21afa42da6c3f0d7d73cc71718cfd7c9588fc43b Mon Sep 17 00:00:00 2001 From: Louis Taylor Date: Wed, 10 Jun 2015 12:55:10 +0000 Subject: [PATCH 0275/2941] ceph: remove deprecated glance_store options glance_store has now been fully migrated, so we can stop setting these options in the config files. 
Change-Id: I3c6c2eea0171227b1ed362e74bcc5b10770721be --- lib/ceph | 4 ---- 1 file changed, 4 deletions(-) diff --git a/lib/ceph b/lib/ceph index 4d6ca4aa68..25afb6c10c 100644 --- a/lib/ceph +++ b/lib/ceph @@ -264,10 +264,6 @@ function configure_ceph_glance { sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${GLANCE_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring - # NOTE(eharney): When Glance has fully migrated to Glance store, - # default_store can be removed from [DEFAULT]. (See lib/glance.) - iniset $GLANCE_API_CONF DEFAULT default_store rbd - iniset $GLANCE_API_CONF DEFAULT show_image_direct_url True iniset $GLANCE_API_CONF glance_store default_store rbd iniset $GLANCE_API_CONF glance_store stores "file, http, rbd" iniset $GLANCE_API_CONF glance_store rbd_store_ceph_conf $CEPH_CONF_FILE From 96c6b2deb33e67c1657e37fdcc8c6ec904674ed6 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 8 Jun 2015 16:48:49 -0700 Subject: [PATCH 0276/2941] Add automaton to lib/oslo and stackrc Part of blueprint adopt-automaton Change-Id: I520643b74aced431f7a46b7d7b94616bb2e6bf8e --- lib/oslo | 1 + stackrc | 4 ++++ tests/test_libs_from_pypi.sh | 2 +- 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/lib/oslo b/lib/oslo index d9688a01cd..b195687bb7 100644 --- a/lib/oslo +++ b/lib/oslo @@ -22,6 +22,7 @@ set +o xtrace # Defaults # -------- +GITDIR["automaton"]=$DEST/automaton GITDIR["cliff"]=$DEST/cliff GITDIR["debtcollector"]=$DEST/debtcollector GITDIR["oslo.concurrency"]=$DEST/oslo.concurrency diff --git a/stackrc b/stackrc index f8add4b44d..b010a88d52 100644 --- a/stackrc +++ b/stackrc @@ -330,6 +330,10 @@ GITBRANCH["cliff"]=${CLIFF_BRANCH:-master} 
GITREPO["debtcollector"]=${DEBTCOLLECTOR_REPO:-${GIT_BASE}/openstack/debtcollector.git} GITBRANCH["debtcollector"]=${DEBTCOLLECTOR_BRANCH:-master} +# helpful state machines +GITREPO["automaton"]=${AUTOMATON_REPO:-${GIT_BASE}/openstack/automaton.git} +GITBRANCH["automaton"]=${AUTOMATON_BRANCH:-master} + # oslo.concurrency GITREPO["oslo.concurrency"]=${OSLOCON_REPO:-${GIT_BASE}/openstack/oslo.concurrency.git} GITBRANCH["oslo.concurrency"]=${OSLOCON_BRANCH:-master} diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh index 336a213bc5..83b7e10b81 100755 --- a/tests/test_libs_from_pypi.sh +++ b/tests/test_libs_from_pypi.sh @@ -39,7 +39,7 @@ ALL_LIBS+=" oslo.serialization python-saharaclient django_openstack_auth" ALL_LIBS+=" python-openstackclient oslo.rootwrap oslo.i18n" ALL_LIBS+=" python-ceilometerclient oslo.utils python-swiftclient" ALL_LIBS+=" python-neutronclient tooz ceilometermiddleware oslo.policy" -ALL_LIBS+=" debtcollector os-brick" +ALL_LIBS+=" debtcollector os-brick automaton" # Generate the above list with # echo ${!GITREPO[@]} From e3a640d57327955aba491366d6e69349813897a8 Mon Sep 17 00:00:00 2001 From: Joshua Harlow Date: Mon, 8 Jun 2015 16:44:21 -0700 Subject: [PATCH 0277/2941] Add futurist to lib/oslo and stackrc Part of blueprint adopt-futurist Change-Id: I17e27a085c7c509bad6018016e90d0114543b073 --- lib/oslo | 2 ++ stackrc | 4 ++++ tests/test_libs_from_pypi.sh | 2 +- 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/lib/oslo b/lib/oslo index b195687bb7..be26668dfb 100644 --- a/lib/oslo +++ b/lib/oslo @@ -25,6 +25,7 @@ set +o xtrace GITDIR["automaton"]=$DEST/automaton GITDIR["cliff"]=$DEST/cliff GITDIR["debtcollector"]=$DEST/debtcollector +GITDIR["futurist"]=$DEST/futurist GITDIR["oslo.concurrency"]=$DEST/oslo.concurrency GITDIR["oslo.config"]=$DEST/oslo.config GITDIR["oslo.context"]=$DEST/oslo.context @@ -63,6 +64,7 @@ function _do_install_oslo_lib { function install_oslo { _do_install_oslo_lib "cliff" 
_do_install_oslo_lib "debtcollector" + _do_install_oslo_lib "futurist" _do_install_oslo_lib "oslo.concurrency" _do_install_oslo_lib "oslo.config" _do_install_oslo_lib "oslo.context" diff --git a/stackrc b/stackrc index b010a88d52..c16a9997a4 100644 --- a/stackrc +++ b/stackrc @@ -326,6 +326,10 @@ GITDIR["python-openstackclient"]=$DEST/python-openstackclient GITREPO["cliff"]=${CLIFF_REPO:-${GIT_BASE}/openstack/cliff.git} GITBRANCH["cliff"]=${CLIFF_BRANCH:-master} +# async framework/helpers +GITREPO["futurist"]=${FUTURIST_REPO:-${GIT_BASE}/openstack/futurist.git} +GITBRANCH["futurist"]=${FUTURIST_BRANCH:-master} + # debtcollector deprecation framework/helpers GITREPO["debtcollector"]=${DEBTCOLLECTOR_REPO:-${GIT_BASE}/openstack/debtcollector.git} GITBRANCH["debtcollector"]=${DEBTCOLLECTOR_BRANCH:-master} diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh index 83b7e10b81..2f0b1febe4 100755 --- a/tests/test_libs_from_pypi.sh +++ b/tests/test_libs_from_pypi.sh @@ -39,7 +39,7 @@ ALL_LIBS+=" oslo.serialization python-saharaclient django_openstack_auth" ALL_LIBS+=" python-openstackclient oslo.rootwrap oslo.i18n" ALL_LIBS+=" python-ceilometerclient oslo.utils python-swiftclient" ALL_LIBS+=" python-neutronclient tooz ceilometermiddleware oslo.policy" -ALL_LIBS+=" debtcollector os-brick automaton" +ALL_LIBS+=" debtcollector os-brick automaton futurist" # Generate the above list with # echo ${!GITREPO[@]} From 03786b1cca196473d90c8ce0dc1c98a1b3081ac5 Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Wed, 10 Jun 2015 11:31:51 -0700 Subject: [PATCH 0278/2941] Create and initialize the nova api_db Going forward, nova will have another database at the API level (similar to how current cells has an api-level cell, with its own database). This patch creates and initializes it so that we can start testing the migrations with grenade. 
Change-Id: I0dfae32102aeda9c5d17e134527b6a18f4b88014 --- lib/nova | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/nova b/lib/nova index 11fa2a0ba4..a9f335115d 100644 --- a/lib/nova +++ b/lib/nova @@ -53,6 +53,7 @@ NOVA_CONF=$NOVA_CONF_DIR/nova.conf NOVA_CELLS_CONF=$NOVA_CONF_DIR/nova-cells.conf NOVA_FAKE_CONF=$NOVA_CONF_DIR/nova-fake.conf NOVA_CELLS_DB=${NOVA_CELLS_DB:-nova_cell} +NOVA_API_DB=${NOVA_API_DB:-nova_api} NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini} # NOVA_API_VERSION valid options @@ -471,6 +472,7 @@ function create_nova_conf { iniset $NOVA_CONF DEFAULT s3_port "$S3_SERVICE_PORT" iniset $NOVA_CONF DEFAULT my_ip "$HOST_IP" iniset $NOVA_CONF database connection `database_connection_url nova` + iniset $NOVA_CONF api_database connection `database_connection_url nova_api` iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x" iniset $NOVA_CONF osapi_v3 enabled "True" @@ -675,6 +677,9 @@ function init_nova { if is_service_enabled n-cell; then recreate_database $NOVA_CELLS_DB fi + + recreate_database $NOVA_API_DB + $NOVA_BIN_DIR/nova-manage api_db sync fi create_nova_cache_dir From d1d6667c6b201d8c01f6b1d89660ceb4176de070 Mon Sep 17 00:00:00 2001 From: David Kranz Date: Thu, 11 Jun 2015 13:09:37 -0400 Subject: [PATCH 0279/2941] Enable the volume bootable feature flag that was added in kilo Change-Id: I5f8e0154a8b654b4c65b95f8b5c03e1a9be9e137 --- lib/tempest | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/tempest b/lib/tempest index c4ae05f534..0c451d3b55 100644 --- a/lib/tempest +++ b/lib/tempest @@ -451,6 +451,9 @@ function configure_tempest { iniset $TEMPEST_CONFIG object-storage-feature-enabled discoverable_apis $object_storage_api_extensions # Volume + # TODO(dkranz): Remove the bootable flag when Juno is end of life. + iniset $TEMPEST_CONFIG volume-feature-enabled bootable True + local volume_api_extensions=${VOLUME_API_EXTENSIONS:-"all"} if [[ ! 
-z "$DISABLE_VOLUME_API_EXTENSIONS" ]]; then # Enabled extensions are either the ones explicitly specified or those available on the API endpoint From 0f63eb3a37c547b4aa9027fb83cc444d60157adf Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Fri, 12 Jun 2015 09:05:12 +0200 Subject: [PATCH 0280/2941] Install PyMySQL if used Change Ic609ce136061b753ca692b37509a0b29c60bb8b5 switched to PyMySQL by default but does not make sure it is installed. This is causing gate failure in Gnocchi for example: http://logs.openstack.org/25/186025/3/check/gate-gnocchi-dsvm-functional-file-mysql/eebd773/logs/devstacklog.txt.gz Change-Id: I23d313220607fcc8acb95ab43f55b7d9899b9b1f --- lib/databases/mysql | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/databases/mysql b/lib/databases/mysql index 832c2ca6d7..f097fb21cb 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -165,6 +165,8 @@ function install_database_python_mysql { pip_install_gr $MYSQL_DRIVER if [[ "$MYSQL_DRIVER" == "MySQL-python" ]]; then ADDITIONAL_VENV_PACKAGES+=",MySQL-python" + elif [[ "$MYSQL_DRIVER" == "PyMySQL" ]]; then + ADDITIONAL_VENV_PACKAGES+=",PyMySQL" fi } From de8d29ed8ce4a26b61cbee48f9fe5418d5416a06 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 12 Jun 2015 10:43:28 +0000 Subject: [PATCH 0281/2941] Revert "change the default to PyMYSQL" The failure rate with neutron is too high to keep this as the default. Related-Bug: #1464612 This reverts commit b3798af474955368211a297ba85332fde5491993. Change-Id: Ie9550aeb25d472a38e3d3ef6f3711622c9221c46 --- lib/databases/mysql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/databases/mysql b/lib/databases/mysql index 832c2ca6d7..7cd2856ae9 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -11,7 +11,7 @@ MY_XTRACE=$(set +o | grep xtrace) set +o xtrace -MYSQL_DRIVER=${MYSQL_DRIVER:-PyMySQL} +MYSQL_DRIVER=${MYSQL_DRIVER:-MySQL-python} # Force over to pymysql driver by default if we are using it. 
if is_service_enabled mysql; then if [[ "$MYSQL_DRIVER" == "PyMySQL" ]]; then From 987f83da5adcf5f8dd2b78a526613cc23a9cdfdd Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Mon, 1 Jun 2015 12:43:30 +0200 Subject: [PATCH 0282/2941] SUSE: Add dependencies for L3 agent Change-Id: If3f4f5ab9a3072273d2f440718ce0c75fd71fdf1 --- files/rpms-suse/q-l3 | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 files/rpms-suse/q-l3 diff --git a/files/rpms-suse/q-l3 b/files/rpms-suse/q-l3 new file mode 100644 index 0000000000..a7a190c063 --- /dev/null +++ b/files/rpms-suse/q-l3 @@ -0,0 +1,2 @@ +conntrack-tools +keepalived From b3d8822ec835f1ca7eb5d9742b28f2ece480b387 Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Fri, 12 Jun 2015 07:54:03 -0700 Subject: [PATCH 0283/2941] Revert "Revert "change the default to PyMYSQL"" Some projects (Neutron) seem to be affected more than others, so we should revert this to allow for a more selective choice of the DB driver on a per project basis. We can re-enable the use MySQL-python just for Neutron. This reverts commit de8d29ed8ce4a26b61cbee48f9fe5418d5416a06. Related-Bug: #1464612 Change-Id: I889f4f8b116c413b300ab9eecc7b428a9a4afb1a --- lib/databases/mysql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/databases/mysql b/lib/databases/mysql index 7cd2856ae9..832c2ca6d7 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -11,7 +11,7 @@ MY_XTRACE=$(set +o | grep xtrace) set +o xtrace -MYSQL_DRIVER=${MYSQL_DRIVER:-MySQL-python} +MYSQL_DRIVER=${MYSQL_DRIVER:-PyMySQL} # Force over to pymysql driver by default if we are using it. 
if is_service_enabled mysql; then if [[ "$MYSQL_DRIVER" == "PyMySQL" ]]; then From e155b894c8975f649cbbbc08675095fe728d0b69 Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Fri, 12 Jun 2015 08:55:02 -0700 Subject: [PATCH 0284/2941] Set Neutron api_workers to $API_WORKERS Change [1] brings back this feature in Neutron, so we want to have the ability to set the number of API workers the same way other projects do. However, this cause some instability, so we need to be careful on how we bring it back. [1] https://review.openstack.org/#/c/191127/ Closes-Bug: #1432189 Related-bug: #1432065 Change-Id: Id4986a49d33fa4b8a7291150488665e200525dac Co-authored-by: Russell Bryant --- lib/neutron-legacy | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index dd67f45a39..3ac76a2586 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -463,6 +463,8 @@ function configure_neutron { fi _configure_neutron_debug_command + + iniset $NEUTRON_CONF DEFAULT api_workers "$API_WORKERS" } function create_nova_conf_neutron { From 13f655720d77149d908fcae7d0b7c6d377c972d5 Mon Sep 17 00:00:00 2001 From: Marian Horban Date: Wed, 10 Jun 2015 14:34:22 -0400 Subject: [PATCH 0285/2941] Small fixes for running Nova services under Apache2 Following fixed were done: 1. Cleanup fixed. 2. API_WORKERS config option was added to nova config templates. 3. Nova API screen tabs were named as nova-api and nova-ec2-api. 
Change-Id: I68dc6fd6c8aeffcec7f9039afd63bd1599c65682 --- files/apache-nova-api.template | 4 ++-- files/apache-nova-ec2-api.template | 4 ++-- lib/nova | 10 ++++++++-- 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/files/apache-nova-api.template b/files/apache-nova-api.template index 70ccedddc8..301a3bdbdd 100644 --- a/files/apache-nova-api.template +++ b/files/apache-nova-api.template @@ -1,7 +1,7 @@ Listen %PUBLICPORT% - WSGIDaemonProcess nova-api processes=5 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% + WSGIDaemonProcess nova-api processes=%APIWORKERS% threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% WSGIProcessGroup nova-api WSGIScriptAlias / %PUBLICWSGI% WSGIApplicationGroup %{GLOBAL} @@ -13,4 +13,4 @@ Listen %PUBLICPORT% %SSLENGINE% %SSLCERTFILE% %SSLKEYFILE% - \ No newline at end of file + diff --git a/files/apache-nova-ec2-api.template b/files/apache-nova-ec2-api.template index ae4cf94a38..235d958d1a 100644 --- a/files/apache-nova-ec2-api.template +++ b/files/apache-nova-ec2-api.template @@ -1,7 +1,7 @@ Listen %PUBLICPORT% - WSGIDaemonProcess nova-ec2-api processes=5 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% + WSGIDaemonProcess nova-ec2-api processes=%APIWORKERS% threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% WSGIProcessGroup nova-ec2-api WSGIScriptAlias / %PUBLICWSGI% WSGIApplicationGroup %{GLOBAL} @@ -13,4 +13,4 @@ Listen %PUBLICPORT% %SSLENGINE% %SSLCERTFILE% %SSLKEYFILE% - \ No newline at end of file + diff --git a/lib/nova b/lib/nova index a9f335115d..88b336a1be 100644 --- a/lib/nova +++ b/lib/nova @@ -232,6 +232,10 @@ function cleanup_nova { #if is_service_enabled n-cpu && [[ -r $NOVA_PLUGINS/hypervisor-$VIRT_DRIVER ]]; then # cleanup_nova_hypervisor #fi + + if [ "$NOVA_USE_MOD_WSGI" == "True" ]; then + _cleanup_nova_apache_wsgi + fi } # _cleanup_nova_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file @@ -277,6 +281,7 @@ function _config_nova_apache_wsgi { 
s|%SSLKEYFILE%|$nova_keyfile|g; s|%USER%|$STACK_USER|g; s|%VIRTUALENV%|$venv_path|g + s|%APIWORKERS%|$API_WORKERS|g " -i $nova_apache_conf sudo cp $FILES/apache-nova-ec2-api.template $nova_ec2_apache_conf @@ -289,6 +294,7 @@ function _config_nova_apache_wsgi { s|%SSLKEYFILE%|$nova_keyfile|g; s|%USER%|$STACK_USER|g; s|%VIRTUALENV%|$venv_path|g + s|%APIWORKERS%|$API_WORKERS|g " -i $nova_ec2_apache_conf } @@ -761,8 +767,8 @@ function start_nova_api { enable_apache_site nova-api enable_apache_site nova-ec2-api restart_apache_server - tail_log nova /var/log/$APACHE_NAME/nova-api.log - tail_log nova /var/log/$APACHE_NAME/nova-ec2-api.log + tail_log nova-api /var/log/$APACHE_NAME/nova-api.log + tail_log nova-ec2-api /var/log/$APACHE_NAME/nova-ec2-api.log else run_process n-api "$NOVA_BIN_DIR/nova-api" fi From 6bc089fce03e1b29405224eeed4761f08339255a Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Mon, 1 Jun 2015 12:39:12 +0200 Subject: [PATCH 0286/2941] Cinder: On SUSE, avoid restarting tgt There is a known bug that restart tgtd fails, so go the workaround way and stopping/starting it instead. In addition, remove the else case since unstack also uses cleanup_cinder, which already unconditionally supports all distros. 
Change-Id: Ib70917a95f001ef36a51815f08416fa30084aad6 --- lib/cinder | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/lib/cinder b/lib/cinder index b8cf8090dc..ade3b82c3a 100644 --- a/lib/cinder +++ b/lib/cinder @@ -432,12 +432,13 @@ function start_cinder { _configure_tgt_for_config_d if is_ubuntu; then sudo service tgt restart - elif is_fedora || is_suse; then - restart_service tgtd + elif is_suse; then + # NOTE(dmllr): workaround restart bug + # https://bugzilla.suse.com/show_bug.cgi?id=934642 + stop_service tgtd + start_service tgtd else - # note for other distros: unstack.sh also uses the tgt/tgtd service - # name, and would need to be adjusted too - exit_distro_not_supported "restarting tgt" + restart_service tgtd fi # NOTE(gfidente): ensure tgtd is running in debug mode sudo tgtadm --mode system --op update --name debug --value on From b632c9ef81090e210fee27346c6e1f2b4f3bedec Mon Sep 17 00:00:00 2001 From: Jamie Lennox Date: Thu, 28 May 2015 23:36:15 +0000 Subject: [PATCH 0287/2941] Use keystone v3 API for projects Always use the keystone v3 API for project creation. Make domain a required argument. Whilst we could simply default this value within the function I think it's better to make this explicit as these are things deployers and services need to consider. In future we will want to figure out how we want devstack to organize domains however I don't believe that it belongs in this patch. 
Change-Id: Ib9587193c5c8419dc4b5a608246709baaddd2a52 Implements: bp keystonev3 --- functions-common | 15 ++++++--------- lib/ironic | 2 +- lib/keystone | 8 ++++---- lib/swift | 4 ++-- lib/tempest | 2 +- 5 files changed, 14 insertions(+), 17 deletions(-) diff --git a/functions-common b/functions-common index 3a2f5f7f41..33245fbc3d 100644 --- a/functions-common +++ b/functions-common @@ -720,18 +720,15 @@ function get_or_create_user { } # Gets or creates project -# Usage: get_or_create_project [] +# Usage: get_or_create_project function get_or_create_project { - # Gets project id - local os_cmd="openstack" - local domain="" - if [[ ! -z "$2" ]]; then - domain="--domain=$2" - os_cmd="$os_cmd --os-url=$KEYSTONE_SERVICE_URI_V3 --os-identity-api-version=3" - fi local project_id=$( # Creates new project with --or-show - $os_cmd project create $1 $domain --or-show -f value -c id + openstack --os-url=$KEYSTONE_SERVICE_URI_V3 \ + --os-identity-api-version=3 \ + project create $1 \ + --domain=$2 \ + --or-show -f value -c id ) echo $project_id } diff --git a/lib/ironic b/lib/ironic index 4984be1861..40a3460f1f 100644 --- a/lib/ironic +++ b/lib/ironic @@ -366,7 +366,7 @@ function configure_ironic_conductor { fi iniset $IRONIC_CONF_FILE glance swift_endpoint_url http://${HOST_IP}:${SWIFT_DEFAULT_BIND_PORT:-8080} iniset $IRONIC_CONF_FILE glance swift_api_version v1 - local tenant_id=$(get_or_create_project $SERVICE_TENANT_NAME) + local tenant_id=$(get_or_create_project $SERVICE_TENANT_NAME default) iniset $IRONIC_CONF_FILE glance swift_account AUTH_${tenant_id} iniset $IRONIC_CONF_FILE glance swift_container glance iniset $IRONIC_CONF_FILE glance swift_temp_url_duration 3600 diff --git a/lib/keystone b/lib/keystone index 7a949cf96f..90ff31a54c 100644 --- a/lib/keystone +++ b/lib/keystone @@ -357,13 +357,13 @@ function configure_keystone_extensions { function create_keystone_accounts { # admin - local admin_tenant=$(get_or_create_project "admin") + local 
admin_tenant=$(get_or_create_project "admin" default) local admin_user=$(get_or_create_user "admin" "$ADMIN_PASSWORD") local admin_role=$(get_or_create_role "admin") get_or_add_user_project_role $admin_role $admin_user $admin_tenant # Create service project/role - get_or_create_project "$SERVICE_TENANT_NAME" + get_or_create_project "$SERVICE_TENANT_NAME" default # Service role, so service users do not have to be admins get_or_create_role service @@ -382,10 +382,10 @@ function create_keystone_accounts { local another_role=$(get_or_create_role "anotherrole") # invisible tenant - admin can't see this one - local invis_tenant=$(get_or_create_project "invisible_to_admin") + local invis_tenant=$(get_or_create_project "invisible_to_admin" default) # demo - local demo_tenant=$(get_or_create_project "demo") + local demo_tenant=$(get_or_create_project "demo" default) local demo_user=$(get_or_create_user "demo" \ "$ADMIN_PASSWORD" "demo@example.com") diff --git a/lib/swift b/lib/swift index 820042d972..420350b95e 100644 --- a/lib/swift +++ b/lib/swift @@ -616,7 +616,7 @@ function create_swift_accounts { "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s" fi - local swift_tenant_test1=$(get_or_create_project swifttenanttest1) + local swift_tenant_test1=$(get_or_create_project swifttenanttest1 default) die_if_not_set $LINENO swift_tenant_test1 "Failure creating swift_tenant_test1" SWIFT_USER_TEST1=$(get_or_create_user swiftusertest1 $swiftusertest1_password "test@example.com") die_if_not_set $LINENO SWIFT_USER_TEST1 "Failure creating SWIFT_USER_TEST1" @@ -626,7 +626,7 @@ function create_swift_accounts { die_if_not_set $LINENO swift_user_test3 "Failure creating swift_user_test3" get_or_add_user_project_role $another_role $swift_user_test3 $swift_tenant_test1 - local swift_tenant_test2=$(get_or_create_project swifttenanttest2) + local swift_tenant_test2=$(get_or_create_project swifttenanttest2 default) die_if_not_set $LINENO swift_tenant_test2 "Failure creating 
swift_tenant_test2" local swift_user_test2=$(get_or_create_user swiftusertest2 $swiftusertest2_password "test2@example.com") diff --git a/lib/tempest b/lib/tempest index c4ae05f534..4e7133a0db 100644 --- a/lib/tempest +++ b/lib/tempest @@ -546,7 +546,7 @@ function create_tempest_accounts { if is_service_enabled tempest; then # Tempest has some tests that validate various authorization checks # between two regular users in separate tenants - get_or_create_project alt_demo + get_or_create_project alt_demo default get_or_create_user alt_demo "$ADMIN_PASSWORD" "alt_demo@example.com" get_or_add_user_project_role Member alt_demo alt_demo fi From 9d7e776b704d0fa54b2bf6543d054ab0118f5806 Mon Sep 17 00:00:00 2001 From: Jamie Lennox Date: Fri, 29 May 2015 01:08:53 +0000 Subject: [PATCH 0288/2941] Use Keystone v3 API for user creation This includes requiring a domain when creating a user. This will allow us to control where users are created in a later patch. Adding the token to the user creation call is required because of a bad interaction between OpenStackClient, os-client-config and keystoneclient when dealing with v2 authentication but v3 API calls. It will be cleaned up when we switch to v3 credentials. 
Change-Id: I6ef50fd384d423bc0f13ee1016a8bdbb0650ecd9 Implements: bp keystonev3 --- functions-common | 23 +++++++++-------------- lib/glance | 2 +- lib/keystone | 6 +++--- lib/swift | 12 ++++++++---- lib/tempest | 2 +- stack.sh | 3 +++ 6 files changed, 25 insertions(+), 23 deletions(-) diff --git a/functions-common b/functions-common index 33245fbc3d..48c0c75637 100644 --- a/functions-common +++ b/functions-common @@ -675,9 +675,8 @@ function get_or_create_domain { } # Gets or creates group -# Usage: get_or_create_group [ ] +# Usage: get_or_create_group [] function get_or_create_group { - local domain=${2:+--domain ${2}} local desc="${3:-}" local os_url="$KEYSTONE_SERVICE_URI_V3" # Gets group id @@ -685,34 +684,30 @@ function get_or_create_group { # Creates new group with --or-show openstack --os-token=$OS_TOKEN --os-url=$os_url \ --os-identity-api-version=3 group create $1 \ - $domain --description "$desc" --or-show \ + --domain $2 --description "$desc" --or-show \ -f value -c id ) echo $group_id } # Gets or creates user -# Usage: get_or_create_user [ []] +# Usage: get_or_create_user [] function get_or_create_user { - if [[ ! -z "$3" ]]; then - local email="--email=$3" + if [[ ! -z "$4" ]]; then + local email="--email=$4" else local email="" fi - local os_cmd="openstack" - local domain="" - if [[ ! 
-z "$4" ]]; then - domain="--domain=$4" - os_cmd="$os_cmd --os-url=$KEYSTONE_SERVICE_URI_V3 --os-identity-api-version=3" - fi # Gets user id local user_id=$( # Creates new user with --or-show - $os_cmd user create \ + openstack user create \ $1 \ --password "$2" \ + --os-url=$KEYSTONE_SERVICE_URI_V3 \ + --os-identity-api-version=3 \ + --domain=$3 \ $email \ - $domain \ --or-show \ -f value -c id ) diff --git a/lib/glance b/lib/glance index 016ade3479..2fae002049 100644 --- a/lib/glance +++ b/lib/glance @@ -254,7 +254,7 @@ function create_glance_accounts { if is_service_enabled s-proxy; then local glance_swift_user=$(get_or_create_user "glance-swift" \ - "$SERVICE_PASSWORD" "glance-swift@example.com") + "$SERVICE_PASSWORD" "default" "glance-swift@example.com") get_or_add_user_project_role "ResellerAdmin" $glance_swift_user $SERVICE_TENANT_NAME fi diff --git a/lib/keystone b/lib/keystone index 90ff31a54c..c33d466c6c 100644 --- a/lib/keystone +++ b/lib/keystone @@ -358,7 +358,7 @@ function create_keystone_accounts { # admin local admin_tenant=$(get_or_create_project "admin" default) - local admin_user=$(get_or_create_user "admin" "$ADMIN_PASSWORD") + local admin_user=$(get_or_create_user "admin" "$ADMIN_PASSWORD" default) local admin_role=$(get_or_create_role "admin") get_or_add_user_project_role $admin_role $admin_user $admin_tenant @@ -387,7 +387,7 @@ function create_keystone_accounts { # demo local demo_tenant=$(get_or_create_project "demo" default) local demo_user=$(get_or_create_user "demo" \ - "$ADMIN_PASSWORD" "demo@example.com") + "$ADMIN_PASSWORD" "default" "demo@example.com") get_or_add_user_project_role $member_role $demo_user $demo_tenant get_or_add_user_project_role $admin_role $admin_user $demo_tenant @@ -426,7 +426,7 @@ function create_keystone_accounts { function create_service_user { local role=${2:-service} - local user=$(get_or_create_user "$1" "$SERVICE_PASSWORD") + local user=$(get_or_create_user "$1" "$SERVICE_PASSWORD" default) 
get_or_add_user_project_role "$role" "$user" "$SERVICE_TENANT_NAME" } diff --git a/lib/swift b/lib/swift index 420350b95e..0cd51aaddf 100644 --- a/lib/swift +++ b/lib/swift @@ -618,18 +618,21 @@ function create_swift_accounts { local swift_tenant_test1=$(get_or_create_project swifttenanttest1 default) die_if_not_set $LINENO swift_tenant_test1 "Failure creating swift_tenant_test1" - SWIFT_USER_TEST1=$(get_or_create_user swiftusertest1 $swiftusertest1_password "test@example.com") + SWIFT_USER_TEST1=$(get_or_create_user swiftusertest1 $swiftusertest1_password \ + "default" "test@example.com") die_if_not_set $LINENO SWIFT_USER_TEST1 "Failure creating SWIFT_USER_TEST1" get_or_add_user_project_role admin $SWIFT_USER_TEST1 $swift_tenant_test1 - local swift_user_test3=$(get_or_create_user swiftusertest3 $swiftusertest3_password "test3@example.com") + local swift_user_test3=$(get_or_create_user swiftusertest3 $swiftusertest3_password \ + "default" "test3@example.com") die_if_not_set $LINENO swift_user_test3 "Failure creating swift_user_test3" get_or_add_user_project_role $another_role $swift_user_test3 $swift_tenant_test1 local swift_tenant_test2=$(get_or_create_project swifttenanttest2 default) die_if_not_set $LINENO swift_tenant_test2 "Failure creating swift_tenant_test2" - local swift_user_test2=$(get_or_create_user swiftusertest2 $swiftusertest2_password "test2@example.com") + local swift_user_test2=$(get_or_create_user swiftusertest2 $swiftusertest2_password \ + "default" "test2@example.com") die_if_not_set $LINENO swift_user_test2 "Failure creating swift_user_test2" get_or_add_user_project_role admin $swift_user_test2 $swift_tenant_test2 @@ -639,7 +642,8 @@ function create_swift_accounts { local swift_tenant_test4=$(get_or_create_project swifttenanttest4 $swift_domain) die_if_not_set $LINENO swift_tenant_test4 "Failure creating swift_tenant_test4" - local swift_user_test4=$(get_or_create_user swiftusertest4 $swiftusertest4_password "test4@example.com" $swift_domain) + 
local swift_user_test4=$(get_or_create_user swiftusertest4 $swiftusertest4_password \ + $swift_domain "test4@example.com") die_if_not_set $LINENO swift_user_test4 "Failure creating swift_user_test4" get_or_add_user_project_role admin $swift_user_test4 $swift_tenant_test4 } diff --git a/lib/tempest b/lib/tempest index 4e7133a0db..f3703a072a 100644 --- a/lib/tempest +++ b/lib/tempest @@ -547,7 +547,7 @@ function create_tempest_accounts { # Tempest has some tests that validate various authorization checks # between two regular users in separate tenants get_or_create_project alt_demo default - get_or_create_user alt_demo "$ADMIN_PASSWORD" "alt_demo@example.com" + get_or_create_user alt_demo "$ADMIN_PASSWORD" "default" "alt_demo@example.com" get_or_add_user_project_role Member alt_demo alt_demo fi } diff --git a/stack.sh b/stack.sh index dc79fa94f7..489fbe446c 100755 --- a/stack.sh +++ b/stack.sh @@ -1006,6 +1006,9 @@ if is_service_enabled keystone; then # Begone token auth unset OS_TOKEN OS_URL + # force set to use v2 identity authentication even with v3 commands + export OS_AUTH_TYPE=v2password + # Set up password auth credentials now that Keystone is bootstrapped export OS_AUTH_URL=$SERVICE_ENDPOINT export OS_TENANT_NAME=admin From 37eca48970106abb9b982af4f1262bcb227411ea Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 16 Jun 2015 07:19:22 -0400 Subject: [PATCH 0289/2941] remove non RabbitMQ messaging Part of what was decided at summit is devstack needs to return to a more opinionated stance, the following removes support for non RabbitMQ messaging. RabbitMQ is used by over 95% of our community (statistically all of it), so it's a pretty clear line to draw that this shouldn't be in tree. iniset_rpc_backend will be our stable hook for other projects that want to implement this out of tree. The burden on creating those out of tree plugins will be on those that wish to support those alternative stacks. 
Change-Id: I8073a895c03ec927a2598eff6c2f01e5c82606fc --- README.md | 16 --- doc/source/faq.rst | 2 +- files/debs/neutron | 2 - files/debs/nova | 2 - files/debs/qpid | 1 - files/rpms-suse/neutron | 3 - files/rpms-suse/nova | 4 - files/rpms/neutron | 1 - files/rpms/nova | 1 - files/rpms/qpid | 3 - functions-common | 2 +- lib/glance | 8 +- lib/rpc_backend | 271 ++-------------------------------------- lib/zaqar | 7 +- stack.sh | 13 -- 15 files changed, 16 insertions(+), 320 deletions(-) delete mode 100644 files/debs/qpid delete mode 100644 files/rpms/qpid diff --git a/README.md b/README.md index 455e1c69c6..ebcb0184e5 100644 --- a/README.md +++ b/README.md @@ -115,22 +115,6 @@ following in the `localrc` section: `mysql` is the default database. -# RPC Backend - -Multiple RPC backends are available. Currently, this -includes RabbitMQ (default), Qpid, and ZeroMQ. Your backend of -choice may be selected via the `localrc` section. - -Note that selecting more than one RPC backend will result in a failure. - -Example (ZeroMQ): - - ENABLED_SERVICES="$ENABLED_SERVICES,-rabbit,-qpid,zeromq" - -Example (Qpid): - - ENABLED_SERVICES="$ENABLED_SERVICES,-rabbit,-zeromq,qpid" - # Apache Frontend Apache web server can be enabled for wsgi services that support being deployed diff --git a/doc/source/faq.rst b/doc/source/faq.rst index 0437ec26d5..be81f5d863 100644 --- a/doc/source/faq.rst +++ b/doc/source/faq.rst @@ -88,7 +88,7 @@ Q: Is enabling a service that defaults to off done with the reverse of the above :: - enable_service qpid + enable_service q-svc Q: How do I run a specific OpenStack milestone? A: OpenStack milestones have tags set in the git repo. 
Set the diff --git a/files/debs/neutron b/files/debs/neutron index 2d69a71c3a..b5a457e482 100644 --- a/files/debs/neutron +++ b/files/debs/neutron @@ -9,11 +9,9 @@ sudo postgresql-server-dev-all python-mysqldb python-mysql.connector -python-qpid # NOPRIME dnsmasq-base dnsmasq-utils # for dhcp_release only available in dist:precise rabbitmq-server # NOPRIME -qpidd # NOPRIME sqlite3 vlan radvd # NOPRIME diff --git a/files/debs/nova b/files/debs/nova index 9d9acde3e9..346b8b337a 100644 --- a/files/debs/nova +++ b/files/debs/nova @@ -24,10 +24,8 @@ vlan curl genisoimage # required for config_drive rabbitmq-server # NOPRIME -qpidd # NOPRIME socat # used by ajaxterm python-libvirt # NOPRIME python-libxml2 python-numpy # used by websockify for spice console python-m2crypto -python-qpid # NOPRIME diff --git a/files/debs/qpid b/files/debs/qpid deleted file mode 100644 index e3bbf0961c..0000000000 --- a/files/debs/qpid +++ /dev/null @@ -1 +0,0 @@ -sasl2-bin # NOPRIME diff --git a/files/rpms-suse/neutron b/files/rpms-suse/neutron index e75db89ddf..133979911e 100644 --- a/files/rpms-suse/neutron +++ b/files/rpms-suse/neutron @@ -11,6 +11,3 @@ sqlite3 sudo vlan radvd # NOPRIME - -# FIXME: qpid is not part of openSUSE, those names are tentative -qpidd # NOPRIME diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova index 6f8aef1027..039456fc1b 100644 --- a/files/rpms-suse/nova +++ b/files/rpms-suse/nova @@ -22,7 +22,3 @@ socat sqlite3 sudo vlan - -# FIXME: qpid is not part of openSUSE, those names are tentative -python-qpid # NOPRIME -qpidd # NOPRIME diff --git a/files/rpms/neutron b/files/rpms/neutron index 8292e7bffe..29851bea9b 100644 --- a/files/rpms/neutron +++ b/files/rpms/neutron @@ -11,7 +11,6 @@ mysql-server # NOPRIME openvswitch # NOPRIME postgresql-devel rabbitmq-server # NOPRIME -qpid-cpp-server # NOPRIME sqlite sudo radvd # NOPRIME diff --git a/files/rpms/nova b/files/rpms/nova index ebd667454a..6a20940bd4 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ 
-22,6 +22,5 @@ mysql-server # NOPRIME parted polkit rabbitmq-server # NOPRIME -qpid-cpp-server # NOPRIME sqlite sudo diff --git a/files/rpms/qpid b/files/rpms/qpid deleted file mode 100644 index 41dd2f69f9..0000000000 --- a/files/rpms/qpid +++ /dev/null @@ -1,3 +0,0 @@ -qpid-proton-c-devel # NOPRIME -cyrus-sasl-lib # NOPRIME -cyrus-sasl-plain # NOPRIME diff --git a/functions-common b/functions-common index 3a2f5f7f41..8f1031139d 100644 --- a/functions-common +++ b/functions-common @@ -1671,7 +1671,7 @@ function disable_service { # ``ENABLED_SERVICES`` list, if they are not already present. # # For example: -# enable_service qpid +# enable_service q-svc # # This function does not know about the special cases # for nova, glance, and neutron built into is_service_enabled(). diff --git a/lib/glance b/lib/glance index 016ade3479..9db25aef06 100644 --- a/lib/glance +++ b/lib/glance @@ -112,9 +112,7 @@ function configure_glance { iniset $GLANCE_REGISTRY_CONF DEFAULT workers "$API_WORKERS" iniset $GLANCE_REGISTRY_CONF paste_deploy flavor keystone configure_auth_token_middleware $GLANCE_REGISTRY_CONF glance $GLANCE_AUTH_CACHE_DIR/registry - if is_service_enabled qpid || [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then - iniset $GLANCE_REGISTRY_CONF DEFAULT notification_driver messaging - fi + iniset $GLANCE_REGISTRY_CONF DEFAULT notification_driver messaging iniset_rpc_backend glance $GLANCE_REGISTRY_CONF cp $GLANCE_DIR/etc/glance-api.conf $GLANCE_API_CONF @@ -125,9 +123,7 @@ function configure_glance { iniset $GLANCE_API_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/ iniset $GLANCE_API_CONF paste_deploy flavor keystone+cachemanagement configure_auth_token_middleware $GLANCE_API_CONF glance $GLANCE_AUTH_CACHE_DIR/api - if is_service_enabled qpid || [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; then - iniset $GLANCE_API_CONF DEFAULT notification_driver messaging - fi + iniset $GLANCE_API_CONF DEFAULT notification_driver messaging iniset_rpc_backend glance 
$GLANCE_API_CONF if [ "$VIRT_DRIVER" = 'xenserver' ]; then iniset $GLANCE_API_CONF DEFAULT container_formats "ami,ari,aki,bare,ovf,tgz" diff --git a/lib/rpc_backend b/lib/rpc_backend index 33ab03d664..03eacd8674 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -1,72 +1,32 @@ #!/bin/bash # # lib/rpc_backend -# Interface for interactig with different RPC backends +# Interface for installing RabbitMQ on the system # Dependencies: # # - ``functions`` file # - ``RABBIT_{HOST|PASSWORD|USERID}`` must be defined when RabbitMQ is used -# - ``RPC_MESSAGING_PROTOCOL`` option for configuring the messaging protocol # ``stack.sh`` calls the entry points in this order: # # - check_rpc_backend # - install_rpc_backend # - restart_rpc_backend -# - iniset_rpc_backend +# - iniset_rpc_backend (stable interface) +# +# Note: if implementing an out of tree plugin for an RPC backend, you +# should install all services through normal plugin methods, then +# redefine ``iniset_rpc_backend`` in your code. That's the one portion +# of this file which is a standard interface. # Save trace setting XTRACE=$(set +o | grep xtrace) set +o xtrace -RPC_MESSAGING_PROTOCOL=${RPC_MESSAGING_PROTOCOL:-0.9} - -# TODO(sdague): RPC backend selection is super wonky because we treat -# messaging server as a service, which it really isn't for multi host -QPID_HOST=${QPID_HOST:-} - - # Functions # --------- -# Make sure we only have one rpc backend enabled. -# Also check the specified rpc backend is available on your platform. -function check_rpc_backend { - local c svc - - local rpc_needed=1 - # We rely on the fact that filenames in lib/* match the service names - # that can be passed as arguments to is_service_enabled. - # We check for a call to iniset_rpc_backend in these files, meaning - # the service needs a backend. 
- rpc_candidates=$(grep -rl iniset_rpc_backend $TOP_DIR/lib/ | awk -F/ '{print $NF}') - for c in ${rpc_candidates}; do - if is_service_enabled $c; then - rpc_needed=0 - break - fi - done - local rpc_backend_cnt=0 - for svc in qpid zeromq rabbit; do - is_service_enabled $svc && - (( rpc_backend_cnt++ )) || true - done - if [ "$rpc_backend_cnt" -gt 1 ]; then - echo "ERROR: only one rpc backend may be enabled," - echo " set only one of 'rabbit', 'qpid', 'zeromq'" - echo " via ENABLED_SERVICES." - elif [ "$rpc_backend_cnt" == 0 ] && [ "$rpc_needed" == 0 ]; then - echo "ERROR: at least one rpc backend must be enabled," - echo " set one of 'rabbit', 'qpid', 'zeromq'" - echo " via ENABLED_SERVICES." - fi - - if is_service_enabled qpid && ! qpid_is_supported; then - die $LINENO "Qpid support is not available for this version of your distribution." - fi -} - # clean up after rpc backend - eradicate all traces so changing backends # produces a clean switch function cleanup_rpc_backend { @@ -79,110 +39,14 @@ function cleanup_rpc_backend { # And the Erlang runtime too apt_get purge -y erlang* fi - elif is_service_enabled qpid; then - if is_fedora; then - uninstall_package qpid-cpp-server - elif is_ubuntu; then - uninstall_package qpidd - else - exit_distro_not_supported "qpid installation" - fi - elif is_service_enabled zeromq; then - if is_fedora; then - uninstall_package zeromq python-zmq - if [ "$ZEROMQ_MATCHMAKER" == "redis" ]; then - uninstall_package redis python-redis - fi - elif is_ubuntu; then - uninstall_package libzmq1 python-zmq - if [ "$ZEROMQ_MATCHMAKER" == "redis" ]; then - uninstall_package redis-server python-redis - fi - elif is_suse; then - uninstall_package libzmq1 python-pyzmq - if [ "$ZEROMQ_MATCHMAKER" == "redis" ]; then - uninstall_package redis python-redis - fi - else - exit_distro_not_supported "zeromq installation" - fi - fi - - # Remove the AMQP 1.0 messaging libraries - if [ "$RPC_MESSAGING_PROTOCOL" == "AMQP1" ]; then - if is_fedora; then - 
uninstall_package qpid-proton-c-devel - uninstall_package python-qpid-proton - fi - # TODO(kgiusti) ubuntu cleanup fi } # install rpc backend function install_rpc_backend { - # Regardless of the broker used, if AMQP 1.0 is configured load - # the necessary messaging client libraries for oslo.messaging - if [ "$RPC_MESSAGING_PROTOCOL" == "AMQP1" ]; then - if is_fedora; then - install_package qpid-proton-c-devel - install_package python-qpid-proton - elif is_ubuntu; then - # TODO(kgiusti) The QPID AMQP 1.0 protocol libraries - # are not yet in the ubuntu repos. Enable these installs - # once they are present: - #install_package libqpid-proton2-dev - #install_package python-qpid-proton - # Also add 'uninstall' directives in cleanup_rpc_backend()! - exit_distro_not_supported "QPID AMQP 1.0 Proton libraries" - else - exit_distro_not_supported "QPID AMQP 1.0 Proton libraries" - fi - # Install pyngus client API - # TODO(kgiusti) can remove once python qpid bindings are - # available on all supported platforms _and_ pyngus is added - # to the requirements.txt file in oslo.messaging - pip_install_gr pyngus - fi - if is_service_enabled rabbit; then # Install rabbitmq-server install_package rabbitmq-server - elif is_service_enabled qpid; then - if is_fedora; then - install_package qpid-cpp-server - elif is_ubuntu; then - install_package qpidd - else - exit_distro_not_supported "qpid installation" - fi - _configure_qpid - elif is_service_enabled zeromq; then - if is_fedora; then - install_package zeromq python-zmq - if [ "$ZEROMQ_MATCHMAKER" == "redis" ]; then - install_package redis python-redis - fi - elif is_ubuntu; then - install_package libzmq1 python-zmq - if [ "$ZEROMQ_MATCHMAKER" == "redis" ]; then - install_package redis-server python-redis - fi - elif is_suse; then - install_package libzmq1 python-pyzmq - if [ "$ZEROMQ_MATCHMAKER" == "redis" ]; then - install_package redis python-redis - fi - else - exit_distro_not_supported "zeromq installation" - fi - # Necessary 
directory for socket location. - sudo mkdir -p /var/run/openstack - sudo chown $STACK_USER /var/run/openstack - fi - - # If using the QPID broker, install the QPID python client API - if is_service_enabled qpid || [ -n "$QPID_HOST" ]; then - install_package python-qpid fi } @@ -232,17 +96,12 @@ function restart_rpc_backend { sudo rabbitmqctl set_permissions -p child_cell $RABBIT_USERID ".*" ".*" ".*" fi fi - elif is_service_enabled qpid; then - echo_summary "Starting qpid" - restart_service qpidd fi } # builds transport url string function get_transport_url { - if is_service_enabled qpid || [ -n "$QPID_HOST" ]; then - echo "qpid://$QPID_USERNAME:$QPID_PASSWORD@$QPID_HOST:5672/" - elif is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then + if is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then echo "rabbit://$RABBIT_USERID:$RABBIT_PASSWORD@$RABBIT_HOST:5672/" fi } @@ -252,29 +111,7 @@ function iniset_rpc_backend { local package=$1 local file=$2 local section=${3:-DEFAULT} - if is_service_enabled zeromq; then - iniset $file $section rpc_backend "zmq" - iniset $file $section rpc_zmq_host `hostname` - if [ "$ZEROMQ_MATCHMAKER" == "redis" ]; then - iniset $file $section rpc_zmq_matchmaker "redis" - MATCHMAKER_REDIS_HOST=${MATCHMAKER_REDIS_HOST:-127.0.0.1} - iniset $file matchmaker_redis host $MATCHMAKER_REDIS_HOST - else - die $LINENO "Other matchmaker drivers not supported" - fi - elif is_service_enabled qpid || [ -n "$QPID_HOST" ]; then - # For Qpid use the 'amqp' oslo.messaging transport when AMQP 1.0 is used - if [ "$RPC_MESSAGING_PROTOCOL" == "AMQP1" ]; then - iniset $file $section rpc_backend "amqp" - else - iniset $file $section rpc_backend "qpid" - fi - iniset $file $section qpid_hostname ${QPID_HOST:-$SERVICE_HOST} - if [ -n "$QPID_USERNAME" ]; then - iniset $file $section qpid_username $QPID_USERNAME - iniset $file $section qpid_password $QPID_PASSWORD - fi - elif is_service_enabled 
rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then + if is_service_enabled rabbit || { [ -n "$RABBIT_HOST" ] && [ -n "$RABBIT_PASSWORD" ]; }; then iniset $file $section rpc_backend "rabbit" iniset $file oslo_messaging_rabbit rabbit_hosts $RABBIT_HOST iniset $file oslo_messaging_rabbit rabbit_password $RABBIT_PASSWORD @@ -288,17 +125,6 @@ function iniset_rpc_backend { fi } -# Check if qpid can be used on the current distro. -# qpid_is_supported -function qpid_is_supported { - if [[ -z "$DISTRO" ]]; then - GetDistro - fi - - # Qpid is not in openSUSE - ( ! is_suse ) -} - function rabbit_setuser { local user="$1" pass="$2" found="" out="" out=$(sudo rabbitmqctl list_users) || @@ -314,85 +140,6 @@ function rabbit_setuser { sudo rabbitmqctl set_permissions "$user" ".*" ".*" ".*" } -# Set up the various configuration files used by the qpidd broker -function _configure_qpid { - - # the location of the configuration files have changed since qpidd 0.14 - local qpid_conf_file - if [ -e /etc/qpid/qpidd.conf ]; then - qpid_conf_file=/etc/qpid/qpidd.conf - elif [ -e /etc/qpidd.conf ]; then - qpid_conf_file=/etc/qpidd.conf - else - exit_distro_not_supported "qpidd.conf file not found!" - fi - - # force the ACL file to a known location - local qpid_acl_file=/etc/qpid/qpidd.acl - if [ ! 
-e $qpid_acl_file ]; then - sudo mkdir -p -m 755 `dirname $qpid_acl_file` - sudo touch $qpid_acl_file - sudo chmod o+r $qpid_acl_file - fi - sudo sed -i.bak '/^acl-file=/d' $qpid_conf_file - echo "acl-file=$qpid_acl_file" | sudo tee --append $qpid_conf_file - - sudo sed -i '/^auth=/d' $qpid_conf_file - if [ -z "$QPID_USERNAME" ]; then - # no QPID user configured, so disable authentication - # and access control - echo "auth=no" | sudo tee --append $qpid_conf_file - cat < Date: Tue, 16 Jun 2015 23:25:17 +0530 Subject: [PATCH 0290/2941] Invoke Heat via console script generated binaries Depends-On: Ic8f5b8dc85098de752bbf673c4b15c06fdc4162a Change-Id: Iffb6d09bfef593d854b38e68200ae6039c4727e7 --- lib/heat | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/lib/heat b/lib/heat index 5cb0dbf6d9..373b810f4a 100644 --- a/lib/heat +++ b/lib/heat @@ -53,6 +53,8 @@ HEAT_TEMPLATES_DIR=$HEAT_CONF_DIR/templates HEAT_API_HOST=${HEAT_API_HOST:-$HOST_IP} HEAT_API_PORT=${HEAT_API_PORT:-8004} +# Support entry points installation of console scripts +HEAT_BIN_DIR=$(get_python_exec_prefix) # other default options if [[ "$HEAT_STANDALONE" = "True" ]]; then @@ -190,7 +192,7 @@ function init_heat { # (re)create heat database recreate_database heat - $HEAT_DIR/bin/heat-manage db_sync + $HEAT_BIN_DIR/heat-manage db_sync create_heat_cache_dir } @@ -227,10 +229,10 @@ function install_heat_other { # start_heat() - Start running processes, including screen function start_heat { - run_process h-eng "$HEAT_DIR/bin/heat-engine --config-file=$HEAT_CONF" - run_process h-api "$HEAT_DIR/bin/heat-api --config-file=$HEAT_CONF" - run_process h-api-cfn "$HEAT_DIR/bin/heat-api-cfn --config-file=$HEAT_CONF" - run_process h-api-cw "$HEAT_DIR/bin/heat-api-cloudwatch --config-file=$HEAT_CONF" + run_process h-eng "$HEAT_BIN_DIR/heat-engine --config-file=$HEAT_CONF" + run_process h-api "$HEAT_BIN_DIR/heat-api --config-file=$HEAT_CONF" + run_process h-api-cfn "$HEAT_BIN_DIR/heat-api-cfn 
--config-file=$HEAT_CONF" + run_process h-api-cw "$HEAT_BIN_DIR/heat-api-cloudwatch --config-file=$HEAT_CONF" } # stop_heat() - Stop running processes From 1987fcc8a31478911d6c815eb0a94afcf9fa5788 Mon Sep 17 00:00:00 2001 From: Rob Crittenden Date: Wed, 10 Jun 2015 11:00:59 -0400 Subject: [PATCH 0291/2941] Replace pip-installed requests CA bundle with link If the version of python-requests required is higher than that provided by the operating system, pip will install it from upstream. The upstream version provides its own CA certificate bundle based on the Mozilla bundle, and defaults to that in case a CA certificate file is not specified for a request. The distribution-specific packages point to the system-wide CA bundle that can be managed by tools such as update-ca-trust (Fedora/RHEL) and update-ca-certificates (Debian/Ubuntu). When installing in SSL/TLS mode, either with SSL=True or by adding tls-proxy to ENABLED_SERVICES, if a non-systemwide CA bundle is used, then the CA generated by devstack will not be used causing the installation to fail. Replace the upstream-provided bundle with a link to the system bundle when possible. Change-Id: I651aec93398d583dcdc8323503792df7ca05a7e7 Closes-Bug: #1459789 --- lib/tls | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/lib/tls b/lib/tls index 09f1c2dfdd..8ff2027819 100644 --- a/lib/tls +++ b/lib/tls @@ -202,6 +202,7 @@ subjectAltName = \$ENV::SUBJECT_ALT_NAME # Create root and intermediate CAs # init_CA function init_CA { + fix_system_ca_bundle_path # Ensure CAs are built make_root_CA $ROOT_CA_DIR make_int_CA $INT_CA_DIR $ROOT_CA_DIR @@ -338,6 +339,29 @@ function make_root_CA { -outform PEM } +# If a non-system python-requests is installed then it will use the +# built-in CA certificate store rather than the distro-specific +# CA certificate store. Detect this and symlink to the correct +# one. If the value for the CA is not rooted in /etc then we know +# we need to change it. 
+function fix_system_ca_bundle_path { + if is_service_enabled tls-proxy || [ "$USE_SSL" == "True" ]; then + local capath=$(python -c $'try:\n from requests import certs\n print certs.where()\nexcept ImportError: pass') + + if [[ ! $capath == "" && ! $capath =~ ^/etc/.* && ! -L $capath ]]; then + if is_fedora; then + sudo rm -f $capath + sudo ln -s /etc/pki/tls/certs/ca-bundle.crt $capath + elif is_ubuntu; then + sudo rm -f $capath + sudo ln -s /etc/ssl/certs/ca-certificates.crt $capath + else + echo "Don't know how to set the CA bundle, expect the install to fail." + fi + fi + fi +} + # Certificate Input Configuration # =============================== From 40fc66324c94a54263c63e70e8f427940c04a0a7 Mon Sep 17 00:00:00 2001 From: Chris Dent Date: Wed, 17 Jun 2015 16:39:37 +0000 Subject: [PATCH 0292/2941] Remove debugging artifacts The change in 027e2ea741bdbcb6e1afc3fe527c3fdf045825c3 included some debugging code that should have been removed before being added. This removes it. Change-Id: Ia56e1eb7305683b6c00b27a727fc8e094c65a963 --- lib/ceilometer | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index f6f605b686..90c7705743 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -156,8 +156,6 @@ function _cleanup_ceilometer_apache_wsgi { # runs that a clean run would need to clean up function cleanup_ceilometer { if [ "$CEILOMETER_BACKEND" = 'mongodb' ] ; then - echo "### cleaning database" - read mongo ceilometer --eval "db.dropDatabase();" elif [ "$CEILOMETER_BACKEND" = 'es' ] ; then curl -XDELETE "localhost:9200/events_*" From ccd4c2e1775a214967ca287729facfc21bd4e59d Mon Sep 17 00:00:00 2001 From: Chris Dent Date: Wed, 17 Jun 2015 16:42:43 +0000 Subject: [PATCH 0293/2941] Install python-libvirt for ceilometer only if n-cpu enabled Nova is responsible for installing the libvirt package (if it is being used). It is required by python-libvirt but python-libvirt only required in ceilometer if nova compute is being used. 
There are some usage scenarios where nova compute is not being used so in that case don't install python-libvirt. Change-Id: I0db66f1c0526e24ade98de85989a5ed8d37f0c4f --- lib/ceilometer | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index 90c7705743..a577ee9ca8 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -334,12 +334,15 @@ function install_ceilometer { pip_install_gr pymongo fi - if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then - pip_install_gr libvirt-python - fi + # Only install virt drivers if we're running nova compute + if is_service_enabled n-cpu ; then + if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then + pip_install_gr libvirt-python + fi - if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then - pip_instal_gr oslo.vmware + if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then + pip_instal_gr oslo.vmware + fi fi if [ "$CEILOMETER_BACKEND" = 'es' ] ; then From f553ce24ea1fa860d19b5dfb14c286614552d509 Mon Sep 17 00:00:00 2001 From: Robert Collins Date: Wed, 17 Jun 2015 13:52:20 +1200 Subject: [PATCH 0294/2941] Actually install the requirements repo. The requirements repo has had a setup.cfg etc for a long time but only recently started using it. As it now has dependencies, we need to pip install it. To preserve compat with older requirements repos I haven't changed the call to invoke update-requirements yet, as we still have the update.py symlink. The pbr install is moved before requirements to ensure we don't trigger easy-install. 
Change-Id: I7d7e91694c9145fac0ddab8a9de5f789d723c641 --- lib/infra | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/lib/infra b/lib/infra index c825b4ee56..5fb185fd6a 100644 --- a/lib/infra +++ b/lib/infra @@ -29,9 +29,6 @@ REQUIREMENTS_DIR=$DEST/requirements # install_infra() - Collect source and prepare function install_infra { - # bring down global requirements - git_clone $REQUIREMENTS_REPO $REQUIREMENTS_DIR $REQUIREMENTS_BRANCH - # Install pbr if use_library_from_git "pbr"; then git_clone_by_name "pbr" @@ -41,6 +38,10 @@ function install_infra { # in via system packages. pip_install "-U" "pbr" fi + + # bring down global requirements + git_clone $REQUIREMENTS_REPO $REQUIREMENTS_DIR $REQUIREMENTS_BRANCH + pip_install $REQUIREMENTS_DIR } # Restore xtrace From 18d1cca6cb1e56827e04c2f38c9db745b881f98a Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Thu, 18 Jun 2015 06:32:00 -0400 Subject: [PATCH 0295/2941] Add oslo.service to devstack Add the ability to switch on oslo.service master using LIBS_FROM_GIT Change-Id: I00fe7776aea005b6d4e0a84fce54f33a862b57e3 --- lib/oslo | 2 ++ stackrc | 4 ++++ tests/test_libs_from_pypi.sh | 2 +- 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/lib/oslo b/lib/oslo index be26668dfb..554bec8945 100644 --- a/lib/oslo +++ b/lib/oslo @@ -37,6 +37,7 @@ GITDIR["oslo.middleware"]=$DEST/oslo.middleware GITDIR["oslo.policy"]=$DEST/oslo.policy GITDIR["oslo.rootwrap"]=$DEST/oslo.rootwrap GITDIR["oslo.serialization"]=$DEST/oslo.serialization +GITDIR["oslo.service"]=$DEST/oslo.service GITDIR["oslo.utils"]=$DEST/oslo.utils GITDIR["oslo.versionedobjects"]=$DEST/oslo.versionedobjects GITDIR["oslo.vmware"]=$DEST/oslo.vmware @@ -76,6 +77,7 @@ function install_oslo { _do_install_oslo_lib "oslo.policy" _do_install_oslo_lib "oslo.rootwrap" _do_install_oslo_lib "oslo.serialization" + _do_install_oslo_lib "oslo.service" _do_install_oslo_lib "oslo.utils" _do_install_oslo_lib "oslo.versionedobjects" _do_install_oslo_lib 
"oslo.vmware" diff --git a/stackrc b/stackrc index c16a9997a4..1ac1338f2c 100644 --- a/stackrc +++ b/stackrc @@ -382,6 +382,10 @@ GITBRANCH["oslo.rootwrap"]=${OSLORWRAP_BRANCH:-master} GITREPO["oslo.serialization"]=${OSLOSERIALIZATION_REPO:-${GIT_BASE}/openstack/oslo.serialization.git} GITBRANCH["oslo.serialization"]=${OSLOSERIALIZATION_BRANCH:-master} +# oslo.service +GITREPO["oslo.service"]=${OSLOSERVICE_REPO:-${GIT_BASE}/openstack/oslo.service.git} +GITBRANCH["oslo.service"]=${OSLOSERVICE_BRANCH:-master} + # oslo.utils GITREPO["oslo.utils"]=${OSLOUTILS_REPO:-${GIT_BASE}/openstack/oslo.utils.git} GITBRANCH["oslo.utils"]=${OSLOUTILS_BRANCH:-master} diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh index 2f0b1febe4..fc6596789b 100755 --- a/tests/test_libs_from_pypi.sh +++ b/tests/test_libs_from_pypi.sh @@ -39,7 +39,7 @@ ALL_LIBS+=" oslo.serialization python-saharaclient django_openstack_auth" ALL_LIBS+=" python-openstackclient oslo.rootwrap oslo.i18n" ALL_LIBS+=" python-ceilometerclient oslo.utils python-swiftclient" ALL_LIBS+=" python-neutronclient tooz ceilometermiddleware oslo.policy" -ALL_LIBS+=" debtcollector os-brick automaton futurist" +ALL_LIBS+=" debtcollector os-brick automaton futurist oslo.service" # Generate the above list with # echo ${!GITREPO[@]} From 40f3e33f734330c32f27882898ab1bb4ab8f3217 Mon Sep 17 00:00:00 2001 From: Robert Collins Date: Fri, 19 Jun 2015 08:04:00 +1200 Subject: [PATCH 0296/2941] Put requirements repo in a venv. This is to prevent any possible contamination of test results from its presence. 
Change-Id: I5a929854745650cc6a182ffc4d15c50caabdd727 --- inc/python | 6 +++--- lib/infra | 6 +++++- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/inc/python b/inc/python index 3d329b59a9..e3c5e61dd2 100644 --- a/inc/python +++ b/inc/python @@ -219,15 +219,15 @@ function setup_package_with_req_sync { if [[ "$REQUIREMENTS_MODE" == "soft" ]]; then if is_in_projects_txt $project_dir; then (cd $REQUIREMENTS_DIR; \ - python update.py $project_dir) + ./.venv/bin/python update.py $project_dir) else # soft update projects not found in requirements project.txt (cd $REQUIREMENTS_DIR; \ - python update.py -s $project_dir) + ./.venv/bin/python update.py -s $project_dir) fi else (cd $REQUIREMENTS_DIR; \ - python update.py $project_dir) + ./.venv/bin/python update.py $project_dir) fi fi diff --git a/lib/infra b/lib/infra index 5fb185fd6a..4cc25965ea 100644 --- a/lib/infra +++ b/lib/infra @@ -29,6 +29,8 @@ REQUIREMENTS_DIR=$DEST/requirements # install_infra() - Collect source and prepare function install_infra { + local PIP_VIRTUAL_ENV="$REQUIREMENTS_DIR/.venv" + # Install pbr if use_library_from_git "pbr"; then git_clone_by_name "pbr" @@ -41,7 +43,9 @@ function install_infra { # bring down global requirements git_clone $REQUIREMENTS_REPO $REQUIREMENTS_DIR $REQUIREMENTS_BRANCH - pip_install $REQUIREMENTS_DIR + [ ! -d $PIP_VIRTUAL_ENV ] && virtualenv $PIP_VIRTUAL_ENV + PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install -U pbr + PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install $REQUIREMENTS_DIR } # Restore xtrace From d16bfa48ee33f07ebb60221267082002aea5c47b Mon Sep 17 00:00:00 2001 From: Clint Byrum Date: Thu, 18 Jun 2015 13:22:35 -0700 Subject: [PATCH 0297/2941] Tune mysql a bit better for concurrent operations With PyMySQL in the projects we can expect things to happen more concurrently now. The query cache is a hindrance to concurrency, and more connections will be required. 
Change-Id: Icfb8cdbb9ed39cfd7732ad05fe740e01c767af7b --- lib/databases/mysql | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/databases/mysql b/lib/databases/mysql index f097fb21cb..0e477ca264 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -95,7 +95,10 @@ function configure_database_mysql { sudo bash -c "source $TOP_DIR/functions && \ iniset $my_conf mysqld bind-address 0.0.0.0 && \ iniset $my_conf mysqld sql_mode STRICT_ALL_TABLES && \ - iniset $my_conf mysqld default-storage-engine InnoDB" + iniset $my_conf mysqld default-storage-engine InnoDB && \ + iniset $my_conf mysqld max_connections 1024 && \ + iniset $my_conf mysqld query_cache_type OFF && \ + iniset $my_conf mysqld query_cache_size 0" if [[ "$DATABASE_QUERY_LOGGING" == "True" ]]; then From d72b839b7f15a8065d2f7bcceff0a9299ea9c901 Mon Sep 17 00:00:00 2001 From: "Sean M. Collins" Date: Thu, 18 Jun 2015 12:40:09 -0400 Subject: [PATCH 0298/2941] Docs: Use RFC5737 for Provider Networking That'll make things more clear that with the provider networking feature in DevStack, FIXED_RANGE will be a routed IPv4 subnet that gives routed IPv4 addresses to instances without using floating IPs. Change-Id: Ie26d75ac5ff285a25762c4f61fd9800b0382886b --- doc/source/guides/neutron.rst | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst index bdfd3a4afa..40a5632b86 100644 --- a/doc/source/guides/neutron.rst +++ b/doc/source/guides/neutron.rst @@ -261,15 +261,18 @@ controller node. 
## Neutron Networking options used to create Neutron Subnets - FIXED_RANGE="10.1.1.0/24" + FIXED_RANGE="203.0.113.0/24" PROVIDER_SUBNET_NAME="provider_net" PROVIDER_NETWORK_TYPE="vlan" SEGMENTATION_ID=2010 In this configuration we are defining FIXED_RANGE to be a -subnet that exists in the private RFC1918 address space - however -in a real setup FIXED_RANGE would be a public IP address range, so -that you could access your instances from the public internet. +publicly routed IPv4 subnet. In this specific instance we are using +the special TEST-NET-3 subnet defined in `RFC 5737 `_, +which is used for documentation. In your DevStack setup, FIXED_RANGE +would be a public IP address range that you or your organization has +allocated to you, so that you could access your instances from the +public internet. The following is a snippet of the DevStack configuration on the compute node. From 86923559a4feb4a7c1f01f69ffdbff6c67c6b785 Mon Sep 17 00:00:00 2001 From: Robert Collins Date: Fri, 19 Jun 2015 11:17:04 +1200 Subject: [PATCH 0299/2941] Fixup pbr in LIBS_FROM_GIT Change-Id: I32594f30a13c0757cc918c8a5e54ae182e985693 --- lib/infra | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/lib/infra b/lib/infra index 4cc25965ea..585e9b47bc 100644 --- a/lib/infra +++ b/lib/infra @@ -30,6 +30,12 @@ REQUIREMENTS_DIR=$DEST/requirements # install_infra() - Collect source and prepare function install_infra { local PIP_VIRTUAL_ENV="$REQUIREMENTS_DIR/.venv" + # bring down global requirements + git_clone $REQUIREMENTS_REPO $REQUIREMENTS_DIR $REQUIREMENTS_BRANCH + [ ! -d $PIP_VIRTUAL_ENV ] && virtualenv $PIP_VIRTUAL_ENV + # We don't care about testing git pbr in the requirements venv. + PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install -U pbr + PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install $REQUIREMENTS_DIR # Install pbr if use_library_from_git "pbr"; then @@ -40,12 +46,6 @@ function install_infra { # in via system packages. 
pip_install "-U" "pbr" fi - - # bring down global requirements - git_clone $REQUIREMENTS_REPO $REQUIREMENTS_DIR $REQUIREMENTS_BRANCH - [ ! -d $PIP_VIRTUAL_ENV ] && virtualenv $PIP_VIRTUAL_ENV - PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install -U pbr - PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install $REQUIREMENTS_DIR } # Restore xtrace From f4f01c63973246cbd7821fb28f0e8f9d74e4a131 Mon Sep 17 00:00:00 2001 From: Jamie Lennox Date: Fri, 19 Jun 2015 02:52:41 +0000 Subject: [PATCH 0300/2941] Use swift store config files in glance Using the swift_store_auth_address, swift_store_user and swift_store_key are marked as deprecated in glance in favour of using a standalone config file that provides multiple auth options. Create and use a standalone authentication file for communicating with swift. Change-Id: I9b5361ce6e1771781d7ae7226974604a7f9e5d00 --- lib/glance | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/lib/glance b/lib/glance index 016ade3479..47bad0e277 100644 --- a/lib/glance +++ b/lib/glance @@ -56,6 +56,7 @@ GLANCE_SEARCH_PASTE_INI=$GLANCE_CONF_DIR/glance-search-paste.ini GLANCE_CACHE_CONF=$GLANCE_CONF_DIR/glance-cache.conf GLANCE_POLICY_JSON=$GLANCE_CONF_DIR/policy.json GLANCE_SCHEMA_JSON=$GLANCE_CONF_DIR/schema-image.json +GLANCE_SWIFT_STORE_CONF=$GLANCE_CONF_DIR/glance-swift-store.conf if is_ssl_enabled_service "glance" || is_service_enabled tls-proxy; then GLANCE_SERVICE_PROTOCOL="https" @@ -145,11 +146,20 @@ function configure_glance { # Store the images in swift if enabled. 
if is_service_enabled s-proxy; then iniset $GLANCE_API_CONF glance_store default_store swift - iniset $GLANCE_API_CONF glance_store swift_store_auth_address $KEYSTONE_SERVICE_URI/v2.0/ - iniset $GLANCE_API_CONF glance_store swift_store_user $SERVICE_TENANT_NAME:glance-swift - iniset $GLANCE_API_CONF glance_store swift_store_key $SERVICE_PASSWORD iniset $GLANCE_API_CONF glance_store swift_store_create_container_on_put True + + iniset $GLANCE_API_CONF glance_store swift_store_config_file $GLANCE_SWIFT_STORE_CONF + iniset $GLANCE_API_CONF glance_store default_swift_reference ref1 iniset $GLANCE_API_CONF glance_store stores "file, http, swift" + + iniset $GLANCE_SWIFT_STORE_CONF ref1 user $SERVICE_TENANT_NAME:glance-swift + iniset $GLANCE_SWIFT_STORE_CONF ref1 key $SERVICE_PASSWORD + iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_address $KEYSTONE_SERVICE_URI/v2.0/ + + # commenting is not strictly necessary but it's confusing to have bad values in conf + inicomment $GLANCE_API_CONF glance_store swift_store_user + inicomment $GLANCE_API_CONF glance_store swift_store_key + inicomment $GLANCE_API_CONF glance_store swift_store_auth_address fi if is_service_enabled tls-proxy; then From ceca15db4d1d7cf652c25d0339ce90d9aad2b885 Mon Sep 17 00:00:00 2001 From: Jordan Pittier Date: Fri, 19 Jun 2015 11:46:36 +0200 Subject: [PATCH 0301/2941] Glance: move connection_url from DEFAULT section to DB section To get rid of Oslo_config deprecation warning. 
Change-Id: I9de475f4dea4a4496cc8b5e93aa6928235d4cb29 --- lib/glance | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/glance b/lib/glance index 016ade3479..650028450e 100644 --- a/lib/glance +++ b/lib/glance @@ -107,7 +107,7 @@ function configure_glance { iniset $GLANCE_REGISTRY_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL inicomment $GLANCE_REGISTRY_CONF DEFAULT log_file local dburl=`database_connection_url glance` - iniset $GLANCE_REGISTRY_CONF DEFAULT sql_connection $dburl + iniset $GLANCE_REGISTRY_CONF database connection $dburl iniset $GLANCE_REGISTRY_CONF DEFAULT use_syslog $SYSLOG iniset $GLANCE_REGISTRY_CONF DEFAULT workers "$API_WORKERS" iniset $GLANCE_REGISTRY_CONF paste_deploy flavor keystone @@ -120,7 +120,7 @@ function configure_glance { cp $GLANCE_DIR/etc/glance-api.conf $GLANCE_API_CONF iniset $GLANCE_API_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL inicomment $GLANCE_API_CONF DEFAULT log_file - iniset $GLANCE_API_CONF DEFAULT sql_connection $dburl + iniset $GLANCE_API_CONF database connection $dburl iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG iniset $GLANCE_API_CONF DEFAULT image_cache_dir $GLANCE_CACHE_DIR/ iniset $GLANCE_API_CONF paste_deploy flavor keystone+cachemanagement @@ -219,7 +219,7 @@ function configure_glance { iniset $GLANCE_SEARCH_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL inicomment $GLANCE_SEARCH_CONF DEFAULT log_file iniset $GLANCE_SEARCH_CONF DEFAULT use_syslog $SYSLOG - iniset $GLANCE_SEARCH_CONF DEFAULT sql_connection $dburl + iniset $GLANCE_SEARCH_CONF database connection $dburl iniset $GLANCE_SEARCH_CONF paste_deploy flavor keystone configure_auth_token_middleware $GLANCE_SEARCH_CONF glance $GLANCE_AUTH_CACHE_DIR/search From 0124e08ede770caf8b787d3a54e4683e7e1277cb Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 19 Jun 2015 08:26:45 -0400 Subject: [PATCH 0302/2941] refresh the devstack plugin docs, add plugin registry The devstack plugin docs mostly referred to in tree plugins, which is 
honestly something we don't want people doing. Instead restructure the whole document to talk about external plugins as the only kinds of plugins, and focus on a workflow to make that easy for people to work through. This also adds a plugin-registry page to start listing known plugins somewhere centrally. Some sample content was added, hopefully people will submit patches to include their plugins. This does drop the section on hypervisor plugins. That's not currently something that we expect a ton of people to work on, so diving into the code for this should be fine. Change-Id: Ifc0b831c90a1a45daa507a009d1dcffcd6e2deca --- doc/source/index.rst | 1 + doc/source/plugin-registry.rst | 73 ++++++++ doc/source/plugins.rst | 305 ++++++++++++++++++--------------- 3 files changed, 243 insertions(+), 136 deletions(-) create mode 100644 doc/source/plugin-registry.rst diff --git a/doc/source/index.rst b/doc/source/index.rst index 6d0edb0bfe..f15c3060e7 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -10,6 +10,7 @@ DevStack - an OpenStack Community Production overview configuration plugins + plugin-registry faq changes hacking diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst new file mode 100644 index 0000000000..2dd70d84e9 --- /dev/null +++ b/doc/source/plugin-registry.rst @@ -0,0 +1,73 @@ +.. + Note to reviewers: the intent of this file is to be easy for + community members to update. As such fast approving (single core +2) + is fine as long as you've identified that the plugin listed actually exists. + +========================== + DevStack Plugin Registry +========================== + +Since we've created the external plugin mechanism, it's gotten used by +a lot of projects. The following is a list of plugins that currently +exist. Any project that wishes to list their plugin here is welcomed +to. + +Official OpenStack Projects +=========================== + +The following are plugins that exist for official OpenStack projects. 
+ ++--------------------+-------------------------------------------+--------------------+ +|Plugin Name |URL |Comments | ++--------------------+-------------------------------------------+--------------------+ +|magnum |git://git.openstack.org/openstack/magnum | | ++--------------------+-------------------------------------------+--------------------+ +|trove |git://git.openstack.org/openstack/trove | | ++--------------------+-------------------------------------------+--------------------+ +|zaqar |git://git.openstack.org/openstack/zarar | | ++--------------------+-------------------------------------------+--------------------+ + + + +Drivers +======= + ++--------------------+-------------------------------------------------+------------------+ +|Plugin Name |URL |Comments | ++--------------------+-------------------------------------------------+------------------+ +|dragonflow |git://git.openstack.org/openstack/dragonflow |[d1]_ | ++--------------------+-------------------------------------------------+------------------+ +|odl |git://git.openstack.org/openstack/networking-odl |[d2]_ | ++--------------------+-------------------------------------------------+------------------+ + +.. [d1] demonstrates example of installing 3rd party SDN controller +.. 
[d2] demonstrates a pretty advanced set of modes that that allow + one to run OpenDayLight either from a pre-existing install, or + also from source + +Alternate Configs +================= + ++-------------+------------------------------------------------------------+------------+ +| Plugin Name | URL | Comments | +| | | | ++-------------+------------------------------------------------------------+------------+ +|glusterfs |git://git.openstack.org/stackforge/devstack-plugin-glusterfs| | ++-------------+------------------------------------------------------------+------------+ +| | | | ++-------------+------------------------------------------------------------+------------+ + +Additional Services +=================== + ++-------------+------------------------------------------+------------+ +| Plugin Name | URL | Comments | +| | | | ++-------------+------------------------------------------+------------+ +|ec2-api |git://git.openstack.org/stackforge/ec2api |[as1]_ | ++-------------+------------------------------------------+------------+ +| | | | ++-------------+------------------------------------------+------------+ + +.. [as1] first functional devstack plugin, hence why used in most of + the examples. diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst index c4ed2285cb..b166936a8b 100644 --- a/doc/source/plugins.rst +++ b/doc/source/plugins.rst @@ -2,78 +2,83 @@ Plugins ======= -DevStack has a couple of plugin mechanisms to allow easily adding -support for additional projects and features. +The OpenStack ecosystem is wide and deep, and only growing more so +every day. The value of DevStack is that it's simple enough to +understand what it's doing clearly. And yet we'd like to support as +much of the OpenStack Ecosystem as possible. We do that with plugins. -Extras.d Hooks -============== +DevStack plugins are bits of bash code that live outside the DevStack +tree. 
They are called through a strong contract, so these plugins can +be sure that they will continue to work in the future as DevStack +evolves. -These hooks are an extension of the service calls in -``stack.sh`` at specific points in its run, plus ``unstack.sh`` and -``clean.sh``. A number of the higher-layer projects are implemented in -DevStack using this mechanism. +Plugin Interface +================ -The script in ``extras.d`` is expected to be mostly a dispatcher to -functions in a ``lib/*`` script. The scripts are named with a -zero-padded two digits sequence number prefix to control the order that -the scripts are called, and with a suffix of ``.sh``. DevStack reserves -for itself the sequence numbers 00 through 09 and 90 through 99. +DevStack supports a standard mechansim for including plugins from +external repositories. The plugin interface assumes the following: -Below is a template that shows handlers for the possible command-line -arguments: +An external git repository that includes a ``devstack/`` top level +directory. Inside this directory there can be 2 files. -:: +- ``settings`` - a file containing global variables that will be + sourced very early in the process. This is helpful if other plugins + might depend on this one, and need access to global variables to do + their work. - # template.sh - DevStack extras.d dispatch script template + Your settings should include any ``enable_service`` lines required + by your plugin. This is especially important if you are kicking off + services using ``run_process`` as it only works with enabled + services. - # check for service enabled - if is_service_enabled template; then + Be careful to allow users to override global-variables for + customizing their environment. Usually it is best to provide a + default value only if the variable is unset or empty; e.g. in bash + syntax ``FOO=${FOO:-default}``. 
- if [[ "$1" == "source" ]]; then - # Initial source of lib script - source $TOP_DIR/lib/template - fi +- ``plugin.sh`` - the actual plugin. It is executed by devstack at + well defined points during a ``stack.sh`` run. The plugin.sh + internal structure is discussed bellow. - if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then - # Set up system services - echo_summary "Configuring system services Template" - install_package cowsay - elif [[ "$1" == "stack" && "$2" == "install" ]]; then - # Perform installation of service source - echo_summary "Installing Template" - install_template +Plugins are registered by adding the following to the localrc section +of ``local.conf``. - elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then - # Configure after the other layer 1 and 2 services have been configured - echo_summary "Configuring Template" - configure_template +They are added in the following format:: - elif [[ "$1" == "stack" && "$2" == "extra" ]]; then - # Initialize and start the template service - echo_summary "Initializing Template" - ##init_template - fi + [[local|localrc]] + enable_plugin [GITREF] - if [[ "$1" == "unstack" ]]; then - # Shut down template services - # no-op - : - fi +- ``name`` - an arbitrary name. (ex: glustfs, docker, zaqar, congress) +- ``giturl`` - a valid git url that can be cloned +- ``gitref`` - an optional git ref (branch / ref / tag) that will be + cloned. Defaults to master. - if [[ "$1" == "clean" ]]; then - # Remove state and transient data - # Remember clean.sh first calls unstack.sh - # no-op - : - fi - fi +An example would be as follows:: -The arguments are: + enable_plugin ec2api git://git.openstack.org/stackforge/ec2api + +plugin.sh contract +================== + +``plugin.sh`` is a bash script that will be called at specific points +during ``stack.sh``, ``unstack.sh``, and ``clean.sh``. 
It will be +called in the following way:: + + source $PATH/TO/plugin.sh [phase] -- **source** - Called by each script that utilizes ``extras.d`` hooks; - this replaces directly sourcing the ``lib/*`` script. -- **stack** - Called by ``stack.sh`` three times for different phases +``mode`` can be thought of as the major mode being called, currently +one of: ``stack``, ``unstack``, ``clean``. ``phase`` is used by modes +which have multiple points during their run where it's necessary to +be able to execute code. All existing ``mode`` and ``phase`` points +are considered **strong contracts** and won't be removed without a +reasonable deprecation period. Additional new ``mode`` or ``phase`` +points may be added at any time if we discover we need them to support +additional kinds of plugins in devstack. + +The current full list of ``mode`` and ``phase`` are: + +- **stack** - Called by ``stack.sh`` four times for different phases of its run: - **pre-install** - Called after system (OS) setup is complete and @@ -84,106 +89,90 @@ The arguments are: been configured. All configuration files for enabled services should exist at this point. - **extra** - Called near the end after layer 1 and 2 services have - been started. This is the existing hook and has not otherwise - changed. + been started. - **unstack** - Called by ``unstack.sh`` before other services are shut down. - **clean** - Called by ``clean.sh`` before other services are cleaned, but after ``unstack.sh`` has been called. +Example plugin +==================== -Externally Hosted Plugins -========================= +An example plugin would look something as follows. -Based on the extras.d hooks, DevStack supports a standard mechansim -for including plugins from external repositories. The plugin interface -assumes the following: +``devstack/settings``:: -An external git repository that includes a ``devstack/`` top level -directory. Inside this directory there can be 2 files. 
+ # settings file for template + enable_service template -- ``settings`` - a file containing global variables that will be - sourced very early in the process. This is helpful if other plugins - might depend on this one, and need access to global variables to do - their work. - Your settings should include any ``enable_service`` lines required - by your plugin. This is especially important if you are kicking off - services using ``run_process`` as it only works with enabled - services. +``devstack/plugin.sh``:: - Be careful to allow users to override global-variables for - customizing their environment. Usually it is best to provide a - default value only if the variable is unset or empty; e.g. in bash - syntax ``FOO=${FOO:-default}``. + # plugin.sh - DevStack plugin.sh dispatch script template -- ``plugin.sh`` - the actual plugin. It will be executed by devstack - during it's run. The run order will be done in the registration - order for these plugins, and will occur immediately after all in - tree extras.d dispatch at the phase in question. The plugin.sh - looks like the extras.d dispatcher above. + function install_template { + ... + } -Plugins are registered by adding the following to the localrc section -of ``local.conf``. + function init_template { + ... + } -They are added in the following format:: + function configure_template { + ... + } - [[local|localrc]] - enable_plugin [GITREF] + # check for service enabled + if is_service_enabled template; then -- ``name`` - an arbitrary name. (ex: glustfs, docker, zaqar, congress) -- ``giturl`` - a valid git url that can be cloned -- ``gitref`` - an optional git ref (branch / ref / tag) that will be - cloned. Defaults to master. 
+ if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then + # Set up system services + echo_summary "Configuring system services Template" + install_package cowsay -An example would be as follows:: + elif [[ "$1" == "stack" && "$2" == "install" ]]; then + # Perform installation of service source + echo_summary "Installing Template" + install_template - enable_plugin ec2api git://git.openstack.org/stackforge/ec2api + elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then + # Configure after the other layer 1 and 2 services have been configured + echo_summary "Configuring Template" + configure_template + + elif [[ "$1" == "stack" && "$2" == "extra" ]]; then + # Initialize and start the template service + echo_summary "Initializing Template" + init_template + fi + + if [[ "$1" == "unstack" ]]; then + # Shut down template services + # no-op + : + fi -Plugins for gate jobs ---------------------- - -All OpenStack plugins that wish to be used as gate jobs need to exist -in OpenStack's gerrit. Both ``openstack`` namespace and ``stackforge`` -namespace are fine. This allows testing of the plugin as well as -provides network isolation against upstream git repository failures -(which we see often enough to be an issue). - -Ideally plugins will be implemented as ``devstack`` directory inside -the project they are testing. For example, the stackforge/ec2-api -project has it's pluggin support in it's tree. - -In the cases where there is no "project tree" per say (like -integrating a backend storage configuration such as ceph or glusterfs) -it's also allowed to build a dedicated -``stackforge/devstack-plugin-FOO`` project to house the plugin. - -Note jobs must not require cloning of repositories during tests. -Tests must list their repository in the ``PROJECTS`` variable for -`devstack-gate -`_ -for the repository to be available to the test. Further information -is provided in the project creator's guide. 
- -Hypervisor -========== - -Hypervisor plugins are fairly new and condense most hypervisor -configuration into one place. - -The initial plugin implemented was for Docker support and is a useful -template for the required support. Plugins are placed in -``lib/nova_plugins`` and named ``hypervisor-`` where ```` is -the value of ``VIRT_DRIVER``. Plugins must define the following -functions: - -- ``install_nova_hypervisor`` - install any external requirements -- ``configure_nova_hypervisor`` - make configuration changes, including - those to other services -- ``start_nova_hypervisor`` - start any external services -- ``stop_nova_hypervisor`` - stop any external services -- ``cleanup_nova_hypervisor`` - remove transient data and cache + if [[ "$1" == "clean" ]]; then + # Remove state and transient data + # Remember clean.sh first calls unstack.sh + # no-op + : + fi + fi + +Plugin Execution Order +====================== + +Plugins are run after in tree services at each of the stages +above. For example, if you need something to happen before Keystone +starts, you should do that at the ``post-config`` phase. + +Multiple plugins can be specified in your ``local.conf``. When that +happens the plugins will be executed **in order** at each phase. This +allows plugins to conceptually depend on each other through +documenting to the user the order they must be declared. A formal +dependency mechanism is beyond the scope of the current work. System Packages =============== @@ -205,3 +194,47 @@ repository: - ``./devstack/files/rpms-suse/$plugin_name`` - Packages to install when running on SUSE Linux or openSUSE. + + +Using Plugins in the OpenStack Gate +=================================== + +For everyday use, DevStack plugins can exist in any git tree that's +accessible on the internet. However, when using DevStack plugins in +the OpenStack gate, they must live in projects in OpenStack's +gerrit. Both ``openstack`` namespace and ``stackforge`` namespace are +fine. 
This allows testing of the plugin as well as provides network +isolation against upstream git repository failures (which we see often +enough to be an issue). + +Ideally a plugin will be included within the ``devstack`` directory of +the project they are being tested. For example, the stackforge/ec2-api +project has its pluggin support in its own tree. + +However, some times a DevStack plugin might be used solely to +configure a backend service that will be used by the rest of +OpenStack, so there is no "project tree" per say. Good examples +include: integration of back end storage (e.g. ceph or glusterfs), +integration of SDN controllers (e.g. ovn, OpenDayLight), or +integration of alternate RPC systems (e.g. zmq, qpid). In these cases +the best practice is to build a dedicated +``stackforge/devstack-plugin-FOO`` project. + +To enable a plugin to be used in a gate job, the following lines will +be needed in your project.yaml definition:: + + # Because we are testing a non standard project, add the + # our project repository. This makes zuul do the right + # reference magic for testing changes. + export PROJECTS="stackforge/ec2-api $PROJECTS" + + # note the actual url here is somewhat irrelevant because it + # caches in nodepool, however make it a valid url for + # documentation purposes. + export DEVSTACK_LOCAL_CONFIG="enable_plugin ec2-api git://git.openstack.org/stackforge/ec2-api" + +See Also +======== + +For additional inspiration on devstack plugins you can check out the +`Plugin Registry `_. From 23bf045c183f53762e7771fe0081b3d4ab09e107 Mon Sep 17 00:00:00 2001 From: Jordan Pittier Date: Fri, 29 May 2015 11:38:22 +0200 Subject: [PATCH 0303/2941] Check if cinder is enabled before doing anything lvm2 related On some system Cinder is not enabled so we can't assume LVM is installed. 
So do not try to `sed` /etc/lvm/lvm.conf or clean LVM VG if cinder is not enabled Change-Id: I09b1a7bee0785e5e1bb7dc96158a654bd3f15c83 --- unstack.sh | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/unstack.sh b/unstack.sh index f0da9710a2..10e595809a 100755 --- a/unstack.sh +++ b/unstack.sh @@ -187,5 +187,10 @@ if [[ -n "$SCREEN" ]]; then fi # BUG: maybe it doesn't exist? We should isolate this further down. -clean_lvm_volume_group $DEFAULT_VOLUME_GROUP_NAME || /bin/true -clean_lvm_filter +# NOTE: Cinder automatically installs the lvm2 package, independently of the +# enabled backends. So if Cinder is enabled, we are sure lvm (lvremove, +# /etc/lvm/lvm.conf, etc.) is here. +if is_service_enabled cinder; then + clean_lvm_volume_group $DEFAULT_VOLUME_GROUP_NAME || /bin/true + clean_lvm_filter +fi From d3d78cc528ea2d35a8794ea21c2b8d043e3119e2 Mon Sep 17 00:00:00 2001 From: Jerry Zhao Date: Fri, 19 Jun 2015 18:57:13 -0700 Subject: [PATCH 0304/2941] use pip_install_gr for ceilometermiddleware Since this patch https://review.openstack.org/#/c/171685 has been merged in global requirements, better to switch to pip_install_gr. Change-Id: Ibe708d4f523c32ade3c6a273f80b9c38bb03e382 --- lib/ceilometer | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index a577ee9ca8..d7888d9502 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -366,10 +366,7 @@ function install_ceilometermiddleware { git_clone_by_name "ceilometermiddleware" setup_dev_lib "ceilometermiddleware" else - # BUG: this should be a pip_install_gr except it was never - # included in global-requirements. 
Needs to be fixed by - # https://bugs.launchpad.net/ceilometer/+bug/1441655 - pip_install ceilometermiddleware + pip_install_gr ceilometermiddleware fi } From e1fa0701b3920932d40b031b08d19c6fd2e3397e Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Sun, 21 Jun 2015 08:54:43 -0500 Subject: [PATCH 0305/2941] Update clouds.yaml If the user already has a clouds.yaml, update it with the "devstack" entry. Change-Id: Id02378b6f3a86f9fee201d91688205705202c0a8 --- stack.sh | 45 ++++++++++-------- tools/update_clouds_yaml.py | 95 +++++++++++++++++++++++++++++++++++++ 2 files changed, 119 insertions(+), 21 deletions(-) create mode 100755 tools/update_clouds_yaml.py diff --git a/stack.sh b/stack.sh index 0d9d836eca..26d9754f6f 100755 --- a/stack.sh +++ b/stack.sh @@ -1295,28 +1295,31 @@ fi # Save some values we generated for later use save_stackenv -# Write out a clouds.yaml file -# putting the location into a variable to allow for easier refactoring later -# to make it overridable. There is current no usecase where doing so makes -# sense, so I'm not actually doing it now. +# Update/create user clouds.yaml file. +# clouds.yaml will have a `devstack` entry for the `demo` user for the `demo` +# project. + +# The location is a variable to allow for easier refactoring later to make it +# overridable. There is currently no usecase where doing so makes sense, so +# it's not currently configurable. CLOUDS_YAML=~/.config/openstack/clouds.yaml -if [ ! 
-e $CLOUDS_YAML ]; then - mkdir -p $(dirname $CLOUDS_YAML) - cat >"$CLOUDS_YAML" <>"$CLOUDS_YAML" - fi -fi + +mkdir -p $(dirname $CLOUDS_YAML) + +CA_CERT_ARG='' +if [ -f "$SSL_BUNDLE_FILE" ]; then + CA_CERT_ARG="--os-cacert $SSL_BUNDLE_FILE" +fi +$TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack \ + --os-region-name $REGION_NAME \ + --os-identity-api-version $IDENTITY_API_VERSION \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_AUTH_URI/v$IDENTITY_API_VERSION \ + --os-username demo \ + --os-password $ADMIN_PASSWORD \ + --os-project-name demo # Wrapup configuration diff --git a/tools/update_clouds_yaml.py b/tools/update_clouds_yaml.py new file mode 100755 index 0000000000..08621352ad --- /dev/null +++ b/tools/update_clouds_yaml.py @@ -0,0 +1,95 @@ +#!/usr/bin/env python + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# Update the clouds.yaml file. 
+ + +import argparse +import os.path + +import yaml + + +class UpdateCloudsYaml(object): + def __init__(self, args): + if args.file: + self._clouds_path = args.file + self._create_directory = False + else: + self._clouds_path = os.path.expanduser( + '~/.config/openstack/clouds.yaml') + self._create_directory = True + self._clouds = {} + + self._cloud = args.os_cloud + self._cloud_data = { + 'region_name': args.os_region_name, + 'identity_api_version': args.os_identity_api_version, + 'auth': { + 'auth_url': args.os_auth_url, + 'username': args.os_username, + 'password': args.os_password, + 'project_name': args.os_project_name, + }, + } + if args.os_cacert: + self._cloud_data['cacert'] = args.os_cacert + + def run(self): + self._read_clouds() + self._update_clouds() + self._write_clouds() + + def _read_clouds(self): + try: + with open(self._clouds_path) as clouds_file: + self._clouds = yaml.load(clouds_file) + except IOError: + # The user doesn't have a clouds.yaml file. + print("The user clouds.yaml file didn't exist.") + self._clouds = {} + + def _update_clouds(self): + self._clouds.setdefault('clouds', {})[self._cloud] = self._cloud_data + + def _write_clouds(self): + + if self._create_directory: + clouds_dir = os.path.dirname(self._clouds_path) + os.makedirs(clouds_dir) + + with open(self._clouds_path, 'w') as clouds_file: + yaml.dump(self._clouds, clouds_file, default_flow_style=False) + + +def main(): + parser = argparse.ArgumentParser('Update clouds.yaml file.') + parser.add_argument('--file') + parser.add_argument('--os-cloud', required=True) + parser.add_argument('--os-region-name', default='RegionOne') + parser.add_argument('--os-identity-api-version', default='3') + parser.add_argument('--os-cacert') + parser.add_argument('--os-auth-url', required=True) + parser.add_argument('--os-username', required=True) + parser.add_argument('--os-password', required=True) + parser.add_argument('--os-project-name', required=True) + + args = parser.parse_args() + + 
update_clouds_yaml = UpdateCloudsYaml(args) + update_clouds_yaml.run() + + +if __name__ == "__main__": + main() From e123830c15a4bd07f7ea700a46ca1e158487cdd9 Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Sun, 21 Jun 2015 09:16:44 -0500 Subject: [PATCH 0306/2941] Add devstack-admin cloud to clouds.yaml A lot of commands developers use require admin by default, so add a "devstack-admin" cloud to clouds.yaml that has admin authority. $ openstack --os-cloud devstack-admin user list Change-Id: Ie0f1979c50901004418f8622d4ca79dc4bdadd8d --- stack.sh | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index 26d9754f6f..4228f35e0d 100755 --- a/stack.sh +++ b/stack.sh @@ -1296,8 +1296,9 @@ fi save_stackenv # Update/create user clouds.yaml file. -# clouds.yaml will have a `devstack` entry for the `demo` user for the `demo` -# project. +# clouds.yaml will have +# - A `devstack` entry for the `demo` user for the `demo` project. +# - A `devstack-admin` entry for the `admin` user for the `admin` project. # The location is a variable to allow for easier refactoring later to make it # overridable. 
There is currently no usecase where doing so makes sense, so @@ -1320,6 +1321,16 @@ $TOP_DIR/tools/update_clouds_yaml.py \ --os-username demo \ --os-password $ADMIN_PASSWORD \ --os-project-name demo +$TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-admin \ + --os-region-name $REGION_NAME \ + --os-identity-api-version $IDENTITY_API_VERSION \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_AUTH_URI/v$IDENTITY_API_VERSION \ + --os-username admin \ + --os-password $ADMIN_PASSWORD \ + --os-project-name admin # Wrapup configuration From 2f63da9e0e0d0491acab31cafcee530ca0982e2e Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Sun, 21 Jun 2015 09:02:59 -0700 Subject: [PATCH 0307/2941] Ensure yum-utils is installed before using yum-config-manager yum-utils provides yum-config-manager but the check for yum-utils is currently being done after the first usage of yum-config-manager, which fails if you don't have yum-utils already installed, so move it up before the first usage of yum-config-manager. Putting yum-utils in files/rpms/general doesn't help since yum-config-manager is used in stack.sh before tools/install_prereqs.sh is called. Closes-Bug: #1467270 Change-Id: I74996c76838b7dc50d847e3bedb2d04dc55b4431 --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 0d9d836eca..f1dc74f45d 100755 --- a/stack.sh +++ b/stack.sh @@ -263,6 +263,7 @@ gpgcheck=0 EOF # Enable a bootstrap repo. It is removed after finishing # the epel-release installation. + is_package_installed yum-utils || install_package yum-utils sudo yum-config-manager --enable epel-bootstrap yum_install epel-release || \ die $LINENO "Error installing EPEL repo, cannot continue" @@ -270,7 +271,6 @@ EOF sudo rm -f /etc/yum.repos.d/epel-bootstrap.repo # ... 
and also optional to be enabled - is_package_installed yum-utils || install_package yum-utils sudo yum-config-manager --enable rhel-7-server-optional-rpms RHEL_RDO_REPO_RPM=${RHEL7_RDO_REPO_RPM:-"https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm"} From c7e89f23808abf837d2a410ec9f4b1a452957c6d Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Mon, 1 Jun 2015 12:57:33 +0200 Subject: [PATCH 0308/2941] SUSE: Clean up libxslt/libxml2 related dependencies libxml2-devel is a dependency of libxslt-devel, so we don't need to explicitly include it. Also, since it is only really needed by python-lxml, consolidate it into devlibs and remove the copies. Also remove a non-existing package reference along the way Change-Id: If9afaaa93f2c485baa1efff74d7ae58c59713de6 --- files/rpms-suse/devlibs | 1 - files/rpms-suse/glance | 1 - files/rpms-suse/trove | 1 - 3 files changed, 3 deletions(-) delete mode 100644 files/rpms-suse/trove diff --git a/files/rpms-suse/devlibs b/files/rpms-suse/devlibs index bdb630a16f..54d13a33e9 100644 --- a/files/rpms-suse/devlibs +++ b/files/rpms-suse/devlibs @@ -1,6 +1,5 @@ libffi-devel # pyOpenSSL libopenssl-devel # pyOpenSSL -libxml2-devel # lxml libxslt-devel # lxml postgresql-devel # psycopg2 libmysqlclient-devel # MySQL-python diff --git a/files/rpms-suse/glance b/files/rpms-suse/glance index 0e58425b1f..bf512de575 100644 --- a/files/rpms-suse/glance +++ b/files/rpms-suse/glance @@ -1,2 +1 @@ -libxml2-devel python-devel diff --git a/files/rpms-suse/trove b/files/rpms-suse/trove deleted file mode 100644 index 96f8f29277..0000000000 --- a/files/rpms-suse/trove +++ /dev/null @@ -1 +0,0 @@ -libxslt1-dev From 0ae942b41c6dcd0fe7353e7d68574194fb72a66d Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Wed, 3 Jun 2015 14:09:05 +0200 Subject: [PATCH 0309/2941] Only install python-libguestfs bindings when needed Currently those bindings are missing from SLES12, and since they're not actually used unless file injection is 
enabled (which is not by default), only conditionally depend on it. Change-Id: I79a8d8ac7ad2fbd7d2fce696821d130218e43e03 --- lib/nova_plugins/functions-libvirt | 2 -- lib/nova_plugins/hypervisor-libvirt | 11 +++++++++-- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index 96d8a44b05..22b58e0065 100755 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -28,7 +28,6 @@ function install_libvirt { else install_package qemu-kvm install_package libguestfs0 - install_package python-guestfs fi install_package libvirt-bin libvirt-dev pip_install_gr libvirt-python @@ -37,7 +36,6 @@ function install_libvirt { install_package kvm install_package libvirt libvirt-devel pip_install_gr libvirt-python - install_package python-libguestfs fi } diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index a6a87f9164..f70b21a475 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -26,7 +26,7 @@ source $TOP_DIR/lib/nova_plugins/functions-libvirt # -------- # File injection is disabled by default in Nova. This will turn it back on. 
-ENABLE_FILE_INJECTION=${ENABLE_FILE_INJECTION:-False} +ENABLE_FILE_INJECTION=$(trueorfalse False ENABLE_FILE_INJECTION) # Entry Points @@ -60,7 +60,6 @@ function configure_nova_hypervisor { iniset $NOVA_CONF DEFAULT vnc_enabled "false" fi - ENABLE_FILE_INJECTION=$(trueorfalse False ENABLE_FILE_INJECTION) if [[ "$ENABLE_FILE_INJECTION" = "True" ]] ; then # When libguestfs is available for file injection, enable using # libguestfs to inspect the image and figure out the proper @@ -97,6 +96,14 @@ function install_nova_hypervisor { yum_install libcgroup-tools fi fi + + if [[ "$ENABLE_FILE_INJECTION" = "True" ]] ; then + if is_ubuntu; then + install_package python-guestfs + elif is_fedora || is_suse; then + install_package python-libguestfs + fi + fi } # start_nova_hypervisor - Start any required external services From 7ebe8e0751dab545091e0b114589087009cc4e22 Mon Sep 17 00:00:00 2001 From: Robert Collins Date: Tue, 23 Jun 2015 09:41:21 +1200 Subject: [PATCH 0310/2941] Don't install pbr in a venv Because PIP_VIRTUAL_ENV was set for the installation of requirements, and left around in scope, the installation of pbr no longer happened in a global context, it instead landed inside the virtual env. Unsetting the variable after requirements install gets us back to where we expect. This was an unintended side effect of the requirements-venv patch. 
Change-Id: I2c4cb4305fec81a5fd237edabee78874ccd0da22 --- lib/infra | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/infra b/lib/infra index 585e9b47bc..3d68e45bd9 100644 --- a/lib/infra +++ b/lib/infra @@ -37,6 +37,10 @@ function install_infra { PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install -U pbr PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install $REQUIREMENTS_DIR + # Unset the PIP_VIRTUAL_ENV so that PBR does not end up trapped + # down the VENV well + unset PIP_VIRTUAL_ENV + # Install pbr if use_library_from_git "pbr"; then git_clone_by_name "pbr" From 365d11dfa9fec3b1025c14e38ad3a66f8c79a148 Mon Sep 17 00:00:00 2001 From: Lucas Alvares Gomes Date: Tue, 23 Jun 2015 11:50:18 +0100 Subject: [PATCH 0311/2941] Ironic: Fix iPXE Connection timed out This patch fixes the problem of iPXE connection timing out when it's trying to fetch the configuration files and images from the HTTP server by accepting requests to HTTP server port on iptables. Closes-Bug: #1467894 Change-Id: I43d66335a97c376ab64d604ff807540d0decc401 --- lib/ironic | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/ironic b/lib/ironic index 4984be1861..ab1915abd6 100644 --- a/lib/ironic +++ b/lib/ironic @@ -658,6 +658,10 @@ function configure_iptables { # agent ramdisk gets instance image from swift sudo iptables -I INPUT -d $HOST_IP -p tcp --dport ${SWIFT_DEFAULT_BIND_PORT:-8080} -j ACCEPT || true fi + + if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then + sudo iptables -I INPUT -d $HOST_IP -p tcp --dport $IRONIC_HTTP_PORT -j ACCEPT || true + fi } function configure_tftpd { From 010963c677a1c4e558e09afa320595fa5584b329 Mon Sep 17 00:00:00 2001 From: Lucas Alvares Gomes Date: Tue, 23 Jun 2015 14:16:39 +0100 Subject: [PATCH 0312/2941] Correct logging_context_format_string for Ironic We should use "tenant" and "user" instead of "project_name" and "user_name" by calling setup_colorized_logging with these parameters. 
Closes-Bug: #1467942 Change-Id: I484ef431ac422e25545391ed41fab45060a7b821 --- lib/ironic | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ironic b/lib/ironic index 4984be1861..5c8509f98a 100644 --- a/lib/ironic +++ b/lib/ironic @@ -285,7 +285,7 @@ function configure_ironic { # Format logging if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then - setup_colorized_logging $IRONIC_CONF_FILE DEFAULT + setup_colorized_logging $IRONIC_CONF_FILE DEFAULT tenant user fi if [[ "$IRONIC_IPXE_ENABLED" == "True" ]]; then From 348c6ace71f791034dae19a3467d18cea3140d5a Mon Sep 17 00:00:00 2001 From: gong yong sheng Date: Tue, 23 Jun 2015 14:03:47 +0800 Subject: [PATCH 0313/2941] Use ip route replace to avoid the existing route Change-Id: I3cc82aca1e3fd26e3beb4baee1f11a9b45e8b9f7 Closes-Bug: 1467762 --- lib/neutron-legacy | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 3ac76a2586..279f6e937b 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -721,7 +721,7 @@ function start_neutron_agents { sudo ip addr del $IP dev $PUBLIC_INTERFACE sudo ip addr add $IP dev $OVS_PHYSICAL_BRIDGE done - sudo route add -net $FIXED_RANGE gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE + sudo ip route replace $FIXED_RANGE via $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE fi fi @@ -1275,7 +1275,7 @@ function _neutron_configure_router_v4 { fi ROUTER_GW_IP=`neutron port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F '"' -v subnet_id=$PUB_SUBNET_ID '$4 == subnet_id { print $8; }'` die_if_not_set $LINENO ROUTER_GW_IP "Failure retrieving ROUTER_GW_IP" - sudo route add -net $FIXED_RANGE gw $ROUTER_GW_IP + sudo ip route replace $FIXED_RANGE via $ROUTER_GW_IP fi _neutron_set_router_id fi @@ -1310,7 +1310,7 @@ function _neutron_configure_router_v6 { # Configure interface for public bridge sudo ip -6 addr add $ipv6_ext_gw_ip/$ipv6_cidr_len dev $ext_gw_interface - sudo ip -6 route add 
$FIXED_RANGE_V6 via $IPV6_ROUTER_GW_IP dev $ext_gw_interface + sudo ip -6 route replace $FIXED_RANGE_V6 via $IPV6_ROUTER_GW_IP dev $ext_gw_interface fi _neutron_set_router_id fi From 7af8a1b9b3180da54e2c9505228ad722db44ca27 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 24 Jun 2015 05:51:54 -0400 Subject: [PATCH 0314/2941] only soft enforce requirements not in projects.txt We're adding the ability to have devstack plugins, which should be much more free to require new things not in global requirements. Our old thinking of locking down all the requirements doesn't really work in a plugin model. Instead, if the project is in projects.txt, continue with the old behavior. If it is not, do a soft update (update all the requirements we know about, leave the ones we don't). This was previously the SOFT requirements update mode, but now it will just be the default. Change-Id: Ic0c6e01a6d7613d712ac9e7e4a378cc3a8ce75e6 --- inc/python | 16 ++++++---------- stackrc | 11 ----------- 2 files changed, 6 insertions(+), 21 deletions(-) diff --git a/inc/python b/inc/python index e3c5e61dd2..07a811e8c3 100644 --- a/inc/python +++ b/inc/python @@ -216,18 +216,14 @@ function setup_package_with_req_sync { local update_requirements=$(cd $project_dir && git diff --exit-code >/dev/null || echo "changed") if [[ $update_requirements != "changed" ]]; then - if [[ "$REQUIREMENTS_MODE" == "soft" ]]; then - if is_in_projects_txt $project_dir; then - (cd $REQUIREMENTS_DIR; \ - ./.venv/bin/python update.py $project_dir) - else - # soft update projects not found in requirements project.txt - (cd $REQUIREMENTS_DIR; \ - ./.venv/bin/python update.py -s $project_dir) - fi - else + if is_in_projects_txt $project_dir; then (cd $REQUIREMENTS_DIR; \ ./.venv/bin/python update.py $project_dir) + else + # soft update projects not found in requirements project.txt + echo "$project_dir not a constrained repository, soft enforcing requirements" + (cd $REQUIREMENTS_DIR; \ + ./.venv/bin/python update.py -s 
$project_dir) fi fi diff --git a/stackrc b/stackrc index 1ac1338f2c..9cd9c053d7 100644 --- a/stackrc +++ b/stackrc @@ -149,17 +149,6 @@ DATABASE_QUERY_LOGGING=$(trueorfalse False DATABASE_QUERY_LOGGING) # Zero disables timeouts GIT_TIMEOUT=${GIT_TIMEOUT:-0} -# Requirements enforcing mode -# -# - strict (default) : ensure all project requirements files match -# what's in global requirements. -# -# - soft : enforce requirements on everything in -# requirements/projects.txt, but do soft updates on all other -# repositories (i.e. sync versions for requirements that are in g-r, -# but pass through any extras) -REQUIREMENTS_MODE=${REQUIREMENTS_MODE:-strict} - # Repositories # ------------ From 442e4e962559479fa0000ad64e6fa34da2c141c8 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 24 Jun 2015 13:24:02 -0400 Subject: [PATCH 0315/2941] make test_with_retry a function We have this pattern of timeout with while tests for a non infinite while loop condition. It's enough of a pattern that we should probably extract it into a function to make it more widely used. Change-Id: I11afcda9fac9709acf2f52d256d6e97644d4727c --- functions-common | 13 +++++++++++++ lib/neutron-legacy | 12 ++++++------ 2 files changed, 19 insertions(+), 6 deletions(-) diff --git a/functions-common b/functions-common index c85052d5b0..061a9356f5 100644 --- a/functions-common +++ b/functions-common @@ -1967,6 +1967,19 @@ function stop_service { fi } +# Test with a finite retry loop. +# +function test_with_retry { + local testcmd=$1 + local failmsg=$2 + local until=${3:-10} + local sleep=${4:-0.5} + + if ! timeout $until sh -c "while ! 
$testcmd; do sleep $sleep; done"; then + die $LINENO "$failmsg" + fi +} + # Restore xtrace $XTRACE diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 3ac76a2586..4cfb026ae2 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -696,9 +696,10 @@ function start_neutron_service_and_check { if is_ssl_enabled_service "neutron"; then ssl_ca="--ca-certificate=${SSL_BUNDLE_FILE}" fi - if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget ${ssl_ca} --no-proxy -q -O- $service_protocol://$Q_HOST:$service_port; do sleep 1; done"; then - die $LINENO "Neutron did not start" - fi + + local testcmd="wget ${ssl_ca} --no-proxy -q -O- $service_protocol://$Q_HOST:$service_port" + test_with_retry "$testcmd" "Neutron did not start" $SERVICE_TIMEOUT + # Start proxy if enabled if is_service_enabled tls-proxy; then start_tls_proxy '*' $Q_PORT $Q_HOST $Q_PORT_INT & @@ -1380,9 +1381,8 @@ function _ssh_check_neutron { local timeout_sec=$5 local probe_cmd = "" probe_cmd=`_get_probe_cmd_prefix $from_net` - if ! timeout $timeout_sec sh -c "while ! $probe_cmd ssh -o StrictHostKeyChecking=no -i $key_file ${user}@$ip echo success; do sleep 1; done"; then - die $LINENO "server didn't become ssh-able!" - fi + local testcmd="$probe_cmd ssh -o StrictHostKeyChecking=no -i $key_file ${user}@$ip echo success" + test_with_retry "$testcmd" "server $ip didn't become ssh-able" $timeout_sec } # Neutron 3rd party programs From 3bae7d48c0bd283779c206152e6dcfa4c5883521 Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Sun, 21 Jun 2015 09:56:17 -0500 Subject: [PATCH 0316/2941] Keystone also handle /identity and /identity_admin When configured to run under Apache Httpd, keystone will also handle requests on /identity (public) and /identity_admin (admin). 
Change-Id: I4e6eb0cad1603aa0e612d0adc5431565da93870e --- files/apache-keystone.template | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/files/apache-keystone.template b/files/apache-keystone.template index 0b914e2b8f..6dd1ad9ea6 100644 --- a/files/apache-keystone.template +++ b/files/apache-keystone.template @@ -33,3 +33,23 @@ LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" %SSLCERTFILE% %SSLKEYFILE% + +Alias /identity %PUBLICWSGI% + + SetHandler wsgi-script + Options +ExecCGI + + WSGIProcessGroup keystone-public + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + + +Alias /identity_admin %ADMINWSGI% + + SetHandler wsgi-script + Options +ExecCGI + + WSGIProcessGroup keystone-admin + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + From b0160d0fa9b20a4c1bda01dda6ffceac6beb9842 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 23 Jun 2015 12:53:51 -0400 Subject: [PATCH 0317/2941] Log all input/output in rootwrap calls This should make it easier to understand possible interactions between rootwrap processes calling commands that might be the cause of race bugs. 
Closes-Bug: 1081524 Change-Id: Ic1f1fa42e4478a9d30f0f582a68f648935d0097d --- stack.sh | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/stack.sh b/stack.sh index 0d9d836eca..5b3f2c1183 100755 --- a/stack.sh +++ b/stack.sh @@ -212,6 +212,15 @@ is_package_installed sudo || install_package sudo sudo grep -q "^#includedir.*/etc/sudoers.d" /etc/sudoers || echo "#includedir /etc/sudoers.d" | sudo tee -a /etc/sudoers +# Conditionally setup detailed logging for sudo +if [[ -n "$LOG_SUDO" ]]; then + TEMPFILE=`mktemp` + echo "Defaults log_output" > $TEMPFILE + chmod 0440 $TEMPFILE + sudo chown root:root $TEMPFILE + sudo mv $TEMPFILE /etc/sudoers.d/00_logging +fi + # Set up DevStack sudoers TEMPFILE=`mktemp` echo "$STACK_USER ALL=(root) NOPASSWD:ALL" >$TEMPFILE From 1e3a89eeffc58525c30b8201a552e10776fba423 Mon Sep 17 00:00:00 2001 From: Tomoki Sekine Date: Thu, 25 Jun 2015 06:35:07 +0900 Subject: [PATCH 0318/2941] Fix typo: where is runs => where it runs Change-Id: I7dad52c5b5ea91d727bd8ee7253a64422e4ec210 --- doc/source/configuration.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 8e2e7ffa31..e91012fe2b 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -201,7 +201,7 @@ Enable Logging | *Defaults: ``LOGFILE="" LOGDAYS=7 LOG_COLOR=True``* | By default ``stack.sh`` output is only written to the console - where is runs. It can be sent to a file in addition to the console + where it runs. It can be sent to a file in addition to the console by setting ``LOGFILE`` to the fully-qualified name of the destination log file. A timestamp will be appended to the given filename for each run of ``stack.sh``. From 71947d5fd05c3609417351ab37b9ac24b0eae9ef Mon Sep 17 00:00:00 2001 From: "Sean M. 
Collins" Date: Wed, 17 Jun 2015 17:33:02 -0400 Subject: [PATCH 0319/2941] Fixes for Linux Bridge in the L3 agent Prior to this patch, the logic for configuring the interface used for the L3 agent was OVS specific. This patch introduces code to correctly identify the brq device that is used for the L3 agent when using the Linux Bridge mechanism driver. Change-Id: I1a36cad0fb790aaa37417a1176576293e4f2c87f Co-Authored-By: Jens Rosenboom --- lib/neutron-legacy | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 4cfb026ae2..ee72b600d7 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -1267,9 +1267,19 @@ function _neutron_configure_router_v4 { # This logic is specific to using the l3-agent for layer 3 if is_service_enabled q-l3; then # Configure and enable public bridge + local ext_gw_interface="none" if is_neutron_ovs_base_plugin && [[ "$Q_USE_NAMESPACE" = "True" ]]; then - local ext_gw_interface=$(_neutron_get_ext_gw_interface) + ext_gw_interface=$(_neutron_get_ext_gw_interface) + elif [[ "$Q_AGENT" = "linuxbridge" ]]; then + # Search for the brq device the neutron router and network for $FIXED_RANGE + # will be using. + # e.x. 
brq3592e767-da for NET_ID 3592e767-da66-4bcb-9bec-cdb03cd96102 + ext_gw_interface=brq${EXT_NET_ID:0:11} + fi + if [[ "$ext_gw_interface" != "none" ]]; then local cidr_len=${FLOATING_RANGE#*/} + local testcmd="ip -o link | grep -q $ext_gw_interface" + test_with_retry "$testcmd" "$ext_gw_interface creation failed" if [[ $(ip addr show dev $ext_gw_interface | grep -c $ext_gw_ip) == 0 && ( $Q_USE_PROVIDERNET_FOR_PUBLIC == "False" || $Q_USE_PUBLIC_VETH == "True" ) ]]; then sudo ip addr add $ext_gw_ip/$cidr_len dev $ext_gw_interface sudo ip link set $ext_gw_interface up From 614ca26b47076321e1e06d0d79b9f7c53c5ef259 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 26 Jun 2015 14:45:04 +1000 Subject: [PATCH 0320/2941] Install qemu-kvm package on centos/fedora Change I79a8d8ac7ad2fbd7d2fce696821d130218e43e03 removed the install of python-libguestfs, which was actually hiding a dependency issue on Centos. The "kvm" package is ultimately missing some bios files from "seabios-bin" -- however with python-libguestfs installed this was coming in via a dependency chain that pulled in qemu-kvm, which has the dependency. qemu-kvm is not strictly required as all the functionality is within qemu-system-x86. But while we get [1] sorted out this restores the job functionality. 
[1] https://bugzilla.redhat.com/show_bug.cgi?id=1235890 Change-Id: I3379bc497978befac48c5af0f1035b96d030b7eb --- files/rpms/nova | 1 + lib/nova_plugins/functions-libvirt | 7 +++++++ 2 files changed, 8 insertions(+) diff --git a/files/rpms/nova b/files/rpms/nova index ebd667454a..d32c332dd9 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -10,6 +10,7 @@ iptables iputils kpartx kvm # NOPRIME +qemu-kvm # NOPRIME libvirt-bin # NOPRIME libvirt-devel # NOPRIME libvirt-python # NOPRIME diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index 22b58e0065..5525cfd951 100755 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -34,6 +34,13 @@ function install_libvirt { #pip_install_gr elif is_fedora || is_suse; then install_package kvm + # there is a dependency issue with kvm (which is really just a + # wrapper to qemu-system-x86) that leaves some bios files out, + # so install qemu-kvm (which shouldn't strictly be needed, as + # everything has been merged into qemu-system-x86) to bring in + # the right packages. see + # https://bugzilla.redhat.com/show_bug.cgi?id=1235890 + install_package qemu-kvm install_package libvirt libvirt-devel pip_install_gr libvirt-python fi From f2a05497a3716b5eaa178b970d5cd2e6db865a97 Mon Sep 17 00:00:00 2001 From: Dmitry Tantsur Date: Fri, 26 Jun 2015 15:20:54 +0200 Subject: [PATCH 0321/2941] Drop no longer needed and broken check for cinder in is_service_enabled There is properly working is_cinder_enabled now, and this check actualy matches ironic-inspector, breaking its devstack plugin. 
Change-Id: I659ec9b9b2b49690fd075f9766ae8cbf19e81848 Closes-Bug: #1469160 --- functions-common | 1 - 1 file changed, 1 deletion(-) diff --git a/functions-common b/functions-common index c85052d5b0..9054c335b6 100644 --- a/functions-common +++ b/functions-common @@ -1754,7 +1754,6 @@ function is_service_enabled { [[ ${service} == n-cell-* && ${ENABLED_SERVICES} =~ "n-cell" ]] && enabled=0 [[ ${service} == n-cpu-* && ${ENABLED_SERVICES} =~ "n-cpu" ]] && enabled=0 [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && enabled=0 - [[ ${service} == "cinder" && ${ENABLED_SERVICES} =~ "c-" ]] && enabled=0 [[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && enabled=0 [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && enabled=0 [[ ${service} == "ironic" && ${ENABLED_SERVICES} =~ "ir-" ]] && enabled=0 From 68e6ae60e70161eb1f359912da42a450070846b6 Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Thu, 25 Jun 2015 18:15:05 -0500 Subject: [PATCH 0322/2941] Support identity API v3 when generate clouds.yaml When using IDENTITY_API_VERSION=3, the clouds.yaml must also set auth/user_domain_id and project_domain_id. 
Change-Id: If028f2935ea729276f40039a4003c07c08e91672 --- tools/update_clouds_yaml.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/update_clouds_yaml.py b/tools/update_clouds_yaml.py index 08621352ad..3a364fe982 100755 --- a/tools/update_clouds_yaml.py +++ b/tools/update_clouds_yaml.py @@ -43,6 +43,9 @@ def __init__(self, args): 'project_name': args.os_project_name, }, } + if args.os_identity_api_version == '3': + self._cloud_data['auth']['user_domain_id'] = 'default' + self._cloud_data['auth']['project_domain_id'] = 'default' if args.os_cacert: self._cloud_data['cacert'] = args.os_cacert From cef5e40e0c1479f12f78c9dab74dc3100b2f10f7 Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Thu, 25 Jun 2015 17:57:53 -0500 Subject: [PATCH 0323/2941] Support fernet token provider Keystone added the "fernet" token provider in Kilo. This adds support for it. Change-Id: I6b7342ea67157a40edc8b9ba3d84d118e39d86ed --- lib/keystone | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/lib/keystone b/lib/keystone index 7a949cf96f..99e31b48f9 100644 --- a/lib/keystone +++ b/lib/keystone @@ -313,6 +313,8 @@ function configure_keystone { iniset $KEYSTONE_CONF eventlet_server admin_workers "$API_WORKERS" # Public workers will use the server default, typically number of CPU. 
+ + iniset $KEYSTONE_CONF fernet_tokens key_repository "$KEYSTONE_CONF_DIR/fernet-keys/" } function configure_keystone_extensions { @@ -476,11 +478,15 @@ function init_keystone { $KEYSTONE_BIN_DIR/keystone-manage db_sync --extension "${extension_value}" done - if [[ "$KEYSTONE_TOKEN_FORMAT" != "uuid" ]]; then + if [[ "$KEYSTONE_TOKEN_FORMAT" == "pki" || "$KEYSTONE_TOKEN_FORMAT" == "pkiz" ]]; then # Set up certificates rm -rf $KEYSTONE_CONF_DIR/ssl $KEYSTONE_BIN_DIR/keystone-manage pki_setup fi + if [[ "$KEYSTONE_TOKEN_FORMAT" == "fernet" ]]; then + rm -rf "$KEYSTONE_CONF_DIR/fernet-keys/" + $KEYSTONE_BIN_DIR/keystone-manage fernet_setup + fi } # install_keystoneclient() - Collect source and prepare From dcc8a30f2a1cb597ebc609d10d34b45c68c9a74e Mon Sep 17 00:00:00 2001 From: Chris Dent Date: Sat, 27 Jun 2015 12:45:21 +0000 Subject: [PATCH 0324/2941] Be more explicit about jenkins project config In documentation like this (which is a huge boon) we should strive to be as explicit and helpful as possible, so this change tries to be more clear about what a project.yaml is and where one might go to create it or change it. Change-Id: Ia66a361fc7d79e511afa3ad903fffb122b86998b --- doc/source/plugins.rst | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst index b166936a8b..1b6f5e36ed 100644 --- a/doc/source/plugins.rst +++ b/doc/source/plugins.rst @@ -221,7 +221,9 @@ the best practice is to build a dedicated ``stackforge/devstack-plugin-FOO`` project. To enable a plugin to be used in a gate job, the following lines will -be needed in your project.yaml definition:: +be needed in your ``jenkins/jobs/.yaml`` definition in +`project-config +`_:: # Because we are testing a non standard project, add the # our project repository. 
This makes zuul do the right From 53a8f7c7bc1864a0e63643ebf90e7c66bf3cdeb7 Mon Sep 17 00:00:00 2001 From: Chris Dent Date: Sat, 27 Jun 2015 15:07:20 +0100 Subject: [PATCH 0325/2941] Fix typo in installation of vmware virtdriver The probably got introduced in recent adjustments to cleanup the virt driver installs. Change-Id: Ic51411d5bd9b18d395dbdf948c58fea2d53eba0d --- lib/ceilometer | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ceilometer b/lib/ceilometer index d7888d9502..ed9b93377e 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -341,7 +341,7 @@ function install_ceilometer { fi if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then - pip_instal_gr oslo.vmware + pip_install_gr oslo.vmware fi fi From 531017cf3e760dc1e9af25684465681277dec8d4 Mon Sep 17 00:00:00 2001 From: Steve Martinelli Date: Sat, 27 Jun 2015 03:37:39 +0000 Subject: [PATCH 0326/2941] Add oslo.cache to devstack as part of the graduation process, add oslo.cache to lib/oslo and stackrc. Change-Id: I2baf0384dd5d71d234a95b7acd0bfe5534f2732c --- lib/oslo | 2 ++ stackrc | 4 ++++ tests/test_libs_from_pypi.sh | 1 + 3 files changed, 7 insertions(+) diff --git a/lib/oslo b/lib/oslo index 554bec8945..be935bbe8c 100644 --- a/lib/oslo +++ b/lib/oslo @@ -26,6 +26,7 @@ GITDIR["automaton"]=$DEST/automaton GITDIR["cliff"]=$DEST/cliff GITDIR["debtcollector"]=$DEST/debtcollector GITDIR["futurist"]=$DEST/futurist +GITDIR["oslo.cache"]=$DEST/oslo.cache GITDIR["oslo.concurrency"]=$DEST/oslo.concurrency GITDIR["oslo.config"]=$DEST/oslo.config GITDIR["oslo.context"]=$DEST/oslo.context @@ -66,6 +67,7 @@ function install_oslo { _do_install_oslo_lib "cliff" _do_install_oslo_lib "debtcollector" _do_install_oslo_lib "futurist" + _do_install_oslo_lib "oslo.cache" _do_install_oslo_lib "oslo.concurrency" _do_install_oslo_lib "oslo.config" _do_install_oslo_lib "oslo.context" diff --git a/stackrc b/stackrc index 9cd9c053d7..f2aafe97fc 100644 --- a/stackrc +++ b/stackrc @@ -327,6 +327,10 @@ 
GITBRANCH["debtcollector"]=${DEBTCOLLECTOR_BRANCH:-master} GITREPO["automaton"]=${AUTOMATON_REPO:-${GIT_BASE}/openstack/automaton.git} GITBRANCH["automaton"]=${AUTOMATON_BRANCH:-master} +# oslo.cache +GITREPO["oslo.cache"]=${OSLOCACHE_REPO:-${GIT_BASE}/openstack/oslo.cache.git} +GITBRANCH["oslo.cache"]=${OSLOCACHE_BRANCH:-master} + # oslo.concurrency GITREPO["oslo.concurrency"]=${OSLOCON_REPO:-${GIT_BASE}/openstack/oslo.concurrency.git} GITBRANCH["oslo.concurrency"]=${OSLOCON_BRANCH:-master} diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh index fc6596789b..1f7169c689 100755 --- a/tests/test_libs_from_pypi.sh +++ b/tests/test_libs_from_pypi.sh @@ -40,6 +40,7 @@ ALL_LIBS+=" python-openstackclient oslo.rootwrap oslo.i18n" ALL_LIBS+=" python-ceilometerclient oslo.utils python-swiftclient" ALL_LIBS+=" python-neutronclient tooz ceilometermiddleware oslo.policy" ALL_LIBS+=" debtcollector os-brick automaton futurist oslo.service" +ALL_LIBS+=" oslo.cache" # Generate the above list with # echo ${!GITREPO[@]} From 5c0546e427a02ca7f84eac0894bc84073fa32638 Mon Sep 17 00:00:00 2001 From: Hirofumi Ichihara Date: Fri, 26 Jun 2015 17:43:28 +0900 Subject: [PATCH 0327/2941] Add cleanup for Linuxbridge-agent Change-Id: I53f445e7f8efd950823f79aca95b9e65d1544ee9 Closes-Bug: #1469609 --- lib/neutron_plugins/linuxbridge_agent | 14 ++++++++++++++ 1 file changed, 14 insertions(+) mode change 100644 => 100755 lib/neutron_plugins/linuxbridge_agent diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent old mode 100644 new mode 100755 index b348af9c4f..fefc1c33a8 --- a/lib/neutron_plugins/linuxbridge_agent +++ b/lib/neutron_plugins/linuxbridge_agent @@ -9,6 +9,20 @@ set +o xtrace function neutron_lb_cleanup { sudo brctl delbr $PUBLIC_BRIDGE + + if [[ "$Q_ML2_TENANT_NETWORK_TYPE" = "vxlan" ]]; then + for port in $(sudo brctl show | grep -o -e [a-zA-Z\-]*tap[0-9a-f\-]* -e vxlan-[0-9a-f\-]*); do + sudo ip link delete $port + done + elif 
[[ "$Q_ML2_TENANT_NETWORK_TYPE" = "vlan" ]]; then + for port in $(sudo brctl show | grep -o -e [a-zA-Z\-]*tap[0-9a-f\-]* -e ${LB_PHYSICAL_INTERFACE}\.[0-9a-f\-]*); do + sudo ip link delete $port + done + fi + for bridge in $(sudo brctl show |grep -o -e brq[0-9a-f\-]*); do + sudo ip link set $bridge down + sudo brctl delbr $bridge + done } function is_neutron_ovs_base_plugin { From bde24cb7c20a9797eb581a39a9f0839c4951300d Mon Sep 17 00:00:00 2001 From: Anandprakash Dnyaneshwar Tandale Date: Mon, 29 Jun 2015 15:04:30 +0530 Subject: [PATCH 0328/2941] Fixing the deprecated library function os.popen() is deprecated since version 2.6. Resolved with use of subprocess module. Change-Id: Ib6a91ee525e2e57d3901d2c0c1b2d1305bc4566f --- tools/worlddump.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/worlddump.py b/tools/worlddump.py index 7acfb5e97d..0f1a6a1da9 100755 --- a/tools/worlddump.py +++ b/tools/worlddump.py @@ -23,6 +23,7 @@ import os.path import sys +from subprocess import Popen def get_options(): parser = argparse.ArgumentParser( @@ -46,7 +47,7 @@ def _dump_cmd(cmd): print cmd print "-" * len(cmd) print - print os.popen(cmd).read() + subprocess.Popen(cmd, shell=True) def _header(name): From 04c0f0be2643ef95380a7dcf4b510d86ff11cd58 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Tue, 30 Jun 2015 13:47:51 +1000 Subject: [PATCH 0329/2941] Move failure trap after functions it uses Move the failure trap after the functions it uses, so that "delete_all" is defined when it is triggered. 
Change-Id: Icb2465d0f834b8cb2d46dca3c7df4ae06e49d9b5 --- exercises/neutron-adv-test.sh | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh index 04892b0e93..a0de4ccd37 100755 --- a/exercises/neutron-adv-test.sh +++ b/exercises/neutron-adv-test.sh @@ -19,18 +19,6 @@ echo "*********************************************************************" set -o errtrace -trap failed ERR -function failed { - local r=$? - set +o errtrace - set +o xtrace - echo "Failed to execute" - echo "Starting cleanup..." - delete_all - echo "Finished cleanup" - exit $r -} - # Print the commands being run so that we can see the command that triggers # an error. It is also useful for following allowing as the install occurs. set -o xtrace @@ -441,6 +429,18 @@ function main { fi } +trap failed ERR +function failed { + local r=$? + set +o errtrace + set +o xtrace + echo "Failed to execute" + echo "Starting cleanup..." + delete_all + echo "Finished cleanup" + exit $r +} + # Kick off script # --------------- From 4ffb4541452a95ae964c562c89e31c49e39dbaa7 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Tue, 30 Jun 2015 11:00:32 +1000 Subject: [PATCH 0330/2941] Add header guard to functions/functions-common To avoid sourcing this twice and getting globals mixed up, particularly when using multiple plugins, add a "header guard" that ensures we only source it once. In general I don't think functions/functions-common have been written or considered to be idempotent. I don't think going down that path is going to be a long-term solution as it's easy to break. 
Change-Id: Idca49eb996d2b7ff3779ec27ed672a2da7852590 Closes-Bug: #1469178 --- functions | 4 ++++ functions-common | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/functions b/functions index 1668e16b6d..e4aea677d6 100644 --- a/functions +++ b/functions @@ -10,6 +10,10 @@ # - ``GLANCE_HOSTPORT`` # +# ensure we don't re-source this in the same environment +[[ -z "$_DEVSTACK_FUNCTIONS" ]] || return 0 +declare -r _DEVSTACK_FUNCTIONS=1 + # Include the common functions FUNC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd) source ${FUNC_DIR}/functions-common diff --git a/functions-common b/functions-common index 061a9356f5..5f22bf0c6d 100644 --- a/functions-common +++ b/functions-common @@ -36,6 +36,10 @@ XTRACE=$(set +o | grep xtrace) set +o xtrace +# ensure we don't re-source this in the same environment +[[ -z "$_DEVSTACK_FUNCTIONS_COMMON" ]] || return 0 +declare -r _DEVSTACK_FUNCTIONS_COMMON=1 + # Global Config Variables declare -A GITREPO declare -A GITBRANCH From 432268b17bff090a8ea8cbbd46c430ddd5be98a1 Mon Sep 17 00:00:00 2001 From: Amey Bhide Date: Tue, 30 Jun 2015 11:39:05 -0700 Subject: [PATCH 0331/2941] Fix NameError in worlddump.py Change-Id: Ie87e5b5ead777c0153ed7fa5d1db5cc1ae444261 --- tools/worlddump.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/worlddump.py b/tools/worlddump.py index 0f1a6a1da9..628a69f7e5 100755 --- a/tools/worlddump.py +++ b/tools/worlddump.py @@ -47,7 +47,7 @@ def _dump_cmd(cmd): print cmd print "-" * len(cmd) print - subprocess.Popen(cmd, shell=True) + Popen(cmd, shell=True) def _header(name): From 77a7d11dfa828468f27bce5456d11a9827473b20 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Mon, 22 Jun 2015 16:44:13 -0400 Subject: [PATCH 0332/2941] Add new oslo libraries Full list for liberty is as follows: * oslo.service * oslo.reports * automaton * futurist oslo.cache was already added in the earlier review Some of the entries are already there, though automaton was missing in one spot. 
Made sure all references have all five libraries. Change-Id: Iffb720d46058424924469695a3ae1e4f20655f99 --- lib/oslo | 3 +++ stackrc | 4 ++++ tests/test_libs_from_pypi.sh | 2 +- 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/lib/oslo b/lib/oslo index be935bbe8c..123572cd7b 100644 --- a/lib/oslo +++ b/lib/oslo @@ -36,6 +36,7 @@ GITDIR["oslo.log"]=$DEST/oslo.log GITDIR["oslo.messaging"]=$DEST/oslo.messaging GITDIR["oslo.middleware"]=$DEST/oslo.middleware GITDIR["oslo.policy"]=$DEST/oslo.policy +GITDIR["oslo.reports"]=$DEST/oslo.reports GITDIR["oslo.rootwrap"]=$DEST/oslo.rootwrap GITDIR["oslo.serialization"]=$DEST/oslo.serialization GITDIR["oslo.service"]=$DEST/oslo.service @@ -64,6 +65,7 @@ function _do_install_oslo_lib { # install_oslo() - Collect source and prepare function install_oslo { + _do_install_oslo_lib "automaton" _do_install_oslo_lib "cliff" _do_install_oslo_lib "debtcollector" _do_install_oslo_lib "futurist" @@ -77,6 +79,7 @@ function install_oslo { _do_install_oslo_lib "oslo.messaging" _do_install_oslo_lib "oslo.middleware" _do_install_oslo_lib "oslo.policy" + _do_install_oslo_lib "oslo.reports" _do_install_oslo_lib "oslo.rootwrap" _do_install_oslo_lib "oslo.serialization" _do_install_oslo_lib "oslo.service" diff --git a/stackrc b/stackrc index f2aafe97fc..3f56151b00 100644 --- a/stackrc +++ b/stackrc @@ -367,6 +367,10 @@ GITBRANCH["oslo.middleware"]=${OSLOMID_BRANCH:-master} GITREPO["oslo.policy"]=${OSLOPOLICY_REPO:-${GIT_BASE}/openstack/oslo.policy.git} GITBRANCH["oslo.policy"]=${OSLOPOLICY_BRANCH:-master} +# oslo.reports +GITREPO["oslo.reports"]=${OSLOREPORTS_REPO:-${GIT_BASE}/openstack/oslo.reports.git} +GITBRANCH["oslo.reports"]=${OSLOREPORTS_BRANCH:-master} + # oslo.rootwrap GITREPO["oslo.rootwrap"]=${OSLORWRAP_REPO:-${GIT_BASE}/openstack/oslo.rootwrap.git} GITBRANCH["oslo.rootwrap"]=${OSLORWRAP_BRANCH:-master} diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh index 1f7169c689..b3b38e6baf 100755 --- 
a/tests/test_libs_from_pypi.sh +++ b/tests/test_libs_from_pypi.sh @@ -40,7 +40,7 @@ ALL_LIBS+=" python-openstackclient oslo.rootwrap oslo.i18n" ALL_LIBS+=" python-ceilometerclient oslo.utils python-swiftclient" ALL_LIBS+=" python-neutronclient tooz ceilometermiddleware oslo.policy" ALL_LIBS+=" debtcollector os-brick automaton futurist oslo.service" -ALL_LIBS+=" oslo.cache" +ALL_LIBS+=" oslo.cache oslo.reports" # Generate the above list with # echo ${!GITREPO[@]} From 99440f9d596668724690d9091e0a5def1d7f1d6d Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 1 Jul 2015 06:14:01 +1000 Subject: [PATCH 0333/2941] Wait for command in worlddump Wait for the command to complete and catch errors when running commands. Change-Id: I2c93b3bdd930ed8564e33bd2d45fe4e3f08f03f5 --- tools/worlddump.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/tools/worlddump.py b/tools/worlddump.py index 628a69f7e5..0a9f810877 100755 --- a/tools/worlddump.py +++ b/tools/worlddump.py @@ -21,9 +21,9 @@ import fnmatch import os import os.path +import subprocess import sys -from subprocess import Popen def get_options(): parser = argparse.ArgumentParser( @@ -47,7 +47,10 @@ def _dump_cmd(cmd): print cmd print "-" * len(cmd) print - Popen(cmd, shell=True) + try: + subprocess.check_call(cmd, shell=True) + except subprocess.CalledProcessError: + print "*** Failed to run: %s" % cmd def _header(name): From 3a9df1dab73e2cb2f27dd014543ab16f22ac3846 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 1 Jul 2015 06:18:47 +1000 Subject: [PATCH 0334/2941] Check for nova-compute before running kill Unconditionally running this can lead to confusing failure output from kill as the pgrep matches nothing when nova-compute isn't yet running. 
Change-Id: I37cb84fe8e0b393f49b8907af16a3e44f82c46a6 --- tools/worlddump.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tools/worlddump.py b/tools/worlddump.py index 0a9f810877..e4ba02b51c 100755 --- a/tools/worlddump.py +++ b/tools/worlddump.py @@ -112,6 +112,13 @@ def compute_consoles(): def guru_meditation_report(): _header("nova-compute Guru Meditation Report") + + try: + subprocess.check_call(["pgrep","nova-compute"]) + except subprocess.CalledProcessError: + print "Skipping as nova-compute does not appear to be running" + return + _dump_cmd("kill -s USR1 `pgrep nova-compute`") print "guru meditation report in nova-compute log" From 5c7f39fdcd29a74c402840021ca55f47c359fbf1 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 1 Jul 2015 06:29:27 +1000 Subject: [PATCH 0335/2941] Add worlddump unit test Add a worlddump unit test to avoid simple breakages Change-Id: I6b87d4dbe22da2c1ca5ceb13134e9bb570f5cef2 --- tests/test_worlddump.sh | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100755 tests/test_worlddump.sh diff --git a/tests/test_worlddump.sh b/tests/test_worlddump.sh new file mode 100755 index 0000000000..f407d407c0 --- /dev/null +++ b/tests/test_worlddump.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +# Simple test of worlddump.py + +TOP=$(cd $(dirname "$0")/.. && pwd) + +source $TOP/tests/unittest.sh + +OUT_DIR=$(mktemp -d) + +$TOP/tools/worlddump.py -d $OUT_DIR + +if [[ $? -ne 0 ]]; then + fail "worlddump failed" +else + + # worlddump creates just one output file + OUT_FILE=($OUT_DIR/*.txt) + + if [ ! -r $OUT_FILE ]; then + failed "worlddump output not seen" + else + passed "worlddump output $OUT_FILE" + + if [[ $(stat -c %s $OUT_DIR/*.txt) -gt 0 ]]; then + passed "worlddump output is not zero sized" + fi + + # put more extensive examination here, if required. 
+ fi +fi + +rm -rf $OUT_DIR + +report_results From f750a6fedbb34399e1ee4dd540c13028d53c0e13 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Wed, 1 Jul 2015 12:17:35 +0200 Subject: [PATCH 0336/2941] Already dead process tolerance in unstack.sh The gate/updown.sh calls the unstack.sh with -ex option. Normally we do not use -e with unstack.sh. The unstack.sh can fail if the service already stopped, and it also can have flaky failures on the gate. For example the stop_swift function tries to kill swift in two different ways, and if the first one succeeds before the 2th attempt the pkill fails the whole unstack.sh. This change accepts kill failure. Normally the kill can fail if the process does not exits, or when you do not have permission to the kill operation. Since the permission issue is very unlikely in our case, this change does not tries to distinguish the two operation. The behavior of the unstack.sh wen you are not using -ex should not be changed by this change. Change-Id: I64bf3cbe1b60c96f5b271dcfb620c3d4b50de26b --- functions-common | 2 +- lib/neutron_plugins/services/loadbalancer | 2 +- lib/swift | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/functions-common b/functions-common index 6ab567a4c6..f122e56f98 100644 --- a/functions-common +++ b/functions-common @@ -1342,7 +1342,7 @@ function screen_stop_service { if is_service_enabled $service; then # Clean up the screen window - screen -S $SCREEN_NAME -p $service -X kill + screen -S $SCREEN_NAME -p $service -X kill || true fi } diff --git a/lib/neutron_plugins/services/loadbalancer b/lib/neutron_plugins/services/loadbalancer index f465cc94b4..34190f9a56 100644 --- a/lib/neutron_plugins/services/loadbalancer +++ b/lib/neutron_plugins/services/loadbalancer @@ -42,7 +42,7 @@ function neutron_agent_lbaas_configure_agent { function neutron_lbaas_stop { pids=$(ps aux | awk '/haproxy/ { print $2 }') - [ ! -z "$pids" ] && sudo kill $pids + [ ! 
-z "$pids" ] && sudo kill $pids || true } # Restore xtrace diff --git a/lib/swift b/lib/swift index 0cd51aaddf..5b73981ed2 100644 --- a/lib/swift +++ b/lib/swift @@ -772,7 +772,7 @@ function stop_swift { stop_process s-${type} done # Blast out any stragglers - pkill -f swift- + pkill -f swift- || true } function swift_configure_tempurls { From 2ba4a7214c96542987e1c379a28765a242136b12 Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Fri, 26 Jun 2015 10:45:44 +0200 Subject: [PATCH 0337/2941] Fixed detection of a project in projects.txt Before the fix, requirements soft-update was used for projects that are in the file. Change-Id: I095d42521f54b45a6b13837e2f8375fa04532faa Closes-Bug: #1469067 --- inc/python | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/inc/python b/inc/python index 07a811e8c3..9a7cea0a07 100644 --- a/inc/python +++ b/inc/python @@ -195,7 +195,7 @@ function setup_develop { function is_in_projects_txt { local project_dir=$1 local project_name=$(basename $project_dir) - return grep "/$project_name\$" $REQUIREMENTS_DIR/projects.txt >/dev/null + grep -q "/$project_name\$" $REQUIREMENTS_DIR/projects.txt } # ``pip install -e`` the package, which processes the dependencies From 3381e09f72e7edbf39080893212b533b8e01a3a9 Mon Sep 17 00:00:00 2001 From: Sergey Lukjanov Date: Wed, 1 Jul 2015 14:20:23 +0300 Subject: [PATCH 0338/2941] Move Sahara into in-tree plugin Once the Sahara related code moved to Sahara repo and used, we can remove Sahara specific code from Devstack. 
Partial-Implements: bp sahara-devstack-intree Change-Id: I34412b5cb2e86944b8555b8fd04b43556eb2bbe6 Depends-on: I2e00b2ebc59dd3be6a0539dea2985f2e801a1bd7 Depends-on: I07c3fede473030e8a110cbf5a08309f890905abf --- MAINTAINERS.rst | 5 - doc/source/index.rst | 3 - doc/source/plugin-registry.rst | 2 + exercises/sahara.sh | 49 ------- extras.d/70-sahara.sh | 29 ---- lib/sahara | 259 --------------------------------- stack.sh | 2 +- stackrc | 8 - tests/test_libs_from_pypi.sh | 2 +- 9 files changed, 4 insertions(+), 355 deletions(-) delete mode 100755 exercises/sahara.sh delete mode 100644 extras.d/70-sahara.sh delete mode 100644 lib/sahara diff --git a/MAINTAINERS.rst b/MAINTAINERS.rst index eeb1f21b61..d4968a6051 100644 --- a/MAINTAINERS.rst +++ b/MAINTAINERS.rst @@ -63,11 +63,6 @@ OpenFlow Agent (ofagent) * YAMAMOTO Takashi * Fumihiko Kakuma -Sahara -~~~~~~ - -* Sergey Lukjanov - Swift ~~~~~ diff --git a/doc/source/index.rst b/doc/source/index.rst index f15c3060e7..2dd0241fba 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -170,7 +170,6 @@ Scripts * `lib/nova `__ * `lib/oslo `__ * `lib/rpc\_backend `__ -* `lib/sahara `__ * `lib/swift `__ * `lib/tempest `__ * `lib/tls `__ @@ -181,7 +180,6 @@ Scripts * `extras.d/50-ironic.sh `__ * `extras.d/60-ceph.sh `__ -* `extras.d/70-sahara.sh `__ * `extras.d/70-tuskar.sh `__ * `extras.d/70-zaqar.sh `__ * `extras.d/80-tempest.sh `__ @@ -238,7 +236,6 @@ Exercises * `exercises/floating\_ips.sh `__ * `exercises/horizon.sh `__ * `exercises/neutron-adv-test.sh `__ -* `exercises/sahara.sh `__ * `exercises/sec\_groups.sh `__ * `exercises/swift.sh `__ * `exercises/volumes.sh `__ diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 2dd70d84e9..c5c4e1eaa5 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -22,6 +22,8 @@ The following are plugins that exist for official OpenStack projects. 
+--------------------+-------------------------------------------+--------------------+ |magnum |git://git.openstack.org/openstack/magnum | | +--------------------+-------------------------------------------+--------------------+ +|sahara |git://git.openstack.org/openstack/sahara | | ++--------------------+-------------------------------------------+--------------------+ |trove |git://git.openstack.org/openstack/trove | | +--------------------+-------------------------------------------+--------------------+ |zaqar |git://git.openstack.org/openstack/zarar | | diff --git a/exercises/sahara.sh b/exercises/sahara.sh deleted file mode 100755 index 8cad94562d..0000000000 --- a/exercises/sahara.sh +++ /dev/null @@ -1,49 +0,0 @@ -#!/usr/bin/env bash - -# **sahara.sh** - -# Sanity check that Sahara started if enabled - -echo "*********************************************************************" -echo "Begin DevStack Exercise: $0" -echo "*********************************************************************" - -# This script exits on an error so that errors don't compound and you see -# only the first error that occurred. -set -o errexit - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following allowing as the install occurs. -set -o xtrace - - -# Settings -# ======== - -# Keep track of the current directory -EXERCISE_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) - -# Import common functions -source $TOP_DIR/functions - -# Import configuration -source $TOP_DIR/openrc - -# Import exercise configuration -source $TOP_DIR/exerciserc - -is_service_enabled sahara || exit 55 - -if is_ssl_enabled_service "sahara" || is_service_enabled tls-proxy; then - SAHARA_SERVICE_PROTOCOL="https" -fi - -SAHARA_SERVICE_PROTOCOL=${SAHARA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} - -$CURL_GET $SAHARA_SERVICE_PROTOCOL://$SERVICE_HOST:8386/ 2>/dev/null | grep -q 'Auth' || die $LINENO "Sahara API isn't functioning!" 
- -set +o xtrace -echo "*********************************************************************" -echo "SUCCESS: End DevStack Exercise: $0" -echo "*********************************************************************" diff --git a/extras.d/70-sahara.sh b/extras.d/70-sahara.sh deleted file mode 100644 index f177766d3b..0000000000 --- a/extras.d/70-sahara.sh +++ /dev/null @@ -1,29 +0,0 @@ -# sahara.sh - DevStack extras script to install Sahara - -if is_service_enabled sahara; then - if [[ "$1" == "source" ]]; then - # Initial source - source $TOP_DIR/lib/sahara - elif [[ "$1" == "stack" && "$2" == "install" ]]; then - echo_summary "Installing sahara" - install_sahara - install_python_saharaclient - cleanup_sahara - elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then - echo_summary "Configuring sahara" - configure_sahara - create_sahara_accounts - elif [[ "$1" == "stack" && "$2" == "extra" ]]; then - echo_summary "Initializing sahara" - sahara_register_images - start_sahara - fi - - if [[ "$1" == "unstack" ]]; then - stop_sahara - fi - - if [[ "$1" == "clean" ]]; then - cleanup_sahara - fi -fi diff --git a/lib/sahara b/lib/sahara deleted file mode 100644 index 51e431afc7..0000000000 --- a/lib/sahara +++ /dev/null @@ -1,259 +0,0 @@ -#!/bin/bash -# -# lib/sahara - -# Dependencies: -# ``functions`` file -# ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined - -# ``stack.sh`` calls the entry points in this order: -# -# install_sahara -# install_python_saharaclient -# configure_sahara -# sahara_register_images -# start_sahara -# stop_sahara -# cleanup_sahara - -# Save trace setting -XTRACE=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - -# Set up default repos - -# Set up default directories -GITDIR["python-saharaclient"]=$DEST/python-saharaclient -SAHARA_DIR=$DEST/sahara - -SAHARA_CONF_DIR=${SAHARA_CONF_DIR:-/etc/sahara} -SAHARA_CONF_FILE=${SAHARA_CONF_DIR}/sahara.conf - -if is_ssl_enabled_service "sahara" || is_service_enabled tls-proxy; then 
- SAHARA_SERVICE_PROTOCOL="https" -fi -SAHARA_SERVICE_HOST=${SAHARA_SERVICE_HOST:-$SERVICE_HOST} -SAHARA_SERVICE_PORT=${SAHARA_SERVICE_PORT:-8386} -SAHARA_SERVICE_PORT_INT=${SAHARA_SERVICE_PORT_INT:-18386} -SAHARA_SERVICE_PROTOCOL=${SAHARA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} - -SAHARA_AUTH_CACHE_DIR=${SAHARA_AUTH_CACHE_DIR:-/var/cache/sahara} - -SAHARA_ENABLED_PLUGINS=${SAHARA_ENABLED_PLUGINS:-vanilla,hdp,cdh,spark,fake} - -# Support entry points installation of console scripts -if [[ -d $SAHARA_DIR/bin ]]; then - SAHARA_BIN_DIR=$SAHARA_DIR/bin -else - SAHARA_BIN_DIR=$(get_python_exec_prefix) -fi - -# Tell Tempest this project is present -TEMPEST_SERVICES+=,sahara - -# Functions -# --------- - -# create_sahara_accounts() - Set up common required sahara accounts -# -# Tenant User Roles -# ------------------------------ -# service sahara admin -function create_sahara_accounts { - - create_service_user "sahara" - - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - - # TODO: remove "data_processing" service when #1356053 will be fixed - local sahara_service_old=$(openstack service create \ - "data_processing" \ - --name "sahara" \ - --description "Sahara Data Processing" \ - -f value -c id - ) - local sahara_service_new=$(openstack service create \ - "data-processing" \ - --name "sahara" \ - --description "Sahara Data Processing" \ - -f value -c id - ) - get_or_create_endpoint $sahara_service_old \ - "$REGION_NAME" \ - "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \ - "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \ - "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" - get_or_create_endpoint $sahara_service_new \ - "$REGION_NAME" \ - "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \ - "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" \ - 
"$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT/v1.1/\$(tenant_id)s" - fi -} - -# cleanup_sahara() - Remove residual data files, anything left over from -# previous runs that would need to clean up. -function cleanup_sahara { - - # Cleanup auth cache dir - sudo rm -rf $SAHARA_AUTH_CACHE_DIR -} - -# configure_sahara() - Set config files, create data dirs, etc -function configure_sahara { - sudo install -d -o $STACK_USER $SAHARA_CONF_DIR - - if [[ -f $SAHARA_DIR/etc/sahara/policy.json ]]; then - cp -p $SAHARA_DIR/etc/sahara/policy.json $SAHARA_CONF_DIR - fi - - # Create auth cache dir - sudo install -d -o $STACK_USER -m 700 $SAHARA_AUTH_CACHE_DIR - rm -rf $SAHARA_AUTH_CACHE_DIR/* - - configure_auth_token_middleware $SAHARA_CONF_FILE sahara $SAHARA_AUTH_CACHE_DIR - - iniset_rpc_backend sahara $SAHARA_CONF_FILE DEFAULT - - # Set configuration to send notifications - - if is_service_enabled ceilometer; then - iniset $SAHARA_CONF_FILE DEFAULT enable_notifications "true" - iniset $SAHARA_CONF_FILE DEFAULT notification_driver "messaging" - fi - - iniset $SAHARA_CONF_FILE DEFAULT verbose True - iniset $SAHARA_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - - iniset $SAHARA_CONF_FILE DEFAULT plugins $SAHARA_ENABLED_PLUGINS - - iniset $SAHARA_CONF_FILE database connection `database_connection_url sahara` - - if is_service_enabled neutron; then - iniset $SAHARA_CONF_FILE DEFAULT use_neutron true - - if is_ssl_enabled_service "neutron" || is_service_enabled tls-proxy; then - iniset $SAHARA_CONF_FILE neutron ca_file $SSL_BUNDLE_FILE - fi - else - iniset $SAHARA_CONF_FILE DEFAULT use_neutron false - fi - - if is_service_enabled heat; then - iniset $SAHARA_CONF_FILE DEFAULT infrastructure_engine heat - - if is_ssl_enabled_service "heat" || is_service_enabled tls-proxy; then - iniset $SAHARA_CONF_FILE heat ca_file $SSL_BUNDLE_FILE - fi - else - iniset $SAHARA_CONF_FILE DEFAULT infrastructure_engine direct - fi - - if is_ssl_enabled_service "cinder" || 
is_service_enabled tls-proxy; then - iniset $SAHARA_CONF_FILE cinder ca_file $SSL_BUNDLE_FILE - fi - - if is_ssl_enabled_service "nova" || is_service_enabled tls-proxy; then - iniset $SAHARA_CONF_FILE nova ca_file $SSL_BUNDLE_FILE - fi - - if is_ssl_enabled_service "swift" || is_service_enabled tls-proxy; then - iniset $SAHARA_CONF_FILE swift ca_file $SSL_BUNDLE_FILE - fi - - if is_ssl_enabled_service "key" || is_service_enabled tls-proxy; then - iniset $SAHARA_CONF_FILE keystone ca_file $SSL_BUNDLE_FILE - fi - - # Register SSL certificates if provided - if is_ssl_enabled_service sahara; then - ensure_certificates SAHARA - - iniset $SAHARA_CONF_FILE ssl cert_file "$SAHARA_SSL_CERT" - iniset $SAHARA_CONF_FILE ssl key_file "$SAHARA_SSL_KEY" - fi - - iniset $SAHARA_CONF_FILE DEFAULT use_syslog $SYSLOG - - # Format logging - if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then - setup_colorized_logging $SAHARA_CONF_FILE DEFAULT - fi - - if is_service_enabled tls-proxy; then - # Set the service port for a proxy to take the original - iniset $SAHARA_CONF_FILE DEFAULT port $SAHARA_SERVICE_PORT_INT - fi - - recreate_database sahara - $SAHARA_BIN_DIR/sahara-db-manage --config-file $SAHARA_CONF_FILE upgrade head -} - -# install_sahara() - Collect source and prepare -function install_sahara { - git_clone $SAHARA_REPO $SAHARA_DIR $SAHARA_BRANCH - setup_develop $SAHARA_DIR -} - -# install_python_saharaclient() - Collect source and prepare -function install_python_saharaclient { - if use_library_from_git "python-saharaclient"; then - git_clone_by_name "python-saharaclient" - setup_dev_lib "python-saharaclient" - fi -} - -# sahara_register_images() - Registers images in sahara image registry -function sahara_register_images { - if is_service_enabled heat && [[ ! 
-z "$HEAT_CFN_IMAGE_URL" ]]; then - # Register heat image for Fake plugin - local fake_plugin_properties="--property _sahara_tag_0.1=True" - fake_plugin_properties+=" --property _sahara_tag_fake=True" - fake_plugin_properties+=" --property _sahara_username=fedora" - openstack --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT image set $(basename "$HEAT_CFN_IMAGE_URL" ".qcow2") $fake_plugin_properties - fi -} - -# start_sahara() - Start running processes, including screen -function start_sahara { - local service_port=$SAHARA_SERVICE_PORT - local service_protocol=$SAHARA_SERVICE_PROTOCOL - if is_service_enabled tls-proxy; then - service_port=$SAHARA_SERVICE_PORT_INT - service_protocol="http" - fi - - run_process sahara "$SAHARA_BIN_DIR/sahara-all --config-file $SAHARA_CONF_FILE" - run_process sahara-api "$SAHARA_BIN_DIR/sahara-api --config-file $SAHARA_CONF_FILE" - run_process sahara-eng "$SAHARA_BIN_DIR/sahara-engine --config-file $SAHARA_CONF_FILE" - - echo "Waiting for Sahara to start..." - if ! 
wait_for_service $SERVICE_TIMEOUT $service_protocol://$SAHARA_SERVICE_HOST:$service_port; then - die $LINENO "Sahara did not start" - fi - - # Start proxies if enabled - if is_service_enabled tls-proxy; then - start_tls_proxy '*' $SAHARA_SERVICE_PORT $SAHARA_SERVICE_HOST $SAHARA_SERVICE_PORT_INT & - fi -} - -# stop_sahara() - Stop running processes -function stop_sahara { - # Kill the Sahara screen windows - stop_process sahara - stop_process sahara-api - stop_process sahara-eng -} - - -# Restore xtrace -$XTRACE - -# Local variables: -# mode: shell-script -# End: diff --git a/stack.sh b/stack.sh index 591c0dc614..1f355e539b 100755 --- a/stack.sh +++ b/stack.sh @@ -505,7 +505,7 @@ source $TOP_DIR/lib/rpc_backend check_rpc_backend # Service to enable with SSL if ``USE_SSL`` is True -SSL_ENABLED_SERVICES="key,nova,cinder,glance,s-proxy,neutron,sahara" +SSL_ENABLED_SERVICES="key,nova,cinder,glance,s-proxy,neutron" if is_service_enabled tls-proxy && [ "$USE_SSL" == "True" ]; then die $LINENO "tls-proxy and SSL are mutually exclusive" diff --git a/stackrc b/stackrc index f2aafe97fc..91eef07fe1 100644 --- a/stackrc +++ b/stackrc @@ -225,10 +225,6 @@ NEUTRON_VPNAAS_BRANCH=${NEUTRON_VPNAAS_BRANCH:-master} NOVA_REPO=${NOVA_REPO:-${GIT_BASE}/openstack/nova.git} NOVA_BRANCH=${NOVA_BRANCH:-master} -# data processing service -SAHARA_REPO=${SAHARA_REPO:-${GIT_BASE}/openstack/sahara.git} -SAHARA_BRANCH=${SAHARA_BRANCH:-master} - # object storage service SWIFT_REPO=${SWIFT_REPO:-${GIT_BASE}/openstack/swift.git} SWIFT_BRANCH=${SWIFT_BRANCH:-master} @@ -290,10 +286,6 @@ GITBRANCH["python-neutronclient"]=${NEUTRONCLIENT_BRANCH:-master} GITREPO["python-novaclient"]=${NOVACLIENT_REPO:-${GIT_BASE}/openstack/python-novaclient.git} GITBRANCH["python-novaclient"]=${NOVACLIENT_BRANCH:-master} -# python saharaclient -GITREPO["python-saharaclient"]=${SAHARACLIENT_REPO:-${GIT_BASE}/openstack/python-saharaclient.git} -GITBRANCH["python-saharaclient"]=${SAHARACLIENT_BRANCH:-master} - # python 
swift client library GITREPO["python-swiftclient"]=${SWIFTCLIENT_REPO:-${GIT_BASE}/openstack/python-swiftclient.git} GITBRANCH["python-swiftclient"]=${SWIFTCLIENT_BRANCH:-master} diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh index 1f7169c689..5532347848 100755 --- a/tests/test_libs_from_pypi.sh +++ b/tests/test_libs_from_pypi.sh @@ -35,7 +35,7 @@ ALL_LIBS+=" python-glanceclient python-ironicclient tempest-lib" ALL_LIBS+=" oslo.messaging oslo.log cliff python-heatclient stevedore" ALL_LIBS+=" python-cinderclient glance_store oslo.concurrency oslo.db" ALL_LIBS+=" oslo.versionedobjects oslo.vmware keystonemiddleware" -ALL_LIBS+=" oslo.serialization python-saharaclient django_openstack_auth" +ALL_LIBS+=" oslo.serialization django_openstack_auth" ALL_LIBS+=" python-openstackclient oslo.rootwrap oslo.i18n" ALL_LIBS+=" python-ceilometerclient oslo.utils python-swiftclient" ALL_LIBS+=" python-neutronclient tooz ceilometermiddleware oslo.policy" From 99970389925f8fb79d55d1a2e00a2d0407f76580 Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Wed, 1 Jul 2015 11:28:32 -0500 Subject: [PATCH 0339/2941] Replace RPC Backend text in README Review https://review.openstack.org/#/c/192154/ removed support for RPC backends other than RabbitMQ, but we should still document how to disable rabbit. Change-Id: I1fd64b5f02573c58d7b0d1005c39a22c459a09a5 --- README.md | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/README.md b/README.md index ebcb0184e5..750190b716 100644 --- a/README.md +++ b/README.md @@ -115,6 +115,16 @@ following in the `localrc` section: `mysql` is the default database. +# RPC Backend + +Support for a RabbitMQ RPC backend is included. Additional RPC backends may +be available via external plugins. Enabling or disabling RabbitMQ is handled +via the usual service functions and ``ENABLED_SERVICES``. 
+ +Example disabling RabbitMQ in ``local.conf``: + + disable_service rabbit + # Apache Frontend Apache web server can be enabled for wsgi services that support being deployed From e57a33224bcc8a22b4c64be2a7e3b7e784a2536a Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Sat, 20 Jun 2015 14:48:00 -0700 Subject: [PATCH 0340/2941] Set compute-feature-enabled.attach_encrypted_volume Tempest option This allows setting the new option in Tempest for toggling whether or not the Cinder encrypted volume tests should run. Depends-On: I48eba7c645cc1c979fd766ae9c05efb00957f787 Related-Bug: #1463525 Change-Id: I9e12f8dc9e3e6b68dc031351cb081ee2bc6e6cbb --- lib/tempest | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/tempest b/lib/tempest index 5ea217f869..a84ade2a81 100644 --- a/lib/tempest +++ b/lib/tempest @@ -379,6 +379,7 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute-feature-enabled preserve_ports True # TODO(gilliard): Remove the live_migrate_paused_instances flag when Juno is end of life. iniset $TEMPEST_CONFIG compute-feature-enabled live_migrate_paused_instances True + iniset $TEMPEST_CONFIG compute-feature-enabled attach_encrypted_volume ${ATTACH_ENCRYPTED_VOLUME_AVAILABLE:-True} # Network iniset $TEMPEST_CONFIG network api_version 2.0 From a709b11a533c40bee910b8a4a4c9d102fab004cc Mon Sep 17 00:00:00 2001 From: Assaf Muller Date: Wed, 1 Jul 2015 18:19:11 -0400 Subject: [PATCH 0341/2941] Update Neutron section in README Change-Id: Ic4b354a587a1d5f83037fb5250e8e5c9cfe6d48c --- README.md | 37 ++++++++++++++----------------------- 1 file changed, 14 insertions(+), 23 deletions(-) diff --git a/README.md b/README.md index 455e1c69c6..cd83dd8292 100644 --- a/README.md +++ b/README.md @@ -193,7 +193,7 @@ services are started in background and managed by `swift-init` tool. 
Basic Setup -In order to enable Neutron a single node setup, you'll need the +In order to enable Neutron in a single node setup, you'll need the following settings in your `local.conf`: disable_service n-net @@ -203,47 +203,38 @@ following settings in your `local.conf`: enable_service q-l3 enable_service q-meta enable_service q-metering - # Optional, to enable tempest configuration as part of DevStack - enable_service tempest Then run `stack.sh` as normal. DevStack supports setting specific Neutron configuration flags to the -service, Open vSwitch plugin and LinuxBridge plugin configuration files. -To make use of this feature, the settings can be added to ``local.conf``. -The old ``Q_XXX_EXTRA_XXX_OPTS`` variables are deprecated and will be removed -in the near future. The ``local.conf`` headers for the replacements are: - -* ``Q_SRV_EXTRA_OPTS``: +service, ML2 plugin, DHCP and L3 configuration files: [[post-config|/$Q_PLUGIN_CONF_FILE]] - [linuxbridge] # or [ovs] + [ml2] + mechanism_drivers=openvswitch,l2population -Example extra config in `local.conf`: + [[post-config|$NEUTRON_CONF]] + [DEFAULT] + quota_port=42 - [[post-config|/$Q_PLUGIN_CONF_FILE]] - [agent] - tunnel_type=vxlan - vxlan_udp_port=8472 + [[post-config|$Q_L3_CONF_FILE]] + [DEFAULT] + agent_mode=legacy - [[post-config|$NEUTRON_CONF]] + [[post-config|$Q_DHCP_CONF_FILE]] [DEFAULT] - tenant_network_type=vxlan + dnsmasq_dns_servers = 8.8.8.8,8.8.4.4 -DevStack also supports configuring the Neutron ML2 plugin. The ML2 plugin -can run with the OVS, LinuxBridge, or Hyper-V agents on compute hosts. This -is a simple way to configure the ml2 plugin: +The ML2 plugin can run with the OVS, LinuxBridge, or Hyper-V agents on compute +hosts. 
This is a simple way to configure the ml2 plugin: # VLAN configuration - Q_PLUGIN=ml2 ENABLE_TENANT_VLANS=True # GRE tunnel configuration - Q_PLUGIN=ml2 ENABLE_TENANT_TUNNELS=True # VXLAN tunnel configuration - Q_PLUGIN=ml2 Q_ML2_TENANT_NETWORK_TYPE=vxlan The above will default in DevStack to using the OVS on each compute host. From 635a5ba9929e1594aacfc0229663f43898479e2a Mon Sep 17 00:00:00 2001 From: Robert Collins Date: Wed, 10 Jun 2015 08:48:06 +1200 Subject: [PATCH 0342/2941] constraints file support for devstack. Constraints files allow a global view of dependencies for devstack without the side effect that requirements files have of installing everything everytime. This is part of the cross project requirements-management spec. Change-Id: If089d30146629e6cf817edd634e5c2b80f1366dd --- inc/python | 22 ++++++++++++++++++---- stack.sh | 3 +++ stackrc | 6 ++++++ 3 files changed, 27 insertions(+), 4 deletions(-) diff --git a/inc/python b/inc/python index 07a811e8c3..ca185f05d2 100644 --- a/inc/python +++ b/inc/python @@ -66,7 +66,8 @@ function pip_install_gr { # Wrapper for ``pip install`` to set cache and proxy environment variables # Uses globals ``OFFLINE``, ``PIP_VIRTUAL_ENV``, -# ``PIP_UPGRADE``, ``TRACK_DEPENDS``, ``*_proxy`` +# ``PIP_UPGRADE``, ``TRACK_DEPENDS``, ``*_proxy``, +# ``USE_CONSTRAINTS`` # pip_install package [package ...] function pip_install { local xtrace=$(set +o | grep xtrace) @@ -103,6 +104,13 @@ function pip_install { fi fi + cmd_pip="$cmd_pip install" + + # Handle a constraints file, if needed. 
+ if [[ "$USE_CONSTRAINTS" == "True" ]]; then + cmd_pip="$cmd_pip -c $REQUIREMENTS_DIR/upper-constraints.txt" + fi + local pip_version=$(python -c "import pip; \ print(pip.__version__.strip('.')[0])") if (( pip_version<6 )); then @@ -116,7 +124,7 @@ function pip_install { https_proxy="${https_proxy:-}" \ no_proxy="${no_proxy:-}" \ PIP_FIND_LINKS=$PIP_FIND_LINKS \ - $cmd_pip install $upgrade \ + $cmd_pip $upgrade \ $@ # Also install test requirements @@ -128,7 +136,7 @@ function pip_install { https_proxy=${https_proxy:-} \ no_proxy=${no_proxy:-} \ PIP_FIND_LINKS=$PIP_FIND_LINKS \ - $cmd_pip install $upgrade \ + $cmd_pip $upgrade \ -r $test_req fi } @@ -215,7 +223,7 @@ function setup_package_with_req_sync { # ``errexit`` requires us to trap the exit code when the repo is changed local update_requirements=$(cd $project_dir && git diff --exit-code >/dev/null || echo "changed") - if [[ $update_requirements != "changed" ]]; then + if [[ $update_requirements != "changed" && "$USE_CONSTRAINTS" == "False" ]]; then if is_in_projects_txt $project_dir; then (cd $REQUIREMENTS_DIR; \ ./.venv/bin/python update.py $project_dir) @@ -227,6 +235,12 @@ function setup_package_with_req_sync { fi fi + if [ -n "$REQUIREMENTS_DIR" ]; then + # Constrain this package to this project directory from here on out. 
+ local name=$(awk '/^name.*=/ {print $3}' $project_dir/setup.cfg) + $REQUIREMENTS_DIR/.venv/bin/edit-constraints $REQUIREMENTS_DIR/upper-constraints.txt -- $name "$flags $project_dir" + fi + setup_package $project_dir $flags # We've just gone and possibly modified the user's source tree in an diff --git a/stack.sh b/stack.sh index 591c0dc614..4069509e96 100755 --- a/stack.sh +++ b/stack.sh @@ -683,6 +683,9 @@ save_stackenv $LINENO echo_summary "Installing package prerequisites" source $TOP_DIR/tools/install_prereqs.sh +# Normalise USE_CONSTRAINTS +USE_CONSTRAINTS=$(trueorfalse False USE_CONSTRAINTS) + # Configure an appropriate Python environment if [[ "$OFFLINE" != "True" ]]; then PYPI_ALTERNATIVE_URL=${PYPI_ALTERNATIVE_URL:-""} $TOP_DIR/tools/install_pip.sh diff --git a/stackrc b/stackrc index f2aafe97fc..e76abc0e85 100644 --- a/stackrc +++ b/stackrc @@ -149,6 +149,12 @@ DATABASE_QUERY_LOGGING=$(trueorfalse False DATABASE_QUERY_LOGGING) # Zero disables timeouts GIT_TIMEOUT=${GIT_TIMEOUT:-0} +# Constraints mode +# - False (default) : update git projects dependencies from global-requirements. +# +# - True : use upper-constraints.txt to constrain versions of packages intalled +# and do not edit projects at all. 
+USE_CONSTRAINTS=${USE_CONSTRAINTS:-False} # Repositories # ------------ From 0ffdb368a65d4e1532aba14c82c8b91e30db0acd Mon Sep 17 00:00:00 2001 From: Dean Troyer Date: Thu, 2 Jul 2015 09:09:51 -0500 Subject: [PATCH 0343/2941] Move USE_CONSTRAINTS normalization back to stackrc stackrc needs to do all of the initialization for situations (Grenade, unstack.sh, etc) that do not run stack.sh Change-Id: Ib8c7b923dde817b37f852515dd049fcf970b999a --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index e76abc0e85..c59d0a4a20 100644 --- a/stackrc +++ b/stackrc @@ -154,7 +154,7 @@ GIT_TIMEOUT=${GIT_TIMEOUT:-0} # # - True : use upper-constraints.txt to constrain versions of packages intalled # and do not edit projects at all. -USE_CONSTRAINTS=${USE_CONSTRAINTS:-False} +USE_CONSTRAINTS=$(trueorfalse False USE_CONSTRAINTS) # Repositories # ------------ From 180f5eb652c73463cd5ae7d0dbede6d9d31a5df5 Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Tue, 16 Jun 2015 13:14:31 -0400 Subject: [PATCH 0344/2941] Add IPv6 support to devstack infrastructure By default, most Openstack services are bound to 0.0.0.0 and service endpoints are registered as IPv4 addresses. With this change we introduce two new variables to control this behavior: SERVICE_IP_VERSION - can either be "4" or "6". When set to "4" (default if not set) devstack will operate as today - most services will open listen sockets on 0.0.0.0 and service endpoints will be registered using HOST_IP as the address. When set to "6" devstack services will open listen sockets on :: and service endpoints will be registered using HOST_IPV6 as the address. There is no support for "4+6", more work is required for that. HOST_IPV6 - if SERVICE_IP_VERSION=6 this must be an IPv6 address configured on the system. Some existing services, like the Openvswitch agent, will continue to use IPv4 addresses for things like tunnel endpoints. 
This is a current restriction in the code and can be updated at a later time. This change is just a first step to supporting IPv6-only control and data planes in devstack. This change is also partly based on two previous patches, https://review.openstack.org/#/c/140519/ and https://review.openstack.org/#/c/176898/ Change-Id: I5c0b775490ce54ab104fd5e89b20fb700212ae74 Co-Authored-By: Sean Collins Co-Authored-By: Baodong Li Co-Authored-By: Sridhar Gaddam Co-Authored-By: Adam Kacmarsky Co-Authored-By: Jeremy Alvis --- README.md | 16 +++++++++++++ doc/source/configuration.rst | 26 ++++++++++++++++++++ functions-common | 12 +++++++--- lib/cinder | 2 ++ lib/database | 13 ++++++++-- lib/databases/mysql | 4 ++-- lib/glance | 6 +++++ lib/neutron-legacy | 3 +++ lib/nova | 25 ++++++++++++++++---- lib/swift | 7 +++--- samples/local.conf | 15 ++++++------ stack.sh | 5 +++- stackrc | 46 +++++++++++++++++++++++++++++++++--- 13 files changed, 154 insertions(+), 26 deletions(-) diff --git a/README.md b/README.md index ebcb0184e5..34e451dc0b 100644 --- a/README.md +++ b/README.md @@ -360,6 +360,22 @@ Be aware that there are some features currently missing in cells, one notable one being security groups. The exercises have been patched to disable functionality not supported by cells. +# IPv6 + +By default, most Openstack services are bound to 0.0.0.0 +and service endpoints are registered as IPv4 addresses. +A new variable was created to control this behavior, and to +allow for operation over IPv6 instead of IPv4. + +For this, add the following to `local.conf`: + + SERVICE_IP_VERSION=6 + +When set to "6" devstack services will open listen sockets on :: +and service endpoints will be registered using HOST_IPV6 as the +address. The default value for this setting is `4`. Dual-mode +support, for example `4+6` is not currently supported. 
+ # Local Configuration diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index e91012fe2b..6052576e98 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -137,6 +137,11 @@ Ethernet interface to a bridge on the host. Setting it here also makes it available for ``openrc`` to set ``OS_AUTH_URL``. ``HOST_IP`` is not set by default. +``HOST_IPV6`` is normally detected on the first run of ``stack.sh`` but +will not be set if there is no IPv6 address on the default Ethernet interface. +Setting it here also makes it available for ``openrc`` to set ``OS_AUTH_URL``. +``HOST_IPV6`` is not set by default. + Common Configuration Variables ============================== @@ -391,6 +396,8 @@ Multi-host DevStack ENABLED_SERVICES=n-vol,n-cpu,n-net,n-api IP Version +---------- + | Default: ``IP_VERSION=4+6`` | This setting can be used to configure DevStack to create either an IPv4, IPv6, or dual stack tenant data network by setting ``IP_VERSION`` to @@ -418,6 +425,25 @@ IP Version | *Note: ``FIXED_RANGE_V6`` and ``IPV6_PRIVATE_NETWORK_GATEWAY`` can be configured with any valid IPv6 prefix. The default values make use of an auto-generated ``IPV6_GLOBAL_ID`` to comply with RFC 4193.* + | + + | Default: ``SERVICE_IP_VERSION=4`` + | This setting can be used to configure DevStack to enable services to + operate over either IPv4 or IPv6, by setting ``SERVICE_IP_VERSION`` to + either ``SERVICE_IP_VERSION=4`` or ``SERVICE_IP_VERSION=6`` respectively. + When set to ``4`` devstack services will open listen sockets on 0.0.0.0 + and service endpoints will be registered using ``HOST_IP`` as the address. + When set to ``6`` devstack services will open listen sockets on :: and + service endpoints will be registered using ``HOST_IPV6`` as the address. + The default value for this setting is ``4``. Dual-mode support, for + example ``4+6`` is not currently supported. 
+ | The following optional variable can be used to alter the default IPv6 + address used: + | + + :: + + HOST_IPV6=${some_local_ipv6_address} Examples ======== diff --git a/functions-common b/functions-common index 483b1fa696..39c1bfc4d7 100644 --- a/functions-common +++ b/functions-common @@ -46,7 +46,8 @@ TRACK_DEPENDS=${TRACK_DEPENDS:-False} # Save these variables to .stackenv STACK_ENV_VARS="BASE_SQL_CONN DATA_DIR DEST ENABLED_SERVICES HOST_IP \ KEYSTONE_AUTH_PROTOCOL KEYSTONE_AUTH_URI KEYSTONE_SERVICE_URI \ - LOGFILE OS_CACERT SERVICE_HOST SERVICE_PROTOCOL STACK_USER TLS_IP" + LOGFILE OS_CACERT SERVICE_HOST SERVICE_PROTOCOL STACK_USER TLS_IP \ + HOST_IPV6" # Saves significant environment variables to .stackenv for later use @@ -578,13 +579,14 @@ function get_default_host_ip { local floating_range=$2 local host_ip_iface=$3 local host_ip=$4 + local af=$5 # Search for an IP unless an explicit is set by ``HOST_IP`` environment variable if [ -z "$host_ip" -o "$host_ip" == "dhcp" ]; then host_ip="" # Find the interface used for the default route - host_ip_iface=${host_ip_iface:-$(ip route | awk '/default/ {print $5}' | head -1)} - local host_ips=$(LC_ALL=C ip -f inet addr show ${host_ip_iface} | awk '/inet/ {split($2,parts,"/"); print parts[1]}') + host_ip_iface=${host_ip_iface:-$(ip -f $af route | awk '/default/ {print $5}' | head -1)} + local host_ips=$(LC_ALL=C ip -f $af addr show ${host_ip_iface} | awk /$af'/ {split($2,parts,"/"); print parts[1]}') local ip for ip in $host_ips; do # Attempt to filter out IP addresses that are part of the fixed and @@ -593,6 +595,10 @@ function get_default_host_ip { # will be printed and the first IP from the interface will be used. # If that is not correct set ``HOST_IP`` in ``localrc`` to the correct # address. + if [[ "$af" == "inet6" ]]; then + host_ip=$ip + break; + fi if ! 
(address_in_net $ip $fixed_range || address_in_net $ip $floating_range); then host_ip=$ip break; diff --git a/lib/cinder b/lib/cinder index 81174474e4..ab315ac8a2 100644 --- a/lib/cinder +++ b/lib/cinder @@ -65,6 +65,7 @@ CINDER_SERVICE_HOST=${CINDER_SERVICE_HOST:-$SERVICE_HOST} CINDER_SERVICE_PORT=${CINDER_SERVICE_PORT:-8776} CINDER_SERVICE_PORT_INT=${CINDER_SERVICE_PORT_INT:-18776} CINDER_SERVICE_PROTOCOL=${CINDER_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} +CINDER_SERVICE_LISTEN_ADDRESS=${CINDER_SERVICE_LISTEN_ADDRESS:-$SERVICE_LISTEN_ADDRESS} # What type of LVM device should Cinder use for LVM backend # Defaults to default, which is thick, the other valid choice @@ -222,6 +223,7 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT api_paste_config $CINDER_API_PASTE_INI iniset $CINDER_CONF DEFAULT rootwrap_config "$CINDER_CONF_DIR/rootwrap.conf" iniset $CINDER_CONF DEFAULT osapi_volume_extension cinder.api.contrib.standard_extensions + iniset $CINDER_CONF DEFAULT osapi_volume_listen $CINDER_SERVICE_LISTEN_ADDRESS iniset $CINDER_CONF DEFAULT state_path $CINDER_STATE_PATH iniset $CINDER_CONF oslo_concurrency lock_path $CINDER_STATE_PATH iniset $CINDER_CONF DEFAULT periodic_interval $CINDER_PERIODIC_INTERVAL diff --git a/lib/database b/lib/database index ff1fafee26..5bbbe3144b 100644 --- a/lib/database +++ b/lib/database @@ -70,10 +70,19 @@ function initialize_database_backends { # For backward-compatibility, read in the MYSQL_HOST/USER variables and use # them as the default values for the DATABASE_HOST/USER variables. - MYSQL_HOST=${MYSQL_HOST:-127.0.0.1} + MYSQL_HOST=${MYSQL_HOST:-$SERVICE_LOCAL_HOST} MYSQL_USER=${MYSQL_USER:-root} - DATABASE_HOST=${DATABASE_HOST:-${MYSQL_HOST}} + # Set DATABASE_HOST equal to MYSQL_HOST. If SERVICE_IP_VERSION is equal to 6, + # set DATABASE_HOST equal to [MYSQL_HOST]. MYSQL_HOST cannot use brackets due + # to mysql not using bracketing for IPv6 addresses. 
DATABASE_HOST must have brackets + # due to sqlalchemy only reading IPv6 addresses with brackets. + if [[ "$SERVICE_IP_VERSION" == 6 ]]; then + DATABASE_HOST=${DATABASE_HOST:-[$MYSQL_HOST]} + else + DATABASE_HOST=${DATABASE_HOST:-${MYSQL_HOST}} + fi + DATABASE_USER=${DATABASE_USER:-${MYSQL_USER}} if [ -n "$MYSQL_PASSWORD" ]; then diff --git a/lib/databases/mysql b/lib/databases/mysql index 0e477ca264..9c9401edf6 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -90,10 +90,10 @@ function configure_database_mysql { # Now update ``my.cnf`` for some local needs and restart the mysql service - # Change ‘bind-address’ from localhost (127.0.0.1) to any (0.0.0.0) and + # Change ‘bind-address’ from localhost (127.0.0.1) to any (::) and # set default db type to InnoDB sudo bash -c "source $TOP_DIR/functions && \ - iniset $my_conf mysqld bind-address 0.0.0.0 && \ + iniset $my_conf mysqld bind-address "$SERVICE_LISTEN_ADDRESS" && \ iniset $my_conf mysqld sql_mode STRICT_ALL_TABLES && \ iniset $my_conf mysqld default-storage-engine InnoDB \ iniset $my_conf mysqld max_connections 1024 \ diff --git a/lib/glance b/lib/glance index 4dbce9f521..c268324733 100644 --- a/lib/glance +++ b/lib/glance @@ -64,6 +64,7 @@ fi # Glance connection info. Note the port must be specified. 
GLANCE_SERVICE_HOST=${GLANCE_SERVICE_HOST:-$SERVICE_HOST} +GLANCE_SERVICE_LISTEN_ADDRESS=${GLANCE_SERVICE_LISTEN_ADDRESS:-$SERVICE_LISTEN_ADDRESS} GLANCE_SERVICE_PORT=${GLANCE_SERVICE_PORT:-9292} GLANCE_SERVICE_PORT_INT=${GLANCE_SERVICE_PORT_INT:-19292} GLANCE_HOSTPORT=${GLANCE_HOSTPORT:-$GLANCE_SERVICE_HOST:$GLANCE_SERVICE_PORT} @@ -106,6 +107,7 @@ function configure_glance { # Copy over our glance configurations and update them cp $GLANCE_DIR/etc/glance-registry.conf $GLANCE_REGISTRY_CONF iniset $GLANCE_REGISTRY_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + iniset $GLANCE_REGISTRY_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS inicomment $GLANCE_REGISTRY_CONF DEFAULT log_file local dburl=`database_connection_url glance` iniset $GLANCE_REGISTRY_CONF DEFAULT sql_connection $dburl @@ -118,6 +120,7 @@ function configure_glance { cp $GLANCE_DIR/etc/glance-api.conf $GLANCE_API_CONF iniset $GLANCE_API_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + iniset $GLANCE_API_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS inicomment $GLANCE_API_CONF DEFAULT log_file iniset $GLANCE_API_CONF DEFAULT sql_connection $dburl iniset $GLANCE_API_CONF DEFAULT use_syslog $SYSLOG @@ -136,6 +139,7 @@ function configure_glance { # Store specific configs iniset $GLANCE_API_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/ + iniset $GLANCE_API_CONF DEFAULT registry_host $GLANCE_SERVICE_HOST iniset $GLANCE_API_CONF DEFAULT workers "$API_WORKERS" @@ -202,6 +206,7 @@ function configure_glance { iniset $GLANCE_CACHE_CONF DEFAULT admin_user glance iniuncomment $GLANCE_CACHE_CONF DEFAULT auth_password iniset $GLANCE_CACHE_CONF DEFAULT admin_password $SERVICE_PASSWORD + iniset $GLANCE_CACHE_CONF DEFAULT registry_host $GLANCE_SERVICE_HOST # Store specific confs iniset $GLANCE_CACHE_CONF glance_store filesystem_store_datadir $GLANCE_IMAGE_DIR/ @@ -223,6 +228,7 @@ function configure_glance { if is_service_enabled g-search; then cp $GLANCE_DIR/etc/glance-search.conf 
$GLANCE_SEARCH_CONF iniset $GLANCE_SEARCH_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL + iniset $GLANCE_SEARCH_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS inicomment $GLANCE_SEARCH_CONF DEFAULT log_file iniset $GLANCE_SEARCH_CONF DEFAULT use_syslog $SYSLOG iniset $GLANCE_SEARCH_CONF DEFAULT sql_connection $dburl diff --git a/lib/neutron-legacy b/lib/neutron-legacy index acc2851131..cb1d1ef2ad 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -138,6 +138,8 @@ Q_PORT_INT=${Q_PORT_INT:-19696} Q_HOST=${Q_HOST:-$SERVICE_HOST} # Default protocol Q_PROTOCOL=${Q_PROTOCOL:-$SERVICE_PROTOCOL} +# Default listen address +Q_LISTEN_ADDRESS=${Q_LISTEN_ADDRESS:-$SERVICE_LISTEN_ADDRESS} # Default admin username Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-neutron} # Default auth strategy @@ -871,6 +873,7 @@ function _configure_neutron_common { iniset $NEUTRON_CONF database connection `database_connection_url $Q_DB_NAME` iniset $NEUTRON_CONF DEFAULT state_path $DATA_DIR/neutron iniset $NEUTRON_CONF DEFAULT use_syslog $SYSLOG + iniset $NEUTRON_CONF DEFAULT bind_host $Q_LISTEN_ADDRESS # If addition config files are set, make sure their path name is set as well if [[ ${#Q_PLUGIN_EXTRA_CONF_FILES[@]} > 0 && $Q_PLUGIN_EXTRA_CONF_PATH == '' ]]; then die $LINENO "Neutron additional plugin config not set.. 
exiting" diff --git a/lib/nova b/lib/nova index 88b336a1be..ee748434d5 100644 --- a/lib/nova +++ b/lib/nova @@ -85,6 +85,8 @@ NOVA_SERVICE_HOST=${NOVA_SERVICE_HOST:-$SERVICE_HOST} NOVA_SERVICE_PORT=${NOVA_SERVICE_PORT:-8774} NOVA_SERVICE_PORT_INT=${NOVA_SERVICE_PORT_INT:-18774} NOVA_SERVICE_PROTOCOL=${NOVA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} +NOVA_SERVICE_LOCAL_HOST=${NOVA_SERVICE_LOCAL_HOST:-$SERVICE_LOCAL_HOST} +NOVA_SERVICE_LISTEN_ADDRESS=${NOVA_SERVICE_LISTEN_ADDRESS:-$SERVICE_LISTEN_ADDRESS} EC2_SERVICE_PORT=${EC2_SERVICE_PORT:-8773} EC2_SERVICE_PORT_INT=${EC2_SERVICE_PORT_INT:-18773} @@ -476,11 +478,20 @@ function create_nova_conf { iniset $NOVA_CONF DEFAULT default_floating_pool "$PUBLIC_NETWORK_NAME" iniset $NOVA_CONF DEFAULT s3_host "$SERVICE_HOST" iniset $NOVA_CONF DEFAULT s3_port "$S3_SERVICE_PORT" - iniset $NOVA_CONF DEFAULT my_ip "$HOST_IP" + if [[ $SERVICE_IP_VERSION == 6 ]]; then + iniset $NOVA_CONF DEFAULT my_ip "$HOST_IPV6" + iniset $NOVA_CONF DEFAULT use_ipv6 "True" + else + iniset $NOVA_CONF DEFAULT my_ip "$HOST_IP" + fi iniset $NOVA_CONF database connection `database_connection_url nova` iniset $NOVA_CONF api_database connection `database_connection_url nova_api` iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x" iniset $NOVA_CONF osapi_v3 enabled "True" + iniset $NOVA_CONF DEFAULT osapi_compute_listen "$NOVA_SERVICE_LISTEN_ADDRESS" + iniset $NOVA_CONF DEFAULT ec2_listen "$NOVA_SERVICE_LISTEN_ADDRESS" + iniset $NOVA_CONF DEFAULT metadata_listen "$NOVA_SERVICE_LISTEN_ADDRESS" + iniset $NOVA_CONF DEFAULT s3_listen "$NOVA_SERVICE_LISTEN_ADDRESS" if is_fedora || is_suse; then # nova defaults to /usr/local/bin, but fedora and suse pip like to @@ -560,11 +571,13 @@ function create_nova_conf { if is_service_enabled n-novnc || is_service_enabled n-xvnc || [ "$NOVA_VNC_ENABLED" != False ]; then # Address on which instance vncservers will listen on compute hosts. 
# For multi-host, this should be the management ip of the compute host. - VNCSERVER_LISTEN=${VNCSERVER_LISTEN=127.0.0.1} - VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=127.0.0.1} + VNCSERVER_LISTEN=${VNCSERVER_LISTEN=$NOVA_SERVICE_LOCAL_HOST} + VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=$NOVA_SERVICE_LOCAL_HOST} iniset $NOVA_CONF DEFAULT vnc_enabled true iniset $NOVA_CONF DEFAULT vncserver_listen "$VNCSERVER_LISTEN" iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS" + iniset $NOVA_CONF DEFAULT novncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" + iniset $NOVA_CONF DEFAULT xvpvncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" else iniset $NOVA_CONF DEFAULT vnc_enabled false fi @@ -572,11 +585,12 @@ function create_nova_conf { if is_service_enabled n-spice; then # Address on which instance spiceservers will listen on compute hosts. # For multi-host, this should be the management ip of the compute host. - SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS=127.0.0.1} - SPICESERVER_LISTEN=${SPICESERVER_LISTEN=127.0.0.1} + SPICESERVER_PROXYCLIENT_ADDRESS=${SPICESERVER_PROXYCLIENT_ADDRESS=$NOVA_SERVICE_LOCAL_HOST} + SPICESERVER_LISTEN=${SPICESERVER_LISTEN=$NOVA_SERVICE_LOCAL_HOST} iniset $NOVA_CONF spice enabled true iniset $NOVA_CONF spice server_listen "$SPICESERVER_LISTEN" iniset $NOVA_CONF spice server_proxyclient_address "$SPICESERVER_PROXYCLIENT_ADDRESS" + iniset $NOVA_CONF spice html5proxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" else iniset $NOVA_CONF spice enabled false fi @@ -616,6 +630,7 @@ function create_nova_conf { fi if is_service_enabled n-sproxy; then + iniset $NOVA_CONF serial_console serialproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" iniset $NOVA_CONF serial_console enabled True fi } diff --git a/lib/swift b/lib/swift index 5b73981ed2..a8c02b3eb2 100644 --- a/lib/swift +++ b/lib/swift @@ -45,6 +45,7 @@ SWIFT3_DIR=$DEST/swift3 
SWIFT_SERVICE_PROTOCOL=${SWIFT_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} SWIFT_DEFAULT_BIND_PORT_INT=${SWIFT_DEFAULT_BIND_PORT_INT:-8081} +SWIFT_SERVICE_LOCAL_HOST=${SWIFT_SERVICE_LOCAL_HOST:-$SERVICE_LOCAL_HOST} # TODO: add logging to different location. @@ -668,9 +669,9 @@ function init_swift { swift-ring-builder account.builder create ${SWIFT_PARTITION_POWER_SIZE} ${SWIFT_REPLICAS} 1 for node_number in ${SWIFT_REPLICAS_SEQ}; do - swift-ring-builder object.builder add z${node_number}-127.0.0.1:$(( OBJECT_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1 - swift-ring-builder container.builder add z${node_number}-127.0.0.1:$(( CONTAINER_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1 - swift-ring-builder account.builder add z${node_number}-127.0.0.1:$(( ACCOUNT_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1 + swift-ring-builder object.builder add z${node_number}-${SWIFT_SERVICE_LOCAL_HOST}:$(( OBJECT_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1 + swift-ring-builder container.builder add z${node_number}-${SWIFT_SERVICE_LOCAL_HOST}:$(( CONTAINER_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1 + swift-ring-builder account.builder add z${node_number}-${SWIFT_SERVICE_LOCAL_HOST}:$(( ACCOUNT_PORT_BASE + 10 * (node_number - 1) ))/sdb1 1 done swift-ring-builder object.builder rebalance swift-ring-builder container.builder rebalance diff --git a/samples/local.conf b/samples/local.conf index bd0cd9c0db..ce7007391d 100644 --- a/samples/local.conf +++ b/samples/local.conf @@ -32,14 +32,15 @@ MYSQL_PASSWORD=stackdb RABBIT_PASSWORD=stackqueue SERVICE_PASSWORD=$ADMIN_PASSWORD -# ``HOST_IP`` should be set manually for best results if the NIC configuration -# of the host is unusual, i.e. ``eth1`` has the default route but ``eth0`` is the -# public interface. It is auto-detected in ``stack.sh`` but often is indeterminate -# on later runs due to the IP moving from an Ethernet interface to a bridge on -# the host. 
Setting it here also makes it available for ``openrc`` to include -# when setting ``OS_AUTH_URL``. -# ``HOST_IP`` is not set by default. +# ``HOST_IP`` and ``HOST_IPV6`` should be set manually for best results if +# the NIC configuration of the host is unusual, i.e. ``eth1`` has the default +# route but ``eth0`` is the public interface. They are auto-detected in +# ``stack.sh`` but often is indeterminate on later runs due to the IP moving +# from an Ethernet interface to a bridge on the host. Setting it here also +# makes it available for ``openrc`` to include when setting ``OS_AUTH_URL``. +# Neither is set by default. #HOST_IP=w.x.y.z +#HOST_IPV6=2001:db8::7 # Logging diff --git a/stack.sh b/stack.sh index 17cbe75802..4b53df379b 100755 --- a/stack.sh +++ b/stack.sh @@ -1403,7 +1403,10 @@ fi echo "" echo "" echo "" -echo "This is your host ip: $HOST_IP" +echo "This is your host IP address: $HOST_IP" +if [ "$HOST_IPV6" != "" ]; then + echo "This is your host IPv6 address: $HOST_IPV6" +fi # If you installed Horizon on this server you should be able # to access the site using your browser. diff --git a/stackrc b/stackrc index 9fb334a69a..5cacb1858f 100644 --- a/stackrc +++ b/stackrc @@ -669,14 +669,54 @@ FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24} FIXED_NETWORK_SIZE=${FIXED_NETWORK_SIZE:-256} HOST_IP_IFACE=${HOST_IP_IFACE:-} HOST_IP=${HOST_IP:-} +HOST_IPV6=${HOST_IPV6:-} -HOST_IP=$(get_default_host_ip $FIXED_RANGE $FLOATING_RANGE "$HOST_IP_IFACE" "$HOST_IP") +HOST_IP=$(get_default_host_ip "$FIXED_RANGE" "$FLOATING_RANGE" "$HOST_IP_IFACE" "$HOST_IP" "inet") if [ "$HOST_IP" == "" ]; then die $LINENO "Could not determine host ip address. See local.conf for suggestions on setting HOST_IP." fi -# Allow the use of an alternate hostname (such as localhost/127.0.0.1) for service endpoints. 
-SERVICE_HOST=${SERVICE_HOST:-$HOST_IP} +HOST_IPV6=$(get_default_host_ip "" "" "$HOST_IP_IFACE" "$HOST_IPV6" "inet6") + +# SERVICE IP version +# This is the IP version that services should be listening on, as well +# as using to register their endpoints with keystone. +SERVICE_IP_VERSION=${SERVICE_IP_VERSION:-4} + +# Validate SERVICE_IP_VERSION +# It would be nice to support "4+6" here as well, but that will require +# multiple calls into keystone to register endpoints, so for now let's +# just support one or the other. +if [[ $SERVICE_IP_VERSION != "4" ]] && [[ $SERVICE_IP_VERSION != "6" ]]; then + die $LINENO "SERVICE_IP_VERSION must be either 4 or 6" +fi + +if [[ "$SERVICE_IP_VERSION" == 4 ]]; then + DEF_SERVICE_HOST=$HOST_IP + DEF_SERVICE_LOCAL_HOST=127.0.0.1 + DEF_SERVICE_LISTEN_ADDRESS=0.0.0.0 +fi + +if [[ "$SERVICE_IP_VERSION" == 6 ]]; then + if [ "$HOST_IPV6" == "" ]; then + die $LINENO "Could not determine host IPv6 address. See local.conf for suggestions on setting HOST_IPV6." + fi + + DEF_SERVICE_HOST=[$HOST_IPV6] + DEF_SERVICE_LOCAL_HOST=::1 + DEF_SERVICE_LISTEN_ADDRESS=:: +fi + +# This is either 0.0.0.0 for IPv4 or :: for IPv6 +SERVICE_LISTEN_ADDRESS=${SERVICE_LISTEN_ADDRESS:-${DEF_SERVICE_LISTEN_ADDRESS}} + +# Allow the use of an alternate hostname (such as localhost/127.0.0.1) for +# service endpoints. Default is dependent on SERVICE_IP_VERSION above. 
+SERVICE_HOST=${SERVICE_HOST:-${DEF_SERVICE_HOST}} +# This is either 127.0.0.1 for IPv4 or ::1 for IPv6 +SERVICE_LOCAL_HOST=${SERVICE_LOCAL_HOST:-${DEF_SERVICE_LOCAL_HOST}} + +REGION_NAME=${REGION_NAME:-RegionOne} # Configure services to use syslog instead of writing to individual log files SYSLOG=$(trueorfalse False SYSLOG) From 5d04db20ec5a89c6f04df7c798efa41a4259d22e Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Tue, 16 Jun 2015 13:14:31 -0400 Subject: [PATCH 0345/2941] Add IPv6 support to openrc files Assumes devstack was configured with SERVICE_IP_VERSION in local.conf SERVICE_IP_VERSION is stored in .stackenv and checked in openrc. If SERVICE_IP_VERSION is set to 6, openrc will use IPv6. NOTE: At first, I added a '-6' option to the openrc call which would set the HOSTS accordingly. I then simplified the code by saving SERVICE_IP_VERSION to the .stackenv file which is sourced by openrc. After that, I simplified the code even more by removing an extra, unnecessary, variable. Change-Id: I5d46d5438d3e56fea788720ca17f0010caef3df1 --- functions-common | 2 +- openrc | 24 ++++++++++++++++-------- 2 files changed, 17 insertions(+), 9 deletions(-) diff --git a/functions-common b/functions-common index 39c1bfc4d7..fc17d42422 100644 --- a/functions-common +++ b/functions-common @@ -47,7 +47,7 @@ TRACK_DEPENDS=${TRACK_DEPENDS:-False} STACK_ENV_VARS="BASE_SQL_CONN DATA_DIR DEST ENABLED_SERVICES HOST_IP \ KEYSTONE_AUTH_PROTOCOL KEYSTONE_AUTH_URI KEYSTONE_SERVICE_URI \ LOGFILE OS_CACERT SERVICE_HOST SERVICE_PROTOCOL STACK_USER TLS_IP \ - HOST_IPV6" + HOST_IPV6 SERVICE_IP_VERSION" # Saves significant environment variables to .stackenv for later use diff --git a/openrc b/openrc index 64faa58a3a..71ba5a6ea5 100644 --- a/openrc +++ b/openrc @@ -56,18 +56,26 @@ export OS_NO_CACHE=${OS_NO_CACHE:-1} # Region export OS_REGION_NAME=${REGION_NAME:-RegionOne} -# Set api HOST_IP endpoint. 
SERVICE_HOST may also be used to specify the endpoint, -# which is convenient for some localrc configurations. -HOST_IP=${HOST_IP:-127.0.0.1} -SERVICE_HOST=${SERVICE_HOST:-$HOST_IP} +# Set the host API endpoint. This will default to HOST_IP if SERVICE_IP_VERSION +# is 4, else HOST_IPV6 if it's 6. SERVICE_HOST may also be used to specify the +# endpoint, which is convenient for some localrc configurations. Additionally, +# some exercises call Glance directly. On a single-node installation, Glance +# should be listening on a local IP address, depending on the setting of +# SERVICE_IP_VERSION. If its running elsewhere, it can be set here. +if [[ $SERVICE_IP_VERSION == 6 ]]; then + HOST_IPV6=${HOST_IPV6:-::1} + SERVICE_HOST=${SERVICE_HOST:-[$HOST_IPV6]} + GLANCE_HOST=${GLANCE_HOST:-[$HOST_IPV6]} +else + HOST_IP=${HOST_IP:-127.0.0.1} + SERVICE_HOST=${SERVICE_HOST:-$HOST_IP} + GLANCE_HOST=${GLANCE_HOST:-$HOST_IP} +fi + SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http} KEYSTONE_AUTH_PROTOCOL=${KEYSTONE_AUTH_PROTOCOL:-$SERVICE_PROTOCOL} KEYSTONE_AUTH_HOST=${KEYSTONE_AUTH_HOST:-$SERVICE_HOST} -# Some exercises call glance directly. On a single-node installation, Glance -# should be listening on HOST_IP. If its running elsewhere, it can be set here -GLANCE_HOST=${GLANCE_HOST:-$HOST_IP} - # Identity API version export OS_IDENTITY_API_VERSION=${IDENTITY_API_VERSION:-2.0} From aedb8b97f60e7ff5bf5d0bafc192cc6e3e289cbe Mon Sep 17 00:00:00 2001 From: Jamie Lennox Date: Thu, 2 Jul 2015 17:39:07 +1000 Subject: [PATCH 0346/2941] Search for service by type When you get or create service it first checks to see if an existing service matching these parameters exists. The definition of existing is having a service with the same name, however name is not a unique field. Sahara for example creates two services, one with data-processing, one with data_processing with the same sahara name. Search for existing services by service type, not by service name. 
Change-Id: I6148e2254aa3968039b0e7c178e7cabc53b6be68 --- functions-common | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions-common b/functions-common index 483b1fa696..d6be1ecc1e 100644 --- a/functions-common +++ b/functions-common @@ -806,7 +806,7 @@ function get_or_create_service { # Gets service id local service_id=$( # Gets service id - openstack service show $1 -f value -c id 2>/dev/null || + openstack service show $2 -f value -c id 2>/dev/null || # Creates new service if not exists openstack service create \ $2 \ From e62c906baf9f94be1cd4d3a66c3e5b7b6324bed8 Mon Sep 17 00:00:00 2001 From: Marian Horban Date: Wed, 24 Jun 2015 16:25:16 -0400 Subject: [PATCH 0347/2941] Added processing /compute URL With config option NOVA_USE_MOD_WSGI=True nova-api handles requests on /compute URL. Depends on I83bc4731507fa028377ae6701ed4d32adefa9251 Change-Id: Ic84b5c0dc0726662470ef9c076a0cadca55a3917 --- files/apache-nova-api.template | 9 +++++++++ lib/nova | 18 ++++++++++++------ 2 files changed, 21 insertions(+), 6 deletions(-) diff --git a/files/apache-nova-api.template b/files/apache-nova-api.template index 301a3bdbdd..49081528ff 100644 --- a/files/apache-nova-api.template +++ b/files/apache-nova-api.template @@ -14,3 +14,12 @@ Listen %PUBLICPORT% %SSLCERTFILE% %SSLKEYFILE% + +Alias /compute %PUBLICWSGI% + + SetHandler wsgi-script + Options +ExecCGI + WSGIProcessGroup nova-api + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + diff --git a/lib/nova b/lib/nova index 88b336a1be..41248b10ed 100644 --- a/lib/nova +++ b/lib/nova @@ -405,19 +405,25 @@ function create_nova_accounts { local nova_service=$(get_or_create_service "nova" \ "compute" "Nova Compute Service") + local nova_api_url + if [[ "$NOVA_USE_MOD_WSGI" == "False" ]]; then + nova_api_url="$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT" + else + nova_api_url="$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST/compute" + fi get_or_create_endpoint $nova_service \ 
"$REGION_NAME" \ - "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \ - "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" \ - "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2/\$(tenant_id)s" + "$nova_api_url/v2/\$(tenant_id)s" \ + "$nova_api_url/v2/\$(tenant_id)s" \ + "$nova_api_url/v2/\$(tenant_id)s" local nova_v21_service=$(get_or_create_service "novav21" \ "computev21" "Nova Compute Service V2.1") get_or_create_endpoint $nova_v21_service \ "$REGION_NAME" \ - "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2.1/\$(tenant_id)s" \ - "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2.1/\$(tenant_id)s" \ - "$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT/v2.1/\$(tenant_id)s" + "$nova_api_url/v2.1/\$(tenant_id)s" \ + "$nova_api_url/v2.1/\$(tenant_id)s" \ + "$nova_api_url/v2.1/\$(tenant_id)s" fi fi From 7c838616c98212c808d12c3789cc17ec76cbd878 Mon Sep 17 00:00:00 2001 From: Robert Collins Date: Fri, 3 Jul 2015 13:28:09 +1200 Subject: [PATCH 0348/2941] Fix library runs. Libraries were resulting in two edit-constraints runs: - one for the library, which adds a non-editable file path - then one for the editable servers, but that fails becuse pkg-resources couldn't parse the prior entry. This is fixed in two parts: the dependent patch which supports parsing file urls that have egg names, and this patch which changes from a file path to a file url with an egg name. Change-Id: I0f07858e96ea3baf46f8a453e253b9ed29c7f7e2 --- inc/python | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/inc/python b/inc/python index ca185f05d2..bba9cda44d 100644 --- a/inc/python +++ b/inc/python @@ -238,7 +238,9 @@ function setup_package_with_req_sync { if [ -n "$REQUIREMENTS_DIR" ]; then # Constrain this package to this project directory from here on out. 
local name=$(awk '/^name.*=/ {print $3}' $project_dir/setup.cfg) - $REQUIREMENTS_DIR/.venv/bin/edit-constraints $REQUIREMENTS_DIR/upper-constraints.txt -- $name "$flags $project_dir" + $REQUIREMENTS_DIR/.venv/bin/edit-constraints \ + $REQUIREMENTS_DIR/upper-constraints.txt -- $name \ + "$flags file://$project_dir#egg=$name" fi setup_package $project_dir $flags From 4f91f93557d088b315e1687db9fa462888a06312 Mon Sep 17 00:00:00 2001 From: Hirofumi Ichihara Date: Sat, 4 Jul 2015 20:45:24 +0900 Subject: [PATCH 0349/2941] Remove duplication in ml2 and openvswitch_agent Configuration of local_ip is duplication in ml2 because there is the configuration in both ml2 and openvswitch_agent. It also should be set in each driver using openvswitch. Change-Id: Ib0b874aed8db883d778426ed1ae01679fc0cc075 --- lib/neutron_plugins/ml2 | 5 ----- 1 file changed, 5 deletions(-) mode change 100644 => 100755 lib/neutron_plugins/ml2 diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 old mode 100644 new mode 100755 index 2733f1f513..13ffee9b5b --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -107,11 +107,6 @@ function neutron_plugin_configure_service { iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver fi - if [[ "$ENABLE_TENANT_TUNNELS" == "True" ]]; then - # Set local_ip if TENANT_TUNNELS are enabled. 
- iniset /$Q_PLUGIN_CONF_FILE ovs local_ip $TUNNEL_ENDPOINT_IP - fi - populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 mechanism_drivers=$Q_ML2_PLUGIN_MECHANISM_DRIVERS populate_ml2_config /$Q_PLUGIN_CONF_FILE ml2 type_drivers=$Q_ML2_PLUGIN_TYPE_DRIVERS From d48d672a8d36a70b10456496159fecf7551e89f8 Mon Sep 17 00:00:00 2001 From: Hirofumi Ichihara Date: Sat, 4 Jul 2015 22:58:44 +0900 Subject: [PATCH 0350/2941] Add tunnel_bridge configuration for openvswitch agent Change-Id: I0235aa05cf86b3ed9d9620dda3f16b69ced077e3 --- lib/neutron_plugins/openvswitch_agent | 1 + lib/neutron_plugins/ovs_base | 1 + 2 files changed, 2 insertions(+) mode change 100644 => 100755 lib/neutron_plugins/openvswitch_agent mode change 100644 => 100755 lib/neutron_plugins/ovs_base diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent old mode 100644 new mode 100755 index 2a05e2dcfa..1ff3a40c82 --- a/lib/neutron_plugins/openvswitch_agent +++ b/lib/neutron_plugins/openvswitch_agent @@ -50,6 +50,7 @@ function neutron_plugin_configure_plugin_agent { die $LINENO "You are running OVS version $OVS_VERSION. OVS 1.4+ is required for tunneling between multiple hosts." fi iniset /$Q_PLUGIN_CONF_FILE ovs local_ip $TUNNEL_ENDPOINT_IP + iniset /$Q_PLUGIN_CONF_FILE ovs tunnel_bridge $OVS_TUNNEL_BRIDGE fi # Setup physical network bridge mappings. Override diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base old mode 100644 new mode 100755 index 4e750f0932..ad09a739c6 --- a/lib/neutron_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -9,6 +9,7 @@ set +o xtrace OVS_BRIDGE=${OVS_BRIDGE:-br-int} OVS_DATAPATH_TYPE=${OVS_DATAPATH_TYPE:-""} +OVS_TUNNEL_BRIDGE=${OVS_TUNNEL_BRIDGE:-br-tun} function is_neutron_ovs_base_plugin { # Yes, we use OVS. 
From 7ab3e39bc485acc2b54d7496a77c2e43eda4e799 Mon Sep 17 00:00:00 2001 From: Hirofumi Ichihara Date: Sat, 4 Jul 2015 23:11:52 +0900 Subject: [PATCH 0351/2941] Add cleanup for br-tun with OVS Change-Id: I5c4d28844f40eaad622ef7590c54e0e6647c85e3 Closes-Bug: #1471390 --- lib/neutron_plugins/ovs_base | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base index ad09a739c6..f1f7f8597b 100755 --- a/lib/neutron_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -41,7 +41,7 @@ function neutron_ovs_base_cleanup { done # remove all OVS bridges created by Neutron - for bridge in $(sudo ovs-vsctl list-br | grep -o -e ${OVS_BRIDGE} -e ${PUBLIC_BRIDGE}); do + for bridge in $(sudo ovs-vsctl list-br | grep -o -e ${OVS_BRIDGE} -e ${PUBLIC_BRIDGE} -e ${OVS_TUNNEL_BRIDGE}); do sudo ovs-vsctl del-br ${bridge} done } From 21a9077d7cf32ee1c387171b943125e938eaa1b2 Mon Sep 17 00:00:00 2001 From: Jamie Lennox Date: Fri, 3 Jul 2015 11:54:38 +1000 Subject: [PATCH 0352/2941] Allow installing keystoneauth from git Keystoneauth is not marked as stable yet however to ensure that the integration between it, keystoneclient and other services don't break compatibility we want to test it with tempest. Unfortunately you can't put -e links in requirements.txt files so add it to devstack so we can set the test environment. This will also make it available when keystoneauth is released. 
Change-Id: I43ca1df9c6ae2f0ac1a687c9ce1e2ccb97e81652 --- lib/keystone | 9 +++++++++ stack.sh | 1 + stackrc | 4 ++++ tests/test_libs_from_pypi.sh | 1 + 4 files changed, 15 insertions(+) diff --git a/lib/keystone b/lib/keystone index c33d466c6c..baa4f8c22c 100644 --- a/lib/keystone +++ b/lib/keystone @@ -35,6 +35,7 @@ set +o xtrace # -------- # Set up default directories +GITDIR["keystoneauth"]=$DEST/keystoneauth GITDIR["python-keystoneclient"]=$DEST/python-keystoneclient GITDIR["keystonemiddleware"]=$DEST/keystonemiddleware KEYSTONE_DIR=$DEST/keystone @@ -483,6 +484,14 @@ function init_keystone { fi } +# install_keystoneauth() - Collect source and prepare +function install_keystoneauth { + if use_library_from_git "keystoneauth"; then + git_clone_by_name "keystoneauth" + setup_dev_lib "keystoneauth" + fi +} + # install_keystoneclient() - Collect source and prepare function install_keystoneclient { if use_library_from_git "python-keystoneclient"; then diff --git a/stack.sh b/stack.sh index 17cbe75802..1610651fa7 100755 --- a/stack.sh +++ b/stack.sh @@ -741,6 +741,7 @@ echo_summary "Installing OpenStack project source" install_oslo # Install client libraries +install_keystoneauth install_keystoneclient install_glanceclient install_cinderclient diff --git a/stackrc b/stackrc index 342f9bf987..eb0c330ed3 100644 --- a/stackrc +++ b/stackrc @@ -280,6 +280,10 @@ GITBRANCH["python-heatclient"]=${HEATCLIENT_BRANCH:-master} GITREPO["python-ironicclient"]=${IRONICCLIENT_REPO:-${GIT_BASE}/openstack/python-ironicclient.git} GITBRANCH["python-ironicclient"]=${IRONICCLIENT_BRANCH:-master} +# the base authentication plugins that clients use to authenticate +GITREPO["keystoneauth"]=${KEYSTONEAUTH_REPO:-${GIT_BASE}/openstack/keystoneauth.git} +GITBRANCH["keystoneauth"]=${KEYSTONEAUTH_BRANCH:-master} + # python keystone client library to nova that horizon uses GITREPO["python-keystoneclient"]=${KEYSTONECLIENT_REPO:-${GIT_BASE}/openstack/python-keystoneclient.git} 
GITBRANCH["python-keystoneclient"]=${KEYSTONECLIENT_BRANCH:-master} diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh index 8dc3ba3c6e..d10cd0ee62 100755 --- a/tests/test_libs_from_pypi.sh +++ b/tests/test_libs_from_pypi.sh @@ -41,6 +41,7 @@ ALL_LIBS+=" python-ceilometerclient oslo.utils python-swiftclient" ALL_LIBS+=" python-neutronclient tooz ceilometermiddleware oslo.policy" ALL_LIBS+=" debtcollector os-brick automaton futurist oslo.service" ALL_LIBS+=" oslo.cache oslo.reports" +ALL_LIBS+=" keystoneauth" # Generate the above list with # echo ${!GITREPO[@]} From 1848b837e672fd6b7e091637e7cefa1ce0052958 Mon Sep 17 00:00:00 2001 From: Chris Dent Date: Sat, 27 Jun 2015 15:05:17 +0100 Subject: [PATCH 0353/2941] Move ceilometermiddleware installation to lib/swift lib/swift is where it is used so this makes the relationship more clear and direct. Change-Id: Ie6fc09e27a39295c92f0790856446edb7dedb995 --- lib/ceilometer | 10 ---------- lib/swift | 15 +++++++++++++++ 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index ed9b93377e..163ed0b0ab 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -360,16 +360,6 @@ function install_ceilometerclient { fi } -# install_ceilometermiddleware() - Collect source and prepare -function install_ceilometermiddleware { - if use_library_from_git "ceilometermiddleware"; then - git_clone_by_name "ceilometermiddleware" - setup_dev_lib "ceilometermiddleware" - else - pip_install_gr ceilometermiddleware - fi -} - # start_ceilometer() - Start running processes, including screen function start_ceilometer { run_process ceilometer-acentral "$CEILOMETER_BIN_DIR/ceilometer-agent-central --config-file $CEILOMETER_CONF" diff --git a/lib/swift b/lib/swift index 5b73981ed2..3207fac165 100644 --- a/lib/swift +++ b/lib/swift @@ -697,6 +697,21 @@ function install_swiftclient { fi } +# install_ceilometermiddleware() - Collect source and prepare +# note that this doesn't really have 
anything to do with ceilometer; +# though ceilometermiddleware has ceilometer in its name as an +# artifact of history, it is not a ceilometer specific tool. It +# simply generates pycadf-based notifications about requests and +# responses on the swift proxy +function install_ceilometermiddleware { + if use_library_from_git "ceilometermiddleware"; then + git_clone_by_name "ceilometermiddleware" + setup_dev_lib "ceilometermiddleware" + else + pip_install_gr ceilometermiddleware + fi +} + # start_swift() - Start running processes, including screen function start_swift { # (re)start memcached to make sure we have a clean memcache. From 9ef663a70fb02d4829b4c5a50f7fe6ef3ec6a8b4 Mon Sep 17 00:00:00 2001 From: Eric Harney Date: Tue, 7 Jul 2015 09:34:33 -0400 Subject: [PATCH 0354/2941] Don't uninstall ceph packages at cleanup We stop the services, which should be sufficient. Uninstalling the packages means that doing repeated runs with devstack takes longer than necessary. Change-Id: I5626e42ce83710690a3523439bb2c9c9af560cd9 --- lib/ceph | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/ceph b/lib/ceph index 16dcda2f9e..04e05e7b01 100644 --- a/lib/ceph +++ b/lib/ceph @@ -158,7 +158,6 @@ function cleanup_ceph_embedded { function cleanup_ceph_general { undefine_virsh_secret - uninstall_package ceph ceph-common python-ceph libcephfs1 > /dev/null 2>&1 } From 11298a01f74c7952b924d001c548d9721eebf591 Mon Sep 17 00:00:00 2001 From: Adam Kacmarsky Date: Fri, 26 Jun 2015 14:49:47 -0600 Subject: [PATCH 0355/2941] Add IPv6 support for _move_neutron_addresses_route Added functionallity to allow IPv6 addresses to be moved to the OVS_PHYSICAL_BRIDGE from PUBLIC_INTERFACE automatically using _move_neutron_addresses_route. Only PUBLIC_INTERFACE and OVS_PHYSICAL_BRIDGE need to be set in localrc. HOST_IP must be set in localrc. HOST_IPV6 must be set in localrc if a global IPv6 address is configured on PUBLIC_INTERFACE. 
Change-Id: I8d2c055702e1c7cf08499a77f6843393762fd4c1 --- lib/neutron-legacy | 27 +++++++++++++++++++++++---- 1 file changed, 23 insertions(+), 4 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index cb1d1ef2ad..ee98015708 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -788,6 +788,7 @@ function _move_neutron_addresses_route { local from_intf=$1 local to_intf=$2 local add_ovs_port=$3 + local af=$4 if [[ -n "$from_intf" && -n "$to_intf" ]]; then # Remove the primary IP address from $from_intf and add it to $to_intf, @@ -795,10 +796,18 @@ function _move_neutron_addresses_route { # on configure we will also add $from_intf as a port on $to_intf, # assuming it is an OVS bridge. - local IP_BRD=$(ip -4 a s dev $from_intf | awk '/inet/ { print $2, $3, $4; exit }') + local IP_BRD=$(ip -f $af a s dev $from_intf | awk '/inet/ { print $2, $3, $4; exit }') local DEFAULT_ROUTE_GW=$(ip r | awk "/default.+$from_intf/ { print \$3; exit }") local ADD_OVS_PORT="" + if [[ $af == "inet" ]]; then + IP_BRD=$(ip -f $af a s dev $from_intf | grep $HOST_IP | awk '{ print $2, $3, $4; exit }') + fi + + if [[ $af == "inet6" ]]; then + IP_BRD=$(ip -f $af a s dev $from_intf | grep $HOST_IPV6 | awk '{ print $2, $3, $4; exit }') + fi + if [ "$DEFAULT_ROUTE_GW" != "" ]; then ADD_DEFAULT_ROUTE="sudo ip r replace default via $DEFAULT_ROUTE_GW dev $to_intf" fi @@ -815,7 +824,13 @@ function _move_neutron_addresses_route { # runs that a clean run would need to clean up function cleanup_neutron { - _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False + if [[ $(ip -f inet a s dev "$OVS_PHYSICAL_BRIDGE" | grep -c 'global') != 0 ]]; then + _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False "inet" + fi + + if [[ $(ip -f inet6 a s dev "$OVS_PHYSICAL_BRIDGE" | grep -c 'global') != 0 ]]; then + _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False "inet6" + fi if is_provider_network && is_ironic_hardware; 
then for IP in $(ip addr show dev $OVS_PHYSICAL_BRIDGE | grep ' inet ' | awk '{print $2}'); do @@ -997,8 +1012,12 @@ function _configure_neutron_l3_agent { neutron_plugin_configure_l3_agent - if [[ $(ip -4 a s dev "$PUBLIC_INTERFACE" | grep -c 'inet') != 0 ]]; then - _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True + if [[ $(ip -f inet a s dev "$PUBLIC_INTERFACE" | grep -c 'global') != 0 ]]; then + _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True "inet" + fi + + if [[ $(ip -f inet6 a s dev "$PUBLIC_INTERFACE" | grep -c 'global') != 0 ]]; then + _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False "inet6" fi } From b17ad7560d7d67e3464b489e124c540e025b9299 Mon Sep 17 00:00:00 2001 From: Jamie Lennox Date: Fri, 29 May 2015 06:04:47 +0000 Subject: [PATCH 0356/2941] Use identity V3 API for endpoint creation Always use the keystone V3 API when creating services and endpoints. The syntax here is slightly different but we maintain the function interface. 
Change-Id: Ib3a375918a45fd6e37d873a1a5c0c4b26bdbb5d8 Implements: bp keystonev3 --- functions-common | 59 ++++++++++++++++++++++++++++++++++------------ lib/ceilometer | 5 ++-- lib/cinder | 10 ++++---- lib/glance | 10 ++++---- lib/heat | 10 ++++---- lib/ironic | 5 ++-- lib/keystone | 5 ++-- lib/neutron-legacy | 5 ++-- lib/nova | 21 +++++++---------- lib/swift | 5 ++-- lib/tempest | 4 ++-- lib/zaqar | 5 ++-- 12 files changed, 79 insertions(+), 65 deletions(-) diff --git a/functions-common b/functions-common index d6be1ecc1e..9023e85fad 100644 --- a/functions-common +++ b/functions-common @@ -809,6 +809,8 @@ function get_or_create_service { openstack service show $2 -f value -c id 2>/dev/null || # Creates new service if not exists openstack service create \ + --os-url $KEYSTONE_SERVICE_URI_V3 \ + --os-identity-api-version=3 \ $2 \ --name $1 \ --description="$3" \ @@ -817,29 +819,56 @@ function get_or_create_service { echo $service_id } -# Gets or creates endpoint -# Usage: get_or_create_endpoint -function get_or_create_endpoint { - # Gets endpoint id +# Create an endpoint with a specific interface +# Usage: _get_or_create_endpoint_with_interface +function _get_or_create_endpoint_with_interface { local endpoint_id=$(openstack endpoint list \ - --column "ID" \ - --column "Region" \ - --column "Service Name" \ - | grep " $2 " \ - | grep " $1 " | get_field 1) + --os-url $KEYSTONE_SERVICE_URI_V3 \ + --os-identity-api-version=3 \ + --service $1 \ + --interface $2 \ + --region $4 \ + -c ID -f value) if [[ -z "$endpoint_id" ]]; then # Creates new endpoint endpoint_id=$(openstack endpoint create \ - $1 \ - --region $2 \ - --publicurl $3 \ - --adminurl $4 \ - --internalurl $5 \ - | grep " id " | get_field 2) + --os-url $KEYSTONE_SERVICE_URI_V3 \ + --os-identity-api-version=3 \ + $1 $2 $3 --region $4 -f value -c id) fi + echo $endpoint_id } +# Gets or creates endpoint +# Usage: get_or_create_endpoint +function get_or_create_endpoint { + # NOTE(jamielennnox): when converting to 
v3 endpoint creation we go from + # creating one endpoint with multiple urls to multiple endpoints each with + # a different interface. To maintain the existing function interface we + # create 3 endpoints and return the id of the public one. In reality + # returning the public id will not make a lot of difference as there are no + # scenarios currently that use the returned id. Ideally this behaviour + # should be pushed out to the service setups and let them create the + # endpoints they need. + local public_id=$(_get_or_create_endpoint_with_interface $1 public $3 $2) + _get_or_create_endpoint_with_interface $1 admin $4 $2 + _get_or_create_endpoint_with_interface $1 internal $5 $2 + + # return the public id to indicate success, and this is the endpoint most likely wanted + echo $public_id +} + +# Get a URL from the identity service +# Usage: get_endpoint_url +function get_endpoint_url { + echo $(openstack endpoint list \ + --service $1 --interface $2 \ + --os-url $KEYSTONE_SERVICE_URI_V3 \ + --os-identity-api-version=3 \ + -c URL -f value) +} + # Package Functions # ================= diff --git a/lib/ceilometer b/lib/ceilometer index 163ed0b0ab..7905384623 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -130,9 +130,8 @@ function create_ceilometer_accounts { create_service_user "ceilometer" "admin" if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - local ceilometer_service=$(get_or_create_service "ceilometer" \ - "metering" "OpenStack Telemetry Service") - get_or_create_endpoint $ceilometer_service \ + get_or_create_service "ceilometer" "metering" "OpenStack Telemetry Service" + get_or_create_endpoint "metering" \ "$REGION_NAME" \ "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" \ "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" \ diff --git a/lib/cinder b/lib/cinder index 81174474e4..e51cd8ba1b 100644 --- a/lib/cinder +++ b/lib/cinder @@ -327,16 +327,14 @@ function create_cinder_accounts 
{ if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - local cinder_service=$(get_or_create_service "cinder" \ - "volume" "Cinder Volume Service") - get_or_create_endpoint $cinder_service "$REGION_NAME" \ + get_or_create_service "cinder" "volume" "Cinder Volume Service" + get_or_create_endpoint "volume" "$REGION_NAME" \ "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \ "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \ "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" - local cinder_v2_service=$(get_or_create_service "cinderv2" \ - "volumev2" "Cinder Volume Service V2") - get_or_create_endpoint $cinder_v2_service "$REGION_NAME" \ + get_or_create_service "cinderv2" "volumev2" "Cinder Volume Service V2" + get_or_create_endpoint "volumev2" "$REGION_NAME" \ "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" \ "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" \ "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" diff --git a/lib/glance b/lib/glance index 4dbce9f521..be6ccf9a37 100644 --- a/lib/glance +++ b/lib/glance @@ -266,9 +266,8 @@ function create_glance_accounts { if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - local glance_service=$(get_or_create_service "glance" \ - "image" "Glance Image Service") - get_or_create_endpoint $glance_service \ + get_or_create_service "glance" "image" "Glance Image Service" + get_or_create_endpoint "image" \ "$REGION_NAME" \ "$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT" \ "$GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT" \ @@ -279,10 +278,9 @@ function create_glance_accounts { # Add glance-search service and endpoints if is_service_enabled g-search; then if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - local glance_search_service=$(get_or_create_service "glance-search" \ - "search" "EXPERIMENTAL - 
Glance Graffiti Search Service") + get_or_create_service "glance-search" "search" "EXPERIMENTAL - Glance Graffiti Search Service" - get_or_create_endpoint $glance_search_service \ + get_or_create_endpoint "search" \ "$REGION_NAME" \ "$GLANCE_SERVICE_PROTOCOL://$GLANCE_SEARCH_HOSTPORT" \ "$GLANCE_SERVICE_PROTOCOL://$GLANCE_SEARCH_HOSTPORT" \ diff --git a/lib/heat b/lib/heat index 5cb0dbf6d9..cedddd2d26 100644 --- a/lib/heat +++ b/lib/heat @@ -250,17 +250,15 @@ function create_heat_accounts { if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - local heat_service=$(get_or_create_service "heat" \ - "orchestration" "Heat Orchestration Service") - get_or_create_endpoint $heat_service \ + get_or_create_service "heat" "orchestration" "Heat Orchestration Service" + get_or_create_endpoint "orchestration" \ "$REGION_NAME" \ "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \ "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" \ "$SERVICE_PROTOCOL://$HEAT_API_HOST:$HEAT_API_PORT/v1/\$(tenant_id)s" - local heat_cfn_service=$(get_or_create_service "heat-cfn" \ - "cloudformation" "Heat CloudFormation Service") - get_or_create_endpoint $heat_cfn_service \ + get_or_create_service "heat-cfn" "cloudformation" "Heat CloudFormation Service" + get_or_create_endpoint "cloudformation" \ "$REGION_NAME" \ "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" \ "$SERVICE_PROTOCOL://$HEAT_API_CFN_HOST:$HEAT_API_CFN_PORT/v1" \ diff --git a/lib/ironic b/lib/ironic index cff20c9bbc..2d197dc655 100644 --- a/lib/ironic +++ b/lib/ironic @@ -411,9 +411,8 @@ function create_ironic_accounts { if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - local ironic_service=$(get_or_create_service "ironic" \ - "baremetal" "Ironic baremetal provisioning service") - get_or_create_endpoint $ironic_service \ + get_or_create_service "ironic" "baremetal" "Ironic baremetal provisioning service" + get_or_create_endpoint "baremetal" \ "$REGION_NAME" \ 
"$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT" \ "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT" \ diff --git a/lib/keystone b/lib/keystone index c33d466c6c..428e615444 100644 --- a/lib/keystone +++ b/lib/keystone @@ -406,9 +406,8 @@ function create_keystone_accounts { # Keystone if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - KEYSTONE_SERVICE=$(get_or_create_service "keystone" \ - "identity" "Keystone Identity Service") - get_or_create_endpoint $KEYSTONE_SERVICE \ + get_or_create_service "keystone" "identity" "Keystone Identity Service" + get_or_create_endpoint "identity" \ "$REGION_NAME" \ "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v$IDENTITY_API_VERSION" \ "$KEYSTONE_AUTH_PROTOCOL://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT/v$IDENTITY_API_VERSION" \ diff --git a/lib/neutron-legacy b/lib/neutron-legacy index acc2851131..af7c6a1a47 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -517,9 +517,8 @@ function create_neutron_accounts { if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - local neutron_service=$(get_or_create_service "neutron" \ - "network" "Neutron Service") - get_or_create_endpoint $neutron_service \ + get_or_create_service "neutron" "network" "Neutron Service" + get_or_create_endpoint "network" \ "$REGION_NAME" \ "$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/" \ "$Q_PROTOCOL://$SERVICE_HOST:$Q_PORT/" \ diff --git a/lib/nova b/lib/nova index 41248b10ed..0d8f975fa7 100644 --- a/lib/nova +++ b/lib/nova @@ -402,24 +402,22 @@ function create_nova_accounts { create_service_user "nova" "admin" if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - - local nova_service=$(get_or_create_service "nova" \ - "compute" "Nova Compute Service") local nova_api_url if [[ "$NOVA_USE_MOD_WSGI" == "False" ]]; then nova_api_url="$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST:$NOVA_SERVICE_PORT" else nova_api_url="$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST/compute" fi - get_or_create_endpoint $nova_service \ + + get_or_create_service "nova" 
"compute" "Nova Compute Service" + get_or_create_endpoint "compute" \ "$REGION_NAME" \ "$nova_api_url/v2/\$(tenant_id)s" \ "$nova_api_url/v2/\$(tenant_id)s" \ "$nova_api_url/v2/\$(tenant_id)s" - local nova_v21_service=$(get_or_create_service "novav21" \ - "computev21" "Nova Compute Service V2.1") - get_or_create_endpoint $nova_v21_service \ + get_or_create_service "novav21" "computev21" "Nova Compute Service V2.1" + get_or_create_endpoint "computev21" \ "$REGION_NAME" \ "$nova_api_url/v2.1/\$(tenant_id)s" \ "$nova_api_url/v2.1/\$(tenant_id)s" \ @@ -438,9 +436,8 @@ function create_nova_accounts { # EC2 if [[ "$KEYSTONE_CATALOG_BACKEND" = "sql" ]]; then - local ec2_service=$(get_or_create_service "ec2" \ - "ec2" "EC2 Compatibility Layer") - get_or_create_endpoint $ec2_service \ + get_or_create_service "ec2" "ec2" "EC2 Compatibility Layer" + get_or_create_endpoint "ec2" \ "$REGION_NAME" \ "$EC2_SERVICE_PROTOCOL://$SERVICE_HOST:8773/" \ "$EC2_SERVICE_PROTOCOL://$SERVICE_HOST:8773/" \ @@ -452,8 +449,8 @@ function create_nova_accounts { if is_service_enabled n-obj swift3; then if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - local s3_service=$(get_or_create_service "s3" "s3" "S3") - get_or_create_endpoint $s3_service \ + get_or_create_service "s3" "s3" "S3" + get_or_create_endpoint "s3" \ "$REGION_NAME" \ "http://$SERVICE_HOST:$S3_SERVICE_PORT" \ "http://$SERVICE_HOST:$S3_SERVICE_PORT" \ diff --git a/lib/swift b/lib/swift index 3207fac165..52101f863a 100644 --- a/lib/swift +++ b/lib/swift @@ -607,9 +607,8 @@ function create_swift_accounts { if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - local swift_service=$(get_or_create_service "swift" \ - "object-store" "Swift Service") - get_or_create_endpoint $swift_service \ + get_or_create_service "swift" "object-store" "Swift Service" + get_or_create_endpoint "object-store" \ "$REGION_NAME" \ "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s" \ "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:8080" \ diff 
--git a/lib/tempest b/lib/tempest index a84ade2a81..1376c87a77 100644 --- a/lib/tempest +++ b/lib/tempest @@ -270,11 +270,11 @@ function configure_tempest { fi fi - EC2_URL=$(openstack endpoint show -f value -c publicurl ec2 || true) + EC2_URL=$(get_endpoint_url ec2 public || true) if [[ -z $EC2_URL ]]; then EC2_URL="$EC2_SERVICE_PROTOCOL://$SERVICE_HOST:8773/" fi - S3_URL=$(openstack endpoint show -f value -c publicurl s3 || true) + S3_URL=$(get_endpoint_url s3 public || true) if [[ -z $S3_URL ]]; then S3_URL="http://$SERVICE_HOST:${S3_SERVICE_PORT:-3333}" fi diff --git a/lib/zaqar b/lib/zaqar index 891b0eab04..fdab3a26a8 100644 --- a/lib/zaqar +++ b/lib/zaqar @@ -210,9 +210,8 @@ function create_zaqar_accounts { if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - local zaqar_service=$(get_or_create_service "zaqar" \ - "messaging" "Zaqar Service") - get_or_create_endpoint $zaqar_service \ + get_or_create_service "zaqar" "messaging" "Zaqar Service" + get_or_create_endpoint "messaging" \ "$REGION_NAME" \ "$ZAQAR_SERVICE_PROTOCOL://$ZAQAR_SERVICE_HOST:$ZAQAR_SERVICE_PORT" \ "$ZAQAR_SERVICE_PROTOCOL://$ZAQAR_SERVICE_HOST:$ZAQAR_SERVICE_PORT" \ From 72ce6acd22a553e4d34f0ffdc429aaaed7ed1212 Mon Sep 17 00:00:00 2001 From: Jamie Lennox Date: Thu, 2 Jul 2015 09:19:01 +1000 Subject: [PATCH 0357/2941] Use Keystone V3 API for role creation Always use the V3 API for role creation. Groups only exist in the v3 identity API and so we must specify --os-identity-api-version in these commands. 
Implements: bp keystonev3 Closes-Bug: #1470668 Change-Id: I5e01d23ebcb5a0c7de56233071a4eb9b16d3b813 --- functions-common | 31 ++++++++++++++++++++++--------- 1 file changed, 22 insertions(+), 9 deletions(-) diff --git a/functions-common b/functions-common index 9023e85fad..a1a8afaecf 100644 --- a/functions-common +++ b/functions-common @@ -753,7 +753,10 @@ function get_or_create_project { function get_or_create_role { local role_id=$( # Creates role with --or-show - openstack role create $1 --or-show -f value -c id + openstack role create $1 \ + --os-url=$KEYSTONE_SERVICE_URI_V3 \ + --os-identity-api-version=3 \ + --or-show -f value -c id ) echo $role_id } @@ -764,8 +767,10 @@ function get_or_add_user_project_role { # Gets user role id local user_role_id=$(openstack role list \ --user $2 \ - --project $3 \ + --os-url=$KEYSTONE_SERVICE_URI_V3 \ + --os-identity-api-version=3 \ --column "ID" \ + --project $3 \ --column "Name" \ | grep " $1 " | get_field 1) if [[ -z "$user_role_id" ]]; then @@ -774,6 +779,8 @@ function get_or_add_user_project_role { $1 \ --user $2 \ --project $3 \ + --os-url=$KEYSTONE_SERVICE_URI_V3 \ + --os-identity-api-version=3 \ | grep " id " | get_field 2) fi echo $user_role_id @@ -784,18 +791,24 @@ function get_or_add_user_project_role { function get_or_add_group_project_role { # Gets group role id local group_role_id=$(openstack role list \ + --os-url=$KEYSTONE_SERVICE_URI_V3 \ + --os-identity-api-version=3 \ --group $2 \ --project $3 \ - --column "ID" \ - --column "Name" \ - | grep " $1 " | get_field 1) + -c "ID" -f value) if [[ -z "$group_role_id" ]]; then - # Adds role to group - group_role_id=$(openstack role add \ - $1 \ + # Adds role to group and get it + openstack role add $1 \ + --os-url=$KEYSTONE_SERVICE_URI_V3 \ + --os-identity-api-version=3 \ + --group $2 \ + --project $3 + group_role_id=$(openstack role list \ + --os-url=$KEYSTONE_SERVICE_URI_V3 \ + --os-identity-api-version=3 \ --group $2 \ --project $3 \ - | grep " id " | get_field 
2) + -c "ID" -f value) fi echo $group_role_id } From 494f7cdf35d025f73b4cce9a3bde7b433183b1d8 Mon Sep 17 00:00:00 2001 From: Jamie Lennox Date: Fri, 29 May 2015 08:33:03 +0000 Subject: [PATCH 0358/2941] Use openstack cli for cinder type creation This command was commented out so assumedly there used to be a bug. Switch to OpenStackClient as the cinder CLI doesn't handle v3 auth correctly. Implements: bp keystonev3 Change-Id: I1acdc04cf04b7056701bdded31ef2a015de5bce3 --- lib/cinder | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/lib/cinder b/lib/cinder index e51cd8ba1b..eb32b719c8 100644 --- a/lib/cinder +++ b/lib/cinder @@ -480,13 +480,10 @@ function stop_cinder { function create_volume_types { # Create volume types if is_service_enabled c-api && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then - local be be_name be_type + local be be_name for be in ${CINDER_ENABLED_BACKENDS//,/ }; do - be_type=${be%%:*} be_name=${be##*:} - # openstack volume type create --property volume_backend_name="${be_type}" ${be_name} - cinder type-create ${be_name} && \ - cinder type-key ${be_name} set volume_backend_name="${be_name}" + openstack volume type create --property volume_backend_name="${be_name}" ${be_name} done fi } From 9d6d8f801571a1a0e6eff90cd2bfcdbac7945bcc Mon Sep 17 00:00:00 2001 From: Jamie Lennox Date: Mon, 22 Jun 2015 03:37:59 +0000 Subject: [PATCH 0359/2941] Use project instead of tenant in create_userrc The create_userrc file is littered with references to a tenant. The tenant concept has been deprecated long enough that we should use project instead. I have not attempted to maintain compatibility with the --os-tenant-X flags because I have not found reference to anyone using this script outside of devstack. 
Change-Id: I613f1bdc6673f0c4bfe29aaab7b514348a617a8c --- tools/create_userrc.sh | 103 +++++++++++++++++++++++------------------ 1 file changed, 57 insertions(+), 46 deletions(-) diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh index f067ed1f4b..3f10abbf7c 100755 --- a/tools/create_userrc.sh +++ b/tools/create_userrc.sh @@ -16,45 +16,45 @@ cat < -This script creates certificates and sourcable rc files per tenant/user. +This script creates certificates and sourcable rc files per project/user. Target account directory hierarchy: target_dir-| |-cacert.pem - |-tenant1-name| - | |- user1 - | |- user1-cert.pem - | |- user1-pk.pem - | |- user2 - | .. - |-tenant2-name.. + |-project1-name| + | |- user1 + | |- user1-cert.pem + | |- user1-pk.pem + | |- user2 + | .. + |-project2-name.. .. Optional Arguments -P include password to the rc files; with -A it assume all users password is the same -A try with all user -u create files just for the specified user --C create user and tenant, the specifid tenant will be the user's tenant --r when combined with -C and the (-u) user exists it will be the user's tenant role in the (-C)tenant (default: Member) +-C create user and project, the specifid project will be the user's project +-r when combined with -C and the (-u) user exists it will be the user's project role in the (-C)project (default: Member) -p password for the user --heat-url --os-username --os-password ---os-tenant-name ---os-tenant-id +--os-project-name +--os-project-id --os-auth-url --os-cacert --target-dir ---skip-tenant +--skip-project --debug Example: $0 -AP -$0 -P -C mytenant -u myuser -p mypass +$0 -P -C myproject -u myuser -p mypass EOF } -if ! options=$(getopt -o hPAp:u:r:C: -l os-username:,os-password:,os-tenant-name:,os-tenant-id:,os-auth-url:,target-dir:,heat-url:,skip-tenant:,os-cacert:,help,debug -- "$@"); then +if ! 
options=$(getopt -o hPAp:u:r:C: -l os-username:,os-password:,os-tenant-id:,os-tenant-name:,os-project-name:,os-project-id:,os-auth-url:,target-dir:,heat-url:,skip-project:,os-cacert:,help,debug -- "$@"); then display_help exit 1 fi @@ -62,10 +62,10 @@ eval set -- $options ADDPASS="" HEAT_URL="" -# The services users usually in the service tenant. +# The services users usually in the service project. # rc files for service users, is out of scope. -# Supporting different tenant for services is out of scope. -SKIP_TENANT="service" +# Supporting different project for services is out of scope. +SKIP_PROJECT="service" MODE="" ROLE=Member USER_NAME="" @@ -75,9 +75,12 @@ while [ $# -gt 0 ]; do -h|--help) display_help; exit 0 ;; --os-username) export OS_USERNAME=$2; shift ;; --os-password) export OS_PASSWORD=$2; shift ;; - --os-tenant-name) export OS_TENANT_NAME=$2; shift ;; - --os-tenant-id) export OS_TENANT_ID=$2; shift ;; - --skip-tenant) SKIP_TENANT="$SKIP_TENANT$2,"; shift ;; + --os-tenant-name) export OS_PROJECT_NAME=$2; shift ;; + --os-tenant-id) export OS_PROJECT_ID=$2; shift ;; + --os-project-name) export OS_PROJECT_NAME=$2; shift ;; + --os-project-id) export OS_PROJECT_ID=$2; shift ;; + --skip-tenant) SKIP_PROJECT="$SKIP_PROJECT$2,"; shift ;; + --skip-project) SKIP_PROJECT="$SKIP_PROJECT$2,"; shift ;; --os-auth-url) export OS_AUTH_URL=$2; shift ;; --os-cacert) export OS_CACERT=$2; shift ;; --target-dir) ACCOUNT_DIR=$2; shift ;; @@ -87,7 +90,7 @@ while [ $# -gt 0 ]; do -p) USER_PASS=$2; shift ;; -A) MODE=all; ;; -P) ADDPASS="yes" ;; - -C) MODE=create; TENANT=$2; shift ;; + -C) MODE=create; PROJECT=$2; shift ;; -r) ROLE=$2; shift ;; (--) shift; break ;; (-*) echo "$0: error - unrecognized option $1" >&2; display_help; exit 1 ;; @@ -105,8 +108,16 @@ if [ -z "$OS_PASSWORD" ]; then fi fi -if [ -z "$OS_TENANT_NAME" -a -z "$OS_TENANT_ID" ]; then - export OS_TENANT_NAME=admin +if [ -z "$OS_PROJECT_ID" -a "$OS_TENANT_ID" ]; then + export OS_PROJECT_ID=$OS_TENANT_ID +fi + 
+if [ -z "$OS_PROJECT_NAME" -a "$OS_TENANT_NAME" ]; then + export OS_PROJECT_NAME=$OS_TENANT_NAME +fi + +if [ -z "$OS_PROJECT_NAME" -a -z "$OS_PROJECT_ID" ]; then + export OS_PROJECT_NAME=admin fi if [ -z "$OS_USERNAME" ]; then @@ -156,21 +167,21 @@ fi function add_entry { local user_id=$1 local user_name=$2 - local tenant_id=$3 - local tenant_name=$4 + local project_id=$3 + local project_name=$4 local user_passwd=$5 # The admin user can see all user's secret AWS keys, it does not looks good - local line=`openstack ec2 credentials list --user $user_id | grep " $tenant_id "` + local line=`openstack ec2 credentials list --user $user_id | grep " $project_id "` if [ -z "$line" ]; then - openstack ec2 credentials create --user $user_id --project $tenant_id 1>&2 - line=`openstack ec2 credentials list --user $user_id | grep " $tenant_id "` + openstack ec2 credentials create --user $user_id --project $project_id 1>&2 + line=`openstack ec2 credentials list --user $user_id | grep " $project_id "` fi local ec2_access_key ec2_secret_key read ec2_access_key ec2_secret_key <<< `echo $line | awk '{print $2 " " $4 }'` - mkdir -p "$ACCOUNT_DIR/$tenant_name" - local rcfile="$ACCOUNT_DIR/$tenant_name/$user_name" - # The certs subject part are the tenant ID "dash" user ID, but the CN should be the first part of the DN + mkdir -p "$ACCOUNT_DIR/$project_name" + local rcfile="$ACCOUNT_DIR/$project_name/$user_name" + # The certs subject part are the project ID "dash" user ID, but the CN should be the first part of the DN # Generally the subject DN parts should be in reverse order like the Issuer # The Serial does not seams correctly marked either local ec2_cert="$rcfile-cert.pem" @@ -183,7 +194,7 @@ function add_entry { mv -f "$ec2_cert" "$ec2_cert.old" fi # It will not create certs when the password is incorrect - if ! nova --os-password "$user_passwd" --os-username "$user_name" --os-tenant-name "$tenant_name" x509-create-cert "$ec2_private_key" "$ec2_cert"; then + if ! 
nova --os-password "$user_passwd" --os-username "$user_name" --os-project-name "$project_name" x509-create-cert "$ec2_private_key" "$ec2_cert"; then if [ -e "$ec2_private_key.old" ]; then mv -f "$ec2_private_key.old" "$ec2_private_key" fi @@ -199,8 +210,8 @@ export EC2_URL="$EC2_URL" export S3_URL="$S3_URL" # OpenStack USER ID = $user_id export OS_USERNAME="$user_name" -# OpenStack Tenant ID = $tenant_id -export OS_TENANT_NAME="$tenant_name" +# OpenStack project ID = $project_id +export OS_PROJECT_NAME="$project_name" export OS_AUTH_URL="$OS_AUTH_URL" export OS_CACERT="$OS_CACERT" export EC2_CERT="$ec2_cert" @@ -213,7 +224,7 @@ EOF echo "export OS_PASSWORD=\"$user_passwd\"" >>"$rcfile" fi if [ -n "$HEAT_URL" ]; then - echo "export HEAT_URL=\"$HEAT_URL/$tenant_id\"" >>"$rcfile" + echo "export HEAT_URL=\"$HEAT_URL/$project_id\"" >>"$rcfile" echo "export OS_NO_CLIENT_AUTH=True" >>"$rcfile" fi } @@ -245,9 +256,9 @@ function get_user_id { } if [ $MODE != "create" ]; then - # looks like I can't ask for all tenant related to a specified user - openstack project list --long --quote none -f csv | grep ',True' | grep -v "${SKIP_TENANT}" | while IFS=, read tenant_id tenant_name desc enabled; do - openstack user list --project $tenant_id --long --quote none -f csv | grep ',True' | while IFS=, read user_id user_name project email enabled; do + # looks like I can't ask for all project related to a specified user + openstack project list --long --quote none -f csv | grep ',True' | grep -v "${SKIP_PROJECT}" | while IFS=, read project_id project_name desc enabled; do + openstack user list --project $project_id --long --quote none -f csv | grep ',True' | while IFS=, read user_id user_name project email enabled; do if [ $MODE = one -a "$user_name" != "$USER_NAME" ]; then continue; fi @@ -259,21 +270,21 @@ if [ $MODE != "create" ]; then if [ -n "$SPECIFIC_UPASSWORD" ]; then USER_PASS=$SPECIFIC_UPASSWORD fi - add_entry "$user_id" "$user_name" "$tenant_id" "$tenant_name" "$USER_PASS" + 
add_entry "$user_id" "$user_name" "$project_id" "$project_name" "$USER_PASS" done done else - tenant_name=$TENANT - tenant_id=$(create_or_get_project "$TENANT") + project_name=$PROJECT + project_id=$(create_or_get_project "$PROJECT") user_name=$USER_NAME user_id=`get_user_id $user_name` if [ -z "$user_id" ]; then - eval $(openstack user create "$user_name" --project "$tenant_id" --password "$USER_PASS" --email "$user_name@example.com" -f shell -c id) + eval $(openstack user create "$user_name" --project "$project_id" --password "$USER_PASS" --email "$user_name@example.com" -f shell -c id) user_id=$id - add_entry "$user_id" "$user_name" "$tenant_id" "$tenant_name" "$USER_PASS" + add_entry "$user_id" "$user_name" "$project_id" "$project_name" "$USER_PASS" else role_id=$(create_or_get_role "$ROLE") - openstack role add "$role_id" --user "$user_id" --project "$tenant_id" - add_entry "$user_id" "$user_name" "$tenant_id" "$tenant_name" "$USER_PASS" + openstack role add "$role_id" --user "$user_id" --project "$project_id" + add_entry "$user_id" "$user_name" "$project_id" "$project_name" "$USER_PASS" fi fi From c54d4ab9104057b82e02dc9a62f0dd35928f6f64 Mon Sep 17 00:00:00 2001 From: Jamie Lennox Date: Mon, 22 Jun 2015 04:07:18 +0000 Subject: [PATCH 0360/2941] Include domain variables in accrc Include the user and project domain parameters in the generated user rc files. This is fairly simplistic, if we were to follow the existing attitudes we should iterate over the domains and add a new level of folders however this would change the output location for files that may be depended upon. 
Change-Id: I5e9e78406b11382751a591d91f711161bb98f47a --- tools/create_userrc.sh | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh index 3f10abbf7c..c2dbe1aeb4 100755 --- a/tools/create_userrc.sh +++ b/tools/create_userrc.sh @@ -42,6 +42,10 @@ Optional Arguments --os-password --os-project-name --os-project-id +--os-user-domain-id +--os-user-domain-name +--os-project-domain-id +--os-project-domain-name --os-auth-url --os-cacert --target-dir @@ -54,7 +58,7 @@ $0 -P -C myproject -u myuser -p mypass EOF } -if ! options=$(getopt -o hPAp:u:r:C: -l os-username:,os-password:,os-tenant-id:,os-tenant-name:,os-project-name:,os-project-id:,os-auth-url:,target-dir:,heat-url:,skip-project:,os-cacert:,help,debug -- "$@"); then +if ! options=$(getopt -o hPAp:u:r:C: -l os-username:,os-password:,os-tenant-id:,os-tenant-name:,os-project-name:,os-project-id:,os-project-domain-id:,os-project-domain-name:,os-user-domain-id:,os-user-domain-name:,os-auth-url:,target-dir:,heat-url:,skip-project:,os-cacert:,help,debug -- "$@"); then display_help exit 1 fi @@ -79,6 +83,10 @@ while [ $# -gt 0 ]; do --os-tenant-id) export OS_PROJECT_ID=$2; shift ;; --os-project-name) export OS_PROJECT_NAME=$2; shift ;; --os-project-id) export OS_PROJECT_ID=$2; shift ;; + --os-user-domain-id) export OS_USER_DOMAIN_ID=$2; shift ;; + --os-user-domain-name) export OS_USER_DOMAIN_NAME=$2; shift ;; + --os-project-domain-id) export OS_PROJECT_DOMAIN_ID=$2; shift ;; + --os-project-domain-name) export OS_PROJECT_DOMAIN_NAME=$2; shift ;; --skip-tenant) SKIP_PROJECT="$SKIP_PROJECT$2,"; shift ;; --skip-project) SKIP_PROJECT="$SKIP_PROJECT$2,"; shift ;; --os-auth-url) export OS_AUTH_URL=$2; shift ;; @@ -128,6 +136,16 @@ if [ -z "$OS_AUTH_URL" ]; then export OS_AUTH_URL=http://localhost:5000/v2.0/ fi +if [ -z "$OS_USER_DOMAIN_ID" -a -z "$OS_USER_DOMAIN_NAME" ]; then + # purposefully not exported as it would force v3 auth within this 
file. + OS_USER_DOMAIN_ID=default +fi + +if [ -z "$OS_PROJECT_DOMAIN_ID" -a -z "$OS_PROJECT_DOMAIN_NAME" ]; then + # purposefully not exported as it would force v3 auth within this file. + OS_PROJECT_DOMAIN_ID=default +fi + USER_PASS=${USER_PASS:-$OS_PASSWORD} USER_NAME=${USER_NAME:-$OS_USERNAME} @@ -219,6 +237,7 @@ export EC2_PRIVATE_KEY="$ec2_private_key" export EC2_USER_ID=42 #not checked by nova (can be a 12-digit id) export EUCALYPTUS_CERT="$ACCOUNT_DIR/cacert.pem" export NOVA_CERT="$ACCOUNT_DIR/cacert.pem" +export OS_AUTH_TYPE=v2password EOF if [ -n "$ADDPASS" ]; then echo "export OS_PASSWORD=\"$user_passwd\"" >>"$rcfile" @@ -227,6 +246,13 @@ EOF echo "export HEAT_URL=\"$HEAT_URL/$project_id\"" >>"$rcfile" echo "export OS_NO_CLIENT_AUTH=True" >>"$rcfile" fi + for v in OS_USER_DOMAIN_ID OS_USER_DOMAIN_NAME OS_PROJECT_DOMAIN_ID OS_PROJECT_DOMAIN_NAME; do + if [ ${!v} ]; then + echo "export $v=${!v}" >>"$rcfile" + else + echo "unset $v" >>"$rcfile" + fi + done } #admin users expected From 6e121ff53e85fbe8f9eb9437403a4302bd0ee222 Mon Sep 17 00:00:00 2001 From: John Hua Date: Wed, 8 Jul 2015 03:00:07 +0100 Subject: [PATCH 0361/2941] Remove unused files from tools/xen. Most of unused files were used with build_domU_multi.sh which has been replaced by build_xva.sh. Besides tools/xen/scripts/templatedelete.sh was created for convenience purposes and now not in use. 
Change-Id: I4282c779629e3413ee3cd3ff134c3b7b19eee487 Closes-Bug: #1470535 --- tools/xen/build_domU_multi.sh | 29 --- tools/xen/files/fstab | 5 - tools/xen/files/hvc0.conf | 10 - tools/xen/scripts/mkxva | 365 ---------------------------- tools/xen/scripts/templatedelete.sh | 9 - tools/xen/templates/hosts.in | 8 - tools/xen/templates/menu.lst.in | 6 - tools/xen/templates/ova.xml.in | 14 -- 8 files changed, 446 deletions(-) delete mode 100755 tools/xen/build_domU_multi.sh delete mode 100644 tools/xen/files/fstab delete mode 100644 tools/xen/files/hvc0.conf delete mode 100755 tools/xen/scripts/mkxva delete mode 100755 tools/xen/scripts/templatedelete.sh delete mode 100644 tools/xen/templates/hosts.in delete mode 100644 tools/xen/templates/menu.lst.in delete mode 100644 tools/xen/templates/ova.xml.in diff --git a/tools/xen/build_domU_multi.sh b/tools/xen/build_domU_multi.sh deleted file mode 100755 index 0eb2077414..0000000000 --- a/tools/xen/build_domU_multi.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env bash - -# Echo commands -set -o xtrace - -# Head node host, which runs glance, api, keystone -HEAD_PUB_IP=${HEAD_PUB_IP:-192.168.1.57} -HEAD_MGT_IP=${HEAD_MGT_IP:-172.16.100.57} - -COMPUTE_PUB_IP=${COMPUTE_PUB_IP:-192.168.1.58} -COMPUTE_MGT_IP=${COMPUTE_MGT_IP:-172.16.100.58} - -# Networking params -FLOATING_RANGE=${FLOATING_RANGE:-192.168.1.196/30} - -# Variables common amongst all hosts in the cluster -COMMON_VARS="$STACKSH_PARAMS MYSQL_HOST=$HEAD_MGT_IP RABBIT_HOST=$HEAD_MGT_IP GLANCE_HOSTPORT=$HEAD_MGT_IP:9292 FLOATING_RANGE=$FLOATING_RANGE" - -# Helper to launch containers -function build_xva { - GUEST_NAME=$1 PUB_IP=$2 MGT_IP=$3 DO_SHUTDOWN=$4 TERMINATE=$TERMINATE STACKSH_PARAMS="$COMMON_VARS $5" ./build_xva.sh -} - -# Launch the head node - headnode uses a non-ip domain name, -# because rabbit won't launch with an ip addr hostname :( -build_xva HEADNODE $HEAD_PUB_IP $HEAD_MGT_IP 1 "ENABLED_SERVICES=g-api,g-reg,key,n-api,n-sch,n-vnc,horizon,mysql,rabbit" - -# 
Build the HA compute host -build_xva COMPUTENODE $COMPUTE_PUB_IP $COMPUTE_MGT_IP 0 "ENABLED_SERVICES=n-cpu,n-net,n-api" diff --git a/tools/xen/files/fstab b/tools/xen/files/fstab deleted file mode 100644 index 6c9b9818c3..0000000000 --- a/tools/xen/files/fstab +++ /dev/null @@ -1,5 +0,0 @@ -LABEL=vpxroot / ext3 defaults 1 1 -tmpfs /dev/shm tmpfs defaults 0 0 -devpts /dev/pts devpts gid=5,mode=620 0 0 -sysfs /sys sysfs defaults 0 0 -proc /proc proc defaults 0 0 diff --git a/tools/xen/files/hvc0.conf b/tools/xen/files/hvc0.conf deleted file mode 100644 index 4eedaf6ee1..0000000000 --- a/tools/xen/files/hvc0.conf +++ /dev/null @@ -1,10 +0,0 @@ -# hvc0 - getty -# -# This service maintains a getty on hvc0 from the point the system is -# started until it is shut down again. - -start on stopped rc RUNLEVEL=[2345] -stop on runlevel [!2345] - -respawn -exec /sbin/getty -8 9600 hvc0 diff --git a/tools/xen/scripts/mkxva b/tools/xen/scripts/mkxva deleted file mode 100755 index 392c05b407..0000000000 --- a/tools/xen/scripts/mkxva +++ /dev/null @@ -1,365 +0,0 @@ -#!/bin/bash -# -# Copyright (c) 2011 Citrix Systems, Inc. -# Copyright 2011 OpenStack Foundation -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -# - -set -eu - -set -o xtrace - -VBOX_IMG=/output/packages/vbox-img - -usage() { - cat >&2 < -t -x - -o: Colon-separated list of output filenames (one for each type). 
- -p: Create a disk label and partition within the output image - -t: Colon-separated list of types of output file. xva and ovf supported. - -x: XML filenames (one for each type) - -EOF - exit 1 -} - -# parse cmdline - -OPT_USE_PARTITION= -OPT_TYPES= -OPT_OUTPUT_FILES= -OPT_XML_FILES= - -while getopts o:pt:x: o -do case "$o" in - o) OPT_OUTPUT_FILES=$(echo "$OPTARG" | sed -e 's/\s*:\s*/ /g') - ;; - p) OPT_USE_PARTITION=1 - ;; - t) OPT_TYPES=$(echo "$OPTARG" | sed -e 's/\s*:\s*/ /g') - ;; - x) OPT_XML_FILES=$(echo "$OPTARG" | sed -e 's/\s*:\s*/ /g') - ;; - [?]) usage - ;; - esac -done -shift $((OPTIND-1)) - -[ $# -ne 3 ] && usage -FS_STAGING="$1" -FS_SIZE_MIB="$2" -TMPDIR="$3" - -if [ "$UID" = "0" ] -then - SUDO= -else - SUDO=sudo -fi - -if [ "$FS_SIZE_MIB" = "0" ] -then - # Just create a dummy file. This allows developers to bypass bits of - # the build by setting the size to 0. - touch $OPT_OUTPUT_FILES - exit 0 -fi - -# create temporary files and dirs -FS_TMPFILE=$(mktemp "$TMPDIR/mkxva-fsimg-XXXXX") -XVA_TARBALL_STAGING=$(mktemp -d "$TMPDIR/mkxva-tarball-staging-XXXXX") -OVF_STAGING=$(mktemp -d "$TMPDIR/mkxva-ovf-staging-XXXXX") - -# Find udevsettle and udevtrigger on this installation -if [ -x "/sbin/udevsettle" ] ; then - UDEVSETTLE="/sbin/udevsettle --timeout=30" -elif [ -x "/sbin/udevadm" ] ; then - UDEVSETTLE='/sbin/udevadm settle' -else - UDEVSETTLE='/bin/true' -fi - -if [ -x "/sbin/udevtrigger" ] ; then - UDEVTRIGGER=/sbin/udevtrigger -elif [ -x "/sbin/udevadm" ] ; then - UDEVTRIGGER='/sbin/udevadm trigger' -else - UDEVTRIGGER= -fi - -# CLEAN_ variables track devices and mounts that must be taken down -# no matter how the script exits. 
Loop devices are vulnerable to -# exhaustion so we make every effort to remove them - -CLEAN_KPARTX= -CLEAN_LOSETUP= -CLEAN_MOUNTPOINT= - -cleanup_devices () { - if [ -n "$CLEAN_MOUNTPOINT" ] ; then - echo "Mountpoint $CLEAN_MOUNTPOINT removed on abnormal exit" - $SUDO umount "$CLEAN_MOUNTPOINT" || echo "umount failed" - rmdir "$CLEAN_MOUNTPOINT" || echo "rmdir failed" - fi - if [ -n "$CLEAN_KPARTX" ] ; then - echo "kpartx devices for $CLEAN_KPARTX removed on abnormal exit" - $SUDO kpartx -d "$CLEAN_KPARTX" || echo "kpartx -d failed" - fi - if [ -n "$CLEAN_LOSETUP" ] ; then - echo "Loop device $CLEAN_LOSETUP removed on abnormal exit" - $SUDO losetup -d "$CLEAN_LOSETUP" # Allow losetup errors to propagate - fi -} - -trap "cleanup_devices" EXIT - -make_fs_inner () { - local staging="$1" - local output="$2" - local options="$3" - CLEAN_MOUNTPOINT=$(mktemp -d "$TMPDIR/mkfs-XXXXXX") - - # copy staging dir contents to fs image - $SUDO mount $options "$output" "$CLEAN_MOUNTPOINT" - $SUDO tar -C "$staging" -c . | tar -C "$CLEAN_MOUNTPOINT" -x - $SUDO umount "$CLEAN_MOUNTPOINT" - rmdir "$CLEAN_MOUNTPOINT" - CLEAN_MOUNTPOINT= -} - -# Turn a staging dir into an ext3 filesystem within a partition -make_fs_in_partition () { - local staging="$1" - local output="$2" - - # create new empty disk - dd if=/dev/zero of="$output" bs=1M count=$FS_SIZE_MIB - # Set up a loop device on the empty disk image - local loopdevice=$($SUDO losetup -f) - $SUDO losetup "$loopdevice" "$output" - CLEAN_LOSETUP="$loopdevice" - # Create a partition table and single partition. 
- # Start partition at sector 63 to allow space for grub - cat < "$CLEAN_MOUNTPOINT/boot/grub/grub.conf" </dev/null - gzip "$file" - else - local file="$outputdir"/$(printf "%08d" $i) - dd if="$diskimg" of="$file" skip=$i bs=1M count=1 2>/dev/null - local chksum=$(sha1sum -b "$file") - echo -n "${chksum/ */}" > "$file.checksum" - fi - i=$(($i + 1)) - done -} - -if [ -n "$OPT_USE_PARTITION" ] ; then - make_fs_in_partition "$FS_STAGING" "$FS_TMPFILE" -else - make_fs "$FS_STAGING" "$FS_TMPFILE" -fi - -VDI_SIZE=$(stat --format=%s "$FS_TMPFILE") - -make_xva () { - local output_file="$1" - local xml_file="$2" - local subdir - local rio - - if [[ `cat $xml_file` =~ "\s*class\s*VDI\s*\s*\s*id\s*(Ref:[0-9]+)" ]] - then - # it's a rio style xva - subdir="${BASH_REMATCH[1]}"; - rio=1 - else - # it's a geneva style xva - subdir="xvda" - rio=0 - fi - - cp "$xml_file" "$XVA_TARBALL_STAGING"/ova.xml - sed -i -e "s/@VDI_SIZE@/$VDI_SIZE/" "$XVA_TARBALL_STAGING"/ova.xml - mkdir "$XVA_TARBALL_STAGING/$subdir" - splitvdi "$FS_TMPFILE" "$XVA_TARBALL_STAGING/$subdir" "$rio" - TARFILE_MEMBERS=$(cd "$XVA_TARBALL_STAGING" && echo ova.xml $subdir/*) - tar -C "$XVA_TARBALL_STAGING" --format=v7 -c $TARFILE_MEMBERS -f "$output_file.tmp" - mv "$output_file.tmp" "$output_file" -} - -make_ovf () { - local output_dir="$1" - local xml_file="$2" - local output_base=$(basename "$output_dir") - local disk="$output_dir/${output_base}.vmdk" - local manifest="$output_dir/${output_base}.mf" - local ovf="$output_dir/${output_base}.ovf" - - mkdir -p "$output_dir" - rm -f "$disk" - $VBOX_IMG convert --srcfilename="$FS_TMPFILE" --dstfilename="$disk" \ - --srcformat RAW --dstformat VMDK --variant Stream - chmod 0644 "$disk" - - local n_bytes=$(stat --printf=%s "$disk") - cp "$xml_file" "$ovf" - sed -i -e "s/@MKXVA_DISK_FULLSIZE@/$VDI_SIZE/" "$ovf" - sed -i -e "s/@MKXVA_DISK_SIZE@/$n_bytes/" "$ovf" - sed -i -e "s/@MKXVA_DISK_MIB_SIZE@/$FS_SIZE_MIB/" "$ovf" - sed -i -e 
"s/@MKXVA_DISK_FILENAME@/${output_base}.vmdk/" "$ovf" - - for to_sign in "$ovf" "$disk" - do - local sha1_sum=$(sha1sum "$to_sign" | cut -d' ' -f1) - echo "SHA1($(basename "$to_sign"))= $sha1_sum" >> $manifest - done -} - -output_files="$OPT_OUTPUT_FILES" -xml_files="$OPT_XML_FILES" -# Iterate through the type list creating the relevant VMs -for create_type in $OPT_TYPES -do - # Shift one parameter from the front of the lists - create_output_file="${output_files%% *}" - output_files="${output_files#* }" - create_xml_file="${xml_files%% *}" - xml_files="${xml_files#* }" - echo "Creating $create_type appliance $create_output_file using metadata file $create_xml_file" - - case "$create_type" in - xva) - make_xva "$create_output_file" "$create_xml_file" - ;; - ovf) - make_ovf "$create_output_file" "$create_xml_file" - ;; - *) - echo "Unknown VM type '$create_type'" - exit 1 - ;; - esac - -done - - -# cleanup -if [ -z "${DO_NOT_CLEANUP:-}" ] ; then - rm -rf "$XVA_TARBALL_STAGING" - rm -f "$FS_TMPFILE" -fi diff --git a/tools/xen/scripts/templatedelete.sh b/tools/xen/scripts/templatedelete.sh deleted file mode 100755 index 66765b2446..0000000000 --- a/tools/xen/scripts/templatedelete.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash - -#Usage: ./templatedelete.sh - -templateuuid="$1" - -xe template-param-set other-config:default_template=false uuid="$templateuuid" -xe template-param-set is-a-template=false uuid="$templateuuid" -xe vm-destroy uuid="$templateuuid" diff --git a/tools/xen/templates/hosts.in b/tools/xen/templates/hosts.in deleted file mode 100644 index 8ab4c3e919..0000000000 --- a/tools/xen/templates/hosts.in +++ /dev/null @@ -1,8 +0,0 @@ -127.0.0.1 localhost -127.0.0.1 %HOSTNAME% -::1 localhost ip6-localhost ip6-loopback -fe00::0 ip6-localnet -ff00::0 ip6-mcastprefix -ff02::1 ip6-allnodes -ff02::2 ip6-allrouters - diff --git a/tools/xen/templates/menu.lst.in b/tools/xen/templates/menu.lst.in deleted file mode 100644 index 8bc6426251..0000000000 --- 
a/tools/xen/templates/menu.lst.in +++ /dev/null @@ -1,6 +0,0 @@ -default 0 - -title default - root (hd0,0) - kernel /boot/vmlinuz-@KERNEL_VERSION@ ro root=LABEL=vpxroot console=xvc0 - initrd /boot/initrd.img-@KERNEL_VERSION@ diff --git a/tools/xen/templates/ova.xml.in b/tools/xen/templates/ova.xml.in deleted file mode 100644 index 01041e2030..0000000000 --- a/tools/xen/templates/ova.xml.in +++ /dev/null @@ -1,14 +0,0 @@ - - - - - - - - - - - - From e446fc3f5982089770a936cd8614fe75566cc103 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Wed, 8 Jul 2015 12:10:15 -0700 Subject: [PATCH 0362/2941] Set ATTACH_ENCRYPTED_VOLUME_AVAILABLE=False if testing with Ceph The encrypted Cinder volume tests in Tempest don't actually work properly for a Ceph backend in cinder since the volume encryption support is not in Nova for RBD volume types. This is needed for Cinder change I03f8cae05cc117e14f7482115de685fc9f3fa54a which tells Nova that the rbd volume connection is on an encrypted volume type. Related-Bug: #1463525 Change-Id: I8548d41095513b9e669f773e3f35353e9228ead9 --- lib/ceph | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/ceph b/lib/ceph index 16dcda2f9e..5104629065 100644 --- a/lib/ceph +++ b/lib/ceph @@ -74,6 +74,10 @@ CEPH_REPLICAS_SEQ=$(seq ${CEPH_REPLICAS}) REMOTE_CEPH=$(trueorfalse False REMOTE_CEPH) REMOTE_CEPH_ADMIN_KEY_PATH=${REMOTE_CEPH_ADMIN_KEY_PATH:-$CEPH_CONF_DIR/ceph.client.admin.keyring} +# Cinder encrypted volume tests are not supported with a Ceph backend due to +# bug 1463525. 
+ATTACH_ENCRYPTED_VOLUME_AVAILABLE=False + # Functions # ------------ From 23cd71ac94cf11a18d5d153b963a96eabcb0cd15 Mon Sep 17 00:00:00 2001 From: Zhang Jinnan Date: Wed, 8 Jul 2015 11:36:32 -0400 Subject: [PATCH 0363/2941] Update Centos/RHEL to use Kilo RDO packages Update Centos/RHEL to use Kilo based RDO Change-Id: I901c4e9dfb6bce377b6afb19bbce94c71dfe127f --- stack.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index 17cbe75802..012b726811 100755 --- a/stack.sh +++ b/stack.sh @@ -273,8 +273,8 @@ EOF # ... and also optional to be enabled sudo yum-config-manager --enable rhel-7-server-optional-rpms - RHEL_RDO_REPO_RPM=${RHEL7_RDO_REPO_RPM:-"https://repos.fedorapeople.org/repos/openstack/openstack-juno/rdo-release-juno-1.noarch.rpm"} - RHEL_RDO_REPO_ID=${RHEL7_RDO_REPO_ID:-"openstack-juno"} + RHEL_RDO_REPO_RPM=${RHEL7_RDO_REPO_RPM:-"https://repos.fedorapeople.org/repos/openstack/openstack-kilo/rdo-release-kilo-1.noarch.rpm"} + RHEL_RDO_REPO_ID=${RHEL7_RDO_REPO_ID:-"openstack-kilo"} if ! sudo yum repolist enabled $RHEL_RDO_REPO_ID | grep -q $RHEL_RDO_REPO_ID; then echo "RDO repo not detected; installing" From f0247ed21ab38e525fb6edace5dbbdd7c14cfb2d Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 9 Jul 2015 15:49:16 +1000 Subject: [PATCH 0364/2941] Move xtrace early I dug back through the history to see why xtrace is enabled where it is. Originally (like first commit originally), it was somewhat sanely placed to turn on tracing after it had done the interactive "read_password" prompt stuff. Over time, it has just shuffled it's way down as stuff got added around it. This was noticed this because I was looking for tracing of earlier commands when looking at the repo setup (see Iec2ad7b5598fdaefbc2338ade702bc7b08963b96) and couldn't find it. Putting this at the start means we both capture all output unconditionally, and avoid needlessly getting this interleaved at some odd place again. 
Change-Id: I441d7eecbab9d204258c18a071ccc1cbf4f7512a --- stack.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/stack.sh b/stack.sh index 17cbe75802..dea820a050 100755 --- a/stack.sh +++ b/stack.sh @@ -21,6 +21,10 @@ # Learn more and get the most recent version at http://devstack.org +# Print the commands being run so that we can see the command that triggers +# an error. It is also useful for following along as the install occurs. +set -o xtrace + # Make sure custom grep options don't get in the way unset GREP_OPTIONS @@ -485,10 +489,6 @@ function err_trap { # Begin trapping error exit codes set -o errexit -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following along as the install occurs. -set -o xtrace - # Print the kernel version uname -a From 89ee58523050443a38c284e8f0920dae152a901a Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Thu, 9 Jul 2015 13:25:04 -0700 Subject: [PATCH 0365/2941] neutron: add NOVA_ALLOW_DUPLICATE_NETWORKS config option Nova commit 322cc9336fe6f6fe9b3f0da33c6b26a3e5ea9b0c added the neutron.allow_duplicate_networks config option in Juno and it defaults to False. The option was deprecated in Kilo with commit 4306d9190f49e7fadf88669d18effedabc880d3b and removed in Liberty with commit b06867c581541ed325ddc5e5b5a2d53b1b0261ac so it's the default behavior in Liberty. To test it in the gate with Tempest, we need to be able to set it to True in devstack-gate and update tempest.conf (since tempest is branchless and we don't want to try to test duplicate networks against kilo/juno code). We can remove the change to lib/tempest when it's removed from Tempest after kilo-eol. 
Depends-On: I05f81d86cde249c23be06d5804fadbf40fc4a7f3 Change-Id: Ifd075420f57c9b60746f4a6af6520c0ef04800db --- lib/tempest | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/tempest b/lib/tempest index a84ade2a81..cb5711f7d8 100644 --- a/lib/tempest +++ b/lib/tempest @@ -30,6 +30,7 @@ # - ``DEFAULT_INSTANCE_TYPE`` # - ``DEFAULT_INSTANCE_USER`` # - ``CINDER_ENABLED_BACKENDS`` +# - ``NOVA_ALLOW_DUPLICATE_NETWORKS`` # # ``stack.sh`` calls the entry points in this order: # @@ -380,6 +381,10 @@ function configure_tempest { # TODO(gilliard): Remove the live_migrate_paused_instances flag when Juno is end of life. iniset $TEMPEST_CONFIG compute-feature-enabled live_migrate_paused_instances True iniset $TEMPEST_CONFIG compute-feature-enabled attach_encrypted_volume ${ATTACH_ENCRYPTED_VOLUME_AVAILABLE:-True} + # TODO(mriedem): Remove this when kilo-eol happens since the + # neutron.allow_duplicate_networks option was removed from nova in Liberty + # and is now the default behavior. + iniset $TEMPEST_CONFIG compute-feature-enabled allow_duplicate_networks ${NOVA_ALLOW_DUPLICATE_NETWORKS:-True} # Network iniset $TEMPEST_CONFIG network api_version 2.0 From ee6161720360f589485596dd572286d7a1865b17 Mon Sep 17 00:00:00 2001 From: Bob Ball Date: Mon, 22 Jun 2015 13:48:57 +0100 Subject: [PATCH 0366/2941] XenAPI: Update to a newer Cirros image Update to 0.3.4 as it has support for config drive v2 Change-Id: Id8143f16fb3f0b6ce82c1332e8f695ac739a9e8c --- stackrc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stackrc b/stackrc index 09ba3e9807..c8069be994 100644 --- a/stackrc +++ b/stackrc @@ -564,8 +564,8 @@ case "$VIRT_DRIVER" in DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.2-i386-disk.vmdk} IMAGE_URLS=${IMAGE_URLS:-"http://partnerweb.vmware.com/programs/vmdkimage/cirros-0.3.2-i386-disk.vmdk"};; xenserver) - DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.0-x86_64-disk} - 
IMAGE_URLS=${IMAGE_URLS:-"https://github.com/downloads/citrix-openstack/warehouse/cirros-0.3.0-x86_64-disk.vhd.tgz"} + DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.4-x86_64-disk} + IMAGE_URLS=${IMAGE_URLS:-"http://ca.downloads.xensource.com/OpenStack/cirros-0.3.4-x86_64-disk.vhd.tgz"} IMAGE_URLS+=",http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz";; ironic) # Ironic can do both partition and full disk images, depending on the driver From a3c94468baa159840a47c34cf94d97d816208313 Mon Sep 17 00:00:00 2001 From: Dirk Mueller Date: Tue, 23 Jun 2015 12:23:29 +0200 Subject: [PATCH 0367/2941] Remove support for enabling file injection File injection is disabled in nova meanwhile, and devstack core reviewers think it shouldn't be configureable in devstack anymore. This basically reverts https://review.openstack.org/#/c/70560/ Change-Id: Ia7dd407da00c0b1c9641865aea1f7b74533d7357 --- lib/nova_plugins/hypervisor-libvirt | 25 +++---------------------- 1 file changed, 3 insertions(+), 22 deletions(-) diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index f70b21a475..f52629dc91 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -25,9 +25,6 @@ source $TOP_DIR/lib/nova_plugins/functions-libvirt # Defaults # -------- -# File injection is disabled by default in Nova. This will turn it back on. -ENABLE_FILE_INJECTION=$(trueorfalse False ENABLE_FILE_INJECTION) - # Entry Points # ------------ @@ -60,17 +57,9 @@ function configure_nova_hypervisor { iniset $NOVA_CONF DEFAULT vnc_enabled "false" fi - if [[ "$ENABLE_FILE_INJECTION" = "True" ]] ; then - # When libguestfs is available for file injection, enable using - # libguestfs to inspect the image and figure out the proper - # partition to inject into. 
- iniset $NOVA_CONF libvirt inject_partition '-1' - iniset $NOVA_CONF libvirt inject_key 'true' - else - # File injection is being disabled by default in the near future - - # disable it here for now to avoid surprises later. - iniset $NOVA_CONF libvirt inject_partition '-2' - fi + # File injection is being disabled by default in the near future - + # disable it here for now to avoid surprises later. + iniset $NOVA_CONF libvirt inject_partition '-2' if [[ "$LIBVIRT_TYPE" = "parallels" ]]; then iniset $NOVA_CONF libvirt connection_uri "parallels+unix:///system" @@ -96,14 +85,6 @@ function install_nova_hypervisor { yum_install libcgroup-tools fi fi - - if [[ "$ENABLE_FILE_INJECTION" = "True" ]] ; then - if is_ubuntu; then - install_package python-guestfs - elif is_fedora || is_suse; then - install_package python-libguestfs - fi - fi } # start_nova_hypervisor - Start any required external services From 11cf23e03457357368cc40622ecc37cebe56293c Mon Sep 17 00:00:00 2001 From: Jeffrey Zhang Date: Wed, 15 Jul 2015 08:18:23 +0800 Subject: [PATCH 0368/2941] Fix the typo zaqar Change-Id: Ic05b88a55f4110cd2e72985c7f3f544d0de8dd67 --- doc/source/plugin-registry.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index c5c4e1eaa5..99bfb85b38 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -26,7 +26,7 @@ The following are plugins that exist for official OpenStack projects. 
+--------------------+-------------------------------------------+--------------------+ |trove |git://git.openstack.org/openstack/trove | | +--------------------+-------------------------------------------+--------------------+ -|zaqar |git://git.openstack.org/openstack/zarar | | +|zaqar |git://git.openstack.org/openstack/zaqar | | +--------------------+-------------------------------------------+--------------------+ From 7aaaf9090145b3e84e1d9a3c90e12bd370f5f31f Mon Sep 17 00:00:00 2001 From: Isao Yamagata Date: Sat, 11 Jul 2015 16:07:22 +0000 Subject: [PATCH 0369/2941] Fix distribution link in documentation Fix broken link to distributions of OpenStack Closes-Bug: #1469433 Change-Id: Ica572e11c0dedfe0684fad1c4d48b248f7003d47 --- doc/source/faq.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/faq.rst b/doc/source/faq.rst index f61002baf2..0db8932e9c 100644 --- a/doc/source/faq.rst +++ b/doc/source/faq.rst @@ -16,7 +16,7 @@ upstream code. It makes many choices that are not appropriate for production systems. Your best choice is probably to choose a `distribution of OpenStack -`__. +`__. Why a shell script, why not chef/puppet/... ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ From 54ee8a82a837eb2b28746c77e0259ec326ab15cc Mon Sep 17 00:00:00 2001 From: Maxim Nestratov Date: Wed, 15 Jul 2015 11:47:11 +0300 Subject: [PATCH 0370/2941] Make it possible to upload ploop images Add support of ploop images (*.hds extension) for both exe and hvm types. In devstack we assume that images have '-exe' and '-hvm' suffixes in their names correspondently. 
Change-Id: I1c074876c530be0535a6e02e764d67a4ebcbbbe5 --- functions | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/functions b/functions index 1668e16b6d..5bbe18f990 100644 --- a/functions +++ b/functions @@ -219,6 +219,23 @@ function upload_image { return fi + if [[ "$image_url" =~ '.hds' ]]; then + image_name="${image_fname%.hds}" + vm_mode=${image_name##*-} + if [[ $vm_mode != 'exe' && $vm_mode != 'hvm' ]]; then + die $LINENO "Unknown vm_mode=${vm_mode} for Virtuozzo image" + fi + + openstack \ + --os-token $token \ + --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT \ + image create \ + "$image_name" --public \ + --container-format=bare --disk-format=ploop \ + --property vm_mode=$vm_mode < "${image}" + return + fi + local kernel="" local ramdisk="" local disk_format="" From e6f37b91e5cd5b0f70d2be1e75eb4f61adc1e16a Mon Sep 17 00:00:00 2001 From: Maxim Nestratov Date: Tue, 30 Jun 2015 14:54:12 +0300 Subject: [PATCH 0371/2941] libvirt virt_type=parallels support enchancement As soon as Parallels Cloud Server/Virtuozzo is based on CloudLinux distribution this new rpm kind of distribution is introduced. Also we setup vnc and set vnc_encoding parameter to None as soon it isn't supported by parallels. 
Change-Id: Ib97a09f397f950227498cfc2ce162d19b700f6f4 --- functions-common | 6 ++++-- lib/nova_plugins/hypervisor-libvirt | 4 ++++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/functions-common b/functions-common index 061a9356f5..813d164272 100644 --- a/functions-common +++ b/functions-common @@ -269,8 +269,9 @@ function GetOSVersion { # Fedora release 16 (Verne) # XenServer release 6.2.0-70446c (xenenterprise) # Oracle Linux release 7 + # CloudLinux release 7.1 os_CODENAME="" - for r in "Red Hat" CentOS Fedora XenServer; do + for r in "Red Hat" CentOS Fedora XenServer CloudLinux; do os_VENDOR=$r if [[ -n "`grep \"$r\" /etc/redhat-release`" ]]; then ver=`sed -e 's/^.* \([0-9].*\) (\(.*\)).*$/\1\|\2/' /etc/redhat-release` @@ -374,7 +375,8 @@ function is_fedora { fi [ "$os_VENDOR" = "Fedora" ] || [ "$os_VENDOR" = "Red Hat" ] || \ - [ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "OracleLinux" ] + [ "$os_VENDOR" = "CentOS" ] || [ "$os_VENDOR" = "OracleLinux" ] || \ + [ "$os_VENDOR" = "CloudLinux" ] } diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index f70b21a475..dfd8a67fd9 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -75,6 +75,10 @@ function configure_nova_hypervisor { if [[ "$LIBVIRT_TYPE" = "parallels" ]]; then iniset $NOVA_CONF libvirt connection_uri "parallels+unix:///system" iniset $NOVA_CONF libvirt images_type "ploop" + iniset $NOVA_CONF DEFAULT force_raw_images "False" + iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address $HOST_IP + iniset $NOVA_CONF DEFAULT vncserver_listen $HOST_IP + iniset $NOVA_CONF DEFAULT vnc_keymap fi } From 54dc19ecad2ed06694a4b6269c2833d9533a26f5 Mon Sep 17 00:00:00 2001 From: Mahito OGURA Date: Tue, 14 Jul 2015 17:16:42 +0900 Subject: [PATCH 0372/2941] Add export_proxy_variables() tests to test_functions.sh In test_functions.sh, There aren't export_proxy_variables() tests. 
This patch add test of export_proxy_variables to test_funstions.sh. Change-Id: I76f2bab84f4019961e612b0bff0ab66646b6e160 --- tests/test_functions.sh | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/tests/test_functions.sh b/tests/test_functions.sh index f555de8dff..be8dc5e287 100755 --- a/tests/test_functions.sh +++ b/tests/test_functions.sh @@ -245,4 +245,33 @@ else passed "OK" fi +function test_export_proxy_variables { + echo "Testing export_proxy_variables()" + + local expected results + + http_proxy=http_proxy_test + https_proxy=https_proxy_test + no_proxy=no_proxy_test + + export_proxy_variables + expected=$(echo -e "http_proxy=$http_proxy\nhttps_proxy=$https_proxy\nno_proxy=$no_proxy") + results=$(env | egrep '(http(s)?|no)_proxy=') + if [[ $expected = $results ]]; then + passed "OK: Proxy variables are exported when proxy variables are set" + else + failed "Expected: $expected, Failed: $results" + fi + + unset http_proxy https_proxy no_proxy + export_proxy_variables + results=$(env | egrep '(http(s)?|no)_proxy=') + if [[ "" = $results ]]; then + passed "OK: Proxy variables aren't exported when proxy variables aren't set" + else + failed "Expected: '', Failed: $results" + fi +} +test_export_proxy_variables + report_results From 7e5fb63b71364637eaa81a0f978af76fe3df2c97 Mon Sep 17 00:00:00 2001 From: Jamie Lennox Date: Thu, 16 Jul 2015 10:40:43 +1000 Subject: [PATCH 0373/2941] Always use volume v1 API for type create. OpenStackClient doesn't currently support volume type create on the V2 API. Make sure that all requests use the V1 api until this has been fixed in OpenStackClient. 
Change-Id: I2fa133d30753e188d383d3de78c0022a3625cb34 Closes-Bug: #1475062 --- lib/cinder | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/cinder b/lib/cinder index a9a9f0d298..e5ed2db1a3 100644 --- a/lib/cinder +++ b/lib/cinder @@ -485,7 +485,9 @@ function create_volume_types { local be be_name for be in ${CINDER_ENABLED_BACKENDS//,/ }; do be_name=${be##*:} - openstack volume type create --property volume_backend_name="${be_name}" ${be_name} + # FIXME(jamielennox): Remove --os-volume-api-version pinning when + # osc supports volume type create on v2 api. bug #1475060 + openstack volume type create --os-volume-api-version 1 --property volume_backend_name="${be_name}" ${be_name} done fi } From 11d276c73cdd848c0287f6718d0163369cefd157 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 2 Jul 2015 09:34:34 +1000 Subject: [PATCH 0374/2941] local call masks errors in subshells In another "things from the man page" The return status is 0 unless local is used outside a function, an invalid name is supplied, or name is a readonly variable. Thus if anything fails in "cmd" of "local foo=$( cmd )" we don't notice. 
Change-Id: I22b10d5d39f014b6c92d2e101b167cbacf81afca --- functions-common | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/functions-common b/functions-common index 322bf82f30..a2a9a3daf4 100644 --- a/functions-common +++ b/functions-common @@ -684,9 +684,10 @@ function policy_add { # Gets or creates a domain # Usage: get_or_create_domain function get_or_create_domain { + local domain_id local os_url="$KEYSTONE_SERVICE_URI_V3" # Gets domain id - local domain_id=$( + domain_id=$( # Gets domain id openstack --os-token=$OS_TOKEN --os-url=$os_url \ --os-identity-api-version=3 domain show $1 \ @@ -705,8 +706,9 @@ function get_or_create_domain { function get_or_create_group { local desc="${3:-}" local os_url="$KEYSTONE_SERVICE_URI_V3" + local group_id # Gets group id - local group_id=$( + group_id=$( # Creates new group with --or-show openstack --os-token=$OS_TOKEN --os-url=$os_url \ --os-identity-api-version=3 group create $1 \ @@ -719,13 +721,14 @@ function get_or_create_group { # Gets or creates user # Usage: get_or_create_user [] function get_or_create_user { + local user_id if [[ ! 
-z "$4" ]]; then local email="--email=$4" else local email="" fi # Gets user id - local user_id=$( + user_id=$( # Creates new user with --or-show openstack user create \ $1 \ @@ -743,7 +746,8 @@ function get_or_create_user { # Gets or creates project # Usage: get_or_create_project function get_or_create_project { - local project_id=$( + local project_id + project_id=$( # Creates new project with --or-show openstack --os-url=$KEYSTONE_SERVICE_URI_V3 \ --os-identity-api-version=3 \ @@ -757,7 +761,8 @@ function get_or_create_project { # Gets or creates role # Usage: get_or_create_role function get_or_create_role { - local role_id=$( + local role_id + role_id=$( # Creates role with --or-show openstack role create $1 \ --os-url=$KEYSTONE_SERVICE_URI_V3 \ @@ -770,8 +775,9 @@ function get_or_create_role { # Gets or adds user role to project # Usage: get_or_add_user_project_role function get_or_add_user_project_role { + local user_role_id # Gets user role id - local user_role_id=$(openstack role list \ + user_role_id=$(openstack role list \ --user $2 \ --os-url=$KEYSTONE_SERVICE_URI_V3 \ --os-identity-api-version=3 \ @@ -795,8 +801,9 @@ function get_or_add_user_project_role { # Gets or adds group role to project # Usage: get_or_add_group_project_role function get_or_add_group_project_role { + local group_role_id # Gets group role id - local group_role_id=$(openstack role list \ + group_role_id=$(openstack role list \ --os-url=$KEYSTONE_SERVICE_URI_V3 \ --os-identity-api-version=3 \ --group $2 \ @@ -822,8 +829,9 @@ function get_or_add_group_project_role { # Gets or creates service # Usage: get_or_create_service function get_or_create_service { + local service_id # Gets service id - local service_id=$( + service_id=$( # Gets service id openstack service show $2 -f value -c id 2>/dev/null || # Creates new service if not exists @@ -841,7 +849,8 @@ function get_or_create_service { # Create an endpoint with a specific interface # Usage: _get_or_create_endpoint_with_interface 
function _get_or_create_endpoint_with_interface { - local endpoint_id=$(openstack endpoint list \ + local endpoint_id + endpoint_id=$(openstack endpoint list \ --os-url $KEYSTONE_SERVICE_URI_V3 \ --os-identity-api-version=3 \ --service $1 \ From 2fa8944057787fc01e94f9f94a905cee9492edc5 Mon Sep 17 00:00:00 2001 From: Ken'ichi Ohmichi Date: Fri, 17 Jul 2015 03:08:55 +0000 Subject: [PATCH 0375/2941] Remove osapi_v3 configuration Nova option osapi_v3 is used for Nova v2.1 API and the default value was False(disabled). However Nova v2.1 API is CURRENT status and the API should be enabled as the default as we discussed on http://lists.openstack.org/pipermail/openstack-dev/2015-July/069624.html We could not find it before because devstack makes it True, so this patch removes it for avoiding confusion any more. Change-Id: I4efd2036605a1a41ea297b44a5f31b2da7412593 Related-Bug: #1462901 Depends-on: I43f0352f9fa89401f79389a6dc1035d901f52ed2 --- lib/nova | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/nova b/lib/nova index a6cd651a9c..6441a891eb 100644 --- a/lib/nova +++ b/lib/nova @@ -490,7 +490,6 @@ function create_nova_conf { iniset $NOVA_CONF database connection `database_connection_url nova` iniset $NOVA_CONF api_database connection `database_connection_url nova_api` iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x" - iniset $NOVA_CONF osapi_v3 enabled "True" iniset $NOVA_CONF DEFAULT osapi_compute_listen "$NOVA_SERVICE_LISTEN_ADDRESS" iniset $NOVA_CONF DEFAULT ec2_listen "$NOVA_SERVICE_LISTEN_ADDRESS" iniset $NOVA_CONF DEFAULT metadata_listen "$NOVA_SERVICE_LISTEN_ADDRESS" From 92ad15251226dc8f19ad5f901c48f4eb8892f24e Mon Sep 17 00:00:00 2001 From: Rawlin Peters Date: Mon, 20 Jul 2015 13:33:33 -0600 Subject: [PATCH 0376/2941] Explicitly set bind_ip in Swift server config files Currently, the Swift proxy, object, account, and container servers bind to IPv4 address 0.0.0.0 by default. 
In the case of a user setting SERVICE_IP_VERSION=6 in their local.conf file, these Swift servers still listen on 0.0.0.0 instead of ::, which causes a ./stack.sh run to fail. This change explicitly sets the bind_ip variable in the Swift server config files so that the servers bind to either 0.0.0.0 (when SERVICE_IP_VERSION != 6) or :: (when SERVICE_IP_VERSION == 6). This patch is related to the following patch for devstack IPv6 support: https://review.openstack.org/#/c/192329 Change-Id: Ie268c6daf5374e67ef8710a731c3af50ffdb821e --- lib/swift | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/lib/swift b/lib/swift index 826f233414..96d730ef2c 100644 --- a/lib/swift +++ b/lib/swift @@ -46,6 +46,7 @@ SWIFT3_DIR=$DEST/swift3 SWIFT_SERVICE_PROTOCOL=${SWIFT_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} SWIFT_DEFAULT_BIND_PORT_INT=${SWIFT_DEFAULT_BIND_PORT_INT:-8081} SWIFT_SERVICE_LOCAL_HOST=${SWIFT_SERVICE_LOCAL_HOST:-$SERVICE_LOCAL_HOST} +SWIFT_SERVICE_LISTEN_ADDRESS=${SWIFT_SERVICE_LISTEN_ADDRESS:-$SERVICE_LISTEN_ADDRESS} # TODO: add logging to different location. 
@@ -361,6 +362,9 @@ function configure_swift { iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT log_level iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT log_level DEBUG + iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_ip + iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_ip ${SWIFT_SERVICE_LISTEN_ADDRESS} + iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port if is_service_enabled tls-proxy; then iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT_INT} @@ -463,17 +467,23 @@ EOF local swift_node_config=${SWIFT_CONF_DIR}/object-server/${node_number}.conf cp ${SWIFT_DIR}/etc/object-server.conf-sample ${swift_node_config} generate_swift_config_services ${swift_node_config} ${node_number} $(( OBJECT_PORT_BASE + 10 * (node_number - 1) )) object + iniuncomment ${swift_node_config} DEFAULT bind_ip + iniset ${swift_node_config} DEFAULT bind_ip ${SWIFT_SERVICE_LISTEN_ADDRESS} iniset ${swift_node_config} filter:recon recon_cache_path ${SWIFT_DATA_DIR}/cache swift_node_config=${SWIFT_CONF_DIR}/container-server/${node_number}.conf cp ${SWIFT_DIR}/etc/container-server.conf-sample ${swift_node_config} generate_swift_config_services ${swift_node_config} ${node_number} $(( CONTAINER_PORT_BASE + 10 * (node_number - 1) )) container + iniuncomment ${swift_node_config} DEFAULT bind_ip + iniset ${swift_node_config} DEFAULT bind_ip ${SWIFT_SERVICE_LISTEN_ADDRESS} iniuncomment ${swift_node_config} app:container-server allow_versions iniset ${swift_node_config} app:container-server allow_versions "true" swift_node_config=${SWIFT_CONF_DIR}/account-server/${node_number}.conf cp ${SWIFT_DIR}/etc/account-server.conf-sample ${swift_node_config} generate_swift_config_services ${swift_node_config} ${node_number} $(( ACCOUNT_PORT_BASE + 10 * (node_number - 1) )) account + iniuncomment ${swift_node_config} DEFAULT bind_ip + iniset ${swift_node_config} DEFAULT bind_ip ${SWIFT_SERVICE_LISTEN_ADDRESS} done # Set new accounts in tempauth to match keystone 
tenant/user (to make testing easier) From 3756687d1777153e1d6fbf938a74470011aa7bef Mon Sep 17 00:00:00 2001 From: Pradeep Kilambi Date: Mon, 20 Jul 2015 16:16:31 -0400 Subject: [PATCH 0377/2941] Include meter.yaml when devstack sets up ceilometer Partially Implements: blueprint declarative-notifications Change-Id: Ia20f3558eb85b4e3478e33a9e54b7e696eb6c3fd --- lib/ceilometer | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ceilometer b/lib/ceilometer index 7905384623..9226d85e20 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -211,6 +211,7 @@ function configure_ceilometer { cp $CEILOMETER_DIR/etc/ceilometer/event_pipeline.yaml $CEILOMETER_CONF_DIR cp $CEILOMETER_DIR/etc/ceilometer/api_paste.ini $CEILOMETER_CONF_DIR cp $CEILOMETER_DIR/etc/ceilometer/event_definitions.yaml $CEILOMETER_CONF_DIR + cp $CEILOMETER_DIR/etc/ceilometer/meters.yaml $CEILOMETER_CONF_DIR if [ "$CEILOMETER_PIPELINE_INTERVAL" ]; then sed -i "s/interval:.*/interval: ${CEILOMETER_PIPELINE_INTERVAL}/" $CEILOMETER_CONF_DIR/pipeline.yaml From 0294ddc7352d5cf9ab0eca48a6cab3894aa866dc Mon Sep 17 00:00:00 2001 From: Tony Breeds Date: Tue, 21 Jul 2015 14:11:49 -0500 Subject: [PATCH 0378/2941] mysql: Fix mysql config devstack attempts to set bind-address, sql_mode, default-storage-engine, max_connections, query_cache_type and query_cache_size. However the bash command is missing some '&&'s and was omiting max_connections, query_cache_type and query_cache_size. 
Change-Id: I24388b5de777995f92d73076524122cf599d6371 --- lib/databases/mysql | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/databases/mysql b/lib/databases/mysql index 9c9401edf6..fb55b60ff6 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -95,9 +95,9 @@ function configure_database_mysql { sudo bash -c "source $TOP_DIR/functions && \ iniset $my_conf mysqld bind-address "$SERVICE_LISTEN_ADDRESS" && \ iniset $my_conf mysqld sql_mode STRICT_ALL_TABLES && \ - iniset $my_conf mysqld default-storage-engine InnoDB \ - iniset $my_conf mysqld max_connections 1024 \ - iniset $my_conf mysqld query_cache_type OFF \ + iniset $my_conf mysqld default-storage-engine InnoDB && \ + iniset $my_conf mysqld max_connections 1024 && \ + iniset $my_conf mysqld query_cache_type OFF && \ iniset $my_conf mysqld query_cache_size 0" From c1dded9b91ae46d339430efcc69b0d3a4882b88b Mon Sep 17 00:00:00 2001 From: Hiroshi Miura Date: Wed, 22 Jul 2015 12:18:35 +0900 Subject: [PATCH 0379/2941] fix typo in guide for nested kvm Change-Id: Ie103a097830401248c75fcb16d4dd746bbbb7288 Signed-off-by: Hiroshi Miura --- doc/source/guides/devstack-with-nested-kvm.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/guides/devstack-with-nested-kvm.rst b/doc/source/guides/devstack-with-nested-kvm.rst index b35492ea17..c652bacced 100644 --- a/doc/source/guides/devstack-with-nested-kvm.rst +++ b/doc/source/guides/devstack-with-nested-kvm.rst @@ -88,7 +88,7 @@ host: parm: nested:int To make the above value persistent across reboots, add an entry in -/etc/modprobe.ddist.conf so it looks as below:: +/etc/modprobe.d/dist.conf so it looks as below:: cat /etc/modprobe.d/dist.conf options kvm-amd nested=y From 92884ede5d20a399186c61cf5e003da61838eec4 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 22 Jul 2015 12:16:45 +1000 Subject: [PATCH 0380/2941] ini-config : always reenable xtrace on return These return paths doesn't renable xtrace, so output 
mysteriously goes missing until the next time it is enabled. Change-Id: I3a8018dfa9397c07534970c39fba8dc10afcbe41 --- inc/ini-config | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/inc/ini-config b/inc/ini-config index 26401f3917..8e7c0187ae 100644 --- a/inc/ini-config +++ b/inc/ini-config @@ -104,7 +104,10 @@ function iniadd_literal { local option=$3 local value=$4 - [[ -z $section || -z $option ]] && return + if [[ -z $section || -z $option ]]; then + $xtrace + return + fi # Add it sed -i -e "/^\[$section\]/ a\\ @@ -123,7 +126,10 @@ function inidelete { local section=$2 local option=$3 - [[ -z $section || -z $option ]] && return + if [[ -z $section || -z $option ]]; then + $xtrace + return + fi # Remove old values sed -i -e "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ d; }" "$file" @@ -141,7 +147,10 @@ function iniset { local option=$3 local value=$4 - [[ -z $section || -z $option ]] && return + if [[ -z $section || -z $option ]]; then + $xtrace + return + fi if ! grep -q "^\[$section\]" "$file" 2>/dev/null; then # Add section at the end From b997db602e15b60c68c0f7a99db74b5d2419a85c Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 22 Jul 2015 10:05:32 +1000 Subject: [PATCH 0381/2941] Cleanup of ini test-case Various cleanup to this file. Firstly create a temporary space to test, rather than working in the source directory. We have "assert_equal" which simplifies a lot. Add "assert_empty" that is used in a couple of tests too. Remove a couple of duplicate tests. 
Change-Id: I7fd476ed63026e67d66a8ac2891b2e4a6687d09c --- tests/test_ini_config.sh | 234 +++++++++++---------------------------- tests/unittest.sh | 23 +++- 2 files changed, 87 insertions(+), 170 deletions(-) diff --git a/tests/test_ini_config.sh b/tests/test_ini_config.sh index b2529ac4c9..3aef6f39bd 100755 --- a/tests/test_ini_config.sh +++ b/tests/test_ini_config.sh @@ -13,7 +13,13 @@ set -e echo "Testing INI functions" -cat >test.ini <${TEST_INI} < Date: Wed, 22 Jul 2015 10:34:47 +1000 Subject: [PATCH 0382/2941] Add -sudo option to ini setting options Add a -sudo option to allow these functions to operate on root-owned files. Test-case is updated, but not enabled by default as we can't expect test-runner to have sudo access. Change-Id: I134c3397314c7d9395996eb6c825ecb7e7fdfc69 --- inc/ini-config | 69 ++++++++++++++++++++++++++++++---------- tests/test_ini_config.sh | 36 +++++++++++++-------- 2 files changed, 74 insertions(+), 31 deletions(-) diff --git a/inc/ini-config b/inc/ini-config index 8e7c0187ae..d23f4743f1 100644 --- a/inc/ini-config +++ b/inc/ini-config @@ -15,30 +15,40 @@ set +o xtrace # ================ # Append a new option in an ini file without replacing the old value -# iniadd config-file section option value1 value2 value3 ... +# iniadd [-sudo] config-file section option value1 value2 value3 ... 
function iniadd { local xtrace=$(set +o | grep xtrace) set +o xtrace + local sudo="" + if [ $1 == "-sudo" ]; then + sudo="-sudo " + shift + fi local file=$1 local section=$2 local option=$3 shift 3 local values="$(iniget_multiline $file $section $option) $@" - iniset_multiline $file $section $option $values + iniset_multiline $sudo $file $section $option $values $xtrace } # Comment an option in an INI file -# inicomment config-file section option +# inicomment [-sudo] config-file section option function inicomment { local xtrace=$(set +o | grep xtrace) set +o xtrace + local sudo="" + if [ $1 == "-sudo" ]; then + sudo="sudo " + shift + fi local file=$1 local section=$2 local option=$3 - sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" "$file" + $sudo sed -i -e "/^\[$section\]/,/^\[.*\]/ s|^\($option[ \t]*=.*$\)|#\1|" "$file" $xtrace } @@ -95,10 +105,15 @@ function ini_has_option { # in the argument list. Doing that will cause incorrect configuration # if spaces are used in the config values. 
# -# iniadd_literal config-file section option value +# iniadd_literal [-sudo] config-file section option value function iniadd_literal { local xtrace=$(set +o | grep xtrace) set +o xtrace + local sudo="" + if [ $1 == "-sudo" ]; then + sudo="sudo " + shift + fi local file=$1 local section=$2 local option=$3 @@ -110,7 +125,7 @@ function iniadd_literal { fi # Add it - sed -i -e "/^\[$section\]/ a\\ + $sudo sed -i -e "/^\[$section\]/ a\\ $option = $value " "$file" @@ -118,10 +133,15 @@ $option = $value } # Remove an option from an INI file -# inidelete config-file section option +# inidelete [-sudo] config-file section option function inidelete { local xtrace=$(set +o | grep xtrace) set +o xtrace + local sudo="" + if [ $1 == "-sudo" ]; then + sudo="sudo " + shift + fi local file=$1 local section=$2 local option=$3 @@ -132,16 +152,21 @@ function inidelete { fi # Remove old values - sed -i -e "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ d; }" "$file" + $sudo sed -i -e "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ d; }" "$file" $xtrace } # Set an option in an INI file -# iniset config-file section option value +# iniset [-sudo] config-file section option value function iniset { local xtrace=$(set +o | grep xtrace) set +o xtrace + local sudo="" + if [ $1 == "-sudo" ]; then + sudo="sudo " + shift + fi local file=$1 local section=$2 local option=$3 @@ -154,26 +179,31 @@ function iniset { if ! grep -q "^\[$section\]" "$file" 2>/dev/null; then # Add section at the end - echo -e "\n[$section]" >>"$file" + echo -e "\n[$section]" | $sudo tee --append "$file" > /dev/null fi if ! 
ini_has_option "$file" "$section" "$option"; then # Add it - sed -i -e "/^\[$section\]/ a\\ + $sudo sed -i -e "/^\[$section\]/ a\\ $option = $value " "$file" else local sep=$(echo -ne "\x01") # Replace it - sed -i -e '/^\['${section}'\]/,/^\[.*\]/ s'${sep}'^\('${option}'[ \t]*=[ \t]*\).*$'${sep}'\1'"${value}"${sep} "$file" + $sudo sed -i -e '/^\['${section}'\]/,/^\[.*\]/ s'${sep}'^\('${option}'[ \t]*=[ \t]*\).*$'${sep}'\1'"${value}"${sep} "$file" fi $xtrace } # Set a multiple line option in an INI file -# iniset_multiline config-file section option value1 value2 valu3 ... +# iniset_multiline [-sudo] config-file section option value1 value2 valu3 ... function iniset_multiline { local xtrace=$(set +o | grep xtrace) set +o xtrace + local sudo="" + if [ $1 == "-sudo" ]; then + sudo="sudo " + shift + fi local file=$1 local section=$2 local option=$3 @@ -188,14 +218,14 @@ function iniset_multiline { done if ! grep -q "^\[$section\]" "$file"; then # Add section at the end - echo -e "\n[$section]" >>"$file" + echo -e "\n[$section]" | $sudo tee --append "$file" > /dev/null else # Remove old values - sed -i -e "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ d; }" "$file" + $sudo sed -i -e "/^\[$section\]/,/^\[.*\]/ { /^$option[ \t]*=/ d; }" "$file" fi # Add new ones for v in $values; do - sed -i -e "/^\[$section\]/ a\\ + $sudo sed -i -e "/^\[$section\]/ a\\ $option = $v " "$file" done @@ -207,10 +237,15 @@ $option = $v function iniuncomment { local xtrace=$(set +o | grep xtrace) set +o xtrace + local sudo="" + if [ $1 == "-sudo" ]; then + sudo="sudo " + shift + fi local file=$1 local section=$2 local option=$3 - sed -i -e "/^\[$section\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" "$file" + $sudo sed -i -e "/^\[$section\]/,/^\[.*\]/ s|[^ \t]*#[ \t]*\($option[ \t]*=.*$\)|\1|" "$file" $xtrace } diff --git a/tests/test_ini_config.sh b/tests/test_ini_config.sh index 3aef6f39bd..61f2c410f9 100755 --- a/tests/test_ini_config.sh +++ b/tests/test_ini_config.sh @@ -71,15 
+71,23 @@ b=d EOF -# Test with missing arguments +# set TEST_SUDO to test writing to root-owned files +SUDO_ARG="" +SUDO="" +if [ -n "$TEST_SUDO" ]; then + SUDO="sudo " + SUDO_ARG="-sudo " + sudo chown -R root:root ${INI_TMP_ETC_DIR} +fi +# Test with missing arguments BEFORE=$(cat ${TEST_INI}) -iniset ${TEST_INI} aaa +iniset ${SUDO_ARG} ${TEST_INI} aaa NO_ATTRIBUTE=$(cat ${TEST_INI}) assert_equal "$BEFORE" "$NO_ATTRIBUTE" "test missing attribute argument" -iniset ${TEST_INI} +iniset ${SUDO_ARG} ${TEST_INI} NO_SECTION=$(cat ${TEST_INI}) assert_equal "$BEFORE" "$NO_SECTION" "missing section argument" @@ -87,7 +95,7 @@ assert_equal "$BEFORE" "$NO_SECTION" "missing section argument" VAL=$(iniget ${TEST_INI} aaa handlers) assert_equal "$VAL" "aa, bb" "iniget spaces in option" -iniset ${TEST_INI} aaa handlers "11, 22" +iniset ${SUDO_ARG} ${TEST_INI} aaa handlers "11, 22" VAL=$(iniget ${TEST_INI} aaa handlers) assert_equal "$VAL" "11, 22" "iniset spaces in option" @@ -95,7 +103,7 @@ assert_equal "$VAL" "11, 22" "iniset spaces in option" VAL=$(iniget ${TEST_INI} " ccc " spaces) assert_equal "$VAL" "yes" "iniget with section header space" -iniset ${TEST_INI} "b b" opt_ion 42 +iniset ${SUDO_ARG} ${TEST_INI} "b b" opt_ion 42 VAL=$(iniget ${TEST_INI} "b b" opt_ion) assert_equal "$VAL" "42" "iniset with section header space" @@ -103,7 +111,7 @@ assert_equal "$VAL" "42" "iniset with section header space" VAL=$(iniget ${TEST_INI} bbb handlers) assert_equal "$VAL" "ee,ff" "iniget at EOF" -iniset ${TEST_INI} bbb handlers "33,44" +iniset ${SUDO_ARG} ${TEST_INI} bbb handlers "33,44" VAL=$(iniget ${TEST_INI} bbb handlers) assert_equal "$VAL" "33,44" "inset at EOF" @@ -122,12 +130,12 @@ else fi # test changing empty option -iniset ${TEST_INI} ddd empty "42" +iniset ${SUDO_ARG} ${TEST_INI} ddd empty "42" VAL=$(iniget ${TEST_INI} ddd empty) assert_equal "$VAL" "42" "change empty option" # test pipe in option -iniset ${TEST_INI} aaa handlers "a|b" +iniset ${SUDO_ARG} ${TEST_INI} aaa 
handlers "a|b" VAL=$(iniget ${TEST_INI} aaa handlers) assert_equal "$VAL" "a|b" "pipe in option" @@ -146,23 +154,23 @@ else fi # Test comments -inicomment ${TEST_INI} aaa handlers +inicomment ${SUDO_ARG} ${TEST_INI} aaa handlers VAL=$(iniget ${TEST_INI} aaa handlers) assert_empty VAL "test inicomment" # Test multiple line iniset/iniget -iniset_multiline ${TEST_INI} eee multi bar1 bar2 +iniset_multiline ${SUDO_ARG} ${TEST_INI} eee multi bar1 bar2 VAL=$(iniget_multiline ${TEST_INI} eee multi) assert_equal "$VAL" "bar1 bar2" "iniget_multiline" # Test iniadd with exiting values -iniadd ${TEST_INI} eee multi bar3 +iniadd ${SUDO_ARG} ${TEST_INI} eee multi bar3 VAL=$(iniget_multiline ${TEST_INI} eee multi) assert_equal "$VAL" "bar1 bar2 bar3" "iniadd with existing values" # Test iniadd with non-exiting values -iniadd ${TEST_INI} eee non-multi foobar1 foobar2 +iniadd ${SUDO_ARG} ${TEST_INI} eee non-multi foobar1 foobar2 VAL=$(iniget_multiline ${TEST_INI} eee non-multi) assert_equal "$VAL" "foobar1 foobar2" "iniadd non-existing values" @@ -176,7 +184,7 @@ del_cases=" del_no_section" for x in $del_cases; do - inidelete ${TEST_INI} $x a + inidelete ${SUDO_ARG} ${TEST_INI} $x a VAL=$(iniget_multiline ${TEST_INI} $x a) assert_empty VAL "inidelete $x" if [ "$x" = "del_separate_options" -o \ @@ -191,6 +199,6 @@ for x in $del_cases; do fi done -rm -rf ${INI_TMP_DIR} +$SUDO rm -rf ${INI_TMP_DIR} report_results From cede78748291a8b5ae0dd0dc34c95da4a72fb3ea Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 22 Jul 2015 13:36:12 +1000 Subject: [PATCH 0383/2941] Add file creation test Ensure that iniadd is creating files that don't exist, as it has historically done. 
Change-Id: I2798996f3d46ff1dce410b815a87395f1bf729f9 --- inc/ini-config | 1 + tests/test_ini_config.sh | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/inc/ini-config b/inc/ini-config index d23f4743f1..58386e2441 100644 --- a/inc/ini-config +++ b/inc/ini-config @@ -159,6 +159,7 @@ function inidelete { # Set an option in an INI file # iniset [-sudo] config-file section option value +# - if the file does not exist, it is created function iniset { local xtrace=$(set +o | grep xtrace) set +o xtrace diff --git a/tests/test_ini_config.sh b/tests/test_ini_config.sh index 61f2c410f9..d9cb8d8a99 100755 --- a/tests/test_ini_config.sh +++ b/tests/test_ini_config.sh @@ -199,6 +199,11 @@ for x in $del_cases; do fi done +# test file-creation +iniset $SUDO_ARG ${INI_TMP_ETC_DIR}/test.new.ini test foo bar +VAL=$(iniget ${INI_TMP_ETC_DIR}/test.new.ini test foo) +assert_equal "$VAL" "bar" "iniset created file" + $SUDO rm -rf ${INI_TMP_DIR} report_results From ff70dad892a89cc4cb09aebfcf72e2de5ab7d556 Mon Sep 17 00:00:00 2001 From: Jens Rosenboom Date: Wed, 1 Jul 2015 15:22:53 +0200 Subject: [PATCH 0384/2941] Make image_list.sh independent of host IP address We do not need the HOST_IP to be detected in order to be able to list our images. So just set that to some dummy value before sourcing functions. This will allow tools like disk-image-builder to work regardless of whether get_default_host_ip succeeds or not. Change-Id: I9c22d2066e34309e70e56076e3d17c5db6ecee06 --- tools/image_list.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tools/image_list.sh b/tools/image_list.sh index a27635effd..27b3d4612d 100755 --- a/tools/image_list.sh +++ b/tools/image_list.sh @@ -3,6 +3,12 @@ # Keep track of the DevStack directory TOP_DIR=$(cd $(dirname "$0")/.. && pwd) +# The following "source" implicitly calls get_default_host_ip() in +# stackrc and will die if the selected default IP happens to lie +# in the default ranges for FIXED_RANGE or FLOATING_RANGE. 
Since we +# do not really need HOST_IP to be properly set in the remainder of +# this script, just set it to some dummy value and make stackrc happy. +HOST_IP=SKIP source $TOP_DIR/functions # Possible virt drivers, if we have more, add them here. Always keep From 22cf648cf64029b6ba34a77aadd43b356acd53e7 Mon Sep 17 00:00:00 2001 From: Hirofumi Ichihara Date: Thu, 23 Jul 2015 18:13:55 +0900 Subject: [PATCH 0385/2941] Remove restraint on agent file from ml2 plugin Ml2 plugin always needs agent file even if the agent is out of tree. This patch removes the restraint and ofagent_agent. Change-Id: I12de58e13da1fd162ad8b632d895779ae7560c3c Closes-Bug: #1477459 --- lib/neutron_plugins/ml2 | 4 +++- lib/neutron_plugins/ofagent_agent | 4 ---- 2 files changed, 3 insertions(+), 5 deletions(-) delete mode 100644 lib/neutron_plugins/ofagent_agent diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index 13ffee9b5b..ace5335a78 100755 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -19,7 +19,9 @@ fi # Default openvswitch L2 agent Q_AGENT=${Q_AGENT:-openvswitch} -source $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent +if [ -f $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent ]; then + source $TOP_DIR/lib/neutron_plugins/${Q_AGENT}_agent +fi # List of MechanismDrivers to load Q_ML2_PLUGIN_MECHANISM_DRIVERS=${Q_ML2_PLUGIN_MECHANISM_DRIVERS:-openvswitch,linuxbridge} diff --git a/lib/neutron_plugins/ofagent_agent b/lib/neutron_plugins/ofagent_agent deleted file mode 100644 index 0bc9bffb55..0000000000 --- a/lib/neutron_plugins/ofagent_agent +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash - -# REVISIT(yamamoto): This file is intentionally left empty -# in order to keep Q_AGENT=ofagent_agent work. 
From 1c506c5c3422b80ca01903f929b47011a4f969e1 Mon Sep 17 00:00:00 2001 From: Hirofumi Ichihara Date: Fri, 24 Jul 2015 10:42:13 +0900 Subject: [PATCH 0386/2941] Remove unused variable Change-Id: Ib3b50c0e34403311b529e116f39bf82dd03dca0e --- lib/neutron-legacy | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 2c9dd1aa7d..4069439183 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -999,7 +999,6 @@ function _configure_neutron_dhcp_agent { } function _configure_neutron_l3_agent { - local cfg_file Q_L3_ENABLED=True # for l3-agent, only use per tenant router if we have namespaces Q_L3_ROUTER_PER_TENANT=$Q_USE_NAMESPACE From eef5d8590f560e4dd2be21942086e1e8b1223de4 Mon Sep 17 00:00:00 2001 From: Mehdi Abaakouk Date: Fri, 24 Jul 2015 10:55:42 +0200 Subject: [PATCH 0387/2941] Adds two new configuration files Ceilometer introduces two new configuration files This change adds them. Change-Id: I4da44f09eb0a839f36fef513aec41d9b1564155d Depends-On: I5a202c30614d06821063e243d4e2330736aba5fd --- lib/ceilometer | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/ceilometer b/lib/ceilometer index 9226d85e20..ce93ebdc4f 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -212,6 +212,8 @@ function configure_ceilometer { cp $CEILOMETER_DIR/etc/ceilometer/api_paste.ini $CEILOMETER_CONF_DIR cp $CEILOMETER_DIR/etc/ceilometer/event_definitions.yaml $CEILOMETER_CONF_DIR cp $CEILOMETER_DIR/etc/ceilometer/meters.yaml $CEILOMETER_CONF_DIR + cp $CEILOMETER_DIR/etc/ceilometer/gnocchi_archive_policy_map.yaml $CEILOMETER_CONF_DIR + cp $CEILOMETER_DIR/etc/ceilometer/gnocchi_resources.yaml $CEILOMETER_CONF_DIR if [ "$CEILOMETER_PIPELINE_INTERVAL" ]; then sed -i "s/interval:.*/interval: ${CEILOMETER_PIPELINE_INTERVAL}/" $CEILOMETER_CONF_DIR/pipeline.yaml From aa81e2ea73cf46ab4e220a66959d19529f7bfeac Mon Sep 17 00:00:00 2001 From: Stanislaw Szydlo Date: Mon, 27 Jul 2015 10:54:44 +0200 Subject: [PATCH 0388/2941] Fix errors in 
tools/ping_neutron.sh Due to errors in tools/ping_neutron.sh, exercise neutron-adv-test.sh fails. Faults were: 'neutron net-list' took too much arguments and variable REMAINING_ARGS was mistyped. Change-Id: I681328bfb1e4445543ef9d94e3b3824dbc9c8346 Closes-Bug: #1478021 --- tools/ping_neutron.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/ping_neutron.sh b/tools/ping_neutron.sh index d36b7f60c8..dba7502652 100755 --- a/tools/ping_neutron.sh +++ b/tools/ping_neutron.sh @@ -51,15 +51,15 @@ if [[ -z "$NET_NAME" ]]; then usage fi -REMANING_ARGS="${@:2}" +REMAINING_ARGS="${@:2}" # BUG: with duplicate network names, this fails pretty hard. -NET_ID=$(neutron net-list $NET_NAME | grep "$NET_NAME" | awk '{print $2}') +NET_ID=$(neutron net-list | grep "$NET_NAME" | awk '{print $2}') PROBE_ID=$(neutron-debug probe-list -c id -c network_id | grep "$NET_ID" | awk '{print $2}' | head -n 1) # This runs a command inside the specific netns NET_NS_CMD="ip netns exec qprobe-$PROBE_ID" -PING_CMD="sudo $NET_NS_CMD ping $REMAING_ARGS" +PING_CMD="sudo $NET_NS_CMD ping $REMAINING_ARGS" echo "Running $PING_CMD" $PING_CMD From 20401434091e6083c85f7269cd4e7cf44e5713b4 Mon Sep 17 00:00:00 2001 From: Atsushi SAKAI Date: Mon, 27 Jul 2015 20:42:44 +0900 Subject: [PATCH 0389/2941] Fix four typos on devstack documentation behaviour => behavior mechansim => mechanism glustfs => glusterfs pluggin => plugin Change-Id: I3de6740e5d4b2b55009614007767458124036b75 Closes-Bug: #1478538 --- HACKING.rst | 2 +- doc/source/plugins.rst | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/HACKING.rst b/HACKING.rst index a40af54b45..6bd24b0174 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -355,7 +355,7 @@ your change * **Should this be upstream** -- DevStack generally does not override default choices provided by projects and attempts to not - unexpectedly modify behaviour. + unexpectedly modify behavior. 
* **Context in commit messages** -- DevStack touches many different areas and reviewers need context around changes to make good diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst index 1b6f5e36ed..803dd08a48 100644 --- a/doc/source/plugins.rst +++ b/doc/source/plugins.rst @@ -15,7 +15,7 @@ evolves. Plugin Interface ================ -DevStack supports a standard mechansim for including plugins from +DevStack supports a standard mechanism for including plugins from external repositories. The plugin interface assumes the following: An external git repository that includes a ``devstack/`` top level @@ -49,7 +49,7 @@ They are added in the following format:: [[local|localrc]] enable_plugin [GITREF] -- ``name`` - an arbitrary name. (ex: glustfs, docker, zaqar, congress) +- ``name`` - an arbitrary name. (ex: glusterfs, docker, zaqar, congress) - ``giturl`` - a valid git url that can be cloned - ``gitref`` - an optional git ref (branch / ref / tag) that will be cloned. Defaults to master. @@ -209,7 +209,7 @@ enough to be an issue). Ideally a plugin will be included within the ``devstack`` directory of the project they are being tested. For example, the stackforge/ec2-api -project has its pluggin support in its own tree. +project has its plugin support in its own tree. However, some times a DevStack plugin might be used solely to configure a backend service that will be used by the rest of From 089f044cbee4be1d2e1c3ad943c20eec33df736f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Fri, 24 Jul 2015 17:07:07 +0200 Subject: [PATCH 0390/2941] update cinder's key permission to allow nova snapshots MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Preparing the field for this patch: https://review.openstack.org/#/c/205282/2 The client.cinder key needs to have write permission to the glance pool in order to complete the snapshot process. 
Change-Id: I90c6aa056b99944aa558783f3f81d06f918f3e26 Signed-off-by: Sébastien Han --- lib/ceph | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ceph b/lib/ceph index 6cf481e530..aeb7191512 100644 --- a/lib/ceph +++ b/lib/ceph @@ -295,7 +295,7 @@ function configure_ceph_nova { iniset $NOVA_CONF libvirt images_rbd_ceph_conf ${CEPH_CONF_FILE} if ! is_service_enabled cinder; then - sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL},allow rx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring > /dev/null + sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL},allow rwx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring > /dev/null sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring fi } From 64ab8d122893d4ab1e135e82dab17f14cc74c33c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?S=C3=A9bastien=20Han?= Date: Mon, 27 Jul 2015 14:29:57 +0200 Subject: [PATCH 0391/2941] Re-enable direct URL MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This parameter is needed in order to test the copy on write cloning functionnality. 
Change-Id: Ie8179a68827acba2dd8614ea9c6cecf2ddb20e29 Signed-off-by: Sébastien Han --- lib/ceph | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ceph b/lib/ceph index 6cf481e530..f09252ff5d 100644 --- a/lib/ceph +++ b/lib/ceph @@ -267,6 +267,7 @@ function configure_ceph_glance { sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${GLANCE_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring + iniset $GLANCE_API_CONF DEFAULT show_image_direct_url True iniset $GLANCE_API_CONF glance_store default_store rbd iniset $GLANCE_API_CONF glance_store stores "file, http, rbd" iniset $GLANCE_API_CONF glance_store rbd_store_ceph_conf $CEPH_CONF_FILE From ac9313e5a5af9e350d66c0ae628ca900c4cfc218 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 27 Jul 2015 13:33:30 -0400 Subject: [PATCH 0392/2941] add -n --name flag to worlddump We're worlddumping at success points in grenade, and it would be much handier to explain when that happens via a symbolic name in the filename. Add a --name option to worlddump to allow it. 
Change-Id: I644200fe08e404dc7ca2006478ae4e11ca020672 --- tools/worlddump.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/tools/worlddump.py b/tools/worlddump.py index e4ba02b51c..926b4a1873 100755 --- a/tools/worlddump.py +++ b/tools/worlddump.py @@ -31,12 +31,19 @@ def get_options(): parser.add_argument('-d', '--dir', default='.', help='Output directory for worlddump') + parser.add_argument('-n', '--name', + default='', + help='Additional name to tag into file') return parser.parse_args() -def filename(dirname): +def filename(dirname, name=""): now = datetime.datetime.utcnow() - return os.path.join(dirname, now.strftime("worlddump-%Y-%m-%d-%H%M%S.txt")) + fmt = "worlddump-%Y-%m-%d-%H%M%S" + if name: + fmt += "-" + name + fmt += ".txt" + return os.path.join(dirname, now.strftime(fmt)) def warn(msg): @@ -125,7 +132,7 @@ def guru_meditation_report(): def main(): opts = get_options() - fname = filename(opts.dir) + fname = filename(opts.dir, opts.name) print "World dumping... see %s for details" % fname sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0) with open(fname, 'w') as f: From dd07c484e474f3e9410c558c8fb3103233facd3e Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 27 Jul 2015 13:10:44 -0400 Subject: [PATCH 0393/2941] line wrapping for longer ceph commands Some of the ceph commands had gotten quite long, so reviewing them in gerrit is a bit problematic. Do some line wrapping just to bring these back to a bit more managable state. 
Change-Id: Ice5122702f2466d059dd275b038d5ff983bcda44 --- lib/ceph | 31 +++++++++++++++++++++++-------- 1 file changed, 23 insertions(+), 8 deletions(-) diff --git a/lib/ceph b/lib/ceph index aeb7191512..b05e6e818c 100644 --- a/lib/ceph +++ b/lib/ceph @@ -176,7 +176,9 @@ function configure_ceph { sudo mkdir -p ${CEPH_DATA_DIR}/{bootstrap-mds,bootstrap-osd,mds,mon,osd,tmp} # create ceph monitor initial key and directory - sudo ceph-authtool /var/lib/ceph/tmp/keyring.mon.$(hostname) --create-keyring --name=mon. --add-key=$(ceph-authtool --gen-print-key) --cap mon 'allow *' + sudo ceph-authtool /var/lib/ceph/tmp/keyring.mon.$(hostname) \ + --create-keyring --name=mon. --add-key=$(ceph-authtool --gen-print-key) \ + --cap mon 'allow *' sudo mkdir /var/lib/ceph/mon/ceph-$(hostname) # create a default ceph configuration file @@ -194,12 +196,14 @@ osd journal size = 100 EOF # bootstrap the ceph monitor - sudo ceph-mon -c ${CEPH_CONF_FILE} --mkfs -i $(hostname) --keyring /var/lib/ceph/tmp/keyring.mon.$(hostname) + sudo ceph-mon -c ${CEPH_CONF_FILE} --mkfs -i $(hostname) \ + --keyring /var/lib/ceph/tmp/keyring.mon.$(hostname) + if is_ubuntu; then - sudo touch /var/lib/ceph/mon/ceph-$(hostname)/upstart + sudo touch /var/lib/ceph/mon/ceph-$(hostname)/upstart sudo initctl emit ceph-mon id=$(hostname) else - sudo touch /var/lib/ceph/mon/ceph-$(hostname)/sysvinit + sudo touch /var/lib/ceph/mon/ceph-$(hostname)/sysvinit sudo service ceph start mon.$(hostname) fi @@ -240,7 +244,9 @@ EOF OSD_ID=$(sudo ceph -c ${CEPH_CONF_FILE} osd create) sudo mkdir -p ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID} sudo ceph-osd -c ${CEPH_CONF_FILE} -i ${OSD_ID} --mkfs - sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create osd.${OSD_ID} mon 'allow profile osd ' osd 'allow *' | sudo tee ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/keyring + sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create osd.${OSD_ID} \ + mon 'allow profile osd ' osd 'allow *' | \ + sudo tee ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/keyring # ceph's init script 
is parsing ${CEPH_DATA_DIR}/osd/ceph-${OSD_ID}/ and looking for a file # 'upstart' or 'sysinitv', thanks to these 'touches' we are able to control OSDs daemons @@ -264,7 +270,10 @@ function configure_ceph_embedded_glance { # configure_ceph_glance() - Glance config needs to come after Glance is set up function configure_ceph_glance { sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${GLANCE_CEPH_POOL} ${GLANCE_CEPH_POOL_PG} ${GLANCE_CEPH_POOL_PGP} - sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${GLANCE_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring + sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${GLANCE_CEPH_USER} \ + mon "allow r" \ + osd "allow class-read object_prefix rbd_children, allow rwx pool=${GLANCE_CEPH_POOL}" | \ + sudo tee ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${GLANCE_CEPH_USER}.keyring iniset $GLANCE_API_CONF glance_store default_store rbd @@ -295,7 +304,10 @@ function configure_ceph_nova { iniset $NOVA_CONF libvirt images_rbd_ceph_conf ${CEPH_CONF_FILE} if ! 
is_service_enabled cinder; then - sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL},allow rwx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring > /dev/null + sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_CEPH_USER} \ + mon "allow r" \ + osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL},allow rwx pool=${GLANCE_CEPH_POOL}" | \ + sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring > /dev/null sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring fi } @@ -311,7 +323,10 @@ function configure_ceph_embedded_cinder { # configure_ceph_cinder() - Cinder config needs to come after Cinder is set up function configure_ceph_cinder { sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_CEPH_POOL} ${CINDER_CEPH_POOL_PG} ${CINDER_CEPH_POOL_PGP} - sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_CEPH_USER} mon "allow r" osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL},allow rx pool=${GLANCE_CEPH_POOL}" | sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring + sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_CEPH_USER} \ + mon "allow r" \ + osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL},allow rx pool=${GLANCE_CEPH_POOL}" | \ + sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring } From 961643e404919e0fa3b90f7620a4daccc962e6a3 Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Fri, 31 Jul 2015 13:45:27 +0900 Subject: [PATCH 
0394/2941] configuration.rst: Document post-extra meta section phase Change-Id: I81d121424057fd79c1a0a65d420df3ee1badb6f3 --- doc/source/configuration.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 6052576e98..96f91ec774 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -56,6 +56,7 @@ The defined phases are: before they are started - **extra** - runs after services are started and before any files in ``extra.d`` are executed +- **post-extra** - runs after files in ``extra.d`` are executed The file is processed strictly in sequence; meta-sections may be specified more than once but if any settings are duplicated the last to From 93c10571e8b1b2c8e6916c759d5b92ab379c037f Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Fri, 31 Jul 2015 10:38:50 -0400 Subject: [PATCH 0395/2941] Stop relying on the tempest sample config file This commit stops using the sample config file as the base for tempest configuration. The sample config isn't actually needed as a based for configuration because all the options are commented out so from the perspective of the config parser it's a blank file. There are 2 reasons for making this change, first using the sample like this creates a hard dependency on tempest having a sample config file in tree. This is something that the project wants to change since keeping the file in sync causes headaches because of new oslo releases. The second aspect is that it makes the generated output difficult to read. It includes *every* option and it's description in the generated output which makes finding where devstack is actually setting something more difficult to find. 
Change-Id: I4064a041a965ed2419b68efc8dc31fce37b24cfd --- lib/tempest | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/tempest b/lib/tempest index 68ddd44105..ebed5ae98f 100644 --- a/lib/tempest +++ b/lib/tempest @@ -167,10 +167,10 @@ function configure_tempest { esac fi - # Create ``tempest.conf`` from ``tempest.conf.sample`` - # Copy every time because the image UUIDS are going to change + # (Re)create ``tempest.conf`` + # Create every time because the image UUIDS are going to change sudo install -d -o $STACK_USER $TEMPEST_CONFIG_DIR - install -m 644 $TEMPEST_DIR/etc/tempest.conf.sample $TEMPEST_CONFIG + rm -f $TEMPEST_CONFIG password=${ADMIN_PASSWORD:-secrete} From d73df506f29854997f3b74f1b60968535923316b Mon Sep 17 00:00:00 2001 From: Pradeep Kilambi Date: Fri, 31 Jul 2015 11:07:31 -0400 Subject: [PATCH 0396/2941] Remove the old meter.yaml path from devstack This will let us defauilt to the path within the code, we can add the right path in once we have the file in the codebase. 
Change-Id: I9de94c5ac6349c3b46adbacb77fc877b5201285c --- lib/ceilometer | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/ceilometer b/lib/ceilometer index ce93ebdc4f..3df75b7300 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -211,7 +211,6 @@ function configure_ceilometer { cp $CEILOMETER_DIR/etc/ceilometer/event_pipeline.yaml $CEILOMETER_CONF_DIR cp $CEILOMETER_DIR/etc/ceilometer/api_paste.ini $CEILOMETER_CONF_DIR cp $CEILOMETER_DIR/etc/ceilometer/event_definitions.yaml $CEILOMETER_CONF_DIR - cp $CEILOMETER_DIR/etc/ceilometer/meters.yaml $CEILOMETER_CONF_DIR cp $CEILOMETER_DIR/etc/ceilometer/gnocchi_archive_policy_map.yaml $CEILOMETER_CONF_DIR cp $CEILOMETER_DIR/etc/ceilometer/gnocchi_resources.yaml $CEILOMETER_CONF_DIR From e97cb825d2800d34d1fe6eceae85a3a7f84549d1 Mon Sep 17 00:00:00 2001 From: Nicolas Simonds Date: Tue, 28 Jul 2015 11:46:46 -0700 Subject: [PATCH 0397/2941] update cinder's key permission to allow nova snapshots Preparing the field for this patch: https://review.openstack.org/205282 The client.cinder key needs to have write permission to the glance pool in order to complete the snapshot process. 
Change-Id: I98f16167db864ffd14e8c3dd5dec81fc16245448 --- lib/ceph | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ceph b/lib/ceph index 6af4b3a139..8e34aa49a4 100644 --- a/lib/ceph +++ b/lib/ceph @@ -326,7 +326,7 @@ function configure_ceph_cinder { sudo ceph -c ${CEPH_CONF_FILE} osd pool create ${CINDER_CEPH_POOL} ${CINDER_CEPH_POOL_PG} ${CINDER_CEPH_POOL_PGP} sudo ceph -c ${CEPH_CONF_FILE} auth get-or-create client.${CINDER_CEPH_USER} \ mon "allow r" \ - osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL},allow rx pool=${GLANCE_CEPH_POOL}" | \ + osd "allow class-read object_prefix rbd_children, allow rwx pool=${CINDER_CEPH_POOL}, allow rwx pool=${NOVA_CEPH_POOL},allow rwx pool=${GLANCE_CEPH_POOL}" | \ sudo tee ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring sudo chown ${STACK_USER}:$(id -g -n $whoami) ${CEPH_CONF_DIR}/ceph.client.${CINDER_CEPH_USER}.keyring } From 57aafb5a9ad20e19e2c248a8e853a32d5b719c03 Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Fri, 31 Jul 2015 12:22:44 -0700 Subject: [PATCH 0398/2941] Clone reqs repo prior to using pip constraints We pull the pip constraints from the requirements repo so need to clone that repo prior to using the constraints. In fixup_stuff.sh devstack attempts to install packages like prettytable using the constraints. It is also possible to need constraints before fixup_stuff.sh if tracking depends. To deal with this clone requirements repo before any possible use of constraints in pip_install. 
Change-Id: I42e981c8c5ce1b8a57b9f6cce213065c72d6af11 --- lib/infra | 2 -- stack.sh | 5 +++++ 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/lib/infra b/lib/infra index 3d68e45bd9..eb8000e8d3 100644 --- a/lib/infra +++ b/lib/infra @@ -30,8 +30,6 @@ REQUIREMENTS_DIR=$DEST/requirements # install_infra() - Collect source and prepare function install_infra { local PIP_VIRTUAL_ENV="$REQUIREMENTS_DIR/.venv" - # bring down global requirements - git_clone $REQUIREMENTS_REPO $REQUIREMENTS_DIR $REQUIREMENTS_BRANCH [ ! -d $PIP_VIRTUAL_ENV ] && virtualenv $PIP_VIRTUAL_ENV # We don't care about testing git pbr in the requirements venv. PIP_VIRTUAL_ENV=$PIP_VIRTUAL_ENV pip_install -U pbr diff --git a/stack.sh b/stack.sh index cc8bc8c88f..2288af57a4 100755 --- a/stack.sh +++ b/stack.sh @@ -698,6 +698,11 @@ fi TRACK_DEPENDS=${TRACK_DEPENDS:-False} +# Bring down global requirements before any use of pip_install. This is +# necessary to ensure that the constraints file is in place before we +# attempt to apply any constraints to pip installs. +git_clone $REQUIREMENTS_REPO $REQUIREMENTS_DIR $REQUIREMENTS_BRANCH + # Install Python packages into a virtualenv so that we can track them if [[ $TRACK_DEPENDS = True ]]; then echo_summary "Installing Python packages into a virtualenv $DEST/.venv" From 994db6173861a8c6f8c73d59cdf85cd28463ab31 Mon Sep 17 00:00:00 2001 From: lanoux Date: Mon, 3 Aug 2015 13:48:12 +0000 Subject: [PATCH 0399/2941] Add tempest run_validation option This change adds the tempest run_validation option to run tests with ssh connection in the gate. 
Change-Id: I140f79b06d2e85c1d5d07fa5f117d4f8b250fa3d --- lib/tempest | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/tempest b/lib/tempest index 68ddd44105..a534530b05 100644 --- a/lib/tempest +++ b/lib/tempest @@ -457,6 +457,9 @@ function configure_tempest { fi iniset $TEMPEST_CONFIG object-storage-feature-enabled discoverable_apis $object_storage_api_extensions + # Validation + iniset $TEMPEST_CONFIG validation run_validation ${TEMPEST_RUN_VALIDATION:-False} + # Volume # TODO(dkranz): Remove the bootable flag when Juno is end of life. iniset $TEMPEST_CONFIG volume-feature-enabled bootable True From a6db5e30cc53d6dae11ffdc228e0ed01865bf603 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 4 Aug 2015 06:23:28 -0400 Subject: [PATCH 0400/2941] fix multinode guide The multinode guide erroneously said to enable n-api on the worker nodes, which is a typo. n-api-meta is the thing that's needed. Change-Id: I733896681f7f6fe3bea0fdeeb8ffc9033d7fc761 --- doc/source/guides/multinode-lab.rst | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst index 27d71f1b9c..557b522f69 100644 --- a/doc/source/guides/multinode-lab.rst +++ b/doc/source/guides/multinode-lab.rst @@ -178,12 +178,17 @@ machines, create a ``local.conf`` with: MYSQL_HOST=192.168.42.11 RABBIT_HOST=192.168.42.11 GLANCE_HOSTPORT=192.168.42.11:9292 - ENABLED_SERVICES=n-cpu,n-net,n-api,c-vol + ENABLED_SERVICES=n-cpu,n-net,n-api-meta,c-vol NOVA_VNC_ENABLED=True NOVNCPROXY_URL="http://192.168.42.11:6080/vnc_auto.html" VNCSERVER_LISTEN=$HOST_IP VNCSERVER_PROXYCLIENT_ADDRESS=$VNCSERVER_LISTEN +**Note:** the ``n-api-meta`` service is a version of the api server +that only serves the metadata service. It's needed because the +computes created won't have a routing path to the metadata service on +the controller. 
+ Fire up OpenStack: :: @@ -263,7 +268,7 @@ Swift ----- Swift, OpenStack Object Storage, requires a significant amount of resources -and is disabled by default in DevStack. The support in DevStack is geared +and is disabled by default in DevStack. The support in DevStack is geared toward a minimal installation but can be used for testing. To implement a true multi-node test of swift, additional steps will be required. Enabling it is as simple as enabling the ``swift`` service in ``local.conf``: From 815db16c2099610a52dec35f0846c0d19b9bd5b9 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 6 Aug 2015 10:25:45 +1000 Subject: [PATCH 0401/2941] Clean up configuration documentation The current format is just copy-paste after auto-conversion and very inconsistent. Move discussion of each option into a section and reword some slightly so they read more clearly. Group some together into a section+sub-sections, such as the logging and ip-version option discussions. Add a top table-of-contents for the major sections, and then a separate toc for each of the configuration options that are discussed in detail. Change-Id: Iddd27cb54f1d9f062b9c47ff9ad6a2bef3650d6b --- doc/source/configuration.rst | 330 +++++++++++++++++------------------ 1 file changed, 165 insertions(+), 165 deletions(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 6052576e98..05a8d95f51 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -2,6 +2,10 @@ Configuration ============= +.. contents:: + :local: + :depth: 1 + DevStack has always tried to be mostly-functional with a minimal amount of configuration. The number of options has ballooned as projects add features, new projects added and more combinations need to be tested. @@ -142,121 +146,79 @@ will not be set if there is no IPv6 address on the default Ethernet interface. Setting it here also makes it available for ``openrc`` to set ``OS_AUTH_URL``. ``HOST_IPV6`` is not set by default. 
-Common Configuration Variables -============================== +Configuration Notes +=================== + +.. contents:: + :local: Installation Directory ---------------------- - | *Default: ``DEST=/opt/stack``* - | The DevStack install directory is set by the ``DEST`` variable. - | By setting it early in the ``localrc`` section you can reference it - in later variables. It can be useful to set it even though it is not - changed from the default value. - | +The DevStack install directory is set by the ``DEST`` variable. By +default it is ``/opt/stack``. + +By setting it early in the ``localrc`` section you can reference it in +later variables. It can be useful to set it even though it is not +changed from the default value. :: DEST=/opt/stack -Libraries from Git ------------------- - - | *Default: ``LIBS_FROM_GIT=""``* - - | By default devstack installs OpenStack server components from - git, however it installs client libraries from released versions - on pypi. This is appropriate if you are working on server - development, but if you want to see how an unreleased version of - the client affects the system you can have devstack install it - from upstream, or from local git trees. - | Multiple libraries can be specified as a comma separated list. - | - - :: - - LIBS_FROM_GIT=python-keystoneclient,oslo.config - -Virtual Environments --------------------- - - | *Default: ``USE_VENV=False``* - | Enable the use of Python virtual environments by setting ``USE_VENV`` - to ``True``. This will enable the creation of venvs for each project - that is defined in the ``PROJECT_VENV`` array. - - | *Default: ``PROJECT_VENV['']='.venv'* - | Each entry in the ``PROJECT_VENV`` array contains the directory name - of a venv to be used for the project. The array index is the project - name. Multiple projects can use the same venv if desired. 
- - :: - - PROJECT_VENV["glance"]=${GLANCE_DIR}.venv - - | *Default: ``ADDITIONAL_VENV_PACKAGES=""``* - | A comma-separated list of additional packages to be installed into each - venv. Often projects will not have certain packages listed in its - ``requirements.txt`` file because they are 'optional' requirements, - i.e. only needed for certain configurations. By default, the enabled - databases will have their Python bindings added when they are enabled. +Logging +------- Enable Logging --------------- +~~~~~~~~~~~~~~ - | *Defaults: ``LOGFILE="" LOGDAYS=7 LOG_COLOR=True``* - | By default ``stack.sh`` output is only written to the console - where it runs. It can be sent to a file in addition to the console - by setting ``LOGFILE`` to the fully-qualified name of the - destination log file. A timestamp will be appended to the given - filename for each run of ``stack.sh``. - | +By default ``stack.sh`` output is only written to the console where it +runs. It can be sent to a file in addition to the console by setting +``LOGFILE`` to the fully-qualified name of the destination log file. A +timestamp will be appended to the given filename for each run of +``stack.sh``. :: LOGFILE=$DEST/logs/stack.sh.log - Old log files are cleaned automatically if ``LOGDAYS`` is set to the - number of days of old log files to keep. +Old log files are cleaned automatically if ``LOGDAYS`` is set to the +number of days of old log files to keep. :: LOGDAYS=1 - The some of the project logs (Nova, Cinder, etc) will be colorized - by default (if ``SYSLOG`` is not set below); this can be turned off - by setting ``LOG_COLOR`` False. +The some of the project logs (Nova, Cinder, etc) will be colorized by +default (if ``SYSLOG`` is not set below); this can be turned off by +setting ``LOG_COLOR`` to ``False``. 
:: LOG_COLOR=False Logging the Service Output --------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~ - | *Default: ``LOGDIR=""``* - | DevStack will log the stdout output of the services it starts. - When using ``screen`` this logs the output in the screen windows - to a file. Without ``screen`` this simply redirects stdout of - the service process to a file in ``LOGDIR``. - | +DevStack will log the ``stdout`` output of the services it starts. +When using ``screen`` this logs the output in the screen windows to a +file. Without ``screen`` this simply redirects stdout of the service +process to a file in ``LOGDIR``. :: LOGDIR=$DEST/logs - *Note the use of ``DEST`` to locate the main install directory; this - is why we suggest setting it in ``local.conf``.* +*Note the use of ``DEST`` to locate the main install directory; this +is why we suggest setting it in ``local.conf``.* Enabling Syslog ---------------- +~~~~~~~~~~~~~~~ - | *Default: ``SYSLOG=False SYSLOG_HOST=$HOST_IP SYSLOG_PORT=516``* - | Logging all services to a single syslog can be convenient. Enable - syslogging by setting ``SYSLOG`` to ``True``. If the destination log - host is not localhost ``SYSLOG_HOST`` and ``SYSLOG_PORT`` can be - used to direct the message stream to the log host. - | +Logging all services to a single syslog can be convenient. Enable +syslogging by setting ``SYSLOG`` to ``True``. If the destination log +host is not localhost ``SYSLOG_HOST`` and ``SYSLOG_PORT`` can be used +to direct the message stream to the log host. | :: @@ -264,15 +226,55 @@ Enabling Syslog SYSLOG_HOST=$HOST_IP SYSLOG_PORT=516 +Libraries from Git +------------------ + +By default devstack installs OpenStack server components from git, +however it installs client libraries from released versions on pypi. 
+This is appropriate if you are working on server development, but if +you want to see how an unreleased version of the client affects the +system you can have devstack install it from upstream, or from local +git trees by specifying it in ``LIBS_FROM_GIT``. Multiple libraries +can be specified as a comma separated list. + + :: + + LIBS_FROM_GIT=python-keystoneclient,oslo.config + +Virtual Environments +-------------------- + +Enable the use of Python virtual environments by setting ``USE_VENV`` +to ``True``. This will enable the creation of venvs for each project +that is defined in the ``PROJECT_VENV`` array. + +Each entry in the ``PROJECT_VENV`` array contains the directory name +of a venv to be used for the project. The array index is the project +name. Multiple projects can use the same venv if desired. + + :: + + PROJECT_VENV["glance"]=${GLANCE_DIR}.venv + +``ADDITIONAL_VENV_PACKAGES`` is a comma-separated list of additional +packages to be installed into each venv. Often projects will not have +certain packages listed in its ``requirements.txt`` file because they +are 'optional' requirements, i.e. only needed for certain +configurations. By default, the enabled databases will have their +Python bindings added when they are enabled. + + :: + + ADDITIONAL_VENV_PACKAGES="python-foo, python-bar" + + A clean install every time -------------------------- - | *Default: ``RECLONE=""``* - | By default ``stack.sh`` only clones the project repos if they do - not exist in ``$DEST``. ``stack.sh`` will freshen each repo on each - run if ``RECLONE`` is set to ``yes``. This avoids having to manually - remove repos in order to get the current branch from ``$GIT_BASE``. - | +By default ``stack.sh`` only clones the project repos if they do not +exist in ``$DEST``. ``stack.sh`` will freshen each repo on each run if +``RECLONE`` is set to ``yes``. This avoids having to manually remove +repos in order to get the current branch from ``$GIT_BASE``. 
:: @@ -281,13 +283,11 @@ A clean install every time Upgrade packages installed by pip --------------------------------- - | *Default: ``PIP_UPGRADE=""``* - | By default ``stack.sh`` only installs Python packages if no version - is currently installed or the current version does not match a specified - requirement. If ``PIP_UPGRADE`` is set to ``True`` then existing required - Python packages will be upgraded to the most recent version that - matches requirements. - | +By default ``stack.sh`` only installs Python packages if no version is +currently installed or the current version does not match a specified +requirement. If ``PIP_UPGRADE`` is set to ``True`` then existing +required Python packages will be upgraded to the most recent version +that matches requirements. :: @@ -296,74 +296,69 @@ Upgrade packages installed by pip Swift ----- - | Default: SWIFT_HASH="" - | SWIFT_REPLICAS=1 - | SWIFT_DATA_DIR=$DEST/data/swift +Swift is now used as the back-end for the S3-like object store. When +enabled Nova's objectstore (``n-obj`` in ``ENABLED_SERVICES``) is +automatically disabled. Enable Swift by adding it services to +``ENABLED_SERVICES`` - | Swift is now used as the back-end for the S3-like object store. - When enabled Nova's objectstore (n-obj in ENABLED_SERVICES) is - automatically disabled. Enable Swift by adding it services to - ENABLED_SERVICES: enable_service s-proxy s-object s-container - s-account + :: + + enable_service s-proxy s-object s-container s-account - Setting Swift's hash value is required and you will be prompted for - it if Swift is enabled so just set it to something already: +Setting Swift's hash value is required and you will be prompted for it +if Swift is enabled so just set it to something already: :: SWIFT_HASH=66a3d6b56c1f479c8b4e70ab5c2000f5 - For development purposes the default number of replicas is set to - ``1`` to reduce the overhead required. To better simulate a - production deployment set this to ``3`` or more. 
+For development purposes the default number of replicas is set to +``1`` to reduce the overhead required. To better simulate a production +deployment set this to ``3`` or more. :: SWIFT_REPLICAS=3 - The data for Swift is stored in the source tree by default (in - ``$DEST/swift/data``) and can be moved by setting - ``SWIFT_DATA_DIR``. The specified directory will be created if it - does not exist. +The data for Swift is stored in the source tree by default (in +``$DEST/swift/data``) and can be moved by setting +``SWIFT_DATA_DIR``. The specified directory will be created if it does +not exist. :: SWIFT_DATA_DIR=$DEST/data/swift - *Note: Previously just enabling ``swift`` was sufficient to start - the Swift services. That does not provide proper service - granularity, particularly in multi-host configurations, and is - considered deprecated. Some service combination tests now check for - specific Swift services and the old blanket acceptance will longer - work correctly.* +*Note*: Previously just enabling ``swift`` was sufficient to start the +Swift services. That does not provide proper service granularity, +particularly in multi-host configurations, and is considered +deprecated. Some service combination tests now check for specific +Swift services and the old blanket acceptance will longer work +correctly. Service Catalog Backend ----------------------- - | *Default: ``KEYSTONE_CATALOG_BACKEND=sql``* - | DevStack uses Keystone's ``sql`` service catalog backend. An - alternate ``template`` backend is also available. However, it does - not support the ``service-*`` and ``endpoint-*`` commands of the - ``keystone`` CLI. To do so requires the ``sql`` backend be enabled: - | +By default DevStack uses Keystone's ``sql`` service catalog backend. +An alternate ``template`` backend is also available, however, it does +not support the ``service-*`` and ``endpoint-*`` commands of the +``keystone`` CLI. 
To do so requires the ``sql`` backend be enabled +with ``KEYSTONE_CATALOG_BACKEND``: :: KEYSTONE_CATALOG_BACKEND=template - DevStack's default configuration in ``sql`` mode is set in - ``files/keystone_data.sh`` +DevStack's default configuration in ``sql`` mode is set in +``files/keystone_data.sh`` Cinder ------ - | Default: - | VOLUME_GROUP="stack-volumes" VOLUME_NAME_PREFIX="volume-" VOLUME_BACKING_FILE_SIZE=10250M - | The logical volume group used to hold the Cinder-managed volumes - is set by ``VOLUME_GROUP``, the logical volume name prefix is set - with ``VOLUME_NAME_PREFIX`` and the size of the volume backing file - is set with ``VOLUME_BACKING_FILE_SIZE``. - | +The logical volume group used to hold the Cinder-managed volumes is +set by ``VOLUME_GROUP``, the logical volume name prefix is set with +``VOLUME_NAME_PREFIX`` and the size of the volume backing file is set +with ``VOLUME_BACKING_FILE_SIZE``. :: @@ -374,19 +369,23 @@ Cinder Multi-host DevStack ------------------- - | *Default: ``MULTI_HOST=False``* - | Running DevStack with multiple hosts requires a custom - ``local.conf`` section for each host. The master is the same as a - single host installation with ``MULTI_HOST=True``. The slaves have - fewer services enabled and a couple of host variables pointing to - the master. - | **Master** +Running DevStack with multiple hosts requires a custom ``local.conf`` +section for each host. The master is the same as a single host +installation with ``MULTI_HOST=True``. The slaves have fewer services +enabled and a couple of host variables pointing to the master. 
+ +Master +~~~~~~ +Set ``MULTI_HOST`` to true :: MULTI_HOST=True - **Slave** +Slave +~~~~~ + +Set the following options to point to the master :: @@ -398,22 +397,19 @@ Multi-host DevStack IP Version ---------- - | Default: ``IP_VERSION=4+6`` - | This setting can be used to configure DevStack to create either an IPv4, - IPv6, or dual stack tenant data network by setting ``IP_VERSION`` to - either ``IP_VERSION=4``, ``IP_VERSION=6``, or ``IP_VERSION=4+6`` - respectively. This functionality requires that the Neutron networking - service is enabled by setting the following options: - | +``IP_VERSION`` can be used to configure DevStack to create either an +IPv4, IPv6, or dual-stack tenant data-network by with either +``IP_VERSION=4``, ``IP_VERSION=6``, or ``IP_VERSION=4+6`` +respectively. This functionality requires that the Neutron networking +service is enabled by setting the following options: :: disable_service n-net enable_service q-svc q-agt q-dhcp q-l3 - | The following optional variables can be used to alter the default IPv6 - behavior: - | +The following optional variables can be used to alter the default IPv6 +behavior: :: @@ -422,24 +418,28 @@ IP Version FIXED_RANGE_V6=fd$IPV6_GLOBAL_ID::/64 IPV6_PRIVATE_NETWORK_GATEWAY=fd$IPV6_GLOBAL_ID::1 - | *Note: ``FIXED_RANGE_V6`` and ``IPV6_PRIVATE_NETWORK_GATEWAY`` - can be configured with any valid IPv6 prefix. The default values make - use of an auto-generated ``IPV6_GLOBAL_ID`` to comply with RFC 4193.* - | - - | Default: ``SERVICE_IP_VERSION=4`` - | This setting can be used to configure DevStack to enable services to - operate over either IPv4 or IPv6, by setting ``SERVICE_IP_VERSION`` to - either ``SERVICE_IP_VERSION=4`` or ``SERVICE_IP_VERSION=6`` respectively. - When set to ``4`` devstack services will open listen sockets on 0.0.0.0 - and service endpoints will be registered using ``HOST_IP`` as the address. 
- When set to ``6`` devstack services will open listen sockets on :: and - service endpoints will be registered using ``HOST_IPV6`` as the address. - The default value for this setting is ``4``. Dual-mode support, for - example ``4+6`` is not currently supported. - | The following optional variable can be used to alter the default IPv6 - address used: - | +*Note*: ``FIXED_RANGE_V6`` and ``IPV6_PRIVATE_NETWORK_GATEWAY`` can be +configured with any valid IPv6 prefix. The default values make use of +an auto-generated ``IPV6_GLOBAL_ID`` to comply with RFC4193. + +Service Version +~~~~~~~~~~~~~~~ + +DevStack can enable service operation over either IPv4 or IPv6 by +setting ``SERVICE_IP_VERSION`` to either ``SERVICE_IP_VERSION=4`` or +``SERVICE_IP_VERSION=6`` respectively. + +When set to ``4`` devstack services will open listen sockets on +``0.0.0.0`` and service endpoints will be registered using ``HOST_IP`` +as the address. + +When set to ``6`` devstack services will open listen sockets on ``::`` +and service endpoints will be registered using ``HOST_IPV6`` as the +address. + +The default value for this setting is ``4``. Dual-mode support, for +example ``4+6`` is not currently supported. ``HOST_IPV6`` can +optionally be used to alter the default IPv6 address :: From 66919076838771c3a07864037ab661994145a958 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Thu, 6 Aug 2015 11:49:46 +0200 Subject: [PATCH 0402/2941] Install cryptsetup on the n-cpu nodes test_encrypted_cinder_volumes.* tempest test failes, when cryptsetup package is not installed. 
The following error can be seen in the n-cpu log: Stderr: u'/usr/bin/nova-rootwrap: Executable not found: cryptsetup (filter match = cryptsetup)\n' Change-Id: I86603f1301fa946c8bb22de3e69a2ec1ab7f1ef3 --- files/debs/n-cpu | 1 + files/rpms-suse/n-cpu | 1 + files/rpms/n-cpu | 2 +- 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/files/debs/n-cpu b/files/debs/n-cpu index 5d5052aa4e..ffc947a36d 100644 --- a/files/debs/n-cpu +++ b/files/debs/n-cpu @@ -5,3 +5,4 @@ genisoimage sysfsutils sg3-utils python-guestfs # NOPRIME +cryptsetup diff --git a/files/rpms-suse/n-cpu b/files/rpms-suse/n-cpu index 7040b843bf..b3a468d2d8 100644 --- a/files/rpms-suse/n-cpu +++ b/files/rpms-suse/n-cpu @@ -4,3 +4,4 @@ lvm2 open-iscsi sysfsutils sg3_utils +cryptsetup diff --git a/files/rpms/n-cpu b/files/rpms/n-cpu index c1a8e8ffa6..81278b30bb 100644 --- a/files/rpms/n-cpu +++ b/files/rpms/n-cpu @@ -4,4 +4,4 @@ lvm2 genisoimage sysfsutils sg3_utils - +cryptsetup From 2da606da2e47b7260732bb6ef43f1cbf6b4a1559 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 6 Aug 2015 10:02:43 -0400 Subject: [PATCH 0403/2941] add ebtables to world dump I'm still at a loss about why guests stop being pingable in grenade, so lets get ourselves some ebtables output as well. 
Change-Id: I4e40eff6d0b1ef194e43b151a83206fbd50deb66 --- tools/worlddump.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tools/worlddump.py b/tools/worlddump.py index 926b4a1873..1b337a9a83 100755 --- a/tools/worlddump.py +++ b/tools/worlddump.py @@ -85,6 +85,11 @@ def disk_space(): print dfraw +def ebtables_dump(): + _header("EB Tables Dump") + _dump_cmd("sudo ebtables -L") + + def iptables_dump(): tables = ['filter', 'nat', 'mangle'] _header("IP Tables Dump") @@ -141,6 +146,7 @@ def main(): process_list() network_dump() iptables_dump() + ebtables_dump() compute_consoles() guru_meditation_report() From c0057ed5158c61446d3ba025a3b1feb337688859 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 7 Aug 2015 12:36:00 +1000 Subject: [PATCH 0404/2941] exercises/aggregates.sh: Only source openrc once Only source openrc once, and remove the unnecessary re-sourcing of "functions" which is done by openrc. Change-Id: I61c87a0742de274d47753a0b216c56d96344d161 --- exercises/aggregates.sh | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/exercises/aggregates.sh b/exercises/aggregates.sh index 01d548d1f2..808ef76e2f 100755 --- a/exercises/aggregates.sh +++ b/exercises/aggregates.sh @@ -31,18 +31,13 @@ set -o xtrace EXERCISE_DIR=$(cd $(dirname "$0") && pwd) TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) -# Import common functions -source $TOP_DIR/functions - -# Import configuration -source $TOP_DIR/openrc +# Test as the admin user +# note this imports stackrc/functions, etc +. $TOP_DIR/openrc admin admin # Import exercise configuration source $TOP_DIR/exerciserc -# Test as the admin user -. 
$TOP_DIR/openrc admin admin - # If nova api is not enabled we exit with exitcode 55 so that # the exercise is skipped is_service_enabled n-api || exit 55 From 296c1e3809082db4173f00141a981d3a2d168922 Mon Sep 17 00:00:00 2001 From: Jerry Zhao Date: Fri, 7 Aug 2015 20:43:54 -0400 Subject: [PATCH 0405/2941] Move policy.json creation to _configure_neutron_common To allow separating neutron l3, metadata, or dhcp agent from neutron server or controller, there is supposed to be policy.json on the nodes with l3, dhcp, metadata agent enabled, so it would be more appropriate to create policy.json in _configure_neutron_common. Change-Id: I890d647ffca05482f36ebaaf9c2c6e9e6cb23e2b --- lib/neutron-legacy | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 4069439183..498cf46179 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -878,6 +878,12 @@ function _configure_neutron_common { cp $NEUTRON_DIR/etc/neutron.conf $NEUTRON_CONF + Q_POLICY_FILE=$NEUTRON_CONF_DIR/policy.json + cp $NEUTRON_DIR/etc/policy.json $Q_POLICY_FILE + + # allow neutron user to administer neutron to match neutron account + sed -i 's/"context_is_admin": "role:admin"/"context_is_admin": "role:admin or user_name:neutron"/g' $Q_POLICY_FILE + # Set plugin-specific variables ``Q_DB_NAME``, ``Q_PLUGIN_CLASS``. # For main plugin config file, set ``Q_PLUGIN_CONF_PATH``, ``Q_PLUGIN_CONF_FILENAME``. # For addition plugin config files, set ``Q_PLUGIN_EXTRA_CONF_PATH``, @@ -1106,13 +1112,7 @@ function _configure_neutron_plugin_agent { # It is called when q-svc is enabled. 
function _configure_neutron_service { Q_API_PASTE_FILE=$NEUTRON_CONF_DIR/api-paste.ini - Q_POLICY_FILE=$NEUTRON_CONF_DIR/policy.json - cp $NEUTRON_DIR/etc/api-paste.ini $Q_API_PASTE_FILE - cp $NEUTRON_DIR/etc/policy.json $Q_POLICY_FILE - - # allow neutron user to administer neutron to match neutron account - sed -i 's/"context_is_admin": "role:admin"/"context_is_admin": "role:admin or user_name:neutron"/g' $Q_POLICY_FILE # Update either configuration file with plugin iniset $NEUTRON_CONF DEFAULT core_plugin $Q_PLUGIN_CLASS From 144dbc62f8aa6a62cdca403a69bb883cb8552142 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Mon, 10 Aug 2015 12:51:29 +1000 Subject: [PATCH 0406/2941] Remove config_apache_wsgi mention This has only ever appeared in I3a5d1e511c5dca1e6d01a1adca8fda0a43d4f632 and has never been exported, referenced, etc. Remove it to avoid confusion (e.g. Icfad40ee6998296727a95613199e5c2d87bd0a45) Change-Id: Ic71e841f6f751ff43083e12ad734b9c84be7b645 --- lib/apache | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/apache b/lib/apache index c7d69f2ea7..a8e9bc5ad2 100644 --- a/lib/apache +++ b/lib/apache @@ -11,7 +11,6 @@ # lib/apache exports the following functions: # # - install_apache_wsgi -# - config_apache_wsgi # - apache_site_config_for # - enable_apache_site # - disable_apache_site From df8f43b44adec60ce6528441f2a0ea193793adb8 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Sun, 9 Aug 2015 20:30:39 -0400 Subject: [PATCH 0407/2941] Add support to lib/tempest for using tempest test accounts This commit adds support to lib/tempest for configuring tempest to use the test accounts mechanism. It adds a new variable TEMPEST_USE_TEST_ACCOUNTS which will be used to trigger using test accounts. The generate tempest-account-generator utility packaged with tempest is used to generate the users and projects and write an accounts.yaml. 
Another option TEMPEST_CONCURRENCY is added to specify the
number of accounts to create, the value defaults to the number of
processors on the system.

The auth configuration section is moved to the bottom of the
configure_tempest function to ensure the proper auth endpoint and
catalog entries are all set in the tempest.conf file because the
tempest-account-generator tool depends on tempest knowing how to talk
to keystone to create the accounts.

Change-Id: I8682f72ffe26fd133874f5c575df6389f787ffcc
---
 lib/tempest | 38 ++++++++++++++++++++++++++++----------
 1 file changed, 28 insertions(+), 10 deletions(-)

diff --git a/lib/tempest b/lib/tempest
index 3a9ba814d0..240d55c545 100644
--- a/lib/tempest
+++ b/lib/tempest
@@ -82,6 +82,21 @@ TEMPEST_STORAGE_PROTOCOL=${TEMPEST_STORAGE_PROTOCOL:-$TEMPEST_DEFAULT_STORAGE_PR
 IPV6_ENABLED=$(trueorfalse True IPV6_ENABLED)
 IPV6_SUBNET_ATTRIBUTES_ENABLED=$(trueorfalse True IPV6_SUBNET_ATTRIBUTES_ENABLED)
 
+# Do we want to make a configuration where Tempest has admin on
+# the cloud. We don't always want to so that we can ensure Tempest
+# would work on a public cloud.
+TEMPEST_HAS_ADMIN=$(trueorfalse True TEMPEST_HAS_ADMIN)
+
+# Credential provider configuration option variables
+TEMPEST_ALLOW_TENANT_ISOLATION=${TEMPEST_ALLOW_TENANT_ISOLATION:-$TEMPEST_HAS_ADMIN}
+TEMPEST_USE_TEST_ACCOUNTS=$(trueorfalse False TEMPEST_USE_TEST_ACCOUNTS)
+
+# The number of workers tempest is expected to be run with. This is used for
+# generating an accounts.yaml for running with test-accounts. This is also the
+# same variable that devstack-gate uses to specify the number of workers that
+# it will run tempest with
+TEMPEST_CONCURRENCY=${TEMPEST_CONCURRENCY:-$(nproc)}
+
 # Functions
 # ---------
 
@@ -174,11 +189,6 @@ function configure_tempest {
 
     password=${ADMIN_PASSWORD:-secrete}
 
-    # Do we want to make a configuration where Tempest has admin on
-    # the cloud. We don't always want to so that we can ensure Tempest
-    # would work on a public cloud.
-    TEMPEST_HAS_ADMIN=$(trueorfalse True TEMPEST_HAS_ADMIN)
-
     # See ``lib/keystone`` where these users and tenants are set up
     ADMIN_USERNAME=${ADMIN_USERNAME:-admin}
     ADMIN_TENANT_NAME=${ADMIN_TENANT_NAME:-admin}
@@ -335,11 +345,6 @@ function configure_tempest {
     # Image Features
     iniset $TEMPEST_CONFIG image-feature-enabled deactivate_image True
 
-    # Auth
-    TEMPEST_ALLOW_TENANT_ISOLATION=${TEMPEST_ALLOW_TENANT_ISOLATION:-$TEMPEST_HAS_ADMIN}
-    iniset $TEMPEST_CONFIG auth allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True}
-    iniset $TEMPEST_CONFIG auth tempest_roles "Member"
-
     # Compute
     iniset $TEMPEST_CONFIG compute ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # DEPRECATED
     iniset $TEMPEST_CONFIG compute network_for_ssh $PRIVATE_NETWORK_NAME
@@ -545,6 +550,19 @@ function configure_tempest {
         sudo chown $STACK_USER $BOTO_CONF
     fi
 
+    # Auth
+    iniset $TEMPEST_CONFIG auth tempest_roles "Member"
+    if [[ $TEMPEST_USE_TEST_ACCOUNTS == "True" ]]; then
+        if [[ $TEMPEST_HAS_ADMIN == "True" ]]; then
+            tempest-account-generator -c $TEMPEST_CONFIG --os-username $ADMIN_USERNAME --os-password $ADMIN_PASSWORD --os-tenant-name $ADMIN_TENANT_NAME -r $TEMPEST_CONCURRENCY --with-admin etc/accounts.yaml
+        else
+            tempest-account-generator -c $TEMPEST_CONFIG --os-username $ADMIN_USERNAME --os-password $ADMIN_PASSWORD --os-tenant-name $ADMIN_TENANT_NAME -r $TEMPEST_CONCURRENCY etc/accounts.yaml
+        fi
+        iniset $TEMPEST_CONFIG auth allow_tenant_isolation False
+        iniset $TEMPEST_CONFIG auth test_accounts_file "etc/accounts.yaml"
+    else
+        iniset $TEMPEST_CONFIG auth allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True}
+    fi
     # Restore IFS
     IFS=$ifs
 }

From 19c5e62a04902e19a603d5a55bcfc5f318a522cf Mon Sep 17 00:00:00 2001
From: Jamie Lennox
Date: Wed, 5 Aug 2015 15:53:21 +1000
Subject: [PATCH 0408/2941] Configure glance swift communication with v3

With the aim of moving everything fully over to v3 authentication we
need to configure glance_store to use v3 when calling swift.
Requires glance_store 0.8.0 Change-Id: I61e8c5a4136404077f5505ebc2edfe49841c244f Implements: bp keystonev3 --- lib/glance | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/glance b/lib/glance index f200dcaeea..b1b0f32f62 100644 --- a/lib/glance +++ b/lib/glance @@ -154,7 +154,10 @@ function configure_glance { iniset $GLANCE_SWIFT_STORE_CONF ref1 user $SERVICE_TENANT_NAME:glance-swift iniset $GLANCE_SWIFT_STORE_CONF ref1 key $SERVICE_PASSWORD - iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_address $KEYSTONE_SERVICE_URI/v2.0/ + iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_address $KEYSTONE_SERVICE_URI/v3 + iniset $GLANCE_SWIFT_STORE_CONF ref1 user_domain_id default + iniset $GLANCE_SWIFT_STORE_CONF ref1 project_domain_id default + iniset $GLANCE_SWIFT_STORE_CONF ref1 auth_version 3 # commenting is not strictly necessary but it's confusing to have bad values in conf inicomment $GLANCE_API_CONF glance_store swift_store_user From 1991e7599313cdbc75cf7674df94f32e9443cc60 Mon Sep 17 00:00:00 2001 From: Daniel Gonzalez Date: Tue, 11 Aug 2015 19:34:22 +0200 Subject: [PATCH 0409/2941] Fix creation of endpoints in multi-region Keystone API v3 does currently not support filtering for region names. As a consequence an additional check is needed in get_or_create_endpoint to check if an endpoint must be created for a given region or if it already exists. See related bug for more information regarding the missing region filtering in keystone. 
Closes-Bug: #1483784 Related-Bug: #1482772 Change-Id: Ia6a497b9fb58f7474636ab52dc01b99857bed3a2 --- functions-common | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/functions-common b/functions-common index 60cf04c499..641eca8362 100644 --- a/functions-common +++ b/functions-common @@ -852,13 +852,18 @@ function get_or_create_service { # Usage: _get_or_create_endpoint_with_interface function _get_or_create_endpoint_with_interface { local endpoint_id + # TODO(dgonzalez): The check of the region name, as done in the grep + # statement below, exists only because keystone does currently + # not allow filtering the region name when listing endpoints. If keystone + # gets support for this, the check for the region name can be removed. + # Related bug in keystone: https://bugs.launchpad.net/keystone/+bug/1482772 endpoint_id=$(openstack endpoint list \ --os-url $KEYSTONE_SERVICE_URI_V3 \ --os-identity-api-version=3 \ --service $1 \ --interface $2 \ --region $4 \ - -c ID -f value) + -c ID -c Region -f value | grep $4 | cut -f 1 -d " ") if [[ -z "$endpoint_id" ]]; then # Creates new endpoint endpoint_id=$(openstack endpoint create \ From 75c1dfe3b0adc8bcb98276bcdbecca76a94eaab7 Mon Sep 17 00:00:00 2001 From: Jamie Lennox Date: Thu, 13 Aug 2015 10:40:57 +1000 Subject: [PATCH 0410/2941] Rename bad option value To disable tempest running the v2 tests when the identity v2 api is disabled you need to set api_v2=False not v2_api=False. 
Change-Id: Ied8a0593619dccb5985f9a1e51feb370754336c7 --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 240d55c545..6863bae9de 100644 --- a/lib/tempest +++ b/lib/tempest @@ -323,7 +323,7 @@ function configure_tempest { fi if [ "$ENABLE_IDENTITY_V2" == "False" ]; then # Only Identity v3 is available; then skip Identity API v2 tests - iniset $TEMPEST_CONFIG identity-feature-enabled v2_api False + iniset $TEMPEST_CONFIG identity-feature-enabled api_v2 False # In addition, use v3 auth tokens for running all Tempest tests iniset $TEMPEST_CONFIG identity auth_version v3 else From 7b105c572ed51510d951304b31c043cfe4674731 Mon Sep 17 00:00:00 2001 From: David Lyle Date: Mon, 27 Jul 2015 17:14:32 -0600 Subject: [PATCH 0411/2941] Move horizon apache root to /dashboard With keystone's move to /identity, a conflict in for resources was created as both keystone and horizon used /identity. The keystone config took precedence and rendered API output in the horizon UI. This patch sets the root for horizon to /dashboard and serves all horizon content from there. Additionally, a RedirectMatch has been added to the apache config for horizon to redirect '/' to '/dashboard' this will allow the implementation to change without being immediately painful to users. Also made the path '/dashboard/' configurable in stackrc. Closes-Bug: #1478306 Depends-On: I9a04f936ed6d8c14775a332dc28e903992806c42 for devstack-gate changes to remove hard coded horizon url structure assumptions. 
Change-Id: I6fbca5cea9e44df160afbccc71bd045437657320 --- files/apache-horizon.template | 7 +++++-- lib/horizon | 4 ++++ stack.sh | 2 +- stackrc | 3 +++ 4 files changed, 13 insertions(+), 3 deletions(-) diff --git a/files/apache-horizon.template b/files/apache-horizon.template index 68838985ee..bfd75678e3 100644 --- a/files/apache-horizon.template +++ b/files/apache-horizon.template @@ -1,5 +1,5 @@ - WSGIScriptAlias / %HORIZON_DIR%/openstack_dashboard/wsgi/django.wsgi + WSGIScriptAlias %WEBROOT% %HORIZON_DIR%/openstack_dashboard/wsgi/django.wsgi WSGIDaemonProcess horizon user=%USER% group=%GROUP% processes=3 threads=10 home=%HORIZON_DIR% display-name=%{GROUP} WSGIApplicationGroup %{GLOBAL} @@ -8,7 +8,10 @@ WSGIProcessGroup horizon DocumentRoot %HORIZON_DIR%/.blackhole/ - Alias /media %HORIZON_DIR%/openstack_dashboard/static + Alias %WEBROOT%/media %HORIZON_DIR%/openstack_dashboard/static + Alias %WEBROOT%/static %HORIZON_DIR%/static + + RedirectMatch "^/$" "%WEBROOT%/" Options FollowSymLinks diff --git a/lib/horizon b/lib/horizon index b0f306b675..9fe0aa8b49 100644 --- a/lib/horizon +++ b/lib/horizon @@ -93,6 +93,9 @@ function init_horizon { local local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py cp $HORIZON_SETTINGS $local_settings + _horizon_config_set $local_settings "" WEBROOT \"$HORIZON_APACHE_ROOT/\" + _horizon_config_set $local_settings "" CUSTOM_THEME_PATH \"themes/webroot\" + _horizon_config_set $local_settings "" COMPRESS_OFFLINE True _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_DEFAULT_ROLE \"Member\" @@ -122,6 +125,7 @@ function init_horizon { s,%HORIZON_DIR%,$HORIZON_DIR,g; s,%APACHE_NAME%,$APACHE_NAME,g; s,%DEST%,$DEST,g; + s,%WEBROOT%,$HORIZON_APACHE_ROOT,g; \" $FILES/apache-horizon.template >$horizon_conf" if is_ubuntu; then diff --git a/stack.sh b/stack.sh index cc8bc8c88f..cfcf126bda 100755 --- a/stack.sh +++ b/stack.sh @@ -1421,7 +1421,7 @@ fi # If you installed Horizon on this server you should be able # to 
access the site using your browser. if is_service_enabled horizon; then - echo "Horizon is now available at http://$SERVICE_HOST/" + echo "Horizon is now available at http://$SERVICE_HOST$HORIZON_APACHE_ROOT" fi # If Keystone is present you can point ``nova`` cli to this server diff --git a/stackrc b/stackrc index 8beef9639f..f4245a40cf 100644 --- a/stackrc +++ b/stackrc @@ -87,6 +87,9 @@ TEMPEST_SERVICES="" # Set the default Nova APIs to enable NOVA_ENABLED_APIS=ec2,osapi_compute,metadata +# Set the root URL for Horizon +HORIZON_APACHE_ROOT="/dashboard" + # Whether to use 'dev mode' for screen windows. Dev mode works by # stuffing text into the screen windows so that a developer can use # ctrl-c, up-arrow, enter to restart the service. Starting services From b237b93f2bdfd66152bd68eedccec85ce0cb75b8 Mon Sep 17 00:00:00 2001 From: venkatamahesh Date: Mon, 10 Aug 2015 16:07:03 +0530 Subject: [PATCH 0412/2941] Location of the "local.conf" file is confusing. It is updated. It was shown that the local.conf is at root devstack directory, but it is at devstack/samples directory. So the path is updated. 1.) Copy the file into root Devstack directory. Change-Id: I6ff8a404a3664c892bb458023c57ccc5d0926fdf Closes-Bug: #1464491 --- doc/source/configuration.rst | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 05a8d95f51..90b7d44dec 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -28,8 +28,10 @@ simplify this process and meet the following goals: local.conf ========== -The new configuration file is ``local.conf`` and resides in the root -DevStack directory like the old ``localrc`` file. It is a modified INI +The new configuration file is ``local.conf`` and should reside in the +root Devstack directory. An example of such ``local.conf`` file +is provided in the ``devstack/samples`` directory. 
Copy this file into +the root Devstack directory and adapt it to your needs. It is a modified INI format file that introduces a meta-section header to carry additional information regarding the configuration files to be changed. From 6b172c8dd52effc649673ac5955d1ec8ae5016f9 Mon Sep 17 00:00:00 2001 From: Adam Kacmarsky Date: Thu, 13 Aug 2015 15:14:05 -0600 Subject: [PATCH 0413/2941] Always add OVS port in _move_neutron_addresses_route Added functionallity to allow _move_neutron_addresses_route to support interfaces without a configured IP address. If PUBLIC_INTERFACE is set to an interface without a configured IP, only the port will be added to the OVS_PHYSICAL_BRIDGE. Change-Id: I511ea5229ab871298086af5c96761390529bd85e --- lib/neutron-legacy | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 4069439183..4bbc802ba9 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -793,7 +793,8 @@ function stop_neutron { } # _move_neutron_addresses_route() - Move the primary IP to the OVS bridge -# on startup, or back to the public interface on cleanup +# on startup, or back to the public interface on cleanup. If no IP is +# configured on the interface, just add it as a port to the OVS bridge. function _move_neutron_addresses_route { local from_intf=$1 local to_intf=$2 @@ -806,7 +807,8 @@ function _move_neutron_addresses_route { # on configure we will also add $from_intf as a port on $to_intf, # assuming it is an OVS bridge. 
- local IP_BRD=$(ip -f $af a s dev $from_intf | awk '/inet/ { print $2, $3, $4; exit }') + local IP_ADD="" + local IP_DEL="" local DEFAULT_ROUTE_GW=$(ip r | awk "/default.+$from_intf/ { print \$3; exit }") local ADD_OVS_PORT="" @@ -826,7 +828,12 @@ function _move_neutron_addresses_route { ADD_OVS_PORT="sudo ovs-vsctl --may-exist add-port $to_intf $from_intf" fi - sudo ip addr del $IP_BRD dev $from_intf; sudo ip addr add $IP_BRD dev $to_intf; $ADD_OVS_PORT; $ADD_DEFAULT_ROUTE + if [[ "$IP_BRD" != "" ]]; then + IP_ADD="sudo ip addr del $IP_BRD dev $from_intf" + IP_DEL="sudo ip addr add $IP_BRD dev $to_intf" + fi + + $IP_ADD; $IP_DEL; $ADD_OVS_PORT; $ADD_DEFAULT_ROUTE fi } @@ -834,9 +841,7 @@ function _move_neutron_addresses_route { # runs that a clean run would need to clean up function cleanup_neutron { - if [[ $(ip -f inet a s dev "$OVS_PHYSICAL_BRIDGE" | grep -c 'global') != 0 ]]; then - _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False "inet" - fi + _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False "inet" if [[ $(ip -f inet6 a s dev "$OVS_PHYSICAL_BRIDGE" | grep -c 'global') != 0 ]]; then _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False "inet6" @@ -1021,9 +1026,7 @@ function _configure_neutron_l3_agent { neutron_plugin_configure_l3_agent - if [[ $(ip -f inet a s dev "$PUBLIC_INTERFACE" | grep -c 'global') != 0 ]]; then - _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True "inet" - fi + _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" True "inet" if [[ $(ip -f inet6 a s dev "$PUBLIC_INTERFACE" | grep -c 'global') != 0 ]]; then _move_neutron_addresses_route "$PUBLIC_INTERFACE" "$OVS_PHYSICAL_BRIDGE" False "inet6" From dbdee698700d9c7cf86ad3cde74e8b4347ef757a Mon Sep 17 00:00:00 2001 From: Jamie Lennox Date: Fri, 14 Aug 2015 12:22:18 +1000 Subject: [PATCH 0414/2941] Use standard get_or_create_role to find role The openstack role 
list command doesn't include any identity API version information and so will fail when running purely with v3. We could add this information to the command however we already have a function that does what swift requires so we should use it. Change-Id: I5d5417eaed432760bfb97cf35bd76a0919c6004d --- lib/swift | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/swift b/lib/swift index 96d730ef2c..7bc811e1e6 100644 --- a/lib/swift +++ b/lib/swift @@ -610,7 +610,7 @@ function create_swift_accounts { KEYSTONE_CATALOG_BACKEND=${KEYSTONE_CATALOG_BACKEND:-sql} - local another_role=$(openstack role list | awk "/ anotherrole / { print \$2 }") + local another_role=$(get_or_create_role "anotherrole") # NOTE(jroll): Swift doesn't need the admin role here, however Ironic uses # temp urls, which break when uploaded by a non-admin role From e4289c88c8007ae942a18a1786fc6fd36f2489c7 Mon Sep 17 00:00:00 2001 From: Paul Michali Date: Fri, 14 Aug 2015 11:49:27 -0400 Subject: [PATCH 0415/2941] Prevent spawning two VPN agents Currently, if the VPN devstack plugin is enabled (which is the method used for VPN in all test jobs), there will be two VPN agent processes started. This doesn't seem to affect the tests, but is incorrect. To resolve this, the proposal is to do this in two steps. With this commit, the script is modified to start the q-vpn process, if q-vpn is enabled (legacy), and to only start q-l3 process, if neither q-vpn nor neutron-vpnaas is enabled. Once committed, the opertion will be the same - if no VPN service is enabled, we get q-l3 (correct); if legacy q-vpn is enabled (only), we get q-vpn (correct); if the plugin is used (the default), we get two q-vpn processes started (wrong). With a separate plugin commit (to be pushed next), the plugin will be renamed to neutron-vpnaas, and then we'll get only one agent process (q-vpn or neutron-vpnaas) runing. 
We can't commit the plugin first, because both the VPN agent and the q-l3 agent will be started at once (just as bad, if not worse). Change-Id: I2bb7ac01e619c8a9b22bd517a4ff60d67035dfed Partial-Bug: 1484141 --- lib/neutron-legacy | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 4069439183..01be7cb244 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -730,7 +730,9 @@ function start_neutron_l2_agent { function start_neutron_other_agents { run_process q-dhcp "python $AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file=$Q_DHCP_CONF_FILE" - if is_service_enabled q-vpn; then + if is_service_enabled neutron-vpnaas; then + : # Started by plugin + elif is_service_enabled q-vpn; then run_process q-vpn "$AGENT_VPN_BINARY $(determine_config_files neutron-vpn-agent)" else run_process q-l3 "python $AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)" From a83e90b56080d1c86f98e66d146f781c19d150c5 Mon Sep 17 00:00:00 2001 From: Tim Buckley Date: Wed, 5 Aug 2015 10:25:00 -0600 Subject: [PATCH 0416/2941] Enable CSV logging output for DStat. Future work toward visualization of DevStack and devstack-gate performance would benefit greatly from the availability of machine-parsable DStat output. This patch outputs an additional logfile to $LOGDIR, `dstat-csv.log`, using DStat's built-in CSV logging functionality. An additional instance of DStat is started during start_dstat that outputs to CSV-formatted text without `--top-cpu-adv` and `-top-io-adv` enabled, as these plugins are currently incompatible with CSV output. To facilitate this, a new `dstat.sh` script is added to $TOP_DIR/tools/ to act as a daemon to manage the two processes. 
Change-Id: I826c94c35b6a109308b4f132c181ff7a1f63bc7b Depends-On: I534fb1f9356a7948d2fec0aecc7f275e47362a11 --- lib/dstat | 3 +-- tools/dstat.sh | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 33 insertions(+), 2 deletions(-) create mode 100755 tools/dstat.sh diff --git a/lib/dstat b/lib/dstat index f11bfa55c0..fe4790b12d 100644 --- a/lib/dstat +++ b/lib/dstat @@ -19,8 +19,7 @@ set +o xtrace # start_dstat() - Start running processes, including screen function start_dstat { # A better kind of sysstat, with the top process per time slice - DSTAT_OPTS="-tcmndrylpg --top-cpu-adv --top-io-adv" - run_process dstat "dstat $DSTAT_OPTS" + run_process dstat "$TOP_DIR/tools/dstat.sh $LOGDIR" # To enable peakmem_tracker add: # enable_service peakmem_tracker diff --git a/tools/dstat.sh b/tools/dstat.sh new file mode 100755 index 0000000000..6ba4515acf --- /dev/null +++ b/tools/dstat.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +# **tools/dstat.sh** - Execute instances of DStat to log system load info +# +# Multiple instances of DStat are executed in order to take advantage of +# incompatible features, particularly CSV output and the "top-cpu-adv" and +# "top-io-adv" flags. +# +# Assumes: +# - dstat command is installed + +# Retreive log directory as argument from calling script. +LOGDIR=$1 + +# Command line arguments for primary DStat process. +DSTAT_OPTS="-tcmndrylpg --top-cpu-adv --top-io-adv" + +# Command-line arguments for secondary background DStat process. +DSTAT_CSV_OPTS="-tcmndrylpg --output $LOGDIR/dstat-csv.log" + +# Execute and background the secondary dstat process and discard its output. +dstat $DSTAT_CSV_OPTS >& /dev/null & + +# Execute and background the primary dstat process, but keep its output in this +# TTY. +dstat $DSTAT_OPTS & + +# Catch any exit signals, making sure to also terminate any child processes. +trap "kill -- -$$" EXIT + +# Keep this script running as long as child dstat processes are alive. 
+wait From 51bddb8c64f5cb4086d672574202262ad9c38332 Mon Sep 17 00:00:00 2001 From: Lucas Alvares Gomes Date: Fri, 14 Aug 2015 17:21:47 +0100 Subject: [PATCH 0417/2941] Ironic: Create nodes with names This patch is setting a name for each node created in Ironic, when testing stuff it's easier to refer to a nome by its name then uuid. The format of the name is: node-0, node-1, ... Change-Id: I60fcddbcb36d1b1da8b3846b6edf14c59401f102 --- lib/ironic | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ironic b/lib/ironic index 1323446c1f..b3ad586923 100644 --- a/lib/ironic +++ b/lib/ironic @@ -618,6 +618,7 @@ function enroll_nodes { local node_id=$(ironic node-create $standalone_node_uuid\ --chassis_uuid $chassis_id \ --driver $IRONIC_DEPLOY_DRIVER \ + --name node-$total_nodes \ -p cpus=$ironic_node_cpu\ -p memory_mb=$ironic_node_ram\ -p local_gb=$ironic_node_disk\ From 597c902dfc9ea62f1a1455d6db7c7c50d09a5876 Mon Sep 17 00:00:00 2001 From: Masaki Matsushita Date: Sat, 15 Aug 2015 11:35:20 +0900 Subject: [PATCH 0418/2941] use $SERVICE_HOST in multi-node doc The change fix multi-node doc to use SERVICE_HOST. It resolves duplicate IP address. 
Closes-Bug: #1485159 Change-Id: If86393e9a37bcb911a9aa125829cd8ce684edd9f --- doc/source/guides/multinode-lab.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst index 557b522f69..1530a84523 100644 --- a/doc/source/guides/multinode-lab.rst +++ b/doc/source/guides/multinode-lab.rst @@ -175,12 +175,12 @@ machines, create a ``local.conf`` with: SERVICE_TOKEN=xyzpdqlazydog DATABASE_TYPE=mysql SERVICE_HOST=192.168.42.11 - MYSQL_HOST=192.168.42.11 - RABBIT_HOST=192.168.42.11 - GLANCE_HOSTPORT=192.168.42.11:9292 + MYSQL_HOST=$SERVICE_HOST + RABBIT_HOST=$SERVICE_HOST + GLANCE_HOSTPORT=$SERVICE_HOST:9292 ENABLED_SERVICES=n-cpu,n-net,n-api-meta,c-vol NOVA_VNC_ENABLED=True - NOVNCPROXY_URL="http://192.168.42.11:6080/vnc_auto.html" + NOVNCPROXY_URL="http://$SERVICE_HOST:6080/vnc_auto.html" VNCSERVER_LISTEN=$HOST_IP VNCSERVER_PROXYCLIENT_ADDRESS=$VNCSERVER_LISTEN From 4b115ad526df7e12bbdc71e0280b3c691e53ed04 Mon Sep 17 00:00:00 2001 From: Jamie Lennox Date: Fri, 29 May 2015 08:36:40 +0000 Subject: [PATCH 0419/2941] Convert identity defaults to keystone v3 api At this point all our function calls should be using the V3 APIs anyway so switch the authentication credentials to v3 compatible ones and remove all the hacks we added to force v3 API calls. 
Implements: bp keystonev3 Change-Id: If92d3e11b9a363454f77527783b6d25f4da9c249 --- functions-common | 35 ++++------------------------------- stack.sh | 15 ++++++++------- 2 files changed, 12 insertions(+), 38 deletions(-) diff --git a/functions-common b/functions-common index 641eca8362..cc5136da56 100644 --- a/functions-common +++ b/functions-common @@ -687,16 +687,13 @@ function policy_add { # Usage: get_or_create_domain function get_or_create_domain { local domain_id - local os_url="$KEYSTONE_SERVICE_URI_V3" # Gets domain id domain_id=$( # Gets domain id - openstack --os-token=$OS_TOKEN --os-url=$os_url \ - --os-identity-api-version=3 domain show $1 \ + openstack domain show $1 \ -f value -c id 2>/dev/null || # Creates new domain - openstack --os-token=$OS_TOKEN --os-url=$os_url \ - --os-identity-api-version=3 domain create $1 \ + openstack domain create $1 \ --description "$2" \ -f value -c id ) @@ -707,13 +704,11 @@ function get_or_create_domain { # Usage: get_or_create_group [] function get_or_create_group { local desc="${3:-}" - local os_url="$KEYSTONE_SERVICE_URI_V3" local group_id # Gets group id group_id=$( # Creates new group with --or-show - openstack --os-token=$OS_TOKEN --os-url=$os_url \ - --os-identity-api-version=3 group create $1 \ + openstack group create $1 \ --domain $2 --description "$desc" --or-show \ -f value -c id ) @@ -735,8 +730,6 @@ function get_or_create_user { openstack user create \ $1 \ --password "$2" \ - --os-url=$KEYSTONE_SERVICE_URI_V3 \ - --os-identity-api-version=3 \ --domain=$3 \ $email \ --or-show \ @@ -751,9 +744,7 @@ function get_or_create_project { local project_id project_id=$( # Creates new project with --or-show - openstack --os-url=$KEYSTONE_SERVICE_URI_V3 \ - --os-identity-api-version=3 \ - project create $1 \ + openstack project create $1 \ --domain=$2 \ --or-show -f value -c id ) @@ -767,8 +758,6 @@ function get_or_create_role { role_id=$( # Creates role with --or-show openstack role create $1 \ - 
--os-url=$KEYSTONE_SERVICE_URI_V3 \ - --os-identity-api-version=3 \ --or-show -f value -c id ) echo $role_id @@ -781,8 +770,6 @@ function get_or_add_user_project_role { # Gets user role id user_role_id=$(openstack role list \ --user $2 \ - --os-url=$KEYSTONE_SERVICE_URI_V3 \ - --os-identity-api-version=3 \ --column "ID" \ --project $3 \ --column "Name" \ @@ -793,8 +780,6 @@ function get_or_add_user_project_role { $1 \ --user $2 \ --project $3 \ - --os-url=$KEYSTONE_SERVICE_URI_V3 \ - --os-identity-api-version=3 \ | grep " id " | get_field 2) fi echo $user_role_id @@ -806,21 +791,15 @@ function get_or_add_group_project_role { local group_role_id # Gets group role id group_role_id=$(openstack role list \ - --os-url=$KEYSTONE_SERVICE_URI_V3 \ - --os-identity-api-version=3 \ --group $2 \ --project $3 \ -c "ID" -f value) if [[ -z "$group_role_id" ]]; then # Adds role to group and get it openstack role add $1 \ - --os-url=$KEYSTONE_SERVICE_URI_V3 \ - --os-identity-api-version=3 \ --group $2 \ --project $3 group_role_id=$(openstack role list \ - --os-url=$KEYSTONE_SERVICE_URI_V3 \ - --os-identity-api-version=3 \ --group $2 \ --project $3 \ -c "ID" -f value) @@ -838,8 +817,6 @@ function get_or_create_service { openstack service show $2 -f value -c id 2>/dev/null || # Creates new service if not exists openstack service create \ - --os-url $KEYSTONE_SERVICE_URI_V3 \ - --os-identity-api-version=3 \ $2 \ --name $1 \ --description="$3" \ @@ -858,8 +835,6 @@ function _get_or_create_endpoint_with_interface { # gets support for this, the check for the region name can be removed. 
# Related bug in keystone: https://bugs.launchpad.net/keystone/+bug/1482772 endpoint_id=$(openstack endpoint list \ - --os-url $KEYSTONE_SERVICE_URI_V3 \ - --os-identity-api-version=3 \ --service $1 \ --interface $2 \ --region $4 \ @@ -867,8 +842,6 @@ function _get_or_create_endpoint_with_interface { if [[ -z "$endpoint_id" ]]; then # Creates new endpoint endpoint_id=$(openstack endpoint create \ - --os-url $KEYSTONE_SERVICE_URI_V3 \ - --os-identity-api-version=3 \ $1 $2 $3 --region $4 -f value -c id) fi diff --git a/stack.sh b/stack.sh index cc8bc8c88f..fa2e490704 100755 --- a/stack.sh +++ b/stack.sh @@ -987,13 +987,15 @@ if is_service_enabled keystone; then start_keystone fi + export OS_IDENTITY_API_VERSION=3 + # Set up a temporary admin URI for Keystone - SERVICE_ENDPOINT=$KEYSTONE_AUTH_URI/v2.0 + SERVICE_ENDPOINT=$KEYSTONE_AUTH_URI/v3 if is_service_enabled tls-proxy; then export OS_CACERT=$INT_CA_DIR/ca-chain.pem # Until the client support is fixed, just use the internal endpoint - SERVICE_ENDPOINT=http://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT_INT/v2.0 + SERVICE_ENDPOINT=http://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT_INT/v3 fi # Setup OpenStackClient token-endpoint auth @@ -1021,14 +1023,13 @@ if is_service_enabled keystone; then # Begone token auth unset OS_TOKEN OS_URL - # force set to use v2 identity authentication even with v3 commands - export OS_AUTH_TYPE=v2password - # Set up password auth credentials now that Keystone is bootstrapped - export OS_AUTH_URL=$SERVICE_ENDPOINT - export OS_TENANT_NAME=admin + export OS_AUTH_URL=$KEYSTONE_AUTH_URI export OS_USERNAME=admin + export OS_USER_DOMAIN_ID=default export OS_PASSWORD=$ADMIN_PASSWORD + export OS_PROJECT_NAME=admin + export OS_PROJECT_DOMAIN_ID=default export OS_REGION_NAME=$REGION_NAME fi From 05076fb7ea45b7f1f7d9f232afb56dbb1d6e2f08 Mon Sep 17 00:00:00 2001 From: Jamie Lennox Date: Sat, 15 Aug 2015 19:01:59 +1000 Subject: [PATCH 0420/2941] Configure horizon with identity v3 Currently horizon 
configures keystone using v3 only if v2 is not available. Really we should just always be using v3. Change-Id: Icac4d90b617209da75abf33f8e25ffc021c45fdb --- lib/horizon | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/lib/horizon b/lib/horizon index b0f306b675..108a7f0a8f 100644 --- a/lib/horizon +++ b/lib/horizon @@ -98,13 +98,8 @@ function init_horizon { _horizon_config_set $local_settings "" OPENSTACK_HOST \"${KEYSTONE_SERVICE_HOST}\" - if [ "$ENABLE_IDENTITY_V2" == "False" ]; then - # Only Identity v3 API is available; then use it with v3 auth tokens - _horizon_config_set $local_settings "" OPENSTACK_API_VERSIONS {\"identity\":3} - _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_URL "\"${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}:${KEYSTONE_SERVICE_PORT}/v3\"" - else - _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_URL "\"${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}:${KEYSTONE_SERVICE_PORT}/v2.0\"" - fi + _horizon_config_set $local_settings "" OPENSTACK_API_VERSIONS {\"identity\":3} + _horizon_config_set $local_settings "" OPENSTACK_KEYSTONE_URL "\"${KEYSTONE_SERVICE_PROTOCOL}://${KEYSTONE_SERVICE_HOST}:${KEYSTONE_SERVICE_PORT}/v3\"" if [ -f $SSL_BUNDLE_FILE ]; then _horizon_config_set $local_settings "" OPENSTACK_SSL_CACERT \"${SSL_BUNDLE_FILE}\" From 1e7fb4c9a0a7a45fb89363a25504e8b173e032aa Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Thu, 23 Jul 2015 15:49:39 +0900 Subject: [PATCH 0421/2941] _configure_neutron_dhcp_agent: Modify the right config file This has been incorrect since the initial commit (I632df4149e9d7f78cb5a7091dfe4ea8f8ca3ddfa) Closes-Bug: #1483499 Change-Id: Ife4defce989c4f3c7eb5381376c0f93de50a9668 --- lib/neutron-legacy | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 4069439183..64126094cb 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -979,7 +979,7 @@ function 
_configure_neutron_dhcp_agent { iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE iniset $Q_DHCP_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - iniset $NEUTRON_TEST_CONFIG_FILE agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" + iniset $Q_DHCP_CONF_FILE agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" fi if ! is_service_enabled q-l3; then From fc657f4ff280e0908e096d562ce0770dbfd9e41a Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Thu, 23 Jul 2015 15:52:49 +0900 Subject: [PATCH 0422/2941] neutron-legacy: Update after DEFAULT.root_helper removal Update after change I17cd62c8763430bf3a4b67ab5e9cf5b736065133 . Closes-Bug: #1483501 Change-Id: Ieb5270484205cffe4ec97db0d59f012d1e7708d4 --- lib/neutron-legacy | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 64126094cb..7dddde15fd 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -977,7 +977,7 @@ function _configure_neutron_dhcp_agent { iniset $Q_DHCP_CONF_FILE DEFAULT verbose True iniset $Q_DHCP_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE - iniset $Q_DHCP_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" + iniset $Q_DHCP_CONF_FILE agent root_helper "$Q_RR_COMMAND" if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then iniset $Q_DHCP_CONF_FILE agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" fi @@ -1012,7 +1012,7 @@ function _configure_neutron_l3_agent { iniset $Q_L3_CONF_FILE DEFAULT verbose True iniset $Q_L3_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $Q_L3_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE - iniset $Q_L3_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" + iniset $Q_L3_CONF_FILE agent root_helper "$Q_RR_COMMAND" if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then iniset $Q_L3_CONF_FILE agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" fi @@ -1036,7 +1036,7 @@ function 
_configure_neutron_metadata_agent { iniset $Q_META_CONF_FILE DEFAULT verbose True iniset $Q_META_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $Q_META_CONF_FILE DEFAULT nova_metadata_ip $Q_META_DATA_IP - iniset $Q_META_CONF_FILE DEFAULT root_helper "$Q_RR_COMMAND" + iniset $Q_META_CONF_FILE agent root_helper "$Q_RR_COMMAND" if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then iniset $Q_META_CONF_FILE agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" fi From 05aa3846a0402edc9cc49f4ba36f09592004b273 Mon Sep 17 00:00:00 2001 From: Clark Boylan Date: Mon, 3 Aug 2015 11:14:13 -0700 Subject: [PATCH 0423/2941] Just use constraints everywhere Having behavior on your laptop diverge from behavior in the gate is confusing. Just use constraints on every devstack run to be consistent. Users of devstack can edit the requirements repo in order to change these constraints locally if necessary. Change-Id: I843208e2e982eb04931b76f5cb4bd219fbcd70de --- functions-common | 1 - inc/python | 51 ++++++++------------------------------------ lib/infra | 1 - stack.sh | 3 --- stackrc | 13 +++-------- tools/install_pip.sh | 2 +- 6 files changed, 13 insertions(+), 58 deletions(-) diff --git a/functions-common b/functions-common index 60cf04c499..749ca2b40c 100644 --- a/functions-common +++ b/functions-common @@ -28,7 +28,6 @@ # - ``REQUIREMENTS_DIR`` # - ``STACK_USER`` # - ``TRACK_DEPENDS`` -# - ``UNDO_REQUIREMENTS`` # - ``http_proxy``, ``https_proxy``, ``no_proxy`` # diff --git a/inc/python b/inc/python index 54e19a7886..5c9dc5c3e5 100644 --- a/inc/python +++ b/inc/python @@ -67,7 +67,6 @@ function pip_install_gr { # Wrapper for ``pip install`` to set cache and proxy environment variables # Uses globals ``OFFLINE``, ``PIP_VIRTUAL_ENV``, # ``PIP_UPGRADE``, ``TRACK_DEPENDS``, ``*_proxy``, -# ``USE_CONSTRAINTS`` # pip_install package [package ...] 
function pip_install { local xtrace=$(set +o | grep xtrace) @@ -105,11 +104,8 @@ function pip_install { fi cmd_pip="$cmd_pip install" - - # Handle a constraints file, if needed. - if [[ "$USE_CONSTRAINTS" == "True" ]]; then - cmd_pip="$cmd_pip -c $REQUIREMENTS_DIR/upper-constraints.txt" - fi + # Always apply constraints + cmd_pip="$cmd_pip -c $REQUIREMENTS_DIR/upper-constraints.txt" local pip_version=$(python -c "import pip; \ print(pip.__version__.strip('.')[0])") @@ -187,13 +183,13 @@ function setup_dev_lib { # use this, especially *oslo* ones function setup_install { local project_dir=$1 - setup_package_with_req_sync $project_dir + setup_package_with_constraints_edit $project_dir } # this should be used for projects which run services, like all services function setup_develop { local project_dir=$1 - setup_package_with_req_sync $project_dir -e + setup_package_with_constraints_edit $project_dir -e } # determine if a project as specified by directory is in @@ -209,32 +205,16 @@ function is_in_projects_txt { # ``pip install -e`` the package, which processes the dependencies # using pip before running `setup.py develop` # -# Updates the dependencies in project_dir from the -# openstack/requirements global list before installing anything. +# Updates the constraints from REQUIREMENTS_DIR to reflect the +# future installed state of this package. This ensures when we +# install this package we get the from source version. 
# -# Uses globals ``TRACK_DEPENDS``, ``REQUIREMENTS_DIR``, ``UNDO_REQUIREMENTS`` +# Uses globals ``REQUIREMENTS_DIR`` # setup_develop directory -function setup_package_with_req_sync { +function setup_package_with_constraints_edit { local project_dir=$1 local flags=$2 - # Don't update repo if local changes exist - # Don't use buggy "git diff --quiet" - # ``errexit`` requires us to trap the exit code when the repo is changed - local update_requirements=$(cd $project_dir && git diff --exit-code >/dev/null || echo "changed") - - if [[ $update_requirements != "changed" && "$USE_CONSTRAINTS" == "False" ]]; then - if is_in_projects_txt $project_dir; then - (cd $REQUIREMENTS_DIR; \ - ./.venv/bin/python update.py $project_dir) - else - # soft update projects not found in requirements project.txt - echo "$project_dir not a constrained repository, soft enforcing requirements" - (cd $REQUIREMENTS_DIR; \ - ./.venv/bin/python update.py -s $project_dir) - fi - fi - if [ -n "$REQUIREMENTS_DIR" ]; then # Constrain this package to this project directory from here on out. local name=$(awk '/^name.*=/ {print $3}' $project_dir/setup.cfg) @@ -245,19 +225,6 @@ function setup_package_with_req_sync { setup_package $project_dir $flags - # We've just gone and possibly modified the user's source tree in an - # automated way, which is considered bad form if it's a development - # tree because we've screwed up their next git checkin. So undo it. - # - # However... there are some circumstances, like running in the gate - # where we really really want the overridden version to stick. 
So provide - # a variable that tells us whether or not we should UNDO the requirements - # changes (this will be set to False in the OpenStack ci gate) - if [ $UNDO_REQUIREMENTS = "True" ]; then - if [[ $update_requirements != "changed" ]]; then - (cd $project_dir && git reset --hard) - fi - fi } # ``pip install -e`` the package, which processes the dependencies diff --git a/lib/infra b/lib/infra index eb8000e8d3..89397de792 100644 --- a/lib/infra +++ b/lib/infra @@ -22,7 +22,6 @@ set +o xtrace # Defaults # -------- GITDIR["pbr"]=$DEST/pbr -REQUIREMENTS_DIR=$DEST/requirements # Entry Points # ------------ diff --git a/stack.sh b/stack.sh index 2288af57a4..ba935be76e 100755 --- a/stack.sh +++ b/stack.sh @@ -688,9 +688,6 @@ save_stackenv $LINENO echo_summary "Installing package prerequisites" source $TOP_DIR/tools/install_prereqs.sh -# Normalise USE_CONSTRAINTS -USE_CONSTRAINTS=$(trueorfalse False USE_CONSTRAINTS) - # Configure an appropriate Python environment if [[ "$OFFLINE" != "True" ]]; then PYPI_ALTERNATIVE_URL=${PYPI_ALTERNATIVE_URL:-""} $TOP_DIR/tools/install_pip.sh diff --git a/stackrc b/stackrc index 8beef9639f..f67afd9aee 100644 --- a/stackrc +++ b/stackrc @@ -149,13 +149,6 @@ DATABASE_QUERY_LOGGING=$(trueorfalse False DATABASE_QUERY_LOGGING) # Zero disables timeouts GIT_TIMEOUT=${GIT_TIMEOUT:-0} -# Constraints mode -# - False (default) : update git projects dependencies from global-requirements. -# -# - True : use upper-constraints.txt to constrain versions of packages intalled -# and do not edit projects at all. -USE_CONSTRAINTS=$(trueorfalse False USE_CONSTRAINTS) - # Repositories # ------------ @@ -163,6 +156,9 @@ USE_CONSTRAINTS=$(trueorfalse False USE_CONSTRAINTS) # Another option is https://git.openstack.org GIT_BASE=${GIT_BASE:-git://git.openstack.org} +# The location of REQUIREMENTS once cloned +REQUIREMENTS_DIR=$DEST/requirements + # Which libraries should we install from git instead of using released # versions on pypi? 
# @@ -627,9 +623,6 @@ USE_SCREEN=${SCREEN_DEV:-$USE_SCREEN} # Set default screen name SCREEN_NAME=${SCREEN_NAME:-stack} -# Undo requirements changes by global requirements -UNDO_REQUIREMENTS=${UNDO_REQUIREMENTS:-True} - # Allow the use of an alternate protocol (such as https) for service endpoints SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http} diff --git a/tools/install_pip.sh b/tools/install_pip.sh index 0f7c962b2b..7b42c8c485 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -20,7 +20,7 @@ TOP_DIR=`cd $TOOLS_DIR/..; pwd` cd $TOP_DIR # Import common functions -source $TOP_DIR/functions +source $TOP_DIR/stackrc FILES=$TOP_DIR/files From ea21eb4f69e2f2ea2c9c6d2fb9c4ed9aef4fc198 Mon Sep 17 00:00:00 2001 From: Marian Horban Date: Tue, 18 Aug 2015 06:57:18 -0400 Subject: [PATCH 0424/2941] Remove non-ASCII characters Change-Id: If1c68e5aab6990617519150d8aeb3f073df2ad17 --- lib/databases/mysql | 4 ++-- lib/swift | 2 +- lib/tempest | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/lib/databases/mysql b/lib/databases/mysql index fb55b60ff6..7ae9a936d6 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -85,12 +85,12 @@ function configure_database_mysql { sudo mysqladmin -u root password $DATABASE_PASSWORD || true fi - # Update the DB to give user ‘$DATABASE_USER’@’%’ full control of the all databases: + # Update the DB to give user '$DATABASE_USER'@'%' full control of the all databases: sudo mysql -uroot -p$DATABASE_PASSWORD -h127.0.0.1 -e "GRANT ALL PRIVILEGES ON *.* TO '$DATABASE_USER'@'%' identified by '$DATABASE_PASSWORD';" # Now update ``my.cnf`` for some local needs and restart the mysql service - # Change ‘bind-address’ from localhost (127.0.0.1) to any (::) and + # Change bind-address from localhost (127.0.0.1) to any (::) and # set default db type to InnoDB sudo bash -c "source $TOP_DIR/functions && \ iniset $my_conf mysqld bind-address "$SERVICE_LISTEN_ADDRESS" && \ diff --git a/lib/swift b/lib/swift index 
96d730ef2c..dac121be0c 100644 --- a/lib/swift +++ b/lib/swift @@ -97,7 +97,7 @@ SWIFT_EXTRAS_MIDDLEWARE_LAST=${SWIFT_EXTRAS_MIDDLEWARE_LAST:-} # the beginning of the pipeline, before authentication middlewares. SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH=${SWIFT_EXTRAS_MIDDLEWARE_NO_AUTH:-crossdomain} -# The ring uses a configurable number of bits from a path’s MD5 hash as +# The ring uses a configurable number of bits from a path's MD5 hash as # a partition index that designates a device. The number of bits kept # from the hash is known as the partition power, and 2 to the partition # power indicates the partition count. Partitioning the full MD5 hash diff --git a/lib/tempest b/lib/tempest index 3a9ba814d0..b598db4527 100644 --- a/lib/tempest +++ b/lib/tempest @@ -386,7 +386,7 @@ function configure_tempest { # and is now the default behavior. iniset $TEMPEST_CONFIG compute-feature-enabled allow_duplicate_networks ${NOVA_ALLOW_DUPLICATE_NETWORKS:-True} - # Network + # Network iniset $TEMPEST_CONFIG network api_version 2.0 iniset $TEMPEST_CONFIG network tenant_networks_reachable "$tenant_networks_reachable" iniset $TEMPEST_CONFIG network public_network_id "$public_network_id" @@ -468,7 +468,7 @@ function configure_tempest { if [[ ! 
-z "$DISABLE_VOLUME_API_EXTENSIONS" ]]; then # Enabled extensions are either the ones explicitly specified or those available on the API endpoint volume_api_extensions=${VOLUME_API_EXTENSIONS:-$(iniget $tmp_cfg_file volume-feature-enabled api_extensions | tr -d " ")} - # Remove disabled extensions + # Remove disabled extensions volume_api_extensions=$(remove_disabled_extensions $volume_api_extensions $DISABLE_VOLUME_API_EXTENSIONS) fi iniset $TEMPEST_CONFIG volume-feature-enabled api_extensions $volume_api_extensions From 2bb3a648929550ae9ff237185be43d864e1e0225 Mon Sep 17 00:00:00 2001 From: Hirofumi Ichihara Date: Tue, 18 Aug 2015 12:59:08 -0700 Subject: [PATCH 0425/2941] Fix duplicated section name Although l3_agent.ini, dhcp_agent.ini and metadata_agent.ini have "AGENT" section as default, devstack added "agent" section. Change-Id: Ie4034257d8aed00d67e3f28e7dd3b05cc5d89fc4 --- lib/neutron-legacy | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 7dddde15fd..a6743c31fa 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -960,9 +960,9 @@ function _configure_neutron_debug_command { iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT verbose False iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT debug False iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE - iniset $NEUTRON_TEST_CONFIG_FILE agent root_helper "$Q_RR_COMMAND" + iniset $NEUTRON_TEST_CONFIG_FILE AGENT root_helper "$Q_RR_COMMAND" if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - iniset $NEUTRON_TEST_CONFIG_FILE agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" + iniset $NEUTRON_TEST_CONFIG_FILE AGENT root_helper_daemon "$Q_RR_DAEMON_COMMAND" fi _neutron_setup_interface_driver $NEUTRON_TEST_CONFIG_FILE @@ -977,9 +977,9 @@ function _configure_neutron_dhcp_agent { iniset $Q_DHCP_CONF_FILE DEFAULT verbose True iniset $Q_DHCP_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces 
$Q_USE_NAMESPACE - iniset $Q_DHCP_CONF_FILE agent root_helper "$Q_RR_COMMAND" + iniset $Q_DHCP_CONF_FILE AGENT root_helper "$Q_RR_COMMAND" if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - iniset $Q_DHCP_CONF_FILE agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" + iniset $Q_DHCP_CONF_FILE AGENT root_helper_daemon "$Q_RR_DAEMON_COMMAND" fi if ! is_service_enabled q-l3; then @@ -1012,9 +1012,9 @@ function _configure_neutron_l3_agent { iniset $Q_L3_CONF_FILE DEFAULT verbose True iniset $Q_L3_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $Q_L3_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE - iniset $Q_L3_CONF_FILE agent root_helper "$Q_RR_COMMAND" + iniset $Q_L3_CONF_FILE AGENT root_helper "$Q_RR_COMMAND" if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - iniset $Q_L3_CONF_FILE agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" + iniset $Q_L3_CONF_FILE AGENT root_helper_daemon "$Q_RR_DAEMON_COMMAND" fi _neutron_setup_interface_driver $Q_L3_CONF_FILE @@ -1036,9 +1036,9 @@ function _configure_neutron_metadata_agent { iniset $Q_META_CONF_FILE DEFAULT verbose True iniset $Q_META_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $Q_META_CONF_FILE DEFAULT nova_metadata_ip $Q_META_DATA_IP - iniset $Q_META_CONF_FILE agent root_helper "$Q_RR_COMMAND" + iniset $Q_META_CONF_FILE AGENT root_helper "$Q_RR_COMMAND" if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then - iniset $Q_META_CONF_FILE agent root_helper_daemon "$Q_RR_DAEMON_COMMAND" + iniset $Q_META_CONF_FILE AGENT root_helper_daemon "$Q_RR_DAEMON_COMMAND" fi # Configures keystone for metadata_agent From 0612b485d3646e40ea0f70dfaadd7705a2b10c71 Mon Sep 17 00:00:00 2001 From: Tomoki Sekiyama Date: Tue, 18 Aug 2015 18:51:24 -0400 Subject: [PATCH 0426/2941] Ironic: Fix tag ID detection of tap devices On Linux kernel 4.1, `ip link` reports peer interface name for each Open vSwitch interface, like: $ ip link ... 71: ovs-tap1@brbm-tap1: ... 
~~~~~~~~~~ Currently it is regarded as a part of interface name, so causes failure in tap id detection from ovs-vsctl output, that results into ironic deployment failure. This patch removes the peer name from the interface name. Change-Id: Id3b181fa821e9bff1afabef4d63964f61fa49a65 Closes-Bug: #1486273 --- lib/ironic | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ironic b/lib/ironic index b3ad586923..d9c3396944 100644 --- a/lib/ironic +++ b/lib/ironic @@ -505,9 +505,9 @@ function create_ovs_taps { sleep 10 if [[ "$Q_USE_NAMESPACE" = "True" ]]; then - local tapdev=$(sudo ip netns exec qdhcp-${ironic_net_id} ip link list | grep " tap" | cut -d':' -f2 | cut -b2-) + local tapdev=$(sudo ip netns exec qdhcp-${ironic_net_id} ip link list | grep " tap" | cut -d':' -f2 | cut -d'@' -f1 | cut -b2-) else - local tapdev=$(sudo ip link list | grep " tap" | cut -d':' -f2 | cut -b2-) + local tapdev=$(sudo ip link list | grep " tap" | cut -d':' -f2 | cut -d'@' -f1 | cut -b2-) fi local tag_id=$(sudo ovs-vsctl show |grep ${tapdev} -A1 -m1 | grep tag | cut -d':' -f2 | cut -b2-) From 2d91fe8a6ba466c57bae9c7f16ece1d1cc6d7563 Mon Sep 17 00:00:00 2001 From: Shashank Hegde Date: Tue, 18 Aug 2015 18:33:55 -0700 Subject: [PATCH 0427/2941] Cloning requirements repository before pip install The commit 05aa3846a0402edc9cc49f4ba36f09592004b273 into devstack exposed a bug where pip_install is called before the requirements repository is cloned. This change ensures that the requirements repository exists before pip_install is called. Change-Id: I60b157fc98691764a69cf022852e7a95fc50cdd7 Closes-Bug: #1486304 --- stack.sh | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/stack.sh b/stack.sh index 341df7fc92..639f72b1bc 100755 --- a/stack.sh +++ b/stack.sh @@ -683,6 +683,11 @@ save_stackenv $LINENO # OpenStack uses a fair number of other projects. +# Bring down global requirements before any use of pip_install. 
This is +# necessary to ensure that the constraints file is in place before we +# attempt to apply any constraints to pip installs. +git_clone $REQUIREMENTS_REPO $REQUIREMENTS_DIR $REQUIREMENTS_BRANCH + # Install package requirements # Source it so the entire environment is available echo_summary "Installing package prerequisites" @@ -695,11 +700,6 @@ fi TRACK_DEPENDS=${TRACK_DEPENDS:-False} -# Bring down global requirements before any use of pip_install. This is -# necessary to ensure that the constraints file is in place before we -# attempt to apply any constraints to pip installs. -git_clone $REQUIREMENTS_REPO $REQUIREMENTS_DIR $REQUIREMENTS_BRANCH - # Install Python packages into a virtualenv so that we can track them if [[ $TRACK_DEPENDS = True ]]; then echo_summary "Installing Python packages into a virtualenv $DEST/.venv" From 67168e807adfaff996bf2767601fde0d8cb16c02 Mon Sep 17 00:00:00 2001 From: Mahito Date: Tue, 18 Aug 2015 23:59:29 -0700 Subject: [PATCH 0428/2941] Add "source $TOP_DIR/lib/lvm" to clean.sh When clean.sh is executed, it shows "command not found" messages. Commands are defined in lib/lvm, however lib/lvm doesn't include clean.sh. This pache add lib/lvm to clean.sh. 
Change-Id: I56672e949d25f7cdcda879badd992f849d06c749 Closes-Bug: 1486392 --- clean.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/clean.sh b/clean.sh index 74bcaee924..78e2a7a826 100755 --- a/clean.sh +++ b/clean.sh @@ -41,6 +41,7 @@ source $TOP_DIR/lib/rpc_backend source $TOP_DIR/lib/tls source $TOP_DIR/lib/oslo +source $TOP_DIR/lib/lvm source $TOP_DIR/lib/horizon source $TOP_DIR/lib/keystone source $TOP_DIR/lib/glance From d6456e67589a78f50f71e93fd789c1423513ab60 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Wed, 19 Aug 2015 10:33:23 -0400 Subject: [PATCH 0429/2941] Ability to specify keystone v3 in nova.conf for neutron As part of moving components to use keystone v3, this review allows nova.conf's [neutron] section to switch to using the auth_plugin in keystoneclient for talking to keystone /v3 API Change-Id: I42502bff147534199096fb581630b8559f311963 --- lib/neutron-legacy | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 9206fe1754..c4d2dd5f7e 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -471,11 +471,21 @@ function configure_neutron { function create_nova_conf_neutron { iniset $NOVA_CONF DEFAULT network_api_class "nova.network.neutronv2.api.API" - iniset $NOVA_CONF neutron admin_username "$Q_ADMIN_USERNAME" - iniset $NOVA_CONF neutron admin_password "$SERVICE_PASSWORD" - iniset $NOVA_CONF neutron admin_auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0" + + + if [ "$ENABLE_IDENTITY_V2" == "False" ]; then + iniset $NOVA_CONF neutron auth_plugin "v3password" + iniset $NOVA_CONF neutron auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v3" + iniset $NOVA_CONF neutron username "$Q_ADMIN_USERNAME" + iniset $NOVA_CONF neutron password "$SERVICE_PASSWORD" + iniset $NOVA_CONF neutron user_domain_name "default" + else + iniset $NOVA_CONF neutron admin_username "$Q_ADMIN_USERNAME" + iniset 
$NOVA_CONF neutron admin_password "$SERVICE_PASSWORD" + iniset $NOVA_CONF neutron admin_auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0" + iniset $NOVA_CONF neutron admin_tenant_name "$SERVICE_TENANT_NAME" + fi iniset $NOVA_CONF neutron auth_strategy "$Q_AUTH_STRATEGY" - iniset $NOVA_CONF neutron admin_tenant_name "$SERVICE_TENANT_NAME" iniset $NOVA_CONF neutron region_name "$REGION_NAME" iniset $NOVA_CONF neutron url "${Q_PROTOCOL}://$Q_HOST:$Q_PORT" From b203d0c71aeb155ae194650004f3a10335801b0f Mon Sep 17 00:00:00 2001 From: gordon chung Date: Wed, 12 Aug 2015 11:58:11 -0400 Subject: [PATCH 0430/2941] do not redefine path in elasticsearch in I0272d56bc2e50e8174db78bd449f65f60f7f4000, we reset DEST value when installing elasticsearch. it gets set to /opt/stack/ which is not always correct in gate causing the path to be wrong and elasticseach cannot be installed. we should reuse DEST from stackrc Change-Id: Ia3a2383ada30c4e92c37386aedd6164c69cac60a Closes-Bug: #1484182 --- pkg/elasticsearch.sh | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/pkg/elasticsearch.sh b/pkg/elasticsearch.sh index 79f67a0179..14d13cf733 100755 --- a/pkg/elasticsearch.sh +++ b/pkg/elasticsearch.sh @@ -6,9 +6,7 @@ # step can probably be factored out to something nicer TOP_DIR=$(cd $(dirname "$0")/.. && pwd) FILES=$TOP_DIR/files -source $TOP_DIR/functions -DEST=${DEST:-/opt/stack} -source $TOP_DIR/lib/infra +source $TOP_DIR/stackrc # Package source and version, all pkg files are expected to have # something like this, as well as a way to override them. From 9b21f98ce0aa5093e477bab68aede5af0fb8d9ad Mon Sep 17 00:00:00 2001 From: Andrey Pavlov Date: Thu, 20 Aug 2015 23:37:04 +0300 Subject: [PATCH 0431/2941] Add region definition to swift3 Swift3 should be in the same region as all other cloud. By default it has regaion name 'US'. 
It's ok for requests that signed by version 1 of signature because they haven't region information in request. But S3 signature of version 4 protocol sends region name to server and swift3 plugin checks that input region equals to internal. And because all cloud lives with 'RegionOne' then swift3 fails request because it has 'US' region name by default. Change-Id: Icd817183b1a040110372a8ae5d73fd2f0ec5559c Related-Bug: #1411078 --- lib/swift | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/swift b/lib/swift index dac121be0c..233a493acb 100644 --- a/lib/swift +++ b/lib/swift @@ -455,6 +455,7 @@ admin_password = ${SERVICE_PASSWORD} [filter:swift3] use = egg:swift3#swift3 +location = ${REGION_NAME} EOF fi From dca06dc7dec148cac2d9e27cdb20d7d26bb0a941 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Thu, 20 Aug 2015 13:56:57 -0700 Subject: [PATCH 0432/2941] Enable nbd if you're running an lxc virt_type without an lvm backend If nbd isn't enabled you can't boot instances with libvirt using lxc (unless you're using an lvm backend). Closes-Bug: #1487195 Co-Authored-By: Andrew Melton Change-Id: I08c4d498ed35166f566291d9530ca1ecfae05625 --- lib/nova | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/nova b/lib/nova index 6441a891eb..087cac6016 100644 --- a/lib/nova +++ b/lib/nova @@ -354,6 +354,12 @@ function configure_nova { sudo mount /cgroup fi fi + + # enable nbd for lxc unless you're using an lvm backend + # otherwise you can't boot instances + if [[ "$NOVA_BACKEND" != "LVM" ]]; then + sudo modprobe nbd + fi fi fi fi From 485c962667631e4f321ee2513729718305f4f372 Mon Sep 17 00:00:00 2001 From: Andrey Pavlov Date: Mon, 24 Aug 2015 22:55:19 +0300 Subject: [PATCH 0433/2941] Fix create_userrc.sh script Patchset fixes calculating EC2_URL/S3_URL for user rc files in 'accrc' directory. Currently calculation of these url's uses 'openstack endpoint show' command without specifying os-identity-v3 flag. But output is empty without such flag. 
So this patchset uses same construction as exists in functions-common. Change-Id: Ia4f2510750fa0f46e2f1d58cf0a7a16782f022b3 --- tools/create_userrc.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh index c2dbe1aeb4..de44abbbe5 100755 --- a/tools/create_userrc.sh +++ b/tools/create_userrc.sh @@ -158,12 +158,12 @@ fi export -n SERVICE_TOKEN SERVICE_ENDPOINT OS_SERVICE_TOKEN OS_SERVICE_ENDPOINT -EC2_URL=$(openstack endpoint show -f value -c publicurl ec2 || true) +EC2_URL=$(openstack endpoint list --service ec2 --interface public --os-identity-api-version=3 -c URL -f value || true) if [[ -z $EC2_URL ]]; then EC2_URL=http://localhost:8773/ fi -S3_URL=$(openstack endpoint show -f value -c publicurl s3 || true) +S3_URL=$(openstack endpoint list --service s3 --interface public --os-identity-api-version=3 -c URL -f value || true) if [[ -z $S3_URL ]]; then S3_URL=http://localhost:3333 fi From 403fbb1d33a3bbb0901d1a696ef68a3fe099dd70 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Mon, 24 Aug 2015 21:17:37 -0400 Subject: [PATCH 0434/2941] Fix trueorfalse call in tempest use accounts check The lib/tempest variable definition for TEMPEST_USE_TEST_ACCOUNTS was incorrectly calling the trueorfalse function by passing the variable's value to the function instead of the variable's name. This was causing trueorfalse the default value of false to always be returned even when specifying the option as true in the localrc. (well assuming True or it's variants wasn't an actual defined variable with a value that would return true) This commit fixes this issue by properly using the trueorfalse function. 
Change-Id: I8cefb58f49dcd2cb2def8a5071d0892af520e7f7 --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index e7f825f417..fe63015c5d 100644 --- a/lib/tempest +++ b/lib/tempest @@ -89,7 +89,7 @@ TEMPEST_HAS_ADMIN=$(trueorfalse True TEMPEST_HAS_ADMIN) # Credential provider configuration option variables TEMPEST_ALLOW_TENANT_ISOLATION=${TEMPEST_ALLOW_TENANT_ISOLATION:-$TEMPEST_HAS_ADMIN} -TEMPEST_USE_TEST_ACCOUNTS=$(trueorfalse False $TEMPEST_USE_TEST_ACCOUNTS) +TEMPEST_USE_TEST_ACCOUNTS=$(trueorfalse False TEMPEST_USE_TEST_ACCOUNTS) # The number of workers tempest is expected to be run with. This is used for # generating a accounts.yaml for running with test-accounts. This is also the From b274dbd7d04b643932fc583e2901353cfded45c3 Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Tue, 25 Aug 2015 10:01:39 -0400 Subject: [PATCH 0435/2941] Fix syntax error on if else statement This commit fixes an simple syntax error on an else statement causing it to crash instead of eval. Clearly someone has been writing too much python and not enough bash. 
Change-Id: I81d2324abd17790dc4790147f210ad7d9f0db74b --- lib/tempest | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index fe63015c5d..be24da6b61 100644 --- a/lib/tempest +++ b/lib/tempest @@ -555,7 +555,7 @@ function configure_tempest { if [[ $TEMPEST_USE_TEST_ACCOUNTS == "True" ]]; then if [[ $TEMPEST_HAS_ADMIN == "True" ]]; then tempest-account-generator -c $TEMPEST_CONFIG --os-username $ADMIN_USERNAME --os-password $ADMIN_PASSWORD --os-tenant-name $ADMIN_TENANT_NAME -r $TEMPEST_CONCURRENCY --with-admin etc/accounts.yaml - else: + else tempest-account-generator -c $TEMPEST_CONFIG --os-username $ADMIN_USERNAME --os-password $ADMIN_PASSWORD --os-tenant-name $ADMIN_TENANT_NAME -r $TEMPEST_CONCURRENCY etc/accounts.yaml fi iniset $TEMPEST_CONFIG auth allow_tenant_isolation False From 2ad1a42ca667ff21e6f7d2ae906be23a20430036 Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Tue, 23 Jun 2015 10:53:50 -0500 Subject: [PATCH 0436/2941] Use keystone wsgi_scripts Devstack was setting up a separate directory and copying http/keystone.py into it for the admin and public endpoints. Keystone now defines wsgi_scripts entrypoints so that keystone-wsgi-admin and keystone-wsgi-public are created on install so devstack can reference these files instead. See http://httpd.apache.org/docs/2.4/upgrading.html#access for the apache docs with examples for the Allow|Deny/Require directives. 
Depends-On: Ic9c03e6c00408f3698c10012ca98cfc6ea9b6ace Change-Id: Ided688be62b64066d90776313c963ec5016363f2 --- files/apache-keystone.template | 24 ++++++++++++++++++++++-- lib/keystone | 15 +-------------- 2 files changed, 23 insertions(+), 16 deletions(-) diff --git a/files/apache-keystone.template b/files/apache-keystone.template index 6dd1ad9ea6..4d3d2d6623 100644 --- a/files/apache-keystone.template +++ b/files/apache-keystone.template @@ -5,7 +5,7 @@ LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" WSGIDaemonProcess keystone-public processes=5 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% WSGIProcessGroup keystone-public - WSGIScriptAlias / %PUBLICWSGI% + WSGIScriptAlias / %KEYSTONE_BIN%/keystone-wsgi-public WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On = 2.4> @@ -16,12 +16,22 @@ LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" %SSLENGINE% %SSLCERTFILE% %SSLKEYFILE% + + + = 2.4> + Require all granted + + + Order allow,deny + Allow from all + + WSGIDaemonProcess keystone-admin processes=5 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% WSGIProcessGroup keystone-admin - WSGIScriptAlias / %ADMINWSGI% + WSGIScriptAlias / %KEYSTONE_BIN%/keystone-wsgi-admin WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On = 2.4> @@ -32,6 +42,16 @@ LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" %SSLENGINE% %SSLCERTFILE% %SSLKEYFILE% + + + = 2.4> + Require all granted + + + Order allow,deny + Allow from all + + Alias /identity %PUBLICWSGI% diff --git a/lib/keystone b/lib/keystone index e2448c9068..921dc766dd 100644 --- a/lib/keystone +++ b/lib/keystone @@ -51,11 +51,6 @@ fi KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone} KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf KEYSTONE_PASTE_INI=${KEYSTONE_PASTE_INI:-$KEYSTONE_CONF_DIR/keystone-paste.ini} -if is_suse; then - KEYSTONE_WSGI_DIR=${KEYSTONE_WSGI_DIR:-/srv/www/htdocs/keystone} -else - 
KEYSTONE_WSGI_DIR=${KEYSTONE_WSGI_DIR:-/var/www/keystone} -fi # Set up additional extensions, such as oauth1, federation # Example of KEYSTONE_EXTENSIONS=oauth1,federation @@ -132,14 +127,11 @@ function cleanup_keystone { # _cleanup_keystone_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file function _cleanup_keystone_apache_wsgi { - sudo rm -f $KEYSTONE_WSGI_DIR/* sudo rm -f $(apache_site_config_for keystone) } # _config_keystone_apache_wsgi() - Set WSGI config files of Keystone function _config_keystone_apache_wsgi { - sudo mkdir -p $KEYSTONE_WSGI_DIR - local keystone_apache_conf=$(apache_site_config_for keystone) local keystone_ssl="" local keystone_certfile="" @@ -161,22 +153,17 @@ function _config_keystone_apache_wsgi { venv_path="python-path=${PROJECT_VENV["keystone"]}/lib/$(python_version)/site-packages" fi - # copy proxy vhost and wsgi file - sudo cp $KEYSTONE_DIR/httpd/keystone.py $KEYSTONE_WSGI_DIR/main - sudo cp $KEYSTONE_DIR/httpd/keystone.py $KEYSTONE_WSGI_DIR/admin - sudo cp $FILES/apache-keystone.template $keystone_apache_conf sudo sed -e " s|%PUBLICPORT%|$keystone_service_port|g; s|%ADMINPORT%|$keystone_auth_port|g; s|%APACHE_NAME%|$APACHE_NAME|g; - s|%PUBLICWSGI%|$KEYSTONE_WSGI_DIR/main|g; - s|%ADMINWSGI%|$KEYSTONE_WSGI_DIR/admin|g; s|%SSLENGINE%|$keystone_ssl|g; s|%SSLCERTFILE%|$keystone_certfile|g; s|%SSLKEYFILE%|$keystone_keyfile|g; s|%USER%|$STACK_USER|g; s|%VIRTUALENV%|$venv_path|g + s|%KEYSTONE_BIN%|$KEYSTONE_BIN_DIR|g " -i $keystone_apache_conf } From 091d1ff39d47bf9bebf564b11bbbe5edc984340b Mon Sep 17 00:00:00 2001 From: Henry Gessau Date: Sun, 5 Jul 2015 08:55:18 -0400 Subject: [PATCH 0437/2941] Neutron auto-discovers installed alembic_migrations Neutron and its sub-projects have been made more intelligent about the alembic migrations of installed sub-projects. Neutron will now discover the installed migrations and run them automatically. 
Partial-Bug: #1470625 Change-Id: Iec8993b02400ae306abf520e6e70d86bba042c8d --- lib/neutron-legacy | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index da8c06446c..35ca402087 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -609,16 +609,6 @@ function init_neutron { recreate_database $Q_DB_NAME # Run Neutron db migrations $NEUTRON_BIN_DIR/neutron-db-manage --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE upgrade head - for svc in fwaas lbaas vpnaas; do - if [ "$svc" = "vpnaas" ]; then - q_svc="q-vpn" - else - q_svc="q-$svc" - fi - if is_service_enabled $q_svc; then - $NEUTRON_BIN_DIR/neutron-db-manage --service $svc --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE upgrade head - fi - done } # install_neutron() - Collect source and prepare From 925c256cd45bd845c8dd03827ae9c26f43ad5481 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Tue, 25 Aug 2015 13:40:25 -0700 Subject: [PATCH 0438/2941] Remove NOVA_VIF_DRIVER variable Commit 7561c8ded211d53e8745d1420a73b82bd0fc35cf removed the libvirt.vif_driver option from Nova in Juno so we should remove the variable from devstack since it's useless / confusing. 
Change-Id: I70a8cb4a3606eb5eabd3c0ef331945e72c80543a --- lib/neutron-legacy | 5 +---- lib/neutron_plugins/README.md | 4 +--- lib/neutron_plugins/brocade | 2 +- lib/neutron_plugins/ibm | 1 - lib/neutron_plugins/nuage | 1 - lib/neutron_plugins/oneconvergence | 1 - 6 files changed, 3 insertions(+), 11 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index da8c06446c..f381f6ebec 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -158,8 +158,6 @@ Q_ALLOW_OVERLAPPING_IP=${Q_ALLOW_OVERLAPPING_IP:-True} Q_USE_DEBUG_COMMAND=${Q_USE_DEBUG_COMMAND:-False} # The name of the default q-l3 router Q_ROUTER_NAME=${Q_ROUTER_NAME:-router1} -# nova vif driver that all plugins should use -NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} Q_NOTIFY_NOVA_PORT_STATUS_CHANGES=${Q_NOTIFY_NOVA_PORT_STATUS_CHANGES:-True} Q_NOTIFY_NOVA_PORT_DATA_CHANGES=${Q_NOTIFY_NOVA_PORT_DATA_CHANGES:-True} VIF_PLUGGING_IS_FATAL=${VIF_PLUGGING_IS_FATAL:-True} @@ -485,10 +483,9 @@ function create_nova_conf_neutron { iniset $NOVA_CONF DEFAULT security_group_api neutron fi - # set NOVA_VIF_DRIVER and optionally set options in nova_conf + # optionally set options in nova_conf neutron_plugin_create_nova_conf - iniset $NOVA_CONF libvirt vif_driver "$NOVA_VIF_DRIVER" iniset $NOVA_CONF DEFAULT linuxnet_interface_driver "$LINUXNET_VIF_DRIVER" if is_service_enabled q-meta; then iniset $NOVA_CONF neutron service_metadata_proxy "True" diff --git a/lib/neutron_plugins/README.md b/lib/neutron_plugins/README.md index 4b220d3377..f03000e7cb 100644 --- a/lib/neutron_plugins/README.md +++ b/lib/neutron_plugins/README.md @@ -16,9 +16,7 @@ functions ``lib/neutron-legacy`` calls the following functions when the ``$Q_PLUGIN`` is enabled * ``neutron_plugin_create_nova_conf`` : - set ``NOVA_VIF_DRIVER`` and optionally set options in nova_conf - e.g. 
- NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} + optionally set options in nova_conf * ``neutron_plugin_install_agent_packages`` : install packages that is specific to plugin agent e.g. diff --git a/lib/neutron_plugins/brocade b/lib/neutron_plugins/brocade index b8166d9af9..557b94dec0 100644 --- a/lib/neutron_plugins/brocade +++ b/lib/neutron_plugins/brocade @@ -12,7 +12,7 @@ function is_neutron_ovs_base_plugin { } function neutron_plugin_create_nova_conf { - NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} + : } function neutron_plugin_install_agent_packages { diff --git a/lib/neutron_plugins/ibm b/lib/neutron_plugins/ibm index 3660a9f2b9..dd5cfa6694 100644 --- a/lib/neutron_plugins/ibm +++ b/lib/neutron_plugins/ibm @@ -42,7 +42,6 @@ function neutron_setup_integration_bridge { } function neutron_plugin_create_nova_conf { - NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} # if n-cpu is enabled, then setup integration bridge if is_service_enabled n-cpu; then neutron_setup_integration_bridge diff --git a/lib/neutron_plugins/nuage b/lib/neutron_plugins/nuage index 7bce233853..9e5307ba53 100644 --- a/lib/neutron_plugins/nuage +++ b/lib/neutron_plugins/nuage @@ -10,7 +10,6 @@ set +o xtrace function neutron_plugin_create_nova_conf { NOVA_OVS_BRIDGE=${NOVA_OVS_BRIDGE:-"br-int"} iniset $NOVA_CONF neutron ovs_bridge $NOVA_OVS_BRIDGE - NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} LIBVIRT_FIREWALL_DRIVER=nova.virt.firewall.NoopFirewallDriver iniset $NOVA_CONF DEFAULT firewall_driver $LIBVIRT_FIREWALL_DRIVER } diff --git a/lib/neutron_plugins/oneconvergence b/lib/neutron_plugins/oneconvergence index 48a368a967..0c570e534b 100644 --- a/lib/neutron_plugins/oneconvergence +++ b/lib/neutron_plugins/oneconvergence @@ -68,7 +68,6 @@ function neutron_plugin_configure_plugin_agent { } function neutron_plugin_create_nova_conf { - 
NOVA_VIF_DRIVER=${NOVA_VIF_DRIVER:-"nova.virt.libvirt.vif.LibvirtGenericVIFDriver"} if ( is_service_enabled n-cpu && ! ( is_service_enabled q-dhcp )) ; then setup_integration_bridge fi From d20435bdd4fb5ea856497c797376517ed516d833 Mon Sep 17 00:00:00 2001 From: "Sean M. Collins" Date: Tue, 25 Aug 2015 19:24:51 -0400 Subject: [PATCH 0439/2941] Filter out temporary addresses Some IPv6 addresses are temporary and are generated by IPv6 privacy extensions. They eventually expire and are regenerated, so we should filter them out. Change-Id: I916d6a335bab096f765ae8c7e0e540a4349dd15f Closes-Bug: #1488691 --- functions-common | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions-common b/functions-common index f6a525354f..446de5374f 100644 --- a/functions-common +++ b/functions-common @@ -591,7 +591,7 @@ function get_default_host_ip { host_ip="" # Find the interface used for the default route host_ip_iface=${host_ip_iface:-$(ip -f $af route | awk '/default/ {print $5}' | head -1)} - local host_ips=$(LC_ALL=C ip -f $af addr show ${host_ip_iface} | awk /$af'/ {split($2,parts,"/"); print parts[1]}') + local host_ips=$(LC_ALL=C ip -f $af addr show ${host_ip_iface} | sed /temporary/d |awk /$af'/ {split($2,parts,"/"); print parts[1]}') local ip for ip in $host_ips; do # Attempt to filter out IP addresses that are part of the fixed and From 346edcc532719f6f29471920f9434b6d5300d43a Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 26 Aug 2015 09:38:37 -0400 Subject: [PATCH 0440/2941] check all possible services when configuring tempest The previous approach assumed that devstack in tree service support would always be a super set of tempest. That's not necessarily true. Instead when configuring tempest we should look at all the possible services that tempest could know about, which will let us disable services we don't have support for. 
Change-Id: I9c24705e494689f09a885eb0a640efd50db33fcf --- lib/ceilometer | 3 --- lib/cinder | 3 --- lib/glance | 3 --- lib/heat | 4 ---- lib/horizon | 4 ---- lib/ironic | 3 --- lib/neutron-legacy | 4 ---- lib/nova | 4 ---- lib/swift | 4 ---- lib/tempest | 5 +++++ lib/zaqar | 4 ---- stackrc | 6 ------ 12 files changed, 5 insertions(+), 42 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index 3df75b7300..d1cc862160 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -102,9 +102,6 @@ CEILOMETER_EVENTS=${CEILOMETER_EVENTS:-True} CEILOMETER_COORDINATION_URL=${CEILOMETER_COORDINATION_URL:-} CEILOMETER_PIPELINE_INTERVAL=${CEILOMETER_PIPELINE_INTERVAL:-} -# Tell Tempest this project is present -TEMPEST_SERVICES+=,ceilometer - # Functions # --------- diff --git a/lib/cinder b/lib/cinder index e5ed2db1a3..26277ccaba 100644 --- a/lib/cinder +++ b/lib/cinder @@ -108,9 +108,6 @@ CINDER_PERIODIC_INTERVAL=${CINDER_PERIODIC_INTERVAL:-60} CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-tgtadm} -# Tell Tempest this project is present -TEMPEST_SERVICES+=,cinder - # Source the enabled backends if is_service_enabled c-vol && [[ -n "$CINDER_ENABLED_BACKENDS" ]]; then diff --git a/lib/glance b/lib/glance index b1b0f32f62..7be3a8495c 100644 --- a/lib/glance +++ b/lib/glance @@ -75,9 +75,6 @@ GLANCE_SEARCH_PORT=${GLANCE_SEARCH_PORT:-9393} GLANCE_SEARCH_PORT_INT=${GLANCE_SEARCH_PORT_INT:-19393} GLANCE_SEARCH_HOSTPORT=${GLANCE_SEARCH_HOSTPORT:-$GLANCE_SERVICE_HOST:$GLANCE_SEARCH_PORT} -# Tell Tempest this project is present -TEMPEST_SERVICES+=,glance - # Functions # --------- diff --git a/lib/heat b/lib/heat index cedddd2d26..3489578162 100644 --- a/lib/heat +++ b/lib/heat @@ -64,10 +64,6 @@ else HEAT_DEFERRED_AUTH=${HEAT_DEFERRED_AUTH:-trusts} fi -# Tell Tempest this project is present -TEMPEST_SERVICES+=,heat - - # Functions # --------- diff --git a/lib/horizon b/lib/horizon index 9fe0aa8b49..b2539d1b7d 100644 --- a/lib/horizon +++ b/lib/horizon @@ -35,10 +35,6 @@ 
HORIZON_DIR=$DEST/horizon # The example file in Horizon repo is used by default. HORIZON_SETTINGS=${HORIZON_SETTINGS:-$HORIZON_DIR/openstack_dashboard/local/local_settings.py.example} -# Tell Tempest this project is present -TEMPEST_SERVICES+=,horizon - - # Functions # --------- diff --git a/lib/ironic b/lib/ironic index b3ad586923..cc1dfe31d9 100644 --- a/lib/ironic +++ b/lib/ironic @@ -114,9 +114,6 @@ IRONIC_SERVICE_PROTOCOL=http IRONIC_SERVICE_PORT=${IRONIC_SERVICE_PORT:-6385} IRONIC_HOSTPORT=${IRONIC_HOSTPORT:-$SERVICE_HOST:$IRONIC_SERVICE_PORT} -# Tell Tempest this project is present -TEMPEST_SERVICES+=,ironic - # Enable iPXE IRONIC_IPXE_ENABLED=$(trueorfalse False IRONIC_IPXE_ENABLED) IRONIC_HTTP_DIR=${IRONIC_HTTP_DIR:-$IRONIC_DATA_DIR/httpboot} diff --git a/lib/neutron-legacy b/lib/neutron-legacy index da8c06446c..9fb668ef81 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -358,10 +358,6 @@ else Q_USE_SECGROUP=False fi -# Tell Tempest this project is present -TEMPEST_SERVICES+=,neutron - - # Save trace setting XTRACE=$(set +o | grep xtrace) set +o xtrace diff --git a/lib/nova b/lib/nova index 6441a891eb..d4be019ae3 100644 --- a/lib/nova +++ b/lib/nova @@ -167,10 +167,6 @@ NOVA_ALLOW_MOVE_TO_SAME_HOST=$(trueorfalse True NOVA_ALLOW_MOVE_TO_SAME_HOST) TEST_FLOATING_POOL=${TEST_FLOATING_POOL:-test} TEST_FLOATING_RANGE=${TEST_FLOATING_RANGE:-192.168.253.0/29} -# Tell Tempest this project is present -TEMPEST_SERVICES+=,nova - - # Functions # --------- diff --git a/lib/swift b/lib/swift index fc736a60bc..b119d2fd35 100644 --- a/lib/swift +++ b/lib/swift @@ -141,10 +141,6 @@ SWIFT_TEMPURL_KEY=${SWIFT_TEMPURL_KEY:-} # Toggle for deploying Swift under HTTPD + mod_wsgi SWIFT_USE_MOD_WSGI=${SWIFT_USE_MOD_WSGI:-False} -# Tell Tempest this project is present -TEMPEST_SERVICES+=,swift - - # Functions # --------- diff --git a/lib/tempest b/lib/tempest index fe63015c5d..323a90a4cd 100644 --- a/lib/tempest +++ b/lib/tempest @@ -536,6 +536,11 @@ function 
configure_tempest { fi # ``service_available`` + # + # this tempest service list needs to be all the services that + # tempest supports, otherwise we can have an erroneous set of + # defaults (something defaulting true in Tempest, but not listed here). + TEMPEST_SERVICES="key,glance,nova,neutron,cinder,swift,heat,ceilometer,horizon,sahara,ironic,trove,zaqar" for service in ${TEMPEST_SERVICES//,/ }; do if is_service_enabled $service ; then iniset $TEMPEST_CONFIG service_available $service "True" diff --git a/lib/zaqar b/lib/zaqar index fdab3a26a8..aa21aac271 100644 --- a/lib/zaqar +++ b/lib/zaqar @@ -59,10 +59,6 @@ ZAQAR_SERVICE_HOST=${ZAQAR_SERVICE_HOST:-$SERVICE_HOST} ZAQAR_SERVICE_PORT=${ZAQAR_SERVICE_PORT:-8888} ZAQAR_SERVICE_PROTOCOL=${ZAQAR_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} -# Tell Tempest this project is present -TEMPEST_SERVICES+=,zaqar - - # Functions # --------- diff --git a/stackrc b/stackrc index 156cb1f85a..67b6424aab 100644 --- a/stackrc +++ b/stackrc @@ -78,12 +78,6 @@ fi # services will rely on the local toggle variable (e.g. ``KEYSTONE_USE_MOD_WSGI``) ENABLE_HTTPD_MOD_WSGI_SERVICES=True -# Tell Tempest which services are available. The default is set here as -# Tempest falls late in the configuration sequence. This differs from -# ``ENABLED_SERVICES`` in that the project names are used here rather than -# the service names, i.e.: ``TEMPEST_SERVICES="key,glance,nova"`` -TEMPEST_SERVICES="" - # Set the default Nova APIs to enable NOVA_ENABLED_APIS=ec2,osapi_compute,metadata From 869b72b8512d73d24b42e0fa5c39b8dc0d7b28f7 Mon Sep 17 00:00:00 2001 From: Dmitry Tantsur Date: Thu, 13 Aug 2015 13:36:23 +0200 Subject: [PATCH 0441/2941] Support installing ironic-lib ironic-lib will soon become a dependency of ironic, make sure we can write dsvm gates against it. 
Change-Id: I6e66ae770cf5065980848e7e987bfd75765a5ac6 --- lib/ironic | 7 +++++++ stackrc | 4 ++++ tests/test_libs_from_pypi.sh | 2 +- 3 files changed, 12 insertions(+), 1 deletion(-) diff --git a/lib/ironic b/lib/ironic index b3ad586923..6fb5184af9 100644 --- a/lib/ironic +++ b/lib/ironic @@ -31,6 +31,7 @@ set +o pipefail # Set up default directories GITDIR["python-ironicclient"]=$DEST/python-ironicclient +GITDIR["ironic-lib"]=$DEST/ironic-lib IRONIC_DIR=$DEST/ironic IRONIC_PYTHON_AGENT_DIR=$DEST/ironic-python-agent @@ -191,6 +192,12 @@ function install_ironic { die $LINENO "$srv should be enabled for Ironic." fi done + + if use_library_from_git "ironic-lib"; then + git_clone_by_name "ironic-lib" + setup_dev_lib "ironic-lib" + fi + git_clone $IRONIC_REPO $IRONIC_DIR $IRONIC_BRANCH setup_develop $IRONIC_DIR diff --git a/stackrc b/stackrc index 156cb1f85a..760e2fc745 100644 --- a/stackrc +++ b/stackrc @@ -455,6 +455,10 @@ GITBRANCH["ceilometermiddleware"]=${CEILOMETERMIDDLEWARE_BRANCH:-master} GITREPO["os-brick"]=${OS_BRICK_REPO:-${GIT_BASE}/openstack/os-brick.git} GITBRANCH["os-brick"]=${OS_BRICK_BRANCH:-master} +# ironic common lib +GITREPO["ironic-lib"]=${IRONIC_LIB_REPO:-${GIT_BASE}/openstack/ironic-lib.git} +GITBRANCH["ironic-lib"]=${IRONIC_LIB_BRANCH:-master} + ################## # diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh index d10cd0ee62..cf6ec1cbff 100755 --- a/tests/test_libs_from_pypi.sh +++ b/tests/test_libs_from_pypi.sh @@ -41,7 +41,7 @@ ALL_LIBS+=" python-ceilometerclient oslo.utils python-swiftclient" ALL_LIBS+=" python-neutronclient tooz ceilometermiddleware oslo.policy" ALL_LIBS+=" debtcollector os-brick automaton futurist oslo.service" ALL_LIBS+=" oslo.cache oslo.reports" -ALL_LIBS+=" keystoneauth" +ALL_LIBS+=" keystoneauth ironic-lib" # Generate the above list with # echo ${!GITREPO[@]} From 7d5be299206e801d39e0e07eec54dfc5948a15a5 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Mon, 10 Aug 2015 13:39:17 +1000 
Subject: [PATCH 0442/2941] Move configuration notes into configuration guide We have configuration information split between the README.md and configuration documentation. A lot of it is duplicated and it shows little organisation. This clears the README.md of detailed configuration options and consolidates it into the existing configuration guide. When someone first hits the README they don't need details on changing the RPC back-end; but more importantly this indicates clearly where we should be adding or clarifying details. Firstly, the detailed overview of local.conf is removed; it was duplicated in the configuration guide. This is left as a first-level section of that guide. The configuration notes are divided into generic devstack things (logging, database-backend, etc) and then the rest of the notes on various projects' configuration options have been moved into a dedicated sub-section "Projects". Each project gets its own sub-sub-section. Duplicated swift guides is consolidated into the single "Swift section". The neutron and multi-node nodes, which were all duplicated in their more specific dedicated guides are removed and replaced with links to those. Other sections are moved directly. Change-Id: Ib0bac56d82be870fe99c47c53fda674d8668b968 --- README.md | 345 +--------------------------------- doc/source/configuration.rst | 351 +++++++++++++++++++++++++---------- 2 files changed, 255 insertions(+), 441 deletions(-) diff --git a/README.md b/README.md index acc3e5a747..df0df917a4 100644 --- a/README.md +++ b/README.md @@ -93,345 +93,14 @@ for example). # Customizing -You can override environment variables used in `stack.sh` by creating file -name `local.conf` with a ``localrc`` section as shown below. It is likely -that you will need to do this to tweak your networking configuration should -you need to access your cloud from a different host. 
+You can override environment variables used in `stack.sh` by creating +file name `local.conf` with a ``localrc`` section as shown below. It +is likely that you will need to do this to tweak several settings for +your environment. [[local|localrc]] VARIABLE=value -See the **Local Configuration** section below for more details. - -# Database Backend - -Multiple database backends are available. The available databases are defined -in the lib/databases directory. -`mysql` is the default database, choose a different one by putting the -following in the `localrc` section: - - disable_service mysql - enable_service postgresql - -`mysql` is the default database. - -# RPC Backend - -Support for a RabbitMQ RPC backend is included. Additional RPC backends may -be available via external plugins. Enabling or disabling RabbitMQ is handled -via the usual service functions and ``ENABLED_SERVICES``. - -Example disabling RabbitMQ in ``local.conf``: - - disable_service rabbit - -# Apache Frontend - -Apache web server can be enabled for wsgi services that support being deployed -under HTTPD + mod_wsgi. By default, services that recommend running under -HTTPD + mod_wsgi are deployed under Apache. To use an alternative deployment -strategy (e.g. eventlet) for services that support an alternative to HTTPD + -mod_wsgi set ``ENABLE_HTTPD_MOD_WSGI_SERVICES`` to ``False`` in your -``local.conf``. - -Each service that can be run under HTTPD + mod_wsgi also has an override -toggle available that can be set in your ``local.conf``. - -Keystone is run under HTTPD + mod_wsgi by default. - -Example (Keystone): - - KEYSTONE_USE_MOD_WSGI="True" - -Example (Nova): - - NOVA_USE_MOD_WSGI="True" - -Example (Swift): - - SWIFT_USE_MOD_WSGI="True" - -# Swift - -Swift is disabled by default. When enabled, it is configured with -only one replica to avoid being IO/memory intensive on a small -vm. When running with only one replica the account, container and -object services will run directly in screen. 
The others services like -replicator, updaters or auditor runs in background. - -If you would like to enable Swift you can add this to your `localrc` section: - - enable_service s-proxy s-object s-container s-account - -If you want a minimal Swift install with only Swift and Keystone you -can have this instead in your `localrc` section: - - disable_all_services - enable_service key mysql s-proxy s-object s-container s-account - -If you only want to do some testing of a real normal swift cluster -with multiple replicas you can do so by customizing the variable -`SWIFT_REPLICAS` in your `localrc` section (usually to 3). - -# Swift S3 - -If you are enabling `swift3` in `ENABLED_SERVICES` DevStack will -install the swift3 middleware emulation. Swift will be configured to -act as a S3 endpoint for Keystone so effectively replacing the -`nova-objectstore`. - -Only Swift proxy server is launched in the screen session all other -services are started in background and managed by `swift-init` tool. - -# Neutron - -Basic Setup - -In order to enable Neutron in a single node setup, you'll need the -following settings in your `local.conf`: - - disable_service n-net - enable_service q-svc - enable_service q-agt - enable_service q-dhcp - enable_service q-l3 - enable_service q-meta - enable_service q-metering - -Then run `stack.sh` as normal. - -DevStack supports setting specific Neutron configuration flags to the -service, ML2 plugin, DHCP and L3 configuration files: - - [[post-config|/$Q_PLUGIN_CONF_FILE]] - [ml2] - mechanism_drivers=openvswitch,l2population - - [[post-config|$NEUTRON_CONF]] - [DEFAULT] - quota_port=42 - - [[post-config|$Q_L3_CONF_FILE]] - [DEFAULT] - agent_mode=legacy - - [[post-config|$Q_DHCP_CONF_FILE]] - [DEFAULT] - dnsmasq_dns_servers = 8.8.8.8,8.8.4.4 - -The ML2 plugin can run with the OVS, LinuxBridge, or Hyper-V agents on compute -hosts. 
This is a simple way to configure the ml2 plugin: - - # VLAN configuration - ENABLE_TENANT_VLANS=True - - # GRE tunnel configuration - ENABLE_TENANT_TUNNELS=True - - # VXLAN tunnel configuration - Q_ML2_TENANT_NETWORK_TYPE=vxlan - -The above will default in DevStack to using the OVS on each compute host. -To change this, set the `Q_AGENT` variable to the agent you want to run -(e.g. linuxbridge). - - Variable Name Notes - ---------------------------------------------------------------------------- - Q_AGENT This specifies which agent to run with the - ML2 Plugin (Typically either `openvswitch` - or `linuxbridge`). - Defaults to `openvswitch`. - Q_ML2_PLUGIN_MECHANISM_DRIVERS The ML2 MechanismDrivers to load. The default - is `openvswitch,linuxbridge`. - Q_ML2_PLUGIN_TYPE_DRIVERS The ML2 TypeDrivers to load. Defaults to - all available TypeDrivers. - Q_ML2_PLUGIN_GRE_TYPE_OPTIONS GRE TypeDriver options. Defaults to - `tunnel_id_ranges=1:1000'. - Q_ML2_PLUGIN_VXLAN_TYPE_OPTIONS VXLAN TypeDriver options. Defaults to - `vni_ranges=1001:2000` - Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS VLAN TypeDriver options. Defaults to none. - -# Heat - -Heat is disabled by default (see `stackrc` file). To enable it explicitly -you'll need the following settings in your `localrc` section: - - enable_service heat h-api h-api-cfn h-api-cw h-eng - -Heat can also run in standalone mode, and be configured to orchestrate -on an external OpenStack cloud. To launch only Heat in standalone mode -you'll need the following settings in your `localrc` section: - - disable_all_services - enable_service rabbit mysql heat h-api h-api-cfn h-api-cw h-eng - HEAT_STANDALONE=True - KEYSTONE_SERVICE_HOST=... - KEYSTONE_AUTH_HOST=... 
- -# Tempest - -If tempest has been successfully configured, a basic set of smoke -tests can be run as follows: - - $ cd /opt/stack/tempest - $ tox -efull tempest.scenario.test_network_basic_ops - -By default tempest is downloaded and the config file is generated, but the -tempest package is not installed in the system's global site-packages (the -package install includes installing dependences). So tempest won't run -outside of tox. If you would like to install it add the following to your -``localrc`` section: - - INSTALL_TEMPEST=True - -# DevStack on Xenserver - -If you would like to use Xenserver as the hypervisor, please refer -to the instructions in `./tools/xen/README.md`. - -# Additional Projects - -DevStack has a hook mechanism to call out to a dispatch script at specific -points in the execution of `stack.sh`, `unstack.sh` and `clean.sh`. This -allows upper-layer projects, especially those that the lower layer projects -have no dependency on, to be added to DevStack without modifying the core -scripts. Tempest is built this way as an example of how to structure the -dispatch script, see `extras.d/80-tempest.sh`. See `extras.d/README.md` -for more information. - -# Multi-Node Setup - -A more interesting setup involves running multiple compute nodes, with Neutron -networks connecting VMs on different compute nodes. 
-You should run at least one "controller node", which should have a `stackrc` -that includes at least: - - disable_service n-net - enable_service q-svc - enable_service q-agt - enable_service q-dhcp - enable_service q-l3 - enable_service q-meta - enable_service neutron - -You likely want to change your `localrc` section to run a scheduler that -will balance VMs across hosts: - - SCHEDULER=nova.scheduler.filter_scheduler.FilterScheduler - -You can then run many compute nodes, each of which should have a `stackrc` -which includes the following, with the IP address of the above controller node: - - ENABLED_SERVICES=n-cpu,rabbit,neutron,q-agt - SERVICE_HOST=[IP of controller node] - MYSQL_HOST=$SERVICE_HOST - RABBIT_HOST=$SERVICE_HOST - Q_HOST=$SERVICE_HOST - MATCHMAKER_REDIS_HOST=$SERVICE_HOST - -# Multi-Region Setup - -We want to setup two devstack (RegionOne and RegionTwo) with shared keystone -(same users and services) and horizon. -Keystone and Horizon will be located in RegionOne. -Full spec is available at: -https://wiki.openstack.org/wiki/Heat/Blueprints/Multi_Region_Support_for_Heat. - -In RegionOne: - - REGION_NAME=RegionOne - -In RegionTwo: - - disable_service horizon - KEYSTONE_SERVICE_HOST= - KEYSTONE_AUTH_HOST= - REGION_NAME=RegionTwo - -# Cells - -Cells is a new scaling option with a full spec at: -http://wiki.openstack.org/blueprint-nova-compute-cells. - -To setup a cells environment add the following to your `localrc` section: - - enable_service n-cell - -Be aware that there are some features currently missing in cells, one notable -one being security groups. The exercises have been patched to disable -functionality not supported by cells. - -# IPv6 - -By default, most Openstack services are bound to 0.0.0.0 -and service endpoints are registered as IPv4 addresses. -A new variable was created to control this behavior, and to -allow for operation over IPv6 instead of IPv4. 
- -For this, add the following to `local.conf`: - - SERVICE_IP_VERSION=6 - -When set to "6" devstack services will open listen sockets on :: -and service endpoints will be registered using HOST_IPV6 as the -address. The default value for this setting is `4`. Dual-mode -support, for example `4+6` is not currently supported. - - -# Local Configuration - -Historically DevStack has used ``localrc`` to contain all local configuration -and customizations. More and more of the configuration variables available for -DevStack are passed-through to the individual project configuration files. -The old mechanism for this required specific code for each file and did not -scale well. This is handled now by a master local configuration file. - -# local.conf - -The new config file ``local.conf`` is an extended-INI format that introduces -a new meta-section header that provides some additional information such -as a phase name and destination config filename: - - [[ | ]] - -where ```` is one of a set of phase names defined by ``stack.sh`` -and ```` is the configuration filename. The filename is -eval'ed in the ``stack.sh`` context so all environment variables are -available and may be used. Using the project config file variables in -the header is strongly suggested (see the ``NOVA_CONF`` example below). -If the path of the config file does not exist it is skipped. - -The defined phases are: - -* **local** - extracts ``localrc`` from ``local.conf`` before ``stackrc`` is sourced -* **post-config** - runs after the layer 2 services are configured - and before they are started -* **extra** - runs after services are started and before any files - in ``extra.d`` are executed -* **post-extra** - runs after files in ``extra.d`` are executed - -The file is processed strictly in sequence; meta-sections may be specified more -than once but if any settings are duplicated the last to appear in the file -will be used. 
- - [[post-config|$NOVA_CONF]] - [DEFAULT] - use_syslog = True - - [osapi_v3] - enabled = False - -A specific meta-section ``local|localrc`` is used to provide a default -``localrc`` file (actually ``.localrc.auto``). This allows all custom -settings for DevStack to be contained in a single file. If ``localrc`` -exists it will be used instead to preserve backward-compatibility. - - [[local|localrc]] - FIXED_RANGE=10.254.1.0/24 - ADMIN_PASSWORD=speciale - LOGFILE=$DEST/logs/stack.sh.log - -Note that ``Q_PLUGIN_CONF_FILE`` is unique in that it is assumed to *NOT* -start with a ``/`` (slash) character. A slash will need to be added: - - [[post-config|/$Q_PLUGIN_CONF_FILE]] +Start by reading the [configuration +guide](doc/source/configuration.rst) for details of the many available +options. \ No newline at end of file diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 90b7d44dec..ed17924d45 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -148,6 +148,34 @@ will not be set if there is no IPv6 address on the default Ethernet interface. Setting it here also makes it available for ``openrc`` to set ``OS_AUTH_URL``. ``HOST_IPV6`` is not set by default. +Examples +======== + +- Eliminate a Cinder pass-through (``CINDER_PERIODIC_INTERVAL``): + + :: + + [[post-config|$CINDER_CONF]] + [DEFAULT] + periodic_interval = 60 + +- Sample ``local.conf`` with screen logging enabled: + + :: + + [[local|localrc]] + FIXED_RANGE=10.254.1.0/24 + NETWORK_GATEWAY=10.254.1.1 + LOGDAYS=1 + LOGDIR=$DEST/logs + LOGFILE=$LOGDIR/stack.sh.log + ADMIN_PASSWORD=quiet + DATABASE_PASSWORD=$ADMIN_PASSWORD + RABBIT_PASSWORD=$ADMIN_PASSWORD + SERVICE_PASSWORD=$ADMIN_PASSWORD + SERVICE_TOKEN=a682f596-76f3-11e3-b3b2-e716f9080d50 + + Configuration Notes =================== @@ -228,6 +256,72 @@ to direct the message stream to the log host. 
| SYSLOG_HOST=$HOST_IP SYSLOG_PORT=516 + +Database Backend +---------------- + +Multiple database backends are available. The available databases are defined +in the lib/databases directory. +`mysql` is the default database, choose a different one by putting the +following in the `localrc` section: + + :: + + disable_service mysql + enable_service postgresql + +`mysql` is the default database. + +RPC Backend +----------- + +Support for a RabbitMQ RPC backend is included. Additional RPC +backends may be available via external plugins. Enabling or disabling +RabbitMQ is handled via the usual service functions and +``ENABLED_SERVICES``. + +Example disabling RabbitMQ in ``local.conf``: + +:: + disable_service rabbit + + +Apache Frontend +--------------- + +The Apache web server can be enabled for wsgi services that support +being deployed under HTTPD + mod_wsgi. By default, services that +recommend running under HTTPD + mod_wsgi are deployed under Apache. To +use an alternative deployment strategy (e.g. eventlet) for services +that support an alternative to HTTPD + mod_wsgi set +``ENABLE_HTTPD_MOD_WSGI_SERVICES`` to ``False`` in your +``local.conf``. + +Each service that can be run under HTTPD + mod_wsgi also has an +override toggle available that can be set in your ``local.conf``. + +Keystone is run under Apache with ``mod_wsgi`` by default. + +Example (Keystone) + +:: + + KEYSTONE_USE_MOD_WSGI="True" + +Example (Nova): + +:: + + NOVA_USE_MOD_WSGI="True" + +Example (Swift): + +:: + + SWIFT_USE_MOD_WSGI="True" + + + Libraries from Git ------------------ @@ -295,48 +389,6 @@ that matches requirements. PIP_UPGRADE=True -Swift ------ - -Swift is now used as the back-end for the S3-like object store. When -enabled Nova's objectstore (``n-obj`` in ``ENABLED_SERVICES``) is -automatically disabled. 
Enable Swift by adding it services to -``ENABLED_SERVICES`` - - :: - - enable_service s-proxy s-object s-container s-account - -Setting Swift's hash value is required and you will be prompted for it -if Swift is enabled so just set it to something already: - - :: - - SWIFT_HASH=66a3d6b56c1f479c8b4e70ab5c2000f5 - -For development purposes the default number of replicas is set to -``1`` to reduce the overhead required. To better simulate a production -deployment set this to ``3`` or more. - - :: - - SWIFT_REPLICAS=3 - -The data for Swift is stored in the source tree by default (in -``$DEST/swift/data``) and can be moved by setting -``SWIFT_DATA_DIR``. The specified directory will be created if it does -not exist. - - :: - - SWIFT_DATA_DIR=$DEST/data/swift - -*Note*: Previously just enabling ``swift`` was sufficient to start the -Swift services. That does not provide proper service granularity, -particularly in multi-host configurations, and is considered -deprecated. Some service combination tests now check for specific -Swift services and the old blanket acceptance will longer work -correctly. Service Catalog Backend ----------------------- @@ -354,47 +406,6 @@ with ``KEYSTONE_CATALOG_BACKEND``: DevStack's default configuration in ``sql`` mode is set in ``files/keystone_data.sh`` -Cinder ------- - -The logical volume group used to hold the Cinder-managed volumes is -set by ``VOLUME_GROUP``, the logical volume name prefix is set with -``VOLUME_NAME_PREFIX`` and the size of the volume backing file is set -with ``VOLUME_BACKING_FILE_SIZE``. - - :: - - VOLUME_GROUP="stack-volumes" - VOLUME_NAME_PREFIX="volume-" - VOLUME_BACKING_FILE_SIZE=10250M - -Multi-host DevStack -------------------- - -Running DevStack with multiple hosts requires a custom ``local.conf`` -section for each host. The master is the same as a single host -installation with ``MULTI_HOST=True``. The slaves have fewer services -enabled and a couple of host variables pointing to the master. 
- -Master -~~~~~~ - -Set ``MULTI_HOST`` to true - :: - - MULTI_HOST=True - -Slave -~~~~~ - -Set the following options to point to the master - - :: - - MYSQL_HOST=w.x.y.z - RABBIT_HOST=w.x.y.z - GLANCE_HOSTPORT=w.x.y.z:9292 - ENABLED_SERVICES=n-vol,n-cpu,n-net,n-api IP Version ---------- @@ -447,29 +458,163 @@ optionally be used to alter the default IPv6 address HOST_IPV6=${some_local_ipv6_address} -Examples -======== +Multi-node setup +~~~~~~~~~~~~~~~~ -- Eliminate a Cinder pass-through (``CINDER_PERIODIC_INTERVAL``): +See the :doc:`multi-node lab guide` - :: +Projects +-------- - [[post-config|$CINDER_CONF]] - [DEFAULT] - periodic_interval = 60 +Neutron +~~~~~~~ -- Sample ``local.conf`` with screen logging enabled: +See the :doc:`neutron configuration guide` for +details on configuration of Neutron - :: - [[local|localrc]] - FIXED_RANGE=10.254.1.0/24 - NETWORK_GATEWAY=10.254.1.1 - LOGDAYS=1 - LOGDIR=$DEST/logs - LOGFILE=$LOGDIR/stack.sh.log - ADMIN_PASSWORD=quiet - DATABASE_PASSWORD=$ADMIN_PASSWORD - RABBIT_PASSWORD=$ADMIN_PASSWORD - SERVICE_PASSWORD=$ADMIN_PASSWORD - SERVICE_TOKEN=a682f596-76f3-11e3-b3b2-e716f9080d50 +Swift +~~~~~ + +Swift is disabled by default. When enabled, it is configured with +only one replica to avoid being IO/memory intensive on a small +VM. When running with only one replica the account, container and +object services will run directly in screen. The others services like +replicator, updaters or auditor runs in background. 
+ +If you would like to enable Swift you can add this to your `localrc` +section: + +:: + + enable_service s-proxy s-object s-container s-account + +If you want a minimal Swift install with only Swift and Keystone you +can have this instead in your `localrc` section: + +:: + + disable_all_services + enable_service key mysql s-proxy s-object s-container s-account + +If you only want to do some testing of a real normal swift cluster +with multiple replicas you can do so by customizing the variable +`SWIFT_REPLICAS` in your `localrc` section (usually to 3). + +Swift S3 +++++++++ + +If you are enabling `swift3` in `ENABLED_SERVICES` DevStack will +install the swift3 middleware emulation. Swift will be configured to +act as a S3 endpoint for Keystone so effectively replacing the +`nova-objectstore`. + +Only Swift proxy server is launched in the screen session all other +services are started in background and managed by `swift-init` tool. + +Heat +~~~~ + +Heat is disabled by default (see `stackrc` file). To enable it +explicitly you'll need the following settings in your `localrc` +section + +:: + + enable_service heat h-api h-api-cfn h-api-cw h-eng + +Heat can also run in standalone mode, and be configured to orchestrate +on an external OpenStack cloud. To launch only Heat in standalone mode +you'll need the following settings in your `localrc` section + +:: + + disable_all_services + enable_service rabbit mysql heat h-api h-api-cfn h-api-cw h-eng + HEAT_STANDALONE=True + KEYSTONE_SERVICE_HOST=... + KEYSTONE_AUTH_HOST=... + +Tempest +~~~~~~~ + +If tempest has been successfully configured, a basic set of smoke +tests can be run as follows: + +:: + + $ cd /opt/stack/tempest + $ tox -efull tempest.scenario.test_network_basic_ops + +By default tempest is downloaded and the config file is generated, but the +tempest package is not installed in the system's global site-packages (the +package install includes installing dependences). So tempest won't run +outside of tox. 
If you would like to install it add the following to your +``localrc`` section: + +:: + + INSTALL_TEMPEST=True + + +Xenserver +~~~~~~~~~ + +If you would like to use Xenserver as the hypervisor, please refer to +the instructions in `./tools/xen/README.md`. + +Cells +~~~~~ + +`Cells `__ is +an alternative scaling option. To setup a cells environment add the +following to your `localrc` section: + +:: + + enable_service n-cell + +Be aware that there are some features currently missing in cells, one +notable one being security groups. The exercises have been patched to +disable functionality not supported by cells. + +Cinder +~~~~~~ + +The logical volume group used to hold the Cinder-managed volumes is +set by ``VOLUME_GROUP``, the logical volume name prefix is set with +``VOLUME_NAME_PREFIX`` and the size of the volume backing file is set +with ``VOLUME_BACKING_FILE_SIZE``. + + :: + + VOLUME_GROUP="stack-volumes" + VOLUME_NAME_PREFIX="volume-" + VOLUME_BACKING_FILE_SIZE=10250M + + +Keystone +~~~~~~~~ + +Multi-Region Setup +++++++++++++++++++ + +We want to setup two devstack (RegionOne and RegionTwo) with shared +keystone (same users and services) and horizon. Keystone and Horizon +will be located in RegionOne. Full spec is available at: +``__. + +In RegionOne: + +:: + + REGION_NAME=RegionOne + +In RegionTwo: + +:: + + disable_service horizon + KEYSTONE_SERVICE_HOST= + KEYSTONE_AUTH_HOST= + REGION_NAME=RegionTwo From a35391e3bb497190e7e78cd0f233ddf1684fe18e Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Mon, 10 Aug 2015 13:53:40 +1000 Subject: [PATCH 0443/2941] Talk about local.conf first We bury the lead with all the historical notes about localrc; just talk about what is important to somebody setting up a current devstack, which is local.conf. There are already inline examples of config-variables, etc. Remove them, but add a small overview example for logging in its place. 
Change-Id: I466252ffba66ef4ea180c9355f715a19eb4f8017 --- README.md | 39 +++++++----------- doc/source/configuration.rst | 78 +++++++++++++----------------------- 2 files changed, 42 insertions(+), 75 deletions(-) diff --git a/README.md b/README.md index df0df917a4..ee7f0e7809 100644 --- a/README.md +++ b/README.md @@ -77,30 +77,21 @@ here, so run it in a VM. And take advantage of the snapshot capabilities of your hypervisor of choice to reduce testing cycle times. You might even save enough time to write one more feature before the next feature freeze... -``stack.sh`` needs to have root access for a lot of tasks, but uses ``sudo`` -for all of those tasks. However, it needs to be not-root for most of its -work and for all of the OpenStack services. ``stack.sh`` specifically -does not run if started as root. - -This is a recent change (Oct 2013) from the previous behaviour of -automatically creating a ``stack`` user. Automatically creating -user accounts is not the right response to running as root, so -that bit is now an explicit step using ``tools/create-stack-user.sh``. -Run that (as root!) or just check it out to see what DevStack's -expectations are for the account it runs under. Many people simply -use their usual login (the default 'ubuntu' login on a UEC image -for example). +``stack.sh`` needs to have root access for a lot of tasks, but uses +``sudo`` for all of those tasks. However, it needs to be not-root for +most of its work and for all of the OpenStack services. ``stack.sh`` +specifically does not run if started as root. -# Customizing - -You can override environment variables used in `stack.sh` by creating -file name `local.conf` with a ``localrc`` section as shown below. It -is likely that you will need to do this to tweak several settings for -your environment. +DevStack will not automatically create the user, but provides a helper +script in ``tools/create-stack-user.sh``. Run that (as root!) 
or just +check it out to see what DevStack's expectations are for the account +it runs under. Many people simply use their usual login (the default +'ubuntu' login on a UEC image for example). - [[local|localrc]] - VARIABLE=value +# Customizing -Start by reading the [configuration -guide](doc/source/configuration.rst) for details of the many available -options. \ No newline at end of file +DevStack can be extensively configured via the configuration file +`local.conf`. It is likely that you will need to provide and modify +this file if you want anything other than the most basic setup. Start +by reading the [configuration guide](doc/source/configuration.rst) for +details of the configuration file and the many available options. \ No newline at end of file diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index ed17924d45..fe23d6cb34 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -6,34 +6,15 @@ Configuration :local: :depth: 1 -DevStack has always tried to be mostly-functional with a minimal amount -of configuration. The number of options has ballooned as projects add -features, new projects added and more combinations need to be tested. -Historically DevStack obtained all local configuration and -customizations from a ``localrc`` file. The number of configuration -variables that are simply passed-through to the individual project -configuration files is also increasing. The old mechanism for this -(``EXTRAS_OPTS`` and friends) required specific code for each file and -did not scale well. 
- -In Oct 2013 a new configuration method was introduced (in `review -46768 `__) to hopefully -simplify this process and meet the following goals: - -- contain all non-default local configuration in a single file -- be backward-compatible with ``localrc`` to smooth the transition - process -- allow settings in arbitrary configuration files to be changed - local.conf ========== -The new configuration file is ``local.conf`` and should reside in the -root Devstack directory. An example of such ``local.conf`` file -is provided in the ``devstack/samples`` directory. Copy this file into -the root Devstack directory and adapt it to your needs. It is a modified INI -format file that introduces a meta-section header to carry additional -information regarding the configuration files to be changed. +DevStack configuration is modified via the file ``local.conf``. It is +a modified INI format file that introduces a meta-section header to +carry additional information regarding the configuration files to be +changed. + +A sample is provided in ``devstack/samples`` The new header is similar to a normal INI section header but with double brackets (``[[ ... ]]``) and two internal fields separated by a pipe @@ -148,33 +129,14 @@ will not be set if there is no IPv6 address on the default Ethernet interface. Setting it here also makes it available for ``openrc`` to set ``OS_AUTH_URL``. ``HOST_IPV6`` is not set by default. 
-Examples -======== - -- Eliminate a Cinder pass-through (``CINDER_PERIODIC_INTERVAL``): - - :: - - [[post-config|$CINDER_CONF]] - [DEFAULT] - periodic_interval = 60 - -- Sample ``local.conf`` with screen logging enabled: - - :: - - [[local|localrc]] - FIXED_RANGE=10.254.1.0/24 - NETWORK_GATEWAY=10.254.1.1 - LOGDAYS=1 - LOGDIR=$DEST/logs - LOGFILE=$LOGDIR/stack.sh.log - ADMIN_PASSWORD=quiet - DATABASE_PASSWORD=$ADMIN_PASSWORD - RABBIT_PASSWORD=$ADMIN_PASSWORD - SERVICE_PASSWORD=$ADMIN_PASSWORD - SERVICE_TOKEN=a682f596-76f3-11e3-b3b2-e716f9080d50 +Historical Notes +================ +Historically DevStack obtained all local configuration and +customizations from a ``localrc`` file. In Oct 2013 the +``local.conf`` configuration method was introduced (in `review 46768 +`__) to simplify this +process. Configuration Notes =================== @@ -257,6 +219,20 @@ to direct the message stream to the log host. | SYSLOG_PORT=516 +Example Logging Configuration +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +For example, non-interactive installs probably wish to save output to +a file, keep service logs and disable color in the stored files. + + :: + + [[local|localrc]] + DEST=/opt/stack/ + LOGDIR=$DEST/logs + LOGFILE=$LOGDIR/stack.sh.log + LOG_COLOR=False + Database Backend ---------------- From a4693b5dea459acb02f226bbd1a8efdbcf1fc2b2 Mon Sep 17 00:00:00 2001 From: John Hua Date: Thu, 6 Aug 2015 13:53:35 +0100 Subject: [PATCH 0444/2941] Add/Overwrite default images in IMAGE_URLS and detect duplicates IMAGE_URLS could be set both in localrc with customization or stackrc by default. By setting DOWNLOAD_DEFAULT_IMAGES, user could choose to add default images to IMAGE_URLS or overwrite them. As uploading duplicate images will cause a "409 Conflict" error, a duplicate detection will expose it earlier. Care needs to be taken that you don't end up with a duplicate image, so clean up Xen's README. 
Depends-On: I6fbae12f950a03afab39f341132746d3db9f788c Change-Id: I3ca4e576aa3fb8992c08ca44900a8c53dd4b4163 Closes-Bug: #1473432 --- doc/source/configuration.rst | 18 ++++++++ stackrc | 87 ++++++++++++++++++++++-------------- tools/xen/README.md | 5 --- 3 files changed, 71 insertions(+), 39 deletions(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 90b7d44dec..fef395c327 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -396,6 +396,24 @@ Set the following options to point to the master GLANCE_HOSTPORT=w.x.y.z:9292 ENABLED_SERVICES=n-vol,n-cpu,n-net,n-api +Guest Images +------------ + +Images provided in URLS via the comma-separated ``IMAGE_URLS`` +variable will be downloaded and uploaded to glance by DevStack. + +Default guest-images are predefined for each type of hypervisor and +their testing-requirements in ``stack.sh``. Setting +``DOWNLOAD_DEFAULT_IMAGES=False`` will prevent DevStack downloading +these default images; in that case, you will want to populate +``IMAGE_URLS`` with sufficient images to satisfy testing-requirements. + + :: + + DOWNLOAD_DEFAULT_IMAGES=False + IMAGE_URLS="http://foo.bar.com/image.qcow," + IMAGE_URLS+="http://foo.bar.com/image2.qcow" + IP Version ---------- diff --git a/stackrc b/stackrc index 156cb1f85a..43292a5e02 100644 --- a/stackrc +++ b/stackrc @@ -2,6 +2,11 @@ # # stackrc # + +# ensure we don't re-source this in the same environment +[[ -z "$_DEVSTACK_STACKRC" ]] || return 0 +declare -r _DEVSTACK_STACKRC=1 + # Find the other rc files RC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd) @@ -560,40 +565,47 @@ CIRROS_ARCH=${CIRROS_ARCH:-"x86_64"} # Set default image based on ``VIRT_DRIVER`` and ``LIBVIRT_TYPE``, either of # which may be set in ``local.conf``. Also allow ``DEFAULT_IMAGE_NAME`` and # ``IMAGE_URLS`` to be set in the `localrc` section of ``local.conf``. 
-case "$VIRT_DRIVER" in - openvz) - DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ubuntu-12.04-x86_64} - IMAGE_URLS=${IMAGE_URLS:-"http://download.openvz.org/template/precreated/ubuntu-12.04-x86_64.tar.gz"};; - libvirt) - case "$LIBVIRT_TYPE" in - lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc - DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs} - IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz"};; - *) # otherwise, use the uec style image (with kernel, ramdisk, disk) - DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec} - IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec.tar.gz"};; - esac - ;; - vsphere) - DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.2-i386-disk.vmdk} - IMAGE_URLS=${IMAGE_URLS:-"http://partnerweb.vmware.com/programs/vmdkimage/cirros-0.3.2-i386-disk.vmdk"};; - xenserver) - DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.4-x86_64-disk} - IMAGE_URLS=${IMAGE_URLS:-"http://ca.downloads.xensource.com/OpenStack/cirros-0.3.4-x86_64-disk.vhd.tgz"} - IMAGE_URLS+=",http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz";; - ironic) - # Ironic can do both partition and full disk images, depending on the driver - if [[ "$IRONIC_DEPLOY_DRIVER" == "agent_ssh" ]]; then - DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-x86_64-disk} - else - DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-x86_64-uec} - fi - IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz"} - IMAGE_URLS+=",http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-disk.img";; - *) # Default to Cirros with kernel, ramdisk and disk image - 
DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec} - IMAGE_URLS=${IMAGE_URLS:-"http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec.tar.gz"};; -esac +DOWNLOAD_DEFAULT_IMAGES=$(trueorfalse True DOWNLOAD_DEFAULT_IMAGES) +if [[ "$DOWNLOAD_DEFAULT_IMAGES" == "True" ]]; then + if [ -n $IMAGE_URLS ]; then + IMAGE_URLS+="," + fi + case "$VIRT_DRIVER" in + openvz) + DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-ubuntu-12.04-x86_64} + IMAGE_URLS+="http://download.openvz.org/template/precreated/ubuntu-12.04-x86_64.tar.gz";; + libvirt) + case "$LIBVIRT_TYPE" in + lxc) # the cirros root disk in the uec tarball is empty, so it will not work for lxc + DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs} + IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-rootfs.img.gz";; + *) # otherwise, use the uec style image (with kernel, ramdisk, disk) + DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec} + IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec.tar.gz";; + esac + ;; + vsphere) + DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.2-i386-disk.vmdk} + IMAGE_URLS+="http://partnerweb.vmware.com/programs/vmdkimage/cirros-0.3.2-i386-disk.vmdk";; + xenserver) + DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-0.3.4-x86_64-disk} + IMAGE_URLS+="http://ca.downloads.xensource.com/OpenStack/cirros-0.3.4-x86_64-disk.vhd.tgz" + IMAGE_URLS+=",http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz";; + ironic) + # Ironic can do both partition and full disk images, depending on the driver + if [[ "$IRONIC_DEPLOY_DRIVER" == "agent_ssh" ]]; then + DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-x86_64-disk} + else + DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-x86_64-uec} 
+ fi + IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-uec.tar.gz" + IMAGE_URLS+=",http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-x86_64-disk.img";; + *) # Default to Cirros with kernel, ramdisk and disk image + DEFAULT_IMAGE_NAME=${DEFAULT_IMAGE_NAME:-cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec} + IMAGE_URLS+="http://download.cirros-cloud.net/${CIRROS_VERSION}/cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-uec.tar.gz";; + esac + DOWNLOAD_DEFAULT_IMAGES=False +fi # Staging Area for New Images, have them here for at least 24hrs for nodepool # to cache them otherwise the failure rates in the gate are too high @@ -606,6 +618,13 @@ if [[ "$PRECACHE_IMAGES" == "True" ]]; then fi fi +# Detect duplicate values in IMAGE_URLS +for image_url in ${IMAGE_URLS//,/ }; do + if [ $(echo "$IMAGE_URLS" | grep -o -F "$image_url" | wc -l) -gt 1 ]; then + die $LINENO "$image_url is duplicate, please remove it from IMAGE_URLS." + fi +done + # 10Gb default volume backing file size VOLUME_BACKING_FILE_SIZE=${VOLUME_BACKING_FILE_SIZE:-10250M} diff --git a/tools/xen/README.md b/tools/xen/README.md index 61694e9616..6212cc54d7 100644 --- a/tools/xen/README.md +++ b/tools/xen/README.md @@ -94,11 +94,6 @@ Of course, use real passwords if this machine is exposed. XENAPI_CONNECTION_URL="http://address_of_your_xenserver" VNCSERVER_PROXYCLIENT_ADDRESS=address_of_your_xenserver - # Download a vhd and a uec image - IMAGE_URLS="\ - https://github.com/downloads/citrix-openstack/warehouse/cirros-0.3.0-x86_64-disk.vhd.tgz,\ - http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-uec.tar.gz" - # Explicitly set virt driver VIRT_DRIVER=xenserver From 605d6417e60494ef1f9d0511d526afe498d72120 Mon Sep 17 00:00:00 2001 From: Chris Dent Date: Wed, 26 Aug 2015 15:37:41 +0000 Subject: [PATCH 0445/2941] Add aodh and gnocchi to the plugin registry The comment is their keystone service entry. 
Change-Id: I896d5c594fa0c61924dbf4527215cc543b6e43e7 --- doc/source/plugin-registry.rst | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 99bfb85b38..428efc4977 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -20,6 +20,10 @@ The following are plugins that exist for official OpenStack projects. +--------------------+-------------------------------------------+--------------------+ |Plugin Name |URL |Comments | +--------------------+-------------------------------------------+--------------------+ +|aodh |git://git.openstack.org/openstack/aodh | alarming | ++--------------------+-------------------------------------------+--------------------+ +|gnocchi |git://git.openstack.org/openstack/gnocchi | metric | ++--------------------+-------------------------------------------+--------------------+ |magnum |git://git.openstack.org/openstack/magnum | | +--------------------+-------------------------------------------+--------------------+ |sahara |git://git.openstack.org/openstack/sahara | | From e89126f6a446ad9a8e17a0e6481644403f8d5a22 Mon Sep 17 00:00:00 2001 From: gordon chung Date: Fri, 28 Aug 2015 15:29:14 -0400 Subject: [PATCH 0446/2941] remove too_slow_to_test flag this flag was added to deal with inefficiencies of Icehouse. this patch removes flag as it's not used in post-Icehouse Change-Id: Ib715e68dc61f3c3ea0a40fae0ea57028e36285bd Depends-On: I842dfe04725b2482399c0e95b54403fb82001645 --- lib/tempest | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/tempest b/lib/tempest index be24da6b61..fe90023c06 100644 --- a/lib/tempest +++ b/lib/tempest @@ -447,9 +447,6 @@ function configure_tempest { iniset $TEMPEST_CONFIG scenario large_ops_number ${TEMPEST_LARGE_OPS_NUMBER:-0} # Telemetry - # Ceilometer API optimization happened in Juno that allows to run more tests in tempest. - # Once Tempest retires support for icehouse this flag can be removed. 
- iniset $TEMPEST_CONFIG telemetry too_slow_to_test "False" iniset $TEMPEST_CONFIG telemetry-feature-enabled events "True" # Object Store From 9c0b9f30247d30babf35147351cfcf8bdf64d223 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 22 Jul 2015 06:08:09 +1000 Subject: [PATCH 0447/2941] Use sudo iniset to modify /etc files The existing mysql code is wrong and not detected as failing [1], and boto config requires work-arounds [2,3] that are all fairly ugly. Use -sudo argument to iniset to handle this. [1] I24388b5de777995f92d73076524122cf599d6371 [2] I5f4c43bbbe477c570936e2e40ac05cc38febbb3f [3] Ib7556dac9aaaf2f3c96237e0ca28ed6ae1b1b7ac Change-Id: Iaceb8d42ce37be728adae6fd0a30a1f9d33d4029 --- lib/databases/mysql | 24 ++++++++++-------------- lib/tempest | 2 +- 2 files changed, 11 insertions(+), 15 deletions(-) diff --git a/lib/databases/mysql b/lib/databases/mysql index 7ae9a936d6..ada56a762a 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -92,14 +92,12 @@ function configure_database_mysql { # Change bind-address from localhost (127.0.0.1) to any (::) and # set default db type to InnoDB - sudo bash -c "source $TOP_DIR/functions && \ - iniset $my_conf mysqld bind-address "$SERVICE_LISTEN_ADDRESS" && \ - iniset $my_conf mysqld sql_mode STRICT_ALL_TABLES && \ - iniset $my_conf mysqld default-storage-engine InnoDB && \ - iniset $my_conf mysqld max_connections 1024 && \ - iniset $my_conf mysqld query_cache_type OFF && \ - iniset $my_conf mysqld query_cache_size 0" - + iniset -sudo $my_conf mysqld bind-address "$SERVICE_LISTEN_ADDRESS" + iniset -sudo $my_conf mysqld sql_mode STRICT_ALL_TABLES + iniset -sudo $my_conf mysqld default-storage-engine InnoDB + iniset -sudo $my_conf mysqld max_connections 1024 + iniset -sudo $my_conf mysqld query_cache_type OFF + iniset -sudo $my_conf mysqld query_cache_size 0 if [[ "$DATABASE_QUERY_LOGGING" == "True" ]]; then echo_summary "Enabling MySQL query logging" @@ -115,12 +113,10 @@ function configure_database_mysql { 
# Turn on slow query log, log all queries (any query taking longer than # 0 seconds) and log all non-indexed queries - sudo bash -c "source $TOP_DIR/functions && \ - iniset $my_conf mysqld slow-query-log 1 && \ - iniset $my_conf mysqld slow-query-log-file $slow_log && \ - iniset $my_conf mysqld long-query-time 0 && \ - iniset $my_conf mysqld log-queries-not-using-indexes 1" - + iniset -sudo $my_conf mysqld slow-query-log 1 + iniset -sudo $my_conf mysqld slow-query-log-file $slow_log + iniset -sudo $my_conf mysqld long-query-time 0 + iniset -sudo $my_conf mysqld log-queries-not-using-indexes 1 fi restart_service $mysql diff --git a/lib/tempest b/lib/tempest index be24da6b61..645d245d8f 100644 --- a/lib/tempest +++ b/lib/tempest @@ -546,7 +546,7 @@ function configure_tempest { if is_ssl_enabled_service "key" || is_service_enabled tls-proxy; then # Use the ``BOTO_CONFIG`` environment variable to point to this file - iniset $BOTO_CONF Boto ca_certificates_file $SSL_BUNDLE_FILE + iniset -sudo $BOTO_CONF Boto ca_certificates_file $SSL_BUNDLE_FILE sudo chown $STACK_USER $BOTO_CONF fi From 2e1a91c50b73ca7f46871d3a906ade93bbcac6a7 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 31 Aug 2015 09:43:00 -0400 Subject: [PATCH 0448/2941] turn multi host true for nova network by default With multi host set to true devstack's dnsmasq server no longer listens on the network to other systems. In the gate we can see we're getting a ton of spurious dhcp requests from other systems on the network, and it's better that we never even see it. Change-Id: Ie600de91e4a7da734eae722e78101c2401a7b1f5 --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 6441a891eb..ed4d43be30 100644 --- a/lib/nova +++ b/lib/nova @@ -156,7 +156,7 @@ FLAT_INTERFACE=${FLAT_INTERFACE:-$GUEST_INTERFACE_DEFAULT} # ``MULTI_HOST`` is a mode where each compute node runs its own network node. 
This # allows network operations and routing for a VM to occur on the server that is # running the VM - removing a SPOF and bandwidth bottleneck. -MULTI_HOST=$(trueorfalse False MULTI_HOST) +MULTI_HOST=$(trueorfalse True MULTI_HOST) # ``NOVA_ALLOW_MOVE_TO_SAME_HOST`` can be set to False in multi node DevStack, # where there are at least two nova-computes. From 8349aff5abd26c63470b96e99ade0e8292a87e7a Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 1 Sep 2015 12:45:28 -0400 Subject: [PATCH 0449/2941] add options to support nova test matrix This adds 2 devstack options: NOVA_V2_LEGACY={True/False} which is whether we'd like to force the /v2 endpoint to use the legacy v2.0 code base. it also provides TEMPEST_COMPUTE_TYPE as an way to pass in which service catalog entry we'd like to use for compute testing. We also make v2.1 the default compute endpoint, as that's what we'd like everyone to be testing and using. The other options will let us build jobs that nova can run to ensure those APIs don't regress. Change-Id: Ie6b7e4290d9a1d9789d04099b3b31c9a557bc22b --- lib/nova | 21 +++++++++++++++++---- lib/tempest | 8 ++++++++ 2 files changed, 25 insertions(+), 4 deletions(-) diff --git a/lib/nova b/lib/nova index 6441a891eb..dcb4f13bd6 100644 --- a/lib/nova +++ b/lib/nova @@ -64,6 +64,11 @@ NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini} # Expect to remove in L or M. NOVA_API_VERSION=${NOVA_API_VERSION-default} +# NOVA_V2_LEGACY defines whether we force the Nova v2.0 enpoint onto +# the Nova v2.0 legacy code base. Remove this option once the Nova +# v2.0 legacy codebase is removed. 
+NOVA_V2_LEGACY=$(trueorfalse False NOVA_V2_LEGACY) + if is_suse; then NOVA_WSGI_DIR=${NOVA_WSGI_DIR:-/srv/www/htdocs/nova} else @@ -317,6 +322,13 @@ function configure_nova { if [[ "$NOVA_API_VERSION" == "v21default" ]]; then sed -i s/": openstack_compute_api_v2$"/": openstack_compute_api_v21"/ "$NOVA_API_PASTE_INI" fi + + # For setting up an environment where v2.0 is running on the + # v2.0 legacy code base. + if [[ "$NOVA_V2_LEGACY" == "True" ]]; then + sed -i s@"^/v2: openstack_compute_api_v21_legacy_v2_compatible$"@"/v2: openstack_compute_api_legacy_v2"@ \ + "$NOVA_API_PASTE_INI" + fi fi if is_service_enabled n-cpu; then @@ -411,15 +423,16 @@ function create_nova_accounts { nova_api_url="$NOVA_SERVICE_PROTOCOL://$NOVA_SERVICE_HOST/compute" fi - get_or_create_service "nova" "compute" "Nova Compute Service" - get_or_create_endpoint "compute" \ + get_or_create_service "nova_legacy" "compute_legacy" \ + "Nova Compute Service (Legacy 2.0)" + get_or_create_endpoint "compute_legacy" \ "$REGION_NAME" \ "$nova_api_url/v2/\$(tenant_id)s" \ "$nova_api_url/v2/\$(tenant_id)s" \ "$nova_api_url/v2/\$(tenant_id)s" - get_or_create_service "novav21" "computev21" "Nova Compute Service V2.1" - get_or_create_endpoint "computev21" \ + get_or_create_service "nova" "compute" "Nova Compute Service" + get_or_create_endpoint "compute" \ "$REGION_NAME" \ "$nova_api_url/v2.1/\$(tenant_id)s" \ "$nova_api_url/v2.1/\$(tenant_id)s" \ diff --git a/lib/tempest b/lib/tempest index be24da6b61..df0e382133 100644 --- a/lib/tempest +++ b/lib/tempest @@ -361,6 +361,14 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute fixed_network_name $PRIVATE_NETWORK_NAME fi + # Set the service catalog entry for Tempest to run on. Typically + # used to try different compute API version targets. The tempest + # default if 'compute', which is typically valid, so only set this + # if you want to change it. 
+ if [[ -n "$TEMPEST_COMPUTE_TYPE" ]]; then + iniset $TEMPEST_CONFIG compute catalog_type $TEMPEST_COMPUTE_TYPE + fi + # Compute Features # Run ``verify_tempest_config -ur`` to retrieve enabled extensions on API endpoints # NOTE(mtreinish): This must be done after auth settings are added to the tempest config From f798ec12422b6ff9314e0f5fbaa995ab7203b419 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Tue, 1 Sep 2015 13:20:48 -0700 Subject: [PATCH 0450/2941] docs: add a blurb in the single-vm doc about cloud-init log output I had to poke around for awhile to find /var/log/cloud-init-output.log so I figured I'd add a sentence in the docs about using that. Change-Id: I8bb6cb730032e41661ee443da816cbea2b28f76d --- doc/source/guides/single-vm.rst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/doc/source/guides/single-vm.rst b/doc/source/guides/single-vm.rst index c2ce1a34a0..515cd505c3 100644 --- a/doc/source/guides/single-vm.rst +++ b/doc/source/guides/single-vm.rst @@ -78,6 +78,11 @@ passed as the user-data file when booting the VM. As DevStack will refuse to run as root, this configures ``cloud-init`` to create a non-root user and run the ``start.sh`` script as that user. +If you are using cloud-init and you have not +`enabled custom logging <../configuration.html#enable-logging>`_ of the stack +output, then the stack output can be found in +``/var/log/cloud-init-output.log`` by default. + Launching By Hand ----------------- From e5a6f82e431bece62deb830257439b309b2921ec Mon Sep 17 00:00:00 2001 From: ghanshyam Date: Wed, 2 Sep 2015 13:19:48 +0900 Subject: [PATCH 0451/2941] Cleanup nova v2.1 API testing options Now gate will tests Nova v2.1 as default and separate jobs for v2 legacy and v2 compatible APIs - I86a627b8ec7b1246452a16c10dcfb1ad5f83bdef This commit cleanup the options used for old v2.1 jobs. 
Separate options are provided for Nova APIs testing- Ie6b7e4290d9a1d9789d04099b3b31c9a557bc22b Depends-On: Ie0430cedb7a8136c04b9fb7d08746293aab79f42 To remove old V2.1 jobs. Change-Id: Ibbed44e1c41ec1e6b3675317f08061810762796c --- lib/nova | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/lib/nova b/lib/nova index dcb4f13bd6..c0227c524f 100644 --- a/lib/nova +++ b/lib/nova @@ -56,13 +56,6 @@ NOVA_CELLS_DB=${NOVA_CELLS_DB:-nova_cell} NOVA_API_DB=${NOVA_API_DB:-nova_api} NOVA_API_PASTE_INI=${NOVA_API_PASTE_INI:-$NOVA_CONF_DIR/api-paste.ini} -# NOVA_API_VERSION valid options -# - default - setup API end points as nova does out of the box -# - v21default - make v21 the default on /v2 -# -# NOTE(sdague): this is for transitional testing of the Nova v21 API. -# Expect to remove in L or M. -NOVA_API_VERSION=${NOVA_API_VERSION-default} # NOVA_V2_LEGACY defines whether we force the Nova v2.0 enpoint onto # the Nova v2.0 legacy code base. Remove this option once the Nova @@ -318,11 +311,6 @@ function configure_nova { # Get the sample configuration file in place cp $NOVA_DIR/etc/nova/api-paste.ini $NOVA_CONF_DIR - # For testing v21 is equivalent to v2 - if [[ "$NOVA_API_VERSION" == "v21default" ]]; then - sed -i s/": openstack_compute_api_v2$"/": openstack_compute_api_v21"/ "$NOVA_API_PASTE_INI" - fi - # For setting up an environment where v2.0 is running on the # v2.0 legacy code base. if [[ "$NOVA_V2_LEGACY" == "True" ]]; then From 36daecd1a362a6a5388aa4ee5c5269563a820cbf Mon Sep 17 00:00:00 2001 From: Hirofumi Ichihara Date: Thu, 23 Jul 2015 17:50:40 +0900 Subject: [PATCH 0452/2941] Remove restraint on plugin file from neutron plugins Neutron plugin always needs plugin file even if the plugin is out of tree. This patch remove the restraint. 
Change-Id: Iedd52db6430def47505a127986170d7279966141 Closes-Bug: #1477452 --- lib/neutron-legacy | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) mode change 100644 => 100755 lib/neutron-legacy diff --git a/lib/neutron-legacy b/lib/neutron-legacy old mode 100644 new mode 100755 index c244bc54cd..c74c844a0c --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -326,7 +326,9 @@ ENABLE_METADATA_NETWORK=${ENABLE_METADATA_NETWORK:-False} # --------------------------------- # Please refer to ``lib/neutron_plugins/README.md`` for details. -source $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN +if [ -f $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN ]; then + source $TOP_DIR/lib/neutron_plugins/$Q_PLUGIN +fi # Agent loadbalancer service plugin functions # ------------------------------------------- From 09b431d72d3db29e01be44f85dcbfcb78f651b13 Mon Sep 17 00:00:00 2001 From: Chuck Carmack Date: Wed, 2 Sep 2015 14:27:58 +0000 Subject: [PATCH 0453/2941] Disable the shelve tests for nova-cells using tempest config Change from using a blacklist to disable the shelve tests for nova-cells to using the tempest config option to disable the shelve feature tests. This is the intended method of disabling feature tests. This first commit is to add code to lib/tempest to disable the shelve feature test if the nova-cells service is enabled. The next st will remove the shelve blacklist from http://git.openstack.org/cgit/openstack/nova/tree/devstack/tempest-dsvm-cells-rc Change-Id: Ibf1f9aaa63e5f17b7d8774b511940ba8421e0887 Partial-bug: 1491152 --- lib/tempest | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/tempest b/lib/tempest index a8d597a34e..272b549c1a 100644 --- a/lib/tempest +++ b/lib/tempest @@ -390,6 +390,10 @@ function configure_tempest { # neutron.allow_duplicate_networks option was removed from nova in Liberty # and is now the default behavior. 
iniset $TEMPEST_CONFIG compute-feature-enabled allow_duplicate_networks ${NOVA_ALLOW_DUPLICATE_NETWORKS:-True} + if is_service_enabled n-cell; then + # Cells doesn't support shelving/unshelving + iniset $TEMPEST_CONFIG compute-feature-enabled shelve False + fi # Network iniset $TEMPEST_CONFIG network api_version 2.0 From 0c3a3b051e08589189914bc543378e4dd2dcdeb8 Mon Sep 17 00:00:00 2001 From: Mikhail Feoktistov Date: Thu, 3 Sep 2015 18:15:28 +0300 Subject: [PATCH 0454/2941] Fix upload_image error in stackrc This commit fixes error caused by adding default image path to IMAGE_URLS without ',' If user sets IMAGE_URLS in localrc like IMAGE_URLS="path1, path2" (with a space after the comma) the we get an error "binary operator expected" in if [ -n $IMAGE_URLS ] condition and a comma will not be added to the end of IMAGE_URLS In the code below we add default image path(path3) to IMAGE_URLS and we get IMAGE_URLS="path1, path2path3" Change-Id: I6543f7178c49a42c71ad9df4cdb4c6e78cbf7758 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index b838911d97..ca897a1c4d 100644 --- a/stackrc +++ b/stackrc @@ -565,7 +565,7 @@ CIRROS_ARCH=${CIRROS_ARCH:-"x86_64"} # ``IMAGE_URLS`` to be set in the `localrc` section of ``local.conf``. DOWNLOAD_DEFAULT_IMAGES=$(trueorfalse True DOWNLOAD_DEFAULT_IMAGES) if [[ "$DOWNLOAD_DEFAULT_IMAGES" == "True" ]]; then - if [ -n $IMAGE_URLS ]; then + if [[ -n "$IMAGE_URLS" ]]; then IMAGE_URLS+="," fi case "$VIRT_DRIVER" in From c7e772c164c0c08be90624d76d4c3e11864364b7 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Tue, 1 Sep 2015 15:18:57 +0200 Subject: [PATCH 0455/2941] Use the pip installed version of requests with Fedora The upstream version of requests contains a copy of urllib3 and cardet library, common practice in many distros to create symbolic links for these libraries instead of creating a huge package which contains the same library as the distro provides as separate package as well. 
Now devstack upgrades the urllib3 to incompatible version, but it leaves the requests unchanged because Fedora already has the latest version. The issue does not happens with Ubuntu because it has older requests and devstack updates it as well. The pip installed version contains a bundled urllib3 and the actually installed urllib3 version does not matters. This is not the `usual` distro package overrides pip installed package case. Change-Id: Icfa71368384b0c2e3ff39265b2fa9190b5566b9b Related-Bug: #1476770 --- tools/fixup_stuff.sh | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index 4fff57f401..a601cf2f67 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -134,6 +134,31 @@ if is_fedora; then sudo systemctl start iptables fi fi + + if [[ "$os_RELEASE" -ge "21" ]]; then + # requests ships vendored version of chardet/urllib3, but on + # fedora these are symlinked back to the primary versions to + # avoid duplication of code on disk. This is fine when + # maintainers keep things in sync, but since devstack takes + # over and installs later versions via pip we can end up with + # incompatible versions. + # + # The rpm package is not removed to preserve the dependent + # packages like cloud-init; rather we remove the symlinks and + # force a re-install of requests so the vendored versions it + # wants are present. 
+ # + # Realted issues: + # https://bugs.launchpad.net/glance/+bug/1476770 + # https://bugzilla.redhat.com/show_bug.cgi?id=1253823 + + base_path=/usr/lib/python2.7/site-packages/requests/packages + if [ -L $base_path/chardet -o -L $base_path/urllib3 ]; then + sudo rm -f /usr/lib/python2.7/site-packages/requests/packages/{chardet,urllib3} + # install requests with the bundled urllib3 to avoid conflicts + pip_install --upgrade --force-reinstall requests + fi + fi fi # The version of pip(1.5.4) supported by python-virtualenv(1.11.4) has From 975243189216561f66ca91520495e0c6e2f747e2 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Fri, 4 Sep 2015 14:15:27 +0000 Subject: [PATCH 0456/2941] Revert "turn multi host true for nova network by default" This reverts commit 2e1a91c50b73ca7f46871d3a906ade93bbcac6a7 It looks like this introduced race bug 1491949 in the gate-tempest-dsvm-large-ops job causing rpc timeouts when deallocating network information for an instance, specifically around the dnsmasq callback to release the fixed IP that the instance was using which triggers the disassociation between the fixed IP and the instance in the nova database. Change-Id: I163cdeea75e92485f241647c69aea0d7456c3258 Closes-Bug: #1491949 --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index ed4d43be30..6441a891eb 100644 --- a/lib/nova +++ b/lib/nova @@ -156,7 +156,7 @@ FLAT_INTERFACE=${FLAT_INTERFACE:-$GUEST_INTERFACE_DEFAULT} # ``MULTI_HOST`` is a mode where each compute node runs its own network node. This # allows network operations and routing for a VM to occur on the server that is # running the VM - removing a SPOF and bandwidth bottleneck. -MULTI_HOST=$(trueorfalse True MULTI_HOST) +MULTI_HOST=$(trueorfalse False MULTI_HOST) # ``NOVA_ALLOW_MOVE_TO_SAME_HOST`` can be set to False in multi node DevStack, # where there are at least two nova-computes. 
From f768787bdd6dddf2790f83a884618d29677ca77c Mon Sep 17 00:00:00 2001 From: Lucas Alvares Gomes Date: Fri, 4 Sep 2015 15:34:06 +0100 Subject: [PATCH 0457/2941] Revert "Convert identity defaults to keystone v3 api" This change have broke the Ironic tests. Reverting to unblock the Ironic gate. This reverts commit 4b115ad526df7e12bbdc71e0280b3c691e53ed04. Closes-Bug: #1492216 Change-Id: I03acfdf47caf435cede1df08fd79b288a6662435 --- functions-common | 35 +++++++++++++++++++++++++++++++---- stack.sh | 15 +++++++-------- 2 files changed, 38 insertions(+), 12 deletions(-) diff --git a/functions-common b/functions-common index 473808b1f5..446de5374f 100644 --- a/functions-common +++ b/functions-common @@ -690,13 +690,16 @@ function policy_add { # Usage: get_or_create_domain function get_or_create_domain { local domain_id + local os_url="$KEYSTONE_SERVICE_URI_V3" # Gets domain id domain_id=$( # Gets domain id - openstack domain show $1 \ + openstack --os-token=$OS_TOKEN --os-url=$os_url \ + --os-identity-api-version=3 domain show $1 \ -f value -c id 2>/dev/null || # Creates new domain - openstack domain create $1 \ + openstack --os-token=$OS_TOKEN --os-url=$os_url \ + --os-identity-api-version=3 domain create $1 \ --description "$2" \ -f value -c id ) @@ -707,11 +710,13 @@ function get_or_create_domain { # Usage: get_or_create_group [] function get_or_create_group { local desc="${3:-}" + local os_url="$KEYSTONE_SERVICE_URI_V3" local group_id # Gets group id group_id=$( # Creates new group with --or-show - openstack group create $1 \ + openstack --os-token=$OS_TOKEN --os-url=$os_url \ + --os-identity-api-version=3 group create $1 \ --domain $2 --description "$desc" --or-show \ -f value -c id ) @@ -733,6 +738,8 @@ function get_or_create_user { openstack user create \ $1 \ --password "$2" \ + --os-url=$KEYSTONE_SERVICE_URI_V3 \ + --os-identity-api-version=3 \ --domain=$3 \ $email \ --or-show \ @@ -747,7 +754,9 @@ function get_or_create_project { local project_id project_id=$( 
# Creates new project with --or-show - openstack project create $1 \ + openstack --os-url=$KEYSTONE_SERVICE_URI_V3 \ + --os-identity-api-version=3 \ + project create $1 \ --domain=$2 \ --or-show -f value -c id ) @@ -761,6 +770,8 @@ function get_or_create_role { role_id=$( # Creates role with --or-show openstack role create $1 \ + --os-url=$KEYSTONE_SERVICE_URI_V3 \ + --os-identity-api-version=3 \ --or-show -f value -c id ) echo $role_id @@ -773,6 +784,8 @@ function get_or_add_user_project_role { # Gets user role id user_role_id=$(openstack role list \ --user $2 \ + --os-url=$KEYSTONE_SERVICE_URI_V3 \ + --os-identity-api-version=3 \ --column "ID" \ --project $3 \ --column "Name" \ @@ -783,6 +796,8 @@ function get_or_add_user_project_role { $1 \ --user $2 \ --project $3 \ + --os-url=$KEYSTONE_SERVICE_URI_V3 \ + --os-identity-api-version=3 \ | grep " id " | get_field 2) fi echo $user_role_id @@ -794,15 +809,21 @@ function get_or_add_group_project_role { local group_role_id # Gets group role id group_role_id=$(openstack role list \ + --os-url=$KEYSTONE_SERVICE_URI_V3 \ + --os-identity-api-version=3 \ --group $2 \ --project $3 \ -c "ID" -f value) if [[ -z "$group_role_id" ]]; then # Adds role to group and get it openstack role add $1 \ + --os-url=$KEYSTONE_SERVICE_URI_V3 \ + --os-identity-api-version=3 \ --group $2 \ --project $3 group_role_id=$(openstack role list \ + --os-url=$KEYSTONE_SERVICE_URI_V3 \ + --os-identity-api-version=3 \ --group $2 \ --project $3 \ -c "ID" -f value) @@ -820,6 +841,8 @@ function get_or_create_service { openstack service show $2 -f value -c id 2>/dev/null || # Creates new service if not exists openstack service create \ + --os-url $KEYSTONE_SERVICE_URI_V3 \ + --os-identity-api-version=3 \ $2 \ --name $1 \ --description="$3" \ @@ -838,6 +861,8 @@ function _get_or_create_endpoint_with_interface { # gets support for this, the check for the region name can be removed. 
# Related bug in keystone: https://bugs.launchpad.net/keystone/+bug/1482772 endpoint_id=$(openstack endpoint list \ + --os-url $KEYSTONE_SERVICE_URI_V3 \ + --os-identity-api-version=3 \ --service $1 \ --interface $2 \ --region $4 \ @@ -845,6 +870,8 @@ function _get_or_create_endpoint_with_interface { if [[ -z "$endpoint_id" ]]; then # Creates new endpoint endpoint_id=$(openstack endpoint create \ + --os-url $KEYSTONE_SERVICE_URI_V3 \ + --os-identity-api-version=3 \ $1 $2 $3 --region $4 -f value -c id) fi diff --git a/stack.sh b/stack.sh index 093fef4cd9..accfd0ac3e 100755 --- a/stack.sh +++ b/stack.sh @@ -989,15 +989,13 @@ if is_service_enabled keystone; then start_keystone fi - export OS_IDENTITY_API_VERSION=3 - # Set up a temporary admin URI for Keystone - SERVICE_ENDPOINT=$KEYSTONE_AUTH_URI/v3 + SERVICE_ENDPOINT=$KEYSTONE_AUTH_URI/v2.0 if is_service_enabled tls-proxy; then export OS_CACERT=$INT_CA_DIR/ca-chain.pem # Until the client support is fixed, just use the internal endpoint - SERVICE_ENDPOINT=http://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT_INT/v3 + SERVICE_ENDPOINT=http://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT_INT/v2.0 fi # Setup OpenStackClient token-endpoint auth @@ -1025,13 +1023,14 @@ if is_service_enabled keystone; then # Begone token auth unset OS_TOKEN OS_URL + # force set to use v2 identity authentication even with v3 commands + export OS_AUTH_TYPE=v2password + # Set up password auth credentials now that Keystone is bootstrapped - export OS_AUTH_URL=$KEYSTONE_AUTH_URI + export OS_AUTH_URL=$SERVICE_ENDPOINT + export OS_TENANT_NAME=admin export OS_USERNAME=admin - export OS_USER_DOMAIN_ID=default export OS_PASSWORD=$ADMIN_PASSWORD - export OS_PROJECT_NAME=admin - export OS_PROJECT_DOMAIN_ID=default export OS_REGION_NAME=$REGION_NAME fi From e0550190a597c9c78caf2f0cfe3d79ad1d368259 Mon Sep 17 00:00:00 2001 From: Andrey Pavlov Date: Sun, 6 Sep 2015 12:05:49 +0300 Subject: [PATCH 0458/2941] remove unused param from tempest config Remove setting of 
ssh_user param to boto section. Because boto uses user name to ssh from compute.ssh_user Change-Id: Ifd5b99ef35eaf126a3c6e0055837c4741353345e --- lib/tempest | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index a78cd741c6..7695054f78 100644 --- a/lib/tempest +++ b/lib/tempest @@ -426,7 +426,6 @@ function configure_tempest { iniset $TEMPEST_CONFIG boto aki_manifest cirros-${CIRROS_VERSION}-${CIRROS_ARCH}-vmlinuz.manifest.xml iniset $TEMPEST_CONFIG boto instance_type "$boto_instance_type" iniset $TEMPEST_CONFIG boto http_socket_timeout 30 - iniset $TEMPEST_CONFIG boto ssh_user ${DEFAULT_INSTANCE_USER:-cirros} # Orchestration Tests if is_service_enabled heat; then From 9451021200cf333dc624275ef832acd3f37dd553 Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Wed, 2 Sep 2015 15:40:04 -0400 Subject: [PATCH 0459/2941] Fix typo in _move_neutron_addresses_route() A previous change in this code had a typo, reversing the definitions of IP_ADD and IP_DEL, noticed while debugging another issue. 
Change-Id: Ifb87de1138eeb72081a2e52a5c81bfe9fe91ecd6 --- lib/neutron-legacy | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index c74c844a0c..d1865d8bcd 100755 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -826,11 +826,11 @@ function _move_neutron_addresses_route { fi if [[ "$IP_BRD" != "" ]]; then - IP_ADD="sudo ip addr del $IP_BRD dev $from_intf" - IP_DEL="sudo ip addr add $IP_BRD dev $to_intf" + IP_DEL="sudo ip addr del $IP_BRD dev $from_intf" + IP_ADD="sudo ip addr add $IP_BRD dev $to_intf" fi - $IP_ADD; $IP_DEL; $ADD_OVS_PORT; $ADD_DEFAULT_ROUTE + $IP_DEL; $IP_ADD; $ADD_OVS_PORT; $ADD_DEFAULT_ROUTE fi } From b848ad783ffa417c4e1a732c164774ee22442f1d Mon Sep 17 00:00:00 2001 From: Takashi NATSUME Date: Tue, 8 Sep 2015 10:56:28 +0900 Subject: [PATCH 0460/2941] Fix a typo in make_cert.sh Change-Id: I6d4f02edf843f0519c3d0413bb033604a7ec73e9 --- tools/make_cert.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/make_cert.sh b/tools/make_cert.sh index cb93e57c4b..2628b40524 100755 --- a/tools/make_cert.sh +++ b/tools/make_cert.sh @@ -5,7 +5,7 @@ # Create a CA hierarchy (if necessary) and server certificate # # This mimics the CA structure that DevStack sets up when ``tls_proxy`` is enabled -# but in the curent directory unless ``DATA_DIR`` is set +# but in the current directory unless ``DATA_DIR`` is set ENABLE_TLS=True DATA_DIR=${DATA_DIR:-`pwd`/ca-data} From 3e3212b52d14c27c002c27b6d4f8bcfa7f5ffbcf Mon Sep 17 00:00:00 2001 From: Christian Schwede Date: Fri, 4 Sep 2015 13:02:19 +0200 Subject: [PATCH 0461/2941] Add trailing IDENTITY_API_VERSION to OS_AUTH_URL in swift_configure_tempurls The python-keystoneclient requires a trailing /v to successfully authenticate, otherwise it fails with a 404 error due to a not found resource. This error showed up only when generating Swift tempurls, because the error was raised when using python-swiftclient. 
This change fixes this for python-swiftclient within devstack. Change-Id: Ibe222d65162898db69acba076b5fe1cb3621fbc3 Closes-Bug: 1492216 --- lib/swift | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/swift b/lib/swift index 6b61274127..645bfd7cd9 100644 --- a/lib/swift +++ b/lib/swift @@ -801,6 +801,7 @@ function swift_configure_tempurls { OS_USERNAME=swift \ OS_TENANT_NAME=$SERVICE_TENANT_NAME \ OS_PASSWORD=$SERVICE_PASSWORD \ + OS_AUTH_URL=$KEYSTONE_AUTH_URI/v$IDENTITY_API_VERSION \ swift post -m "Temp-URL-Key: $SWIFT_TEMPURL_KEY" } From 2af6915e08da87334da9ea023ad65b1f10040604 Mon Sep 17 00:00:00 2001 From: Wei Jiangang Date: Tue, 8 Sep 2015 18:03:22 +0800 Subject: [PATCH 0462/2941] Fix typos in devstack/stack.sh log to the the file => log to the file pluggins => plugins Change-Id: Iff5c54c39afb4398962bfe6a0500b1f011c75c8a --- stack.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index 639f72b1bc..40f481331e 100755 --- a/stack.sh +++ b/stack.sh @@ -431,7 +431,7 @@ fi # Set up logging of screen windows # Set ``SCREEN_LOGDIR`` to turn on logging of screen windows to the -# directory specified in ``SCREEN_LOGDIR``, we will log to the the file +# directory specified in ``SCREEN_LOGDIR``, we will log to the file # ``screen-$SERVICE_NAME-$TIMESTAMP.log`` in that dir and have a link # ``screen-$SERVICE_NAME.log`` to the latest log file. # Logs are kept for as long specified in ``LOGDAYS``. 
@@ -522,7 +522,7 @@ fi # Clone all external plugins fetch_plugins -# Plugin Phase 0: override_defaults - allow pluggins to override +# Plugin Phase 0: override_defaults - allow plugins to override # defaults before other services are run run_phase override_defaults From 0eec4f86c1ac607bfbdf1ec19561b3bbdb56cf4f Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Tue, 8 Sep 2015 10:45:06 +0000 Subject: [PATCH 0463/2941] database: fix PostgreSQL connection string If all databases drivers are loaded, MySQL SQLAlchemy driver overrides all the other one that might not have set one. This patches fixes that. Change-Id: If6d8d08e5b7b7c48ca012677b536d71058def6fd Closes-Bug: #1493304 --- lib/database | 10 +--------- lib/databases/mysql | 14 ++++++++------ lib/databases/postgresql | 4 ++++ 3 files changed, 13 insertions(+), 15 deletions(-) diff --git a/lib/database b/lib/database index 5bbbe3144b..13740b90e6 100644 --- a/lib/database +++ b/lib/database @@ -101,7 +101,7 @@ function initialize_database_backends { # a multi-node DevStack installation. # NOTE: Don't specify ``/db`` in this string so we can use it for multiple services - BASE_SQL_CONN=${BASE_SQL_CONN:-$(get_database_type)://$DATABASE_USER:$DATABASE_PASSWORD@$DATABASE_HOST} + BASE_SQL_CONN=${BASE_SQL_CONN:-$(get_database_type_$DATABASE_TYPE)://$DATABASE_USER:$DATABASE_PASSWORD@$DATABASE_HOST} return 0 } @@ -135,14 +135,6 @@ function database_connection_url { database_connection_url_$DATABASE_TYPE $db } -function get_database_type { - if [[ -n "${SQLALCHEMY_DATABASE_DRIVER}" ]]; then - echo "${DATABASE_TYPE}+${SQLALCHEMY_DATABASE_DRIVER}" - else - echo "${DATABASE_TYPE}" - fi -} - # Restore xtrace $XTRACE diff --git a/lib/databases/mysql b/lib/databases/mysql index ada56a762a..c2ab32e5b2 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -12,12 +12,6 @@ MY_XTRACE=$(set +o | grep xtrace) set +o xtrace MYSQL_DRIVER=${MYSQL_DRIVER:-PyMySQL} -# Force over to pymysql driver by default if we are using it. 
-if is_service_enabled mysql; then - if [[ "$MYSQL_DRIVER" == "PyMySQL" ]]; then - SQLALCHEMY_DATABASE_DRIVER=${SQLALCHEMY_DATABASE_DRIVER:-"pymysql"} - fi -fi register_database mysql @@ -30,6 +24,14 @@ fi # Functions # --------- +function get_database_type_mysql { + if [[ "$MYSQL_DRIVER" == "PyMySQL" ]]; then + echo mysql+pymysql + else + echo mysql + fi +} + # Get rid of everything enough to cleanly change database backends function cleanup_database_mysql { stop_service $MYSQL diff --git a/lib/databases/postgresql b/lib/databases/postgresql index e087a1e0d4..78c7bedc90 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -21,6 +21,10 @@ register_database postgresql # Functions # --------- +function get_database_type_postgresql { + echo postgresql +} + # Get rid of everything enough to cleanly change database backends function cleanup_database_postgresql { stop_service postgresql From 54616845dfd0942164a67f69aef4e929d004d2d9 Mon Sep 17 00:00:00 2001 From: Gary Kotton Date: Tue, 8 Sep 2015 21:39:20 -0700 Subject: [PATCH 0464/2941] VMware: remove configuration integration_bridge from nova There are a number of different neutron plugins that work with the VMware nova driver. If necessary this flag can be set by each plugin if necessary. 
Change-Id: I47ac2a5c71ff573f474d45b85a523fc243ec3ade --- lib/nova_plugins/hypervisor-vsphere | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/nova_plugins/hypervisor-vsphere b/lib/nova_plugins/hypervisor-vsphere index c406e094f3..698f836bad 100644 --- a/lib/nova_plugins/hypervisor-vsphere +++ b/lib/nova_plugins/hypervisor-vsphere @@ -42,9 +42,6 @@ function configure_nova_hypervisor { iniset $NOVA_CONF vmware host_username "$VMWAREAPI_USER" iniset $NOVA_CONF vmware host_password "$VMWAREAPI_PASSWORD" iniset_multiline $NOVA_CONF vmware cluster_name "$VMWAREAPI_CLUSTER" - if is_service_enabled neutron; then - iniset $NOVA_CONF vmware integration_bridge $OVS_BRIDGE - fi } # install_nova_hypervisor() - Install external components From 97cc85b9b1661cb73f732b854a3f2ebd738539ed Mon Sep 17 00:00:00 2001 From: Hirofumi Ichihara Date: Tue, 8 Sep 2015 13:51:01 +0900 Subject: [PATCH 0465/2941] Provide hook for neutron plugin config This removes a restriction for neutron vendor plugin. Some neutron vendor plugins were already decomposed and there is no config file in Neutron tree. They should prepare the file in each plugin. Change-Id: I4997b8eae1f433b1c23f20c06ba254568ac4982b --- lib/neutron-legacy | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index d1865d8bcd..550eadb4b4 100755 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -901,7 +901,11 @@ function _configure_neutron_common { # If needed, move config file from ``$NEUTRON_DIR/etc/neutron`` to ``NEUTRON_CONF_DIR`` mkdir -p /$Q_PLUGIN_CONF_PATH Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME - cp $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE + # NOTE(hichihara): Some neutron vendor plugins were already decomposed and + # there is no config file in Neutron tree. They should prepare the file in each plugin. 
+ if [ -f $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE ]; then + cp $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE + fi iniset $NEUTRON_CONF database connection `database_connection_url $Q_DB_NAME` iniset $NEUTRON_CONF DEFAULT state_path $DATA_DIR/neutron From 23f65cb9d77cac11101dc7f25c3b8a6a25a73d53 Mon Sep 17 00:00:00 2001 From: Chris Dent Date: Wed, 9 Sep 2015 08:33:15 +0000 Subject: [PATCH 0466/2941] Update lib/ceilometer to reflect script renames Without this change some services for ceilometer will not start breaking CI. This change I7447ba4f408c95b0acf1b809504ce16fff1c6e21 was validated against the ceilometer devstack plugin but apparently not against devstack itself. Until I413ab159474b7d7231ad66d3a482201f74efe8a8 merges devstack still has ceilometer support and is used in the gate. Change-Id: Ib1ea8b6ef7019570f82b0ba87e03fc627c8f6801 --- lib/ceilometer | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/ceilometer b/lib/ceilometer index d1cc862160..c6c4c87fbc 100644 --- a/lib/ceilometer +++ b/lib/ceilometer @@ -360,10 +360,10 @@ function install_ceilometerclient { # start_ceilometer() - Start running processes, including screen function start_ceilometer { - run_process ceilometer-acentral "$CEILOMETER_BIN_DIR/ceilometer-agent-central --config-file $CEILOMETER_CONF" + run_process ceilometer-acentral "$CEILOMETER_BIN_DIR/ceilometer-polling --polling-namespaces central --config-file $CEILOMETER_CONF" run_process ceilometer-anotification "$CEILOMETER_BIN_DIR/ceilometer-agent-notification --config-file $CEILOMETER_CONF" run_process ceilometer-collector "$CEILOMETER_BIN_DIR/ceilometer-collector --config-file $CEILOMETER_CONF" - run_process ceilometer-aipmi "$CEILOMETER_BIN_DIR/ceilometer-agent-ipmi --config-file $CEILOMETER_CONF" + run_process ceilometer-aipmi "$CEILOMETER_BIN_DIR/ceilometer-polling --polling-namespaces ipmi --config-file $CEILOMETER_CONF" if [[ "$CEILOMETER_USE_MOD_WSGI" == "False" ]]; then run_process ceilometer-api 
"$CEILOMETER_BIN_DIR/ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF" @@ -378,10 +378,10 @@ function start_ceilometer { # Start the compute agent last to allow time for the collector to # fully wake up and connect to the message bus. See bug #1355809 if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then - run_process ceilometer-acompute "$CEILOMETER_BIN_DIR/ceilometer-agent-compute --config-file $CEILOMETER_CONF" $LIBVIRT_GROUP + run_process ceilometer-acompute "$CEILOMETER_BIN_DIR/ceilometer-polling --polling-namespaces compute --config-file $CEILOMETER_CONF" $LIBVIRT_GROUP fi if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then - run_process ceilometer-acompute "$CEILOMETER_BIN_DIR/ceilometer-agent-compute --config-file $CEILOMETER_CONF" + run_process ceilometer-acompute "$CEILOMETER_BIN_DIR/ceilometer-polling --polling-namespaces compute --config-file $CEILOMETER_CONF" fi # Only die on API if it was actually intended to be turned on From e8c70e23b5c8b9a41e2e86116972cf4da3367e19 Mon Sep 17 00:00:00 2001 From: Jordan Pittier Date: Thu, 13 Aug 2015 18:10:00 +0200 Subject: [PATCH 0467/2941] Tempest: add a Cinder extend_with_snapshot feature flag A new tempest test is being added in https://review.openstack.org/#/c/200108/ but it doesn't run by default because the test fails on Juno. So a feature flag in Tempest is added. This patch turns on this feature flag at Devstack's side. Change-Id: If1cf90dac3edc81a483fc51da74495042c96d543 --- lib/tempest | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/tempest b/lib/tempest index fb4b0d34ae..85e8d5ffc3 100644 --- a/lib/tempest +++ b/lib/tempest @@ -477,6 +477,8 @@ function configure_tempest { # Volume # TODO(dkranz): Remove the bootable flag when Juno is end of life. iniset $TEMPEST_CONFIG volume-feature-enabled bootable True + # TODO(jordanP): Remove the extend_with_snapshot flag when Juno is end of life. 
+ iniset $TEMPEST_CONFIG volume-feature-enabled extend_with_snapshot True local volume_api_extensions=${VOLUME_API_EXTENSIONS:-"all"} if [[ ! -z "$DISABLE_VOLUME_API_EXTENSIONS" ]]; then From e9a4750fe1b111eeb457378c26ca9eea1b5e0085 Mon Sep 17 00:00:00 2001 From: Chris Dent Date: Sat, 27 Jun 2015 11:29:09 +0000 Subject: [PATCH 0468/2941] Remove ceilometer in favor of plugin The ceilometer project is moving to using a devstack plugin rather than having ceilometer in the base devstack. This is to allow greater control and flexibility. Change-Id: I413ab159474b7d7231ad66d3a482201f74efe8a8 --- clean.sh | 1 - doc/source/index.rst | 1 - files/apache-ceilometer.template | 15 -- functions-common | 6 - lib/ceilometer | 418 ------------------------------- stack.sh | 22 +- stackrc | 9 +- tests/test_libs_from_pypi.sh | 2 +- unstack.sh | 5 - 9 files changed, 4 insertions(+), 475 deletions(-) delete mode 100644 files/apache-ceilometer.template delete mode 100644 lib/ceilometer diff --git a/clean.sh b/clean.sh index 78e2a7a826..b22a29cb41 100755 --- a/clean.sh +++ b/clean.sh @@ -48,7 +48,6 @@ source $TOP_DIR/lib/glance source $TOP_DIR/lib/nova source $TOP_DIR/lib/cinder source $TOP_DIR/lib/swift -source $TOP_DIR/lib/ceilometer source $TOP_DIR/lib/heat source $TOP_DIR/lib/neutron-legacy source $TOP_DIR/lib/ironic diff --git a/doc/source/index.rst b/doc/source/index.rst index 2dd0241fba..7ff2705055 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -154,7 +154,6 @@ Scripts * `functions `__ - DevStack-specific functions * `functions-common `__ - Functions shared with other projects * `lib/apache `__ -* `lib/ceilometer `__ * `lib/ceph `__ * `lib/cinder `__ * `lib/database `__ diff --git a/files/apache-ceilometer.template b/files/apache-ceilometer.template deleted file mode 100644 index 79f14c38ab..0000000000 --- a/files/apache-ceilometer.template +++ /dev/null @@ -1,15 +0,0 @@ -Listen %PORT% - - - WSGIDaemonProcess ceilometer-api processes=2 threads=10 user=%USER% 
display-name=%{GROUP} %VIRTUALENV% - WSGIProcessGroup ceilometer-api - WSGIScriptAlias / %WSGIAPP% - WSGIApplicationGroup %{GLOBAL} - = 2.4> - ErrorLogFormat "%{cu}t %M" - - ErrorLog /var/log/%APACHE_NAME%/ceilometer.log - CustomLog /var/log/%APACHE_NAME%/ceilometer_access.log combined - - -WSGISocketPrefix /var/run/%APACHE_NAME% diff --git a/functions-common b/functions-common index 446de5374f..867bd5227d 100644 --- a/functions-common +++ b/functions-common @@ -1033,10 +1033,6 @@ function get_packages { if [[ ! $file_to_parse =~ $package_dir/cinder ]]; then file_to_parse="${file_to_parse} ${package_dir}/cinder" fi - elif [[ $service == ceilometer-* ]]; then - if [[ ! $file_to_parse =~ $package_dir/ceilometer ]]; then - file_to_parse="${file_to_parse} ${package_dir}/ceilometer" - fi elif [[ $service == s-* ]]; then if [[ ! $file_to_parse =~ $package_dir/swift ]]; then file_to_parse="${file_to_parse} ${package_dir}/swift" @@ -1777,7 +1773,6 @@ function enable_service { # There are special cases for some 'catch-all' services:: # **nova** returns true if any service enabled start with **n-** # **cinder** returns true if any service enabled start with **c-** -# **ceilometer** returns true if any service enabled start with **ceilometer** # **glance** returns true if any service enabled start with **g-** # **neutron** returns true if any service enabled start with **q-** # **swift** returns true if any service enabled start with **s-** @@ -1813,7 +1808,6 @@ function is_service_enabled { [[ ${service} == n-cell-* && ${ENABLED_SERVICES} =~ "n-cell" ]] && enabled=0 [[ ${service} == n-cpu-* && ${ENABLED_SERVICES} =~ "n-cpu" ]] && enabled=0 [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && enabled=0 - [[ ${service} == "ceilometer" && ${ENABLED_SERVICES} =~ "ceilometer-" ]] && enabled=0 [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && enabled=0 [[ ${service} == "ironic" && ${ENABLED_SERVICES} =~ "ir-" ]] && enabled=0 [[ ${service} == "neutron" && 
${ENABLED_SERVICES} =~ "q-" ]] && enabled=0 diff --git a/lib/ceilometer b/lib/ceilometer deleted file mode 100644 index c6c4c87fbc..0000000000 --- a/lib/ceilometer +++ /dev/null @@ -1,418 +0,0 @@ -#!/bin/bash -# -# lib/ceilometer -# Install and start **Ceilometer** service - -# To enable a minimal set of Ceilometer services, add the following to the -# ``localrc`` section of ``local.conf``: -# -# enable_service ceilometer-acompute ceilometer-acentral ceilometer-anotification ceilometer-collector ceilometer-api -# -# To ensure Ceilometer alarming services are enabled also, further add to the -# localrc section of local.conf: -# -# enable_service ceilometer-alarm-notifier ceilometer-alarm-evaluator -# -# To enable Ceilometer to collect the IPMI based meters, further add to the -# localrc section of local.conf: -# -# enable_service ceilometer-aipmi -# -# NOTE: Currently, there are two ways to get the IPMI based meters in -# OpenStack. One way is to configure Ironic conductor to report those meters -# for the nodes managed by Ironic and to have Ceilometer notification -# agent to collect them. Ironic by default does NOT enable that reporting -# functionality. So in order to do so, users need to set the option of -# conductor.send_sensor_data to true in the ironic.conf configuration file -# for the Ironic conductor service, and also enable the -# ceilometer-anotification service. -# -# The other way is to use Ceilometer ipmi agent only to get the IPMI based -# meters. To avoid duplicated meters, users need to make sure to set the -# option of conductor.send_sensor_data to false in the ironic.conf -# configuration file if the node on which Ceilometer ipmi agent is running -# is also managed by Ironic. -# -# Several variables set in the localrc section adjust common behaviors -# of Ceilometer (see within for additional settings): -# -# CEILOMETER_USE_MOD_WSGI: When True, run the api under mod_wsgi. -# CEILOMETER_PIPELINE_INTERVAL: Seconds between pipeline processing runs. 
Default 600. -# CEILOMETER_BACKEND: Database backend (e.g. 'mysql', 'mongodb', 'es') -# CEILOMETER_COORDINATION_URL: URL for group membership service provided by tooz. -# CEILOMETER_EVENTS: Enable event collection - -# Dependencies: -# -# - functions -# - OS_AUTH_URL for auth in api -# - DEST set to the destination directory -# - SERVICE_PASSWORD, SERVICE_TENANT_NAME for auth in api -# - STACK_USER service user - -# stack.sh -# --------- -# - install_ceilometer -# - configure_ceilometer -# - init_ceilometer -# - start_ceilometer -# - stop_ceilometer -# - cleanup_ceilometer - -# Save trace setting -XTRACE=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - -# Set up default directories -GITDIR["python-ceilometerclient"]=$DEST/python-ceilometerclient -GITDIR["ceilometermiddleware"]=$DEST/ceilometermiddleware - -CEILOMETER_DIR=$DEST/ceilometer -CEILOMETER_CONF_DIR=/etc/ceilometer -CEILOMETER_CONF=$CEILOMETER_CONF_DIR/ceilometer.conf -CEILOMETER_API_LOG_DIR=/var/log/ceilometer-api -CEILOMETER_AUTH_CACHE_DIR=${CEILOMETER_AUTH_CACHE_DIR:-/var/cache/ceilometer} -CEILOMETER_WSGI_DIR=${CEILOMETER_WSGI_DIR:-/var/www/ceilometer} - -# Support potential entry-points console scripts in VENV or not -if [[ ${USE_VENV} = True ]]; then - PROJECT_VENV["ceilometer"]=${CEILOMETER_DIR}.venv - CEILOMETER_BIN_DIR=${PROJECT_VENV["ceilometer"]}/bin -else - CEILOMETER_BIN_DIR=$(get_python_exec_prefix) -fi - -# Set up database backend -CEILOMETER_BACKEND=${CEILOMETER_BACKEND:-mysql} - -# Ceilometer connection info. 
-CEILOMETER_SERVICE_PROTOCOL=http -CEILOMETER_SERVICE_HOST=$SERVICE_HOST -CEILOMETER_SERVICE_PORT=${CEILOMETER_SERVICE_PORT:-8777} -CEILOMETER_USE_MOD_WSGI=$(trueorfalse False CEILOMETER_USE_MOD_WSGI) - -# To enable OSprofiler change value of this variable to "notifications,profiler" -CEILOMETER_NOTIFICATION_TOPICS=${CEILOMETER_NOTIFICATION_TOPICS:-notifications} -CEILOMETER_EVENTS=${CEILOMETER_EVENTS:-True} - -CEILOMETER_COORDINATION_URL=${CEILOMETER_COORDINATION_URL:-} -CEILOMETER_PIPELINE_INTERVAL=${CEILOMETER_PIPELINE_INTERVAL:-} - - -# Functions -# --------- - -# Test if any Ceilometer services are enabled -# is_ceilometer_enabled -function is_ceilometer_enabled { - [[ ,${ENABLED_SERVICES} =~ ,"ceilometer-" ]] && return 0 - return 1 -} - -# create_ceilometer_accounts() - Set up common required Ceilometer accounts -# -# Project User Roles -# ------------------------------------------------------------------ -# SERVICE_TENANT_NAME ceilometer admin -# SERVICE_TENANT_NAME ceilometer ResellerAdmin (if Swift is enabled) -function create_ceilometer_accounts { - - # Ceilometer - if [[ "$ENABLED_SERVICES" =~ "ceilometer-api" ]]; then - - create_service_user "ceilometer" "admin" - - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - get_or_create_service "ceilometer" "metering" "OpenStack Telemetry Service" - get_or_create_endpoint "metering" \ - "$REGION_NAME" \ - "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" \ - "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" \ - "$CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/" - fi - if is_service_enabled swift; then - # Ceilometer needs ResellerAdmin role to access Swift account stats. 
- get_or_add_user_project_role "ResellerAdmin" "ceilometer" $SERVICE_TENANT_NAME - fi - fi -} - - -# _cleanup_keystone_apache_wsgi() - Remove WSGI files, disable and remove Apache vhost file -function _cleanup_ceilometer_apache_wsgi { - sudo rm -f $CEILOMETER_WSGI_DIR/* - sudo rm -f $(apache_site_config_for ceilometer) -} - -# cleanup_ceilometer() - Remove residual data files, anything left over from previous -# runs that a clean run would need to clean up -function cleanup_ceilometer { - if [ "$CEILOMETER_BACKEND" = 'mongodb' ] ; then - mongo ceilometer --eval "db.dropDatabase();" - elif [ "$CEILOMETER_BACKEND" = 'es' ] ; then - curl -XDELETE "localhost:9200/events_*" - fi - if [ "$CEILOMETER_USE_MOD_WSGI" == "True" ]; then - _cleanup_ceilometer_apache_wsgi - fi -} - -function _config_ceilometer_apache_wsgi { - sudo mkdir -p $CEILOMETER_WSGI_DIR - - local ceilometer_apache_conf=$(apache_site_config_for ceilometer) - local apache_version=$(get_apache_version) - local venv_path="" - - # Copy proxy vhost and wsgi file - sudo cp $CEILOMETER_DIR/ceilometer/api/app.wsgi $CEILOMETER_WSGI_DIR/app - - if [[ ${USE_VENV} = True ]]; then - venv_path="python-path=${PROJECT_VENV["ceilometer"]}/lib/$(python_version)/site-packages" - fi - - sudo cp $FILES/apache-ceilometer.template $ceilometer_apache_conf - sudo sed -e " - s|%PORT%|$CEILOMETER_SERVICE_PORT|g; - s|%APACHE_NAME%|$APACHE_NAME|g; - s|%WSGIAPP%|$CEILOMETER_WSGI_DIR/app|g; - s|%USER%|$STACK_USER|g; - s|%VIRTUALENV%|$venv_path|g - " -i $ceilometer_apache_conf -} - -# configure_ceilometer() - Set config files, create data dirs, etc -function configure_ceilometer { - sudo install -d -o $STACK_USER -m 755 $CEILOMETER_CONF_DIR $CEILOMETER_API_LOG_DIR - - iniset_rpc_backend ceilometer $CEILOMETER_CONF - - iniset $CEILOMETER_CONF DEFAULT notification_topics "$CEILOMETER_NOTIFICATION_TOPICS" - iniset $CEILOMETER_CONF DEFAULT verbose True - iniset $CEILOMETER_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL" - - if [[ -n 
"$CEILOMETER_COORDINATION_URL" ]]; then - iniset $CEILOMETER_CONF coordination backend_url $CEILOMETER_COORDINATION_URL - iniset $CEILOMETER_CONF compute workload_partitioning True - fi - - # Install the policy file for the API server - cp $CEILOMETER_DIR/etc/ceilometer/policy.json $CEILOMETER_CONF_DIR - iniset $CEILOMETER_CONF oslo_policy policy_file $CEILOMETER_CONF_DIR/policy.json - - cp $CEILOMETER_DIR/etc/ceilometer/pipeline.yaml $CEILOMETER_CONF_DIR - cp $CEILOMETER_DIR/etc/ceilometer/event_pipeline.yaml $CEILOMETER_CONF_DIR - cp $CEILOMETER_DIR/etc/ceilometer/api_paste.ini $CEILOMETER_CONF_DIR - cp $CEILOMETER_DIR/etc/ceilometer/event_definitions.yaml $CEILOMETER_CONF_DIR - cp $CEILOMETER_DIR/etc/ceilometer/gnocchi_archive_policy_map.yaml $CEILOMETER_CONF_DIR - cp $CEILOMETER_DIR/etc/ceilometer/gnocchi_resources.yaml $CEILOMETER_CONF_DIR - - if [ "$CEILOMETER_PIPELINE_INTERVAL" ]; then - sed -i "s/interval:.*/interval: ${CEILOMETER_PIPELINE_INTERVAL}/" $CEILOMETER_CONF_DIR/pipeline.yaml - fi - - # The compute and central agents need these credentials in order to - # call out to other services' public APIs. 
- # The alarm evaluator needs these options to call ceilometer APIs - iniset $CEILOMETER_CONF service_credentials os_username ceilometer - iniset $CEILOMETER_CONF service_credentials os_password $SERVICE_PASSWORD - iniset $CEILOMETER_CONF service_credentials os_tenant_name $SERVICE_TENANT_NAME - iniset $CEILOMETER_CONF service_credentials os_region_name $REGION_NAME - iniset $CEILOMETER_CONF service_credentials os_auth_url $KEYSTONE_SERVICE_URI/v2.0 - - configure_auth_token_middleware $CEILOMETER_CONF ceilometer $CEILOMETER_AUTH_CACHE_DIR - - iniset $CEILOMETER_CONF notification store_events $CEILOMETER_EVENTS - - if [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] ; then - iniset $CEILOMETER_CONF database alarm_connection $(database_connection_url ceilometer) - iniset $CEILOMETER_CONF database event_connection $(database_connection_url ceilometer) - iniset $CEILOMETER_CONF database metering_connection $(database_connection_url ceilometer) - iniset $CEILOMETER_CONF DEFAULT collector_workers $API_WORKERS - elif [ "$CEILOMETER_BACKEND" = 'es' ] ; then - # es is only supported for events. we will use sql for alarming/metering. 
- iniset $CEILOMETER_CONF database alarm_connection $(database_connection_url ceilometer) - iniset $CEILOMETER_CONF database event_connection es://localhost:9200 - iniset $CEILOMETER_CONF database metering_connection $(database_connection_url ceilometer) - iniset $CEILOMETER_CONF DEFAULT collector_workers $API_WORKERS - ${TOP_DIR}/pkg/elasticsearch.sh start - cleanup_ceilometer - elif [ "$CEILOMETER_BACKEND" = 'mongodb' ] ; then - iniset $CEILOMETER_CONF database alarm_connection mongodb://localhost:27017/ceilometer - iniset $CEILOMETER_CONF database event_connection mongodb://localhost:27017/ceilometer - iniset $CEILOMETER_CONF database metering_connection mongodb://localhost:27017/ceilometer - configure_mongodb - cleanup_ceilometer - else - die $LINENO "Unable to configure unknown CEILOMETER_BACKEND $CEILOMETER_BACKEND" - fi - - if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then - iniset $CEILOMETER_CONF DEFAULT hypervisor_inspector vsphere - iniset $CEILOMETER_CONF vmware host_ip "$VMWAREAPI_IP" - iniset $CEILOMETER_CONF vmware host_username "$VMWAREAPI_USER" - iniset $CEILOMETER_CONF vmware host_password "$VMWAREAPI_PASSWORD" - fi - - if [ "$CEILOMETER_USE_MOD_WSGI" == "True" ]; then - iniset $CEILOMETER_CONF api pecan_debug "False" - _config_ceilometer_apache_wsgi - fi - - if is_service_enabled ceilometer-aipmi; then - # Configure rootwrap for the ipmi agent - configure_rootwrap ceilometer - fi -} - -function configure_mongodb { - # Server package is the same on all - local packages=mongodb-server - - if is_fedora; then - # mongodb client - packages="${packages} mongodb" - fi - - install_package ${packages} - - if is_fedora; then - # Ensure smallfiles is selected to minimize freespace requirements - sudo sed -i '/--smallfiles/!s/OPTIONS=\"/OPTIONS=\"--smallfiles /' /etc/sysconfig/mongod - - restart_service mongod - fi - - # Give mongodb time to start-up - sleep 5 -} - -# init_ceilometer() - Initialize etc. 
-function init_ceilometer { - # Create cache dir - sudo install -d -o $STACK_USER $CEILOMETER_AUTH_CACHE_DIR - rm -f $CEILOMETER_AUTH_CACHE_DIR/* - - if is_service_enabled mysql postgresql; then - if [ "$CEILOMETER_BACKEND" = 'mysql' ] || [ "$CEILOMETER_BACKEND" = 'postgresql' ] || [ "$CEILOMETER_BACKEND" = 'es' ] ; then - recreate_database ceilometer - $CEILOMETER_BIN_DIR/ceilometer-dbsync - fi - fi -} - -# install_redis() - Install the redis server. -function install_redis { - if is_ubuntu; then - install_package redis-server - restart_service redis-server - else - # This will fail (correctly) where a redis package is unavailable - install_package redis - restart_service redis - fi -} - -# install_ceilometer() - Collect source and prepare -function install_ceilometer { - git_clone $CEILOMETER_REPO $CEILOMETER_DIR $CEILOMETER_BRANCH - setup_develop $CEILOMETER_DIR - - if echo $CEILOMETER_COORDINATION_URL | grep -q '^memcached:'; then - install_package memcached - elif echo $CEILOMETER_COORDINATION_URL | grep -q '^redis:'; then - install_redis - fi - - if [ "$CEILOMETER_BACKEND" = 'mongodb' ] ; then - pip_install_gr pymongo - fi - - # Only install virt drivers if we're running nova compute - if is_service_enabled n-cpu ; then - if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then - pip_install_gr libvirt-python - fi - - if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then - pip_install_gr oslo.vmware - fi - fi - - if [ "$CEILOMETER_BACKEND" = 'es' ] ; then - ${TOP_DIR}/pkg/elasticsearch.sh download - ${TOP_DIR}/pkg/elasticsearch.sh install - fi -} - -# install_ceilometerclient() - Collect source and prepare -function install_ceilometerclient { - if use_library_from_git "python-ceilometerclient"; then - git_clone_by_name "python-ceilometerclient" - setup_dev_lib "python-ceilometerclient" - sudo install -D -m 0644 -o $STACK_USER {${GITDIR["python-ceilometerclient"]}/tools/,/etc/bash_completion.d/}ceilometer.bash_completion - fi -} - -# start_ceilometer() - Start running processes, 
including screen -function start_ceilometer { - run_process ceilometer-acentral "$CEILOMETER_BIN_DIR/ceilometer-polling --polling-namespaces central --config-file $CEILOMETER_CONF" - run_process ceilometer-anotification "$CEILOMETER_BIN_DIR/ceilometer-agent-notification --config-file $CEILOMETER_CONF" - run_process ceilometer-collector "$CEILOMETER_BIN_DIR/ceilometer-collector --config-file $CEILOMETER_CONF" - run_process ceilometer-aipmi "$CEILOMETER_BIN_DIR/ceilometer-polling --polling-namespaces ipmi --config-file $CEILOMETER_CONF" - - if [[ "$CEILOMETER_USE_MOD_WSGI" == "False" ]]; then - run_process ceilometer-api "$CEILOMETER_BIN_DIR/ceilometer-api -d -v --log-dir=$CEILOMETER_API_LOG_DIR --config-file $CEILOMETER_CONF" - else - enable_apache_site ceilometer - restart_apache_server - tail_log ceilometer /var/log/$APACHE_NAME/ceilometer.log - tail_log ceilometer-api /var/log/$APACHE_NAME/ceilometer_access.log - fi - - - # Start the compute agent last to allow time for the collector to - # fully wake up and connect to the message bus. See bug #1355809 - if [[ "$VIRT_DRIVER" = 'libvirt' ]]; then - run_process ceilometer-acompute "$CEILOMETER_BIN_DIR/ceilometer-polling --polling-namespaces compute --config-file $CEILOMETER_CONF" $LIBVIRT_GROUP - fi - if [[ "$VIRT_DRIVER" = 'vsphere' ]]; then - run_process ceilometer-acompute "$CEILOMETER_BIN_DIR/ceilometer-polling --polling-namespaces compute --config-file $CEILOMETER_CONF" - fi - - # Only die on API if it was actually intended to be turned on - if is_service_enabled ceilometer-api; then - echo "Waiting for ceilometer-api to start..." - if ! 
wait_for_service $SERVICE_TIMEOUT $CEILOMETER_SERVICE_PROTOCOL://$CEILOMETER_SERVICE_HOST:$CEILOMETER_SERVICE_PORT/v2/; then - die $LINENO "ceilometer-api did not start" - fi - fi - - run_process ceilometer-alarm-notifier "$CEILOMETER_BIN_DIR/ceilometer-alarm-notifier --config-file $CEILOMETER_CONF" - run_process ceilometer-alarm-evaluator "$CEILOMETER_BIN_DIR/ceilometer-alarm-evaluator --config-file $CEILOMETER_CONF" -} - -# stop_ceilometer() - Stop running processes -function stop_ceilometer { - if [ "$CEILOMETER_USE_MOD_WSGI" == "True" ]; then - disable_apache_site ceilometer - restart_apache_server - fi - # Kill the ceilometer screen windows - for serv in ceilometer-acompute ceilometer-acentral ceilometer-aipmi ceilometer-anotification ceilometer-collector ceilometer-api ceilometer-alarm-notifier ceilometer-alarm-evaluator; do - stop_process $serv - done -} - - -# Restore xtrace -$XTRACE - -# Tell emacs to use shell-script-mode -## Local variables: -## mode: shell-script -## End: diff --git a/stack.sh b/stack.sh index accfd0ac3e..8a9a28fd67 100755 --- a/stack.sh +++ b/stack.sh @@ -1,9 +1,8 @@ #!/usr/bin/env bash # ``stack.sh`` is an opinionated OpenStack developer installation. It -# installs and configures various combinations of **Ceilometer**, **Cinder**, -# **Glance**, **Heat**, **Horizon**, **Keystone**, **Nova**, **Neutron**, -# and **Swift** +# installs and configures various combinations of **Cinder**, **Glance**, +# **Heat**, **Horizon**, **Keystone**, **Nova**, **Neutron**, and **Swift** # This script's options can be changed by setting appropriate environment # variables. 
You can configure things like which git repositories to use, @@ -542,7 +541,6 @@ source $TOP_DIR/lib/glance source $TOP_DIR/lib/nova source $TOP_DIR/lib/cinder source $TOP_DIR/lib/swift -source $TOP_DIR/lib/ceilometer source $TOP_DIR/lib/heat source $TOP_DIR/lib/neutron-legacy source $TOP_DIR/lib/ldap @@ -826,13 +824,6 @@ if is_service_enabled horizon; then configure_horizon fi -if is_service_enabled ceilometer; then - install_ceilometerclient - stack_install_service ceilometer - echo_summary "Configuring Ceilometer" - configure_ceilometer -fi - if is_service_enabled heat; then stack_install_service heat install_heat_other @@ -1008,10 +999,6 @@ if is_service_enabled keystone; then create_cinder_accounts create_neutron_accounts - if is_service_enabled ceilometer; then - create_ceilometer_accounts - fi - if is_service_enabled swift; then create_swift_accounts fi @@ -1255,11 +1242,6 @@ if is_service_enabled cinder; then start_cinder create_volume_types fi -if is_service_enabled ceilometer; then - echo_summary "Starting Ceilometer" - init_ceilometer - start_ceilometer -fi # Configure and launch Heat engine, api and metadata if is_service_enabled heat; then diff --git a/stackrc b/stackrc index ca897a1c4d..12c315ad78 100644 --- a/stackrc +++ b/stackrc @@ -181,10 +181,6 @@ REQUIREMENTS_DIR=$DEST/requirements # ############## -# telemetry service -CEILOMETER_REPO=${CEILOMETER_REPO:-${GIT_BASE}/openstack/ceilometer.git} -CEILOMETER_BRANCH=${CEILOMETER_BRANCH:-master} - # block storage service CINDER_REPO=${CINDER_REPO:-${GIT_BASE}/openstack/cinder.git} CINDER_BRANCH=${CINDER_BRANCH:-master} @@ -258,10 +254,6 @@ GITBRANCH["tempest-lib"]=${TEMPEST_LIB_BRANCH:-master} # ############## -# ceilometer client library -GITREPO["python-ceilometerclient"]=${CEILOMETERCLIENT_REPO:-${GIT_BASE}/openstack/python-ceilometerclient.git} -GITBRANCH["python-ceilometerclient"]=${CEILOMETERCLIENT_BRANCH:-master} - # volume client 
GITREPO["python-cinderclient"]=${CINDERCLIENT_REPO:-${GIT_BASE}/openstack/python-cinderclient.git} GITBRANCH["python-cinderclient"]=${CINDERCLIENT_BRANCH:-master} @@ -449,6 +441,7 @@ SWIFT3_BRANCH=${SWIFT3_BRANCH:-master} # ceilometer middleware GITREPO["ceilometermiddleware"]=${CEILOMETERMIDDLEWARE_REPO:-${GIT_BASE}/openstack/ceilometermiddleware.git} GITBRANCH["ceilometermiddleware"]=${CEILOMETERMIDDLEWARE_BRANCH:-master} +GITDIR["ceilometermiddleware"]=$DEST/ceilometermiddleware # os-brick library to manage local volume attaches GITREPO["os-brick"]=${OS_BRICK_REPO:-${GIT_BASE}/openstack/os-brick.git} diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh index cf6ec1cbff..8e8c0227a9 100755 --- a/tests/test_libs_from_pypi.sh +++ b/tests/test_libs_from_pypi.sh @@ -37,7 +37,7 @@ ALL_LIBS+=" python-cinderclient glance_store oslo.concurrency oslo.db" ALL_LIBS+=" oslo.versionedobjects oslo.vmware keystonemiddleware" ALL_LIBS+=" oslo.serialization django_openstack_auth" ALL_LIBS+=" python-openstackclient oslo.rootwrap oslo.i18n" -ALL_LIBS+=" python-ceilometerclient oslo.utils python-swiftclient" +ALL_LIBS+=" oslo.utils python-swiftclient" ALL_LIBS+=" python-neutronclient tooz ceilometermiddleware oslo.policy" ALL_LIBS+=" debtcollector os-brick automaton futurist oslo.service" ALL_LIBS+=" oslo.cache oslo.reports" diff --git a/unstack.sh b/unstack.sh index 10e595809a..6fa8314d4e 100755 --- a/unstack.sh +++ b/unstack.sh @@ -65,7 +65,6 @@ source $TOP_DIR/lib/glance source $TOP_DIR/lib/nova source $TOP_DIR/lib/cinder source $TOP_DIR/lib/swift -source $TOP_DIR/lib/ceilometer source $TOP_DIR/lib/heat source $TOP_DIR/lib/neutron-legacy source $TOP_DIR/lib/ldap @@ -104,10 +103,6 @@ if is_service_enabled heat; then stop_heat fi -if is_service_enabled ceilometer; then - stop_ceilometer -fi - if is_service_enabled nova; then stop_nova fi From 1c394829d1a6523b6b0b2f449b9ba92f5e8c472b Mon Sep 17 00:00:00 2001 From: Christian Berendt Date: Thu, 10 Sep 2015 12:15:16 
+0200 Subject: [PATCH 0469/2941] Fix the FLOATING_RANGE in the Neutron guide The range should be 172.18.161.0/24 and not 172.18.161.1/24. Change-Id: I29bb24a1a278c285a00cd69188de340670891c53 --- doc/source/guides/neutron.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst index 40a5632b86..2973eb63e8 100644 --- a/doc/source/guides/neutron.rst +++ b/doc/source/guides/neutron.rst @@ -59,7 +59,7 @@ DevStack Configuration ## Neutron options Q_USE_SECGROUP=True - FLOATING_RANGE="172.18.161.1/24" + FLOATING_RANGE="172.18.161.0/24" FIXED_RANGE="10.0.0.0/24" Q_FLOATING_ALLOCATION_POOL=start=172.18.161.250,end=172.18.161.254 PUBLIC_NETWORK_GATEWAY="172.18.161.1" From 3db0aad63c182a88830ec61fc048240058c02d85 Mon Sep 17 00:00:00 2001 From: Christian Schwede Date: Thu, 10 Sep 2015 11:15:39 +0000 Subject: [PATCH 0470/2941] Fix is_service_enabled when using multiple arguments is_service_enabled might actually fail to return the expected result if there is a is_${service}_enabled function available and multiple services are checked. For example, if one defines swift as a service but disables glance, the following check fails: if is_service_enabled swift glance horizon; then install_swiftclient fi This is because the second for-iteration resets the local "enabled" var again to 1 and finally exits with 1 as a return code. This patch fixes this. Change-Id: Ic76b72897efe9597d1412470353895001a1a4c66 --- functions-common | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/functions-common b/functions-common index 446de5374f..705c3149dd 100644 --- a/functions-common +++ b/functions-common @@ -1803,8 +1803,7 @@ function is_service_enabled { # Look for top-level 'enabled' function for this service if type is_${service}_enabled >/dev/null 2>&1; then # A function exists for this service, use it - is_${service}_enabled - enabled=$? 
+ is_${service}_enabled && enabled=0 fi # TODO(dtroyer): Remove these legacy special-cases after the is_XXX_enabled() From 17fe88a72c65fa1f926f3ada5a0cfb45bf6649a9 Mon Sep 17 00:00:00 2001 From: Hidekazu Nakamura Date: Fri, 11 Sep 2015 19:50:26 +0900 Subject: [PATCH 0471/2941] Fix typo in lib/keystone nonadmin => nonadmins Change-Id: I9d51e079c10f7c48b962a1d6f4577e8a6ec4a229 --- lib/keystone | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/keystone b/lib/keystone index e2448c9068..6520fc6035 100644 --- a/lib/keystone +++ b/lib/keystone @@ -353,7 +353,7 @@ function configure_keystone_extensions { # Group Users Roles Tenant # ------------------------------------------------------------------ # admins admin admin admin -# nonadmin demo Member, anotherrole demo +# nonadmins demo Member, anotherrole demo # Migrated from keystone_data.sh From 2105b9f9ce325394d205d9c5f7a7427141bc4ebd Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 10 Sep 2015 14:01:40 -0400 Subject: [PATCH 0472/2941] move back to editable install for oslo Now that we don't have namespace packages any more, editable installs should be fine. This also means that we apply constraints to these libraries during installation, which is important for future testing. This is needed in order to be able to easily sanity check LIBS_FROM_GIT, as then all libs installed from git will have pip urls with git in them. Change-Id: I46c3b8f943b97f912eccc7278e3e033ae67e7e31 --- lib/infra | 2 +- lib/oslo | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/infra b/lib/infra index 89397de792..ab32efecd9 100644 --- a/lib/infra +++ b/lib/infra @@ -41,7 +41,7 @@ function install_infra { # Install pbr if use_library_from_git "pbr"; then git_clone_by_name "pbr" - setup_lib "pbr" + setup_dev_lib "pbr" else # Always upgrade pbr to latest version as we may have pulled it # in via system packages. 
diff --git a/lib/oslo b/lib/oslo index 123572cd7b..f64f327ccd 100644 --- a/lib/oslo +++ b/lib/oslo @@ -59,7 +59,7 @@ function _do_install_oslo_lib { local name=$1 if use_library_from_git "$name"; then git_clone_by_name "$name" - setup_lib "$name" + setup_dev_lib "$name" fi } From c71973eb04d05c2497eb930c4e1b59dcaf983085 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 8 Sep 2015 07:12:48 -0400 Subject: [PATCH 0473/2941] check LIBS_FROM_GIT after the glance_store vs. upper-constraints bug, it's probably worth actually enforcing and sanity checking that devstack is doing what it's being asked of with LIBS_FROM_GIT. This will hopefully reduce user generated error. This *might* not work with the current oslo naming, we'll have to test and normalize that. Change-Id: Iffef2007f99a0e932b68c4c897ebbfb748cac2b4 --- inc/python | 22 ++++++++++++++++++++++ stack.sh | 7 +++++++ 2 files changed, 29 insertions(+) diff --git a/inc/python b/inc/python index 5c9dc5c3e5..210a9dbdfe 100644 --- a/inc/python +++ b/inc/python @@ -157,6 +157,28 @@ function use_library_from_git { return $enabled } +# determine if a package was installed from git +function lib_installed_from_git { + local name=$1 + pip freeze 2>/dev/null | grep -- "$name" | grep -q -- '-e git' +} + +# check that everything that's in LIBS_FROM_GIT was actually installed +# correctly, this helps double check issues with library fat fingering. +function check_libs_from_git { + local lib="" + local not_installed="" + for lib in $(echo ${LIBS_FROM_GIT} | tr "," " "); do + if ! lib_installed_from_git "$lib"; then + not_installed+=" $lib" + fi + done + # if anything is not installed, say what it is. + if [[ -n "$not_installed" ]]; then + die $LINENO "The following LIBS_FROM_GIT were not installed correct: $not_installed" + fi +} + # setup a library by name. 
If we are trying to use the library from # git, we'll do a git based install, otherwise we'll punt and the # library should be installed by a requirements pull from another diff --git a/stack.sh b/stack.sh index accfd0ac3e..638d471654 100755 --- a/stack.sh +++ b/stack.sh @@ -1373,9 +1373,16 @@ if [[ -x $TOP_DIR/local.sh ]]; then $TOP_DIR/local.sh fi +# Sanity checks +# ============= + # Check the status of running services service_check +# ensure that all the libraries we think we installed from git, +# actually were. +check_libs_from_git + # Bash completion # =============== From be65c6f88d5922e356178a2958afa9ae452fb85e Mon Sep 17 00:00:00 2001 From: Wei Jiangang Date: Mon, 14 Sep 2015 18:52:47 +0800 Subject: [PATCH 0474/2941] Fix typos in stackrc and unstack.sh Componets => Components pluggins => plugins Change-Id: I82634a55fd5895599099c94817af7d8d2f602859 --- stackrc | 2 +- unstack.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/stackrc b/stackrc index 156cb1f85a..05956ee0b2 100644 --- a/stackrc +++ b/stackrc @@ -255,7 +255,7 @@ GITBRANCH["tempest-lib"]=${TEMPEST_LIB_BRANCH:-master} ############## # -# OpenStack Client Library Componets +# OpenStack Client Library Components # ############## diff --git a/unstack.sh b/unstack.sh index 10e595809a..27d6719af9 100755 --- a/unstack.sh +++ b/unstack.sh @@ -45,7 +45,7 @@ fi # Configure Projects # ================== -# Plugin Phase 0: override_defaults - allow pluggins to override +# Plugin Phase 0: override_defaults - allow plugins to override # defaults before other services are run run_phase override_defaults From a29434460e869b7bb397044d8f073531e4ee112d Mon Sep 17 00:00:00 2001 From: Ivan Kolodyazhny Date: Tue, 23 Jun 2015 19:09:34 +0300 Subject: [PATCH 0475/2941] Disable Cinder v1 API support by default Cinder API v1 will be removed Mitaka so we don't need to setup it be default. To enable Cinder API v1 you need to set CINDER_ENABLE_V1_API=True in your Devstack config. 
Related-Bug: #1467589 Depends-On: I6916eb3e4b7c85f37be8b365b11ca8b48f88177c Change-Id: I0754e357433cfcd9fde7e937a4a1b440580b6289 --- doc/source/configuration.rst | 7 +++++++ lib/cinder | 29 +++++++++++++++++------------ lib/tempest | 9 +++++++++ 3 files changed, 33 insertions(+), 12 deletions(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 983f5c0aae..3bd246d870 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -587,6 +587,13 @@ with ``VOLUME_BACKING_FILE_SIZE``. VOLUME_BACKING_FILE_SIZE=10250M +Cinder v1 API is depricated and disabled by default. You can enable v1 API by +setting ``CINDER_ENABLE_V1_API`` to ``True``. + + :: + CINDER_ENABLE_V1_API=True + + Keystone ~~~~~~~~ diff --git a/lib/cinder b/lib/cinder index 26277ccaba..f0b0f1d7e5 100644 --- a/lib/cinder +++ b/lib/cinder @@ -27,6 +27,9 @@ set +o xtrace # Defaults # -------- +# NOTE (e0ne): Cinder API v1 is deprecated and will be disabled by default. +CINDER_ENABLE_V1_API=$(trueorfalse False CINDER_ENABLE_V1_API) + # set up default driver CINDER_DRIVER=${CINDER_DRIVER:-default} CINDER_PLUGINS=$TOP_DIR/lib/cinder_plugins @@ -225,9 +228,12 @@ function configure_cinder { iniset $CINDER_CONF oslo_concurrency lock_path $CINDER_STATE_PATH iniset $CINDER_CONF DEFAULT periodic_interval $CINDER_PERIODIC_INTERVAL # NOTE(thingee): Cinder V1 API is deprecated and defaults to off as of - # Juno. Keep it enabled so we can continue testing while it's still - # supported. - iniset $CINDER_CONF DEFAULT enable_v1_api true + # Juno. 
+ if [[ ${CINDER_ENABLE_V1_API} = True ]]; then + iniset $CINDER_CONF DEFAULT enable_v1_api true + else + iniset $CINDER_CONF DEFAULT enable_v1_api false + fi iniset $CINDER_CONF DEFAULT os_region_name "$REGION_NAME" @@ -326,12 +332,13 @@ function create_cinder_accounts { if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - get_or_create_service "cinder" "volume" "Cinder Volume Service" - get_or_create_endpoint "volume" "$REGION_NAME" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" - + if [[ ${CINDER_ENABLE_V1_API} = True ]]; then + get_or_create_service "cinder" "volume" "Cinder Volume Service" + get_or_create_endpoint "volume" "$REGION_NAME" \ + "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \ + "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \ + "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" + fi get_or_create_service "cinderv2" "volumev2" "Cinder Volume Service V2" get_or_create_endpoint "volumev2" "$REGION_NAME" \ "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" \ @@ -482,9 +489,7 @@ function create_volume_types { local be be_name for be in ${CINDER_ENABLED_BACKENDS//,/ }; do be_name=${be##*:} - # FIXME(jamielennox): Remove --os-volume-api-version pinning when - # osc supports volume type create on v2 api. 
bug #1475060 - openstack volume type create --os-volume-api-version 1 --property volume_backend_name="${be_name}" ${be_name} + openstack volume type create --os-volume-api-version 2 --property volume_backend_name="${be_name}" ${be_name} done fi } diff --git a/lib/tempest b/lib/tempest index d372e0f6ca..3624b9605e 100644 --- a/lib/tempest +++ b/lib/tempest @@ -494,6 +494,15 @@ function configure_tempest { iniset $TEMPEST_CONFIG volume-feature-enabled backup False fi + # Use only Cinder API v2 + if [[ ${CINDER_ENABLE_V1_API} = True ]]; then + iniset $TEMPEST_CONFIG volume-feature-enabled api_v1 True + iniset $TEMPEST_CONFIG volume catalog_type volume + else + iniset $TEMPEST_CONFIG volume-feature-enabled api_v1 False + iniset $TEMPEST_CONFIG volume catalog_type volumev2 + fi + # Using ``CINDER_ENABLED_BACKENDS`` if [[ -n "$CINDER_ENABLED_BACKENDS" ]] && [[ $CINDER_ENABLED_BACKENDS =~ .*,.* ]]; then iniset $TEMPEST_CONFIG volume-feature-enabled multi_backend "True" From 624ab1e65dac94572de04e4a12b28d31e342faf1 Mon Sep 17 00:00:00 2001 From: Flavio Percoco Date: Thu, 30 Apr 2015 08:54:15 +0200 Subject: [PATCH 0476/2941] Remove Zaqar from devstack Zaqar's devstack code has been moved into a plugin in the Zaqar repo. This patch removes the remaining code from devstack. 
Depends-On: Iceefabb6cd528b23075a91e8039b8264eb3f33f5 Change-Id: Ifcf54fa2d4a5bf49b6757b593bb70cdeda8edb2a --- doc/source/index.rst | 4 +- exercises/zaqar.sh | 43 -------- extras.d/70-zaqar.sh | 29 ------ files/debs/zaqar-server | 4 - files/rpms/zaqar-server | 5 - lib/tempest | 2 +- lib/zaqar | 225 ---------------------------------------- 7 files changed, 2 insertions(+), 310 deletions(-) delete mode 100755 exercises/zaqar.sh delete mode 100644 extras.d/70-zaqar.sh delete mode 100644 files/debs/zaqar-server delete mode 100644 files/rpms/zaqar-server delete mode 100644 lib/zaqar diff --git a/doc/source/index.rst b/doc/source/index.rst index 2dd0241fba..21fec590ef 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -173,7 +173,7 @@ Scripts * `lib/swift `__ * `lib/tempest `__ * `lib/tls `__ -* `lib/zaqar `__ +* `lib/trove `__ * `unstack.sh `__ * `clean.sh `__ * `run\_tests.sh `__ @@ -181,7 +181,6 @@ Scripts * `extras.d/50-ironic.sh `__ * `extras.d/60-ceph.sh `__ * `extras.d/70-tuskar.sh `__ -* `extras.d/70-zaqar.sh `__ * `extras.d/80-tempest.sh `__ * `inc/ini-config `__ @@ -239,4 +238,3 @@ Exercises * `exercises/sec\_groups.sh `__ * `exercises/swift.sh `__ * `exercises/volumes.sh `__ -* `exercises/zaqar.sh `__ diff --git a/exercises/zaqar.sh b/exercises/zaqar.sh deleted file mode 100755 index c370b12c85..0000000000 --- a/exercises/zaqar.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env bash - -# **zaqar.sh** - -# Sanity check that Zaqar started if enabled - -echo "*********************************************************************" -echo "Begin DevStack Exercise: $0" -echo "*********************************************************************" - -# This script exits on an error so that errors don't compound and you see -# only the first error that occurred. -set -o errexit - -# Print the commands being run so that we can see the command that triggers -# an error. It is also useful for following allowing as the install occurs. 
-set -o xtrace - - -# Settings -# ======== - -# Keep track of the current directory -EXERCISE_DIR=$(cd $(dirname "$0") && pwd) -TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) - -# Import common functions -source $TOP_DIR/functions - -# Import configuration -source $TOP_DIR/openrc - -# Import exercise configuration -source $TOP_DIR/exerciserc - -is_service_enabled zaqar-server || exit 55 - -$CURL_GET http://$SERVICE_HOST:8888/v1/ 2>/dev/null | grep -q 'queue_name' || die $LINENO "Zaqar API not functioning!" - -set +o xtrace -echo "*********************************************************************" -echo "SUCCESS: End DevStack Exercise: $0" -echo "*********************************************************************" diff --git a/extras.d/70-zaqar.sh b/extras.d/70-zaqar.sh deleted file mode 100644 index 63c4fd5ad5..0000000000 --- a/extras.d/70-zaqar.sh +++ /dev/null @@ -1,29 +0,0 @@ -# zaqar.sh - Devstack extras script to install Zaqar - -if is_service_enabled zaqar-server; then - if [[ "$1" == "source" ]]; then - # Initial source - source $TOP_DIR/lib/zaqar - elif [[ "$1" == "stack" && "$2" == "install" ]]; then - echo_summary "Installing Zaqar" - install_zaqarclient - install_zaqar - elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then - echo_summary "Configuring Zaqar" - configure_zaqar - configure_zaqarclient - - if is_service_enabled key; then - create_zaqar_accounts - fi - - elif [[ "$1" == "stack" && "$2" == "extra" ]]; then - echo_summary "Initializing Zaqar" - init_zaqar - start_zaqar - fi - - if [[ "$1" == "unstack" ]]; then - stop_zaqar - fi -fi diff --git a/files/debs/zaqar-server b/files/debs/zaqar-server deleted file mode 100644 index 6c2a4d154a..0000000000 --- a/files/debs/zaqar-server +++ /dev/null @@ -1,4 +0,0 @@ -python-pymongo -mongodb-server -pkg-config -redis-server # NOPRIME \ No newline at end of file diff --git a/files/rpms/zaqar-server b/files/rpms/zaqar-server deleted file mode 100644 index 78806fb3f6..0000000000 --- a/files/rpms/zaqar-server 
+++ /dev/null @@ -1,5 +0,0 @@ -selinux-policy-targeted -mongodb -mongodb-server -pymongo -redis # NOPRIME diff --git a/lib/tempest b/lib/tempest index 71181cca21..f4d0a6dab0 100644 --- a/lib/tempest +++ b/lib/tempest @@ -550,7 +550,7 @@ function configure_tempest { # this tempest service list needs to be all the services that # tempest supports, otherwise we can have an erroneous set of # defaults (something defaulting true in Tempest, but not listed here). - TEMPEST_SERVICES="key,glance,nova,neutron,cinder,swift,heat,ceilometer,horizon,sahara,ironic,trove,zaqar" + TEMPEST_SERVICES="key,glance,nova,neutron,cinder,swift,heat,ceilometer,horizon,sahara,ironic,trove" for service in ${TEMPEST_SERVICES//,/ }; do if is_service_enabled $service ; then iniset $TEMPEST_CONFIG service_available $service "True" diff --git a/lib/zaqar b/lib/zaqar deleted file mode 100644 index aa21aac271..0000000000 --- a/lib/zaqar +++ /dev/null @@ -1,225 +0,0 @@ -#!/bin/bash -# -# lib/zaqar -# Install and start **Zaqar** service - -# To enable a minimal set of Zaqar services, add the following to localrc: -# -# enable_service zaqar-server -# -# Dependencies: -# - functions -# - OS_AUTH_URL for auth in api -# - DEST set to the destination directory -# - SERVICE_PASSWORD, SERVICE_TENANT_NAME for auth in api -# - STACK_USER service user - -# stack.sh -# --------- -# install_zaqar -# configure_zaqar -# init_zaqar -# start_zaqar -# stop_zaqar -# cleanup_zaqar -# cleanup_zaqar_mongodb - -# Save trace setting -XTRACE=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - -# Set up default directories -ZAQAR_DIR=$DEST/zaqar -ZAQARCLIENT_DIR=$DEST/python-zaqarclient -ZAQAR_CONF_DIR=/etc/zaqar -ZAQAR_CONF=$ZAQAR_CONF_DIR/zaqar.conf -ZAQAR_AUTH_CACHE_DIR=${ZAQAR_AUTH_CACHE_DIR:-/var/cache/zaqar} - -# Support potential entry-points console scripts -ZAQAR_BIN_DIR=$(get_python_exec_prefix) - -# Set up database backend -ZAQAR_BACKEND=${ZAQAR_BACKEND:-mongodb} - - -# Set Zaqar repository 
-ZAQAR_REPO=${ZAQAR_REPO:-${GIT_BASE}/openstack/zaqar.git} -ZAQAR_BRANCH=${ZAQAR_BRANCH:-master} - -# Set client library repository -ZAQARCLIENT_REPO=${ZAQARCLIENT_REPO:-${GIT_BASE}/openstack/python-zaqarclient.git} -ZAQARCLIENT_BRANCH=${ZAQARCLIENT_BRANCH:-master} - -# Set Zaqar Connection Info -ZAQAR_SERVICE_HOST=${ZAQAR_SERVICE_HOST:-$SERVICE_HOST} -ZAQAR_SERVICE_PORT=${ZAQAR_SERVICE_PORT:-8888} -ZAQAR_SERVICE_PROTOCOL=${ZAQAR_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} - -# Functions -# --------- - -# Test if any Zaqar services are enabled -# is_zaqar_enabled -function is_zaqar_enabled { - [[ ,${ENABLED_SERVICES} =~ ,"zaqar-" ]] && return 0 - return 1 -} - -# cleanup_zaqar() - Cleans up general things from previous -# runs and storage specific left overs. -function cleanup_zaqar { - if [ "$ZAQAR_BACKEND" = 'mongodb' ] ; then - cleanup_zaqar_mongodb - fi -} - -# cleanup_zaqar_mongodb() - Remove residual data files, anything left over from previous -# runs that a clean run would need to clean up -function cleanup_zaqar_mongodb { - if ! timeout $SERVICE_TIMEOUT sh -c "while ! mongo zaqar --eval 'db.dropDatabase();'; do sleep 1; done"; then - die $LINENO "Mongo DB did not start" - else - full_version=$(mongo zaqar --eval 'db.dropDatabase();') - mongo_version=`echo $full_version | cut -d' ' -f4` - required_mongo_version='2.2' - if [[ $mongo_version < $required_mongo_version ]]; then - die $LINENO "Zaqar needs Mongo DB version >= 2.2 to run." 
- fi - fi -} - -# configure_zaqarclient() - Set config files, create data dirs, etc -function configure_zaqarclient { - setup_develop $ZAQARCLIENT_DIR -} - -# configure_zaqar() - Set config files, create data dirs, etc -function configure_zaqar { - setup_develop $ZAQAR_DIR - - sudo install -d -o $STACK_USER -m 755 $ZAQAR_CONF_DIR - - iniset $ZAQAR_CONF DEFAULT debug True - iniset $ZAQAR_CONF DEFAULT verbose True - iniset $ZAQAR_CONF DEFAULT admin_mode True - iniset $ZAQAR_CONF DEFAULT use_syslog $SYSLOG - iniset $ZAQAR_CONF 'drivers:transport:wsgi' bind $ZAQAR_SERVICE_HOST - - configure_auth_token_middleware $ZAQAR_CONF zaqar $ZAQAR_AUTH_CACHE_DIR - - if [ "$ZAQAR_BACKEND" = 'mysql' ] || [ "$ZAQAR_BACKEND" = 'postgresql' ] ; then - iniset $ZAQAR_CONF drivers storage sqlalchemy - iniset $ZAQAR_CONF 'drivers:storage:sqlalchemy' uri `database_connection_url zaqar` - elif [ "$ZAQAR_BACKEND" = 'mongodb' ] ; then - iniset $ZAQAR_CONF drivers storage mongodb - iniset $ZAQAR_CONF 'drivers:storage:mongodb' uri mongodb://localhost:27017/zaqar - configure_mongodb - elif [ "$ZAQAR_BACKEND" = 'redis' ] ; then - iniset $ZAQAR_CONF drivers storage redis - iniset $ZAQAR_CONF 'drivers:storage:redis' uri redis://localhost:6379 - configure_redis - fi - - iniset $ZAQAR_CONF DEFAULT notification_driver messaging - iniset $ZAQAR_CONF DEFAULT control_exchange zaqar - - iniset_rpc_backend zaqar $ZAQAR_CONF - - cleanup_zaqar -} - -function configure_redis { - if is_ubuntu; then - install_package redis-server - pip_install_gr redis - elif is_fedora; then - install_package redis - pip_install_gr redis - else - exit_distro_not_supported "redis installation" - fi -} - -function configure_mongodb { - # Set nssize to 2GB. This increases the number of namespaces supported - # # per database. 
- if is_ubuntu; then - sudo sed -i -e " - s|[^ \t]*#[ \t]*\(nssize[ \t]*=.*\$\)|\1| - s|^\(nssize[ \t]*=[ \t]*\).*\$|\1 2047| - " /etc/mongodb.conf - restart_service mongodb - elif is_fedora; then - sudo sed -i '/--nssize/!s/OPTIONS=\"/OPTIONS=\"--nssize 2047 /' /etc/sysconfig/mongod - restart_service mongod - fi -} - -# init_zaqar() - Initialize etc. -function init_zaqar { - # Create cache dir - sudo install -d -o $STACK_USER $ZAQAR_AUTH_CACHE_DIR - rm -f $ZAQAR_AUTH_CACHE_DIR/* -} - -# install_zaqar() - Collect source and prepare -function install_zaqar { - git_clone $ZAQAR_REPO $ZAQAR_DIR $ZAQAR_BRANCH - setup_develop $ZAQAR_DIR -} - -# install_zaqarclient() - Collect source and prepare -function install_zaqarclient { - git_clone $ZAQARCLIENT_REPO $ZAQARCLIENT_DIR $ZAQARCLIENT_BRANCH - setup_develop $ZAQARCLIENT_DIR -} - -# start_zaqar() - Start running processes, including screen -function start_zaqar { - if [[ "$USE_SCREEN" = "False" ]]; then - run_process zaqar-server "zaqar-server --config-file $ZAQAR_CONF --daemon" - else - run_process zaqar-server "zaqar-server --config-file $ZAQAR_CONF" - fi - - echo "Waiting for Zaqar to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! 
wget --no-proxy -q -O- $ZAQAR_SERVICE_PROTOCOL://$ZAQAR_SERVICE_HOST:$ZAQAR_SERVICE_PORT/v1/health; do sleep 1; done"; then - die $LINENO "Zaqar did not start" - fi -} - -# stop_zaqar() - Stop running processes -function stop_zaqar { - local serv - # Kill the zaqar screen windows - for serv in zaqar-server; do - screen -S $SCREEN_NAME -p $serv -X kill - done -} - -function create_zaqar_accounts { - create_service_user "zaqar" - - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - - get_or_create_service "zaqar" "messaging" "Zaqar Service" - get_or_create_endpoint "messaging" \ - "$REGION_NAME" \ - "$ZAQAR_SERVICE_PROTOCOL://$ZAQAR_SERVICE_HOST:$ZAQAR_SERVICE_PORT" \ - "$ZAQAR_SERVICE_PROTOCOL://$ZAQAR_SERVICE_HOST:$ZAQAR_SERVICE_PORT" \ - "$ZAQAR_SERVICE_PROTOCOL://$ZAQAR_SERVICE_HOST:$ZAQAR_SERVICE_PORT" - fi - -} - - -# Restore xtrace -$XTRACE - -# Local variables: -# mode: shell-script -# End: From 193d8a6e7c22695f33d2082bb330e3039b516a5b Mon Sep 17 00:00:00 2001 From: Fawad Khaliq Date: Tue, 11 Aug 2015 07:32:56 -0700 Subject: [PATCH 0477/2941] Improve PLUMgrid Install Endpoints PLUMgrid Plugin has moved out of Neutron tree and it's new home is openstack/networking-plumgrid[1]. With core vendor decomposition reaching completion, this change moves PLUMgrid Plugin install to an external DevStack plugin for better integration. 
[1] https://github.com/openstack/networking-plumgrid [2] http://docs.openstack.org/developer/neutron/devref/contribute.html Change-Id: I5bd6d8f611c2a134f2e8f14c074c1a4185d9c522 --- lib/neutron_plugins/plumgrid | 58 ------------------------------------ 1 file changed, 58 deletions(-) delete mode 100644 lib/neutron_plugins/plumgrid diff --git a/lib/neutron_plugins/plumgrid b/lib/neutron_plugins/plumgrid deleted file mode 100644 index 0d711fe8b2..0000000000 --- a/lib/neutron_plugins/plumgrid +++ /dev/null @@ -1,58 +0,0 @@ -#!/bin/bash -# -# PLUMgrid Neutron Plugin -# Edgar Magana emagana@plumgrid.com -# ------------------------------------ - -# Save trace settings -PG_XTRACE=$(set +o | grep xtrace) -set +o xtrace - -function neutron_plugin_create_nova_conf { - : -} - -function neutron_plugin_setup_interface_driver { - : -} - -function neutron_plugin_configure_common { - Q_PLUGIN_CONF_PATH=etc/neutron/plugins/plumgrid - Q_PLUGIN_CONF_FILENAME=plumgrid.ini - Q_PLUGIN_CLASS="neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2" - PLUMGRID_DIRECTOR_IP=${PLUMGRID_DIRECTOR_IP:-localhost} - PLUMGRID_DIRECTOR_PORT=${PLUMGRID_DIRECTOR_PORT:-7766} - PLUMGRID_ADMIN=${PLUMGRID_ADMIN:-username} - PLUMGRID_PASSWORD=${PLUMGRID_PASSWORD:-password} - PLUMGRID_TIMEOUT=${PLUMGRID_TIMEOUT:-70} - PLUMGRID_DRIVER=${PLUMGRID_DRIVER:-neutron.plugins.plumgrid.drivers.fake_plumlib.Plumlib} -} - -function neutron_plugin_configure_service { - iniset /$Q_PLUGIN_CONF_FILE plumgriddirector director_server $PLUMGRID_DIRECTOR_IP - iniset /$Q_PLUGIN_CONF_FILE plumgriddirector director_server_port $PLUMGRID_DIRECTOR_PORT - iniset /$Q_PLUGIN_CONF_FILE plumgriddirector username $PLUMGRID_ADMIN - iniset /$Q_PLUGIN_CONF_FILE plumgriddirector password $PLUMGRID_PASSWORD - iniset /$Q_PLUGIN_CONF_FILE plumgriddirector servertimeout $PLUMGRID_TIMEOUT - iniset /$Q_PLUGIN_CONF_FILE plumgriddirector driver $PLUMGRID_DRIVER -} - -function neutron_plugin_configure_debug_command { - : -} 
- -function is_neutron_ovs_base_plugin { - # False - return 1 -} - -function has_neutron_plugin_security_group { - # return 0 means enabled - return 0 -} - -function neutron_plugin_check_adv_test_requirements { - is_service_enabled q-agt && is_service_enabled q-dhcp && return 0 -} -# Restore xtrace -$PG_XTRACE From 7c7679ecc9f1e8dffcb886aab8ef96eafcc1d9f6 Mon Sep 17 00:00:00 2001 From: dieterly Date: Fri, 18 Sep 2015 15:10:48 -0600 Subject: [PATCH 0478/2941] Clarify new header configuration syntax Make it clear that there are no spaces in the new header section. Change-Id: I69c14017820621a3aea75e41960ac3758f7e4835 --- doc/source/configuration.rst | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 983f5c0aae..e8de7c4f70 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -18,11 +18,12 @@ A sample is provided in ``devstack/samples`` The new header is similar to a normal INI section header but with double brackets (``[[ ... ]]``) and two internal fields separated by a pipe -(``|``): - +(``|``). Note that there are no spaces between the double brackets and the +internal fields. Likewise, there are no spaces between the pipe and the +internal fields: :: - [[ | ]] + '[[' '|' ']]' where ```` is one of a set of phase names defined by ``stack.sh`` and ```` is the configuration filename. 
The filename From 261d10080900908e3377d88a82adb47b607d0174 Mon Sep 17 00:00:00 2001 From: Christian Berendt Date: Sat, 19 Sep 2015 18:44:29 +0200 Subject: [PATCH 0479/2941] registry: list rally project Change-Id: I29ec5693ad1e2c3edd5a8ed2d18a4433e4eee627 --- doc/source/plugin-registry.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 428efc4977..9bc7ce3f2d 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -26,6 +26,8 @@ The following are plugins that exist for official OpenStack projects. +--------------------+-------------------------------------------+--------------------+ |magnum |git://git.openstack.org/openstack/magnum | | +--------------------+-------------------------------------------+--------------------+ +|rally |git://git.openstack.org/openstack/rally | | ++--------------------+-------------------------------------------+--------------------+ |sahara |git://git.openstack.org/openstack/sahara | | +--------------------+-------------------------------------------+--------------------+ |trove |git://git.openstack.org/openstack/trove | | From 2a16b512640afd6290a4f023e359b83d7052a0de Mon Sep 17 00:00:00 2001 From: Christian Berendt Date: Sat, 19 Sep 2015 18:42:21 +0200 Subject: [PATCH 0480/2941] registry: list mistral project Change-Id: Icbc73b3df9cedf9bd228c23b20aebf9b69c4a39c --- doc/source/plugin-registry.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 9bc7ce3f2d..1b54c01f54 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -26,6 +26,8 @@ The following are plugins that exist for official OpenStack projects. 
+--------------------+-------------------------------------------+--------------------+ |magnum |git://git.openstack.org/openstack/magnum | | +--------------------+-------------------------------------------+--------------------+ +|mistral |git://git.openstack.org/openstack/mistral | | ++--------------------+-------------------------------------------+--------------------+ |rally |git://git.openstack.org/openstack/rally | | +--------------------+-------------------------------------------+--------------------+ |sahara |git://git.openstack.org/openstack/sahara | | From 91e3c1ec9a0e003e5cea08383852be6535e11c45 Mon Sep 17 00:00:00 2001 From: Wei Jiangang Date: Mon, 21 Sep 2015 17:51:02 +0800 Subject: [PATCH 0481/2941] lib/nova: make redirects happen in correct order It should redirect stdout to /dev/null firstly, then redirect stderr to whatever stdout currently points at. Change-Id: I4666fa90a96301f0b504a8501f0ffc3fe17616b0 --- lib/nova | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index 6441a891eb..63489502e9 100644 --- a/lib/nova +++ b/lib/nova @@ -332,7 +332,7 @@ function configure_nova { if [ ! 
-e /dev/kvm ]; then echo "WARNING: Switching to QEMU" LIBVIRT_TYPE=qemu - if which selinuxenabled 2>&1 > /dev/null && selinuxenabled; then + if which selinuxenabled >/dev/null 2>&1 && selinuxenabled; then # https://bugzilla.redhat.com/show_bug.cgi?id=753589 sudo setsebool virt_use_execmem on fi From e3340f1fe6a79166e50dc2a89e7a74fa038e9014 Mon Sep 17 00:00:00 2001 From: Wei Jiangang Date: Mon, 21 Sep 2015 17:52:14 +0800 Subject: [PATCH 0482/2941] Fix typo: falure => failure Change-Id: Ic1a53eec71d5e20194505aa8655a99b2fedd7632 --- functions-common | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/functions-common b/functions-common index f6a525354f..16abb65fb3 100644 --- a/functions-common +++ b/functions-common @@ -1453,7 +1453,7 @@ function service_check { return fi - # Check if there is any falure flag file under $SERVICE_DIR/$SCREEN_NAME + # Check if there is any failure flag file under $SERVICE_DIR/$SCREEN_NAME # make this -o errexit safe failures=`ls "$SERVICE_DIR/$SCREEN_NAME"/*.failure 2>/dev/null || /bin/true` From bf9f9a594246e6f997c7be69910efa25b6bd80d7 Mon Sep 17 00:00:00 2001 From: dieterly Date: Mon, 21 Sep 2015 13:24:00 -0600 Subject: [PATCH 0483/2941] Fix typo Change 'This' -> 'These'. Change-Id: If2f8f92d3adbb8fe4556e5c3ec53e4da31d02d49 --- doc/source/plugins.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst index 803dd08a48..fda601b414 100644 --- a/doc/source/plugins.rst +++ b/doc/source/plugins.rst @@ -178,7 +178,7 @@ System Packages =============== Devstack provides a framework for getting packages installed at an early -phase of its execution. This packages may be defined in a plugin as files +phase of its execution. These packages may be defined in a plugin as files that contain new-line separated lists of packages required by the plugin Supported packaging systems include apt and yum across multiple distributions. 
From 16a2d64f379974e7c88ef9f7879c88511e43c2eb Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Sat, 19 Sep 2015 11:19:31 -0400 Subject: [PATCH 0484/2941] Move writing of credentials earlier in the process If something goes wrong after keystone is running with services registered, but before credentials are written, it's hard to poke at the existing half-running state because none of the auth information is recorded. Write the files right after we're done bootstrapping keystone. Change-Id: I2f8ae86e17d26ec4defa16e843faa8987d27fac9 --- stack.sh | 130 ++++++++++++++++++++++++++++--------------------------- 1 file changed, 67 insertions(+), 63 deletions(-) diff --git a/stack.sh b/stack.sh index 700a0aecad..7184e59bc0 100755 --- a/stack.sh +++ b/stack.sh @@ -1034,6 +1034,73 @@ if is_service_enabled keystone; then export OS_REGION_NAME=$REGION_NAME fi +# We now have a working keystone. From this point, everything can be done +# with normal auth. Let's write out the auth config files so that if something +# goes wrong subsequently, developers debugging have stackrc and clouds.yaml +# files to use to poke at things + +# Create account rc files +# ======================= + +# Creates source able script files for easier user switching. +# This step also creates certificates for tenants and users, +# which is helpful in image bundle steps. + +if is_service_enabled nova && is_service_enabled keystone; then + USERRC_PARAMS="-PA --target-dir $TOP_DIR/accrc" + + if [ -f $SSL_BUNDLE_FILE ]; then + USERRC_PARAMS="$USERRC_PARAMS --os-cacert $SSL_BUNDLE_FILE" + fi + + if [[ "$HEAT_STANDALONE" = "True" ]]; then + USERRC_PARAMS="$USERRC_PARAMS --heat-url http://$HEAT_API_HOST:$HEAT_API_PORT/v1" + fi + + $TOP_DIR/tools/create_userrc.sh $USERRC_PARAMS +fi + + +# Save some values we generated for later use +save_stackenv + +# Update/create user clouds.yaml file. +# clouds.yaml will have +# - A `devstack` entry for the `demo` user for the `demo` project. 
+# - A `devstack-admin` entry for the `admin` user for the `admin` project. + +# The location is a variable to allow for easier refactoring later to make it +# overridable. There is currently no usecase where doing so makes sense, so +# it's not currently configurable. +CLOUDS_YAML=~/.config/openstack/clouds.yaml + +mkdir -p $(dirname $CLOUDS_YAML) + +CA_CERT_ARG='' +if [ -f "$SSL_BUNDLE_FILE" ]; then + CA_CERT_ARG="--os-cacert $SSL_BUNDLE_FILE" +fi +$TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack \ + --os-region-name $REGION_NAME \ + --os-identity-api-version $IDENTITY_API_VERSION \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_AUTH_URI/v$IDENTITY_API_VERSION \ + --os-username demo \ + --os-password $ADMIN_PASSWORD \ + --os-project-name demo +$TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-admin \ + --os-region-name $REGION_NAME \ + --os-identity-api-version $IDENTITY_API_VERSION \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_AUTH_URI/v$IDENTITY_API_VERSION \ + --os-username admin \ + --os-password $ADMIN_PASSWORD \ + --os-project-name admin + # Horizon # ------- @@ -1275,69 +1342,6 @@ if is_service_enabled heat; then fi -# Create account rc files -# ======================= - -# Creates source able script files for easier user switching. -# This step also creates certificates for tenants and users, -# which is helpful in image bundle steps. - -if is_service_enabled nova && is_service_enabled keystone; then - USERRC_PARAMS="-PA --target-dir $TOP_DIR/accrc" - - if [ -f $SSL_BUNDLE_FILE ]; then - USERRC_PARAMS="$USERRC_PARAMS --os-cacert $SSL_BUNDLE_FILE" - fi - - if [[ "$HEAT_STANDALONE" = "True" ]]; then - USERRC_PARAMS="$USERRC_PARAMS --heat-url http://$HEAT_API_HOST:$HEAT_API_PORT/v1" - fi - - $TOP_DIR/tools/create_userrc.sh $USERRC_PARAMS -fi - - -# Save some values we generated for later use -save_stackenv - -# Update/create user clouds.yaml file. 
-# clouds.yaml will have -# - A `devstack` entry for the `demo` user for the `demo` project. -# - A `devstack-admin` entry for the `admin` user for the `admin` project. - -# The location is a variable to allow for easier refactoring later to make it -# overridable. There is currently no usecase where doing so makes sense, so -# it's not currently configurable. -CLOUDS_YAML=~/.config/openstack/clouds.yaml - -mkdir -p $(dirname $CLOUDS_YAML) - -CA_CERT_ARG='' -if [ -f "$SSL_BUNDLE_FILE" ]; then - CA_CERT_ARG="--os-cacert $SSL_BUNDLE_FILE" -fi -$TOP_DIR/tools/update_clouds_yaml.py \ - --file $CLOUDS_YAML \ - --os-cloud devstack \ - --os-region-name $REGION_NAME \ - --os-identity-api-version $IDENTITY_API_VERSION \ - $CA_CERT_ARG \ - --os-auth-url $KEYSTONE_AUTH_URI/v$IDENTITY_API_VERSION \ - --os-username demo \ - --os-password $ADMIN_PASSWORD \ - --os-project-name demo -$TOP_DIR/tools/update_clouds_yaml.py \ - --file $CLOUDS_YAML \ - --os-cloud devstack-admin \ - --os-region-name $REGION_NAME \ - --os-identity-api-version $IDENTITY_API_VERSION \ - $CA_CERT_ARG \ - --os-auth-url $KEYSTONE_AUTH_URI/v$IDENTITY_API_VERSION \ - --os-username admin \ - --os-password $ADMIN_PASSWORD \ - --os-project-name admin - - # Wrapup configuration # ==================== From 7224eecb98883a91e9da445baefc23a194e8fd68 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Sat, 19 Sep 2015 11:26:18 -0400 Subject: [PATCH 0485/2941] Extract writing clouds.yaml to function It's a bit wordy to be directly in stack.sh and not in a function. Change-Id: Ibddfd8018d861191f1b1dc3270e0e81c274733cd --- functions-common | 39 +++++++++++++++++++++++++++++++++++++++ stack.sh | 38 ++------------------------------------ 2 files changed, 41 insertions(+), 36 deletions(-) diff --git a/functions-common b/functions-common index 446de5374f..4fca80e14b 100644 --- a/functions-common +++ b/functions-common @@ -67,6 +67,45 @@ function save_stackenv { done } +# Update/create user clouds.yaml file. 
+# clouds.yaml will have +# - A `devstack` entry for the `demo` user for the `demo` project. +# - A `devstack-admin` entry for the `admin` user for the `admin` project. +# write_clouds_yaml +function write_clouds_yaml { + # The location is a variable to allow for easier refactoring later to make it + # overridable. There is currently no usecase where doing so makes sense, so + # it's not currently configurable. + CLOUDS_YAML=~/.config/openstack/clouds.yaml + + mkdir -p $(dirname $CLOUDS_YAML) + + CA_CERT_ARG='' + if [ -f "$SSL_BUNDLE_FILE" ]; then + CA_CERT_ARG="--os-cacert $SSL_BUNDLE_FILE" + fi + $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack \ + --os-region-name $REGION_NAME \ + --os-identity-api-version $IDENTITY_API_VERSION \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_AUTH_URI/v$IDENTITY_API_VERSION \ + --os-username demo \ + --os-password $ADMIN_PASSWORD \ + --os-project-name demo + $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-admin \ + --os-region-name $REGION_NAME \ + --os-identity-api-version $IDENTITY_API_VERSION \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_AUTH_URI/v$IDENTITY_API_VERSION \ + --os-username admin \ + --os-password $ADMIN_PASSWORD \ + --os-project-name admin +} + # Normalize config values to True or False # Accepts as False: 0 no No NO false False FALSE # Accepts as True: 1 yes Yes YES true True TRUE diff --git a/stack.sh b/stack.sh index 7184e59bc0..21760213ec 100755 --- a/stack.sh +++ b/stack.sh @@ -1064,42 +1064,8 @@ fi # Save some values we generated for later use save_stackenv -# Update/create user clouds.yaml file. -# clouds.yaml will have -# - A `devstack` entry for the `demo` user for the `demo` project. -# - A `devstack-admin` entry for the `admin` user for the `admin` project. - -# The location is a variable to allow for easier refactoring later to make it -# overridable. 
There is currently no usecase where doing so makes sense, so -# it's not currently configurable. -CLOUDS_YAML=~/.config/openstack/clouds.yaml - -mkdir -p $(dirname $CLOUDS_YAML) - -CA_CERT_ARG='' -if [ -f "$SSL_BUNDLE_FILE" ]; then - CA_CERT_ARG="--os-cacert $SSL_BUNDLE_FILE" -fi -$TOP_DIR/tools/update_clouds_yaml.py \ - --file $CLOUDS_YAML \ - --os-cloud devstack \ - --os-region-name $REGION_NAME \ - --os-identity-api-version $IDENTITY_API_VERSION \ - $CA_CERT_ARG \ - --os-auth-url $KEYSTONE_AUTH_URI/v$IDENTITY_API_VERSION \ - --os-username demo \ - --os-password $ADMIN_PASSWORD \ - --os-project-name demo -$TOP_DIR/tools/update_clouds_yaml.py \ - --file $CLOUDS_YAML \ - --os-cloud devstack-admin \ - --os-region-name $REGION_NAME \ - --os-identity-api-version $IDENTITY_API_VERSION \ - $CA_CERT_ARG \ - --os-auth-url $KEYSTONE_AUTH_URI/v$IDENTITY_API_VERSION \ - --os-username admin \ - --os-password $ADMIN_PASSWORD \ - --os-project-name admin +# Write a clouds.yaml file +write_clouds_yaml # Horizon # ------- From 31c313d3a35e1b222fdb3aa72993a27c288d10bb Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Sat, 19 Sep 2015 11:35:22 -0400 Subject: [PATCH 0486/2941] Use normal API not token/endpoint for image uploads There is no reason to use keystone token bootstrapping for image uploads. Glance is a service, and images can be uploaded to it normally without special shenanigans. 
Depends-On: If7b81c4a6746c8a1eb0302c96e045fb0f457d67b Change-Id: I7092fb10cbe243e091789134263fab081af0c7f4 --- functions | 23 ++++++++++------------- stack.sh | 2 -- 2 files changed, 10 insertions(+), 15 deletions(-) diff --git a/functions b/functions index 4001e9d5f9..3dae157a0c 100644 --- a/functions +++ b/functions @@ -71,7 +71,7 @@ function upload_image { # OpenVZ-format images are provided as .tar.gz, but not decompressed prior to loading if [[ "$image_url" =~ 'openvz' ]]; then image_name="${image_fname%.tar.gz}" - openstack --os-token $token --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT image create "$image_name" --public --container-format ami --disk-format ami < "${image}" + openstack --os-cloud=devstack-admin image create "$image_name" --public --container-format ami --disk-format ami < "${image}" return fi @@ -182,7 +182,7 @@ function upload_image { vmdk_adapter_type="${props[1]:-$vmdk_adapter_type}" vmdk_net_adapter="${props[2]:-$vmdk_net_adapter}" - openstack --os-token $token --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT image create "$image_name" --public --container-format bare --disk-format vmdk --property vmware_disktype="$vmdk_disktype" --property vmware_adaptertype="$vmdk_adapter_type" --property hw_vif_model="$vmdk_net_adapter" < "${image}" + openstack --os-cloud=devstack-admin image create "$image_name" --public --container-format bare --disk-format vmdk --property vmware_disktype="$vmdk_disktype" --property vmware_adaptertype="$vmdk_adapter_type" --property hw_vif_model="$vmdk_net_adapter" < "${image}" return fi @@ -199,8 +199,7 @@ function upload_image { force_vm_mode="--property vm_mode=xen" fi openstack \ - --os-token $token \ - --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT \ + --os-cloud=devstack-admin \ image create \ "$image_name" --public \ --container-format=ovf --disk-format=vhd \ @@ -214,8 +213,7 @@ function upload_image { if [[ "$image_url" =~ '.xen-raw.tgz' ]]; then image_name="${image_fname%.xen-raw.tgz}" 
openstack \ - --os-token $token \ - --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT \ + --os-cloud=devstack-admin \ image create \ "$image_name" --public \ --container-format=tgz --disk-format=raw \ @@ -231,8 +229,7 @@ function upload_image { fi openstack \ - --os-token $token \ - --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT \ + --os-cloud=devstack-admin \ image create \ "$image_name" --public \ --container-format=bare --disk-format=ploop \ @@ -314,9 +311,9 @@ function upload_image { if [ "$container_format" = "bare" ]; then if [ "$unpack" = "zcat" ]; then - openstack --os-token $token --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT image create "$image_name" $img_property --public --container-format=$container_format --disk-format $disk_format < <(zcat --force "${image}") + openstack --os-cloud=devstack-admin image create "$image_name" $img_property --public --container-format=$container_format --disk-format $disk_format < <(zcat --force "${image}") else - openstack --os-token $token --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT image create "$image_name" $img_property --public --container-format=$container_format --disk-format $disk_format < "${image}" + openstack --os-cloud=devstack-admin image create "$image_name" $img_property --public --container-format=$container_format --disk-format $disk_format < "${image}" fi else # Use glance client to add the kernel the root filesystem. @@ -324,12 +321,12 @@ function upload_image { # kernel for use when uploading the root filesystem. 
local kernel_id="" ramdisk_id=""; if [ -n "$kernel" ]; then - kernel_id=$(openstack --os-token $token --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT image create "$image_name-kernel" $img_property --public --container-format aki --disk-format aki < "$kernel" | grep ' id ' | get_field 2) + kernel_id=$(openstack --os-cloud=devstack-admin image create "$image_name-kernel" $img_property --public --container-format aki --disk-format aki < "$kernel" | grep ' id ' | get_field 2) fi if [ -n "$ramdisk" ]; then - ramdisk_id=$(openstack --os-token $token --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT image create "$image_name-ramdisk" $img_property --public --container-format ari --disk-format ari < "$ramdisk" | grep ' id ' | get_field 2) + ramdisk_id=$(openstack --os-cloud=devstack-admin image create "$image_name-ramdisk" $img_property --public --container-format ari --disk-format ari < "$ramdisk" | grep ' id ' | get_field 2) fi - openstack --os-token $token --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT image create "${image_name%.img}" $img_property --public --container-format ami --disk-format ami ${kernel_id:+--property kernel_id=$kernel_id} ${ramdisk_id:+--property ramdisk_id=$ramdisk_id} < "${image}" + openstack --os-cloud=devstack-admin image create "${image_name%.img}" $img_property --public --container-format ami --disk-format ami ${kernel_id:+--property kernel_id=$kernel_id} ${ramdisk_id:+--property ramdisk_id=$ramdisk_id} < "${image}" fi } diff --git a/stack.sh b/stack.sh index 21760213ec..3625e5f296 100755 --- a/stack.sh +++ b/stack.sh @@ -1216,8 +1216,6 @@ fi # See https://help.ubuntu.com/community/CloudInit for more on ``cloud-init`` if is_service_enabled g-reg; then - TOKEN=$(openstack token issue -c id -f value) - die_if_not_set $LINENO TOKEN "Keystone fail to get token" echo_summary "Uploading images" From 382f982e51d6117cf8d478b94f975455dabe4ce9 Mon Sep 17 00:00:00 2001 From: Julien Danjou Date: Mon, 21 Sep 2015 14:19:52 +0000 Subject: 
[PATCH 0487/2941] keystone: fix prefixed URL Commit 2ad1a42ca667ff21e6f7d2ae906be23a20430036 broke entirely the Apache configuration for Keystone when used without a port on the /identity URL. This patch fixes that. Change-Id: I47805138c66456c9c5fa9af1f4ac33b03d0ce5b9 --- files/apache-keystone.template | 34 ++++++++++++---------------------- 1 file changed, 12 insertions(+), 22 deletions(-) diff --git a/files/apache-keystone.template b/files/apache-keystone.template index 4d3d2d6623..f9fa265db5 100644 --- a/files/apache-keystone.template +++ b/files/apache-keystone.template @@ -2,6 +2,16 @@ Listen %PUBLICPORT% Listen %ADMINPORT% LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" keystone_combined + + = 2.4> + Require all granted + + + Order allow,deny + Allow from all + + + WSGIDaemonProcess keystone-public processes=5 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% WSGIProcessGroup keystone-public @@ -16,16 +26,6 @@ LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" %SSLENGINE% %SSLCERTFILE% %SSLKEYFILE% - - - = 2.4> - Require all granted - - - Order allow,deny - Allow from all - - @@ -42,19 +42,9 @@ LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %D(us)" %SSLENGINE% %SSLCERTFILE% %SSLKEYFILE% - - - = 2.4> - Require all granted - - - Order allow,deny - Allow from all - - -Alias /identity %PUBLICWSGI% +Alias /identity %KEYSTONE_BIN%/keystone-wsgi-public SetHandler wsgi-script Options +ExecCGI @@ -64,7 +54,7 @@ Alias /identity %PUBLICWSGI% WSGIPassAuthorization On -Alias /identity_admin %ADMINWSGI% +Alias /identity_admin %KEYSTONE_BIN%/keystone-wsgi-admin SetHandler wsgi-script Options +ExecCGI From 91b7fa134ccc4d58dc14f08fef4641602c98db9e Mon Sep 17 00:00:00 2001 From: Clinton Knight Date: Tue, 22 Sep 2015 09:39:23 -0700 Subject: [PATCH 0488/2941] Add manila to devstack plugin registry Manila has had a devstack plugin since Kilo. The registry should reflect that. 
Change-Id: I910198495e98b6f8d92c4880d1bee0c16d6c1559 --- doc/source/plugin-registry.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 1b54c01f54..bbab56b025 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -26,6 +26,8 @@ The following are plugins that exist for official OpenStack projects. +--------------------+-------------------------------------------+--------------------+ |magnum |git://git.openstack.org/openstack/magnum | | +--------------------+-------------------------------------------+--------------------+ +|manila |git://git.openstack.org/openstack/manila | file shares | ++--------------------+-------------------------------------------+--------------------+ |mistral |git://git.openstack.org/openstack/mistral | | +--------------------+-------------------------------------------+--------------------+ |rally |git://git.openstack.org/openstack/rally | | From 59c6377ae51c024c28a6fba72de567bc97edda10 Mon Sep 17 00:00:00 2001 From: Roxana Gherle Date: Wed, 9 Sep 2015 18:22:31 -0700 Subject: [PATCH 0489/2941] Assign admin role for admin user on default domain This patch adds an admin role assignment for the admin user on the default domain as part of the Keystone configuration stage. 
Closes-Bug: #1494081 Change-Id: I91c88917bd51be4572e4970c94e65d866798df26 --- functions-common | 32 ++++++++++++++++++++++++++++++++ lib/keystone | 1 + 2 files changed, 33 insertions(+) diff --git a/functions-common b/functions-common index 446de5374f..c38a77243e 100644 --- a/functions-common +++ b/functions-common @@ -803,6 +803,38 @@ function get_or_add_user_project_role { echo $user_role_id } +# Gets or adds user role to domain +# Usage: get_or_add_user_domain_role +function get_or_add_user_domain_role { + local user_role_id + # Gets user role id + user_role_id=$(openstack role list \ + --user $2 \ + --os-url=$KEYSTONE_SERVICE_URI_V3 \ + --os-identity-api-version=3 \ + --column "ID" \ + --domain $3 \ + --column "Name" \ + | grep " $1 " | get_field 1) + if [[ -z "$user_role_id" ]]; then + # Adds role to user and get it + openstack role add $1 \ + --user $2 \ + --domain $3 \ + --os-url=$KEYSTONE_SERVICE_URI_V3 \ + --os-identity-api-version=3 + user_role_id=$(openstack role list \ + --user $2 \ + --os-url=$KEYSTONE_SERVICE_URI_V3 \ + --os-identity-api-version=3 \ + --column "ID" \ + --domain $3 \ + --column "Name" \ + | grep " $1 " | get_field 1) + fi + echo $user_role_id +} + # Gets or adds group role to project # Usage: get_or_add_group_project_role function get_or_add_group_project_role { diff --git a/lib/keystone b/lib/keystone index e2448c9068..b15abe1cb5 100644 --- a/lib/keystone +++ b/lib/keystone @@ -364,6 +364,7 @@ function create_keystone_accounts { local admin_user=$(get_or_create_user "admin" "$ADMIN_PASSWORD" default) local admin_role=$(get_or_create_role "admin") get_or_add_user_project_role $admin_role $admin_user $admin_tenant + get_or_add_user_domain_role $admin_role $admin_user default # Create service project/role get_or_create_project "$SERVICE_TENANT_NAME" default From 50821bed081e94dfd4b75cf02121a42a56cdbaac Mon Sep 17 00:00:00 2001 From: Roxana Gherle Date: Tue, 22 Sep 2015 10:52:46 -0700 Subject: [PATCH 0490/2941] Fix return value of 
get_or_add_user_project_role get_or_add_user_project_role function was always returning an empty user_role_id because the role assignment command does not return any output. Added a command to get the user_role_id after the assignment happens. Closes-Bug: #1498599 Change-Id: If1b77eef0d4f0ebdcdf761ecb5e2011484f73871 --- functions-common | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/functions-common b/functions-common index cf140072fd..d230a29891 100644 --- a/functions-common +++ b/functions-common @@ -830,14 +830,20 @@ function get_or_add_user_project_role { --column "Name" \ | grep " $1 " | get_field 1) if [[ -z "$user_role_id" ]]; then - # Adds role to user - user_role_id=$(openstack role add \ - $1 \ + # Adds role to user and get it + openstack role add $1 \ --user $2 \ --project $3 \ --os-url=$KEYSTONE_SERVICE_URI_V3 \ + --os-identity-api-version=3 + user_role_id=$(openstack role list \ + --user $2 \ + --os-url=$KEYSTONE_SERVICE_URI_V3 \ --os-identity-api-version=3 \ - | grep " id " | get_field 2) + --column "ID" \ + --project $3 \ + --column "Name" \ + | grep " $1 " | get_field 1) fi echo $user_role_id } From 5aeea6ae3e2434d7b08bce2da672061cdba08ab0 Mon Sep 17 00:00:00 2001 From: Peter Stachowski Date: Tue, 22 Sep 2015 19:38:02 +0000 Subject: [PATCH 0491/2941] Removed unused TOKEN from upload_image Changeset https://review.openstack.org/#/c/225426/ changed how images were uploaded into Glance, however the (now) unused TOKEN variable and function argument to upload_image remained. These have been removed. 
Change-Id: I9910c469f72d52e56111048cc24ea3c992c1d480 --- functions | 3 +-- stack.sh | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/functions b/functions index 3dae157a0c..ff95c89ad9 100644 --- a/functions +++ b/functions @@ -36,10 +36,9 @@ function function_exists { # - ``FILES`` must be set to the cache dir # - ``GLANCE_HOSTPORT`` # -# upload_image image-url glance-token +# upload_image image-url function upload_image { local image_url=$1 - local token=$2 local image image_fname image_name diff --git a/stack.sh b/stack.sh index 3625e5f296..ae9c946aaa 100755 --- a/stack.sh +++ b/stack.sh @@ -1225,7 +1225,7 @@ if is_service_enabled g-reg; then fi for image_url in ${IMAGE_URLS//,/ }; do - upload_image $image_url $TOKEN + upload_image $image_url done fi From 5090142969e162e115d6eb1ea7582f68f34d8879 Mon Sep 17 00:00:00 2001 From: Andrey Pavlov Date: Tue, 22 Sep 2015 21:20:36 +0300 Subject: [PATCH 0492/2941] Revert change I2f8ae86e17d26ec4defa16e843faa8987d27fac9 The commit breaks creation of user rc file. Now devstack doesn't create certificate for user (because it's too early to do it) and doesn't react to changes of EC2/S3 urls if they is recreated by devstack plugins. So the commit totally broke ec2-api gating for example. Change-Id: I069f46f95656655ae7ba8f3dd929f47eae594b68 --- stack.sh | 56 ++++++++++++++++++++++++++------------------------------ 1 file changed, 26 insertions(+), 30 deletions(-) diff --git a/stack.sh b/stack.sh index 3625e5f296..d8ab52ec98 100755 --- a/stack.sh +++ b/stack.sh @@ -1034,36 +1034,6 @@ if is_service_enabled keystone; then export OS_REGION_NAME=$REGION_NAME fi -# We now have a working keystone. From this point, everything can be done -# with normal auth. 
Let's write out the auth config files so that if something -# goes wrong subsequently, developers debugging have stackrc and clouds.yaml -# files to use to poke at things - -# Create account rc files -# ======================= - -# Creates source able script files for easier user switching. -# This step also creates certificates for tenants and users, -# which is helpful in image bundle steps. - -if is_service_enabled nova && is_service_enabled keystone; then - USERRC_PARAMS="-PA --target-dir $TOP_DIR/accrc" - - if [ -f $SSL_BUNDLE_FILE ]; then - USERRC_PARAMS="$USERRC_PARAMS --os-cacert $SSL_BUNDLE_FILE" - fi - - if [[ "$HEAT_STANDALONE" = "True" ]]; then - USERRC_PARAMS="$USERRC_PARAMS --heat-url http://$HEAT_API_HOST:$HEAT_API_PORT/v1" - fi - - $TOP_DIR/tools/create_userrc.sh $USERRC_PARAMS -fi - - -# Save some values we generated for later use -save_stackenv - # Write a clouds.yaml file write_clouds_yaml @@ -1306,6 +1276,32 @@ if is_service_enabled heat; then fi +# Create account rc files +# ======================= + +# Creates source able script files for easier user switching. +# This step also creates certificates for tenants and users, +# which is helpful in image bundle steps. 
+ +if is_service_enabled nova && is_service_enabled keystone; then + USERRC_PARAMS="-PA --target-dir $TOP_DIR/accrc" + + if [ -f $SSL_BUNDLE_FILE ]; then + USERRC_PARAMS="$USERRC_PARAMS --os-cacert $SSL_BUNDLE_FILE" + fi + + if [[ "$HEAT_STANDALONE" = "True" ]]; then + USERRC_PARAMS="$USERRC_PARAMS --heat-url http://$HEAT_API_HOST:$HEAT_API_PORT/v1" + fi + + $TOP_DIR/tools/create_userrc.sh $USERRC_PARAMS +fi + + +# Save some values we generated for later use +save_stackenv + + # Wrapup configuration # ==================== From 78f6c1d70b51c29e5d36143e6051e6ff96ceb41c Mon Sep 17 00:00:00 2001 From: jianghua wang Date: Fri, 18 Sep 2015 11:17:46 +0100 Subject: [PATCH 0493/2941] tools/Xen: failed to install domU in new XenServer Current install_os_domU.sh depends on some keywords which are changed in the next version XenServer which is upgraded to CentOS 7. So with the existing script to install domU in the new version XenServer, it will always fail. This patch is to make it to be compatible with all XenServer versions: 1. the output format of "ifconfig" is changed; the fix is to use the ip command to retrieve IP address. 2. In XS 6.5 and the previous XS, the "xe-guest-utilities" package file name is as "xe-guest-utilities__.deb" but now it delivers a single file for all Arch's and the file name is "xe-guest-utilities__all.deb". In order to make it to be compatible, the fix will try to search the old file name pattern by default. If it does't exist, then try to search the new file name pattern. 
Change-Id: I893e89e42a5ef7dd079b571ea308f318c9befc9e Closes-Bug: #1494241 --- tools/xen/functions | 3 ++- tools/xen/install_os_domU.sh | 5 ++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/tools/xen/functions b/tools/xen/functions index 4e9fede387..8c674dcce3 100644 --- a/tools/xen/functions +++ b/tools/xen/functions @@ -179,7 +179,8 @@ function xenapi_ip_on { local bridge_or_net_name bridge_or_net_name=$1 - ifconfig $(bridge_for "$bridge_or_net_name") | grep "inet addr" | cut -d ":" -f2 | sed "s/ .*//" + ip -4 addr show $(bridge_for "$bridge_or_net_name") |\ + awk '/inet/{split($2, ip, "/"); print ip[1];}' } function xenapi_is_listening_on { diff --git a/tools/xen/install_os_domU.sh b/tools/xen/install_os_domU.sh index b49347e09b..e24d9ed0b9 100755 --- a/tools/xen/install_os_domU.sh +++ b/tools/xen/install_os_domU.sh @@ -193,7 +193,10 @@ if [ -z "$templateuuid" ]; then TMP_DIR=/tmp/temp.$RANDOM mkdir -p $TMP_DIR mount -o loop $TOOLS_ISO $TMP_DIR - DEB_FILE=$(ls $TMP_DIR/Linux/*amd64.deb) + # the target deb package maybe *amd64.deb or *all.deb, + # so use *amd64.deb by default. If it doesn't exist, + # then use *all.deb. + DEB_FILE=$(ls $TMP_DIR/Linux/*amd64.deb || ls $TMP_DIR/Linux/*all.deb) cp $DEB_FILE $HTTP_SERVER_LOCATION umount $TMP_DIR rmdir $TMP_DIR From 59756e990ca527d017e698e54bc7c6432af1b1fd Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Wed, 23 Sep 2015 17:42:54 -0400 Subject: [PATCH 0494/2941] Don't use python with run_process First noted in change id by fumihiko and kyle: I079e18b58b214bf8362945c253d6d894ca8b1a6b Neutron and few others seem to use an extra "python" along with run_process which is quite unnecessary and complicates adding python3 support in devstack. So let's clean this up. 
Change-Id: I2d478f2b04c04d05c18420563e2ad77eba73be3f --- lib/neutron-legacy | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 550eadb4b4..e67bd4ae32 100755 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -687,7 +687,7 @@ function start_neutron_service_and_check { service_protocol="http" fi # Start the Neutron service - run_process q-svc "python $NEUTRON_BIN_DIR/neutron-server $cfg_file_options" + run_process q-svc "$NEUTRON_BIN_DIR/neutron-server $cfg_file_options" echo "Waiting for Neutron to start..." if is_ssl_enabled_service "neutron"; then ssl_ca="--ca-certificate=${SSL_BUNDLE_FILE}" @@ -705,7 +705,7 @@ function start_neutron_service_and_check { # Control of the l2 agent is separated out to make it easier to test partial # upgrades (everything upgraded except the L2 agent) function start_neutron_l2_agent { - run_process q-agt "python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" + run_process q-agt "$AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" if is_provider_network; then sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_PHYSICAL_BRIDGE $PUBLIC_INTERFACE @@ -723,23 +723,23 @@ function start_neutron_l2_agent { } function start_neutron_other_agents { - run_process q-dhcp "python $AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file=$Q_DHCP_CONF_FILE" + run_process q-dhcp "$AGENT_DHCP_BINARY --config-file $NEUTRON_CONF --config-file=$Q_DHCP_CONF_FILE" if is_service_enabled neutron-vpnaas; then : # Started by plugin elif is_service_enabled q-vpn; then run_process q-vpn "$AGENT_VPN_BINARY $(determine_config_files neutron-vpn-agent)" else - run_process q-l3 "python $AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)" + run_process q-l3 "$AGENT_L3_BINARY $(determine_config_files neutron-l3-agent)" fi - run_process q-meta "python $AGENT_META_BINARY --config-file $NEUTRON_CONF 
--config-file=$Q_META_CONF_FILE" - run_process q-lbaas "python $AGENT_LBAAS_BINARY --config-file $NEUTRON_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME" - run_process q-metering "python $AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME" + run_process q-meta "$AGENT_META_BINARY --config-file $NEUTRON_CONF --config-file=$Q_META_CONF_FILE" + run_process q-lbaas "$AGENT_LBAAS_BINARY --config-file $NEUTRON_CONF --config-file=$LBAAS_AGENT_CONF_FILENAME" + run_process q-metering "$AGENT_METERING_BINARY --config-file $NEUTRON_CONF --config-file $METERING_AGENT_CONF_FILENAME" if [ "$VIRT_DRIVER" = 'xenserver' ]; then # For XenServer, start an agent for the domU openvswitch - run_process q-domua "python $AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE.domU" + run_process q-domua "$AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE.domU" fi } From 1ce19ab76d67a89b04f907f1d292d013a3b699e0 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 23 Sep 2015 10:36:53 -0400 Subject: [PATCH 0495/2941] attempt to cut api workers in half One of the key reasons for the large number of API_WORKERS was that mysql would block API workers, so would start rejecting work. Now with the python mysql driver we should be eventlet aware, and life should be good. Let's see if this works. 
Change-Id: Iaf8730a4dcdc30ef390795bfb5fb73ec3cd665fe --- lib/nova | 2 ++ stackrc | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/nova b/lib/nova index ab5a76e2ad..db7967841c 100644 --- a/lib/nova +++ b/lib/nova @@ -607,6 +607,8 @@ function create_nova_conf { iniset $NOVA_CONF DEFAULT osapi_compute_workers "$API_WORKERS" iniset $NOVA_CONF DEFAULT ec2_workers "$API_WORKERS" iniset $NOVA_CONF DEFAULT metadata_workers "$API_WORKERS" + # don't let the conductor get out of control now that we're using a pure python db driver + iniset $NOVA_CONF conductor workers "$API_WORKERS" iniset $NOVA_CONF cinder os_region_name "$REGION_NAME" diff --git a/stackrc b/stackrc index 2641be64b7..9e05d8253a 100644 --- a/stackrc +++ b/stackrc @@ -631,7 +631,7 @@ SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http} # the memory used where there are a large number of CPUs present # (the default number of workers for many services is the number of CPUs) # Also sets the minimum number of workers to 2. -API_WORKERS=${API_WORKERS:=$(( ($(nproc)/2)<2 ? 2 : ($(nproc)/2) ))} +API_WORKERS=${API_WORKERS:=$(( ($(nproc)/4)<2 ? 
2 : ($(nproc)/4) ))} # Service startup timeout SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60} From 1c42846a62dfafd43eb3cca4f7f1c8f3819cf867 Mon Sep 17 00:00:00 2001 From: Dmitry Tantsur Date: Thu, 24 Sep 2015 16:51:50 +0200 Subject: [PATCH 0496/2941] Mention ironic-inspector in plugin-registry docs Change-Id: Ia508a3af5a65e4fbf34dd8b9ae537ca133358fa3 --- doc/source/plugin-registry.rst | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index bbab56b025..0feaafb275 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -72,14 +72,16 @@ Alternate Configs Additional Services =================== -+-------------+------------------------------------------+------------+ -| Plugin Name | URL | Comments | -| | | | -+-------------+------------------------------------------+------------+ -|ec2-api |git://git.openstack.org/stackforge/ec2api |[as1]_ | -+-------------+------------------------------------------+------------+ -| | | | -+-------------+------------------------------------------+------------+ ++----------------+--------------------------------------------------+------------+ +| Plugin Name | URL | Comments | +| | | | ++----------------+--------------------------------------------------+------------+ +|ec2-api |git://git.openstack.org/stackforge/ec2api |[as1]_ | ++----------------+--------------------------------------------------+------------+ +|ironic-inspector|git://git.openstack.org/openstack/ironic-inspector| | ++----------------+--------------------------------------------------+------------+ +| | | | ++----------------+--------------------------------------------------+------------+ .. [as1] first functional devstack plugin, hence why used in most of the examples. From ebe63d826b7909b992bff988b3eac65e7b6bfa88 Mon Sep 17 00:00:00 2001 From: "James E. 
Blair" Date: Thu, 24 Sep 2015 07:43:50 -0700 Subject: [PATCH 0497/2941] Improve ERROR_ON_CLONE message In case ERROR_ON_CLONE is true and triggers a failure for a missing project, suggest a remedial action. On their own, people have come up with remedies that include altering the value of ERROR_ON_CLONE which rather defeats the purpose. Change-Id: I28d7f2c184f8440b774fefaa8ec7002d6708db95 --- functions-common | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/functions-common b/functions-common index cf140072fd..1ae4d6597d 100644 --- a/functions-common +++ b/functions-common @@ -492,8 +492,11 @@ function git_clone { if echo $git_ref | egrep -q "^refs"; then # If our branch name is a gerrit style refs/changes/... if [[ ! -d $git_dest ]]; then - [[ "$ERROR_ON_CLONE" = "True" ]] && \ + if [[ "$ERROR_ON_CLONE" = "True" ]]; then + echo "The $git_dest project was not found; if this is a gate job, add" + echo "the project to the \$PROJECTS variable in the job definition." die $LINENO "Cloning not allowed in this configuration" + fi git_timed clone $git_clone_flags $git_remote $git_dest fi cd $git_dest @@ -501,8 +504,11 @@ function git_clone { else # do a full clone only if the directory doesn't exist if [[ ! -d $git_dest ]]; then - [[ "$ERROR_ON_CLONE" = "True" ]] && \ + if [[ "$ERROR_ON_CLONE" = "True" ]]; then + echo "The $git_dest project was not found; if this is a gate job, add" + echo "the project to the \$PROJECTS variable in the job definition." 
die $LINENO "Cloning not allowed in this configuration" + fi git_timed clone $git_clone_flags $git_remote $git_dest cd $git_dest # This checkout syntax works for both branches and tags From 8872545a0f98c5681147a08541e119813f0bdc01 Mon Sep 17 00:00:00 2001 From: Kashyap Chamarthy Date: Mon, 14 Sep 2015 13:17:56 +0200 Subject: [PATCH 0498/2941] worlddump: Use SIGUSR2 instead of SIGUSR1 The function guru_meditation_report() currently uses the User-defined signal SIGUSR1 to kill a Nova Compute process so that a Guru Meditation Report is generated. Testing locally, in a DevStack instance, manually attempting to kill a Nova compute process [kill -s USR1 `pgrep nova-compute`] does not result in process being terminated, and no error report generated. It turns out[1] that SIGUSR1 is used by Apache 'mod_wsgi'. Using the signal SIGUSR2 resolves this issue (i.e. 'nova-compute' process is terminated, and the Guru Meditation Report is generated). So, use USR2, instead of USR1. Corresponding oslo.reports related commit[2]. 
[1] https://code.google.com/p/modwsgi/wiki/ConfigurationDirectives#WSGIRestrictSignal [2] 45b1c02d113051d147e54ef921ce8e94135542d8 -- guru_meditation_report: Use SIGUSR2 instead of SIGUSR1 [3] Original DevStack commit that brought in this change -- 2ebe993b25462919e8aeeb896c9f91b6be7aa573 Change-Id: I8a7eaf71b83edca3c80074d6bf2d471e3db6142b --- tools/worlddump.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/worlddump.py b/tools/worlddump.py index 1b337a9a83..33d5b8f620 100755 --- a/tools/worlddump.py +++ b/tools/worlddump.py @@ -131,7 +131,7 @@ def guru_meditation_report(): print "Skipping as nova-compute does not appear to be running" return - _dump_cmd("kill -s USR1 `pgrep nova-compute`") + _dump_cmd("kill -s USR2 `pgrep nova-compute`") print "guru meditation report in nova-compute log" From c1605550d94736f5698d3f6ea7bc1e0b0914cb4e Mon Sep 17 00:00:00 2001 From: Michal Ptacek Date: Wed, 23 Sep 2015 21:02:02 +0100 Subject: [PATCH 0499/2941] Propagate OVS_DATAPATH_TYPE to ml2_conf.ini If OVS_DATAPATH_TYPE is configured it should be visible in ML2 config Changing OVS_DATAPATH_TYPE default value to 'system' from '' Closes-Bug: 1499029 Change-Id: I88e7d2554e8a1d6dcfea71fc1fb8e9fb2491d8b7 --- lib/neutron_plugins/openvswitch_agent | 1 + lib/neutron_plugins/ovs_base | 5 +++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent index 1ff3a40c82..48e47b3dab 100755 --- a/lib/neutron_plugins/openvswitch_agent +++ b/lib/neutron_plugins/openvswitch_agent @@ -105,6 +105,7 @@ function neutron_plugin_configure_plugin_agent { iniset "/$Q_PLUGIN_CONF_FILE.domU" agent root_helper "$Q_RR_COMMAND" fi iniset /$Q_PLUGIN_CONF_FILE agent tunnel_types $Q_TUNNEL_TYPES + iniset /$Q_PLUGIN_CONF_FILE ovs datapath_type $OVS_DATAPATH_TYPE } function neutron_plugin_setup_interface_driver { diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base index 
f1f7f8597b..b012683a6f 100755 --- a/lib/neutron_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -8,7 +8,8 @@ OVSB_XTRACE=$(set +o | grep xtrace) set +o xtrace OVS_BRIDGE=${OVS_BRIDGE:-br-int} -OVS_DATAPATH_TYPE=${OVS_DATAPATH_TYPE:-""} +# OVS recognize default 'system' datapath or 'netdev' for userspace datapath +OVS_DATAPATH_TYPE=${OVS_DATAPATH_TYPE:-system} OVS_TUNNEL_BRIDGE=${OVS_TUNNEL_BRIDGE:-br-tun} function is_neutron_ovs_base_plugin { @@ -20,7 +21,7 @@ function _neutron_ovs_base_add_bridge { local bridge=$1 local addbr_cmd="sudo ovs-vsctl --no-wait -- --may-exist add-br $bridge" - if [ "$OVS_DATAPATH_TYPE" != "" ] ; then + if [ "$OVS_DATAPATH_TYPE" != "system" ] ; then addbr_cmd="$addbr_cmd -- set Bridge $bridge datapath_type=${OVS_DATAPATH_TYPE}" fi From 1eca508c57dd987fa6f7a7e6f441096365e6892d Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 25 Sep 2015 13:28:58 +1000 Subject: [PATCH 0500/2941] Simplify RDO install We can just directly install the latest RDO repo rather than having to keep this up-to-date. I don't think there is actually much there we need any more; there was a lot more coming from RDO in the centos6 days. openvswitch is one big one, however. Change-Id: I42b8bc1aea8ff61770987eecd5fc3b8309c1e210 --- stack.sh | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/stack.sh b/stack.sh index d8ab52ec98..01b1165f26 100755 --- a/stack.sh +++ b/stack.sh @@ -286,14 +286,7 @@ EOF # ... and also optional to be enabled sudo yum-config-manager --enable rhel-7-server-optional-rpms - RHEL_RDO_REPO_RPM=${RHEL7_RDO_REPO_RPM:-"https://repos.fedorapeople.org/repos/openstack/openstack-kilo/rdo-release-kilo-1.noarch.rpm"} - RHEL_RDO_REPO_ID=${RHEL7_RDO_REPO_ID:-"openstack-kilo"} - - if ! 
sudo yum repolist enabled $RHEL_RDO_REPO_ID | grep -q $RHEL_RDO_REPO_ID; then - echo "RDO repo not detected; installing" - yum_install $RHEL_RDO_REPO_RPM || \ - die $LINENO "Error installing RDO repo, cannot continue" - fi + sudo yum install -y https://rdoproject.org/repos/rdo-release.rpm if is_oraclelinux; then sudo yum-config-manager --enable ol7_optional_latest ol7_addons ol7_MySQL56 From 3c68501356c319ecda80a9eba82f0575e7714eb2 Mon Sep 17 00:00:00 2001 From: venkatamahesh Date: Sat, 26 Sep 2015 18:05:34 +0530 Subject: [PATCH 0501/2941] Replace the devstack.org with devstack docs url Change-Id: I870300b90e1e5f4f382238c209fc5416914d49f0 --- README.md | 4 ++-- setup.cfg | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index ee7f0e7809..dd394c2e07 100644 --- a/README.md +++ b/README.md @@ -13,7 +13,7 @@ DevStack is a set of scripts and utilities to quickly deploy an OpenStack cloud. * To provide an environment for the OpenStack CI testing on every commit to the projects -Read more at http://devstack.org. +Read more at http://docs.openstack.org/developer/devstack IMPORTANT: Be sure to carefully read `stack.sh` and any other scripts you execute before you run them, as they install software and will alter your @@ -94,4 +94,4 @@ DevStack can be extensively configured via the configuration file `local.conf`. It is likely that you will need to provide and modify this file if you want anything other than the most basic setup. Start by reading the [configuration guide](doc/source/configuration.rst) for -details of the configuration file and the many available options. \ No newline at end of file +details of the configuration file and the many available options. 
diff --git a/setup.cfg b/setup.cfg index 58871344aa..e4b2888dcb 100644 --- a/setup.cfg +++ b/setup.cfg @@ -5,7 +5,7 @@ description-file = README.md author = OpenStack author-email = openstack-dev@lists.openstack.org -home-page = http://devstack.org +home-page = http://docs.openstack.org/developer/devstack classifier = Intended Audience :: Developers License :: OSI Approved :: Apache Software License From f327b1e1196eacf25e7c4c9e3a7ad30c53bb961c Mon Sep 17 00:00:00 2001 From: Einst Crazy Date: Thu, 24 Sep 2015 18:50:30 +0800 Subject: [PATCH 0502/2941] stackrc set the LC_ALL to C It will report 'unknown locale: UTF-8', when the env is UTF-8. Default set the LC_ALL to C in the stackrc, instead. And delete the duplicate option in stack.sh. Closes-Bug: 1499296 Change-Id: I14121b25ac314a1a93e6dd6811e196ce2a7c0eb5 --- stack.sh | 7 ------- stackrc | 7 +++++++ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/stack.sh b/stack.sh index d8ab52ec98..5e57274d64 100755 --- a/stack.sh +++ b/stack.sh @@ -28,13 +28,6 @@ set -o xtrace # Make sure custom grep options don't get in the way unset GREP_OPTIONS -# Sanitize language settings to avoid commands bailing out -# with "unsupported locale setting" errors. -unset LANG -unset LANGUAGE -LC_ALL=C -export LC_ALL - # Make sure umask is sane umask 022 diff --git a/stackrc b/stackrc index fdde62f50c..82c6885db9 100644 --- a/stackrc +++ b/stackrc @@ -7,6 +7,13 @@ [[ -z "$_DEVSTACK_STACKRC" ]] || return 0 declare -r _DEVSTACK_STACKRC=1 +# Sanitize language settings to avoid commands bailing out +# with "unsupported locale setting" errors. +unset LANG +unset LANGUAGE +LC_ALL=C +export LC_ALL + # Find the other rc files RC_DIR=$(cd $(dirname "${BASH_SOURCE:-$0}") && pwd) From 651cb1ad758866a87b947c4e50b4ec995072d6ca Mon Sep 17 00:00:00 2001 From: Anton Arefiev Date: Tue, 1 Sep 2015 10:55:20 +0300 Subject: [PATCH 0503/2941] Add toggle to run Cinder API under Apache This change adds apache templates for Cinder API services. 
Also add possibility to switch between the old and new ways to setup Cinder API. Related Cinder blueprint: https://blueprints.launchpad.net/cinder/+spec/non-eventlet-wsgi-app Change-Id: Icfad40ee6998296727a95613199e5c2d87bd0a45 Depends-On: Ifbab059001d1567b1f7b394c0411a9ca4629f846 Co-Authored-By: Ivan Kolodyazhny --- doc/source/configuration.rst | 6 +++ files/apache-cinder-api.template | 26 +++++++++++ lib/cinder | 80 +++++++++++++++++++++++++++++--- 3 files changed, 106 insertions(+), 6 deletions(-) create mode 100644 files/apache-cinder-api.template diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index e8de7c4f70..7ca82c7fd1 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -298,6 +298,12 @@ Example (Swift): SWIFT_USE_MOD_WSGI="True" +Example (Cinder): + +:: + + CINDER_USE_MOD_WSGI="True" + Libraries from Git ------------------ diff --git a/files/apache-cinder-api.template b/files/apache-cinder-api.template new file mode 100644 index 0000000000..e1246f11b6 --- /dev/null +++ b/files/apache-cinder-api.template @@ -0,0 +1,26 @@ +Listen %PUBLICPORT% + + + WSGIDaemonProcess osapi_volume processes=%APIWORKERS% threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% + WSGIProcessGroup osapi_volume + WSGIScriptAlias / %CINDER_BIN_DIR%/cinder-wsgi + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + = 2.4> + ErrorLogFormat "%{cu}t %M" + + ErrorLog /var/log/%APACHE_NAME%/c-api.log + %SSLENGINE% + %SSLCERTFILE% + %SSLKEYFILE% + + + = 2.4> + Require all granted + + + Order allow,deny + Allow from all + + + diff --git a/lib/cinder b/lib/cinder index 26277ccaba..10144117ec 100644 --- a/lib/cinder +++ b/lib/cinder @@ -108,6 +108,8 @@ CINDER_PERIODIC_INTERVAL=${CINDER_PERIODIC_INTERVAL:-60} CINDER_ISCSI_HELPER=${CINDER_ISCSI_HELPER:-tgtadm} +# Toggle for deploying Cinder under HTTPD + mod_wsgi +CINDER_USE_MOD_WSGI=${CINDER_USE_MOD_WSGI:-False} # Source the enabled backends if is_service_enabled c-vol && [[ -n 
"$CINDER_ENABLED_BACKENDS" ]]; then @@ -137,6 +139,11 @@ function is_cinder_enabled { return 1 } +# _cinder_cleanup_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file +function _cinder_cleanup_apache_wsgi { + sudo rm -f $(apache_site_config_for osapi-volume) +} + # cleanup_cinder() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_cinder { @@ -183,6 +190,43 @@ function cleanup_cinder { fi done fi + + if [ "$CINDER_USE_MOD_WSGI" == "True" ]; then + _cinder_cleanup_apache_wsgi + fi +} + +# _cinder_config_apache_wsgi() - Set WSGI config files +function _cinder_config_apache_wsgi { + local cinder_apache_conf=$(apache_site_config_for osapi-volume) + local cinder_ssl="" + local cinder_certfile="" + local cinder_keyfile="" + local cinder_api_port=$CINDER_SERVICE_PORT + local venv_path="" + + if is_ssl_enabled_service c-api; then + cinder_ssl="SSLEngine On" + cinder_certfile="SSLCertificateFile $CINDER_SSL_CERT" + cinder_keyfile="SSLCertificateKeyFile $CINDER_SSL_KEY" + fi + if [[ ${USE_VENV} = True ]]; then + venv_path="python-path=${PROJECT_VENV["cinder"]}/lib/python2.7/site-packages" + fi + + # copy proxy vhost file + sudo cp $FILES/apache-cinder-api.template $cinder_apache_conf + sudo sed -e " + s|%PUBLICPORT%|$cinder_api_port|g; + s|%APACHE_NAME%|$APACHE_NAME|g; + s|%APIWORKERS%|$API_WORKERS|g + s|%CINDER_BIN_DIR%|$CINDER_BIN_DIR|g; + s|%SSLENGINE%|$cinder_ssl|g; + s|%SSLCERTFILE%|$cinder_certfile|g; + s|%SSLKEYFILE%|$cinder_keyfile|g; + s|%USER%|$STACK_USER|g; + s|%VIRTUALENV%|$venv_path|g + " -i $cinder_apache_conf } # configure_cinder() - Set config files, create data dirs, etc @@ -276,13 +320,17 @@ function configure_cinder { fi # Format logging - if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then + if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ] && [ "$CINDER_USE_MOD_WSGI" == "False" ]; then setup_colorized_logging $CINDER_CONF DEFAULT 
"project_id" "user_id" else # Set req-id, project-name and resource in log format iniset $CINDER_CONF DEFAULT logging_context_format_string "%(asctime)s.%(msecs)03d %(levelname)s %(name)s [%(request_id)s %(project_name)s] %(resource)s%(message)s" fi + if [ "$CINDER_USE_MOD_WSGI" == "True" ]; then + _cinder_config_apache_wsgi + fi + if [[ -r $CINDER_PLUGINS/$CINDER_DRIVER ]]; then configure_cinder_driver fi @@ -399,6 +447,13 @@ function install_cinder { install_package tgt fi fi + + if [ "$CINDER_USE_MOD_WSGI" == "True" ]; then + install_apache_wsgi + if is_ssl_enabled_service "c-api"; then + enable_mod_ssl + fi + fi } # install_cinderclient() - Collect source and prepare @@ -446,10 +501,16 @@ function start_cinder { fi fi - run_process c-api "$CINDER_BIN_DIR/cinder-api --config-file $CINDER_CONF" - echo "Waiting for Cinder API to start..." - if ! wait_for_service $SERVICE_TIMEOUT $service_protocol://$CINDER_SERVICE_HOST:$service_port; then - die $LINENO "c-api did not start" + if [ "$CINDER_USE_MOD_WSGI" == "True" ]; then + enable_apache_site osapi-volume + restart_apache_server + tail_log c-api /var/log/$APACHE_NAME/c-api.log + else + run_process c-api "$CINDER_BIN_DIR/cinder-api --config-file $CINDER_CONF" + echo "Waiting for Cinder API to start..." + if ! 
wait_for_service $SERVICE_TIMEOUT $service_protocol://$CINDER_SERVICE_HOST:$service_port; then + die $LINENO "c-api did not start" + fi fi run_process c-sch "$CINDER_BIN_DIR/cinder-scheduler --config-file $CINDER_CONF" @@ -468,9 +529,16 @@ function start_cinder { # stop_cinder() - Stop running processes function stop_cinder { + if [ "$CINDER_USE_MOD_WSGI" == "True" ]; then + disable_apache_site osapi-volume + restart_apache_server + else + stop_process c-api + fi + # Kill the cinder screen windows local serv - for serv in c-api c-bak c-sch c-vol; do + for serv in c-bak c-sch c-vol; do stop_process $serv done } From d2999d0d0cc4d283873ff0d0951c4fdacd234dd1 Mon Sep 17 00:00:00 2001 From: Chris Dent Date: Tue, 29 Sep 2015 10:02:32 +0000 Subject: [PATCH 0504/2941] Add ceilometer to plugin registry Ceilometer is now removed from devstack and only exists as a plugin so it should be in the registry. Unfortunately the length of the URL changed the table formatting so the diff is much larger than the semantic change. Change-Id: Ibe8e27e97294c2d13be8f22f41eea27775811eec --- doc/source/plugin-registry.rst | 44 ++++++++++++++++++---------------- 1 file changed, 23 insertions(+), 21 deletions(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 0feaafb275..85fd7cc065 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -17,27 +17,29 @@ Official OpenStack Projects The following are plugins that exist for official OpenStack projects. 
-+--------------------+-------------------------------------------+--------------------+ -|Plugin Name |URL |Comments | -+--------------------+-------------------------------------------+--------------------+ -|aodh |git://git.openstack.org/openstack/aodh | alarming | -+--------------------+-------------------------------------------+--------------------+ -|gnocchi |git://git.openstack.org/openstack/gnocchi | metric | -+--------------------+-------------------------------------------+--------------------+ -|magnum |git://git.openstack.org/openstack/magnum | | -+--------------------+-------------------------------------------+--------------------+ -|manila |git://git.openstack.org/openstack/manila | file shares | -+--------------------+-------------------------------------------+--------------------+ -|mistral |git://git.openstack.org/openstack/mistral | | -+--------------------+-------------------------------------------+--------------------+ -|rally |git://git.openstack.org/openstack/rally | | -+--------------------+-------------------------------------------+--------------------+ -|sahara |git://git.openstack.org/openstack/sahara | | -+--------------------+-------------------------------------------+--------------------+ -|trove |git://git.openstack.org/openstack/trove | | -+--------------------+-------------------------------------------+--------------------+ -|zaqar |git://git.openstack.org/openstack/zaqar | | -+--------------------+-------------------------------------------+--------------------+ ++------------------+---------------------------------------------+--------------------+ +|Plugin Name |URL |Comments | ++------------------+---------------------------------------------+--------------------+ +|aodh |git://git.openstack.org/openstack/aodh | alarming | ++------------------+---------------------------------------------+--------------------+ +|ceilometer |git://git.openstack.org/openstack/ceilometer | metering | 
++------------------+---------------------------------------------+--------------------+ +|gnocchi |git://git.openstack.org/openstack/gnocchi | metric | ++------------------+---------------------------------------------+--------------------+ +|magnum |git://git.openstack.org/openstack/magnum | | ++------------------+---------------------------------------------+--------------------+ +|manila |git://git.openstack.org/openstack/manila | file shares | ++------------------+---------------------------------------------+--------------------+ +|mistral |git://git.openstack.org/openstack/mistral | | ++------------------+---------------------------------------------+--------------------+ +|rally |git://git.openstack.org/openstack/rally | | ++------------------+---------------------------------------------+--------------------+ +|sahara |git://git.openstack.org/openstack/sahara | | ++------------------+---------------------------------------------+--------------------+ +|trove |git://git.openstack.org/openstack/trove | | ++------------------+---------------------------------------------+--------------------+ +|zaqar |git://git.openstack.org/openstack/zaqar | | ++------------------+---------------------------------------------+--------------------+ From c35eee5dbbb6b4dbc2901ebef4c4d88780aa74ec Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Mon, 28 Sep 2015 14:46:27 -0700 Subject: [PATCH 0505/2941] use nproc/2 workers for large ops job Commit 1ce19ab76d67a89b04f907f1d292d013a3b699e0 dropped API_WORKERS from nproc/2 to nproc/4 and also started using API_WORKERS for the number of conductor workers, so in gate runs that dropped conductor workers from 8 to 2. We're now seeing instance build timeouts in the large ops job. This change goes back to nproc/2 for the large ops job (VIRT_DRIVER=='fake'). 
Closes-Bug: #1500615 Change-Id: Ie6ef855fce0a99c930d479b7459c15b69e8de499 --- stackrc | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/stackrc b/stackrc index e010b452c2..c7c631362e 100644 --- a/stackrc +++ b/stackrc @@ -650,7 +650,12 @@ SERVICE_PROTOCOL=${SERVICE_PROTOCOL:-http} # the memory used where there are a large number of CPUs present # (the default number of workers for many services is the number of CPUs) # Also sets the minimum number of workers to 2. -API_WORKERS=${API_WORKERS:=$(( ($(nproc)/4)<2 ? 2 : ($(nproc)/4) ))} +if [[ "$VIRT_DRIVER" = 'fake' ]]; then + # we need more workers for the large ops job + API_WORKERS=${API_WORKERS:=$(( ($(nproc)/2)<2 ? 2 : ($(nproc)/2) ))} +else + API_WORKERS=${API_WORKERS:=$(( ($(nproc)/4)<2 ? 2 : ($(nproc)/4) ))} +fi # Service startup timeout SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60} From 7dd890d6e13e3bc021952ed1e4b81d3ae4bb4356 Mon Sep 17 00:00:00 2001 From: Olivier Lemasle Date: Mon, 14 Sep 2015 14:21:12 +0200 Subject: [PATCH 0506/2941] Install python db client if a db backend is configured If `DATABASE_TYPE` is configured in `local.conf`, the database backend is currently configured with `initialize_database_backends` even if no database backend is enabled. On a multi-nodes Devstack environment, such as devstack-vagrant, the compute node currently fails because it does not have PyMysql. This compute node has no database backend enabled, but has to connect to the database on another node. We should install the python client if DATABASE_TYPE is set, even if no database backend is enabled. 
Closes-Bug: 1501001 Change-Id: Iffd5f7243a0dfdbe56cf6b9a87b96ed7678c81dd --- stack.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/stack.sh b/stack.sh index 700a0aecad..7bc9c20fcf 100755 --- a/stack.sh +++ b/stack.sh @@ -736,6 +736,8 @@ install_rpc_backend if is_service_enabled $DATABASE_BACKENDS; then install_database +fi +if [ -n "$DATABASE_TYPE" ]; then install_database_python fi From 050a0d5b304a013e23cd5909abf6e11b7dda5f18 Mon Sep 17 00:00:00 2001 From: Steve Martinelli Date: Sun, 6 Sep 2015 22:03:54 +0000 Subject: [PATCH 0507/2941] Revert "Revert "Convert identity defaults to keystone v3 api"" This reverts commit f768787bdd6dddf2790f83a884618d29677ca77c. And sets OS_AUTH_VERSION so swift CLI doesn't fall flat when not using v2 keystone Change-Id: If44a7e0d85e48020a3c90d8c5c027513129f0f3b --- functions-common | 47 +++++++++-------------------------------------- lib/swift | 6 +++--- stack.sh | 15 ++++++++------- 3 files changed, 20 insertions(+), 48 deletions(-) diff --git a/functions-common b/functions-common index a7fec41bb9..fb774002ec 100644 --- a/functions-common +++ b/functions-common @@ -88,9 +88,9 @@ function write_clouds_yaml { --file $CLOUDS_YAML \ --os-cloud devstack \ --os-region-name $REGION_NAME \ - --os-identity-api-version $IDENTITY_API_VERSION \ + --os-identity-api-version 3 \ $CA_CERT_ARG \ - --os-auth-url $KEYSTONE_AUTH_URI/v$IDENTITY_API_VERSION \ + --os-auth-url $KEYSTONE_AUTH_URI \ --os-username demo \ --os-password $ADMIN_PASSWORD \ --os-project-name demo @@ -98,9 +98,9 @@ function write_clouds_yaml { --file $CLOUDS_YAML \ --os-cloud devstack-admin \ --os-region-name $REGION_NAME \ - --os-identity-api-version $IDENTITY_API_VERSION \ + --os-identity-api-version 3 \ $CA_CERT_ARG \ - --os-auth-url $KEYSTONE_AUTH_URI/v$IDENTITY_API_VERSION \ + --os-auth-url $KEYSTONE_AUTH_URI \ --os-username admin \ --os-password $ADMIN_PASSWORD \ --os-project-name admin @@ -735,16 +735,13 @@ function policy_add { # Usage: get_or_create_domain function 
get_or_create_domain { local domain_id - local os_url="$KEYSTONE_SERVICE_URI_V3" # Gets domain id domain_id=$( # Gets domain id - openstack --os-token=$OS_TOKEN --os-url=$os_url \ - --os-identity-api-version=3 domain show $1 \ + openstack domain show $1 \ -f value -c id 2>/dev/null || # Creates new domain - openstack --os-token=$OS_TOKEN --os-url=$os_url \ - --os-identity-api-version=3 domain create $1 \ + openstack domain create $1 \ --description "$2" \ -f value -c id ) @@ -755,13 +752,11 @@ function get_or_create_domain { # Usage: get_or_create_group [] function get_or_create_group { local desc="${3:-}" - local os_url="$KEYSTONE_SERVICE_URI_V3" local group_id # Gets group id group_id=$( # Creates new group with --or-show - openstack --os-token=$OS_TOKEN --os-url=$os_url \ - --os-identity-api-version=3 group create $1 \ + openstack group create $1 \ --domain $2 --description "$desc" --or-show \ -f value -c id ) @@ -783,8 +778,6 @@ function get_or_create_user { openstack user create \ $1 \ --password "$2" \ - --os-url=$KEYSTONE_SERVICE_URI_V3 \ - --os-identity-api-version=3 \ --domain=$3 \ $email \ --or-show \ @@ -799,9 +792,7 @@ function get_or_create_project { local project_id project_id=$( # Creates new project with --or-show - openstack --os-url=$KEYSTONE_SERVICE_URI_V3 \ - --os-identity-api-version=3 \ - project create $1 \ + openstack project create $1 \ --domain=$2 \ --or-show -f value -c id ) @@ -815,8 +806,6 @@ function get_or_create_role { role_id=$( # Creates role with --or-show openstack role create $1 \ - --os-url=$KEYSTONE_SERVICE_URI_V3 \ - --os-identity-api-version=3 \ --or-show -f value -c id ) echo $role_id @@ -829,8 +818,6 @@ function get_or_add_user_project_role { # Gets user role id user_role_id=$(openstack role list \ --user $2 \ - --os-url=$KEYSTONE_SERVICE_URI_V3 \ - --os-identity-api-version=3 \ --column "ID" \ --project $3 \ --column "Name" \ @@ -839,13 +826,9 @@ function get_or_add_user_project_role { # Adds role to user and get it 
openstack role add $1 \ --user $2 \ - --project $3 \ - --os-url=$KEYSTONE_SERVICE_URI_V3 \ - --os-identity-api-version=3 + --project $3 user_role_id=$(openstack role list \ --user $2 \ - --os-url=$KEYSTONE_SERVICE_URI_V3 \ - --os-identity-api-version=3 \ --column "ID" \ --project $3 \ --column "Name" \ @@ -860,21 +843,15 @@ function get_or_add_group_project_role { local group_role_id # Gets group role id group_role_id=$(openstack role list \ - --os-url=$KEYSTONE_SERVICE_URI_V3 \ - --os-identity-api-version=3 \ --group $2 \ --project $3 \ -c "ID" -f value) if [[ -z "$group_role_id" ]]; then # Adds role to group and get it openstack role add $1 \ - --os-url=$KEYSTONE_SERVICE_URI_V3 \ - --os-identity-api-version=3 \ --group $2 \ --project $3 group_role_id=$(openstack role list \ - --os-url=$KEYSTONE_SERVICE_URI_V3 \ - --os-identity-api-version=3 \ --group $2 \ --project $3 \ -c "ID" -f value) @@ -892,8 +869,6 @@ function get_or_create_service { openstack service show $2 -f value -c id 2>/dev/null || # Creates new service if not exists openstack service create \ - --os-url $KEYSTONE_SERVICE_URI_V3 \ - --os-identity-api-version=3 \ $2 \ --name $1 \ --description="$3" \ @@ -912,8 +887,6 @@ function _get_or_create_endpoint_with_interface { # gets support for this, the check for the region name can be removed. 
# Related bug in keystone: https://bugs.launchpad.net/keystone/+bug/1482772 endpoint_id=$(openstack endpoint list \ - --os-url $KEYSTONE_SERVICE_URI_V3 \ - --os-identity-api-version=3 \ --service $1 \ --interface $2 \ --region $4 \ @@ -921,8 +894,6 @@ function _get_or_create_endpoint_with_interface { if [[ -z "$endpoint_id" ]]; then # Creates new endpoint endpoint_id=$(openstack endpoint create \ - --os-url $KEYSTONE_SERVICE_URI_V3 \ - --os-identity-api-version=3 \ $1 $2 $3 --region $4 -f value -c id) fi diff --git a/lib/swift b/lib/swift index 645bfd7cd9..f0eb56abbe 100644 --- a/lib/swift +++ b/lib/swift @@ -799,10 +799,10 @@ function stop_swift { function swift_configure_tempurls { OS_USERNAME=swift \ - OS_TENANT_NAME=$SERVICE_TENANT_NAME \ + OS_PROJECT_NAME=$SERVICE_TENANT_NAME \ OS_PASSWORD=$SERVICE_PASSWORD \ - OS_AUTH_URL=$KEYSTONE_AUTH_URI/v$IDENTITY_API_VERSION \ - swift post -m "Temp-URL-Key: $SWIFT_TEMPURL_KEY" + OS_AUTH_URL=$SERVICE_ENDPOINT \ + swift post --auth-version 3 -m "Temp-URL-Key: $SWIFT_TEMPURL_KEY" } # Restore xtrace diff --git a/stack.sh b/stack.sh index cd8a11e66c..11dd2009e8 100755 --- a/stack.sh +++ b/stack.sh @@ -973,13 +973,15 @@ if is_service_enabled keystone; then start_keystone fi + export OS_IDENTITY_API_VERSION=3 + # Set up a temporary admin URI for Keystone - SERVICE_ENDPOINT=$KEYSTONE_AUTH_URI/v2.0 + SERVICE_ENDPOINT=$KEYSTONE_AUTH_URI/v3 if is_service_enabled tls-proxy; then export OS_CACERT=$INT_CA_DIR/ca-chain.pem # Until the client support is fixed, just use the internal endpoint - SERVICE_ENDPOINT=http://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT_INT/v2.0 + SERVICE_ENDPOINT=http://$KEYSTONE_AUTH_HOST:$KEYSTONE_AUTH_PORT_INT/v3 fi # Setup OpenStackClient token-endpoint auth @@ -1003,14 +1005,13 @@ if is_service_enabled keystone; then # Begone token auth unset OS_TOKEN OS_URL - # force set to use v2 identity authentication even with v3 commands - export OS_AUTH_TYPE=v2password - # Set up password auth credentials now that 
Keystone is bootstrapped - export OS_AUTH_URL=$SERVICE_ENDPOINT - export OS_TENANT_NAME=admin + export OS_AUTH_URL=$KEYSTONE_AUTH_URI export OS_USERNAME=admin + export OS_USER_DOMAIN_ID=default export OS_PASSWORD=$ADMIN_PASSWORD + export OS_PROJECT_NAME=admin + export OS_PROJECT_DOMAIN_ID=default export OS_REGION_NAME=$REGION_NAME fi From c295bca61fbef22d4816b2db8cec40e924c709c4 Mon Sep 17 00:00:00 2001 From: Nick Date: Tue, 4 Aug 2015 09:28:19 +0800 Subject: [PATCH 0508/2941] Fix tunneling support for linuxbridge-agent When I deploy linuxbridge-agent and enable tunneling, the configuration of neutron isn't right. It lacks the whole section [vxlan] to be properly configured. Change-Id: Ib3bfe0f3445f466f4dbb36f7f0cb0d940114e7f6 Closes-Bug: #1481126 --- lib/neutron_plugins/linuxbridge_agent | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent index fefc1c33a8..bd4438db04 100755 --- a/lib/neutron_plugins/linuxbridge_agent +++ b/lib/neutron_plugins/linuxbridge_agent @@ -69,6 +69,18 @@ function neutron_plugin_configure_plugin_agent { fi AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-linuxbridge-agent" iniset /$Q_PLUGIN_CONF_FILE agent tunnel_types $Q_TUNNEL_TYPES + + # Configure vxlan tunneling + if [[ "$ENABLE_TENANT_TUNNELS" == "True" ]]; then + if [[ "$Q_ML2_TENANT_NETWORK_TYPE" == "vxlan" ]]; then + iniset /$Q_PLUGIN_CONF_FILE vxlan enable_vxlan "True" + iniset /$Q_PLUGIN_CONF_FILE vxlan local_ip $TUNNEL_ENDPOINT_IP + else + iniset /$Q_PLUGIN_CONF_FILE vxlan enable_vxlan "False" + fi + else + iniset /$Q_PLUGIN_CONF_FILE vxlan enable_vxlan "False" + fi } function neutron_plugin_setup_interface_driver { From 713fd2f6c644e13ed8ad7e8d819f6a3d44ff5370 Mon Sep 17 00:00:00 2001 From: Sirushti Murugesan Date: Wed, 30 Sep 2015 15:12:50 +0530 Subject: [PATCH 0509/2941] Additionally install test-requirements with pip_install When moving to Python 3, we also need to install test-requriements to 
allow projects to install any python 3 test dependencies they might be missing otherwise. Change-Id: I2d19aa2f7ec8de869a82aa7764ab72cc8693101f --- inc/python | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/inc/python b/inc/python index 210a9dbdfe..fd0d616b62 100644 --- a/inc/python +++ b/inc/python @@ -124,7 +124,7 @@ function pip_install { $@ # Also install test requirements - local test_req="$@/test-requirements.txt" + local test_req="${!#}/test-requirements.txt" if [[ -e "$test_req" ]]; then echo "Installing test-requirements for $test_req" $sudo_pip \ From 36218e6c50c9eb22003eefe0389b00cbf7132dfb Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 30 Sep 2015 10:33:57 +0000 Subject: [PATCH 0510/2941] Revert "Disable Cinder v1 API support by default" There has been a ton of fall out from this change, and I think it's been premature. We should revert and try again when more of the client space supports this. This reverts commit a29434460e869b7bb397044d8f073531e4ee112d. Change-Id: I1658dc48a024627be0fdb39c46137aaa3d9b911a --- doc/source/configuration.rst | 7 ------- lib/cinder | 29 ++++++++++++----------------- lib/tempest | 9 --------- 3 files changed, 12 insertions(+), 33 deletions(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index 3bd246d870..983f5c0aae 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -587,13 +587,6 @@ with ``VOLUME_BACKING_FILE_SIZE``. VOLUME_BACKING_FILE_SIZE=10250M -Cinder v1 API is depricated and disabled by default. You can enable v1 API by -setting ``CINDER_ENABLE_V1_API`` to ``True``. - - :: - CINDER_ENABLE_V1_API=True - - Keystone ~~~~~~~~ diff --git a/lib/cinder b/lib/cinder index f0b0f1d7e5..26277ccaba 100644 --- a/lib/cinder +++ b/lib/cinder @@ -27,9 +27,6 @@ set +o xtrace # Defaults # -------- -# NOTE (e0ne): Cinder API v1 is deprecated and will be disabled by default. 
-CINDER_ENABLE_V1_API=$(trueorfalse False CINDER_ENABLE_V1_API) - # set up default driver CINDER_DRIVER=${CINDER_DRIVER:-default} CINDER_PLUGINS=$TOP_DIR/lib/cinder_plugins @@ -228,12 +225,9 @@ function configure_cinder { iniset $CINDER_CONF oslo_concurrency lock_path $CINDER_STATE_PATH iniset $CINDER_CONF DEFAULT periodic_interval $CINDER_PERIODIC_INTERVAL # NOTE(thingee): Cinder V1 API is deprecated and defaults to off as of - # Juno. - if [[ ${CINDER_ENABLE_V1_API} = True ]]; then - iniset $CINDER_CONF DEFAULT enable_v1_api true - else - iniset $CINDER_CONF DEFAULT enable_v1_api false - fi + # Juno. Keep it enabled so we can continue testing while it's still + # supported. + iniset $CINDER_CONF DEFAULT enable_v1_api true iniset $CINDER_CONF DEFAULT os_region_name "$REGION_NAME" @@ -332,13 +326,12 @@ function create_cinder_accounts { if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - if [[ ${CINDER_ENABLE_V1_API} = True ]]; then - get_or_create_service "cinder" "volume" "Cinder Volume Service" - get_or_create_endpoint "volume" "$REGION_NAME" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \ - "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" - fi + get_or_create_service "cinder" "volume" "Cinder Volume Service" + get_or_create_endpoint "volume" "$REGION_NAME" \ + "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \ + "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" \ + "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v1/\$(tenant_id)s" + get_or_create_service "cinderv2" "volumev2" "Cinder Volume Service V2" get_or_create_endpoint "volumev2" "$REGION_NAME" \ "$CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT/v2/\$(tenant_id)s" \ @@ -489,7 +482,9 @@ function 
create_volume_types { local be be_name for be in ${CINDER_ENABLED_BACKENDS//,/ }; do be_name=${be##*:} - openstack volume type create --os-volume-api-version 2 --property volume_backend_name="${be_name}" ${be_name} + # FIXME(jamielennox): Remove --os-volume-api-version pinning when + # osc supports volume type create on v2 api. bug #1475060 + openstack volume type create --os-volume-api-version 1 --property volume_backend_name="${be_name}" ${be_name} done fi } diff --git a/lib/tempest b/lib/tempest index 3624b9605e..d372e0f6ca 100644 --- a/lib/tempest +++ b/lib/tempest @@ -494,15 +494,6 @@ function configure_tempest { iniset $TEMPEST_CONFIG volume-feature-enabled backup False fi - # Use only Cinder API v2 - if [[ ${CINDER_ENABLE_V1_API} = True ]]; then - iniset $TEMPEST_CONFIG volume-feature-enabled api_v1 True - iniset $TEMPEST_CONFIG volume catalog_type volume - else - iniset $TEMPEST_CONFIG volume-feature-enabled api_v1 False - iniset $TEMPEST_CONFIG volume catalog_type volumev2 - fi - # Using ``CINDER_ENABLED_BACKENDS`` if [[ -n "$CINDER_ENABLED_BACKENDS" ]] && [[ $CINDER_ENABLED_BACKENDS =~ .*,.* ]]; then iniset $TEMPEST_CONFIG volume-feature-enabled multi_backend "True" From bd5e6b16590f852402ae60eb6e2b45178be85870 Mon Sep 17 00:00:00 2001 From: Aaron Rosen Date: Fri, 25 Sep 2015 17:55:45 -0700 Subject: [PATCH 0511/2941] Remove unnecessary execute permissions These files have acquired execute permissions that are not strictly necessary because they are being sourced, and not intended to be run separately. 
Restore to 644 Change-Id: I0b8654123416a07521502b61610ca45c94494a07 --- lib/neutron-legacy | 0 lib/neutron_plugins/linuxbridge_agent | 0 lib/neutron_plugins/ml2 | 0 lib/neutron_plugins/openvswitch_agent | 0 lib/neutron_plugins/ovs_base | 0 lib/nova_plugins/functions-libvirt | 0 6 files changed, 0 insertions(+), 0 deletions(-) mode change 100755 => 100644 lib/neutron-legacy mode change 100755 => 100644 lib/neutron_plugins/linuxbridge_agent mode change 100755 => 100644 lib/neutron_plugins/ml2 mode change 100755 => 100644 lib/neutron_plugins/openvswitch_agent mode change 100755 => 100644 lib/neutron_plugins/ovs_base mode change 100755 => 100644 lib/nova_plugins/functions-libvirt diff --git a/lib/neutron-legacy b/lib/neutron-legacy old mode 100755 new mode 100644 diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent old mode 100755 new mode 100644 diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 old mode 100755 new mode 100644 diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent old mode 100755 new mode 100644 diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base old mode 100755 new mode 100644 diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt old mode 100755 new mode 100644 From ce7246a34ba51a5ccff0ac08d6e85a8cda7d275b Mon Sep 17 00:00:00 2001 From: Dan Smith Date: Thu, 23 Apr 2015 09:41:06 -0700 Subject: [PATCH 0512/2941] Workaround potential failure to shutdown services Kill them twice to make sure they're good'n'dead. There is a supposed fix to oslo-incubator code, but we're working around that here in the meantime with this change. This returned in Liberty. 
Change-Id: I02a7af995dc7de857c4efcf2cef2f95d357c007a Related-Bug: #1446583 (cherry picked from commit 953baa7998f253681ed31013fd18bd8aa8098b34) --- functions-common | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/functions-common b/functions-common index a7fec41bb9..cfe0c8d4ba 100644 --- a/functions-common +++ b/functions-common @@ -1476,6 +1476,22 @@ function stop_process { # Kill via pid if we have one available if [[ -r $SERVICE_DIR/$SCREEN_NAME/$service.pid ]]; then pkill -g $(cat $SERVICE_DIR/$SCREEN_NAME/$service.pid) + # oslo.service tends to stop actually shutting down + # reliably in between releases because someone believes it + # is dying too early due to some inflight work they + # have. This is a tension. It happens often enough we're + # going to just account for it in devstack and assume it + # doesn't work. + # + # Set OSLO_SERVICE_WORKS=True to skip this block + if [[ -z "$OSLO_SERVICE_WORKS" ]]; then + # TODO(danms): Remove this double-kill when we have + # this fixed in all services: + # https://bugs.launchpad.net/oslo-incubator/+bug/1446583 + sleep 1 + # /bin/true becakse pkill on a non existant process returns an error + pkill -g $(cat $SERVICE_DIR/$SCREEN_NAME/$service.pid) || /bin/true + fi rm $SERVICE_DIR/$SCREEN_NAME/$service.pid fi if [[ "$USE_SCREEN" = "True" ]]; then From dbe0868d11531204b1dcc3a7eafdf9da711a7cc6 Mon Sep 17 00:00:00 2001 From: Denis Afonso Date: Fri, 2 Oct 2015 23:51:41 -0400 Subject: [PATCH 0513/2941] Change the default bind_port for swift The default bind_port (6011-6013) in the sample config files for swift use port numbers that are in the range registered by X11 (6000-6063) and can prevent swift from starting if the ports are in use. We should use an unregistered range (6611-6613). 
Change-Id: Ifd95b99004aead5ddc8ae1a8dd3ccd9c4f2abe91 Closes-Bug: #1254328 --- lib/swift | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/swift b/lib/swift index 645bfd7cd9..caf2fdf00f 100644 --- a/lib/swift +++ b/lib/swift @@ -130,9 +130,9 @@ SWIFT_MAX_HEADER_SIZE=${SWIFT_MAX_HEADER_SIZE:-16384} # Port bases used in port number calclution for the service "nodes" # The specified port number will be used, the additinal ports calculated by # base_port + node_num * 10 -OBJECT_PORT_BASE=${OBJECT_PORT_BASE:-6013} -CONTAINER_PORT_BASE=${CONTAINER_PORT_BASE:-6011} -ACCOUNT_PORT_BASE=${ACCOUNT_PORT_BASE:-6012} +OBJECT_PORT_BASE=${OBJECT_PORT_BASE:-6613} +CONTAINER_PORT_BASE=${CONTAINER_PORT_BASE:-6611} +ACCOUNT_PORT_BASE=${ACCOUNT_PORT_BASE:-6612} # Enable tempurl feature SWIFT_ENABLE_TEMPURLS=${SWIFT_ENABLE_TEMPURLS:-False} From 1759618adb90853a76b77bb6ba24f2bed8b3b1f5 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 5 Oct 2015 15:26:43 -0400 Subject: [PATCH 0514/2941] update faq entry about running other branches Change-Id: I4f982f9050024245c4a656e9535d4fdfb4413f36 --- doc/source/faq.rst | 32 +++++++++++++++++++------------- 1 file changed, 19 insertions(+), 13 deletions(-) diff --git a/doc/source/faq.rst b/doc/source/faq.rst index 0db8932e9c..3562bfacee 100644 --- a/doc/source/faq.rst +++ b/doc/source/faq.rst @@ -124,24 +124,30 @@ Of course! enable_service q-svc -How do I run a specific OpenStack milestone? +How do I run a specific OpenStack release? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -OpenStack milestones have tags set in the git repo. Set the -appropriate tag in the ``*_BRANCH`` variables in ``local.conf``. -Swift is on its own release schedule so pick a tag in the Swift repo -that is just before the milestone release. For example: +DevStack master tracks the upstream master of all the projects. If you +would like to run a stable branch of OpenStack, you should use the +corresponding stable branch of DevStack as well. 
For instance the +``stable/kilo`` version of DevStack will already default to all the +projects running at ``stable/kilo`` levels. - :: +Note: it's also possible to manually adjust the ``*_BRANCH`` variables +further if you would like to test specific milestones, or even custom +out of tree branches. This is done with entries like the following in +your ``local.conf`` + +:: [[local|localrc]] - GLANCE_BRANCH=stable/kilo - HORIZON_BRANCH=stable/kilo - KEYSTONE_BRANCH=stable/kilo - NOVA_BRANCH=stable/kilo - GLANCE_BRANCH=stable/kilo - NEUTRON_BRANCH=stable/kilo - SWIFT_BRANCH=2.3.0 + GLANCE_BRANCH=11.0.0.0rc1 + NOVA_BRANCH=12.0.0.0.rc1 + + +Upstream DevStack is only tested with master and stable +branches. Setting custom BRANCH definitions is not guarunteed to +produce working results. What can I do about RabbitMQ not wanting to start on my fresh new VM? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ From c961e791c1068c3054260c0fa59eed412a1aa6ed Mon Sep 17 00:00:00 2001 From: armando-migliaccio Date: Mon, 5 Oct 2015 16:51:33 -0700 Subject: [PATCH 0515/2941] Remove explicit support for OneConvergence plugin This is being removed from the Neutron tree, so there is no need to keep it here anymore. 
Change-Id: Ice869bc445cb9dab6f227c30d38fb9b7ba04442b Depends-on: I949a51873ee5af654b577952d423dd29a6ced8e7 --- lib/neutron_plugins/oneconvergence | 77 ------------------------------ 1 file changed, 77 deletions(-) delete mode 100644 lib/neutron_plugins/oneconvergence diff --git a/lib/neutron_plugins/oneconvergence b/lib/neutron_plugins/oneconvergence deleted file mode 100644 index 0c570e534b..0000000000 --- a/lib/neutron_plugins/oneconvergence +++ /dev/null @@ -1,77 +0,0 @@ -#!/bin/bash -# -# Neutron One Convergence plugin -# ------------------------------ - -# Save trace setting -OC_XTRACE=$(set +o | grep xtrace) -set +o xtrace - -source $TOP_DIR/lib/neutron_plugins/ovs_base - -Q_L3_ENABLED=true -Q_L3_ROUTER_PER_TENANT=true -Q_USE_NAMESPACE=true - -function neutron_plugin_install_agent_packages { - _neutron_ovs_base_install_agent_packages -} -# Configure common parameters -function neutron_plugin_configure_common { - - Q_PLUGIN_CONF_PATH=etc/neutron/plugins/oneconvergence - Q_PLUGIN_CONF_FILENAME=nvsdplugin.ini - Q_PLUGIN_CLASS="neutron.plugins.oneconvergence.plugin.OneConvergencePluginV2" -} - -# Configure plugin specific information -function neutron_plugin_configure_service { - iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_ip $NVSD_IP - iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_port $NVSD_PORT - iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_user $NVSD_USER - iniset /$Q_PLUGIN_CONF_FILE nvsd nvsd_passwd $NVSD_PASSWD -} - -function neutron_plugin_configure_debug_command { - _neutron_ovs_base_configure_debug_command -} - -function neutron_plugin_setup_interface_driver { - local conf_file=$1 - iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver -} - -function has_neutron_plugin_security_group { - # 1 means False here - return 0 -} - -function setup_integration_bridge { - _neutron_ovs_base_setup_bridge $OVS_BRIDGE -} - -function neutron_plugin_configure_dhcp_agent { - setup_integration_bridge - iniset $Q_DHCP_CONF_FILE DEFAULT dhcp_agent_manager 
neutron.agent.dhcp_agent.DhcpAgentWithStateReport -} - -function neutron_plugin_configure_l3_agent { - _neutron_ovs_base_configure_l3_agent - iniset $Q_L3_CONF_FILE DEFAULT l3_agent_manager neutron.agent.l3_agent.L3NATAgentWithStateReport -} - -function neutron_plugin_configure_plugin_agent { - - AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-nvsd-agent" - - _neutron_ovs_base_configure_firewall_driver -} - -function neutron_plugin_create_nova_conf { - if ( is_service_enabled n-cpu && ! ( is_service_enabled q-dhcp )) ; then - setup_integration_bridge - fi -} - -# Restore xtrace -$OC_XTRACE From f80c37dd47ac11b6cbd4cd08a0af2c4c17e31198 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Tue, 6 Oct 2015 20:18:15 +1100 Subject: [PATCH 0516/2941] Ignore bashate long-line warnings (E006) Since Ic2532676e46e93f129d590d1fa7a044ef65f50fb bashate warns on long-lines. Traditionally, for whatever reason, devstack hasn't cared too much about long lines unless it really damages readability. So ignore this to avoid thousands of warnings on the long lines. Note even though released bashate doesn't have this, ignoring a missing test doesn't matter. 
Change-Id: I16aeaa3b334fac1eec5085f2cfe26c81c53023a8 --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 788fea9c4f..1c238add8a 100644 --- a/tox.ini +++ b/tox.ini @@ -24,7 +24,7 @@ commands = bash -c "find {toxinidir} \ -wholename \*/inc/\* -or \ # /inc files and -wholename \*/lib/\* \ # /lib files are shell, but \) \ # have no extension - -print0 | xargs -0 bashate -v" + -print0 | xargs -0 bashate -v -iE006" [testenv:docs] deps = From f0131e14b8550d5d3637b29f0151ad280c77cb63 Mon Sep 17 00:00:00 2001 From: Rafael Folco Date: Wed, 23 Sep 2015 12:55:02 -0500 Subject: [PATCH 0517/2941] Nano and Micro flavors should run really small cirros only Guests with large memory requirements can use default flavors, so removing the special flavor for ppc64 since new qemu requires more memory - http://wiki.qemu.org/ChangeLog/2.4 - PowerPC. Users should set DEFAULT_INSTANCE_TYPE to one of the default flavors available in local.conf, as m1.tiny. DocImpact Change-Id: I0fd275dc7342cc2daa83e9a2bd79d30e7defa3e4 --- doc/source/configuration.rst | 22 ++++++++++++++++++++++ lib/tempest | 13 ++----------- 2 files changed, 24 insertions(+), 11 deletions(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index e8de7c4f70..8f311e037b 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -402,6 +402,28 @@ these default images; in that case, you will want to populate IMAGE_URLS="http://foo.bar.com/image.qcow," IMAGE_URLS+="http://foo.bar.com/image2.qcow" + +Instance Type +------------- + +``DEFAULT_INSTANCE_TYPE`` can be used to configure the default instance +type. When this parameter is not specified, Devstack creates additional +micro & nano flavors for really small instances to run Tempest tests. + +For guests with larger memory requirements, ``DEFAULT_INSTANCE_TYPE`` +should be specified in the configuration file so Tempest selects the +default flavors instead. 
+ +KVM on Power with QEMU 2.4 requires 512 MB to load the firmware - +`QEMU 2.4 - PowerPC `__ so users +running instances on ppc64/ppc64le can choose one of the default +created flavors as follows: + + :: + + DEFAULT_INSTANCE_TYPE=m1.tiny + + IP Version ---------- diff --git a/lib/tempest b/lib/tempest index f4d0a6dab0..6eeab4e231 100644 --- a/lib/tempest +++ b/lib/tempest @@ -205,21 +205,12 @@ function configure_tempest { if [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then available_flavors=$(nova flavor-list) if [[ ! ( $available_flavors =~ 'm1.nano' ) ]]; then - if is_arch "ppc64"; then - # Qemu needs at least 128MB of memory to boot on ppc64 - nova flavor-create m1.nano 42 128 0 1 - else - nova flavor-create m1.nano 42 64 0 1 - fi + nova flavor-create m1.nano 42 64 0 1 fi flavor_ref=42 boto_instance_type=m1.nano if [[ ! ( $available_flavors =~ 'm1.micro' ) ]]; then - if is_arch "ppc64"; then - nova flavor-create m1.micro 84 256 0 1 - else - nova flavor-create m1.micro 84 128 0 1 - fi + nova flavor-create m1.micro 84 128 0 1 fi flavor_ref_alt=84 else From bb4654b869722feb400d65869350465d8ff5439c Mon Sep 17 00:00:00 2001 From: Ihar Hrachyshka Date: Tue, 6 Oct 2015 18:09:07 +0200 Subject: [PATCH 0518/2941] Don't configure neutron metadata agent for neutron API access Metadata agent now talks to neutron-server thru AMQP, so there is no use for API access configuration. 
Change-Id: I8f81eea91fe3448d5098e77312f64f2eaba68a68 Depends-On: I254c575c66214f50fb93a94c46c4c9caebfc2937 Closes-Bug: #1502947 --- lib/neutron-legacy | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index e67bd4ae32..8b964a72c8 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -1050,11 +1050,6 @@ function _configure_neutron_metadata_agent { if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then iniset $Q_META_CONF_FILE AGENT root_helper_daemon "$Q_RR_DAEMON_COMMAND" fi - - # Configures keystone for metadata_agent - # The third argument "True" sets auth_url needed to communicate with keystone - _neutron_setup_keystone $Q_META_CONF_FILE DEFAULT True - } function _configure_neutron_ceilometer_notifications { @@ -1212,17 +1207,10 @@ function _neutron_setup_rootwrap { fi } -# Configures keystone integration for neutron service and agents +# Configures keystone integration for neutron service function _neutron_setup_keystone { local conf_file=$1 local section=$2 - local use_auth_url=$3 - - # Configures keystone for metadata_agent - # metadata_agent needs auth_url to communicate with keystone - if [[ "$use_auth_url" == "True" ]]; then - iniset $conf_file $section auth_url $KEYSTONE_SERVICE_URI/v2.0 - fi create_neutron_cache_dir configure_auth_token_middleware $conf_file $Q_ADMIN_USERNAME $NEUTRON_AUTH_CACHE_DIR $section From 872a2622b9dc9295918784114bce4f4f991187b7 Mon Sep 17 00:00:00 2001 From: "Sean M. Collins" Date: Tue, 6 Oct 2015 12:45:06 -0400 Subject: [PATCH 0519/2941] Move the firewall disable section into a misc section It broke the flow of the section it was in. 
Change-Id: I4c6ec7ccbe7e856600037eb5a3a73863319aa232 --- doc/source/guides/neutron.rst | 71 +++++++++++++++++++---------------- 1 file changed, 38 insertions(+), 33 deletions(-) diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst index 2973eb63e8..424844547c 100644 --- a/doc/source/guides/neutron.rst +++ b/doc/source/guides/neutron.rst @@ -125,39 +125,6 @@ connectivity. -Disabling Next Generation Firewall Tools -======================================== - -DevStack does not properly operate with modern firewall tools. Specifically -it will appear as if the guest VM can access the external network via ICMP, -but UDP and TCP packets will not be delivered to the guest VM. The root cause -of the issue is that both ufw (Uncomplicated Firewall) and firewalld (Fedora's -firewall manager) apply firewall rules to all interfaces in the system, rather -then per-device. One solution to this problem is to revert to iptables -functionality. - -To get a functional firewall configuration for Fedora do the following: - -:: - - sudo service iptables save - sudo systemctl disable firewalld - sudo systemctl enable iptables - sudo systemctl stop firewalld - sudo systemctl start iptables - - -To get a functional firewall configuration for distributions containing ufw, -disable ufw. Note ufw is generally not enabled by default in Ubuntu. To -disable ufw if it was enabled, do the following: - -:: - - sudo service iptables save - sudo ufw disable - - - Neutron Networking with Open vSwitch ==================================== @@ -301,3 +268,41 @@ For example, with the above configuration, a bridge is created, named `br-ex` which is managed by Open vSwitch, and the second interface on the compute node, `eth1` is attached to the bridge, to forward traffic sent by guest VMs. + +Miscellaneous Tips +================== + + +Disabling Next Generation Firewall Tools +---------------------------------------- + +DevStack does not properly operate with modern firewall tools. 
Specifically +it will appear as if the guest VM can access the external network via ICMP, +but UDP and TCP packets will not be delivered to the guest VM. The root cause +of the issue is that both ufw (Uncomplicated Firewall) and firewalld (Fedora's +firewall manager) apply firewall rules to all interfaces in the system, rather +then per-device. One solution to this problem is to revert to iptables +functionality. + +To get a functional firewall configuration for Fedora do the following: + +:: + + sudo service iptables save + sudo systemctl disable firewalld + sudo systemctl enable iptables + sudo systemctl stop firewalld + sudo systemctl start iptables + + +To get a functional firewall configuration for distributions containing ufw, +disable ufw. Note ufw is generally not enabled by default in Ubuntu. To +disable ufw if it was enabled, do the following: + +:: + + sudo service iptables save + sudo ufw disable + + + From 433a9b10ddd6fa67d7459c4943a92ce4f488cebc Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 7 Oct 2015 13:29:31 +1100 Subject: [PATCH 0520/2941] Don't set xtrace directly in local call Ia0957b47187c3dcadd46154b17022c4213781112 detects setting local variables with subshell commands. Although this is a particuarly benign case, it trips the test. Rather than putting in an ignore for this, we can easily change it to make the test pass. This seems better than putting in special work-arounds to bashate, etc. 
Change-Id: I37c3967c0f2d780a636a7d26cda83755085c5c69 --- functions-common | 33 ++++++++++++++++++++++----------- inc/ini-config | 30 ++++++++++++++++++++---------- inc/python | 6 ++++-- 3 files changed, 46 insertions(+), 23 deletions(-) diff --git a/functions-common b/functions-common index cfe0c8d4ba..53b64d668a 100644 --- a/functions-common +++ b/functions-common @@ -111,7 +111,8 @@ function write_clouds_yaml { # Accepts as True: 1 yes Yes YES true True TRUE # VAR=$(trueorfalse default-value test-value) function trueorfalse { - local xtrace=$(set +o | grep xtrace) + local xtrace + xtrace=$(set +o | grep xtrace) set +o xtrace local default=$1 @@ -169,7 +170,8 @@ function die { # die_if_not_set $LINENO env-var "message" function die_if_not_set { local exitcode=$? - local xtrace=$(set +o | grep xtrace) + local xtrace + xtrace=$(set +o | grep xtrace) set +o xtrace local line=$1; shift local evar=$1; shift @@ -183,7 +185,8 @@ function die_if_not_set { # err $LINENO "message" function err { local exitcode=$? - local xtrace=$(set +o | grep xtrace) + local xtrace + xtrace=$(set +o | grep xtrace) set +o xtrace local msg="[ERROR] ${BASH_SOURCE[2]}:$1 $2" echo $msg 1>&2; @@ -200,7 +203,8 @@ function err { # err_if_not_set $LINENO env-var "message" function err_if_not_set { local exitcode=$? - local xtrace=$(set +o | grep xtrace) + local xtrace + xtrace=$(set +o | grep xtrace) set +o xtrace local line=$1; shift local evar=$1; shift @@ -236,7 +240,8 @@ function is_set { # warn $LINENO "message" function warn { local exitcode=$? - local xtrace=$(set +o | grep xtrace) + local xtrace + xtrace=$(set +o | grep xtrace) set +o xtrace local msg="[WARNING] ${BASH_SOURCE[2]}:$1 $2" echo $msg @@ -986,7 +991,8 @@ function _get_package_dir { # Uses globals ``OFFLINE``, ``*_proxy`` # apt_get operation package [package ...] 
function apt_get { - local xtrace=$(set +o | grep xtrace) + local xtrace + xtrace=$(set +o | grep xtrace) set +o xtrace [[ "$OFFLINE" = "True" || -z "$@" ]] && return @@ -1055,7 +1061,8 @@ function _parse_package_files { # - ``# dist:DISTRO`` or ``dist:DISTRO1,DISTRO2`` limits the selection # of the package to the distros listed. The distro names are case insensitive. function get_packages { - local xtrace=$(set +o | grep xtrace) + local xtrace + xtrace=$(set +o | grep xtrace) set +o xtrace local services=$@ local package_dir=$(_get_package_dir) @@ -1123,7 +1130,8 @@ function get_packages { # The same metadata used in the main DevStack prerequisite files may be used # in these prerequisite files, see get_packages() for more info. function get_plugin_packages { - local xtrace=$(set +o | grep xtrace) + local xtrace + xtrace=$(set +o | grep xtrace) set +o xtrace local files_to_parse="" local package_dir="" @@ -1148,7 +1156,8 @@ function update_package_repo { fi if is_ubuntu; then - local xtrace=$(set +o | grep xtrace) + local xtrace + xtrace=$(set +o | grep xtrace) set +o xtrace if [[ "$REPOS_UPDATED" != "True" || "$RETRY_UPDATE" = "True" ]]; then # if there are transient errors pulling the updates, that's fine. @@ -1854,7 +1863,8 @@ function enable_service { # Uses global ``ENABLED_SERVICES`` # is_service_enabled service [service ...] function is_service_enabled { - local xtrace=$(set +o | grep xtrace) + local xtrace + xtrace=$(set +o | grep xtrace) set +o xtrace local enabled=1 local services=$@ @@ -1933,7 +1943,8 @@ function use_exclusive_service { # Only run the command if the target file (the last arg) is not on an # NFS filesystem. 
function _safe_permission_operation { - local xtrace=$(set +o | grep xtrace) + local xtrace + xtrace=$(set +o | grep xtrace) set +o xtrace local args=( $@ ) local last diff --git a/inc/ini-config b/inc/ini-config index 58386e2441..ba2d827ae9 100644 --- a/inc/ini-config +++ b/inc/ini-config @@ -17,7 +17,8 @@ set +o xtrace # Append a new option in an ini file without replacing the old value # iniadd [-sudo] config-file section option value1 value2 value3 ... function iniadd { - local xtrace=$(set +o | grep xtrace) + local xtrace + xtrace=$(set +o | grep xtrace) set +o xtrace local sudo="" if [ $1 == "-sudo" ]; then @@ -37,7 +38,8 @@ function iniadd { # Comment an option in an INI file # inicomment [-sudo] config-file section option function inicomment { - local xtrace=$(set +o | grep xtrace) + local xtrace + xtrace=$(set +o | grep xtrace) set +o xtrace local sudo="" if [ $1 == "-sudo" ]; then @@ -55,7 +57,8 @@ function inicomment { # Get an option from an INI file # iniget config-file section option function iniget { - local xtrace=$(set +o | grep xtrace) + local xtrace + xtrace=$(set +o | grep xtrace) set +o xtrace local file=$1 local section=$2 @@ -70,7 +73,8 @@ function iniget { # Get a multiple line option from an INI file # iniget_multiline config-file section option function iniget_multiline { - local xtrace=$(set +o | grep xtrace) + local xtrace + xtrace=$(set +o | grep xtrace) set +o xtrace local file=$1 local section=$2 @@ -85,7 +89,8 @@ function iniget_multiline { # Determinate is the given option present in the INI file # ini_has_option config-file section option function ini_has_option { - local xtrace=$(set +o | grep xtrace) + local xtrace + xtrace=$(set +o | grep xtrace) set +o xtrace local file=$1 local section=$2 @@ -107,7 +112,8 @@ function ini_has_option { # # iniadd_literal [-sudo] config-file section option value function iniadd_literal { - local xtrace=$(set +o | grep xtrace) + local xtrace + xtrace=$(set +o | grep xtrace) set +o xtrace local 
sudo="" if [ $1 == "-sudo" ]; then @@ -135,7 +141,8 @@ $option = $value # Remove an option from an INI file # inidelete [-sudo] config-file section option function inidelete { - local xtrace=$(set +o | grep xtrace) + local xtrace + xtrace=$(set +o | grep xtrace) set +o xtrace local sudo="" if [ $1 == "-sudo" ]; then @@ -161,7 +168,8 @@ function inidelete { # iniset [-sudo] config-file section option value # - if the file does not exist, it is created function iniset { - local xtrace=$(set +o | grep xtrace) + local xtrace + xtrace=$(set +o | grep xtrace) set +o xtrace local sudo="" if [ $1 == "-sudo" ]; then @@ -198,7 +206,8 @@ $option = $value # Set a multiple line option in an INI file # iniset_multiline [-sudo] config-file section option value1 value2 valu3 ... function iniset_multiline { - local xtrace=$(set +o | grep xtrace) + local xtrace + xtrace=$(set +o | grep xtrace) set +o xtrace local sudo="" if [ $1 == "-sudo" ]; then @@ -236,7 +245,8 @@ $option = $v # Uncomment an option in an INI file # iniuncomment config-file section option function iniuncomment { - local xtrace=$(set +o | grep xtrace) + local xtrace + xtrace=$(set +o | grep xtrace) set +o xtrace local sudo="" if [ $1 == "-sudo" ]; then diff --git a/inc/python b/inc/python index fd0d616b62..fe7bba6992 100644 --- a/inc/python +++ b/inc/python @@ -38,7 +38,8 @@ function get_pip_command { # Get the path to the direcotry where python executables are installed. # get_python_exec_prefix function get_python_exec_prefix { - local xtrace=$(set +o | grep xtrace) + local xtrace + xtrace=$(set +o | grep xtrace) set +o xtrace if [[ -z "$os_PACKAGE" ]]; then GetOSVersion @@ -69,7 +70,8 @@ function pip_install_gr { # ``PIP_UPGRADE``, ``TRACK_DEPENDS``, ``*_proxy``, # pip_install package [package ...] 
function pip_install { - local xtrace=$(set +o | grep xtrace) + local xtrace + xtrace=$(set +o | grep xtrace) set +o xtrace local upgrade="" local offline=${OFFLINE:-False} From ada886dd43ccc07f48d3a82d8d3d840fe5096c03 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 7 Oct 2015 14:06:26 +1100 Subject: [PATCH 0521/2941] Don't mix declaration and set of locals Ia0957b47187c3dcadd46154b17022c4213781112 proposes to have bashate find instances of setting a local value. The issue is that "local" always returns 0, thus hiding any failure in the commands running to set the variable. This is an automated replacement of such instances Depends-On: I676c805e8f0401f75cc5367eee83b3d880cdef81 Change-Id: I9c8912a8fd596535589b207d7fc553b9d951d3fe --- exercises/neutron-adv-test.sh | 36 ++++++++++----- functions | 9 ++-- functions-common | 36 ++++++++++----- inc/ini-config | 3 +- inc/meta-config | 3 +- inc/python | 18 +++++--- inc/rootwrap | 6 ++- lib/apache | 12 +++-- lib/ceph | 9 ++-- lib/cinder | 6 ++- lib/glance | 6 ++- lib/heat | 3 +- lib/horizon | 9 ++-- lib/ironic | 63 ++++++++++++++++++--------- lib/keystone | 36 ++++++++++----- lib/ldap | 9 ++-- lib/lvm | 6 ++- lib/neutron-legacy | 33 +++++++++----- lib/neutron_plugins/embrane | 3 +- lib/neutron_plugins/ovs_base | 6 ++- lib/nova | 15 ++++--- lib/swift | 42 ++++++++++++------ lib/tempest | 3 +- lib/tls | 3 +- tests/unittest.sh | 24 ++++++---- tools/create_userrc.sh | 3 +- tools/peakmem_tracker.sh | 6 ++- tools/xen/scripts/install-os-vpx.sh | 3 +- tools/xen/scripts/uninstall-os-vpx.sh | 12 +++-- tools/xen/test_functions.sh | 6 ++- 30 files changed, 286 insertions(+), 143 deletions(-) diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh index a0de4ccd37..a8fbd86473 100755 --- a/exercises/neutron-adv-test.sh +++ b/exercises/neutron-adv-test.sh @@ -122,41 +122,47 @@ function foreach_tenant_net { } function get_image_id { - local IMAGE_ID=$(openstack image list | egrep " $DEFAULT_IMAGE_NAME " | get_field 
1) + local IMAGE_ID + IMAGE_ID=$(openstack image list | egrep " $DEFAULT_IMAGE_NAME " | get_field 1) die_if_not_set $LINENO IMAGE_ID "Failure retrieving IMAGE_ID" echo "$IMAGE_ID" } function get_tenant_id { local TENANT_NAME=$1 - local TENANT_ID=`openstack project list | grep " $TENANT_NAME " | head -n 1 | get_field 1` + local TENANT_ID + TENANT_ID=`openstack project list | grep " $TENANT_NAME " | head -n 1 | get_field 1` die_if_not_set $LINENO TENANT_ID "Failure retrieving TENANT_ID for $TENANT_NAME" echo "$TENANT_ID" } function get_user_id { local USER_NAME=$1 - local USER_ID=`openstack user list | grep $USER_NAME | awk '{print $2}'` + local USER_ID + USER_ID=`openstack user list | grep $USER_NAME | awk '{print $2}'` die_if_not_set $LINENO USER_ID "Failure retrieving USER_ID for $USER_NAME" echo "$USER_ID" } function get_role_id { local ROLE_NAME=$1 - local ROLE_ID=`openstack role list | grep $ROLE_NAME | awk '{print $2}'` + local ROLE_ID + ROLE_ID=`openstack role list | grep $ROLE_NAME | awk '{print $2}'` die_if_not_set $LINENO ROLE_ID "Failure retrieving ROLE_ID for $ROLE_NAME" echo "$ROLE_ID" } function get_network_id { local NETWORK_NAME="$1" - local NETWORK_ID=`neutron net-list -F id -- --name=$NETWORK_NAME | awk "NR==4" | awk '{print $2}'` + local NETWORK_ID + NETWORK_ID=`neutron net-list -F id -- --name=$NETWORK_NAME | awk "NR==4" | awk '{print $2}'` echo $NETWORK_ID } function get_flavor_id { local INSTANCE_TYPE=$1 - local FLAVOR_ID=`nova flavor-list | grep $INSTANCE_TYPE | awk '{print $2}'` + local FLAVOR_ID + FLAVOR_ID=`nova flavor-list | grep $INSTANCE_TYPE | awk '{print $2}'` die_if_not_set $LINENO FLAVOR_ID "Failure retrieving FLAVOR_ID for $INSTANCE_TYPE" echo "$FLAVOR_ID" } @@ -185,13 +191,15 @@ function add_tenant { function remove_tenant { local TENANT=$1 - local TENANT_ID=$(get_tenant_id $TENANT) + local TENANT_ID + TENANT_ID=$(get_tenant_id $TENANT) openstack project delete $TENANT_ID } function remove_user { local USER=$1 - local 
USER_ID=$(get_user_id $USER) + local USER_ID + USER_ID=$(get_user_id $USER) openstack user delete $USER_ID } @@ -221,9 +229,11 @@ function create_network { local NET_NAME="${TENANT}-net$NUM" local ROUTER_NAME="${TENANT}-router${NUM}" source $TOP_DIR/openrc admin admin - local TENANT_ID=$(get_tenant_id $TENANT) + local TENANT_ID + TENANT_ID=$(get_tenant_id $TENANT) source $TOP_DIR/openrc $TENANT $TENANT - local NET_ID=$(neutron net-create --tenant-id $TENANT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' ) + local NET_ID + NET_ID=$(neutron net-create --tenant-id $TENANT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' ) die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $TENANT_ID $NET_NAME $EXTRA" neutron subnet-create --ip-version 4 --tenant-id $TENANT_ID --gateway $GATEWAY $NET_ID $CIDR neutron_debug_admin probe-create --device-owner compute $NET_ID @@ -251,7 +261,8 @@ function create_vm { done #TODO (nati) Add multi-nic test #TODO (nati) Add public-net test - local VM_UUID=`nova boot --flavor $(get_flavor_id m1.tiny) \ + local VM_UUID + VM_UUID=`nova boot --flavor $(get_flavor_id m1.tiny) \ --image $(get_image_id) \ $NIC \ $TENANT-server$NUM | grep ' id ' | cut -d"|" -f3 | sed 's/ //g'` @@ -309,7 +320,8 @@ function delete_network { local NUM=$2 local NET_NAME="${TENANT}-net$NUM" source $TOP_DIR/openrc admin admin - local TENANT_ID=$(get_tenant_id $TENANT) + local TENANT_ID + TENANT_ID=$(get_tenant_id $TENANT) #TODO(nati) comment out until l3-agent merged #for res in port subnet net router;do for net_id in `neutron net-list -c id -c name | grep $NET_NAME | awk '{print $2}'`;do diff --git a/functions b/functions index ff95c89ad9..8cf7a25f49 100644 --- a/functions +++ b/functions @@ -264,7 +264,8 @@ function upload_image { ;; *.img) image_name=$(basename "$image" ".img") - local format=$(qemu-img info ${image} | awk '/^file format/ { print $3; exit }') + local format + format=$(qemu-img info ${image} | awk '/^file format/ { print $3; exit }') if [[ 
",qcow2,raw,vdi,vmdk,vpc," =~ ",$format," ]]; then disk_format=$format else @@ -405,7 +406,8 @@ function get_instance_ip { local vm_id=$1 local network_name=$2 local nova_result="$(nova show $vm_id)" - local ip=$(echo "$nova_result" | grep "$network_name" | get_field 2) + local ip + ip=$(echo "$nova_result" | grep "$network_name" | get_field 2) if [[ $ip = "" ]];then echo "$nova_result" die $LINENO "[Fail] Coudn't get ipaddress of VM" @@ -455,7 +457,8 @@ function check_path_perm_sanity { # homedir permissions on RHEL and common practice of making DEST in # the stack user's homedir. - local real_path=$(readlink -f $1) + local real_path + real_path=$(readlink -f $1) local rebuilt_path="" for i in $(echo ${real_path} | tr "/" " "); do rebuilt_path=$rebuilt_path"/"$i diff --git a/functions-common b/functions-common index 53b64d668a..c831b261e1 100644 --- a/functions-common +++ b/functions-common @@ -140,7 +140,8 @@ function isset { # backtrace level function backtrace { local level=$1 - local deep=$((${#BASH_SOURCE[@]} - 1)) + local deep + deep=$((${#BASH_SOURCE[@]} - 1)) echo "[Call Trace]" while [ $level -le $deep ]; do echo "${BASH_SOURCE[$deep]}:${BASH_LINENO[$deep-1]}:${FUNCNAME[$deep-1]}" @@ -477,7 +478,8 @@ function git_clone { local git_remote=$1 local git_dest=$2 local git_ref=$3 - local orig_dir=$(pwd) + local orig_dir + orig_dir=$(pwd) local git_clone_flags="" RECLONE=$(trueorfalse False RECLONE) @@ -641,7 +643,8 @@ function get_default_host_ip { host_ip="" # Find the interface used for the default route host_ip_iface=${host_ip_iface:-$(ip -f $af route | awk '/default/ {print $5}' | head -1)} - local host_ips=$(LC_ALL=C ip -f $af addr show ${host_ip_iface} | sed /temporary/d |awk /$af'/ {split($2,parts,"/"); print parts[1]}') + local host_ips + host_ips=$(LC_ALL=C ip -f $af addr show ${host_ip_iface} | sed /temporary/d |awk /$af'/ {split($2,parts,"/"); print parts[1]}') local ip for ip in $host_ips; do # Attempt to filter out IP addresses that are part of 
the fixed and @@ -690,7 +693,8 @@ function get_field { # copy over a default policy.json and policy.d for projects function install_default_policy { local project=$1 - local project_uc=$(echo $1|tr a-z A-Z) + local project_uc + project_uc=$(echo $1|tr a-z A-Z) local conf_dir="${project_uc}_CONF_DIR" # eval conf dir to get the variable conf_dir="${!conf_dir}" @@ -723,7 +727,8 @@ function policy_add { # Add a terminating comma to policy lines without one # Remove the closing '}' and all lines following to the end-of-file - local tmpfile=$(mktemp) + local tmpfile + tmpfile=$(mktemp) uniq ${policy_file} | sed -e ' s/]$/],/ /^[}]/,$d @@ -945,7 +950,8 @@ function get_or_create_endpoint { # scenarios currently that use the returned id. Ideally this behaviour # should be pushed out to the service setups and let them create the # endpoints they need. - local public_id=$(_get_or_create_endpoint_with_interface $1 public $3 $2) + local public_id + public_id=$(_get_or_create_endpoint_with_interface $1 public $3 $2) _get_or_create_endpoint_with_interface $1 admin $4 $2 _get_or_create_endpoint_with_interface $1 internal $5 $2 @@ -1065,7 +1071,8 @@ function get_packages { xtrace=$(set +o | grep xtrace) set +o xtrace local services=$@ - local package_dir=$(_get_package_dir) + local package_dir + package_dir=$(_get_package_dir) local file_to_parse="" local service="" @@ -1980,8 +1987,10 @@ function address_in_net { local ip=$1 local range=$2 local masklen=${range#*/} - local network=$(maskip ${range%/*} $(cidr2netmask $masklen)) - local subnet=$(maskip $ip $(cidr2netmask $masklen)) + local network + network=$(maskip ${range%/*} $(cidr2netmask $masklen)) + local subnet + subnet=$(maskip $ip $(cidr2netmask $masklen)) [[ $network == $subnet ]] } @@ -2033,7 +2042,8 @@ function export_proxy_variables { # Returns true if the directory is on a filesystem mounted via NFS. 
function is_nfs_directory { - local mount_type=`stat -f -L -c %T $1` + local mount_type + mount_type=`stat -f -L -c %T $1` test "$mount_type" == "nfs" } @@ -2044,13 +2054,15 @@ function maskip { local ip=$1 local mask=$2 local l="${ip%.*}"; local r="${ip#*.}"; local n="${mask%.*}"; local m="${mask#*.}" - local subnet=$((${ip%%.*}&${mask%%.*})).$((${r%%.*}&${m%%.*})).$((${l##*.}&${n##*.})).$((${ip##*.}&${mask##*.})) + local subnet + subnet=$((${ip%%.*}&${mask%%.*})).$((${r%%.*}&${m%%.*})).$((${l##*.}&${n##*.})).$((${ip##*.}&${mask##*.})) echo $subnet } # Return the current python as "python." function python_version { - local python_version=$(python -c 'import sys; print("%s.%s" % sys.version_info[0:2])') + local python_version + python_version=$(python -c 'import sys; print("%s.%s" % sys.version_info[0:2])') echo "python${python_version}" } diff --git a/inc/ini-config b/inc/ini-config index ba2d827ae9..42a66c63b6 100644 --- a/inc/ini-config +++ b/inc/ini-config @@ -196,7 +196,8 @@ function iniset { $option = $value " "$file" else - local sep=$(echo -ne "\x01") + local sep + sep=$(echo -ne "\x01") # Replace it $sudo sed -i -e '/^\['${section}'\]/,/^\[.*\]/ s'${sep}'^\('${option}'[ \t]*=[ \t]*\).*$'${sep}'\1'"${value}"${sep} "$file" fi diff --git a/inc/meta-config b/inc/meta-config index e5f902d1dd..d74db59bb3 100644 --- a/inc/meta-config +++ b/inc/meta-config @@ -89,7 +89,8 @@ function merge_config_file { # note, configfile might be a variable (note the iniset, etc # created in the mega-awk below is "eval"ed too, so we just leave # it alone. - local real_configfile=$(eval echo $configfile) + local real_configfile + real_configfile=$(eval echo $configfile) if [ ! 
-f $real_configfile ]; then touch $real_configfile fi diff --git a/inc/python b/inc/python index fe7bba6992..c7ba51a81d 100644 --- a/inc/python +++ b/inc/python @@ -61,7 +61,8 @@ function get_python_exec_prefix { # pip_install_gr packagename function pip_install_gr { local name=$1 - local clean_name=$(get_from_global_requirements $name) + local clean_name + clean_name=$(get_from_global_requirements $name) pip_install $clean_name } @@ -100,7 +101,8 @@ function pip_install { local cmd_pip=$PIP_VIRTUAL_ENV/bin/pip local sudo_pip="env" else - local cmd_pip=$(get_pip_command) + local cmd_pip + cmd_pip=$(get_pip_command) local sudo_pip="sudo -H" fi fi @@ -109,7 +111,8 @@ function pip_install { # Always apply constraints cmd_pip="$cmd_pip -c $REQUIREMENTS_DIR/upper-constraints.txt" - local pip_version=$(python -c "import pip; \ + local pip_version + pip_version=$(python -c "import pip; \ print(pip.__version__.strip('.')[0])") if (( pip_version<6 )); then die $LINENO "Currently installed pip version ${pip_version} does not" \ @@ -143,7 +146,8 @@ function pip_install { # get_from_global_requirements function get_from_global_requirements { local package=$1 - local required_pkg=$(grep -i -h ^${package} $REQUIREMENTS_DIR/global-requirements.txt | cut -d\# -f1) + local required_pkg + required_pkg=$(grep -i -h ^${package} $REQUIREMENTS_DIR/global-requirements.txt | cut -d\# -f1) if [[ $required_pkg == "" ]]; then die $LINENO "Can't find package $package in requirements" fi @@ -222,7 +226,8 @@ function setup_develop { # practical ways. function is_in_projects_txt { local project_dir=$1 - local project_name=$(basename $project_dir) + local project_name + project_name=$(basename $project_dir) grep -q "/$project_name\$" $REQUIREMENTS_DIR/projects.txt } @@ -241,7 +246,8 @@ function setup_package_with_constraints_edit { if [ -n "$REQUIREMENTS_DIR" ]; then # Constrain this package to this project directory from here on out. 
- local name=$(awk '/^name.*=/ {print $3}' $project_dir/setup.cfg) + local name + name=$(awk '/^name.*=/ {print $3}' $project_dir/setup.cfg) $REQUIREMENTS_DIR/.venv/bin/edit-constraints \ $REQUIREMENTS_DIR/upper-constraints.txt -- $name \ "$flags file://$project_dir#egg=$name" diff --git a/inc/rootwrap b/inc/rootwrap index f91e557e68..63ab59adc7 100644 --- a/inc/rootwrap +++ b/inc/rootwrap @@ -41,7 +41,8 @@ function add_sudo_secure_path { # configure_rootwrap project function configure_rootwrap { local project=$1 - local project_uc=$(echo $1|tr a-z A-Z) + local project_uc + project_uc=$(echo $1|tr a-z A-Z) local bin_dir="${project_uc}_BIN_DIR" bin_dir="${!bin_dir}" local project_dir="${project_uc}_DIR" @@ -60,7 +61,8 @@ function configure_rootwrap { sudo sed -e "s:^filters_path=.*$:filters_path=/etc/${project}/rootwrap.d:" -i /etc/${project}/rootwrap.conf # Set up the rootwrap sudoers - local tempfile=$(mktemp) + local tempfile + tempfile=$(mktemp) # Specify rootwrap.conf as first parameter to rootwrap rootwrap_sudo_cmd="${rootwrap_bin} /etc/${project}/rootwrap.conf *" echo "$STACK_USER ALL=(root) NOPASSWD: $rootwrap_sudo_cmd" >$tempfile diff --git a/lib/apache b/lib/apache index a8e9bc5ad2..17526c74d0 100644 --- a/lib/apache +++ b/lib/apache @@ -72,11 +72,14 @@ function install_apache_wsgi { # various differences between Apache 2.2 and 2.4 that warrant special handling. 
function get_apache_version { if is_ubuntu; then - local version_str=$(sudo /usr/sbin/apache2ctl -v | awk '/Server version/ {print $3}' | cut -f2 -d/) + local version_str + version_str=$(sudo /usr/sbin/apache2ctl -v | awk '/Server version/ {print $3}' | cut -f2 -d/) elif is_fedora; then - local version_str=$(rpm -qa --queryformat '%{VERSION}' httpd) + local version_str + version_str=$(rpm -qa --queryformat '%{VERSION}' httpd) elif is_suse; then - local version_str=$(rpm -qa --queryformat '%{VERSION}' apache2) + local version_str + version_str=$(rpm -qa --queryformat '%{VERSION}' apache2) else exit_distro_not_supported "cannot determine apache version" fi @@ -115,7 +118,8 @@ function get_apache_version { function apache_site_config_for { local site=$@ if is_ubuntu; then - local apache_version=$(get_apache_version) + local apache_version + apache_version=$(get_apache_version) if [[ "$apache_version" == "2.2" ]]; then # Ubuntu 12.04 - Apache 2.2 echo $APACHE_CONF_DIR/${site} diff --git a/lib/ceph b/lib/ceph index 8e34aa49a4..29d2aca54f 100644 --- a/lib/ceph +++ b/lib/ceph @@ -83,7 +83,8 @@ ATTACH_ENCRYPTED_VOLUME_AVAILABLE=False # ------------ function get_ceph_version { - local ceph_version_str=$(sudo ceph daemon mon.$(hostname) version | cut -d '"' -f 4 | cut -f 1,2 -d '.') + local ceph_version_str + ceph_version_str=$(sudo ceph daemon mon.$(hostname) version | cut -d '"' -f 4 | cut -f 1,2 -d '.') echo $ceph_version_str } @@ -106,7 +107,8 @@ EOF # undefine_virsh_secret() - Undefine Cinder key secret from libvirt function undefine_virsh_secret { if is_service_enabled cinder || is_service_enabled nova; then - local virsh_uuid=$(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }') + local virsh_uuid + virsh_uuid=$(sudo virsh secret-list | awk '/^ ?[0-9a-z]/ { print $1 }') sudo virsh secret-undefine ${virsh_uuid} >/dev/null 2>&1 fi } @@ -219,7 +221,8 @@ EOF done # pools data and metadata were removed in the Giant release so depending on the version we apply 
different commands - local ceph_version=$(get_ceph_version) + local ceph_version + ceph_version=$(get_ceph_version) # change pool replica size according to the CEPH_REPLICAS set by the user if [[ ${ceph_version%%.*} -eq 0 ]] && [[ ${ceph_version##*.} -lt 87 ]]; then sudo ceph -c ${CEPH_CONF_FILE} osd pool set rbd size ${CEPH_REPLICAS} diff --git a/lib/cinder b/lib/cinder index 10144117ec..73941c6973 100644 --- a/lib/cinder +++ b/lib/cinder @@ -150,7 +150,8 @@ function cleanup_cinder { # ensure the volume group is cleared up because fails might # leave dead volumes in the group if [ "$CINDER_ISCSI_HELPER" = "tgtadm" ]; then - local targets=$(sudo tgtadm --op show --mode target) + local targets + targets=$(sudo tgtadm --op show --mode target) if [ $? -ne 0 ]; then # If tgt driver isn't running this won't work obviously # So check the response and restart if need be @@ -198,7 +199,8 @@ function cleanup_cinder { # _cinder_config_apache_wsgi() - Set WSGI config files function _cinder_config_apache_wsgi { - local cinder_apache_conf=$(apache_site_config_for osapi-volume) + local cinder_apache_conf + cinder_apache_conf=$(apache_site_config_for osapi-volume) local cinder_ssl="" local cinder_certfile="" local cinder_keyfile="" diff --git a/lib/glance b/lib/glance index 7be3a8495c..2eb93a46e6 100644 --- a/lib/glance +++ b/lib/glance @@ -106,7 +106,8 @@ function configure_glance { iniset $GLANCE_REGISTRY_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $GLANCE_REGISTRY_CONF DEFAULT bind_host $GLANCE_SERVICE_LISTEN_ADDRESS inicomment $GLANCE_REGISTRY_CONF DEFAULT log_file - local dburl=`database_connection_url glance` + local dburl + dburl=`database_connection_url glance` iniset $GLANCE_REGISTRY_CONF database connection $dburl iniset $GLANCE_REGISTRY_CONF DEFAULT use_syslog $SYSLOG iniset $GLANCE_REGISTRY_CONF DEFAULT workers "$API_WORKERS" @@ -265,7 +266,8 @@ function create_glance_accounts { # required for swift access if is_service_enabled s-proxy; then - local 
glance_swift_user=$(get_or_create_user "glance-swift" \ + local glance_swift_user + glance_swift_user=$(get_or_create_user "glance-swift" \ "$SERVICE_PASSWORD" "default" "glance-swift@example.com") get_or_add_user_project_role "ResellerAdmin" $glance_swift_user $SERVICE_TENANT_NAME fi diff --git a/lib/heat b/lib/heat index 3e6975ae26..df85c72cfe 100644 --- a/lib/heat +++ b/lib/heat @@ -321,7 +321,8 @@ function build_heat_pip_mirror { echo "" >> $HEAT_PIP_REPO/index.html - local heat_pip_repo_apache_conf=$(apache_site_config_for heat_pip_repo) + local heat_pip_repo_apache_conf + heat_pip_repo_apache_conf=$(apache_site_config_for heat_pip_repo) sudo cp $FILES/apache-heat-pip-repo.template $heat_pip_repo_apache_conf sudo sed -e " diff --git a/lib/horizon b/lib/horizon index b2539d1b7d..6ecd755795 100644 --- a/lib/horizon +++ b/lib/horizon @@ -49,7 +49,8 @@ function _horizon_config_set { sed -e "/^$option/d" -i $local_settings echo -e "\n$option=$value" >> $file elif grep -q "^$section" $file; then - local line=$(sed -ne "/^$section/,/^}/ { /^ *'$option':/ p; }" $file) + local line + line=$(sed -ne "/^$section/,/^}/ { /^ *'$option':/ p; }" $file) if [ -n "$line" ]; then sed -i -e "/^$section/,/^}/ s/^\( *'$option'\) *:.*$/\1: $value,/" $file else @@ -68,7 +69,8 @@ function _horizon_config_set { # cleanup_horizon() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_horizon { - local horizon_conf=$(apache_site_config_for horizon) + local horizon_conf + horizon_conf=$(apache_site_config_for horizon) sudo rm -f $horizon_conf } @@ -112,7 +114,8 @@ function init_horizon { # Create an empty directory that apache uses as docroot sudo mkdir -p $HORIZON_DIR/.blackhole - local horizon_conf=$(apache_site_config_for horizon) + local horizon_conf + horizon_conf=$(apache_site_config_for horizon) # Configure apache to run horizon sudo sh -c "sed -e \" diff --git a/lib/ironic b/lib/ironic index 
40475e0a83..8eb0d80e07 100644 --- a/lib/ironic +++ b/lib/ironic @@ -225,7 +225,8 @@ function _cleanup_ironic_apache_wsgi { # _config_ironic_apache_wsgi() - Set WSGI config files of Ironic function _config_ironic_apache_wsgi { - local ironic_apache_conf=$(apache_site_config_for ironic) + local ironic_apache_conf + ironic_apache_conf=$(apache_site_config_for ironic) sudo cp $FILES/apache-ironic.template $ironic_apache_conf sudo sed -e " s|%PUBLICPORT%|$IRONIC_HTTP_PORT|g; @@ -325,11 +326,13 @@ function configure_ironic_api { function configure_ironic_conductor { cp $IRONIC_DIR/etc/ironic/rootwrap.conf $IRONIC_ROOTWRAP_CONF cp -r $IRONIC_DIR/etc/ironic/rootwrap.d $IRONIC_CONF_DIR - local ironic_rootwrap=$(get_rootwrap_location ironic) + local ironic_rootwrap + ironic_rootwrap=$(get_rootwrap_location ironic) local rootwrap_isudoer_cmd="$ironic_rootwrap $IRONIC_CONF_DIR/rootwrap.conf *" # Set up the rootwrap sudoers for ironic - local tempfile=`mktemp` + local tempfile + tempfile=`mktemp` echo "$STACK_USER ALL=(root) NOPASSWD: $rootwrap_isudoer_cmd" >$tempfile chmod 0440 $tempfile sudo chown root:root $tempfile @@ -370,7 +373,8 @@ function configure_ironic_conductor { fi iniset $IRONIC_CONF_FILE glance swift_endpoint_url http://${HOST_IP}:${SWIFT_DEFAULT_BIND_PORT:-8080} iniset $IRONIC_CONF_FILE glance swift_api_version v1 - local tenant_id=$(get_or_create_project $SERVICE_TENANT_NAME default) + local tenant_id + tenant_id=$(get_or_create_project $SERVICE_TENANT_NAME default) iniset $IRONIC_CONF_FILE glance swift_account AUTH_${tenant_id} iniset $IRONIC_CONF_FILE glance swift_container glance iniset $IRONIC_CONF_FILE glance swift_temp_url_duration 3600 @@ -379,7 +383,8 @@ function configure_ironic_conductor { fi if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then - local pxebin=`basename $IRONIC_PXE_BOOT_IMAGE` + local pxebin + pxebin=`basename $IRONIC_PXE_BOOT_IMAGE` iniset $IRONIC_CONF_FILE pxe ipxe_enabled True iniset $IRONIC_CONF_FILE pxe pxe_config_template 
'\$pybasedir/drivers/modules/ipxe_config.template' iniset $IRONIC_CONF_FILE pxe pxe_bootfile_name $pxebin @@ -445,7 +450,8 @@ function init_ironic { # _ironic_bm_vm_names() - Generates list of names for baremetal VMs. function _ironic_bm_vm_names { local idx - local num_vms=$(($IRONIC_VM_COUNT - 1)) + local num_vms + num_vms=$(($IRONIC_VM_COUNT - 1)) for idx in $(seq 0 $num_vms); do echo "baremetal${IRONIC_VM_NETWORK_BRIDGE}_${idx}" done @@ -498,22 +504,27 @@ function stop_ironic { } function create_ovs_taps { - local ironic_net_id=$(neutron net-list | grep private | get_field 1) + local ironic_net_id + ironic_net_id=$(neutron net-list | grep private | get_field 1) # Work around: No netns exists on host until a Neutron port is created. We # need to create one in Neutron to know what netns to tap into prior to the # first node booting. - local port_id=$(neutron port-create private | grep " id " | get_field 2) + local port_id + port_id=$(neutron port-create private | grep " id " | get_field 2) # intentional sleep to make sure the tag has been set to port sleep 10 if [[ "$Q_USE_NAMESPACE" = "True" ]]; then - local tapdev=$(sudo ip netns exec qdhcp-${ironic_net_id} ip link list | grep " tap" | cut -d':' -f2 | cut -d'@' -f1 | cut -b2-) + local tapdev + tapdev=$(sudo ip netns exec qdhcp-${ironic_net_id} ip link list | grep " tap" | cut -d':' -f2 | cut -d'@' -f1 | cut -b2-) else - local tapdev=$(sudo ip link list | grep " tap" | cut -d':' -f2 | cut -d'@' -f1 | cut -b2-) + local tapdev + tapdev=$(sudo ip link list | grep " tap" | cut -d':' -f2 | cut -d'@' -f1 | cut -b2-) fi - local tag_id=$(sudo ovs-vsctl show |grep ${tapdev} -A1 -m1 | grep tag | cut -d':' -f2 | cut -b2-) + local tag_id + tag_id=$(sudo ovs-vsctl show |grep ${tapdev} -A1 -m1 | grep tag | cut -d':' -f2 | cut -b2-) # make sure veth pair is not existing, otherwise delete its links sudo ip link show ovs-tap1 && sudo ip link delete ovs-tap1 @@ -570,7 +581,8 @@ function wait_for_nova_resources { } function 
enroll_nodes { - local chassis_id=$(ironic chassis-create -d "ironic test chassis" | grep " uuid " | get_field 2) + local chassis_id + chassis_id=$(ironic chassis-create -d "ironic test chassis" | grep " uuid " | get_field 2) if ! is_ironic_hardware; then local ironic_node_cpu=$IRONIC_VM_SPECS_CPU @@ -602,10 +614,14 @@ function enroll_nodes { if ! is_ironic_hardware; then local mac_address=$hardware_info elif [[ -z "${IRONIC_DEPLOY_DRIVER##*_ipmitool}" ]]; then - local ipmi_address=$(echo $hardware_info |awk '{print $1}') - local mac_address=$(echo $hardware_info |awk '{print $2}') - local ironic_ipmi_username=$(echo $hardware_info |awk '{print $3}') - local ironic_ipmi_passwd=$(echo $hardware_info |awk '{print $4}') + local ipmi_address + ipmi_address=$(echo $hardware_info |awk '{print $1}') + local mac_address + mac_address=$(echo $hardware_info |awk '{print $2}') + local ironic_ipmi_username + ironic_ipmi_username=$(echo $hardware_info |awk '{print $3}') + local ironic_ipmi_passwd + ironic_ipmi_passwd=$(echo $hardware_info |awk '{print $4}') # Currently we require all hardware platform have same CPU/RAM/DISK info # in future, this can be enhanced to support different type, and then # we create the bare metal flavor with minimum value @@ -617,9 +633,11 @@ function enroll_nodes { # First node created will be used for testing in ironic w/o glance # scenario, so we need to know its UUID. - local standalone_node_uuid=$([ $total_nodes -eq 0 ] && echo "--uuid $IRONIC_NODE_UUID") + local standalone_node_uuid + standalone_node_uuid=$([ $total_nodes -eq 0 ] && echo "--uuid $IRONIC_NODE_UUID") - local node_id=$(ironic node-create $standalone_node_uuid\ + local node_id + node_id=$(ironic node-create $standalone_node_uuid\ --chassis_uuid $chassis_id \ --driver $IRONIC_DEPLOY_DRIVER \ --name node-$total_nodes \ @@ -640,7 +658,8 @@ function enroll_nodes { # NOTE(adam_g): Attempting to use an autogenerated UUID for flavor id here uncovered # bug (LP: #1333852) in Trove. 
This can be changed to use an auto flavor id when the # bug is fixed in Juno. - local adjusted_disk=$(($ironic_node_disk - $ironic_ephemeral_disk)) + local adjusted_disk + adjusted_disk=$(($ironic_node_disk - $ironic_ephemeral_disk)) nova flavor-create --ephemeral $ironic_ephemeral_disk baremetal 551 $ironic_node_ram $adjusted_disk $ironic_node_cpu nova flavor-key baremetal set "cpu_arch"="x86_64" @@ -771,7 +790,8 @@ function upload_baremetal_ironic_deploy { fi fi - local token=$(openstack token issue -c id -f value) + local token + token=$(openstack token issue -c id -f value) die_if_not_set $LINENO token "Keystone fail to get token" # load them into glance @@ -809,7 +829,8 @@ function prepare_baremetal_basic_ops { function cleanup_baremetal_basic_ops { rm -f $IRONIC_VM_MACS_CSV_FILE if [ -f $IRONIC_KEY_FILE ]; then - local key=$(cat $IRONIC_KEY_FILE.pub) + local key + key=$(cat $IRONIC_KEY_FILE.pub) # remove public key from authorized_keys grep -v "$key" $IRONIC_AUTHORIZED_KEYS_FILE > temp && mv temp $IRONIC_AUTHORIZED_KEYS_FILE chmod 0600 $IRONIC_AUTHORIZED_KEYS_FILE diff --git a/lib/keystone b/lib/keystone index ec28b46341..cdcc13a326 100644 --- a/lib/keystone +++ b/lib/keystone @@ -132,7 +132,8 @@ function _cleanup_keystone_apache_wsgi { # _config_keystone_apache_wsgi() - Set WSGI config files of Keystone function _config_keystone_apache_wsgi { - local keystone_apache_conf=$(apache_site_config_for keystone) + local keystone_apache_conf + keystone_apache_conf=$(apache_site_config_for keystone) local keystone_ssl="" local keystone_certfile="" local keystone_keyfile="" @@ -347,9 +348,12 @@ function configure_keystone_extensions { function create_keystone_accounts { # admin - local admin_tenant=$(get_or_create_project "admin" default) - local admin_user=$(get_or_create_user "admin" "$ADMIN_PASSWORD" default) - local admin_role=$(get_or_create_role "admin") + local admin_tenant + admin_tenant=$(get_or_create_project "admin" default) + local admin_user + 
admin_user=$(get_or_create_user "admin" "$ADMIN_PASSWORD" default) + local admin_role + admin_role=$(get_or_create_role "admin") get_or_add_user_project_role $admin_role $admin_user $admin_tenant # Create service project/role @@ -365,18 +369,23 @@ function create_keystone_accounts { get_or_create_role ResellerAdmin # The Member role is used by Horizon and Swift so we need to keep it: - local member_role=$(get_or_create_role "Member") + local member_role + member_role=$(get_or_create_role "Member") # another_role demonstrates that an arbitrary role may be created and used # TODO(sleepsonthefloor): show how this can be used for rbac in the future! - local another_role=$(get_or_create_role "anotherrole") + local another_role + another_role=$(get_or_create_role "anotherrole") # invisible tenant - admin can't see this one - local invis_tenant=$(get_or_create_project "invisible_to_admin" default) + local invis_tenant + invis_tenant=$(get_or_create_project "invisible_to_admin" default) # demo - local demo_tenant=$(get_or_create_project "demo" default) - local demo_user=$(get_or_create_user "demo" \ + local demo_tenant + demo_tenant=$(get_or_create_project "demo" default) + local demo_user + demo_user=$(get_or_create_user "demo" \ "$ADMIN_PASSWORD" "default" "demo@example.com") get_or_add_user_project_role $member_role $demo_user $demo_tenant @@ -384,9 +393,11 @@ function create_keystone_accounts { get_or_add_user_project_role $another_role $demo_user $demo_tenant get_or_add_user_project_role $member_role $demo_user $invis_tenant - local admin_group=$(get_or_create_group "admins" \ + local admin_group + admin_group=$(get_or_create_group "admins" \ "default" "openstack admin group") - local non_admin_group=$(get_or_create_group "nonadmins" \ + local non_admin_group + non_admin_group=$(get_or_create_group "nonadmins" \ "default" "non-admin group") get_or_add_group_project_role $member_role $non_admin_group $demo_tenant @@ -415,7 +426,8 @@ function create_keystone_accounts { 
function create_service_user { local role=${2:-service} - local user=$(get_or_create_user "$1" "$SERVICE_PASSWORD" default) + local user + user=$(get_or_create_user "$1" "$SERVICE_PASSWORD" default) get_or_add_user_project_role "$role" "$user" "$SERVICE_TENANT_NAME" } diff --git a/lib/ldap b/lib/ldap index d2dbc3b728..0414fea639 100644 --- a/lib/ldap +++ b/lib/ldap @@ -82,7 +82,8 @@ function cleanup_ldap { function init_ldap { local keystone_ldif - local tmp_ldap_dir=$(mktemp -d -t ldap.$$.XXXXXXXXXX) + local tmp_ldap_dir + tmp_ldap_dir=$(mktemp -d -t ldap.$$.XXXXXXXXXX) # Remove data but not schemas clear_ldap_state @@ -113,7 +114,8 @@ function install_ldap { echo "Installing LDAP inside function" echo "os_VENDOR is $os_VENDOR" - local tmp_ldap_dir=$(mktemp -d -t ldap.$$.XXXXXXXXXX) + local tmp_ldap_dir + tmp_ldap_dir=$(mktemp -d -t ldap.$$.XXXXXXXXXX) printf "installing OpenLDAP" if is_ubuntu; then @@ -129,7 +131,8 @@ function install_ldap { fi echo "LDAP_PASSWORD is $LDAP_PASSWORD" - local slappass=$(slappasswd -s $LDAP_PASSWORD) + local slappass + slappass=$(slappasswd -s $LDAP_PASSWORD) printf "LDAP secret is $slappass\n" # Create manager.ldif and add to olcdb diff --git a/lib/lvm b/lib/lvm index 8afd543f34..468a99aecc 100644 --- a/lib/lvm +++ b/lib/lvm @@ -56,7 +56,8 @@ function _clean_lvm_backing_file { # If the backing physical device is a loop device, it was probably setup by DevStack if [[ -n "$backing_file" ]] && [[ -e "$backing_file" ]]; then - local vg_dev=$(sudo losetup -j $backing_file | awk -F':' '/'$BACKING_FILE_SUFFIX'/ { print $1}') + local vg_dev + vg_dev=$(sudo losetup -j $backing_file | awk -F':' '/'$BACKING_FILE_SUFFIX'/ { print $1}') sudo losetup -d $vg_dev rm -f $backing_file fi @@ -89,7 +90,8 @@ function _create_lvm_volume_group { if ! 
sudo vgs $vg; then # Only create if the file doesn't already exists [[ -f $backing_file ]] || truncate -s $size $backing_file - local vg_dev=`sudo losetup -f --show $backing_file` + local vg_dev + vg_dev=`sudo losetup -f --show $backing_file` # Only create volume group if it doesn't already exist if ! sudo vgs $vg; then diff --git a/lib/neutron-legacy b/lib/neutron-legacy index e67bd4ae32..e9f88fbd1b 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -806,7 +806,8 @@ function _move_neutron_addresses_route { local IP_ADD="" local IP_DEL="" - local DEFAULT_ROUTE_GW=$(ip r | awk "/default.+$from_intf/ { print \$3; exit }") + local DEFAULT_ROUTE_GW + DEFAULT_ROUTE_GW=$(ip r | awk "/default.+$from_intf/ { print \$3; exit }") local ADD_OVS_PORT="" if [[ $af == "inet" ]]; then @@ -1244,7 +1245,8 @@ function _neutron_create_private_subnet_v4 { subnet_params+="--gateway $NETWORK_GATEWAY " subnet_params+="--name $PRIVATE_SUBNET_NAME " subnet_params+="$NET_ID $FIXED_RANGE" - local subnet_id=$(neutron subnet-create $subnet_params | grep ' id ' | get_field 2) + local subnet_id + subnet_id=$(neutron subnet-create $subnet_params | grep ' id ' | get_field 2) die_if_not_set $LINENO subnet_id "Failure creating private IPv4 subnet for $TENANT_ID" echo $subnet_id } @@ -1259,7 +1261,8 @@ function _neutron_create_private_subnet_v6 { subnet_params+="--gateway $IPV6_PRIVATE_NETWORK_GATEWAY " subnet_params+="--name $IPV6_PRIVATE_SUBNET_NAME " subnet_params+="$NET_ID $FIXED_RANGE_V6 $ipv6_modes" - local ipv6_subnet_id=$(neutron subnet-create $subnet_params | grep ' id ' | get_field 2) + local ipv6_subnet_id + ipv6_subnet_id=$(neutron subnet-create $subnet_params | grep ' id ' | get_field 2) die_if_not_set $LINENO ipv6_subnet_id "Failure creating private IPv6 subnet for $TENANT_ID" echo $ipv6_subnet_id } @@ -1272,7 +1275,8 @@ function _neutron_create_public_subnet_v4 { subnet_params+="--name $PUBLIC_SUBNET_NAME " subnet_params+="$EXT_NET_ID $FLOATING_RANGE " subnet_params+="-- 
--enable_dhcp=False" - local id_and_ext_gw_ip=$(neutron subnet-create $subnet_params | grep -e 'gateway_ip' -e ' id ') + local id_and_ext_gw_ip + id_and_ext_gw_ip=$(neutron subnet-create $subnet_params | grep -e 'gateway_ip' -e ' id ') die_if_not_set $LINENO id_and_ext_gw_ip "Failure creating public IPv4 subnet" echo $id_and_ext_gw_ip } @@ -1284,7 +1288,8 @@ function _neutron_create_public_subnet_v6 { subnet_params+="--name $IPV6_PUBLIC_SUBNET_NAME " subnet_params+="$EXT_NET_ID $IPV6_PUBLIC_RANGE " subnet_params+="-- --enable_dhcp=False" - local ipv6_id_and_ext_gw_ip=$(neutron subnet-create $subnet_params | grep -e 'gateway_ip' -e ' id ') + local ipv6_id_and_ext_gw_ip + ipv6_id_and_ext_gw_ip=$(neutron subnet-create $subnet_params | grep -e 'gateway_ip' -e ' id ') die_if_not_set $LINENO ipv6_id_and_ext_gw_ip "Failure creating an IPv6 public subnet" echo $ipv6_id_and_ext_gw_ip } @@ -1293,8 +1298,10 @@ function _neutron_create_public_subnet_v6 { function _neutron_configure_router_v4 { neutron router-interface-add $ROUTER_ID $SUBNET_ID # Create a public subnet on the external network - local id_and_ext_gw_ip=$(_neutron_create_public_subnet_v4 $EXT_NET_ID) - local ext_gw_ip=$(echo $id_and_ext_gw_ip | get_field 2) + local id_and_ext_gw_ip + id_and_ext_gw_ip=$(_neutron_create_public_subnet_v4 $EXT_NET_ID) + local ext_gw_ip + ext_gw_ip=$(echo $id_and_ext_gw_ip | get_field 2) PUB_SUBNET_ID=$(echo $id_and_ext_gw_ip | get_field 5) # Configure the external network as the default router gateway neutron router-gateway-set $ROUTER_ID $EXT_NET_ID @@ -1331,9 +1338,12 @@ function _neutron_configure_router_v4 { function _neutron_configure_router_v6 { neutron router-interface-add $ROUTER_ID $IPV6_SUBNET_ID # Create a public subnet on the external network - local ipv6_id_and_ext_gw_ip=$(_neutron_create_public_subnet_v6 $EXT_NET_ID) - local ipv6_ext_gw_ip=$(echo $ipv6_id_and_ext_gw_ip | get_field 2) - local ipv6_pub_subnet_id=$(echo $ipv6_id_and_ext_gw_ip | get_field 5) + local 
ipv6_id_and_ext_gw_ip + ipv6_id_and_ext_gw_ip=$(_neutron_create_public_subnet_v6 $EXT_NET_ID) + local ipv6_ext_gw_ip + ipv6_ext_gw_ip=$(echo $ipv6_id_and_ext_gw_ip | get_field 2) + local ipv6_pub_subnet_id + ipv6_pub_subnet_id=$(echo $ipv6_id_and_ext_gw_ip | get_field 5) # If the external network has not already been set as the default router # gateway when configuring an IPv4 public subnet, do so now @@ -1351,7 +1361,8 @@ function _neutron_configure_router_v6 { die_if_not_set $LINENO IPV6_ROUTER_GW_IP "Failure retrieving IPV6_ROUTER_GW_IP" if is_neutron_ovs_base_plugin && [[ "$Q_USE_NAMESPACE" = "True" ]]; then - local ext_gw_interface=$(_neutron_get_ext_gw_interface) + local ext_gw_interface + ext_gw_interface=$(_neutron_get_ext_gw_interface) local ipv6_cidr_len=${IPV6_PUBLIC_RANGE#*/} # Configure interface for public bridge diff --git a/lib/neutron_plugins/embrane b/lib/neutron_plugins/embrane index 6b4819ef70..2028496ca1 100644 --- a/lib/neutron_plugins/embrane +++ b/lib/neutron_plugins/embrane @@ -10,7 +10,8 @@ set +o xtrace source $TOP_DIR/lib/neutron_plugins/openvswitch function save_function { - local ORIG_FUNC=$(declare -f $1) + local ORIG_FUNC + ORIG_FUNC=$(declare -f $1) local NEW_FUNC="$2${ORIG_FUNC#$1}" eval "$NEW_FUNC" } diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base index b012683a6f..d3fd198b08 100644 --- a/lib/neutron_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -49,8 +49,10 @@ function neutron_ovs_base_cleanup { function _neutron_ovs_base_install_ubuntu_dkms { # install Dynamic Kernel Module Support packages if needed - local kernel_version=$(uname -r) - local kernel_major_minor=`echo $kernel_version | cut -d. -f1-2` + local kernel_version + kernel_version=$(uname -r) + local kernel_major_minor + kernel_major_minor=`echo $kernel_version | cut -d. 
-f1-2` # From kernel 3.13 on, openvswitch-datapath-dkms is not needed if [ `vercmp_numbers "$kernel_major_minor" "3.13"` -lt "0" ]; then install_package "dkms openvswitch-datapath-dkms linux-headers-$kernel_version" diff --git a/lib/nova b/lib/nova index 9830276232..6e6075cae6 100644 --- a/lib/nova +++ b/lib/nova @@ -202,14 +202,16 @@ function cleanup_nova { clean_iptables # Destroy old instances - local instances=`sudo virsh list --all | grep $INSTANCE_NAME_PREFIX | sed "s/.*\($INSTANCE_NAME_PREFIX[0-9a-fA-F]*\).*/\1/g"` + local instances + instances=`sudo virsh list --all | grep $INSTANCE_NAME_PREFIX | sed "s/.*\($INSTANCE_NAME_PREFIX[0-9a-fA-F]*\).*/\1/g"` if [ ! "$instances" = "" ]; then echo $instances | xargs -n1 sudo virsh destroy || true echo $instances | xargs -n1 sudo virsh undefine --managed-save || true fi # Logout and delete iscsi sessions - local tgts=$(sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d ' ' -f2) + local tgts + tgts=$(sudo iscsiadm --mode node | grep $VOLUME_NAME_PREFIX | cut -d ' ' -f2) local target for target in $tgts; do sudo iscsiadm --mode node -T $target --logout || true @@ -245,8 +247,10 @@ function _cleanup_nova_apache_wsgi { function _config_nova_apache_wsgi { sudo mkdir -p $NOVA_WSGI_DIR - local nova_apache_conf=$(apache_site_config_for nova-api) - local nova_ec2_apache_conf=$(apache_site_config_for nova-ec2-api) + local nova_apache_conf + nova_apache_conf=$(apache_site_config_for nova-api) + local nova_ec2_apache_conf + nova_ec2_apache_conf=$(apache_site_config_for nova-ec2-api) local nova_ssl="" local nova_certfile="" local nova_keyfile="" @@ -784,7 +788,8 @@ function start_nova_api { export PATH=$NOVA_BIN_DIR:$PATH # If the site is not enabled then we are in a grenade scenario - local enabled_site_file=$(apache_site_config_for nova-api) + local enabled_site_file + enabled_site_file=$(apache_site_config_for nova-api) if [ -f ${enabled_site_file} ] && [ "$NOVA_USE_MOD_WSGI" == "True" ]; then enable_apache_site 
nova-api enable_apache_site nova-ec2-api diff --git a/lib/swift b/lib/swift index 645bfd7cd9..62f47dce50 100644 --- a/lib/swift +++ b/lib/swift @@ -205,9 +205,12 @@ function _config_swift_apache_wsgi { # copy apache vhost file and set name and port local node_number for node_number in ${SWIFT_REPLICAS_SEQ}; do - local object_port=$(( OBJECT_PORT_BASE + 10 * (node_number - 1) )) - local container_port=$(( CONTAINER_PORT_BASE + 10 * (node_number - 1) )) - local account_port=$(( ACCOUNT_PORT_BASE + 10 * (node_number - 1) )) + local object_port + object_port=$(( OBJECT_PORT_BASE + 10 * (node_number - 1) )) + local container_port + container_port=$(( CONTAINER_PORT_BASE + 10 * (node_number - 1) )) + local account_port + account_port=$(( ACCOUNT_PORT_BASE + 10 * (node_number - 1) )) sudo cp ${SWIFT_DIR}/examples/apache2/object-server.template $(apache_site_config_for object-server-${node_number}) sudo sed -e " @@ -504,7 +507,8 @@ EOF if is_service_enabled keystone; then iniuncomment ${testfile} func_test auth_version - local auth_vers=$(iniget ${testfile} func_test auth_version) + local auth_vers + auth_vers=$(iniget ${testfile} func_test auth_version) iniset ${testfile} func_test auth_host ${KEYSTONE_SERVICE_HOST} iniset ${testfile} func_test auth_port ${KEYSTONE_AUTH_PORT} if [[ $auth_vers == "3" ]]; then @@ -514,7 +518,8 @@ EOF fi fi - local user_group=$(id -g ${STACK_USER}) + local user_group + user_group=$(id -g ${STACK_USER}) sudo install -d -o ${STACK_USER} -g ${user_group} ${SWIFT_DATA_DIR} local swift_log_dir=${SWIFT_DATA_DIR}/logs @@ -540,7 +545,8 @@ function create_swift_disk { # First do a bit of setup by creating the directories and # changing the permissions so we can run it as our user. - local user_group=$(id -g ${STACK_USER}) + local user_group + user_group=$(id -g ${STACK_USER}) sudo install -d -o ${STACK_USER} -g ${user_group} ${SWIFT_DATA_DIR}/{drives,cache,run,logs} # Create a loopback disk and format it to XFS. 
@@ -607,7 +613,8 @@ function create_swift_accounts { KEYSTONE_CATALOG_BACKEND=${KEYSTONE_CATALOG_BACKEND:-sql} - local another_role=$(get_or_create_role "anotherrole") + local another_role + another_role=$(get_or_create_role "anotherrole") # NOTE(jroll): Swift doesn't need the admin role here, however Ironic uses # temp urls, which break when uploaded by a non-admin role @@ -623,33 +630,40 @@ function create_swift_accounts { "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s" fi - local swift_tenant_test1=$(get_or_create_project swifttenanttest1 default) + local swift_tenant_test1 + swift_tenant_test1=$(get_or_create_project swifttenanttest1 default) die_if_not_set $LINENO swift_tenant_test1 "Failure creating swift_tenant_test1" SWIFT_USER_TEST1=$(get_or_create_user swiftusertest1 $swiftusertest1_password \ "default" "test@example.com") die_if_not_set $LINENO SWIFT_USER_TEST1 "Failure creating SWIFT_USER_TEST1" get_or_add_user_project_role admin $SWIFT_USER_TEST1 $swift_tenant_test1 - local swift_user_test3=$(get_or_create_user swiftusertest3 $swiftusertest3_password \ + local swift_user_test3 + swift_user_test3=$(get_or_create_user swiftusertest3 $swiftusertest3_password \ "default" "test3@example.com") die_if_not_set $LINENO swift_user_test3 "Failure creating swift_user_test3" get_or_add_user_project_role $another_role $swift_user_test3 $swift_tenant_test1 - local swift_tenant_test2=$(get_or_create_project swifttenanttest2 default) + local swift_tenant_test2 + swift_tenant_test2=$(get_or_create_project swifttenanttest2 default) die_if_not_set $LINENO swift_tenant_test2 "Failure creating swift_tenant_test2" - local swift_user_test2=$(get_or_create_user swiftusertest2 $swiftusertest2_password \ + local swift_user_test2 + swift_user_test2=$(get_or_create_user swiftusertest2 $swiftusertest2_password \ "default" "test2@example.com") die_if_not_set $LINENO swift_user_test2 "Failure creating swift_user_test2" get_or_add_user_project_role admin 
$swift_user_test2 $swift_tenant_test2 - local swift_domain=$(get_or_create_domain swift_test 'Used for swift functional testing') + local swift_domain + swift_domain=$(get_or_create_domain swift_test 'Used for swift functional testing') die_if_not_set $LINENO swift_domain "Failure creating swift_test domain" - local swift_tenant_test4=$(get_or_create_project swifttenanttest4 $swift_domain) + local swift_tenant_test4 + swift_tenant_test4=$(get_or_create_project swifttenanttest4 $swift_domain) die_if_not_set $LINENO swift_tenant_test4 "Failure creating swift_tenant_test4" - local swift_user_test4=$(get_or_create_user swiftusertest4 $swiftusertest4_password \ + local swift_user_test4 + swift_user_test4=$(get_or_create_user swiftusertest4 $swiftusertest4_password \ $swift_domain "test4@example.com") die_if_not_set $LINENO swift_user_test4 "Failure creating swift_user_test4" get_or_add_user_project_role admin $swift_user_test4 $swift_tenant_test4 diff --git a/lib/tempest b/lib/tempest index f4d0a6dab0..e7ea429847 100644 --- a/lib/tempest +++ b/lib/tempest @@ -372,7 +372,8 @@ function configure_tempest { # Compute Features # Run ``verify_tempest_config -ur`` to retrieve enabled extensions on API endpoints # NOTE(mtreinish): This must be done after auth settings are added to the tempest config - local tmp_cfg_file=$(mktemp) + local tmp_cfg_file + tmp_cfg_file=$(mktemp) cd $TEMPEST_DIR tox -revenv -- verify-tempest-config -uro $tmp_cfg_file diff --git a/lib/tls b/lib/tls index 8ff2027819..f4740b88be 100644 --- a/lib/tls +++ b/lib/tls @@ -346,7 +346,8 @@ function make_root_CA { # we need to change it. function fix_system_ca_bundle_path { if is_service_enabled tls-proxy || [ "$USE_SSL" == "True" ]; then - local capath=$(python -c $'try:\n from requests import certs\n print certs.where()\nexcept ImportError: pass') + local capath + capath=$(python -c $'try:\n from requests import certs\n print certs.where()\nexcept ImportError: pass') if [[ ! $capath == "" && ! 
$capath =~ ^/etc/.* && ! -L $capath ]]; then if is_fedora; then diff --git a/tests/unittest.sh b/tests/unittest.sh index 603652a216..df7a8b4534 100644 --- a/tests/unittest.sh +++ b/tests/unittest.sh @@ -20,8 +20,10 @@ FAILED_FUNCS="" # pass a test, printing out MSG # usage: passed message function passed { - local lineno=$(caller 0 | awk '{print $1}') - local function=$(caller 0 | awk '{print $2}') + local lineno + lineno=$(caller 0 | awk '{print $1}') + local function + function=$(caller 0 | awk '{print $2}') local msg="$1" if [ -z "$msg" ]; then msg="OK" @@ -33,8 +35,10 @@ function passed { # fail a test, printing out MSG # usage: failed message function failed { - local lineno=$(caller 0 | awk '{print $1}') - local function=$(caller 0 | awk '{print $2}') + local lineno + lineno=$(caller 0 | awk '{print $1}') + local function + function=$(caller 0 | awk '{print $2}') local msg="$1" FAILED_FUNCS+="$function:L$lineno\n" echo "ERROR: $function:L$lineno!" @@ -45,8 +49,10 @@ function failed { # assert string comparision of val1 equal val2, printing out msg # usage: assert_equal val1 val2 msg function assert_equal { - local lineno=`caller 0 | awk '{print $1}'` - local function=`caller 0 | awk '{print $2}'` + local lineno + lineno=`caller 0 | awk '{print $1}'` + local function + function=`caller 0 | awk '{print $2}'` local msg=$3 if [ -z "$msg" ]; then @@ -66,8 +72,10 @@ function assert_equal { # assert variable is empty/blank, printing out msg # usage: assert_empty VAR msg function assert_empty { - local lineno=`caller 0 | awk '{print $1}'` - local function=`caller 0 | awk '{print $2}'` + local lineno + lineno=`caller 0 | awk '{print $1}'` + local function + function=`caller 0 | awk '{print $2}'` local msg=$2 if [ -z "$msg" ]; then diff --git a/tools/create_userrc.sh b/tools/create_userrc.sh index de44abbbe5..25f713ca93 100755 --- a/tools/create_userrc.sh +++ b/tools/create_userrc.sh @@ -190,7 +190,8 @@ function add_entry { local user_passwd=$5 # The admin user can see 
all user's secret AWS keys, it does not looks good - local line=`openstack ec2 credentials list --user $user_id | grep " $project_id "` + local line + line=$(openstack ec2 credentials list --user $user_id | grep " $project_id " || true) if [ -z "$line" ]; then openstack ec2 credentials create --user $user_id --project $project_id 1>&2 line=`openstack ec2 credentials list --user $user_id | grep " $project_id "` diff --git a/tools/peakmem_tracker.sh b/tools/peakmem_tracker.sh index 0d5728a538..ecbd79a0bc 100755 --- a/tools/peakmem_tracker.sh +++ b/tools/peakmem_tracker.sh @@ -41,10 +41,12 @@ function get_mem_available { # snapshot of current usage; i.e. checking the latest entry in the # file will give the peak-memory usage function tracker { - local low_point=$(get_mem_available) + local low_point + low_point=$(get_mem_available) while [ 1 ]; do - local mem_available=$(get_mem_available) + local mem_available + mem_available=$(get_mem_available) if [[ $mem_available -lt $low_point ]]; then low_point=$mem_available diff --git a/tools/xen/scripts/install-os-vpx.sh b/tools/xen/scripts/install-os-vpx.sh index 1ebbeaf564..66f7ef4763 100755 --- a/tools/xen/scripts/install-os-vpx.sh +++ b/tools/xen/scripts/install-os-vpx.sh @@ -100,7 +100,8 @@ create_vif() { local v="$1" echo "Installing VM interface on [$BRIDGE]" - local out_network_uuid=$(find_network "$BRIDGE") + local out_network_uuid + out_network_uuid=$(find_network "$BRIDGE") xe vif-create vm-uuid="$v" network-uuid="$out_network_uuid" device="0" } diff --git a/tools/xen/scripts/uninstall-os-vpx.sh b/tools/xen/scripts/uninstall-os-vpx.sh index 1ed249433a..96dad7e852 100755 --- a/tools/xen/scripts/uninstall-os-vpx.sh +++ b/tools/xen/scripts/uninstall-os-vpx.sh @@ -35,9 +35,12 @@ xe_min() destroy_vdi() { local vbd_uuid="$1" - local type=$(xe_min vbd-list uuid=$vbd_uuid params=type) - local dev=$(xe_min vbd-list uuid=$vbd_uuid params=userdevice) - local vdi_uuid=$(xe_min vbd-list uuid=$vbd_uuid params=vdi-uuid) + local 
type + type=$(xe_min vbd-list uuid=$vbd_uuid params=type) + local dev + dev=$(xe_min vbd-list uuid=$vbd_uuid params=userdevice) + local vdi_uuid + vdi_uuid=$(xe_min vbd-list uuid=$vbd_uuid params=vdi-uuid) if [ "$type" == 'Disk' ] && [ "$dev" != 'xvda' ] && [ "$dev" != '0' ]; then xe vdi-destroy uuid=$vdi_uuid @@ -47,7 +50,8 @@ destroy_vdi() uninstall() { local vm_uuid="$1" - local power_state=$(xe_min vm-list uuid=$vm_uuid params=power-state) + local power_state + power_state=$(xe_min vm-list uuid=$vm_uuid params=power-state) if [ "$power_state" != "halted" ]; then xe vm-shutdown vm=$vm_uuid force=true diff --git a/tools/xen/test_functions.sh b/tools/xen/test_functions.sh index 924e773c00..324e6a1a1e 100755 --- a/tools/xen/test_functions.sh +++ b/tools/xen/test_functions.sh @@ -165,7 +165,8 @@ EOF function test_get_local_sr { setup_xe_response "uuid123" - local RESULT=$(. mocks && get_local_sr) + local RESULT + RESULT=$(. mocks && get_local_sr) [ "$RESULT" == "uuid123" ] @@ -173,7 +174,8 @@ function test_get_local_sr { } function test_get_local_sr_path { - local RESULT=$(mock_out get_local_sr "uuid1" && get_local_sr_path) + local RESULT + RESULT=$(mock_out get_local_sr "uuid1" && get_local_sr_path) [ "/var/run/sr-mount/uuid1" == "$RESULT" ] } From 056a0c6bbc277937ee79361f901e4f6bd5513eec Mon Sep 17 00:00:00 2001 From: Christian Berendt Date: Wed, 7 Oct 2015 18:11:46 +0200 Subject: [PATCH 0522/2941] build_docs: do not handle md and conf files with shocco At the moment the following md and conf files are handled with shocco. This should not be the case. * samples/local.conf * lib/neutron_thirdparty/README.md * lib/neutron_plugins/README.md Change-Id: I11ea5ebda111e6cdab71d3cffaeb4f16443bfd3c --- tools/build_docs.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/build_docs.sh b/tools/build_docs.sh index fa843432b5..7dc492e2a4 100755 --- a/tools/build_docs.sh +++ b/tools/build_docs.sh @@ -81,7 +81,7 @@ for f in $(find . 
\( -name .git -o -name .tox \) -prune -o \( -type f -name \*.s mkdir -p $FQ_HTML_BUILD/`dirname $f`; $SHOCCO $f > $FQ_HTML_BUILD/$f.html done -for f in $(find functions functions-common inc lib pkg samples -type f -name \*); do +for f in $(find functions functions-common inc lib pkg samples -type f -name \* ! -name *.md ! -name *.conf); do echo $f FILES+="$f " mkdir -p $FQ_HTML_BUILD/`dirname $f`; From 1de9e330de9fd509fcdbe04c4722951b3acf199c Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 7 Oct 2015 08:46:13 -0400 Subject: [PATCH 0523/2941] add big warning for extras.d usage This adds a warning for extras.d usage. This will give us something to keep an eye on in logstash to build up the list of projects that will break at Mitaka-1. This also makes the deprecated handling done through a function, which will hopefully make it more consistent in the future. Change-Id: Icd393bc3e7095fe58be0fd13ef74fece3aa5c5f1 --- functions-common | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/functions-common b/functions-common index cfe0c8d4ba..ee5c5cd03c 100644 --- a/functions-common +++ b/functions-common @@ -179,6 +179,12 @@ function die_if_not_set { $xtrace } +function deprecated { + local text=$1 + DEPRECATED_TEXT+="\n$text" + echo "WARNING: $text" +} + # Prints line number and "message" in error format # err $LINENO "message" function err { @@ -1728,6 +1734,16 @@ function run_phase { if [[ -d $TOP_DIR/extras.d ]]; then for i in $TOP_DIR/extras.d/*.sh; do [[ -r $i ]] && source $i $mode $phase + # NOTE(sdague): generate a big warning about using + # extras.d in an unsupported way which will let us track + # unsupported usage in the gate. + local exceptions="50-ironic.sh 60-ceph.sh 80-tempest.sh" + local extra=$(basename $i) + if [[ ! 
( $exceptions =~ "$extra" ) ]]; then + deprecated "extras.d support is being removed in Mitaka-1" + deprecated "jobs for project $extra will break after that point" + deprecated "please move project to a supported devstack plugin model" + fi done fi # the source phase corresponds to settings loading in plugins From 72ad942796adb03c96154cd1df4f97ad775e6092 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 7 Oct 2015 11:51:40 -0400 Subject: [PATCH 0524/2941] use deprecated function instead of modifying global It's safer to run this through a common function to let us make changes later. Change-Id: Ic661824027577e1946726c1843a29ac8325915bf --- functions | 2 +- lib/cinder | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/functions b/functions index ff95c89ad9..3e17f2f9e9 100644 --- a/functions +++ b/functions @@ -341,7 +341,7 @@ function use_database { # No backends registered means this is likely called from ``localrc`` # This is now deprecated usage DATABASE_TYPE=$1 - DEPRECATED_TEXT="$DEPRECATED_TEXT\nThe database backend needs to be properly set in ENABLED_SERVICES; use_database is deprecated localrc\n" + deprecated "The database backend needs to be properly set in ENABLED_SERVICES; use_database is deprecated localrc" else # This should no longer get called...here for posterity use_exclusive_service DATABASE_BACKENDS DATABASE_TYPE $1 diff --git a/lib/cinder b/lib/cinder index 10144117ec..2cda8b7841 100644 --- a/lib/cinder +++ b/lib/cinder @@ -93,7 +93,7 @@ if [[ -n $CINDER_SECURE_DELETE ]]; then if [[ $CINDER_SECURE_DELETE == "False" ]]; then CINDER_VOLUME_CLEAR_DEFAULT="none" fi - DEPRECATED_TEXT="$DEPRECATED_TEXT\nConfigure secure Cinder volume deletion using CINDER_VOLUME_CLEAR instead of CINDER_SECURE_DELETE.\n" + deprecated "Configure secure Cinder volume deletion using CINDER_VOLUME_CLEAR instead of CINDER_SECURE_DELETE." 
fi CINDER_VOLUME_CLEAR=${CINDER_VOLUME_CLEAR:-${CINDER_VOLUME_CLEAR_DEFAULT:-zero}} CINDER_VOLUME_CLEAR=$(echo ${CINDER_VOLUME_CLEAR} | tr '[:upper:]' '[:lower:]') From a0cc2918adb4fc9f43c4f2e7f2cec9f46630636f Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 7 Oct 2015 09:06:42 -0400 Subject: [PATCH 0525/2941] fix warning in install_get_pip we were unconditionally adding -z to the curl command even if the file doesn't exist that we are referencing. That produces a scary warning for users. Lets not do that. Change-Id: Id2860c1c702510b0f8fd496abce579d0fa3ff867 --- tools/install_pip.sh | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/tools/install_pip.sh b/tools/install_pip.sh index 7b42c8c485..41261800e4 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -53,8 +53,15 @@ function install_get_pip { # since and only download if a new version is out -- but only if # it seems we downloaded the file originally. if [[ ! -r $LOCAL_PIP || -r $LOCAL_PIP.downloaded ]]; then + # only test freshness if LOCAL_PIP is actually there, + # otherwise we generate a scary warning. 
+ local timecond="" + if [[ -r $LOCAL_PIP ]]; then + timecond="-z $LOCAL_PIP" + fi + curl --retry 6 --retry-delay 5 \ - -z $LOCAL_PIP -o $LOCAL_PIP $PIP_GET_PIP_URL || \ + $timecond -o $LOCAL_PIP $PIP_GET_PIP_URL || \ die $LINENO "Download of get-pip.py failed" touch $LOCAL_PIP.downloaded fi From 33e3969081e9d3acd332f909cf405193603ec915 Mon Sep 17 00:00:00 2001 From: Dave McCowan Date: Wed, 7 Oct 2015 16:57:58 -0400 Subject: [PATCH 0526/2941] Add Barbican to plugin registry list Barbican can now be installed with devstack via: enable_plugin barbican https://git.openstack.org/openstack/barbican Change-Id: I81af04bb6600d1e58590c39efdc2c0c91563321d --- doc/source/plugin-registry.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 85fd7cc065..eb09988a53 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -22,6 +22,8 @@ The following are plugins that exist for official OpenStack projects. 
+------------------+---------------------------------------------+--------------------+ |aodh |git://git.openstack.org/openstack/aodh | alarming | +------------------+---------------------------------------------+--------------------+ +|barbican |git://git.openstack.org/openstack/barbican | key management | ++------------------+---------------------------------------------+--------------------+ |ceilometer |git://git.openstack.org/openstack/ceilometer | metering | +------------------+---------------------------------------------+--------------------+ |gnocchi |git://git.openstack.org/openstack/gnocchi | metric | From 7adf15df5c0812a74a1697b930003bc1dcddb127 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 23 Sep 2015 11:56:02 +1000 Subject: [PATCH 0527/2941] Add a debugging userrc after keystone is up As a follow-on to the issues raised by I069f46f95656655ae7ba8f3dd929f47eae594b68, rather than a re-write of create_userrc.sh logic, create a temporary userrc that can be helpful for debugging until we have the whole system bootstrapped Change-Id: I3325acffd259cf7f6f4a153c88037cfe8405ca50 --- stack.sh | 29 +++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/stack.sh b/stack.sh index 01668c208b..58a708c497 100755 --- a/stack.sh +++ b/stack.sh @@ -1007,14 +1007,27 @@ if is_service_enabled keystone; then # Begone token auth unset OS_TOKEN OS_URL - # Set up password auth credentials now that Keystone is bootstrapped - export OS_AUTH_URL=$KEYSTONE_AUTH_URI - export OS_USERNAME=admin - export OS_USER_DOMAIN_ID=default - export OS_PASSWORD=$ADMIN_PASSWORD - export OS_PROJECT_NAME=admin - export OS_PROJECT_DOMAIN_ID=default - export OS_REGION_NAME=$REGION_NAME + # Rather than just export these, we write them out to a + # intermediate userrc file that can also be used to debug if + # something goes wrong between here and running + # tools/create_userrc.sh (this script relies on services other + # than keystone being available, so we 
can't call it right now) + cat > $TOP_DIR/userrc_early < Date: Thu, 8 Oct 2015 06:40:21 +0100 Subject: [PATCH 0528/2941] XenServer: the cron job shouldn't print debug text into stderr one cron job attempts to print debug text into stderr; so the file of /root/dead.letter gets created and its size grows continuously. It could eventually threaten dom0 disk space. Maybe there are two solutions: one is to redirect the output to a specific log file; and rotate log files in the script. And the other one is simply to redirect the output /dev/null. By considering the function of this cron job and the printed contents are straight and simple, this patch set goes with the later solution. Change-Id: I4875e5e3837e6f0249e314c6c5f408c79145c6c1 Closes-Bug: 1503966 --- lib/nova_plugins/hypervisor-xenserver | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/nova_plugins/hypervisor-xenserver b/lib/nova_plugins/hypervisor-xenserver index efce383222..e097990bd3 100644 --- a/lib/nova_plugins/hypervisor-xenserver +++ b/lib/nova_plugins/hypervisor-xenserver @@ -79,7 +79,7 @@ function configure_nova_hypervisor { # Create a cron job that will rotate guest logs $ssh_dom0 crontab - << CRONTAB -* * * * * /root/rotate_xen_guest_logs.sh +* * * * * /root/rotate_xen_guest_logs.sh >/dev/null 2>&1 CRONTAB # Create directories for kernels and images From c988bf6fde5e692e768f7fbd6b70d2d5715cb85e Mon Sep 17 00:00:00 2001 From: "Swapnil (coolsvap) Kulkarni" Date: Thu, 8 Oct 2015 13:10:43 +0530 Subject: [PATCH 0529/2941] Updated configuration and docs for MYSQL_PASSWORD Updated MYSQL_PASSWORD to DATABASE_PASSWORD in sample configuration and existing docs Change-Id: Iafb295a0b7707e08a81e6528620db8543d40f7ae --- doc/source/guides/multinode-lab.rst | 4 ++-- doc/source/guides/neutron.rst | 2 +- doc/source/guides/single-machine.rst | 2 +- doc/source/guides/single-vm.rst | 2 +- samples/local.conf | 2 +- tools/xen/README.md | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git 
a/doc/source/guides/multinode-lab.rst b/doc/source/guides/multinode-lab.rst index 1530a84523..5660bc5222 100644 --- a/doc/source/guides/multinode-lab.rst +++ b/doc/source/guides/multinode-lab.rst @@ -128,7 +128,7 @@ cluster controller's DevStack in ``local.conf``: MULTI_HOST=1 LOGFILE=/opt/stack/logs/stack.sh.log ADMIN_PASSWORD=labstack - MYSQL_PASSWORD=supersecret + DATABASE_PASSWORD=supersecret RABBIT_PASSWORD=supersecrete SERVICE_PASSWORD=supersecrete SERVICE_TOKEN=xyzpdqlazydog @@ -169,7 +169,7 @@ machines, create a ``local.conf`` with: MULTI_HOST=1 LOGFILE=/opt/stack/logs/stack.sh.log ADMIN_PASSWORD=labstack - MYSQL_PASSWORD=supersecret + DATABASE_PASSWORD=supersecret RABBIT_PASSWORD=supersecrete SERVICE_PASSWORD=supersecrete SERVICE_TOKEN=xyzpdqlazydog diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst index 424844547c..9d4f54a05e 100644 --- a/doc/source/guides/neutron.rst +++ b/doc/source/guides/neutron.rst @@ -52,7 +52,7 @@ DevStack Configuration RABBIT_HOST=172.18.161.6 GLANCE_HOSTPORT=172.18.161.6:9292 ADMIN_PASSWORD=secrete - MYSQL_PASSWORD=secrete + DATABASE_PASSWORD=secrete RABBIT_PASSWORD=secrete SERVICE_PASSWORD=secrete SERVICE_TOKEN=secrete diff --git a/doc/source/guides/single-machine.rst b/doc/source/guides/single-machine.rst index 236ece9c01..a01c368213 100644 --- a/doc/source/guides/single-machine.rst +++ b/doc/source/guides/single-machine.rst @@ -105,7 +105,7 @@ do the following: FIXED_NETWORK_SIZE=256 FLAT_INTERFACE=eth0 ADMIN_PASSWORD=supersecret - MYSQL_PASSWORD=iheartdatabases + DATABASE_PASSWORD=iheartdatabases RABBIT_PASSWORD=flopsymopsy SERVICE_PASSWORD=iheartksl SERVICE_TOKEN=xyzpdqlazydog diff --git a/doc/source/guides/single-vm.rst b/doc/source/guides/single-vm.rst index 515cd505c3..53c3fa973f 100644 --- a/doc/source/guides/single-vm.rst +++ b/doc/source/guides/single-vm.rst @@ -64,7 +64,7 @@ passed as the user-data file when booting the VM. 
cd devstack echo '[[local|localrc]]' > local.conf echo ADMIN_PASSWORD=password >> local.conf - echo MYSQL_PASSWORD=password >> local.conf + echo DATABASE_PASSWORD=password >> local.conf echo RABBIT_PASSWORD=password >> local.conf echo SERVICE_PASSWORD=password >> local.conf echo SERVICE_TOKEN=tokentoken >> local.conf diff --git a/samples/local.conf b/samples/local.conf index ce7007391d..cb293b6c15 100644 --- a/samples/local.conf +++ b/samples/local.conf @@ -28,7 +28,7 @@ # and they will be added to ``local.conf``. SERVICE_TOKEN=azertytoken ADMIN_PASSWORD=nomoresecrete -MYSQL_PASSWORD=stackdb +DATABASE_PASSWORD=stackdb RABBIT_PASSWORD=stackqueue SERVICE_PASSWORD=$ADMIN_PASSWORD diff --git a/tools/xen/README.md b/tools/xen/README.md index 6212cc54d7..a1adf590a6 100644 --- a/tools/xen/README.md +++ b/tools/xen/README.md @@ -77,7 +77,7 @@ Of course, use real passwords if this machine is exposed. # NOTE: these need to be specified, otherwise devstack will try # to prompt for these passwords, blocking the install process. - MYSQL_PASSWORD=my_super_secret + DATABASE_PASSWORD=my_super_secret SERVICE_TOKEN=my_super_secret ADMIN_PASSWORD=my_super_secret SERVICE_PASSWORD=my_super_secret From 9e11e098c3346efd7cf70283df7c725e5a3e86c6 Mon Sep 17 00:00:00 2001 From: Einst Crazy Date: Tue, 29 Sep 2015 20:01:44 +0800 Subject: [PATCH 0530/2941] Move $DEST creation after logging setup Setup the log output before calling functions like check_path_perm_sanity that want to write out to the error log. 
Change-Id: I9815965257c399a48f8cf0f344814d954137aecb Closes-Bug: #1500834 --- stack.sh | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/stack.sh b/stack.sh index 01668c208b..061289053b 100755 --- a/stack.sh +++ b/stack.sh @@ -306,9 +306,6 @@ sudo mkdir -p $DEST safe_chown -R $STACK_USER $DEST safe_chmod 0755 $DEST -# Basic test for ``$DEST`` path permissions (fatal on error unless skipped) -check_path_perm_sanity ${DEST} - # Destination path for service data DATA_DIR=${DATA_DIR:-${DEST}/data} sudo mkdir -p $DATA_DIR @@ -443,6 +440,8 @@ if [[ -n "$SCREEN_LOGDIR" ]]; then fi fi +# Basic test for ``$DEST`` path permissions (fatal on error unless skipped) +check_path_perm_sanity ${DEST} # Configure Error Traps # --------------------- From 0280f6f6c83b45b06220050e0a9353dfe364ef18 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 7 Oct 2015 09:19:53 -0400 Subject: [PATCH 0531/2941] remove corrupt get-pip.py If get-pip servers fall over and return 503 for a few hours (which they do medium regularly) we'll cache crud html, and everything will suck. We know this script should be python, so if it isn't, delete it. Change-Id: Ia9f6f7c7217939bc1ab5745f4a9d568acfbf04c8 --- tools/install_pip.sh | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tools/install_pip.sh b/tools/install_pip.sh index 41261800e4..dd4e4339cb 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -42,6 +42,15 @@ function get_versions { function install_get_pip { + # If get-pip.py isn't python, delete it. This was probably an + # outage on the server. + if [[ -r $LOCAL_PIP ]]; then + if ! head -1 $LOCAL_PIP | grep -q '#!/usr/bin/env python'; then + echo "WARNING: Corrupt $LOCAL_PIP found removing" + rm $LOCAL_PIP + fi + fi + # The OpenStack gate and others put a cached version of get-pip.py # for this to find, explicitly to avoid download issues. 
# From 3d6eaae21c0b11361b4d83a47e3e345682641e57 Mon Sep 17 00:00:00 2001 From: gong yong sheng Date: Tue, 15 Sep 2015 15:00:29 +0800 Subject: [PATCH 0532/2941] don't install root filters when Q_USE_ROOTWRAP is false Change-Id: I2f6edfcfb3789310fbeea8a509e0d9a13428becc Closes-bug: #1495822 --- lib/neutron-legacy | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index e67bd4ae32..350706c9b7 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -1165,6 +1165,9 @@ function _neutron_service_plugin_class_add { # _neutron_deploy_rootwrap_filters() - deploy rootwrap filters to $Q_CONF_ROOTWRAP_D (owned by root). function _neutron_deploy_rootwrap_filters { + if [[ "$Q_USE_ROOTWRAP" == "False" ]]; then + return + fi local srcdir=$1 sudo install -d -o root -m 755 $Q_CONF_ROOTWRAP_D sudo install -o root -m 644 $srcdir/etc/neutron/rootwrap.d/* $Q_CONF_ROOTWRAP_D/ From fa41b5b47ebbf6f2d973bdde235cb58694a2103f Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 8 Oct 2015 06:05:20 -0400 Subject: [PATCH 0533/2941] make curl fail on pypi errors This will make curl fail on pypi errors, and should prevent corrupt images from pypi going offline for a few hours randomly, which it does from time to time. 
Closes-Bug: #1503909 Change-Id: Ib4a740b7d1772e1e36aa701e42d3ac0f0ee12883 --- tools/install_pip.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/install_pip.sh b/tools/install_pip.sh index dd4e4339cb..13c1786fb9 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -69,7 +69,7 @@ function install_get_pip { timecond="-z $LOCAL_PIP" fi - curl --retry 6 --retry-delay 5 \ + curl -f --retry 6 --retry-delay 5 \ $timecond -o $LOCAL_PIP $PIP_GET_PIP_URL || \ die $LINENO "Download of get-pip.py failed" touch $LOCAL_PIP.downloaded From 56037e9a6e0286640fce1f812f3a9d10c3f8535b Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 8 Oct 2015 12:27:07 -0400 Subject: [PATCH 0534/2941] provide devstack lockout with .no-devstack file. This lets you specify that devstack should not be run by the user on the box that you are on. Helps with running commands in the wrong window. Change-Id: I7aa26df1a2e02331d596bbfefb0697937787252f --- stack.sh | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/stack.sh b/stack.sh index 01668c208b..1abae9c1fd 100755 --- a/stack.sh +++ b/stack.sh @@ -93,6 +93,15 @@ if [[ $EUID -eq 0 ]]; then exit 1 fi +# Provide a safety switch for devstack. If you do a lot of devstack, +# on a lot of different environments, you sometimes run it on the +# wrong box. This makes there be a way to prevent that. +if [[ -e $HOME/.no-devstack ]]; then + echo "You've marked this host as a no-devstack host, to save yourself from" + echo "running devstack accidentally. If this is in error, please remove the" + echo "~/.no-devstack file" + exit 1 +fi # Prepare the environment # ----------------------- From e82bac04981c8e41a919907d16621c9c952d9224 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Tue, 25 Aug 2015 14:29:08 +1000 Subject: [PATCH 0535/2941] Detect blank variable in trueorfalse As a follow-on to I8cefb58f49dcd2cb2def8a5071d0892af520e7f7, put in some detection around missing variable-to-test arguments in trueorfalse. 
Correct a couple of places where we were passing in blank strings, resulting in the default always being applied. Add test-cases and enhance the documentation a little. Depends-On: I8cefb58f49dcd2cb2def8a5071d0892af520e7f7 Change-Id: Icc0eb3808a2b6583828d8c47f0af4181e7e2c75a --- functions-common | 19 +++++++++++++++---- lib/heat | 4 ++-- tests/test_truefalse.sh | 8 ++++++++ tools/fixup_stuff.sh | 2 +- 4 files changed, 26 insertions(+), 7 deletions(-) diff --git a/functions-common b/functions-common index f9e0b5adaa..d506b846bd 100644 --- a/functions-common +++ b/functions-common @@ -106,16 +106,27 @@ function write_clouds_yaml { --os-project-name admin } -# Normalize config values to True or False -# Accepts as False: 0 no No NO false False FALSE -# Accepts as True: 1 yes Yes YES true True TRUE -# VAR=$(trueorfalse default-value test-value) +# trueorfalse +# +# Normalize config-value provided in variable VAR to either "True" or +# "False". If VAR is unset (i.e. $VAR evaluates as empty), the value +# of the second argument will be used as the default value. 
+# +# Accepts as False: 0 no No NO false False FALSE +# Accepts as True: 1 yes Yes YES true True TRUE +# +# usage: +# VAL=$(trueorfalse False VAL) function trueorfalse { local xtrace xtrace=$(set +o | grep xtrace) set +o xtrace local default=$1 + + if [ -z $2 ]; then + die $LINENO "variable to normalize required" + fi local testval=${!2:-} case "$testval" in diff --git a/lib/heat b/lib/heat index 3e6975ae26..c22369f56c 100644 --- a/lib/heat +++ b/lib/heat @@ -59,10 +59,10 @@ HEAT_BIN_DIR=$(get_python_exec_prefix) # other default options if [[ "$HEAT_STANDALONE" = "True" ]]; then # for standalone, use defaults which require no service user - HEAT_STACK_DOMAIN=`trueorfalse False $HEAT_STACK_DOMAIN` + HEAT_STACK_DOMAIN=$(trueorfalse False HEAT_STACK_DOMAIN) HEAT_DEFERRED_AUTH=${HEAT_DEFERRED_AUTH:-password} else - HEAT_STACK_DOMAIN=`trueorfalse True $HEAT_STACK_DOMAIN` + HEAT_STACK_DOMAIN=$(trueorfalse True HEAT_STACK_DOMAIN) HEAT_DEFERRED_AUTH=${HEAT_DEFERRED_AUTH:-trusts} fi diff --git a/tests/test_truefalse.sh b/tests/test_truefalse.sh index 2689589dc9..03996ceab4 100755 --- a/tests/test_truefalse.sh +++ b/tests/test_truefalse.sh @@ -8,6 +8,14 @@ TOP=$(cd $(dirname "$0")/.. && pwd) source $TOP/functions source $TOP/tests/unittest.sh +# common mistake is to use $FOO instead of "FOO"; in that case we +# should die +bash -c "source $TOP/functions-common; VAR=\$(trueorfalse False \$FOO)" &> /dev/null +assert_equal 1 $? 
"missing test-value" + +VAL=$(trueorfalse False MISSING_VARIABLE) +assert_equal "False" $VAL "blank test-value" + function test_trueorfalse { local one=1 local captrue=True diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index a601cf2f67..6ef32c8894 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -108,7 +108,7 @@ if is_fedora; then sudo setenforce 0 fi - FORCE_FIREWALLD=$(trueorfalse False $FORCE_FIREWALLD) + FORCE_FIREWALLD=$(trueorfalse False FORCE_FIREWALLD) if [[ $FORCE_FIREWALLD == "False" ]]; then # On Fedora 20 firewalld interacts badly with libvirt and # slows things down significantly (this issue was fixed in From 022c6672ce0e49273e21ece35186d8291f905ca2 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 22 Jun 2015 15:26:26 +0000 Subject: [PATCH 0536/2941] Revert "Install g-r version of OSC in configure_tempest" This reverts commit 1fa82aab6634bf815d162978e33b211e1fdef343. Change-Id: I931756e6d534839a6c9fb3cc6f5dc32c9a1e6436 --- lib/tempest | 4 ---- 1 file changed, 4 deletions(-) diff --git a/lib/tempest b/lib/tempest index 6eeab4e231..498a0c7338 100644 --- a/lib/tempest +++ b/lib/tempest @@ -119,10 +119,6 @@ function configure_tempest { pip_install_gr testrepository fi - # Used during configuration so make sure we have the correct - # version installed - pip_install_gr python-openstackclient - local image_lines local images local num_images From d8aa10e583efbd6574abad03f41674178fa89925 Mon Sep 17 00:00:00 2001 From: "Sean M. 
Collins" Date: Fri, 9 Oct 2015 12:21:30 -0400 Subject: [PATCH 0537/2941] docs: Move tip about Extension Drivers into misc section Change-Id: Ifd458495992a0fd1b7437c315b4fe45906830cb1 --- doc/source/guides/neutron.rst | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst index 9d4f54a05e..cf48e22fa2 100644 --- a/doc/source/guides/neutron.rst +++ b/doc/source/guides/neutron.rst @@ -164,11 +164,6 @@ In this configuration we are defining FLOATING_RANGE to be a subnet that exists in the private RFC1918 address space - however in in a real setup FLOATING_RANGE would be a public IP address range. -Note that extension drivers for the ML2 plugin is set by -`Q_ML2_PLUGIN_EXT_DRIVERS`, and it includes 'port_security' by default. If you -want to remove all the extension drivers (even 'port_security'), set -`Q_ML2_PLUGIN_EXT_DRIVERS` to blank. - Neutron Networking with Open vSwitch and Provider Networks ========================================================== @@ -304,5 +299,11 @@ disable ufw if it was enabled, do the following: sudo service iptables save sudo ufw disable +Configuring Extension Drivers for the ML2 Plugin +------------------------------------------------ +Extension drivers for the ML2 plugin are set with the variable +`Q_ML2_PLUGIN_EXT_DRIVERS`, and includes the 'port_security' extension +by default. If you want to remove all the extension drivers (even +'port_security'), set `Q_ML2_PLUGIN_EXT_DRIVERS` to blank. From c6d470142e0a0359a7322e9b76d61ba15caf95bc Mon Sep 17 00:00:00 2001 From: Chris Dent Date: Fri, 9 Oct 2015 14:57:05 +0000 Subject: [PATCH 0538/2941] Perform additional disable_service checks With the advent of plugins and their settings files it has become possible to disable_service in local.conf only to have the service re-enabled in a plugin settings file. This happens because of processing order. 
To get around this the disable_service function now aggregates service names into a DISABLED_SERVICES variable which is then checked during enable_service. If something tries to enable something that was previously disabled, a warning is produced in the log and the service is not enabled. Then after all configuration has been sourced a final check is to done by verify_disabled_services to confirm that something has not manually adjusted ENABLED_SERVICES to overcome a previously called disable_service. If something has, the stack dies with an error. Change-Id: I0f9403f44ed2fe693a46cd02486bd94043ce6b1a Closes-Bug: #1504304 --- functions-common | 34 ++++++++++++++++++++++++++-------- stack.sh | 1 + 2 files changed, 27 insertions(+), 8 deletions(-) diff --git a/functions-common b/functions-common index f9e0b5adaa..08e5e7fb35 100644 --- a/functions-common +++ b/functions-common @@ -1729,6 +1729,7 @@ function run_phase { # the source phase corresponds to settings loading in plugins if [[ "$mode" == "source" ]]; then load_plugin_settings + verify_disabled_services elif [[ "$mode" == "override_defaults" ]]; then plugin_override_defaults else @@ -1784,25 +1785,26 @@ function disable_negated_services { ENABLED_SERVICES=$(remove_disabled_services "$remaining" "$to_remove") } -# disable_service() removes the services passed as argument to the -# ``ENABLED_SERVICES`` list, if they are present. +# disable_service() prepares the services passed as argument to be +# removed from the ``ENABLED_SERVICES`` list, if they are present. # # For example: # disable_service rabbit # -# This function does not know about the special cases -# for nova, glance, and neutron built into is_service_enabled(). -# Uses global ``ENABLED_SERVICES`` +# Uses global ``DISABLED_SERVICES`` # disable_service service [service ...] 
function disable_service { - local tmpsvcs=",${ENABLED_SERVICES}," + local disabled_svcs="${DISABLED_SERVICES}" + local enabled_svcs=",${ENABLED_SERVICES}," local service for service in $@; do + disabled_svcs+=",$service" if is_service_enabled $service; then - tmpsvcs=${tmpsvcs//,$service,/,} + enabled_svcs=${enabled_svcs//,$service,/,} fi done - ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs") + DISABLED_SERVICES=$(_cleanup_service_list "$disabled_svcs") + ENABLED_SERVICES=$(_cleanup_service_list "$enabled_svcs") } # enable_service() adds the services passed as argument to the @@ -1819,6 +1821,10 @@ function enable_service { local tmpsvcs="${ENABLED_SERVICES}" local service for service in $@; do + if [[ ,${DISABLED_SERVICES}, =~ ,${service}, ]]; then + warn $LINENO "Attempt to enable_service ${service} when it has been disabled" + continue + fi if ! is_service_enabled $service; then tmpsvcs+=",$service" fi @@ -1923,6 +1929,18 @@ function use_exclusive_service { return 0 } +# Make sure that nothing has manipulated ENABLED_SERVICES in a way +# that conflicts with prior calls to disable_service. +# Uses global ``ENABLED_SERVICES`` +function verify_disabled_services { + local service + for service in ${ENABLED_SERVICES//,/ }; do + if [[ ,${DISABLED_SERVICES}, =~ ,${service}, ]]; then + die $LINENO "ENABLED_SERVICES directly modified to overcome 'disable_service ${service}'" + fi + done +} + # System Functions # ================ diff --git a/stack.sh b/stack.sh index db0ff98429..b65c55803c 100755 --- a/stack.sh +++ b/stack.sh @@ -553,6 +553,7 @@ source $TOP_DIR/lib/dstat # Phase: source run_phase source + # Interactive Configuration # ------------------------- From 4696db94a9b3b749cac61608daffdd883e057479 Mon Sep 17 00:00:00 2001 From: "Sean M. 
Collins" Date: Fri, 9 Oct 2015 12:31:57 -0400 Subject: [PATCH 0539/2941] docs: Add network diagram for provider net section Change-Id: Id39aaab5a7eadfa3fc09ba3d30c48b452d685904 --- doc/source/guides/neutron.rst | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst index 9d4f54a05e..99b7811e9c 100644 --- a/doc/source/guides/neutron.rst +++ b/doc/source/guides/neutron.rst @@ -183,6 +183,34 @@ given a VLAN tag and IP address range, so that instances created via DevStack will use the external router for L3 connectivity, as opposed to the neutron L3 service. +Physical Network Setup +---------------------- + +.. nwdiag:: + + nwdiag { + inet [ shape = cloud ]; + router; + inet -- router; + + network provider_net { + address = "203.0.113.0/24" + router [ address = "203.0.113.1" ]; + controller; + compute1; + compute2; + } + + network control_plane { + router [ address = "10.0.0.1" ] + address = "10.0.0.0/24" + controller [ address = "10.0.0.2" ] + compute1 [ address = "10.0.0.3" ] + compute2 [ address = "10.0.0.4" ] + } + } + + Service Configuration --------------------- From 611cab4b48f14227c636f34cec155dbd99a1d7f2 Mon Sep 17 00:00:00 2001 From: "Sean M. Collins" Date: Fri, 9 Oct 2015 12:54:32 -0400 Subject: [PATCH 0540/2941] docs: Add IPs to provider net node configurations Also remove variable definitions from compute node localrc that is only applicable on the control node. Change-Id: I37b00611ff08d8973f21af7db340d287b1deb4af --- doc/source/guides/neutron.rst | 36 ++++++++++++++++++++++++++++++----- 1 file changed, 31 insertions(+), 5 deletions(-) diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst index 99b7811e9c..67be067b38 100644 --- a/doc/source/guides/neutron.rst +++ b/doc/source/guides/neutron.rst @@ -236,8 +236,21 @@ controller node. 
:: + HOST_IP=10.0.0.2 + SERVICE_HOST=10.0.0.2 + MYSQL_HOST=10.0.0.2 + SERVICE_HOST=10.0.0.2 + MYSQL_HOST=10.0.0.2 + RABBIT_HOST=10.0.0.2 + GLANCE_HOSTPORT=10.0.0.2:9292 PUBLIC_INTERFACE=eth1 + ADMIN_PASSWORD=secrete + MYSQL_PASSWORD=secrete + RABBIT_PASSWORD=secrete + SERVICE_PASSWORD=secrete + SERVICE_TOKEN=secrete + ## Neutron options Q_USE_SECGROUP=True ENABLE_TENANT_VLANS=True @@ -269,24 +282,37 @@ would be a public IP address range that you or your organization has allocated to you, so that you could access your instances from the public internet. -The following is a snippet of the DevStack configuration on the -compute node. +The following is the DevStack configuration on +compute node 1. :: + HOST_IP=10.0.0.3 + SERVICE_HOST=10.0.0.2 + MYSQL_HOST=10.0.0.2 + SERVICE_HOST=10.0.0.2 + MYSQL_HOST=10.0.0.2 + RABBIT_HOST=10.0.0.2 + GLANCE_HOSTPORT=10.0.0.2:9292 + ADMIN_PASSWORD=secrete + MYSQL_PASSWORD=secrete + RABBIT_PASSWORD=secrete + SERVICE_PASSWORD=secrete + SERVICE_TOKEN=secrete + # Services that a compute node runs ENABLED_SERVICES=n-cpu,rabbit,q-agt ## Neutron options - Q_USE_SECGROUP=True - ENABLE_TENANT_VLANS=True - TENANT_VLAN_RANGE=3001:4000 PHYSICAL_NETWORK=default OVS_PHYSICAL_BRIDGE=br-ex PUBLIC_INTERFACE=eth1 Q_USE_PROVIDER_NETWORKING=True Q_L3_ENABLED=False +Compute node 2's configuration will be exactly the same, except +`HOST_IP` will be `10.0.0.4` + When DevStack is configured to use provider networking (via `Q_USE_PROVIDER_NETWORKING` is True and `Q_L3_ENABLED` is False) - DevStack will automatically add the network interface defined in From 64be3210e6bae709ee88736f2b7554db6e82f28e Mon Sep 17 00:00:00 2001 From: Dmitry Tantsur Date: Mon, 12 Oct 2015 13:10:24 +0200 Subject: [PATCH 0541/2941] Don't assume that $i variable won't be overriden in extras.d plugins This causes an incorrect warning about ironic jobs putting an unexpected file in extras.d. 
Change-Id: I57acf91fba3fe13b3cc8dd739034e146a0b237c4 --- functions-common | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/functions-common b/functions-common index 08e5e7fb35..3e5b3c23a1 100644 --- a/functions-common +++ b/functions-common @@ -1712,13 +1712,14 @@ function run_phase { local mode=$1 local phase=$2 if [[ -d $TOP_DIR/extras.d ]]; then - for i in $TOP_DIR/extras.d/*.sh; do - [[ -r $i ]] && source $i $mode $phase + local extra_plugin_file_name + for extra_plugin_file_name in $TOP_DIR/extras.d/*.sh; do + [[ -r $extra_plugin_file_name ]] && source $extra_plugin_file_name $mode $phase # NOTE(sdague): generate a big warning about using # extras.d in an unsupported way which will let us track # unsupported usage in the gate. local exceptions="50-ironic.sh 60-ceph.sh 80-tempest.sh" - local extra=$(basename $i) + local extra=$(basename $extra_plugin_file_name) if [[ ! ( $exceptions =~ "$extra" ) ]]; then deprecated "extras.d support is being removed in Mitaka-1" deprecated "jobs for project $extra will break after that point" From 95d4226c4ce4e3ddd0d159572790d04c17bea831 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 12 Oct 2015 07:34:41 -0400 Subject: [PATCH 0542/2941] make i local This prevents bleed out of the i variable to other functions that might call this inside their own iteration loop. Change-Id: I42d0c287a6f4bb24ae3871de9abb7e0de98a8462 --- lib/ironic | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ironic b/lib/ironic index 40475e0a83..6ec7e8092b 100644 --- a/lib/ironic +++ b/lib/ironic @@ -559,6 +559,7 @@ function wait_for_nova_resources { # timing out. local resource=$1 local expected_count=$2 + local i echo_summary "Waiting 2 minutes for Nova resource tracker to pick up $resource >= $expected_count" for i in $(seq 1 120); do if [ $(nova hypervisor-stats | grep " $resource " | get_field 2) -ge $expected_count ]; then From 887f182fa146b20011f9127e5653df8b42fa4897 Mon Sep 17 00:00:00 2001 From: "Sean M. 
Collins" Date: Mon, 12 Oct 2015 10:36:34 -0400 Subject: [PATCH 0543/2941] docs: merge multiple interface sections with provider network section In this guide, multiple interfaces in DevStack is only used when doing provider networking, so let's go ahead and just put the information inside the provider network section. That way it won't be confusing. Change-Id: I66f58ffb936230e72ac4cf8c04668e25dac5b17a --- doc/source/guides/neutron.rst | 134 +++++++++++----------------------- 1 file changed, 42 insertions(+), 92 deletions(-) diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst index e99a143592..5891f68033 100644 --- a/doc/source/guides/neutron.rst +++ b/doc/source/guides/neutron.rst @@ -72,98 +72,6 @@ DevStack Configuration - - -Using Neutron with Multiple Interfaces -====================================== - -The first interface, eth0 is used for the OpenStack management (API, -message bus, etc) as well as for ssh for an administrator to access -the machine. - -:: - - stack@compute:~$ ifconfig eth0 - eth0 Link encap:Ethernet HWaddr bc:16:65:20:af:fc - inet addr:192.168.1.18 - -eth1 is manually configured at boot to not have an IP address. -Consult your operating system documentation for the appropriate -technique. For Ubuntu, the contents of `/etc/network/interfaces` -contains: - -:: - - auto eth1 - iface eth1 inet manual - up ifconfig $IFACE 0.0.0.0 up - down ifconfig $IFACE 0.0.0.0 down - -The second physical interface, eth1 is added to a bridge (in this case -named br-ex), which is used to forward network traffic from guest VMs. -Network traffic from eth1 on the compute nodes is then NAT'd by the -controller node that runs Neutron's `neutron-l3-agent` and provides L3 -connectivity. 
- -:: - - stack@compute:~$ sudo ovs-vsctl add-br br-ex - stack@compute:~$ sudo ovs-vsctl add-port br-ex eth1 - stack@compute:~$ sudo ovs-vsctl show - 9a25c837-32ab-45f6-b9f2-1dd888abcf0f - Bridge br-ex - Port br-ex - Interface br-ex - type: internal - Port phy-br-ex - Interface phy-br-ex - type: patch - options: {peer=int-br-ex} - Port "eth1" - Interface "eth1" - - - - - -Neutron Networking with Open vSwitch -==================================== - -Configuring neutron, OpenStack Networking in DevStack is very similar to -configuring `nova-network` - many of the same configuration variables -(like `FIXED_RANGE` and `FLOATING_RANGE`) used by `nova-network` are -used by neutron, which is intentional. - -The only difference is the disabling of `nova-network` in your -local.conf, and the enabling of the neutron components. - - -Configuration -------------- - -:: - - FIXED_RANGE=10.0.0.0/24 - FLOATING_RANGE=192.168.27.0/24 - PUBLIC_NETWORK_GATEWAY=192.168.27.2 - - disable_service n-net - enable_service q-svc - enable_service q-agt - enable_service q-dhcp - enable_service q-meta - enable_service q-l3 - - Q_USE_SECGROUP=True - ENABLE_TENANT_VLANS=True - TENANT_VLAN_RANGE=1000:1999 - PHYSICAL_NETWORK=default - OVS_PHYSICAL_BRIDGE=br-ex - -In this configuration we are defining FLOATING_RANGE to be a -subnet that exists in the private RFC1918 address space - however in -in a real setup FLOATING_RANGE would be a public IP address range. - Neutron Networking with Open vSwitch and Provider Networks ========================================================== @@ -206,6 +114,48 @@ Physical Network Setup } +On a compute node, the first interface, eth0 is used for the OpenStack +management (API, message bus, etc) as well as for ssh for an +administrator to access the machine. + +:: + + stack@compute:~$ ifconfig eth0 + eth0 Link encap:Ethernet HWaddr bc:16:65:20:af:fc + inet addr:10.0.0.3 + +eth1 is manually configured at boot to not have an IP address. 
+Consult your operating system documentation for the appropriate +technique. For Ubuntu, the contents of `/etc/network/interfaces` +contains: + +:: + + auto eth1 + iface eth1 inet manual + up ifconfig $IFACE 0.0.0.0 up + down ifconfig $IFACE 0.0.0.0 down + +The second physical interface, eth1 is added to a bridge (in this case +named br-ex), which is used to forward network traffic from guest VMs. + +:: + + stack@compute:~$ sudo ovs-vsctl add-br br-ex + stack@compute:~$ sudo ovs-vsctl add-port br-ex eth1 + stack@compute:~$ sudo ovs-vsctl show + 9a25c837-32ab-45f6-b9f2-1dd888abcf0f + Bridge br-ex + Port br-ex + Interface br-ex + type: internal + Port phy-br-ex + Interface phy-br-ex + type: patch + options: {peer=int-br-ex} + Port "eth1" + Interface "eth1" + Service Configuration --------------------- From 95c33d532f5d69516c0fbe123595f00f00792995 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 7 Oct 2015 11:05:59 -0400 Subject: [PATCH 0544/2941] add timing infrastructure to devstack this adds a timing infrastructure to devstack to account for time taken up by set of operations. The first instance of this is accounting the time taken up by doing apt_get calls. 
Change-Id: I855ffe9c7a75e9943106af0f70cf715c34ae25c5 --- functions-common | 70 ++++++++++++++++++++++++++++++++++++++++++++++++ stack.sh | 2 ++ 2 files changed, 72 insertions(+) diff --git a/functions-common b/functions-common index 3e5b3c23a1..6b9a861bdf 100644 --- a/functions-common +++ b/functions-common @@ -976,12 +976,18 @@ function apt_get { local sudo="sudo" [[ "$(id -u)" = "0" ]] && sudo="env" + # time all the apt operations + time_start "apt-get" + $xtrace $sudo DEBIAN_FRONTEND=noninteractive \ http_proxy=${http_proxy:-} https_proxy=${https_proxy:-} \ no_proxy=${no_proxy:-} \ apt-get --option "Dpkg::Options::=--force-confold" --assume-yes "$@" + + # stop the clock + time_stop "apt-get" } function _parse_package_files { @@ -2115,6 +2121,70 @@ function test_with_retry { fi } +# Timing infrastructure - figure out where large blocks of time are +# used in DevStack +# +# The timing infrastructure for DevStack is about collecting buckets +# of time that are spend in some subtask. For instance, that might be +# 'apt', 'pip', 'osc', even database migrations. We do this by a pair +# of functions: time_start / time_stop. +# +# These take a single parameter: $name - which specifies the name of +# the bucket to be accounted against. time_totals function spits out +# the results. +# +# Resolution is only in whole seconds, so should be used for long +# running activities. + +declare -A TOTAL_TIME +declare -A START_TIME + +# time_start $name +# +# starts the clock for a timer by name. Errors if that clock is +# already started. +function time_start { + local name=$1 + local start_time=${START_TIME[$name]} + if [[ -n "$start_time" ]]; then + die $LINENO "Trying to start the clock on $name, but it's already been started" + fi + START_TIME[$name]=$(date +%s) +} + +# time_stop $name +# +# stops the clock for a timer by name, and accumulate that time in the +# global counter for that name. Errors if that clock had not +# previously been started. 
+function time_stop { + local name=$1 + local start_time=${START_TIME[$name]} + if [[ -z "$start_time" ]]; then + die $LINENO "Trying to stop the clock on $name, but it was never started" + fi + local end_time=$(date +%s) + local elapsed_time=$(($end_time - $start_time)) + local total=${TOTAL_TIME[$name]:-0} + # reset the clock so we can start it in the future + START_TIME[$name]="" + TOTAL_TIME[$name]=$(($total + $elapsed_time)) +} + +# time_totals +# +# prints out total time +function time_totals { + echo + echo "========================" + echo "DevStack Components Timed" + echo "========================" + echo + for t in ${!TOTAL_TIME[*]}; do + local v=${TOTAL_TIME[$t]} + echo "$t - $v secs" + done +} # Restore xtrace $XTRACE diff --git a/stack.sh b/stack.sh index b65c55803c..1976dff274 100755 --- a/stack.sh +++ b/stack.sh @@ -1366,6 +1366,8 @@ else exec 1>&3 fi +# Dump out the time totals +time_totals # Using the cloud # =============== From cb658fab15dbf8074038bc76fc54ec4afccf5716 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 8 Oct 2015 17:12:03 -0400 Subject: [PATCH 0545/2941] add pip install timing Change-Id: I368fec44858bd97fc6a314fb20eed2b10932cbb1 --- inc/python | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/inc/python b/inc/python index fe7bba6992..7d026c5933 100644 --- a/inc/python +++ b/inc/python @@ -80,6 +80,8 @@ function pip_install { return fi + time_start "pip_install" + PIP_UPGRADE=$(trueorfalse False PIP_UPGRADE) if [[ "$PIP_UPGRADE" = "True" ]] ; then upgrade="--upgrade" @@ -137,6 +139,8 @@ function pip_install { $cmd_pip $upgrade \ -r $test_req fi + + time_stop "pip_install" } # get version of a package from global requirements file From 1d662e86bbafebbdef01307b20a7f8a21d1f8e03 Mon Sep 17 00:00:00 2001 From: Tong Li Date: Tue, 22 Sep 2015 11:16:11 -0400 Subject: [PATCH 0546/2941] HOST_IP should not be used in moving address to route In neutron-legacy function _move_neutron_addresses_route, there are few lines trying to figure 
out the bridge IP by assuming that the bridge IP will be always same as the HOST_IP, this is not always true. When the nic bears the HOST_IP and the nic which will be used as the public network are different nics, the code in that method fails. Eventually the function fails with network unreachable error. This patch set fixes the problem, so that when HOST_IP and the IP for the bridge are different, devstack will still be setup correctly. Change-Id: I4d67f61c2ffd93f1e8ea2f8fe3b551044fab294e Closes-bug: #1498538 --- lib/neutron-legacy | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 350706c9b7..f1e5998ab7 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -810,11 +810,11 @@ function _move_neutron_addresses_route { local ADD_OVS_PORT="" if [[ $af == "inet" ]]; then - IP_BRD=$(ip -f $af a s dev $from_intf | grep $HOST_IP | awk '{ print $2, $3, $4; exit }') + IP_BRD=$(ip -f $af a s dev $from_intf | grep inet | awk '{ print $2, $3, $4; exit }') fi if [[ $af == "inet6" ]]; then - IP_BRD=$(ip -f $af a s dev $from_intf | grep $HOST_IPV6 | awk '{ print $2, $3, $4; exit }') + IP_BRD=$(ip -f $af a s dev $from_intf | grep inet6 | awk '{ print $2, $3, $4; exit }') fi if [ "$DEFAULT_ROUTE_GW" != "" ]; then From 085855479f5a56e9ce21fdb83e2691c8aad56aa0 Mon Sep 17 00:00:00 2001 From: Marian Horban Date: Mon, 12 Oct 2015 11:36:51 -0400 Subject: [PATCH 0547/2941] Added processing /ec2 URL With config option NOVA_USE_MOD_WSGI=True nova-ec2-api handles requests on /ec2 URL. 
Change-Id: I0c2e99bf8b5e5cf53cd176685b206038a4b0f78b --- files/apache-nova-ec2-api.template | 9 +++++++++ lib/nova | 13 +++++++++---- 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/files/apache-nova-ec2-api.template b/files/apache-nova-ec2-api.template index 235d958d1a..6443567740 100644 --- a/files/apache-nova-ec2-api.template +++ b/files/apache-nova-ec2-api.template @@ -14,3 +14,12 @@ Listen %PUBLICPORT% %SSLCERTFILE% %SSLKEYFILE% + +Alias /ec2 %PUBLICWSGI% + + SetHandler wsgi-script + Options +ExecCGI + WSGIProcessGroup nova-ec2-api + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + diff --git a/lib/nova b/lib/nova index 9830276232..eb1ae1cfaa 100644 --- a/lib/nova +++ b/lib/nova @@ -440,13 +440,18 @@ function create_nova_accounts { # EC2 if [[ "$KEYSTONE_CATALOG_BACKEND" = "sql" ]]; then - + local nova_ec2_api_url + if [[ "$NOVA_USE_MOD_WSGI" == "False" ]]; then + nova_ec2_api_url="$EC2_SERVICE_PROTOCOL://$SERVICE_HOST:$EC2_SERVICE_PORT/" + else + nova_ec2_api_url="$EC2_SERVICE_PROTOCOL://$SERVICE_HOST/ec2" + fi get_or_create_service "ec2" "ec2" "EC2 Compatibility Layer" get_or_create_endpoint "ec2" \ "$REGION_NAME" \ - "$EC2_SERVICE_PROTOCOL://$SERVICE_HOST:8773/" \ - "$EC2_SERVICE_PROTOCOL://$SERVICE_HOST:8773/" \ - "$EC2_SERVICE_PROTOCOL://$SERVICE_HOST:8773/" + "$nova_ec2_api_url" \ + "$nova_ec2_api_url" \ + "$nova_ec2_api_url" fi fi From 108b75d7a1ca653efa21e80bfb5ec3ead029954c Mon Sep 17 00:00:00 2001 From: Jordan Pittier Date: Tue, 13 Oct 2015 15:51:43 +0200 Subject: [PATCH 0548/2941] lib/tempest: remove duplicate iniset calls In Tempest config, `image-feature-enabled deactivate_image` is enabled twice. This patch removes one of the redundant call to iniset. Change-Id: Idbfcd6d6ee171c2c83736e17bbaf3d7a32c738b1 --- lib/tempest | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 6eeab4e231..da0fe41ca3 100644 --- a/lib/tempest +++ b/lib/tempest @@ -331,7 +331,6 @@ function configure_tempest { if [[ ! 
-z "$TEMPEST_HTTP_IMAGE" ]]; then iniset $TEMPEST_CONFIG image http_image $TEMPEST_HTTP_IMAGE fi - iniset $TEMPEST_CONFIG image-feature-enabled deactivate_image true # Image Features iniset $TEMPEST_CONFIG image-feature-enabled deactivate_image True From 8043bfaf5ec6059e7245ff397672b1da0e563013 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 14 Oct 2015 14:53:18 +1100 Subject: [PATCH 0549/2941] Turn off tracing for service functions These functions commonly externally called (as part of stackrc inclusion, even) and do a fair bit of iteration over long service-lists, which really fills up the logs of devstack and grenade with unnecessary details. The functions are well tested by unit-tests, so we are very unlikely to need to debug internal issues with them in a hurry. Thus turn logging down for them. Change-Id: I63b9a05a0678c7e0c7012f6d768c29fd67f090d2 --- functions-common | 35 ++++++++++++++++++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/functions-common b/functions-common index be3f81c412..c8f551de3c 100644 --- a/functions-common +++ b/functions-common @@ -1147,7 +1147,7 @@ function update_package_repo { if is_ubuntu; then local xtrace - xtrace=$(set +o | grep xtrace) + xtrace=$(set +o | grep xtrace) set +o xtrace if [[ "$REPOS_UPDATED" != "True" || "$RETRY_UPDATE" = "True" ]]; then # if there are transient errors pulling the updates, that's fine. @@ -1758,11 +1758,17 @@ function run_phase { # remove extra commas from the input string (i.e. 
``ENABLED_SERVICES``) # _cleanup_service_list service-list function _cleanup_service_list { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + echo "$1" | sed -e ' s/,,/,/g; s/^,//; s/,$// ' + + $xtrace } # disable_all_services() removes all current services @@ -1780,6 +1786,10 @@ function disable_all_services { # Uses global ``ENABLED_SERVICES`` # disable_negated_services function disable_negated_services { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + local to_remove="" local remaining="" local service @@ -1797,6 +1807,8 @@ function disable_negated_services { # go through the service list. if this service appears in the "to # be removed" list, drop it ENABLED_SERVICES=$(remove_disabled_services "$remaining" "$to_remove") + + $xtrace } # disable_service() prepares the services passed as argument to be @@ -1808,6 +1820,10 @@ function disable_negated_services { # Uses global ``DISABLED_SERVICES`` # disable_service service [service ...] function disable_service { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + local disabled_svcs="${DISABLED_SERVICES}" local enabled_svcs=",${ENABLED_SERVICES}," local service @@ -1819,6 +1835,8 @@ function disable_service { done DISABLED_SERVICES=$(_cleanup_service_list "$disabled_svcs") ENABLED_SERVICES=$(_cleanup_service_list "$enabled_svcs") + + $xtrace } # enable_service() adds the services passed as argument to the @@ -1832,6 +1850,10 @@ function disable_service { # Uses global ``ENABLED_SERVICES`` # enable_service service [service ...] 
function enable_service { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + local tmpsvcs="${ENABLED_SERVICES}" local service for service in $@; do @@ -1845,6 +1867,8 @@ function enable_service { done ENABLED_SERVICES=$(_cleanup_service_list "$tmpsvcs") disable_negated_services + + $xtrace } # is_service_enabled() checks if the service(s) specified as arguments are @@ -1873,6 +1897,7 @@ function is_service_enabled { local xtrace xtrace=$(set +o | grep xtrace) set +o xtrace + local enabled=1 local services=$@ local service @@ -1898,6 +1923,7 @@ function is_service_enabled { [[ ${service} == "swift" && ${ENABLED_SERVICES} =~ "s-" ]] && enabled=0 [[ ${service} == s-* && ${ENABLED_SERVICES} =~ "swift" ]] && enabled=0 done + $xtrace return $enabled } @@ -1905,6 +1931,10 @@ function is_service_enabled { # remove specified list from the input string # remove_disabled_services service-list remove-list function remove_disabled_services { + local xtrace + xtrace=$(set +o | grep xtrace) + set +o xtrace + local service_list=$1 local remove_list=$2 local service @@ -1923,6 +1953,9 @@ function remove_disabled_services { enabled="${enabled},$service" fi done + + $xtrace + _cleanup_service_list "$enabled" } From 316b348ad6068c485090761713685dfeb9ac4d38 Mon Sep 17 00:00:00 2001 From: Yalei Wang Date: Wed, 15 Jul 2015 21:00:31 +0800 Subject: [PATCH 0550/2941] Add verification of OVS_PHYSICAL_BRIDGE OVS_PHYSICAL_BRIDGE is not always set, like when you don't need specify the bridge mapping, and also it has no default value. So we need to add verification of OVS_PHYSICAL_BRIDGE in cleanup_neutron function where we refer to it. 
Change-Id: I69d113a7f3f7e67b09cb72fa0b0d3bba188e783a Close-Bug: #1474634 --- lib/neutron-legacy | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 350706c9b7..b3db77e874 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -838,18 +838,20 @@ function _move_neutron_addresses_route { # runs that a clean run would need to clean up function cleanup_neutron { - _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False "inet" + if [[ -n "$OVS_PHYSICAL_BRIDGE" ]]; then + _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False "inet" - if [[ $(ip -f inet6 a s dev "$OVS_PHYSICAL_BRIDGE" | grep -c 'global') != 0 ]]; then - _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False "inet6" - fi + if [[ $(ip -f inet6 a s dev "$OVS_PHYSICAL_BRIDGE" | grep -c 'global') != 0 ]]; then + _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False "inet6" + fi - if is_provider_network && is_ironic_hardware; then - for IP in $(ip addr show dev $OVS_PHYSICAL_BRIDGE | grep ' inet ' | awk '{print $2}'); do - sudo ip addr del $IP dev $OVS_PHYSICAL_BRIDGE - sudo ip addr add $IP dev $PUBLIC_INTERFACE - done - sudo route del -net $FIXED_RANGE gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE + if is_provider_network && is_ironic_hardware; then + for IP in $(ip addr show dev $OVS_PHYSICAL_BRIDGE | grep ' inet ' | awk '{print $2}'); do + sudo ip addr del $IP dev $OVS_PHYSICAL_BRIDGE + sudo ip addr add $IP dev $PUBLIC_INTERFACE + done + sudo route del -net $FIXED_RANGE gw $NETWORK_GATEWAY dev $OVS_PHYSICAL_BRIDGE + fi fi if is_neutron_ovs_base_plugin; then From 746e72d4c3494b9765c2ba221c50f0ca20128c29 Mon Sep 17 00:00:00 2001 From: Mark Hamzy Date: Wed, 14 Oct 2015 13:42:18 -0500 Subject: [PATCH 0551/2941] Restrict requests to fedora The os_RELEASE for RHEL is 7.1 (for example). 
Which does not work for comparisons to an integer. And, while I am at it, change base_path to not use a hard-coded directory. Change-Id: I64a04810cc7ba4668c2cb7a8df79c206301e9e16 --- tools/fixup_stuff.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/fixup_stuff.sh b/tools/fixup_stuff.sh index a601cf2f67..b57d140d6d 100755 --- a/tools/fixup_stuff.sh +++ b/tools/fixup_stuff.sh @@ -135,7 +135,7 @@ if is_fedora; then fi fi - if [[ "$os_RELEASE" -ge "21" ]]; then + if [[ "$os_VENDOR" == "Fedora" ]] && [[ "$os_RELEASE" -ge "21" ]]; then # requests ships vendored version of chardet/urllib3, but on # fedora these are symlinked back to the primary versions to # avoid duplication of code on disk. This is fine when @@ -152,9 +152,9 @@ if is_fedora; then # https://bugs.launchpad.net/glance/+bug/1476770 # https://bugzilla.redhat.com/show_bug.cgi?id=1253823 - base_path=/usr/lib/python2.7/site-packages/requests/packages + base_path=$(get_package_path requests)/packages if [ -L $base_path/chardet -o -L $base_path/urllib3 ]; then - sudo rm -f /usr/lib/python2.7/site-packages/requests/packages/{chardet,urllib3} + sudo rm -f $base_path/{chardet,urllib3} # install requests with the bundled urllib3 to avoid conflicts pip_install --upgrade --force-reinstall requests fi From 975f4209d0d7e6a38ec82ce9caafabaea18f2e9d Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Wed, 14 Oct 2015 15:12:32 +1100 Subject: [PATCH 0552/2941] Save interactive passwords to separate file The interactive password prompt currently saves to .localrc.auto However, this is removed when you re-run stack; that is required as it is how we source the localrc bits of local.conf, and we want the users' changes to be picked up. The passwords, however, should remain constant, because everything has already been setup with them. So write them to a separate file. Note we source before localrc so it can still overwrite them. 
Some minor flow-changes too Change-Id: I9871c8b8c7569626faf552628de69b811ba4dac0 Closes-Bug: #1505872 --- clean.sh | 4 +++- stack.sh | 10 ++++++---- stackrc | 5 +++++ 3 files changed, 14 insertions(+), 5 deletions(-) diff --git a/clean.sh b/clean.sh index b22a29cb41..ae28aa9ab7 100755 --- a/clean.sh +++ b/clean.sh @@ -134,7 +134,9 @@ rm -rf $DIRS_TO_CLEAN # Clean up files -FILES_TO_CLEAN=".localrc.auto docs/files docs/html shocco/ stack-screenrc test*.conf* test.ini*" +FILES_TO_CLEAN=".localrc.auto .localrc.password " +FILES_TO_CLEAN+="docs/files docs/html shocco/ " +FILES_TO_CLEAN+="stack-screenrc test*.conf* test.ini* " FILES_TO_CLEAN+=".stackenv .prereqs" for file in $FILES_TO_CLEAN; do diff --git a/stack.sh b/stack.sh index 0720744028..886d449f12 100755 --- a/stack.sh +++ b/stack.sh @@ -569,7 +569,7 @@ function read_password { if [[ -f $RC_DIR/localrc ]]; then localrc=$TOP_DIR/localrc else - localrc=$TOP_DIR/.localrc.auto + localrc=$TOP_DIR/.localrc.password fi # If the password is not defined yet, proceed to prompt user for a password. @@ -579,13 +579,15 @@ function read_password { touch $localrc fi - # Presumably if we got this far it can only be that our localrc is missing - # the required password. Prompt user for a password and write to localrc. + # Presumably if we got this far it can only be that our + # localrc is missing the required password. Prompt user for a + # password and write to localrc. + echo '' echo '################################################################################' echo $msg echo '################################################################################' - echo "This value will be written to your localrc file so you don't have to enter it " + echo "This value will be written to ${localrc} file so you don't have to enter it " echo "again. Use only alphanumeric characters." echo "If you leave this blank, a random default value will be used." 
pw=" " diff --git a/stackrc b/stackrc index c7c631362e..a1643356e0 100644 --- a/stackrc +++ b/stackrc @@ -103,6 +103,11 @@ HORIZON_APACHE_ROOT="/dashboard" # be disabled for automated testing by setting this value to False. USE_SCREEN=True +# Passwords generated by interactive devstack runs +if [[ -r $RC_DIR/.localrc.password ]]; then + source $RC_DIR/.localrc.password +fi + # allow local overrides of env variables, including repo config if [[ -f $RC_DIR/localrc ]]; then # Old-style user-supplied config From 394968fa3d6b0f3b296b49d038aac25b74c2dca7 Mon Sep 17 00:00:00 2001 From: Jamie Lennox Date: Fri, 28 Aug 2015 09:18:26 +1000 Subject: [PATCH 0553/2941] Always use v3 nova/neutron authentication There is no need to test here whether v2 is disabled or not. V3 Authentication will always be available and we should just use that. Change-Id: I0d2d76ebdf261917f1a2b23c65f0f843ae50f49a --- lib/neutron-legacy | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index ebb757058f..bbfe7f374b 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -468,19 +468,13 @@ function configure_neutron { function create_nova_conf_neutron { iniset $NOVA_CONF DEFAULT network_api_class "nova.network.neutronv2.api.API" - - if [ "$ENABLE_IDENTITY_V2" == "False" ]; then - iniset $NOVA_CONF neutron auth_plugin "v3password" - iniset $NOVA_CONF neutron auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v3" - iniset $NOVA_CONF neutron username "$Q_ADMIN_USERNAME" - iniset $NOVA_CONF neutron password "$SERVICE_PASSWORD" - iniset $NOVA_CONF neutron user_domain_name "default" - else - iniset $NOVA_CONF neutron admin_username "$Q_ADMIN_USERNAME" - iniset $NOVA_CONF neutron admin_password "$SERVICE_PASSWORD" - iniset $NOVA_CONF neutron admin_auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v2.0" - iniset $NOVA_CONF neutron admin_tenant_name 
"$SERVICE_TENANT_NAME" - fi + iniset $NOVA_CONF neutron auth_plugin "v3password" + iniset $NOVA_CONF neutron auth_url "$KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_AUTH_PORT/v3" + iniset $NOVA_CONF neutron username "$Q_ADMIN_USERNAME" + iniset $NOVA_CONF neutron password "$SERVICE_PASSWORD" + iniset $NOVA_CONF neutron user_domain_name "Default" + iniset $NOVA_CONF neutron project_name "$SERVICE_TENANT_NAME" + iniset $NOVA_CONF neutron project_domain_name "Default" iniset $NOVA_CONF neutron auth_strategy "$Q_AUTH_STRATEGY" iniset $NOVA_CONF neutron region_name "$REGION_NAME" iniset $NOVA_CONF neutron url "${Q_PROTOCOL}://$Q_HOST:$Q_PORT" From 5ed8af671328a0f9824bb33f9c637cc779a83ae3 Mon Sep 17 00:00:00 2001 From: Dmitry Tantsur Date: Thu, 15 Oct 2015 14:30:50 +0200 Subject: [PATCH 0554/2941] Fix devstack failure due to incorrect variable assignment After I9c8912a8fd596535589b207d7fc553b9d951d3fe this approach leads to a failure and breaks (at least) ironic-inspector gate. Change-Id: I19bb8ada9a6f42d375838cc88a376715918c2a3e --- lib/ironic | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/lib/ironic b/lib/ironic index 61eba6fcf3..74e2f931ff 100644 --- a/lib/ironic +++ b/lib/ironic @@ -634,8 +634,10 @@ function enroll_nodes { # First node created will be used for testing in ironic w/o glance # scenario, so we need to know its UUID. 
- local standalone_node_uuid - standalone_node_uuid=$([ $total_nodes -eq 0 ] && echo "--uuid $IRONIC_NODE_UUID") + local standalone_node_uuid="" + if [ $total_nodes -eq 0 ]; then + standalone_node_uuid="--uuid $IRONIC_NODE_UUID" + fi local node_id node_id=$(ironic node-create $standalone_node_uuid\ From c94403d8e6c480dad6d962e517c623e1c14ac6d2 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Thu, 15 Oct 2015 12:51:13 -0700 Subject: [PATCH 0555/2941] Disable compute interface attach Tempest tests if using cells Cells doesn't support the os-attach-interfaces API so disable those tests in Tempest if running with Cells. Change-Id: I5c7884407868eae70ea125f3f893c73214c04c75 --- lib/tempest | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/tempest b/lib/tempest index d29a6f9853..10dd652750 100644 --- a/lib/tempest +++ b/lib/tempest @@ -388,6 +388,8 @@ function configure_tempest { if is_service_enabled n-cell; then # Cells doesn't support shelving/unshelving iniset $TEMPEST_CONFIG compute-feature-enabled shelve False + # Cells doesn't support hot-plugging virtual interfaces. + iniset $TEMPEST_CONFIG compute-feature-enabled interface_attach False fi # Network From b14f96fb7aa9512d17399eb38024af652df7bc11 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 15 Oct 2015 11:50:10 +1100 Subject: [PATCH 0556/2941] Pin bashate and allow for substitution I want to release a new bashate, but I also don't want to risk consuming it before fully testing it. By pinning here, we can run all our usual CI on new versions before accepting them. Additionally, allow substitution of the bashate dependency via an environment variable. We can use this in a bashate test to substitute a path to a checkout with any changes applied. 
Change-Id: I165c4d66db8b7bdcff235ef7d8c99029637bb76a --- tox.ini | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index 1c238add8a..0df9877ba8 100644 --- a/tox.ini +++ b/tox.ini @@ -8,7 +8,8 @@ usedevelop = False install_command = pip install {opts} {packages} [testenv:bashate] -deps = bashate +deps = + {env:BASHATE_INSTALL_PATH:bashate==0.3.1} whitelist_externals = bash commands = bash -c "find {toxinidir} \ -not \( -type d -name .?\* -prune \) \ # prune all 'dot' dirs From c581c78c92dc5ea453bee8dd0a244554b4d6d57d Mon Sep 17 00:00:00 2001 From: ZhiQiang Fan Date: Sun, 18 Oct 2015 02:31:40 -0600 Subject: [PATCH 0557/2941] add file userrc_early to .gitignore list After devstack runs ./stack.sh, there is a file named userrc_early which contains sensitive information is created, we should add it to ignore list, because it is only for end user debugging. Change-Id: Ic99c279ec6a3606dc6f6ba9a7612b5ca7a2b6b10 --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 2778a65c89..8870bb362c 100644 --- a/.gitignore +++ b/.gitignore @@ -24,3 +24,4 @@ proto shocco src stack-screenrc +userrc_early From cc481740a0766f0f762cc1fc9f7f6db66e792cdc Mon Sep 17 00:00:00 2001 From: Einst Crazy Date: Tue, 20 Oct 2015 01:16:25 +0800 Subject: [PATCH 0558/2941] modify stackforge/swift3 to openstack/swift3 As swift3.git has move from stackforge/swift3 to openstack/swift3, so modify it. 
Change-Id: Ieaff4c93889c46c7d4b8ecada1a5d7cf3c775965 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index a1643356e0..4026ff8feb 100644 --- a/stackrc +++ b/stackrc @@ -447,7 +447,7 @@ GITREPO["keystonemiddleware"]=${KEYSTONEMIDDLEWARE_REPO:-${GIT_BASE}/openstack/k GITBRANCH["keystonemiddleware"]=${KEYSTONEMIDDLEWARE_BRANCH:-master} # s3 support for swift -SWIFT3_REPO=${SWIFT3_REPO:-${GIT_BASE}/stackforge/swift3.git} +SWIFT3_REPO=${SWIFT3_REPO:-${GIT_BASE}/openstack/swift3.git} SWIFT3_BRANCH=${SWIFT3_BRANCH:-master} # ceilometer middleware From e3a239b2990edfd6517c9bdb1b55fce5618f5277 Mon Sep 17 00:00:00 2001 From: Einst Crazy Date: Tue, 20 Oct 2015 01:34:05 +0800 Subject: [PATCH 0559/2941] Modify the build-wheels.sh to build_wheels.sh The path is tools/build_wheels.sh, but in the Makefile which is tools/build-wheels.sh. Modify it to the correct one. Change-Id: If297b65b539403af10a73adbbadfcd8281d40009 Closes-Bug: #1507699 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 082aff21d2..a6bb230708 100644 --- a/Makefile +++ b/Makefile @@ -26,7 +26,7 @@ unstack: ./unstack.sh wheels: - WHEELHOUSE=$(WHEELHOUSE) tools/build-wheels.sh + WHEELHOUSE=$(WHEELHOUSE) tools/build_wheels.sh docs: tox -edocs From 9f6b542a25dd6b48265c5e7317eb068886dc51c1 Mon Sep 17 00:00:00 2001 From: Zhang Jinnan Date: Tue, 20 Oct 2015 01:19:06 +0800 Subject: [PATCH 0560/2941] change stackforge url to openstack url beacuse of the stackforge project move to openstack project, so change the document url to git://git.openstack.org/openstack/. 
Change-Id: I1628c0aeb62ee519867fdaee56386e22978c4271 --- doc/source/plugin-registry.rst | 4 ++-- doc/source/plugins.rst | 13 ++++++------- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index eb09988a53..49b3a7fc02 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -68,7 +68,7 @@ Alternate Configs | Plugin Name | URL | Comments | | | | | +-------------+------------------------------------------------------------+------------+ -|glusterfs |git://git.openstack.org/stackforge/devstack-plugin-glusterfs| | +|glusterfs |git://git.openstack.org/openstack/devstack-plugin-glusterfs | | +-------------+------------------------------------------------------------+------------+ | | | | +-------------+------------------------------------------------------------+------------+ @@ -80,7 +80,7 @@ Additional Services | Plugin Name | URL | Comments | | | | | +----------------+--------------------------------------------------+------------+ -|ec2-api |git://git.openstack.org/stackforge/ec2api |[as1]_ | +|ec2-api |git://git.openstack.org/openstack/ec2-api |[as1]_ | +----------------+--------------------------------------------------+------------+ |ironic-inspector|git://git.openstack.org/openstack/ironic-inspector| | +----------------+--------------------------------------------------+------------+ diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst index fda601b414..8bd3797cd2 100644 --- a/doc/source/plugins.rst +++ b/doc/source/plugins.rst @@ -56,7 +56,7 @@ They are added in the following format:: An example would be as follows:: - enable_plugin ec2api git://git.openstack.org/stackforge/ec2api + enable_plugin ec2-api git://git.openstack.org/openstack/ec2-api plugin.sh contract ================== @@ -202,13 +202,12 @@ Using Plugins in the OpenStack Gate For everyday use, DevStack plugins can exist in any git tree that's accessible on the internet. 
However, when using DevStack plugins in the OpenStack gate, they must live in projects in OpenStack's -gerrit. Both ``openstack`` namespace and ``stackforge`` namespace are -fine. This allows testing of the plugin as well as provides network +gerrit. This allows testing of the plugin as well as provides network isolation against upstream git repository failures (which we see often enough to be an issue). Ideally a plugin will be included within the ``devstack`` directory of -the project they are being tested. For example, the stackforge/ec2-api +the project they are being tested. For example, the openstack/ec2-api project has its plugin support in its own tree. However, some times a DevStack plugin might be used solely to @@ -218,7 +217,7 @@ include: integration of back end storage (e.g. ceph or glusterfs), integration of SDN controllers (e.g. ovn, OpenDayLight), or integration of alternate RPC systems (e.g. zmq, qpid). In these cases the best practice is to build a dedicated -``stackforge/devstack-plugin-FOO`` project. +``openstack/devstack-plugin-FOO`` project. To enable a plugin to be used in a gate job, the following lines will be needed in your ``jenkins/jobs/.yaml`` definition in @@ -228,12 +227,12 @@ be needed in your ``jenkins/jobs/.yaml`` definition in # Because we are testing a non standard project, add the # our project repository. This makes zuul do the right # reference magic for testing changes. - export PROJECTS="stackforge/ec2-api $PROJECTS" + export PROJECTS="openstack/ec2-api $PROJECTS" # note the actual url here is somewhat irrelevant because it # caches in nodepool, however make it a valid url for # documentation purposes. 
- export DEVSTACK_LOCAL_CONFIG="enable_plugin ec2-api git://git.openstack.org/stackforge/ec2-api" + export DEVSTACK_LOCAL_CONFIG="enable_plugin ec2-api git://git.openstack.org/openstack/ec2-api" See Also ======== From 8f385dde22815de5e7487e52cf78374f5c2967b3 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Mon, 19 Oct 2015 15:13:30 -0400 Subject: [PATCH 0561/2941] Write clouds.yaml to /etc as well There are more than one user that need to access clouds.yaml values in tests. Rather than copying the file everywhere, simply output it to /etc/openstack. However, we have things copying it at the moment, so output to both places. A follow up patch will remove the homedir version. Change-Id: I21d3c2ad7a020a5ab02dc1ab532feae70b718892 --- functions-common | 59 +++++++++++++++++++++++++----------------------- 1 file changed, 31 insertions(+), 28 deletions(-) diff --git a/functions-common b/functions-common index f95bfe543f..42555a95f5 100644 --- a/functions-common +++ b/functions-common @@ -76,34 +76,37 @@ function write_clouds_yaml { # The location is a variable to allow for easier refactoring later to make it # overridable. There is currently no usecase where doing so makes sense, so # it's not currently configurable. 
- CLOUDS_YAML=~/.config/openstack/clouds.yaml - - mkdir -p $(dirname $CLOUDS_YAML) - - CA_CERT_ARG='' - if [ -f "$SSL_BUNDLE_FILE" ]; then - CA_CERT_ARG="--os-cacert $SSL_BUNDLE_FILE" - fi - $TOP_DIR/tools/update_clouds_yaml.py \ - --file $CLOUDS_YAML \ - --os-cloud devstack \ - --os-region-name $REGION_NAME \ - --os-identity-api-version 3 \ - $CA_CERT_ARG \ - --os-auth-url $KEYSTONE_AUTH_URI \ - --os-username demo \ - --os-password $ADMIN_PASSWORD \ - --os-project-name demo - $TOP_DIR/tools/update_clouds_yaml.py \ - --file $CLOUDS_YAML \ - --os-cloud devstack-admin \ - --os-region-name $REGION_NAME \ - --os-identity-api-version 3 \ - $CA_CERT_ARG \ - --os-auth-url $KEYSTONE_AUTH_URI \ - --os-username admin \ - --os-password $ADMIN_PASSWORD \ - --os-project-name admin + for clouds_path in /etc/openstack ~/.config/openstack ; do + CLOUDS_YAML=$clouds_path/clouds.yaml + + sudo mkdir -p $(dirname $CLOUDS_YAML) + sudo chown -R $STACK_USER $(dirname $CLOUDS_YAML) + + CA_CERT_ARG='' + if [ -f "$SSL_BUNDLE_FILE" ]; then + CA_CERT_ARG="--os-cacert $SSL_BUNDLE_FILE" + fi + $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack \ + --os-region-name $REGION_NAME \ + --os-identity-api-version 3 \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_AUTH_URI \ + --os-username demo \ + --os-password $ADMIN_PASSWORD \ + --os-project-name demo + $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-admin \ + --os-region-name $REGION_NAME \ + --os-identity-api-version 3 \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_AUTH_URI \ + --os-username admin \ + --os-password $ADMIN_PASSWORD \ + --os-project-name admin + done } # trueorfalse From c49917250f2dc5243d652e59d8c70c1437975dea Mon Sep 17 00:00:00 2001 From: Marian Horban Date: Thu, 22 Oct 2015 04:21:34 -0400 Subject: [PATCH 0562/2941] Removed starting of nova-ec2-api service Since nova-ec2-api service was removed from nova it is not needed in devstack. 
Change-Id: I91d4be02a1a9c2ca4d18256d9a37a5c2559f53b7 Closes-Bug: #1530798 --- files/apache-nova-ec2-api.template | 25 ----------------- lib/nova | 44 ------------------------------ 2 files changed, 69 deletions(-) delete mode 100644 files/apache-nova-ec2-api.template diff --git a/files/apache-nova-ec2-api.template b/files/apache-nova-ec2-api.template deleted file mode 100644 index 7b1d68b106..0000000000 --- a/files/apache-nova-ec2-api.template +++ /dev/null @@ -1,25 +0,0 @@ -Listen %PUBLICPORT% - - - WSGIDaemonProcess nova-ec2-api processes=%APIWORKERS% threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% - WSGIProcessGroup nova-ec2-api - WSGIScriptAlias / %PUBLICWSGI% - WSGIApplicationGroup %{GLOBAL} - WSGIPassAuthorization On - = 2.4> - ErrorLogFormat "%M" - - ErrorLog /var/log/%APACHE_NAME%/nova-ec2-api.log - %SSLENGINE% - %SSLCERTFILE% - %SSLKEYFILE% - - -Alias /ec2 %PUBLICWSGI% - - SetHandler wsgi-script - Options +ExecCGI - WSGIProcessGroup nova-ec2-api - WSGIApplicationGroup %{GLOBAL} - WSGIPassAuthorization On - diff --git a/lib/nova b/lib/nova index 6337f875ef..c75623f7d6 100644 --- a/lib/nova +++ b/lib/nova @@ -242,7 +242,6 @@ function cleanup_nova { function _cleanup_nova_apache_wsgi { sudo rm -f $NOVA_WSGI_DIR/* sudo rm -f $(apache_site_config_for nova-api) - sudo rm -f $(apache_site_config_for nova-ec2-api) sudo rm -f $(apache_site_config_for nova-metadata) } @@ -252,15 +251,12 @@ function _config_nova_apache_wsgi { local nova_apache_conf nova_apache_conf=$(apache_site_config_for nova-api) - local nova_ec2_apache_conf - nova_ec2_apache_conf=$(apache_site_config_for nova-ec2-api) local nova_metadata_apache_conf nova_metadata_apache_conf=$(apache_site_config_for nova-metadata) local nova_ssl="" local nova_certfile="" local nova_keyfile="" local nova_api_port=$NOVA_SERVICE_PORT - local nova_ec2_api_port=$EC2_SERVICE_PORT local nova_metadata_port=$METADATA_SERVICE_PORT local venv_path="" @@ -275,7 +271,6 @@ function _config_nova_apache_wsgi { # copy 
proxy vhost and wsgi helper files sudo cp $NOVA_DIR/nova/wsgi/nova-api.py $NOVA_WSGI_DIR/nova-api - sudo cp $NOVA_DIR/nova/wsgi/nova-ec2-api.py $NOVA_WSGI_DIR/nova-ec2-api sudo cp $NOVA_DIR/nova/wsgi/nova-metadata.py $NOVA_WSGI_DIR/nova-metadata sudo cp $FILES/apache-nova-api.template $nova_apache_conf @@ -291,19 +286,6 @@ function _config_nova_apache_wsgi { s|%APIWORKERS%|$API_WORKERS|g " -i $nova_apache_conf - sudo cp $FILES/apache-nova-ec2-api.template $nova_ec2_apache_conf - sudo sed -e " - s|%PUBLICPORT%|$nova_ec2_api_port|g; - s|%APACHE_NAME%|$APACHE_NAME|g; - s|%PUBLICWSGI%|$NOVA_WSGI_DIR/nova-ec2-api|g; - s|%SSLENGINE%|$nova_ssl|g; - s|%SSLCERTFILE%|$nova_certfile|g; - s|%SSLKEYFILE%|$nova_keyfile|g; - s|%USER%|$STACK_USER|g; - s|%VIRTUALENV%|$venv_path|g - s|%APIWORKERS%|$API_WORKERS|g - " -i $nova_ec2_apache_conf - sudo cp $FILES/apache-nova-metadata.template $nova_metadata_apache_conf sudo sed -e " s|%PUBLICPORT%|$nova_metadata_port|g; @@ -461,22 +443,6 @@ function create_nova_accounts { # swift through the s3 api. 
get_or_add_user_project_role ResellerAdmin nova $SERVICE_TENANT_NAME fi - - # EC2 - if [[ "$KEYSTONE_CATALOG_BACKEND" = "sql" ]]; then - local nova_ec2_api_url - if [[ "$NOVA_USE_MOD_WSGI" == "False" ]]; then - nova_ec2_api_url="$EC2_SERVICE_PROTOCOL://$SERVICE_HOST:$EC2_SERVICE_PORT/" - else - nova_ec2_api_url="$EC2_SERVICE_PROTOCOL://$SERVICE_HOST/ec2" - fi - get_or_create_service "ec2" "ec2" "EC2 Compatibility Layer" - get_or_create_endpoint "ec2" \ - "$REGION_NAME" \ - "$nova_ec2_api_url" \ - "$nova_ec2_api_url" \ - "$nova_ec2_api_url" - fi fi # S3 @@ -522,7 +488,6 @@ function create_nova_conf { iniset $NOVA_CONF api_database connection `database_connection_url nova_api` iniset $NOVA_CONF DEFAULT instance_name_template "${INSTANCE_NAME_PREFIX}%08x" iniset $NOVA_CONF DEFAULT osapi_compute_listen "$NOVA_SERVICE_LISTEN_ADDRESS" - iniset $NOVA_CONF DEFAULT ec2_listen "$NOVA_SERVICE_LISTEN_ADDRESS" iniset $NOVA_CONF DEFAULT metadata_listen "$NOVA_SERVICE_LISTEN_ADDRESS" iniset $NOVA_CONF DEFAULT s3_listen "$NOVA_SERVICE_LISTEN_ADDRESS" @@ -629,12 +594,10 @@ function create_nova_conf { fi iniset $NOVA_CONF DEFAULT ec2_dmz_host "$EC2_DMZ_HOST" - iniset $NOVA_CONF DEFAULT keystone_ec2_url $KEYSTONE_SERVICE_PROTOCOL://$KEYSTONE_SERVICE_HOST:$KEYSTONE_SERVICE_PORT/v2.0/ec2tokens iniset_rpc_backend nova $NOVA_CONF iniset $NOVA_CONF glance api_servers "${GLANCE_SERVICE_PROTOCOL}://${GLANCE_HOSTPORT}" iniset $NOVA_CONF DEFAULT osapi_compute_workers "$API_WORKERS" - iniset $NOVA_CONF DEFAULT ec2_workers "$API_WORKERS" iniset $NOVA_CONF DEFAULT metadata_workers "$API_WORKERS" # don't let the conductor get out of control now that we're using a pure python db driver iniset $NOVA_CONF conductor workers "$API_WORKERS" @@ -660,10 +623,6 @@ function create_nova_conf { iniset $NOVA_CONF DEFAULT enabled_ssl_apis "$NOVA_ENABLED_APIS" fi - if is_service_enabled tls-proxy; then - iniset $NOVA_CONF DEFAULT ec2_listen_port $EC2_SERVICE_PORT_INT - fi - if is_service_enabled n-sproxy; then 
iniset $NOVA_CONF serial_console serialproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" iniset $NOVA_CONF serial_console enabled True @@ -817,11 +776,9 @@ function start_nova_api { enabled_site_file=$(apache_site_config_for nova-api) if [ -f ${enabled_site_file} ] && [ "$NOVA_USE_MOD_WSGI" == "True" ]; then enable_apache_site nova-api - enable_apache_site nova-ec2-api enable_apache_site nova-metadata restart_apache_server tail_log nova-api /var/log/$APACHE_NAME/nova-api.log - tail_log nova-ec2-api /var/log/$APACHE_NAME/nova-ec2-api.log tail_log nova-metadata /var/log/$APACHE_NAME/nova-metadata.log else run_process n-api "$NOVA_BIN_DIR/nova-api" @@ -937,7 +894,6 @@ function stop_nova_compute { function stop_nova_rest { if [ "$NOVA_USE_MOD_WSGI" == "True" ]; then disable_apache_site nova-api - disable_apache_site nova-ec2-api disable_apache_site nova-metadata restart_apache_server else From c148b13c60525be637d23a1330cf56376f11432f Mon Sep 17 00:00:00 2001 From: Matthew Treinish Date: Thu, 22 Oct 2015 10:05:00 -0400 Subject: [PATCH 0563/2941] Ensure we disable tenant isolation without admin This commit fixes an issue with the tempest configuration when TEMPEST_HAS_ADMIN is disabled. Without admin credentials tempest is unable to create credentials at all so enabling tenant isolation is not going to work. Previously devstack wasn't setting it one way or the other when TEMPEST_HAS_ADMIN was set, which results in the default of being enabled. So jobs that try to run tempest without admin were failing. 
Change-Id: Iff496cb5cbf29f17c130cfad746b48d8547ca965 --- lib/tempest | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/tempest b/lib/tempest index 10dd652750..bd76be570d 100644 --- a/lib/tempest +++ b/lib/tempest @@ -564,9 +564,13 @@ function configure_tempest { fi iniset $TEMPEST_CONFIG auth allow_tenant_isolation False iniset $TEMPEST_CONFIG auth test_accounts_file "etc/accounts.yaml" + elif [[ $TEMPEST_HAS_ADMIN == "False" ]]; then + iniset $TEMPEST_CONFIG auth allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-False} + else iniset $TEMPEST_CONFIG auth allow_tenant_isolation ${TEMPEST_ALLOW_TENANT_ISOLATION:-True} fi + # Restore IFS IFS=$ifs } From 22f747b8df023dd74808df097f5fa5a92168f620 Mon Sep 17 00:00:00 2001 From: Falk Reimann Date: Fri, 28 Aug 2015 12:40:19 +0200 Subject: [PATCH 0564/2941] Use swift port variable in keystone and cinder This patch alows specifiying a deviation of the swift default port 8080 with variable SWIFT_DEFAULT_BIND_PORT. The created endpoints in keystone for object-store and the backup_swift_url in cinder.conf will use variable SWIFT_DEFAULT_BIND_PORT instead of the fixed port 8080. 
Change-Id: I47bbcf77368c430718fb8f29b7de1ff305e64422 Closes-Bug: #1489767 --- lib/cinder | 2 +- lib/keystone | 6 +++--- lib/swift | 17 +++++++++-------- 3 files changed, 13 insertions(+), 12 deletions(-) diff --git a/lib/cinder b/lib/cinder index e5ed2db1a3..2ed02e8b33 100644 --- a/lib/cinder +++ b/lib/cinder @@ -256,7 +256,7 @@ function configure_cinder { fi if is_service_enabled swift; then - iniset $CINDER_CONF DEFAULT backup_swift_url "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:8080/v1/AUTH_" + iniset $CINDER_CONF DEFAULT backup_swift_url "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_" fi if is_service_enabled ceilometer; then diff --git a/lib/keystone b/lib/keystone index e2448c9068..31d5448499 100644 --- a/lib/keystone +++ b/lib/keystone @@ -266,9 +266,9 @@ function configure_keystone { # Add swift endpoints to service catalog if swift is enabled if is_service_enabled s-proxy; then - echo "catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG - echo "catalog.RegionOne.object_store.adminURL = http://%SERVICE_HOST%:8080/" >> $KEYSTONE_CATALOG - echo "catalog.RegionOne.object_store.internalURL = http://%SERVICE_HOST%:8080/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG + echo "catalog.RegionOne.object_store.publicURL = http://%SERVICE_HOST%:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG + echo "catalog.RegionOne.object_store.adminURL = http://%SERVICE_HOST%:$SWIFT_DEFAULT_BIND_PORT/" >> $KEYSTONE_CATALOG + echo "catalog.RegionOne.object_store.internalURL = http://%SERVICE_HOST%:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_\$(tenant_id)s" >> $KEYSTONE_CATALOG echo "catalog.RegionOne.object_store.name = Swift Service" >> $KEYSTONE_CATALOG fi diff --git a/lib/swift b/lib/swift index fc736a60bc..4c2b292bd7 100644 --- a/lib/swift +++ b/lib/swift @@ -44,6 +44,7 @@ SWIFT_APACHE_WSGI_DIR=${SWIFT_APACHE_WSGI_DIR:-/var/www/swift} SWIFT3_DIR=$DEST/swift3 
SWIFT_SERVICE_PROTOCOL=${SWIFT_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} +SWIFT_DEFAULT_BIND_PORT=${SWIFT_DEFAULT_BIND_PORT:-8080} SWIFT_DEFAULT_BIND_PORT_INT=${SWIFT_DEFAULT_BIND_PORT_INT:-8081} SWIFT_SERVICE_LOCAL_HOST=${SWIFT_SERVICE_LOCAL_HOST:-$SERVICE_LOCAL_HOST} SWIFT_SERVICE_LISTEN_ADDRESS=${SWIFT_SERVICE_LISTEN_ADDRESS:-$SERVICE_LISTEN_ADDRESS} @@ -62,7 +63,7 @@ SWIFT_CONF_DIR=${SWIFT_CONF_DIR:-/etc/swift} if is_service_enabled s-proxy && is_service_enabled swift3; then # If we are using ``swift3``, we can default the S3 port to swift instead # of nova-objectstore - S3_SERVICE_PORT=${S3_SERVICE_PORT:-8080} + S3_SERVICE_PORT=${S3_SERVICE_PORT:-$SWIFT_DEFAULT_BIND_PORT} fi if is_service_enabled g-api; then @@ -187,7 +188,7 @@ function _cleanup_swift_apache_wsgi { # _config_swift_apache_wsgi() - Set WSGI config files of Swift function _config_swift_apache_wsgi { sudo mkdir -p ${SWIFT_APACHE_WSGI_DIR} - local proxy_port=${SWIFT_DEFAULT_BIND_PORT:-8080} + local proxy_port=${SWIFT_DEFAULT_BIND_PORT} # copy proxy vhost and wsgi file sudo cp ${SWIFT_DIR}/examples/apache2/proxy-server.template $(apache_site_config_for proxy-server) @@ -348,7 +349,7 @@ function configure_swift { local csyncfile=${SWIFT_CONF_DIR}/container-sync-realms.conf cp ${SWIFT_DIR}/etc/container-sync-realms.conf-sample ${csyncfile} iniset ${csyncfile} realm1 key realm1key - iniset ${csyncfile} realm1 cluster_name1 "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:8080/v1/" + iniset ${csyncfile} realm1 cluster_name1 "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/" iniuncomment ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT user ${STACK_USER} @@ -369,7 +370,7 @@ function configure_swift { if is_service_enabled tls-proxy; then iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT_INT} else - iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT bind_port ${SWIFT_DEFAULT_BIND_PORT:-8080} + iniset ${SWIFT_CONFIG_PROXY_SERVER} DEFAULT 
bind_port ${SWIFT_DEFAULT_BIND_PORT} fi if is_ssl_enabled_service s-proxy; then @@ -621,9 +622,9 @@ function create_swift_accounts { get_or_create_service "swift" "object-store" "Swift Service" get_or_create_endpoint "object-store" \ "$REGION_NAME" \ - "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s" \ - "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:8080" \ - "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:8080/v1/AUTH_\$(tenant_id)s" + "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_\$(tenant_id)s" \ + "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT" \ + "$SWIFT_SERVICE_PROTOCOL://$SERVICE_HOST:$SWIFT_DEFAULT_BIND_PORT/v1/AUTH_\$(tenant_id)s" fi local swift_tenant_test1=$(get_or_create_project swifttenanttest1 default) @@ -764,7 +765,7 @@ function start_swift { swift-init --run-dir=${SWIFT_DATA_DIR}/run ${type} stop || true done if is_service_enabled tls-proxy; then - local proxy_port=${SWIFT_DEFAULT_BIND_PORT:-8080} + local proxy_port=${SWIFT_DEFAULT_BIND_PORT} start_tls_proxy '*' $proxy_port $SERVICE_HOST $SWIFT_DEFAULT_BIND_PORT_INT & fi run_process s-proxy "$SWIFT_DIR/bin/swift-proxy-server ${SWIFT_CONF_DIR}/proxy-server.conf -v" From 7792bc39d4f9f7ec7fd1b81ddbb30c30988f03e6 Mon Sep 17 00:00:00 2001 From: Tony Breeds Date: Fri, 23 Oct 2015 13:57:14 +1100 Subject: [PATCH 0565/2941] Add new oslo.privsep library to oslo repos A new project olos.privsep has been created but failes sdvm testing as even though the library is added ro PROJECTS and LIBS_FROM_GIT it isn't installed by devstack. 
Add oslo.privsep to the install_oslo function Change-Id: Ia4d56747d56dcfe50889ebbdf9d553df13e1b950 --- lib/oslo | 2 ++ stackrc | 4 ++++ tests/test_libs_from_pypi.sh | 2 +- 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/lib/oslo b/lib/oslo index f64f327ccd..56615faaa3 100644 --- a/lib/oslo +++ b/lib/oslo @@ -36,6 +36,7 @@ GITDIR["oslo.log"]=$DEST/oslo.log GITDIR["oslo.messaging"]=$DEST/oslo.messaging GITDIR["oslo.middleware"]=$DEST/oslo.middleware GITDIR["oslo.policy"]=$DEST/oslo.policy +GITDIR["oslo.privsep"]=$DEST/oslo.privsep GITDIR["oslo.reports"]=$DEST/oslo.reports GITDIR["oslo.rootwrap"]=$DEST/oslo.rootwrap GITDIR["oslo.serialization"]=$DEST/oslo.serialization @@ -79,6 +80,7 @@ function install_oslo { _do_install_oslo_lib "oslo.messaging" _do_install_oslo_lib "oslo.middleware" _do_install_oslo_lib "oslo.policy" + _do_install_oslo_lib "oslo.privsep" _do_install_oslo_lib "oslo.reports" _do_install_oslo_lib "oslo.rootwrap" _do_install_oslo_lib "oslo.serialization" diff --git a/stackrc b/stackrc index 4026ff8feb..819aa0125c 100644 --- a/stackrc +++ b/stackrc @@ -371,6 +371,10 @@ GITBRANCH["oslo.middleware"]=${OSLOMID_BRANCH:-master} GITREPO["oslo.policy"]=${OSLOPOLICY_REPO:-${GIT_BASE}/openstack/oslo.policy.git} GITBRANCH["oslo.policy"]=${OSLOPOLICY_BRANCH:-master} +# oslo.privsep +GITREPO["oslo.privsep"]=${OSLOPRIVSEP_REPO:-${GIT_BASE}/openstack/oslo.privsep.git} +GITBRANCH["oslo.privsep"]=${OSLOPRIVSEP_BRANCH:-master} + # oslo.reports GITREPO["oslo.reports"]=${OSLOREPORTS_REPO:-${GIT_BASE}/openstack/oslo.reports.git} GITBRANCH["oslo.reports"]=${OSLOREPORTS_BRANCH:-master} diff --git a/tests/test_libs_from_pypi.sh b/tests/test_libs_from_pypi.sh index 8e8c0227a9..f31560a1fe 100755 --- a/tests/test_libs_from_pypi.sh +++ b/tests/test_libs_from_pypi.sh @@ -41,7 +41,7 @@ ALL_LIBS+=" oslo.utils python-swiftclient" ALL_LIBS+=" python-neutronclient tooz ceilometermiddleware oslo.policy" ALL_LIBS+=" debtcollector os-brick automaton futurist oslo.service" 
ALL_LIBS+=" oslo.cache oslo.reports" -ALL_LIBS+=" keystoneauth ironic-lib" +ALL_LIBS+=" keystoneauth ironic-lib oslo.privsep" # Generate the above list with # echo ${!GITREPO[@]} From b814b536c54f4ed840cf3eb436c4841d2ed575c4 Mon Sep 17 00:00:00 2001 From: Cedric Brandily Date: Thu, 22 Oct 2015 22:25:45 +0200 Subject: [PATCH 0566/2941] Prepare neutron "use_namespaces" option removal The neutron use_namespaces option is deprecated since Kilo, it's time remove it from neutron and devstack. Related-bug: #1508188 Change-Id: I4feb2a15c7e1e4bfdbed2531b18b8e7d798ab3cc --- lib/ironic | 9 ++------- lib/neutron-legacy | 15 ++++----------- lib/tempest | 19 +++---------------- 3 files changed, 9 insertions(+), 34 deletions(-) diff --git a/lib/ironic b/lib/ironic index 74e2f931ff..de07b49667 100644 --- a/lib/ironic +++ b/lib/ironic @@ -516,13 +516,8 @@ function create_ovs_taps { # intentional sleep to make sure the tag has been set to port sleep 10 - if [[ "$Q_USE_NAMESPACE" = "True" ]]; then - local tapdev - tapdev=$(sudo ip netns exec qdhcp-${ironic_net_id} ip link list | grep " tap" | cut -d':' -f2 | cut -d'@' -f1 | cut -b2-) - else - local tapdev - tapdev=$(sudo ip link list | grep " tap" | cut -d':' -f2 | cut -d'@' -f1 | cut -b2-) - fi + local tapdev + tapdev=$(sudo ip netns exec qdhcp-${ironic_net_id} ip link list | grep " tap" | cut -d':' -f2 | cut -d'@' -f1 | cut -b2-) local tag_id tag_id=$(sudo ovs-vsctl show |grep ${tapdev} -A1 -m1 | grep tag | cut -d':' -f2 | cut -b2-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 4e51425ffc..3c3c96b7bd 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -144,8 +144,6 @@ Q_LISTEN_ADDRESS=${Q_LISTEN_ADDRESS:-$SERVICE_LISTEN_ADDRESS} Q_ADMIN_USERNAME=${Q_ADMIN_USERNAME:-neutron} # Default auth strategy Q_AUTH_STRATEGY=${Q_AUTH_STRATEGY:-keystone} -# Use namespace or not -Q_USE_NAMESPACE=${Q_USE_NAMESPACE:-True} # RHEL's support for namespaces requires using veths with ovs Q_OVS_USE_VETH=${Q_OVS_USE_VETH:-False} 
Q_USE_ROOTWRAP=${Q_USE_ROOTWRAP:-True} @@ -208,7 +206,7 @@ Q_PUBLIC_VETH_INT=${Q_PUBLIC_VETH_INT:-veth-pub-int} # The plugin supports L3. Q_L3_ENABLED=${Q_L3_ENABLED:-False} # L3 routers exist per tenant -Q_L3_ROUTER_PER_TENANT=${Q_L3_ROUTER_PER_TENANT:-False} +Q_L3_ROUTER_PER_TENANT=${Q_L3_ROUTER_PER_TENANT:-True} # List of config file names in addition to the main plugin config file # See _configure_neutron_common() for details about setting it up @@ -968,7 +966,6 @@ function _configure_neutron_debug_command { iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT verbose False iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT debug False - iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE iniset $NEUTRON_TEST_CONFIG_FILE AGENT root_helper "$Q_RR_COMMAND" if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then iniset $NEUTRON_TEST_CONFIG_FILE AGENT root_helper_daemon "$Q_RR_DAEMON_COMMAND" @@ -985,7 +982,6 @@ function _configure_neutron_dhcp_agent { iniset $Q_DHCP_CONF_FILE DEFAULT verbose True iniset $Q_DHCP_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $Q_DHCP_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE iniset $Q_DHCP_CONF_FILE AGENT root_helper "$Q_RR_COMMAND" if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then iniset $Q_DHCP_CONF_FILE AGENT root_helper_daemon "$Q_RR_DAEMON_COMMAND" @@ -1009,8 +1005,6 @@ function _configure_neutron_dhcp_agent { function _configure_neutron_l3_agent { Q_L3_ENABLED=True - # for l3-agent, only use per tenant router if we have namespaces - Q_L3_ROUTER_PER_TENANT=$Q_USE_NAMESPACE if is_service_enabled q-vpn; then neutron_vpn_configure_agent @@ -1020,7 +1014,6 @@ function _configure_neutron_l3_agent { iniset $Q_L3_CONF_FILE DEFAULT verbose True iniset $Q_L3_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL - iniset $Q_L3_CONF_FILE DEFAULT use_namespaces $Q_USE_NAMESPACE iniset $Q_L3_CONF_FILE AGENT root_helper "$Q_RR_COMMAND" if [[ "$Q_USE_ROOTWRAP_DAEMON" == "True" ]]; then iniset $Q_L3_CONF_FILE AGENT root_helper_daemon 
"$Q_RR_DAEMON_COMMAND" @@ -1309,7 +1302,7 @@ function _neutron_configure_router_v4 { if is_service_enabled q-l3; then # Configure and enable public bridge local ext_gw_interface="none" - if is_neutron_ovs_base_plugin && [[ "$Q_USE_NAMESPACE" = "True" ]]; then + if is_neutron_ovs_base_plugin; then ext_gw_interface=$(_neutron_get_ext_gw_interface) elif [[ "$Q_AGENT" = "linuxbridge" ]]; then # Search for the brq device the neutron router and network for $FIXED_RANGE @@ -1359,7 +1352,7 @@ function _neutron_configure_router_v6 { IPV6_ROUTER_GW_IP=`neutron port-list -c fixed_ips | grep $ipv6_pub_subnet_id | awk -F '"' -v subnet_id=$ipv6_pub_subnet_id '$4 == subnet_id { print $8; }'` die_if_not_set $LINENO IPV6_ROUTER_GW_IP "Failure retrieving IPV6_ROUTER_GW_IP" - if is_neutron_ovs_base_plugin && [[ "$Q_USE_NAMESPACE" = "True" ]]; then + if is_neutron_ovs_base_plugin; then local ext_gw_interface ext_gw_interface=$(_neutron_get_ext_gw_interface) local ipv6_cidr_len=${IPV6_PUBLIC_RANGE#*/} @@ -1374,7 +1367,7 @@ function _neutron_configure_router_v6 { # Explicitly set router id in l3 agent configuration function _neutron_set_router_id { - if [[ "$Q_USE_NAMESPACE" == "False" ]]; then + if [[ "$Q_L3_ROUTER_PER_TENANT" == "False" ]]; then iniset $Q_L3_CONF_FILE DEFAULT router_id $ROUTER_ID fi } diff --git a/lib/tempest b/lib/tempest index 10dd652750..1d10da4c86 100644 --- a/lib/tempest +++ b/lib/tempest @@ -15,7 +15,6 @@ # - ``SERVICE_HOST`` # - ``BASE_SQL_CONN`` ``lib/database`` declares # - ``PUBLIC_NETWORK_NAME`` -# - ``Q_USE_NAMESPACE`` # - ``Q_ROUTER_NAME`` # - ``Q_L3_ENABLED`` # - ``VIRT_DRIVER`` @@ -132,7 +131,6 @@ function configure_tempest { local flavor_lines local public_network_id local public_router_id - local tenant_networks_reachable local boto_instance_type="m1.tiny" local ssh_connect_method="fixed" @@ -246,13 +244,8 @@ function configure_tempest { fi fi - if [ "$Q_USE_NAMESPACE" != "False" ]; then - tenant_networks_reachable=false - if ! 
is_service_enabled n-net; then - ssh_connect_method="floating" - fi - else - tenant_networks_reachable=true + if ! is_service_enabled n-net; then + ssh_connect_method="floating" fi ssh_connect_method=${TEMPEST_SSH_CONNECT_METHOD:-$ssh_connect_method} @@ -260,12 +253,6 @@ function configure_tempest { if [ "$Q_L3_ENABLED" = "True" ]; then public_network_id=$(neutron net-list | grep $PUBLIC_NETWORK_NAME | \ awk '{print $2}') - if [ "$Q_USE_NAMESPACE" == "False" ]; then - # If namespaces are disabled, DevStack will create a single - # public router that tempest should be configured to use. - public_router_id=$(neutron router-list | awk "/ $Q_ROUTER_NAME / \ - { print \$2 }") - fi fi EC2_URL=$(get_endpoint_url ec2 public || true) @@ -394,7 +381,7 @@ function configure_tempest { # Network iniset $TEMPEST_CONFIG network api_version 2.0 - iniset $TEMPEST_CONFIG network tenant_networks_reachable "$tenant_networks_reachable" + iniset $TEMPEST_CONFIG network tenant_networks_reachable false iniset $TEMPEST_CONFIG network public_network_id "$public_network_id" iniset $TEMPEST_CONFIG network public_router_id "$public_router_id" iniset $TEMPEST_CONFIG network default_network "$FIXED_RANGE" From 4abb31d9d86bee139a0bc3e9f7dfc5041e8b875f Mon Sep 17 00:00:00 2001 From: Marian Horban Date: Mon, 26 Oct 2015 12:35:45 -0400 Subject: [PATCH 0567/2941] Format of nova-api log files was corrected When nova-api and nova-ec2-api services are started by Apache (it can be done with devstack config option NOVA_USE_MOD_WSGI=True) log files contained duplication of timestamp value. 
Change-Id: I5439ea8f89ca3073600456f67220e9d3f5257d97 Closes-Bug: #1510517 --- files/apache-nova-api.template | 2 +- files/apache-nova-ec2-api.template | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/files/apache-nova-api.template b/files/apache-nova-api.template index 49081528ff..bcf406edf3 100644 --- a/files/apache-nova-api.template +++ b/files/apache-nova-api.template @@ -7,7 +7,7 @@ Listen %PUBLICPORT% WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On = 2.4> - ErrorLogFormat "%{cu}t %M" + ErrorLogFormat "%M" ErrorLog /var/log/%APACHE_NAME%/nova-api.log %SSLENGINE% diff --git a/files/apache-nova-ec2-api.template b/files/apache-nova-ec2-api.template index 235d958d1a..a9be15b73a 100644 --- a/files/apache-nova-ec2-api.template +++ b/files/apache-nova-ec2-api.template @@ -7,7 +7,7 @@ Listen %PUBLICPORT% WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On = 2.4> - ErrorLogFormat "%{cu}t %M" + ErrorLogFormat "%M" ErrorLog /var/log/%APACHE_NAME%/nova-ec2-api.log %SSLENGINE% From 313ddaee6102fc03a3885bec3bf5e0d1e0214878 Mon Sep 17 00:00:00 2001 From: Lenny Verkhovsky Date: Tue, 20 Oct 2015 11:26:34 +0300 Subject: [PATCH 0568/2941] Remove sudo from mkdir in $STACK_USER folder This fails in the environment where sudo does not have permissions to write in /home/$USER folder, e.g. [1] Also clean-up the comment/variable usage a bit; the location isn't actually variable at all (and that's fine, but we don't need a global here) [1] http://144.76.193.39/ci-artifacts/228979/10/Nova-ML2-Sriov/console.html.gz Change-Id: I6807eae9d1c27219aa7c19de29f24fa851aa787c --- functions-common | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/functions-common b/functions-common index 42555a95f5..6607d0be70 100644 --- a/functions-common +++ b/functions-common @@ -73,21 +73,23 @@ function save_stackenv { # - A `devstack-admin` entry for the `admin` user for the `admin` project. 
# write_clouds_yaml function write_clouds_yaml { - # The location is a variable to allow for easier refactoring later to make it - # overridable. There is currently no usecase where doing so makes sense, so - # it's not currently configurable. - for clouds_path in /etc/openstack ~/.config/openstack ; do - CLOUDS_YAML=$clouds_path/clouds.yaml + local clouds_yaml + + sudo mkdir -p /etc/openstack + sudo chown -R $STACK_USER /etc/openstack + # XXX: to be removed, see https://review.openstack.org/237149/ + # careful not to sudo this, incase ~ is NFS mounted + mkdir -p ~/.config/openstack - sudo mkdir -p $(dirname $CLOUDS_YAML) - sudo chown -R $STACK_USER $(dirname $CLOUDS_YAML) + for clouds_path in /etc/openstack ~/.config/openstack ; do + clouds_yaml=$clouds_path/clouds.yaml CA_CERT_ARG='' if [ -f "$SSL_BUNDLE_FILE" ]; then CA_CERT_ARG="--os-cacert $SSL_BUNDLE_FILE" fi $TOP_DIR/tools/update_clouds_yaml.py \ - --file $CLOUDS_YAML \ + --file $clouds_yaml \ --os-cloud devstack \ --os-region-name $REGION_NAME \ --os-identity-api-version 3 \ @@ -97,7 +99,7 @@ function write_clouds_yaml { --os-password $ADMIN_PASSWORD \ --os-project-name demo $TOP_DIR/tools/update_clouds_yaml.py \ - --file $CLOUDS_YAML \ + --file $clouds_yaml \ --os-cloud devstack-admin \ --os-region-name $REGION_NAME \ --os-identity-api-version 3 \ From ee9bb76647e1424e0dc84d32df6ab8607dc4ec96 Mon Sep 17 00:00:00 2001 From: Monty Taylor Date: Mon, 19 Oct 2015 15:16:18 -0400 Subject: [PATCH 0569/2941] Write clouds.yaml to only /etc After having migrated the copies of clouds.yaml to just consume from /etc, remove the duplicate copy. 
Change-Id: I036704734785958c95d2234917d7b40bd797a375 --- functions-common | 63 +++++++++++++++++++++++------------------------- 1 file changed, 30 insertions(+), 33 deletions(-) diff --git a/functions-common b/functions-common index 6607d0be70..922938f95d 100644 --- a/functions-common +++ b/functions-common @@ -73,42 +73,39 @@ function save_stackenv { # - A `devstack-admin` entry for the `admin` user for the `admin` project. # write_clouds_yaml function write_clouds_yaml { - local clouds_yaml + # The location is a variable to allow for easier refactoring later to make it + # overridable. There is currently no usecase where doing so makes sense, so + # it's not currently configurable. - sudo mkdir -p /etc/openstack - sudo chown -R $STACK_USER /etc/openstack - # XXX: to be removed, see https://review.openstack.org/237149/ - # careful not to sudo this, incase ~ is NFS mounted - mkdir -p ~/.config/openstack + CLOUDS_YAML=/etc/openstack/clouds.yaml - for clouds_path in /etc/openstack ~/.config/openstack ; do - clouds_yaml=$clouds_path/clouds.yaml + sudo mkdir -p $(dirname $CLOUDS_YAML) + sudo chown -R $STACK_USER /etc/openstack - CA_CERT_ARG='' - if [ -f "$SSL_BUNDLE_FILE" ]; then - CA_CERT_ARG="--os-cacert $SSL_BUNDLE_FILE" - fi - $TOP_DIR/tools/update_clouds_yaml.py \ - --file $clouds_yaml \ - --os-cloud devstack \ - --os-region-name $REGION_NAME \ - --os-identity-api-version 3 \ - $CA_CERT_ARG \ - --os-auth-url $KEYSTONE_AUTH_URI \ - --os-username demo \ - --os-password $ADMIN_PASSWORD \ - --os-project-name demo - $TOP_DIR/tools/update_clouds_yaml.py \ - --file $clouds_yaml \ - --os-cloud devstack-admin \ - --os-region-name $REGION_NAME \ - --os-identity-api-version 3 \ - $CA_CERT_ARG \ - --os-auth-url $KEYSTONE_AUTH_URI \ - --os-username admin \ - --os-password $ADMIN_PASSWORD \ - --os-project-name admin - done + CA_CERT_ARG='' + if [ -f "$SSL_BUNDLE_FILE" ]; then + CA_CERT_ARG="--os-cacert $SSL_BUNDLE_FILE" + fi + $TOP_DIR/tools/update_clouds_yaml.py \ + --file 
$CLOUDS_YAML \ + --os-cloud devstack \ + --os-region-name $REGION_NAME \ + --os-identity-api-version 3 \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_AUTH_URI \ + --os-username demo \ + --os-password $ADMIN_PASSWORD \ + --os-project-name demo + $TOP_DIR/tools/update_clouds_yaml.py \ + --file $CLOUDS_YAML \ + --os-cloud devstack-admin \ + --os-region-name $REGION_NAME \ + --os-identity-api-version 3 \ + $CA_CERT_ARG \ + --os-auth-url $KEYSTONE_AUTH_URI \ + --os-username admin \ + --os-password $ADMIN_PASSWORD \ + --os-project-name admin } # trueorfalse From a692810ef05304856e5fd12ec786f3445ecca576 Mon Sep 17 00:00:00 2001 From: Oleksii Chuprykov Date: Thu, 11 Jun 2015 08:56:58 -0400 Subject: [PATCH 0570/2941] Add toggle to run Heat API services via Apache2 Add templates for running Heat API services via apache mod_wsgi. Also add appropriate functions to lib/heat for configuring Heat. Change-Id: I1bdd678c44ddfa616a9db7db85ff6f490ff08947 --- doc/source/configuration.rst | 6 ++ files/apache-heat-api-cfn.template | 27 ++++++ files/apache-heat-api-cloudwatch.template | 27 ++++++ files/apache-heat-api.template | 27 ++++++ lib/heat | 109 ++++++++++++++++++++-- 5 files changed, 188 insertions(+), 8 deletions(-) create mode 100644 files/apache-heat-api-cfn.template create mode 100644 files/apache-heat-api-cloudwatch.template create mode 100644 files/apache-heat-api.template diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index aae4f33562..d70d3dae17 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -297,6 +297,12 @@ Example (Swift): SWIFT_USE_MOD_WSGI="True" +Example (Heat): + +:: + + HEAT_USE_MOD_WSGI="True" + Example (Cinder): diff --git a/files/apache-heat-api-cfn.template b/files/apache-heat-api-cfn.template new file mode 100644 index 0000000000..ab33c66f7e --- /dev/null +++ b/files/apache-heat-api-cfn.template @@ -0,0 +1,27 @@ +Listen %PUBLICPORT% + + + WSGIDaemonProcess heat-api-cfn processes=2 threads=1 user=%USER% 
display-name=%{GROUP} %VIRTUALENV% + WSGIProcessGroup heat-api-cfn + WSGIScriptAlias / %HEAT_BIN_DIR%/heat-wsgi-api-cfn + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + AllowEncodedSlashes On + = 2.4> + ErrorLogFormat "%{cu}t %M" + + ErrorLog /var/log/%APACHE_NAME%/heat-api-cfn.log + %SSLENGINE% + %SSLCERTFILE% + %SSLKEYFILE% + + + = 2.4> + Require all granted + + + Order allow,deny + Allow from all + + + diff --git a/files/apache-heat-api-cloudwatch.template b/files/apache-heat-api-cloudwatch.template new file mode 100644 index 0000000000..06c91bbdb1 --- /dev/null +++ b/files/apache-heat-api-cloudwatch.template @@ -0,0 +1,27 @@ +Listen %PUBLICPORT% + + + WSGIDaemonProcess heat-api-cloudwatch processes=2 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% + WSGIProcessGroup heat-api-cloudwatch + WSGIScriptAlias / %HEAT_BIN_DIR%/heat-wsgi-api-cloudwatch + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + AllowEncodedSlashes On + = 2.4> + ErrorLogFormat "%{cu}t %M" + + ErrorLog /var/log/%APACHE_NAME%/heat-api-cloudwatch.log + %SSLENGINE% + %SSLCERTFILE% + %SSLKEYFILE% + + + = 2.4> + Require all granted + + + Order allow,deny + Allow from all + + + diff --git a/files/apache-heat-api.template b/files/apache-heat-api.template new file mode 100644 index 0000000000..4924b3978b --- /dev/null +++ b/files/apache-heat-api.template @@ -0,0 +1,27 @@ +Listen %PUBLICPORT% + + + WSGIDaemonProcess heat-api processes=3 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% + WSGIProcessGroup heat-api + WSGIScriptAlias / %HEAT_BIN_DIR%/heat-wsgi-api + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + AllowEncodedSlashes On + = 2.4> + ErrorLogFormat "%{cu}t %M" + + ErrorLog /var/log/%APACHE_NAME%/heat-api.log + %SSLENGINE% + %SSLCERTFILE% + %SSLKEYFILE% + + + = 2.4> + Require all granted + + + Order allow,deny + Allow from all + + + diff --git a/lib/heat b/lib/heat index 615198cc7d..85fdaa1eb2 100644 --- a/lib/heat +++ b/lib/heat @@ -16,6 +16,7 
@@ # - install_heat # - configure_heatclient # - configure_heat +# - _config_heat_apache_wsgi # - init_heat # - start_heat # - stop_heat @@ -32,6 +33,9 @@ set +o xtrace # set up default directories GITDIR["python-heatclient"]=$DEST/python-heatclient +# Toggle for deploying Heat-API under HTTPD + mod_wsgi +HEAT_USE_MOD_WSGI=${HEAT_USE_MOD_WSGI:-False} + HEAT_DIR=$DEST/heat HEAT_CFNTOOLS_DIR=$DEST/heat-cfntools HEAT_TEMPLATES_REPO_DIR=$DEST/heat-templates @@ -117,13 +121,17 @@ function configure_heat { # logging iniset $HEAT_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $HEAT_CONF DEFAULT use_syslog $SYSLOG - if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then + if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ] && [ "$HEAT_USE_MOD_WSGI" == "False" ] ; then # Add color to logging output setup_colorized_logging $HEAT_CONF DEFAULT tenant user fi iniset $HEAT_CONF DEFAULT deferred_auth_method $HEAT_DEFERRED_AUTH + if [ "$HEAT_USE_MOD_WSGI" == "True" ]; then + _config_heat_apache_wsgi + fi + # NOTE(jamielennox): heat re-uses specific values from the # keystone_authtoken middleware group and so currently fails when using the # auth plugin setup. This should be fixed in heat. 
Heat is also the only @@ -211,6 +219,9 @@ function install_heatclient { function install_heat { git_clone $HEAT_REPO $HEAT_DIR $HEAT_BRANCH setup_develop $HEAT_DIR + if [ "$HEAT_USE_MOD_WSGI" == "True" ]; then + install_apache_wsgi + fi } # install_heat_other() - Collect source and prepare @@ -226,20 +237,102 @@ function install_heat_other { # start_heat() - Start running processes, including screen function start_heat { run_process h-eng "$HEAT_BIN_DIR/heat-engine --config-file=$HEAT_CONF" - run_process h-api "$HEAT_BIN_DIR/heat-api --config-file=$HEAT_CONF" - run_process h-api-cfn "$HEAT_BIN_DIR/heat-api-cfn --config-file=$HEAT_CONF" - run_process h-api-cw "$HEAT_BIN_DIR/heat-api-cloudwatch --config-file=$HEAT_CONF" + + # If the site is not enabled then we are in a grenade scenario + local enabled_site_file=$(apache_site_config_for heat-api) + if [ -f ${enabled_site_file} ] && [ "$HEAT_USE_MOD_WSGI" == "True" ]; then + enable_apache_site heat-api + enable_apache_site heat-api-cfn + enable_apache_site heat-api-cloudwatch + restart_apache_server + tail_log heat-api /var/log/$APACHE_NAME/heat-api.log + tail_log heat-api-cfn /var/log/$APACHE_NAME/heat-api-cfn.log + tail_log heat-api-cloudwatch /var/log/$APACHE_NAME/heat-api-cloudwatch.log + else + run_process h-api "$HEAT_BIN_DIR/heat-api --config-file=$HEAT_CONF" + run_process h-api-cfn "$HEAT_BIN_DIR/heat-api-cfn --config-file=$HEAT_CONF" + run_process h-api-cw "$HEAT_BIN_DIR/heat-api-cloudwatch --config-file=$HEAT_CONF" + fi } # stop_heat() - Stop running processes function stop_heat { # Kill the screen windows - local serv - for serv in h-eng h-api h-api-cfn h-api-cw; do - stop_process $serv - done + stop_process h-eng + + if [ "$HEAT_USE_MOD_WSGI" == "True" ]; then + disable_apache_site heat-api + disable_apache_site heat-api-cfn + disable_apache_site heat-api-cloudwatch + restart_apache_server + else + local serv + for serv in h-api h-api-cfn h-api-cw; do + stop_process $serv + done + fi + } +# 
_cleanup_heat_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file +function _cleanup_heat_apache_wsgi { + sudo rm -f $(apache_site_config_for heat-api) + sudo rm -f $(apache_site_config_for heat-api-cfn) + sudo rm -f $(apache_site_config_for heat-api-cloudwatch) +} + +# _config_heat_apache_wsgi() - Set WSGI config files of Heat +function _config_heat_apache_wsgi { + + local heat_apache_conf=$(apache_site_config_for heat-api) + local heat_cfn_apache_conf=$(apache_site_config_for heat-api-cfn) + local heat_cloudwatch_apache_conf=$(apache_site_config_for heat-api-cloudwatch) + local heat_ssl="" + local heat_certfile="" + local heat_keyfile="" + local heat_api_port=$HEAT_API_PORT + local heat_cfn_api_port=$HEAT_API_CFN_PORT + local heat_cw_api_port=$HEAT_API_CW_PORT + local venv_path="" + + sudo cp $FILES/apache-heat-api.template $heat_apache_conf + sudo sed -e " + s|%PUBLICPORT%|$heat_api_port|g; + s|%APACHE_NAME%|$APACHE_NAME|g; + s|%HEAT_BIN_DIR%|$HEAT_BIN_DIR|g; + s|%SSLENGINE%|$heat_ssl|g; + s|%SSLCERTFILE%|$heat_certfile|g; + s|%SSLKEYFILE%|$heat_keyfile|g; + s|%USER%|$STACK_USER|g; + s|%VIRTUALENV%|$venv_path|g + " -i $heat_apache_conf + + sudo cp $FILES/apache-heat-api-cfn.template $heat_cfn_apache_conf + sudo sed -e " + s|%PUBLICPORT%|$heat_cfn_api_port|g; + s|%APACHE_NAME%|$APACHE_NAME|g; + s|%HEAT_BIN_DIR%|$HEAT_BIN_DIR|g; + s|%SSLENGINE%|$heat_ssl|g; + s|%SSLCERTFILE%|$heat_certfile|g; + s|%SSLKEYFILE%|$heat_keyfile|g; + s|%USER%|$STACK_USER|g; + s|%VIRTUALENV%|$venv_path|g + " -i $heat_cfn_apache_conf + + sudo cp $FILES/apache-heat-api-cloudwatch.template $heat_cloudwatch_apache_conf + sudo sed -e " + s|%PUBLICPORT%|$heat_cw_api_port|g; + s|%APACHE_NAME%|$APACHE_NAME|g; + s|%HEAT_BIN_DIR%|$HEAT_BIN_DIR|g; + s|%SSLENGINE%|$heat_ssl|g; + s|%SSLCERTFILE%|$heat_certfile|g; + s|%SSLKEYFILE%|$heat_keyfile|g; + s|%USER%|$STACK_USER|g; + s|%VIRTUALENV%|$venv_path|g + " -i $heat_cloudwatch_apache_conf +} + + # create_heat_accounts() - Set up 
common required heat accounts function create_heat_accounts { if [[ "$HEAT_STANDALONE" != "True" ]]; then From cf94edcbbdde75de3ce627d7e092936bc014d5f6 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Wed, 28 Oct 2015 09:50:01 -0700 Subject: [PATCH 0571/2941] Disable shelve/snapshot/cinder when running Tempest with libvirt+lxc The libvirt+lxc backend in nova does not support shelve, image snapshot or any volume-related actions (so pretty much anything to do with cinder), so we need to configure tempest to not run tests that hit those operations/service when using libvirt/lxc. This is part of an overall effort to get a CI job running for nova with the libvirt+lxc configuration per: Ic07c39e219121ba6b8b20de2b83a193bb735133d Change-Id: I4decfcc5a5dfbabdecb3eb9fc93f1d1d6c2af805 --- lib/tempest | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/lib/tempest b/lib/tempest index 10dd652750..565e965836 100644 --- a/lib/tempest +++ b/lib/tempest @@ -531,6 +531,8 @@ function configure_tempest { if [ "$VIRT_DRIVER" = "libvirt" ] && [ "$LIBVIRT_TYPE" = "lxc" ]; then iniset $TEMPEST_CONFIG compute-feature-enabled rescue False iniset $TEMPEST_CONFIG compute-feature-enabled resize False + iniset $TEMPEST_CONFIG compute-feature-enabled shelve False + iniset $TEMPEST_CONFIG compute-feature-enabled snapshot False iniset $TEMPEST_CONFIG compute-feature-enabled suspend False fi @@ -548,6 +550,12 @@ function configure_tempest { fi done + if [ "$VIRT_DRIVER" = "libvirt" ] && [ "$LIBVIRT_TYPE" = "lxc" ]; then + # libvirt-lxc does not support boot from volume or attaching volumes + # so basically anything with cinder is out of the question. 
+ iniset $TEMPEST_CONFIG service_available cinder "False" + fi + if is_ssl_enabled_service "key" || is_service_enabled tls-proxy; then # Use the ``BOTO_CONFIG`` environment variable to point to this file iniset -sudo $BOTO_CONF Boto ca_certificates_file $SSL_BUNDLE_FILE From 5cdee8dd3311e501302993cd8c81b39cb6b45090 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Mon, 19 Oct 2015 14:17:18 +1100 Subject: [PATCH 0572/2941] Update to bashate 0.3.2 Bashate 0.3.2 has a few new checks -- firstly make sure some of the plugins have #!/bin/bash, and fix up a couple of "local" changes that were missed by I9c8912a8fd596535589b207d7fc553b9d951d3fe Change-Id: I9e4b1c0dc9e0f709d8e76f9c9bf1c9478b2605ed --- functions-common | 20 ++++++++++++++------ lib/heat | 12 ++++++++---- lib/neutron_plugins/services/firewall | 2 ++ lib/neutron_plugins/services/loadbalancer | 2 ++ lib/neutron_plugins/services/metering | 2 ++ lib/neutron_plugins/services/vpn | 2 ++ lib/neutron_thirdparty/vmware_nsx | 2 ++ tox.ini | 7 ++++--- 8 files changed, 36 insertions(+), 13 deletions(-) diff --git a/functions-common b/functions-common index 42555a95f5..ceefd443b1 100644 --- a/functions-common +++ b/functions-common @@ -1746,7 +1746,8 @@ function run_phase { # extras.d in an unsupported way which will let us track # unsupported usage in the gate. local exceptions="50-ironic.sh 60-ceph.sh 80-tempest.sh" - local extra=$(basename $extra_plugin_file_name) + local extra + extra=$(basename $extra_plugin_file_name) if [[ ! ( $exceptions =~ "$extra" ) ]]; then deprecated "extras.d support is being removed in Mitaka-1" deprecated "jobs for project $extra will break after that point" @@ -2184,14 +2185,21 @@ function time_start { # global counter for that name. Errors if that clock had not # previously been started. 
function time_stop { - local name=$1 - local start_time=${START_TIME[$name]} + local name + local end_time + local elpased_time + local total + local start_time + + name=$1 + start_time=${START_TIME[$name]} + if [[ -z "$start_time" ]]; then die $LINENO "Trying to stop the clock on $name, but it was never started" fi - local end_time=$(date +%s) - local elapsed_time=$(($end_time - $start_time)) - local total=${TOTAL_TIME[$name]:-0} + end_time=$(date +%s) + elapsed_time=$(($end_time - $start_time)) + total=${TOTAL_TIME[$name]:-0} # reset the clock so we can start it in the future START_TIME[$name]="" TOTAL_TIME[$name]=$(($total + $elapsed_time)) diff --git a/lib/heat b/lib/heat index 85fdaa1eb2..f3f0548947 100644 --- a/lib/heat +++ b/lib/heat @@ -239,7 +239,8 @@ function start_heat { run_process h-eng "$HEAT_BIN_DIR/heat-engine --config-file=$HEAT_CONF" # If the site is not enabled then we are in a grenade scenario - local enabled_site_file=$(apache_site_config_for heat-api) + local enabled_site_file + enabled_site_file=$(apache_site_config_for heat-api) if [ -f ${enabled_site_file} ] && [ "$HEAT_USE_MOD_WSGI" == "True" ]; then enable_apache_site heat-api enable_apache_site heat-api-cfn @@ -284,9 +285,12 @@ function _cleanup_heat_apache_wsgi { # _config_heat_apache_wsgi() - Set WSGI config files of Heat function _config_heat_apache_wsgi { - local heat_apache_conf=$(apache_site_config_for heat-api) - local heat_cfn_apache_conf=$(apache_site_config_for heat-api-cfn) - local heat_cloudwatch_apache_conf=$(apache_site_config_for heat-api-cloudwatch) + local heat_apache_conf + heat_apache_conf=$(apache_site_config_for heat-api) + local heat_cfn_apache_conf + heat_cfn_apache_conf=$(apache_site_config_for heat-api-cfn) + local heat_cloudwatch_apache_conf + heat_cloudwatch_apache_conf=$(apache_site_config_for heat-api-cloudwatch) local heat_ssl="" local heat_certfile="" local heat_keyfile="" diff --git a/lib/neutron_plugins/services/firewall 
b/lib/neutron_plugins/services/firewall index 61a148e596..3496da82f8 100644 --- a/lib/neutron_plugins/services/firewall +++ b/lib/neutron_plugins/services/firewall @@ -1,3 +1,5 @@ +#!/bin/bash + # Neutron firewall plugin # --------------------------- diff --git a/lib/neutron_plugins/services/loadbalancer b/lib/neutron_plugins/services/loadbalancer index 34190f9a56..7865f6fd6e 100644 --- a/lib/neutron_plugins/services/loadbalancer +++ b/lib/neutron_plugins/services/loadbalancer @@ -1,3 +1,5 @@ +#!/bin/bash + # Neutron loadbalancer plugin # --------------------------- diff --git a/lib/neutron_plugins/services/metering b/lib/neutron_plugins/services/metering index 37ba019b98..c75ab19d4e 100644 --- a/lib/neutron_plugins/services/metering +++ b/lib/neutron_plugins/services/metering @@ -1,3 +1,5 @@ +#!/bin/bash + # Neutron metering plugin # --------------------------- diff --git a/lib/neutron_plugins/services/vpn b/lib/neutron_plugins/services/vpn index 4d6a2bf9a0..c0e7457413 100644 --- a/lib/neutron_plugins/services/vpn +++ b/lib/neutron_plugins/services/vpn @@ -1,3 +1,5 @@ +#!/bin/bash + # Neutron VPN plugin # --------------------------- diff --git a/lib/neutron_thirdparty/vmware_nsx b/lib/neutron_thirdparty/vmware_nsx index 03853a9bf4..e182fca1ae 100644 --- a/lib/neutron_thirdparty/vmware_nsx +++ b/lib/neutron_thirdparty/vmware_nsx @@ -1,2 +1,4 @@ +#!/bin/bash + # REVISIT(roeyc): this file left empty so that 'enable_service vmware_nsx' # continues to work. 
diff --git a/tox.ini b/tox.ini index 0df9877ba8..9279455bb8 100644 --- a/tox.ini +++ b/tox.ini @@ -9,7 +9,7 @@ install_command = pip install {opts} {packages} [testenv:bashate] deps = - {env:BASHATE_INSTALL_PATH:bashate==0.3.1} + {env:BASHATE_INSTALL_PATH:bashate==0.3.2} whitelist_externals = bash commands = bash -c "find {toxinidir} \ -not \( -type d -name .?\* -prune \) \ # prune all 'dot' dirs @@ -20,12 +20,13 @@ commands = bash -c "find {toxinidir} \ -not -name \*.md \ \( \ -name \*.sh -or \ - -name \*rc -or \ + -name \*.orig -or \ + -name \*rc -or \ # openrc files, etc -name functions\* -or \ -wholename \*/inc/\* -or \ # /inc files and -wholename \*/lib/\* \ # /lib files are shell, but \) \ # have no extension - -print0 | xargs -0 bashate -v -iE006" + -print0 | xargs -0 bashate -v -iE006 -eE005,E042" [testenv:docs] deps = From 347dbac04e8988a01e3330e34b57f1e5b3c6aa2a Mon Sep 17 00:00:00 2001 From: Nicolas Simonds Date: Wed, 21 Jan 2015 13:40:42 -0800 Subject: [PATCH 0573/2941] Add the ability to install/enable Heat plugins Adds the $ENABLE_HEAT_PLUGINS variable, which should be a list of the names of the plugins the user wishes to install. Change-Id: I2ba90002a8fad1cdce6543c89dc37c5912fe133e --- lib/heat | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/lib/heat b/lib/heat index f3f0548947..e42bdf0b9e 100644 --- a/lib/heat +++ b/lib/heat @@ -69,6 +69,8 @@ else HEAT_STACK_DOMAIN=$(trueorfalse True HEAT_STACK_DOMAIN) HEAT_DEFERRED_AUTH=${HEAT_DEFERRED_AUTH:-trusts} fi +HEAT_PLUGIN_DIR=${HEAT_PLUGIN_DIR:-$DATA_DIR/heat/plugins} +ENABLE_HEAT_PLUGINS=${ENABLE_HEAT_PLUGINS:-} # Functions # --------- @@ -188,6 +190,35 @@ function configure_heat { # copy the default templates cp $HEAT_DIR/etc/heat/templates/* $HEAT_TEMPLATES_DIR/ + # Enable heat plugins. 
+ # NOTE(nic): The symlink nonsense is necessary because when + # plugins are installed in "developer mode", the final component + # of their target directory is always "resources", which confuses + # Heat's plugin loader into believing that all plugins are named + # "resources", and therefore are all the same plugin; so it + # will only load one of them. Linking them all to a common + # location with unique names avoids that type of collision, + # while still allowing the plugins to be edited in-tree. + local err_count=0 + + if [ -n "$ENABLE_HEAT_PLUGINS" ]; then + mkdir -p $HEAT_PLUGIN_DIR + # Clean up cruft from any previous runs + rm -f $HEAT_PLUGIN_DIR/* + iniset $HEAT_CONF DEFAULT plugin_dirs $HEAT_PLUGIN_DIR + fi + + for heat_plugin in $ENABLE_HEAT_PLUGINS; do + if [ -d $HEAT_DIR/contrib/$heat_plugin ]; then + setup_package $HEAT_DIR/contrib/$heat_plugin -e + ln -s $HEAT_DIR/contrib/$heat_plugin/$heat_plugin/resources $HEAT_PLUGIN_DIR/$heat_plugin + else + : # clear retval on the test so that we can roll up errors + err $LINENO "Requested Heat plugin(${heat_plugin}) not found." + err_count=$(($err_count + 1)) + fi + done + [ $err_count -eq 0 ] || die $LINENO "$err_count of the requested Heat plugins could not be installed." } # init_heat() - Initialize database From abb40f61939355f471c1a37c671774923e12b660 Mon Sep 17 00:00:00 2001 From: Thiago Paiva Date: Thu, 29 Oct 2015 11:38:24 -0300 Subject: [PATCH 0574/2941] Correct Cinder protocol for connections on Ironic The protocol for connections with Cinder is wrong for the Ironic script. This patch changes the script to use $GLANCE_SERVICE_PROTOCOL, which is https when USE_SSL=true or tls-proxy is on ENABLED_SERVICES. 
Change-Id: I4d4c6f9dc6f6ee53166db109848dca64334b8748 --- lib/ironic | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ironic b/lib/ironic index de07b49667..d786870165 100644 --- a/lib/ironic +++ b/lib/ironic @@ -795,7 +795,7 @@ function upload_baremetal_ironic_deploy { # load them into glance IRONIC_DEPLOY_KERNEL_ID=$(openstack \ --os-token $token \ - --os-url http://$GLANCE_HOSTPORT \ + --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT \ image create \ $(basename $IRONIC_DEPLOY_KERNEL_PATH) \ --public --disk-format=aki \ @@ -803,7 +803,7 @@ function upload_baremetal_ironic_deploy { < $IRONIC_DEPLOY_KERNEL_PATH | grep ' id ' | get_field 2) IRONIC_DEPLOY_RAMDISK_ID=$(openstack \ --os-token $token \ - --os-url http://$GLANCE_HOSTPORT \ + --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT \ image create \ $(basename $IRONIC_DEPLOY_RAMDISK_PATH) \ --public --disk-format=ari \ From 7159b4ba5956c0fd3141fe13ac40057364683c9c Mon Sep 17 00:00:00 2001 From: Marian Horban Date: Thu, 22 Oct 2015 15:47:49 -0400 Subject: [PATCH 0575/2941] Config graceful_shutdown_timeout option for services To avoid hanging services during gracefull shutdown option graceful_shutdown_timeout should be configured. 
Closes-Bug: #1446583 Change-Id: I2b7f0df831d65c55ae8cae241478f49c9641d99f --- lib/cinder | 2 +- lib/glance | 2 ++ lib/nova | 1 + stackrc | 3 +++ 4 files changed, 7 insertions(+), 1 deletion(-) diff --git a/lib/cinder b/lib/cinder index ed9a1038d6..a916c6540f 100644 --- a/lib/cinder +++ b/lib/cinder @@ -357,7 +357,7 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT os_privileged_user_name nova iniset $CINDER_CONF DEFAULT os_privileged_user_password "$SERVICE_PASSWORD" iniset $CINDER_CONF DEFAULT os_privileged_user_tenant "$SERVICE_TENANT_NAME" - + iniset $CINDER_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT" } # create_cinder_accounts() - Set up common required cinder accounts diff --git a/lib/glance b/lib/glance index 2eb93a46e6..5a1b2834d0 100644 --- a/lib/glance +++ b/lib/glance @@ -115,6 +115,7 @@ function configure_glance { configure_auth_token_middleware $GLANCE_REGISTRY_CONF glance $GLANCE_AUTH_CACHE_DIR/registry iniset $GLANCE_REGISTRY_CONF DEFAULT notification_driver messaging iniset_rpc_backend glance $GLANCE_REGISTRY_CONF + iniset $GLANCE_REGISTRY_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT" cp $GLANCE_DIR/etc/glance-api.conf $GLANCE_API_CONF iniset $GLANCE_API_CONF DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL @@ -149,6 +150,7 @@ function configure_glance { iniset $GLANCE_API_CONF glance_store swift_store_config_file $GLANCE_SWIFT_STORE_CONF iniset $GLANCE_API_CONF glance_store default_swift_reference ref1 iniset $GLANCE_API_CONF glance_store stores "file, http, swift" + iniset $GLANCE_API_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT" iniset $GLANCE_SWIFT_STORE_CONF ref1 user $SERVICE_TENANT_NAME:glance-swift iniset $GLANCE_SWIFT_STORE_CONF ref1 key $SERVICE_PASSWORD diff --git a/lib/nova b/lib/nova index 6c414030ae..cce598d853 100644 --- a/lib/nova +++ b/lib/nova @@ -649,6 +649,7 @@ function create_nova_conf { iniset $NOVA_CONF serial_console serialproxy_host 
"$NOVA_SERVICE_LISTEN_ADDRESS" iniset $NOVA_CONF serial_console enabled True fi + iniset $NOVA_CONF DEFAULT graceful_shutdown_timeout "$SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT" } function init_nova_cells { diff --git a/stackrc b/stackrc index 819aa0125c..1a95e2b7ed 100644 --- a/stackrc +++ b/stackrc @@ -669,6 +669,9 @@ fi # Service startup timeout SERVICE_TIMEOUT=${SERVICE_TIMEOUT:-60} +# Service graceful shutdown timeout +SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT=${SERVICE_GRACEFUL_SHUTDOWN_TIMEOUT:-5} + # Support alternative yum -- in future Fedora 'dnf' will become the # only supported installer, but for now 'yum' and 'dnf' are both # available in parallel with compatible CLIs. Allow manual switching From cdcdeb64602ea271bedf20f58a9f1ca1c0075c84 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Thu, 29 Oct 2015 09:48:17 -0700 Subject: [PATCH 0576/2941] Remove CINDER_VOLUME_CLEAR value validation 132fbcd38ebae52bdd20da54905131b75581520f in cinder changed the volume_clear StrOpt to use the choices kwarg which enforces the value specified and raises a ValueError if an invalid value is set for the option in cinder.conf. This lets us remove the validation that devstack was doing. 
Change-Id: Ia7eead6297ed0f3a972de2021170fe9c7225e856 --- lib/cinder | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/lib/cinder b/lib/cinder index ed9a1038d6..1307c11f7a 100644 --- a/lib/cinder +++ b/lib/cinder @@ -317,9 +317,7 @@ function configure_cinder { iniset_rpc_backend cinder $CINDER_CONF - if [[ "$CINDER_VOLUME_CLEAR" == "none" ]] || [[ "$CINDER_VOLUME_CLEAR" == "zero" ]] || [[ "$CINDER_VOLUME_CLEAR" == "shred" ]]; then - iniset $CINDER_CONF DEFAULT volume_clear $CINDER_VOLUME_CLEAR - fi + iniset $CINDER_CONF DEFAULT volume_clear $CINDER_VOLUME_CLEAR # Format logging if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ] && [ "$CINDER_USE_MOD_WSGI" == "False" ]; then From 5ea1e16d83d6312fb17976ded0ab1a6a3773ca4d Mon Sep 17 00:00:00 2001 From: Stephen Finucane Date: Thu, 29 Oct 2015 20:12:26 +0000 Subject: [PATCH 0577/2941] lib/nova: Remove 'DEFAULT/verbose' The 'verbose' option has been deprecated by oslo_log. Using it results in a warning for the 'nova-manage' command and likely many other OpenStack commands. Change-Id: Icc11b25f56ebc62443c6afa90b9572d5c63b3882 Partial-bug: #1511505 --- lib/nova | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/nova b/lib/nova index 6c414030ae..47c43bd193 100644 --- a/lib/nova +++ b/lib/nova @@ -480,7 +480,6 @@ function create_nova_conf { # (Re)create ``nova.conf`` rm -f $NOVA_CONF - iniset $NOVA_CONF DEFAULT verbose "True" iniset $NOVA_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL" if [ "$NOVA_ALLOW_MOVE_TO_SAME_HOST" == "True" ]; then iniset $NOVA_CONF DEFAULT allow_resize_to_same_host "True" From 0ec80802735163fdbdb4ceaa339aa73f93bbd87a Mon Sep 17 00:00:00 2001 From: Daniel Gonzalez Date: Fri, 30 Oct 2015 14:25:41 +0100 Subject: [PATCH 0578/2941] Remove multi-region workaround When keystone API v3 was introduced, filtering regions when listing endpoints was not supported (see [1]). This caused multi-region devstack deployments to fail (see [2]). 
A workaround was introduced to devstack to enable for multi-region deployments until region filtering would work in keystone API v3. Now that the bug related to region filtering in keystone is resolved, the workaround should be removed. [1]: https://bugs.launchpad.net/keystone/+bug/1482772 [2]: https://bugs.launchpad.net/devstack/+bug/1483784 Closes-Bug: #1511745 Related-Bug: #1483784 Related-Bug: #1482772 Change-Id: I52d13c3f5e7b77a7f8fb1be4dcea437988ebddfe --- functions-common | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/functions-common b/functions-common index d36d323820..d282203d45 100644 --- a/functions-common +++ b/functions-common @@ -913,16 +913,11 @@ function get_or_create_service { # Usage: _get_or_create_endpoint_with_interface function _get_or_create_endpoint_with_interface { local endpoint_id - # TODO(dgonzalez): The check of the region name, as done in the grep - # statement below, exists only because keystone does currently - # not allow filtering the region name when listing endpoints. If keystone - # gets support for this, the check for the region name can be removed. 
- # Related bug in keystone: https://bugs.launchpad.net/keystone/+bug/1482772 endpoint_id=$(openstack endpoint list \ --service $1 \ --interface $2 \ --region $4 \ - -c ID -c Region -f value | grep $4 | cut -f 1 -d " ") + -c ID -f value) if [[ -z "$endpoint_id" ]]; then # Creates new endpoint endpoint_id=$(openstack endpoint create \ From 55a1bca28215a7623c9bd067a663e176d4b3e672 Mon Sep 17 00:00:00 2001 From: Andrey Kurilin Date: Fri, 30 Oct 2015 16:24:19 +0200 Subject: [PATCH 0579/2941] Remove wrong paramter COMPUTE_API_VERSION Since: - novaclient doesn't require specify the *compute api* version (default is 2.latest now) - novaclient doesn't use COMPUTE_API_VERSION, since it's wrong name( OS_COMPUTE_API_VERSION is a correct name) we can remove COMPUTE_API_VERSION and NOVA_VERSION vars Change-Id: I47856863e9403870b8d60c778b97d3de1a212ae1 --- exercises/client-args.sh | 1 - exercises/client-env.sh | 4 ---- openrc | 6 ------ 3 files changed, 11 deletions(-) diff --git a/exercises/client-args.sh b/exercises/client-args.sh index c33ef44e9a..7cfef1c807 100755 --- a/exercises/client-args.sh +++ b/exercises/client-args.sh @@ -41,7 +41,6 @@ unset NOVA_PROJECT_ID unset NOVA_REGION_NAME unset NOVA_URL unset NOVA_USERNAME -unset NOVA_VERSION # Save the known variables for later export x_TENANT_NAME=$OS_TENANT_NAME diff --git a/exercises/client-env.sh b/exercises/client-env.sh index 4a0609a944..1d2f4f5689 100755 --- a/exercises/client-env.sh +++ b/exercises/client-env.sh @@ -41,7 +41,6 @@ unset NOVA_PROJECT_ID unset NOVA_REGION_NAME unset NOVA_URL unset NOVA_USERNAME -unset NOVA_VERSION for i in OS_TENANT_NAME OS_USERNAME OS_PASSWORD OS_AUTH_URL; do is_set $i @@ -101,9 +100,6 @@ if [[ "$ENABLED_SERVICES" =~ "n-api" ]]; then STATUS_EC2="Failed" RETURN=1 fi - - # Clean up side effects - unset NOVA_VERSION fi fi diff --git a/openrc b/openrc index 71ba5a6ea5..9bc0fd77f4 100644 --- a/openrc +++ b/openrc @@ -95,12 +95,6 @@ if [[ ! 
-v OS_CACERT ]] ; then fi fi -# Currently novaclient needs you to specify the *compute api* version. This -# needs to match the config of your catalog returned by Keystone. -export NOVA_VERSION=${NOVA_VERSION:-1.1} -# In the future this will change names: -export COMPUTE_API_VERSION=${COMPUTE_API_VERSION:-$NOVA_VERSION} - # Currently cinderclient needs you to specify the *volume api* version. This # needs to match the config of your catalog returned by Keystone. export CINDER_VERSION=${CINDER_VERSION:-2} From f54f60a63ea146e54d430e343f666638bf7947d2 Mon Sep 17 00:00:00 2001 From: Einst Crazy Date: Fri, 30 Oct 2015 23:00:57 +0800 Subject: [PATCH 0580/2941] Delete some comment or messages of keystone_data.sh(removed) As files/keystone_data.sh has been removed in the commit https://review.openstack.org/#/c/79366/, we should remove some related documations and comments. Change-Id: I7802d0052fa28d8debb7f361d36a4f108869554c --- HACKING.rst | 3 +-- doc/source/configuration.rst | 2 +- lib/tempest | 2 +- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/HACKING.rst b/HACKING.rst index 6bd24b0174..d66687e351 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -250,8 +250,7 @@ These scripts are executed serially by ``exercise.sh`` in testing situations. database access from the exercise itself. * If specific configuration needs to be present for the exercise to complete, - it should be staged in ``stack.sh``, or called from ``stack.sh`` (see - ``files/keystone_data.sh`` for an example of this). + it should be staged in ``stack.sh``, or called from ``stack.sh``. * The ``OS_*`` environment variables should be the only ones used for all authentication to OpenStack clients as documented in the CLIAuth_ wiki page. 
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index d70d3dae17..9ba657a50d 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -393,7 +393,7 @@ with ``KEYSTONE_CATALOG_BACKEND``: KEYSTONE_CATALOG_BACKEND=template DevStack's default configuration in ``sql`` mode is set in -``files/keystone_data.sh`` +``lib/keystone`` Guest Images diff --git a/lib/tempest b/lib/tempest index 32630dbf59..03e2aacf97 100644 --- a/lib/tempest +++ b/lib/tempest @@ -23,7 +23,7 @@ # # Optional Dependencies: # -# - ``ALT_*`` (similar vars exists in keystone_data.sh) +# - ``ALT_*`` # - ``LIVE_MIGRATION_AVAILABLE`` # - ``USE_BLOCK_MIGRATION_FOR_LIVE_MIGRATION`` # - ``DEFAULT_INSTANCE_TYPE`` From 08abba008e735d333f9180180c6a28e7cd5a1171 Mon Sep 17 00:00:00 2001 From: Marian Horban Date: Thu, 11 Jun 2015 13:01:41 -0400 Subject: [PATCH 0581/2941] Add ability to run Nova metadata under Apache2 This patch allows to run Nova metadata service using Apache on port 80 under /metadata URL. 
Change-Id: I18f3399738c31166eac884a9b0d5c4045d3f445c --- files/apache-nova-metadata.template | 25 +++++++++++++++++++++++++ lib/nova | 23 +++++++++++++++++++++++ 2 files changed, 48 insertions(+) create mode 100644 files/apache-nova-metadata.template diff --git a/files/apache-nova-metadata.template b/files/apache-nova-metadata.template new file mode 100644 index 0000000000..6231c1ced8 --- /dev/null +++ b/files/apache-nova-metadata.template @@ -0,0 +1,25 @@ +Listen %PUBLICPORT% + + + WSGIDaemonProcess nova-metadata processes=%APIWORKERS% threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% + WSGIProcessGroup nova-metadata + WSGIScriptAlias / %PUBLICWSGI% + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + = 2.4> + ErrorLogFormat "%M" + + ErrorLog /var/log/%APACHE_NAME%/nova-metadata.log + %SSLENGINE% + %SSLCERTFILE% + %SSLKEYFILE% + + +Alias /metadata %PUBLICWSGI% + + SetHandler wsgi-script + Options +ExecCGI + WSGIProcessGroup nova-metadata + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + diff --git a/lib/nova b/lib/nova index 6c414030ae..e82c28be4d 100644 --- a/lib/nova +++ b/lib/nova @@ -7,6 +7,7 @@ # # - ``functions`` file # - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined +# - ``FILES`` # - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined # - ``LIBVIRT_TYPE`` must be defined # - ``INSTANCE_NAME_PREFIX``, ``VOLUME_NAME_PREFIX`` must be defined @@ -87,6 +88,7 @@ NOVA_SERVICE_LOCAL_HOST=${NOVA_SERVICE_LOCAL_HOST:-$SERVICE_LOCAL_HOST} NOVA_SERVICE_LISTEN_ADDRESS=${NOVA_SERVICE_LISTEN_ADDRESS:-$SERVICE_LISTEN_ADDRESS} EC2_SERVICE_PORT=${EC2_SERVICE_PORT:-8773} EC2_SERVICE_PORT_INT=${EC2_SERVICE_PORT_INT:-18773} +METADATA_SERVICE_PORT=${METADATA_SERVICE_PORT:-8775} # Option to enable/disable config drive # NOTE: Set ``FORCE_CONFIG_DRIVE="False"`` to turn OFF config drive @@ -241,6 +243,7 @@ function _cleanup_nova_apache_wsgi { sudo rm -f $NOVA_WSGI_DIR/* sudo rm -f $(apache_site_config_for nova-api) sudo rm -f 
$(apache_site_config_for nova-ec2-api) + sudo rm -f $(apache_site_config_for nova-metadata) } # _config_nova_apache_wsgi() - Set WSGI config files of Keystone @@ -251,11 +254,14 @@ function _config_nova_apache_wsgi { nova_apache_conf=$(apache_site_config_for nova-api) local nova_ec2_apache_conf nova_ec2_apache_conf=$(apache_site_config_for nova-ec2-api) + local nova_metadata_apache_conf + nova_metadata_apache_conf=$(apache_site_config_for nova-metadata) local nova_ssl="" local nova_certfile="" local nova_keyfile="" local nova_api_port=$NOVA_SERVICE_PORT local nova_ec2_api_port=$EC2_SERVICE_PORT + local nova_metadata_port=$METADATA_SERVICE_PORT local venv_path="" if is_ssl_enabled_service nova-api; then @@ -270,6 +276,7 @@ function _config_nova_apache_wsgi { # copy proxy vhost and wsgi helper files sudo cp $NOVA_DIR/nova/wsgi/nova-api.py $NOVA_WSGI_DIR/nova-api sudo cp $NOVA_DIR/nova/wsgi/nova-ec2-api.py $NOVA_WSGI_DIR/nova-ec2-api + sudo cp $NOVA_DIR/nova/wsgi/nova-metadata.py $NOVA_WSGI_DIR/nova-metadata sudo cp $FILES/apache-nova-api.template $nova_apache_conf sudo sed -e " @@ -296,6 +303,19 @@ function _config_nova_apache_wsgi { s|%VIRTUALENV%|$venv_path|g s|%APIWORKERS%|$API_WORKERS|g " -i $nova_ec2_apache_conf + + sudo cp $FILES/apache-nova-metadata.template $nova_metadata_apache_conf + sudo sed -e " + s|%PUBLICPORT%|$nova_metadata_port|g; + s|%APACHE_NAME%|$APACHE_NAME|g; + s|%PUBLICWSGI%|$NOVA_WSGI_DIR/nova-metadata|g; + s|%SSLENGINE%|$nova_ssl|g; + s|%SSLCERTFILE%|$nova_certfile|g; + s|%SSLKEYFILE%|$nova_keyfile|g; + s|%USER%|$STACK_USER|g; + s|%VIRTUALENV%|$venv_path|g + s|%APIWORKERS%|$API_WORKERS|g + " -i $nova_metadata_apache_conf } # configure_nova() - Set config files, create data dirs, etc @@ -798,9 +818,11 @@ function start_nova_api { if [ -f ${enabled_site_file} ] && [ "$NOVA_USE_MOD_WSGI" == "True" ]; then enable_apache_site nova-api enable_apache_site nova-ec2-api + enable_apache_site nova-metadata restart_apache_server tail_log nova-api 
/var/log/$APACHE_NAME/nova-api.log tail_log nova-ec2-api /var/log/$APACHE_NAME/nova-ec2-api.log + tail_log nova-metadata /var/log/$APACHE_NAME/nova-metadata.log else run_process n-api "$NOVA_BIN_DIR/nova-api" fi @@ -916,6 +938,7 @@ function stop_nova_rest { if [ "$NOVA_USE_MOD_WSGI" == "True" ]; then disable_apache_site nova-api disable_apache_site nova-ec2-api + disable_apache_site nova-metadata restart_apache_server else stop_process n-api From 463a0e6d980690d00b17b9ebcfaf83c2f12d7615 Mon Sep 17 00:00:00 2001 From: keiji niwa Date: Sat, 11 Jul 2015 22:09:49 +0900 Subject: [PATCH 0582/2941] Replace hard-coded --ipv6-address-mode Looks like this was just a typo in the original d1498d74db816b3edbb8376ca5acb7cc5792ea5c ; replace with environment variable Change-Id: I877c1a570a68e926c91fc8a393217e6b18245f82 --- lib/neutron-legacy | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 3c3c96b7bd..c244e5470a 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -547,7 +547,7 @@ function create_neutron_initial_network { fi if [[ "$IP_VERSION" =~ .*6 ]]; then - SUBNET_V6_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 6 --ipv6-address-mode slaac --gateway $V6_NETWORK_GATEWAY --name $PROVIDER_SUBNET_NAME_V6 $NET_ID $FIXED_RANGE_V6 | grep 'id' | get_field 2) + SUBNET_V6_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 6 --ipv6-address-mode $IPV6_ADDRESS_MODE --gateway $V6_NETWORK_GATEWAY --name $PROVIDER_SUBNET_NAME_V6 $NET_ID $FIXED_RANGE_V6 | grep 'id' | get_field 2) die_if_not_set $LINENO SUBNET_V6_ID "Failure creating SUBNET_V6_ID for $PROVIDER_SUBNET_NAME_V6 $TENANT_ID" fi From 508931ff367df646d1fa6068008fe550c5572d02 Mon Sep 17 00:00:00 2001 From: Hirofumi Ichihara Date: Mon, 2 Nov 2015 17:39:48 +0900 Subject: [PATCH 0583/2941] Remove Ubuntu 14.10 as supported distribution Ubuntu 14.10(utopic) reached end of life[1]. 
[1]: https://lists.ubuntu.com/archives/ubuntu-announce/2015-July/000197.html Change-Id: Iab13ca797bda56462d9d117aa500d3ba0d9bebcb --- lib/nova_plugins/functions-libvirt | 2 +- stack.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index 5525cfd951..78c59786d8 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -23,7 +23,7 @@ DEBUG_LIBVIRT=$(trueorfalse True DEBUG_LIBVIRT) # Installs required distro-specific libvirt packages. function install_libvirt { if is_ubuntu; then - if is_arch "aarch64" && [[ ${DISTRO} =~ (trusty|utopic) ]]; then + if is_arch "aarch64" && [[ ${DISTRO} == "trusty" ]]; then install_package qemu-system else install_package qemu-kvm diff --git a/stack.sh b/stack.sh index 80247317cf..bdbb025a0b 100755 --- a/stack.sh +++ b/stack.sh @@ -178,7 +178,7 @@ source $TOP_DIR/stackrc # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (precise|trusty|utopic|vivid|7.0|wheezy|sid|testing|jessie|f21|f22|rhel7) ]]; then +if [[ ! ${DISTRO} =~ (precise|trusty|vivid|7.0|wheezy|sid|testing|jessie|f21|f22|rhel7) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" From c30657d7de1a253ffd541d07a0b449d395cb8eab Mon Sep 17 00:00:00 2001 From: Markus Zoeller Date: Mon, 2 Nov 2015 11:27:46 +0100 Subject: [PATCH 0584/2941] Cleanup ReST format issues * ReST doesn't allow monospace in italic sections. bash$ grep -R \`\` doc/build/html/ --include "*.html" * The code-block section "::" needed an empty line before the code, otherwise it gets shown in the HTML output. bash$ egrep -R "
::" doc/build/html/ --include "*.html" * Monospaced font incorrectly marked with a single back tick bash$ egrep -nR '\w`(\s|[\.,;:])' doc/source/ --include "*.rst" Change-Id: I66c3f685f33851c3f3f0f859996037fc24930246 --- doc/source/configuration.rst | 33 ++++++++++--------- doc/source/guides/devstack-with-lbaas-v2.rst | 2 +- .../guides/devstack-with-nested-kvm.rst | 12 +++---- doc/source/guides/neutron.rst | 18 +++++----- 4 files changed, 33 insertions(+), 32 deletions(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index d70d3dae17..e9921f049c 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -202,8 +202,8 @@ process to a file in ``LOGDIR``. LOGDIR=$DEST/logs -*Note the use of ``DEST`` to locate the main install directory; this -is why we suggest setting it in ``local.conf``.* +Note the use of ``DEST`` to locate the main install directory; this +is why we suggest setting it in ``local.conf``. Enabling Syslog ~~~~~~~~~~~~~~~ @@ -239,15 +239,15 @@ Database Backend Multiple database backends are available. The available databases are defined in the lib/databases directory. -`mysql` is the default database, choose a different one by putting the -following in the `localrc` section: +``mysql`` is the default database, choose a different one by putting the +following in the ``localrc`` section: :: disable_service mysql enable_service postgresql -`mysql` is the default database. +``mysql`` is the default database. RPC Backend ----------- @@ -260,6 +260,7 @@ RabbitMQ is handled via the usual service functions and Example disabling RabbitMQ in ``local.conf``: :: + disable_service rabbit @@ -511,7 +512,7 @@ VM. When running with only one replica the account, container and object services will run directly in screen. The others services like replicator, updaters or auditor runs in background. 
-If you would like to enable Swift you can add this to your `localrc` +If you would like to enable Swift you can add this to your ``localrc`` section: :: @@ -519,7 +520,7 @@ section: enable_service s-proxy s-object s-container s-account If you want a minimal Swift install with only Swift and Keystone you -can have this instead in your `localrc` section: +can have this instead in your ``localrc`` section: :: @@ -528,24 +529,24 @@ can have this instead in your `localrc` section: If you only want to do some testing of a real normal swift cluster with multiple replicas you can do so by customizing the variable -`SWIFT_REPLICAS` in your `localrc` section (usually to 3). +``SWIFT_REPLICAS`` in your ``localrc`` section (usually to 3). Swift S3 ++++++++ -If you are enabling `swift3` in `ENABLED_SERVICES` DevStack will +If you are enabling ``swift3`` in ``ENABLED_SERVICES`` DevStack will install the swift3 middleware emulation. Swift will be configured to act as a S3 endpoint for Keystone so effectively replacing the -`nova-objectstore`. +``nova-objectstore``. Only Swift proxy server is launched in the screen session all other -services are started in background and managed by `swift-init` tool. +services are started in background and managed by ``swift-init`` tool. Heat ~~~~ -Heat is disabled by default (see `stackrc` file). To enable it -explicitly you'll need the following settings in your `localrc` +Heat is disabled by default (see ``stackrc`` file). To enable it +explicitly you'll need the following settings in your ``localrc`` section :: @@ -554,7 +555,7 @@ section Heat can also run in standalone mode, and be configured to orchestrate on an external OpenStack cloud. 
To launch only Heat in standalone mode -you'll need the following settings in your `localrc` section +you'll need the following settings in your ``localrc`` section :: @@ -590,14 +591,14 @@ Xenserver ~~~~~~~~~ If you would like to use Xenserver as the hypervisor, please refer to -the instructions in `./tools/xen/README.md`. +the instructions in ``./tools/xen/README.md``. Cells ~~~~~ `Cells `__ is an alternative scaling option. To setup a cells environment add the -following to your `localrc` section: +following to your ``localrc`` section: :: diff --git a/doc/source/guides/devstack-with-lbaas-v2.rst b/doc/source/guides/devstack-with-lbaas-v2.rst index f67978310d..4e5f874787 100644 --- a/doc/source/guides/devstack-with-lbaas-v2.rst +++ b/doc/source/guides/devstack-with-lbaas-v2.rst @@ -17,7 +17,7 @@ Install devstack cd devstack -Edit your `local.conf` to look like +Edit your ``local.conf`` to look like :: diff --git a/doc/source/guides/devstack-with-nested-kvm.rst b/doc/source/guides/devstack-with-nested-kvm.rst index c652bacced..85a5656198 100644 --- a/doc/source/guides/devstack-with-nested-kvm.rst +++ b/doc/source/guides/devstack-with-nested-kvm.rst @@ -50,7 +50,7 @@ the host: parm: nested:bool Start your VM, now it should have KVM capabilities -- you can verify -that by ensuring `/dev/kvm` character device is present. +that by ensuring ``/dev/kvm`` character device is present. Configure Nested KVM for AMD-based Machines @@ -97,7 +97,7 @@ To make the above value persistent across reboots, add an entry in Expose Virtualization Extensions to DevStack VM ----------------------------------------------- -Edit the VM's libvirt XML configuration via `virsh` utility: +Edit the VM's libvirt XML configuration via ``virsh`` utility: :: @@ -115,10 +115,10 @@ Ensure DevStack VM is Using KVM ------------------------------- Before invoking ``stack.sh`` in the VM, ensure that KVM is enabled. 
This -can be verified by checking for the presence of the file `/dev/kvm` in +can be verified by checking for the presence of the file ``/dev/kvm`` in your VM. If it is present, DevStack will default to using the config -attribute `virt_type = kvm` in `/etc/nova.conf`; otherwise, it'll fall -back to `virt_type=qemu`, i.e. plain QEMU emulation. +attribute ``virt_type = kvm`` in ``/etc/nova.conf``; otherwise, it'll fall +back to ``virt_type=qemu``, i.e. plain QEMU emulation. Optionally, to explicitly set the type of virtualization, to KVM, by the libvirt driver in nova, the below config attribute can be used in @@ -131,7 +131,7 @@ DevStack's ``local.conf``: Once DevStack is configured successfully, verify if the Nova instances are using KVM by noticing the QEMU CLI invoked by Nova is using the -parameter `accel=kvm`, e.g.: +parameter ``accel=kvm``, e.g.: :: diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst index 5891f68033..5e231315f7 100644 --- a/doc/source/guides/neutron.rst +++ b/doc/source/guides/neutron.rst @@ -170,8 +170,8 @@ nova, neutron) **Compute Nodes** In this example, the nodes that will host guest instances will run -the `neutron-openvswitch-agent` for network connectivity, as well as -the compute service `nova-compute`. +the ``neutron-openvswitch-agent`` for network connectivity, as well as +the compute service ``nova-compute``. DevStack Configuration ---------------------- @@ -256,16 +256,16 @@ compute node 1. 
Q_L3_ENABLED=False Compute node 2's configuration will be exactly the same, except -`HOST_IP` will be `10.0.0.4` +``HOST_IP`` will be ``10.0.0.4`` When DevStack is configured to use provider networking (via -`Q_USE_PROVIDER_NETWORKING` is True and `Q_L3_ENABLED` is False) - +``Q_USE_PROVIDER_NETWORKING`` is True and ``Q_L3_ENABLED`` is False) - DevStack will automatically add the network interface defined in -`PUBLIC_INTERFACE` to the `OVS_PHYSICAL_BRIDGE` +``PUBLIC_INTERFACE`` to the ``OVS_PHYSICAL_BRIDGE`` For example, with the above configuration, a bridge is -created, named `br-ex` which is managed by Open vSwitch, and the -second interface on the compute node, `eth1` is attached to the +created, named ``br-ex`` which is managed by Open vSwitch, and the +second interface on the compute node, ``eth1`` is attached to the bridge, to forward traffic sent by guest VMs. Miscellaneous Tips @@ -307,7 +307,7 @@ Configuring Extension Drivers for the ML2 Plugin ------------------------------------------------ Extension drivers for the ML2 plugin are set with the variable -`Q_ML2_PLUGIN_EXT_DRIVERS`, and includes the 'port_security' extension +``Q_ML2_PLUGIN_EXT_DRIVERS``, and includes the 'port_security' extension by default. If you want to remove all the extension drivers (even -'port_security'), set `Q_ML2_PLUGIN_EXT_DRIVERS` to blank. +'port_security'), set ``Q_ML2_PLUGIN_EXT_DRIVERS`` to blank. From 85f42f698c7bd74d53309da486fbf44ed860c348 Mon Sep 17 00:00:00 2001 From: Thomas Morin Date: Tue, 1 Sep 2015 10:33:10 +0200 Subject: [PATCH 0585/2941] Provide an error message on bogus config file spec If local.conf specifies a config file addition in the following way... 
[[post-config|$MY_CONF_FILE]] [xyz] foo=bar ...and $MY_CONF_FILE points to a file whose directory is not writable by the user running the script, then stack.sh aborts with the following obscure message: 2015-09-01 08:20:08.113 | touch: setting times of '/': Permission denied This patch modifies inc/meta-config to provide a useful error message, such as: 2015-09-01 08:20:08.114 | could not create config file / ($MY_CONF_FILE) This patch also modifies inc/meta-config so that it provides an error message if $MY_CONF_FILE is empty (instead of silently ignoring this local.conf statement): 2015-09-01 09:38:53.406 | bogus config file specification: $MY_CONF_FILE is undefined Change-Id: I9b78407420318548561012a8672762bc7fdd6db6 Closes-Bug: 1490881 --- inc/meta-config | 17 ++++++++++++--- tests/test_meta_config.sh | 45 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 59 insertions(+), 3 deletions(-) diff --git a/inc/meta-config b/inc/meta-config index e5f902d1dd..b9ab6b207f 100644 --- a/inc/meta-config +++ b/inc/meta-config @@ -89,9 +89,10 @@ function merge_config_file { # note, configfile might be a variable (note the iniset, etc # created in the mega-awk below is "eval"ed too, so we just leave # it alone. - local real_configfile=$(eval echo $configfile) + local real_configfile + real_configfile=$(eval echo $configfile) if [ ! 
-f $real_configfile ]; then - touch $real_configfile + touch $real_configfile || die $LINENO "could not create config file $real_configfile ($configfile)" fi get_meta_section $file $matchgroup $configfile | \ @@ -177,8 +178,18 @@ function merge_config_group { local configfile group for group in $matchgroups; do for configfile in $(get_meta_section_files $localfile $group); do - if [[ -d $(dirname $(eval "echo $configfile")) ]]; then + local realconfigfile + local dir + + realconfigfile=$(eval "echo $configfile") + if [[ -z $realconfigfile ]]; then + die $LINENO "bogus config file specification: $configfile is undefined" + fi + dir=$(dirname $realconfigfile) + if [[ -d $dir ]]; then merge_config_file $localfile $group $configfile + else + die $LINENO "bogus config file specification $configfile ($configfile=$realconfigfile, $dir is not a directory)" fi done done diff --git a/tests/test_meta_config.sh b/tests/test_meta_config.sh index a04c081854..f3e94af8f8 100755 --- a/tests/test_meta_config.sh +++ b/tests/test_meta_config.sh @@ -23,6 +23,12 @@ function check_result { fi } +# mock function-common:die so that it does not +# interupt our test script +function die { + exit -1 +} + TEST_1C_ADD="[eee] type=new multi = foo2" @@ -110,6 +116,15 @@ attr = strip_trailing_space [DEFAULT] servers=10.11.12.13:80 +[[test8|/permission-denied.conf]] +foo=bar + +[[test9|\$UNDEF]] +foo=bar + +[[test10|does-not-exist-dir/test.conf]] +foo=bar + [[test-multi-sections|test-multi-sections.conf]] [sec-1] cfg_item1 = abcd @@ -340,6 +355,36 @@ EXPECT_VAL=" servers = 10.11.12.13:80" check_result "$VAL" "$EXPECT_VAL" +echo "merge_config_file test8 non-touchable conf file: " +set +e +# function is expected to fail and exit, running it +# in a subprocess to let this script proceed +(merge_config_file test.conf test8 /permission-denied.conf) +VAL=$? 
+EXPECT_VAL=255 +check_result "$VAL" "$EXPECT_VAL" +set -e + +echo -n "merge_config_group test9 undefined conf file: " +set +e +# function is expected to fail and exit, running it +# in a subprocess to let this script proceed +(merge_config_group test.conf test9) +VAL=$? +EXPECT_VAL=255 +check_result "$VAL" "$EXPECT_VAL" +set -e + +echo -n "merge_config_group test10 not directory: " +set +e +# function is expected to fail and exit, running it +# in a subprocess to let this script proceed +(merge_config_group test.conf test10) +VAL=$? +EXPECT_VAL=255 +check_result "$VAL" "$EXPECT_VAL" +set -e + rm -f test.conf test1c.conf test2a.conf \ test-space.conf test-equals.conf test-strip.conf \ test-colon.conf test-env.conf test-multiline.conf \ From dca49de22f57f3b2f502380b2cbfedb0dcdba209 Mon Sep 17 00:00:00 2001 From: Martin Hickey Date: Tue, 20 Oct 2015 12:13:19 +0100 Subject: [PATCH 0586/2941] Use stevedore aliases for interface_driver configuration interface_driver configuration was updated to use stevedore aliases. This patch is to change devstack scripts to now use the aliases instead of the previous class imports. 
Closes-Bug: #1504536 Change-Id: Ic56bfcc1f9da05a999e6fd328e4dd6617e9470ff --- lib/neutron_plugins/bigswitch_floodlight | 4 ++-- lib/neutron_plugins/brocade | 2 +- lib/neutron_plugins/cisco | 2 +- lib/neutron_plugins/linuxbridge_agent | 2 +- lib/neutron_plugins/openvswitch_agent | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/lib/neutron_plugins/bigswitch_floodlight b/lib/neutron_plugins/bigswitch_floodlight index 4166131534..f52105e658 100644 --- a/lib/neutron_plugins/bigswitch_floodlight +++ b/lib/neutron_plugins/bigswitch_floodlight @@ -58,9 +58,9 @@ function neutron_plugin_configure_service { function neutron_plugin_setup_interface_driver { local conf_file=$1 if [ "$BS_FL_VIF_DRIVER" = "ivs" ]; then - iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.IVSInterfaceDriver + iniset $conf_file DEFAULT interface_driver ivs else - iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver + iniset $conf_file DEFAULT interface_driver openvswitch fi } diff --git a/lib/neutron_plugins/brocade b/lib/neutron_plugins/brocade index 557b94dec0..953360e070 100644 --- a/lib/neutron_plugins/brocade +++ b/lib/neutron_plugins/brocade @@ -68,7 +68,7 @@ function neutron_plugin_configure_plugin_agent { function neutron_plugin_setup_interface_driver { local conf_file=$1 - iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.BridgeInterfaceDriver + iniset $conf_file DEFAULT interface_driver linuxbridge } function has_neutron_plugin_security_group { diff --git a/lib/neutron_plugins/cisco b/lib/neutron_plugins/cisco index 90dcd574c0..7d0cf1af39 100644 --- a/lib/neutron_plugins/cisco +++ b/lib/neutron_plugins/cisco @@ -150,7 +150,7 @@ function neutron_plugin_create_initial_network_profile { function neutron_plugin_setup_interface_driver { local conf_file=$1 - iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver + iniset $conf_file DEFAULT interface_driver 
openvswitch } # Restore xtrace diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent index bd4438db04..f28bcfeadd 100644 --- a/lib/neutron_plugins/linuxbridge_agent +++ b/lib/neutron_plugins/linuxbridge_agent @@ -85,7 +85,7 @@ function neutron_plugin_configure_plugin_agent { function neutron_plugin_setup_interface_driver { local conf_file=$1 - iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.BridgeInterfaceDriver + iniset $conf_file DEFAULT interface_driver linuxbridge } function neutron_plugin_check_adv_test_requirements { diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent index 48e47b3dab..5a843ffba7 100644 --- a/lib/neutron_plugins/openvswitch_agent +++ b/lib/neutron_plugins/openvswitch_agent @@ -110,7 +110,7 @@ function neutron_plugin_configure_plugin_agent { function neutron_plugin_setup_interface_driver { local conf_file=$1 - iniset $conf_file DEFAULT interface_driver neutron.agent.linux.interface.OVSInterfaceDriver + iniset $conf_file DEFAULT interface_driver openvswitch } function neutron_plugin_check_adv_test_requirements { From a91d455e23f68613db0e67fea339a13fd9eae7d4 Mon Sep 17 00:00:00 2001 From: Steve Martinelli Date: Tue, 20 Oct 2015 23:15:38 -0400 Subject: [PATCH 0587/2941] change `swift post` to openstackclient cli command with the release of osc 1.8.0, modifying object store account properties is now available. use this mechanism and avoid setting environment variable that are only helpful for swift CLI. Change-Id: Ie51e3e2bb86162763f23d0a6bed36208811f89fc --- lib/swift | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/lib/swift b/lib/swift index 3a8e80dd44..ee0238d975 100644 --- a/lib/swift +++ b/lib/swift @@ -813,11 +813,13 @@ function stop_swift { } function swift_configure_tempurls { + # note we are using swift credentials! 
OS_USERNAME=swift \ - OS_PROJECT_NAME=$SERVICE_TENANT_NAME \ - OS_PASSWORD=$SERVICE_PASSWORD \ - OS_AUTH_URL=$SERVICE_ENDPOINT \ - swift post --auth-version 3 -m "Temp-URL-Key: $SWIFT_TEMPURL_KEY" + OS_PASSWORD=$SERVICE_PASSWORD \ + OS_PROJECT_NAME=$SERVICE_TENANT_NAME \ + OS_AUTH_URL=$SERVICE_ENDPOINT \ + openstack object store account \ + set --property "Temp-URL-Key=$SWIFT_TEMPURL_KEY" } # Restore xtrace From 0c96c37b5d9775a5c0ad257f210112f76f7987e7 Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Sun, 1 Nov 2015 21:45:29 -0500 Subject: [PATCH 0588/2941] Enable devstack for Ubuntu 15.10 Wily Change-Id: I2056fd26d42f29ececc4c38fdd791589ec7037a0 --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index bdbb025a0b..72253f81d7 100755 --- a/stack.sh +++ b/stack.sh @@ -178,7 +178,7 @@ source $TOP_DIR/stackrc # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (precise|trusty|vivid|7.0|wheezy|sid|testing|jessie|f21|f22|rhel7) ]]; then +if [[ ! ${DISTRO} =~ (precise|trusty|vivid|wily|7.0|wheezy|sid|testing|jessie|f21|f22|rhel7) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" From 33a96ffc2609d8477542002e56515a5026c2c48d Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Mon, 2 Nov 2015 17:23:39 -0500 Subject: [PATCH 0589/2941] Zookeeper for DLM scenarios In Tokyo, there was a cross project session on distributed key locking: https://etherpad.openstack.org/p/mitaka-cross-project-dlm In support of the discussion there, we'll need support for a zookeeper service in Devstack and ability to use libraries like Tooz for DLM functionality. In this review, we pick up some configuration files from monasca-api and copy the lib/template to implement the zookeeper lifecycle. 
Those services that need zookeeper need to add "zookeeper" in ENABLED_SERVICES. Change-Id: Icef26e5cdaa930a581e27d330e47706776a7f98f --- files/debs/zookeeper | 1 + files/rpms/zookeeper | 1 + files/zookeeper/environment | 36 +++++++++++++ files/zookeeper/log4j.properties | 69 +++++++++++++++++++++++++ files/zookeeper/myid | 1 + files/zookeeper/zoo.cfg | 74 +++++++++++++++++++++++++++ lib/zookeeper | 86 ++++++++++++++++++++++++++++++++ stack.sh | 15 ++++++ stackrc | 2 +- unstack.sh | 5 ++ 10 files changed, 289 insertions(+), 1 deletion(-) create mode 100644 files/debs/zookeeper create mode 100644 files/rpms/zookeeper create mode 100644 files/zookeeper/environment create mode 100644 files/zookeeper/log4j.properties create mode 100644 files/zookeeper/myid create mode 100644 files/zookeeper/zoo.cfg create mode 100644 lib/zookeeper diff --git a/files/debs/zookeeper b/files/debs/zookeeper new file mode 100644 index 0000000000..66227f7e31 --- /dev/null +++ b/files/debs/zookeeper @@ -0,0 +1 @@ +zookeeperd \ No newline at end of file diff --git a/files/rpms/zookeeper b/files/rpms/zookeeper new file mode 100644 index 0000000000..c0d1c3066d --- /dev/null +++ b/files/rpms/zookeeper @@ -0,0 +1 @@ +zookeeper \ No newline at end of file diff --git a/files/zookeeper/environment b/files/zookeeper/environment new file mode 100644 index 0000000000..afa2d2f89f --- /dev/null +++ b/files/zookeeper/environment @@ -0,0 +1,36 @@ +# +# (C) Copyright 2015 Hewlett Packard Enterprise Development Company LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +#    http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +# Modified from http://packages.ubuntu.com/saucy/zookeeperd +NAME=zookeeper +ZOOCFGDIR=/etc/zookeeper/conf + +# seems, that log4j requires the log4j.properties file to be in the classpath +CLASSPATH="$ZOOCFGDIR:/usr/share/java/jline.jar:/usr/share/java/log4j-1.2.jar:/usr/share/java/xercesImpl.jar:/usr/share/java/xmlParserAPIs.jar:/usr/share/java/netty.jar:/usr/share/java/slf4j-api.jar:/usr/share/java/slf4j-log4j12.jar:/usr/share/java/zookeeper.jar" + +ZOOCFG="$ZOOCFGDIR/zoo.cfg" +ZOO_LOG_DIR=/var/log/zookeeper +USER=$NAME +GROUP=$NAME +PIDDIR=/var/run/$NAME +PIDFILE=$PIDDIR/$NAME.pid +SCRIPTNAME=/etc/init.d/$NAME +JAVA=/usr/bin/java +ZOOMAIN="org.apache.zookeeper.server.quorum.QuorumPeerMain" +ZOO_LOG4J_PROP="INFO,ROLLINGFILE" +JMXLOCALONLY=false +JAVA_OPTS="" diff --git a/files/zookeeper/log4j.properties b/files/zookeeper/log4j.properties new file mode 100644 index 0000000000..6c45a4aad9 --- /dev/null +++ b/files/zookeeper/log4j.properties @@ -0,0 +1,69 @@ +# +# (C) Copyright 2015 Hewlett Packard Enterprise Development Company LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +#    http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +# From http://packages.ubuntu.com/saucy/zookeeperd + +# ZooKeeper Logging Configuration +# + +# Format is " (, )+ + +log4j.rootLogger=${zookeeper.root.logger} + +# Example: console appender only +# log4j.rootLogger=INFO, CONSOLE + +# Example with rolling log file +#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE + +# Example with rolling log file and tracing +#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE + +# +# Log INFO level and above messages to the console +# +log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender +log4j.appender.CONSOLE.Threshold=INFO +log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout +log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n + +# +# Add ROLLINGFILE to rootLogger to get log file output +# Log DEBUG level and above messages to a log file +log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender +log4j.appender.ROLLINGFILE.Threshold=WARN +log4j.appender.ROLLINGFILE.File=${zookeeper.log.dir}/zookeeper.log + +# Max log file size of 10MB +log4j.appender.ROLLINGFILE.MaxFileSize=10MB +# uncomment the next line to limit number of backup files +#log4j.appender.ROLLINGFILE.MaxBackupIndex=10 + +log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout +log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n + + +# +# Add TRACEFILE to rootLogger to get log file output +# Log DEBUG level and above messages to a log file +log4j.appender.TRACEFILE=org.apache.log4j.FileAppender +log4j.appender.TRACEFILE.Threshold=TRACE +log4j.appender.TRACEFILE.File=${zookeeper.log.dir}/zookeeper_trace.log + +log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout +### Notice we are including log4j's NDC here (%x) +log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n diff --git a/files/zookeeper/myid b/files/zookeeper/myid new file mode 100644 index 0000000000..c227083464 --- /dev/null +++ 
b/files/zookeeper/myid @@ -0,0 +1 @@ +0 \ No newline at end of file diff --git a/files/zookeeper/zoo.cfg b/files/zookeeper/zoo.cfg new file mode 100644 index 0000000000..b8f55827e3 --- /dev/null +++ b/files/zookeeper/zoo.cfg @@ -0,0 +1,74 @@ +# +# (C) Copyright 2015 Hewlett Packard Enterprise Development Company LP +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +#    http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# http://hadoop.apache.org/zookeeper/docs/current/zookeeperAdmin.html + +# The number of milliseconds of each tick +tickTime=2000 +# The number of ticks that the initial +# synchronization phase can take +initLimit=10 +# The number of ticks that can pass between +# sending a request and getting an acknowledgement +syncLimit=5 +# the directory where the snapshot is stored. +dataDir=/var/lib/zookeeper +# Place the dataLogDir to a separate physical disc for better performance +# dataLogDir=/disk2/zookeeper + +# the port at which the clients will connect +clientPort=2181 + +# Maximum number of clients that can connect from one client +maxClientCnxns=60 + +# specify all zookeeper servers +# The fist port is used by followers to connect to the leader +# The second one is used for leader election + +server.0=127.0.0.1:2888:3888 + +# To avoid seeks ZooKeeper allocates space in the transaction log file in +# blocks of preAllocSize kilobytes. The default block size is 64M. One reason +# for changing the size of the blocks is to reduce the block size if snapshots +# are taken more often. (Also, see snapCount). 
+#preAllocSize=65536 + +# Clients can submit requests faster than ZooKeeper can process them, +# especially if there are a lot of clients. To prevent ZooKeeper from running +# out of memory due to queued requests, ZooKeeper will throttle clients so that +# there is no more than globalOutstandingLimit outstanding requests in the +# system. The default limit is 1,000.ZooKeeper logs transactions to a +# transaction log. After snapCount transactions are written to a log file a +# snapshot is started and a new transaction log file is started. The default +# snapCount is 10,000. +#snapCount=1000 + +# If this option is defined, requests will be will logged to a trace file named +# traceFile.year.month.day. +#traceFile= + +# Leader accepts client connections. Default value is "yes". The leader machine +# coordinates updates. For higher update throughput at thes slight expense of +# read throughput the leader can be configured to not accept clients and focus +# on coordination. +#leaderServes=yes + +# Autopurge every hour to avoid using lots of disk in bursts +# Order of the next 2 properties matters. +# autopurge.snapRetainCount must be before autopurge.purgeInterval. 
+autopurge.snapRetainCount=3 +autopurge.purgeInterval=1 \ No newline at end of file diff --git a/lib/zookeeper b/lib/zookeeper new file mode 100644 index 0000000000..e62ba8ada8 --- /dev/null +++ b/lib/zookeeper @@ -0,0 +1,86 @@ +#!/bin/bash +# +# lib/zookeeper +# Functions to control the installation and configuration of **zookeeper** + +# Dependencies: +# +# - ``functions`` file + +# ``stack.sh`` calls the entry points in this order: +# +# - is_zookeeper_enabled +# - install_zookeeper +# - configure_zookeeper +# - init_zookeeper +# - start_zookeeper +# - stop_zookeeper +# - cleanup_zookeeper + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# + +# Set up default directories +ZOOKEEPER_DATA_DIR=$DEST/data/zookeeper +ZOOKEEPER_CONF_DIR=/etc/zookeeper + + +# Entry Points +# ------------ + +# Test if any zookeeper service us enabled +# is_zookeeper_enabled +function is_zookeeper_enabled { + [[ ,${ENABLED_SERVICES}, =~ ,"zookeeper", ]] && return 0 + return 1 +} + +# cleanup_zookeeper() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_zookeeper { + sudo rm -rf $ZOOKEEPER_DATA_DIR +} + +# configure_zookeeper() - Set config files, create data dirs, etc +function configure_zookeeper { + sudo cp $FILES/zookeeper/* $ZOOKEEPER_CONF_DIR + sudo sed -i -e 's|.*dataDir.*|dataDir='$ZOOKEEPER_DATA_DIR'|' $ZOOKEEPER_CONF_DIR/zoo.cfg +} + +# init_zookeeper() - Initialize databases, etc. 
+function init_zookeeper { + # clean up from previous (possibly aborted) runs + # create required data files + sudo rm -rf $ZOOKEEPER_DATA_DIR + sudo mkdir -p $ZOOKEEPER_DATA_DIR +} + +# install_zookeeper() - Collect source and prepare +function install_zookeeper { + install_package zookeeperd +} + +# start_zookeeper() - Start running processes, including screen +function start_zookeeper { + start_service zookeeper +} + +# stop_zookeeper() - Stop running processes (non-screen) +function stop_zookeeper { + stop_service zookeeper +} + +# Restore xtrace +$XTRACE + +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/stack.sh b/stack.sh index bdbb025a0b..118785150a 100755 --- a/stack.sh +++ b/stack.sh @@ -539,6 +539,7 @@ source $TOP_DIR/lib/heat source $TOP_DIR/lib/neutron-legacy source $TOP_DIR/lib/ldap source $TOP_DIR/lib/dstat +source $TOP_DIR/lib/zookeeper # Extras Source # -------------- @@ -729,6 +730,11 @@ run_phase stack pre-install install_rpc_backend +if is_service_enabled zookeeper; then + cleanup_zookeeper + configure_zookeeper + init_zookeeper +fi if is_service_enabled $DATABASE_BACKENDS; then install_database fi @@ -968,6 +974,15 @@ save_stackenv $LINENO start_dstat +# Zookeeper +# ----- + +# zookeeper for use with tooz for Distributed Lock Management capabilities etc., +if is_service_enabled zookeeper; then + start_zookeeper +fi + + # Keystone # -------- diff --git a/stackrc b/stackrc index 819aa0125c..19538c0679 100644 --- a/stackrc +++ b/stackrc @@ -69,7 +69,7 @@ if ! 
isset ENABLED_SERVICES ; then # Dashboard ENABLED_SERVICES+=,horizon # Additional services - ENABLED_SERVICES+=,rabbit,tempest,mysql,dstat + ENABLED_SERVICES+=,rabbit,tempest,mysql,dstat,zookeeper fi # SQLAlchemy supports multiple database drivers for each database server diff --git a/unstack.sh b/unstack.sh index 30447a7005..0cace3254a 100755 --- a/unstack.sh +++ b/unstack.sh @@ -69,6 +69,7 @@ source $TOP_DIR/lib/heat source $TOP_DIR/lib/neutron-legacy source $TOP_DIR/lib/ldap source $TOP_DIR/lib/dstat +source $TOP_DIR/lib/zookeeper # Extras Source # -------------- @@ -172,6 +173,10 @@ if is_service_enabled dstat; then stop_dstat fi +if is_service_enabled zookeeper; then + stop_zookeeper +fi + # Clean up the remainder of the screen processes SCREEN=$(which screen) if [[ -n "$SCREEN" ]]; then From 9013bb0c24623ce8d064b561d4fd331e370762e1 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 4 Nov 2015 12:31:39 -0500 Subject: [PATCH 0590/2941] remove wheel cache code Thanks to lifeless, pip now implicitly has a wheel cache so that it builds a wheel before every install, and uses that cache. All our clever attempts at manually doing wheelhouse things is actually bypassing the existing cache and making things take longer. We should remove all of this code and just let pip do this thing, which is does very well, and get out of the way. Change-Id: Ia140dc34638d893b92f66d1ba20efd9522c5923b --- Makefile | 6 +-- doc/source/index.rst | 1 - files/venv-requirements.txt | 11 ----- stack.sh | 6 --- stackrc | 5 --- tools/build_wheels.sh | 86 ------------------------------------- 6 files changed, 1 insertion(+), 114 deletions(-) delete mode 100644 files/venv-requirements.txt delete mode 100755 tools/build_wheels.sh diff --git a/Makefile b/Makefile index a6bb230708..a94d60a0ef 100644 --- a/Makefile +++ b/Makefile @@ -13,7 +13,6 @@ # Duplicated from stackrc for now DEST=/opt/stack -WHEELHOUSE=$(DEST)/.wheelhouse all: echo "This just saved you from a terrible mistake!" 
@@ -25,9 +24,6 @@ stack: unstack: ./unstack.sh -wheels: - WHEELHOUSE=$(WHEELHOUSE) tools/build_wheels.sh - docs: tox -edocs @@ -57,7 +53,7 @@ clean: # Clean out the cache too realclean: clean - rm -rf files/cirros*.tar.gz files/Fedora*.qcow2 $(WHEELHOUSE) + rm -rf files/cirros*.tar.gz files/Fedora*.qcow2 # Repo stuffs diff --git a/doc/source/index.rst b/doc/source/index.rst index 99e96b1d1a..b65730ffe8 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -206,7 +206,6 @@ Tools * `tools/build\_docs.sh `__ * `tools/build\_venv.sh `__ -* `tools/build\_wheels.sh `__ * `tools/create-stack-user.sh `__ * `tools/create\_userrc.sh `__ * `tools/fixup\_stuff.sh `__ diff --git a/files/venv-requirements.txt b/files/venv-requirements.txt deleted file mode 100644 index b9a55b423d..0000000000 --- a/files/venv-requirements.txt +++ /dev/null @@ -1,11 +0,0 @@ -# Once we can prebuild wheels before a devstack run, uncomment the skipped libraries -cryptography -# lxml # still install from from packages -# netifaces # still install from packages -#numpy # slowest wheel by far, stop building until we are actually using the output -posix-ipc -# psycopg # still install from packages -pycrypto -pyOpenSSL -PyYAML -xattr diff --git a/stack.sh b/stack.sh index bdbb025a0b..aedc5d4006 100755 --- a/stack.sh +++ b/stack.sh @@ -716,12 +716,6 @@ source $TOP_DIR/tools/fixup_stuff.sh # Install required infra support libraries install_infra -# Pre-build some problematic wheels -if [[ -n ${WHEELHOUSE:-} && ! 
-d ${WHEELHOUSE:-} ]]; then - source $TOP_DIR/tools/build_wheels.sh -fi - - # Extras Pre-install # ------------------ # Phase: pre-install diff --git a/stackrc b/stackrc index 819aa0125c..d9f477e8ce 100644 --- a/stackrc +++ b/stackrc @@ -143,11 +143,6 @@ USE_VENV=$(trueorfalse False USE_VENV) # requirmenets files here, in a comma-separated list ADDITIONAL_VENV_PACKAGES=${ADITIONAL_VENV_PACKAGES:-""} -# Configure wheel cache location -export WHEELHOUSE=${WHEELHOUSE:-$DEST/.wheelhouse} -export PIP_WHEEL_DIR=${PIP_WHEEL_DIR:-$WHEELHOUSE} -export PIP_FIND_LINKS=${PIP_FIND_LINKS:-file://$WHEELHOUSE} - # This can be used to turn database query logging on and off # (currently only implemented for MySQL backend) DATABASE_QUERY_LOGGING=$(trueorfalse False DATABASE_QUERY_LOGGING) diff --git a/tools/build_wheels.sh b/tools/build_wheels.sh deleted file mode 100755 index 14c2999c8f..0000000000 --- a/tools/build_wheels.sh +++ /dev/null @@ -1,86 +0,0 @@ -#!/usr/bin/env bash -# -# **tools/build_wheels.sh** - Build a cache of Python wheels -# -# build_wheels.sh [package [...]] -# -# System package prerequisites listed in ``files/*/devlibs`` will be installed -# -# Builds wheels for all virtual env requirements listed in -# ``venv-requirements.txt`` plus any supplied on the command line. -# -# Assumes: -# - ``tools/install_pip.sh`` has been run and a suitable ``pip/setuptools`` is available. - -# If ``TOP_DIR`` is set we're being sourced rather than running stand-alone -# or in a sub-shell -if [[ -z "$TOP_DIR" ]]; then - - set -o errexit - set -o nounset - - # Keep track of the DevStack directory - TOP_DIR=$(cd $(dirname "$0")/.. && pwd) - FILES=$TOP_DIR/files - - # Import common functions - source $TOP_DIR/functions - - GetDistro - - source $TOP_DIR/stackrc - - trap err_trap ERR - -fi - -# Get additional packages to build -MORE_PACKAGES="$@" - -# Exit on any errors so that errors don't compound -function err_trap { - local r=$? 
- set +o xtrace - - rm -rf $TMP_VENV_PATH - - exit $r -} - -# Get system prereqs -install_package $(get_packages devlibs) - -# Get a modern ``virtualenv`` -pip_install virtualenv - -# Prepare the workspace -TMP_VENV_PATH=$(mktemp -d tmp-venv-XXXX) -virtualenv $TMP_VENV_PATH - -# Install modern pip and wheel -PIP_VIRTUAL_ENV=$TMP_VENV_PATH pip_install -U pip wheel - -# BUG: cffi has a lot of issues. It has no stable ABI, if installed -# code is built with a different ABI than the one that's detected at -# load time, it tries to compile on the fly for the new ABI in the -# install location (which will probably be /usr and not -# writable). Also cffi is often included via setup_requires by -# packages, which have different install rules (allowing betas) than -# pip has. -# -# Because of this we must pip install cffi into the venv to build -# wheels. -PIP_VIRTUAL_ENV=$TMP_VENV_PATH pip_install_gr cffi - -# ``VENV_PACKAGES`` is a list of packages we want to pre-install -VENV_PACKAGE_FILE=$FILES/venv-requirements.txt -if [[ -r $VENV_PACKAGE_FILE ]]; then - VENV_PACKAGES=$(grep -v '^#' $VENV_PACKAGE_FILE) -fi - -for pkg in ${VENV_PACKAGES,/ } ${MORE_PACKAGES}; do - $TMP_VENV_PATH/bin/pip wheel $pkg -done - -# Clean up wheel workspace -rm -rf $TMP_VENV_PATH From 9127c1a56bc1504ae77df25b0da7a6d0a5f0bfe8 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Thu, 5 Nov 2015 10:09:02 +0100 Subject: [PATCH 0591/2941] Do not remove python-pip package on Fedora-23 python on fedora 23 compiled with rewheel support, in this case the python-pip is a required package, and cannot be removed. 
[1] http://pkgs.fedoraproject.org/cgit/python.git/tree/python.spec?id=3b6fac0339bab69ca5fbf2881568f0565ab0e252#n174 Change-Id: I499b7bec97c4360b32d156079f2b7f3923e3888a --- tools/install_pip.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tools/install_pip.sh b/tools/install_pip.sh index 13c1786fb9..ab5efb2e77 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -110,7 +110,11 @@ get_versions # Do pip # Eradicate any and all system packages -uninstall_package python-pip + +# python in f23 depends on the python-pip package +if ! { is_fedora && [[ $DISTRO == "f23" ]]; }; then + uninstall_package python-pip +fi install_get_pip From e3c2673ae447d8b3cc4649f5efde5b84e26f6cd9 Mon Sep 17 00:00:00 2001 From: Atsushi SAKAI Date: Fri, 6 Nov 2015 13:23:47 +0900 Subject: [PATCH 0592/2941] Fix two typos on faq.rst guarunteed => guaranteed Centos => CentOS Change-Id: Id356443fcdc4128ff20d7a89158265aa16c105b2 --- doc/source/faq.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/faq.rst b/doc/source/faq.rst index 3562bfacee..7aca8d0b4a 100644 --- a/doc/source/faq.rst +++ b/doc/source/faq.rst @@ -54,7 +54,7 @@ combinations listed in ``README.md``. DevStack is only supported on releases other than those documented in ``README.md`` on a best-effort basis. -Are there any differences between Ubuntu and Centos/Fedora support? +Are there any differences between Ubuntu and CentOS/Fedora support? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Both should work well and are tested by DevStack CI. @@ -146,7 +146,7 @@ your ``local.conf`` Upstream DevStack is only tested with master and stable -branches. Setting custom BRANCH definitions is not guarunteed to +branches. Setting custom BRANCH definitions is not guaranteed to produce working results. What can I do about RabbitMQ not wanting to start on my fresh new VM? 
From ec7f490a687ab01f520835341c09cef9f697f05d Mon Sep 17 00:00:00 2001 From: Atsushi SAKAI Date: Fri, 6 Nov 2015 13:35:24 +0900 Subject: [PATCH 0593/2941] Remove 'enable_service tempest' from sample/local.conf tempest is already defined in stackrc as default. Without this definition in local.conf, tempest is installed successfully. If it still needs "enable_service tempest" definition on local.conf, devstack itself has some problem. In my environment, tempest installation works without this definition on local.conf. Change-Id: I25cda0142538d21bb9656b471e65ca5b018e8378 --- samples/local.conf | 6 ------ 1 file changed, 6 deletions(-) diff --git a/samples/local.conf b/samples/local.conf index cb293b6c15..b92097dd8d 100644 --- a/samples/local.conf +++ b/samples/local.conf @@ -93,9 +93,3 @@ SWIFT_REPLICAS=1 # moved by setting ``SWIFT_DATA_DIR``. The directory will be created # if it does not exist. SWIFT_DATA_DIR=$DEST/data - -# Tempest -# ------- - -# Install the tempest test suite -enable_service tempest From e9ef0fefa52a2d30079eba1dead1a4df61a6ad7d Mon Sep 17 00:00:00 2001 From: Atsushi SAKAI Date: Fri, 6 Nov 2015 14:25:46 +0900 Subject: [PATCH 0594/2941] Remove lib/neutron_plugins/ibm SDN-VE on neutron is already removed. This patch removes SDN-VE from devstack. Ref. 
Removing the SDN-VE monolithic plugin https://review.openstack.org/#/c/217703/ Remove IBM SDN-VE left-overs https://review.openstack.org/#/c/237716/ Change-Id: Ie1b531153d1632798235b1100cdf9b068edcce26 --- lib/neutron_plugins/ibm | 133 ---------------------------------------- 1 file changed, 133 deletions(-) delete mode 100644 lib/neutron_plugins/ibm diff --git a/lib/neutron_plugins/ibm b/lib/neutron_plugins/ibm deleted file mode 100644 index dd5cfa6694..0000000000 --- a/lib/neutron_plugins/ibm +++ /dev/null @@ -1,133 +0,0 @@ -#!/bin/bash -# -# Neutron IBM SDN-VE plugin -# --------------------------- - -# Save trace setting -IBM_XTRACE=$(set +o | grep xtrace) -set +o xtrace - -source $TOP_DIR/lib/neutron_plugins/ovs_base - -function neutron_plugin_install_agent_packages { - _neutron_ovs_base_install_agent_packages -} - -function _neutron_interface_setup { - # Setup one interface on the integration bridge if needed - # The plugin agent to be used if more than one interface is used - local bridge=$1 - local interface=$2 - sudo ovs-vsctl --no-wait -- --may-exist add-port $bridge $interface -} - -function neutron_setup_integration_bridge { - # Setup integration bridge if needed - if [[ "$SDNVE_INTEGRATION_BRIDGE" != "" ]]; then - neutron_ovs_base_cleanup - _neutron_ovs_base_setup_bridge $SDNVE_INTEGRATION_BRIDGE - if [[ "$SDNVE_INTERFACE_MAPPINGS" != "" ]]; then - interfaces=(${SDNVE_INTERFACE_MAPPINGS//[,:]/ }) - _neutron_interface_setup $SDNVE_INTEGRATION_BRIDGE ${interfaces[1]} - fi - fi - - # Set controller to SDNVE controller (1st of list) if exists - if [[ "$SDNVE_CONTROLLER_IPS" != "" ]]; then - # Get the first controller - controllers=(${SDNVE_CONTROLLER_IPS//[\[,\]]/ }) - SDNVE_IP=${controllers[0]} - sudo ovs-vsctl set-controller $SDNVE_INTEGRATION_BRIDGE tcp:$SDNVE_IP - fi -} - -function neutron_plugin_create_nova_conf { - # if n-cpu is enabled, then setup integration bridge - if is_service_enabled n-cpu; then - neutron_setup_integration_bridge - fi -} - 
-function is_neutron_ovs_base_plugin { - if [[ "$SDNVE_INTEGRATION_BRIDGE" != "" ]]; then - # Yes, we use OVS. - return 0 - else - # No, we do not use OVS. - return 1 - fi -} - -function neutron_plugin_configure_common { - Q_PLUGIN_CONF_PATH=etc/neutron/plugins/ibm - Q_PLUGIN_CONF_FILENAME=sdnve_neutron_plugin.ini - Q_PLUGIN_CLASS="neutron.plugins.ibm.sdnve_neutron_plugin.SdnvePluginV2" -} - -function neutron_plugin_configure_service { - # Define extra "SDNVE" configuration options when q-svc is configured - - iniset /$Q_PLUGIN_CONF_FILE securitygroup firewall_driver neutron.agent.firewall.NoopFirewallDriver - - if [[ "$SDNVE_CONTROLLER_IPS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE sdnve controller_ips $SDNVE_CONTROLLER_IPS - fi - - if [[ "$SDNVE_INTEGRATION_BRIDGE" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE sdnve integration_bridge $SDNVE_INTEGRATION_BRIDGE - fi - - if [[ "$SDNVE_RESET_BRIDGE" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE sdnve reset_bridge $SDNVE_RESET_BRIDGE - fi - - if [[ "$SDNVE_OUT_OF_BAND" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE sdnve out_of_band $SDNVE_OUT_OF_BAND - fi - - if [[ "$SDNVE_INTERFACE_MAPPINGS" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE sdnve interface_mappings $SDNVE_INTERFACE_MAPPINGS - fi - - if [[ "$SDNVE_FAKE_CONTROLLER" != "" ]]; then - iniset /$Q_PLUGIN_CONF_FILE sdnve use_fake_controller $SDNVE_FAKE_CONTROLLER - fi - - - iniset $NEUTRON_CONF DEFAULT notification_driver neutron.openstack.common.notifier.no_op_notifier - -} - -function neutron_plugin_configure_plugin_agent { - AGENT_BINARY="$NEUTRON_BIN_DIR/neutron-ibm-agent" -} - -function neutron_plugin_configure_debug_command { - : -} - -function neutron_plugin_setup_interface_driver { - return 0 -} - -function has_neutron_plugin_security_group { - # Does not support Security Groups - return 1 -} - -function neutron_ovs_base_cleanup { - if [[ "$SDNVE_RESET_BRIDGE" != False ]]; then - # remove all OVS ports that look like Neutron created ports - for port in 
$(sudo ovs-vsctl list port | grep -o -e tap[0-9a-f\-]* -e q[rg]-[0-9a-f\-]*); do - sudo ovs-vsctl del-port ${port} - done - - # remove integration bridge created by Neutron - for bridge in $(sudo ovs-vsctl list-br | grep -o -e ${SDNVE_INTEGRATION_BRIDGE}); do - sudo ovs-vsctl del-br ${bridge} - done - fi -} - -# Restore xtrace -$IBM_XTRACE From 5f8133caac097235ed4fe73d878df0ee907eb51c Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Fri, 6 Nov 2015 11:48:19 -0500 Subject: [PATCH 0595/2941] remove gratuitous python packages With pip + upper-constraints we're nearly always over installing all python packages because we no longer support a range, we support *exactly* one version. This removes a bunch of the gratuitous package installs which we're going to install over, lxml, numpy, libvirt. All of these we had coming from packages in the past for speed concerns, but upper constraints removes that. It also ensures that all the headers to build all those are in general, so they are guarunteed available at all times. 
Change-Id: Ia76de730d65c84d81c4fb2c980ae1b4d595f9f5b --- files/debs/general | 4 +++- files/debs/keystone | 1 - files/debs/n-novnc | 1 - files/debs/nova | 7 ++----- 4 files changed, 5 insertions(+), 8 deletions(-) delete mode 100644 files/debs/n-novnc diff --git a/files/debs/general b/files/debs/general index 146052643c..80e81f5841 100644 --- a/files/debs/general +++ b/files/debs/general @@ -20,8 +20,10 @@ python2.7 python-gdbm # needed for testr bc libyaml-dev -libffi-dev +libffi-dev # for pyOpenSSL libssl-dev # for pyOpenSSL +libxml2-dev # lxml +libxslt1-dev # lxml gettext # used for compiling message catalogs openjdk-7-jre-headless # NOPRIME pkg-config diff --git a/files/debs/keystone b/files/debs/keystone index 70a56499e9..f5816b59de 100644 --- a/files/debs/keystone +++ b/files/debs/keystone @@ -1,4 +1,3 @@ -python-lxml sqlite3 python-mysqldb python-mysql.connector diff --git a/files/debs/n-novnc b/files/debs/n-novnc deleted file mode 100644 index c8722b9f66..0000000000 --- a/files/debs/n-novnc +++ /dev/null @@ -1 +0,0 @@ -python-numpy diff --git a/files/debs/nova b/files/debs/nova index 346b8b337a..d1678a7a6f 100644 --- a/files/debs/nova +++ b/files/debs/nova @@ -8,7 +8,8 @@ libmysqlclient-dev mysql-server # NOPRIME python-mysqldb python-mysql.connector -python-lxml # needed for glance which is needed for nova --- this shouldn't be here +libxml2-dev # needed for building lxml +libxslt1-dev gawk iptables ebtables @@ -25,7 +26,3 @@ curl genisoimage # required for config_drive rabbitmq-server # NOPRIME socat # used by ajaxterm -python-libvirt # NOPRIME -python-libxml2 -python-numpy # used by websockify for spice console -python-m2crypto From 6cd616a9edf6561ebc802f3083eb24f2713d4e96 Mon Sep 17 00:00:00 2001 From: Matt Riedemann Date: Fri, 6 Nov 2015 10:26:14 -0800 Subject: [PATCH 0596/2941] Disable resize tests in Tempest if using cells with custom flavors By default, devstack creates it's own test flavors for Tempest runs. 
These are not in the cells API database since they are non-default for nova so any resize tests in Tempest with cells and these custom flavors fail. Configure Tempest to not run resize tests if using cells and custom flavors. This allows us to also clean up a bunch of the resize skips found in nova/devstack/tempest-dsvm-cells-rc. Change-Id: I20f46024e45e32c60275703a193a56ae8cfe7eca Closes-Bug: #1513925 --- lib/tempest | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/lib/tempest b/lib/tempest index 32630dbf59..691ad86a2d 100644 --- a/lib/tempest +++ b/lib/tempest @@ -377,6 +377,15 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute-feature-enabled shelve False # Cells doesn't support hot-plugging virtual interfaces. iniset $TEMPEST_CONFIG compute-feature-enabled interface_attach False + + if [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then + # Cells supports resize but does not currently work with devstack + # because of the custom flavors created for Tempest runs which are + # not in the cells database. + # TODO(mriedem): work on adding a nova-manage command to sync + # flavors into the cells database. + iniset $TEMPEST_CONFIG compute-feature-enabled resize False + fi fi # Network From 63cac536efa3474af40ea24603fca5e1d0a74e13 Mon Sep 17 00:00:00 2001 From: Jim Rollenhagen Date: Fri, 6 Nov 2015 12:37:32 -0800 Subject: [PATCH 0597/2941] Ironic: Explicitly allow DHCP ports This adds an iptables rule to allow ports 67 and 68. We see occassionally dropped DHCP packets, which may be causing PXE failures in ironic jobs. I'm not 100% confident this fixes the issue, however I don't think it can break anything and it rules out one theory. 
Change-Id: I4630afb6f010a4c2cb146a79264c480c64c6e4b7 Related-Bug: #1393099 --- lib/ironic | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/ironic b/lib/ironic index d786870165..016e639d03 100644 --- a/lib/ironic +++ b/lib/ironic @@ -672,6 +672,8 @@ function configure_iptables { # enable tftp natting for allowing connections to HOST_IP's tftp server sudo modprobe nf_conntrack_tftp sudo modprobe nf_nat_tftp + # explicitly allow DHCP - packets are occassionally being dropped here + sudo iptables -I INPUT -p udp --dport 67:68 --sport 67:68 -j ACCEPT || true # nodes boot from TFTP and callback to the API server listening on $HOST_IP sudo iptables -I INPUT -d $HOST_IP -p udp --dport 69 -j ACCEPT || true sudo iptables -I INPUT -d $HOST_IP -p tcp --dport $IRONIC_SERVICE_PORT -j ACCEPT || true From b8509f09d670460aa3e9e2b09cf60dbf8cdbb03b Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Mon, 9 Nov 2015 11:55:56 +1100 Subject: [PATCH 0598/2941] Really get the "general" packages We are specifying the argument to get_packages incorrectly, so we are not actually adding the packages in "general" to the list of packages. In most cases, this is hidden as other more specific plugins/services request their packages. However, as I2dafd32f211fcbc9fff53030d736d97a5f1bb2df shows, not always. 
I think this was uncovered by 5f8133caac097235ed4fe73d878df0ee907eb51c Change-Id: Ie1b8d09369281059d21da61b2725a457f708ae9e --- tools/install_prereqs.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh index a07e58d3e6..38452cd90f 100755 --- a/tools/install_prereqs.sh +++ b/tools/install_prereqs.sh @@ -61,7 +61,7 @@ export_proxy_variables # ================ # Install package requirements -PACKAGES=$(get_packages general $ENABLED_SERVICES) +PACKAGES=$(get_packages general,$ENABLED_SERVICES) PACKAGES="$PACKAGES $(get_plugin_packages)" if is_ubuntu && echo $PACKAGES | grep -q dkms ; then From a7e0b39a64f53f77f970c90d17f621be74f95215 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Mon, 9 Nov 2015 12:02:51 +1100 Subject: [PATCH 0599/2941] Move devlib packages into "general" The removal of the wheel caching code (Ia140dc34638d893b92f66d1ba20efd9522c5923b) removed the install of the "devlib" packages, which was being done with a call in tools/build_wheels.sh The idea of "devlibs" and "general" seems to be pretty much the same thing -- global build requirements. I have removed the unused devlibs files, and moved any missing packages into the "general" package install file. 
Change-Id: I8f34a164d6785a122394b42387d4221a7b447ae1 --- files/debs/devlibs | 7 ------- files/debs/general | 2 ++ files/rpms-suse/devlibs | 6 ------ files/rpms-suse/general | 5 +++++ files/rpms/devlibs | 8 -------- files/rpms/general | 7 +++++-- 6 files changed, 12 insertions(+), 23 deletions(-) delete mode 100644 files/debs/devlibs delete mode 100644 files/rpms-suse/devlibs delete mode 100644 files/rpms/devlibs diff --git a/files/debs/devlibs b/files/debs/devlibs deleted file mode 100644 index 0446ceb6b6..0000000000 --- a/files/debs/devlibs +++ /dev/null @@ -1,7 +0,0 @@ -libffi-dev # pyOpenSSL -libmysqlclient-dev # MySQL-python -libpq-dev # psycopg2 -libssl-dev # pyOpenSSL -libxml2-dev # lxml -libxslt1-dev # lxml -python-dev # pyOpenSSL diff --git a/files/debs/general b/files/debs/general index 80e81f5841..7bc90a6864 100644 --- a/files/debs/general +++ b/files/debs/general @@ -27,3 +27,5 @@ libxslt1-dev # lxml gettext # used for compiling message catalogs openjdk-7-jre-headless # NOPRIME pkg-config +libmysqlclient-dev # MySQL-python +libpq-dev # psycopg2 diff --git a/files/rpms-suse/devlibs b/files/rpms-suse/devlibs deleted file mode 100644 index 54d13a33e9..0000000000 --- a/files/rpms-suse/devlibs +++ /dev/null @@ -1,6 +0,0 @@ -libffi-devel # pyOpenSSL -libopenssl-devel # pyOpenSSL -libxslt-devel # lxml -postgresql-devel # psycopg2 -libmysqlclient-devel # MySQL-python -python-devel # pyOpenSSL diff --git a/files/rpms-suse/general b/files/rpms-suse/general index 42756d8fcc..3a7c4b56fd 100644 --- a/files/rpms-suse/general +++ b/files/rpms-suse/general @@ -21,3 +21,8 @@ tcpdump unzip wget net-tools +libffi-devel # pyOpenSSL +libxslt-devel # lxml +postgresql-devel # psycopg2 +libmysqlclient-devel # MySQL-python +python-devel # pyOpenSSL diff --git a/files/rpms/devlibs b/files/rpms/devlibs deleted file mode 100644 index 385ed3b84c..0000000000 --- a/files/rpms/devlibs +++ /dev/null @@ -1,8 +0,0 @@ -libffi-devel # pyOpenSSL -libxml2-devel # lxml -libxslt-devel # lxml 
-mariadb-devel # MySQL-python -openssl-devel # pyOpenSSL -postgresql-devel # psycopg2 -python-devel # pyOpenSSL -redhat-rpm-config # MySQL-python rhbz-1195207 f21 diff --git a/files/rpms/general b/files/rpms/general index c3f3de893a..2a45227e0f 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -10,8 +10,8 @@ openssh-server openssl openssl-devel # to rebuild pyOpenSSL if needed libffi-devel -libxml2-devel -libxslt-devel +libxml2-devel # lxml +libxslt-devel # lxml pkgconfig psmisc python-devel @@ -29,3 +29,6 @@ java-1.7.0-openjdk-headless # NOPRIME rhel7 java-1.8.0-openjdk-headless # NOPRIME f21,f22 pyOpenSSL # version in pip uses too much memory iptables-services # NOPRIME f21,f22 +mariadb-devel # MySQL-python +postgresql-devel # psycopg2 +redhat-rpm-config # MySQL-python rhbz-1195207 f21 From a5e4c0f279b6101436820dbb6b3da3d06131c12d Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Mon, 9 Nov 2015 12:21:10 +1100 Subject: [PATCH 0600/2941] Clear out some duplicate package dependencies I think these duplicate dependencies came in because we were not correctly always installing "general" packages (see Ie1b8d09369281059d21da61b2725a457f708ae9e) Most of these are just extras for the lxml dependencies; I added zlib devel to general for glance (seems pretty generic), and then that can go too, as all other packages are specified. 
Change-Id: I44b14ca15c64fad9daf1ac8d851704b02ea2eae0 --- files/debs/ceilometer-collector | 3 --- files/debs/general | 1 + files/debs/glance | 6 ------ files/debs/nova | 2 -- files/debs/tempest | 2 -- files/debs/trove | 1 - files/rpms/general | 1 + files/rpms/glance | 6 ------ files/rpms/keystone | 1 - files/rpms/tempest | 1 - files/rpms/trove | 1 - 11 files changed, 2 insertions(+), 23 deletions(-) delete mode 100644 files/debs/glance delete mode 100644 files/debs/tempest delete mode 100644 files/debs/trove delete mode 100644 files/rpms/glance delete mode 100644 files/rpms/tempest delete mode 100644 files/rpms/trove diff --git a/files/debs/ceilometer-collector b/files/debs/ceilometer-collector index f1b692ac71..94c82e0843 100644 --- a/files/debs/ceilometer-collector +++ b/files/debs/ceilometer-collector @@ -1,6 +1,3 @@ python-pymongo #NOPRIME mongodb-server #NOPRIME libnspr4-dev -pkg-config -libxml2-dev -libxslt-dev \ No newline at end of file diff --git a/files/debs/general b/files/debs/general index 7bc90a6864..58d1e8b3f5 100644 --- a/files/debs/general +++ b/files/debs/general @@ -29,3 +29,4 @@ openjdk-7-jre-headless # NOPRIME pkg-config libmysqlclient-dev # MySQL-python libpq-dev # psycopg2 +zlib1g-dev diff --git a/files/debs/glance b/files/debs/glance deleted file mode 100644 index 37877a85c2..0000000000 --- a/files/debs/glance +++ /dev/null @@ -1,6 +0,0 @@ -libmysqlclient-dev -libpq-dev -libssl-dev -libxml2-dev -libxslt1-dev -zlib1g-dev diff --git a/files/debs/nova b/files/debs/nova index d1678a7a6f..fa394e734b 100644 --- a/files/debs/nova +++ b/files/debs/nova @@ -8,8 +8,6 @@ libmysqlclient-dev mysql-server # NOPRIME python-mysqldb python-mysql.connector -libxml2-dev # needed for building lxml -libxslt1-dev gawk iptables ebtables diff --git a/files/debs/tempest b/files/debs/tempest deleted file mode 100644 index bb095297e0..0000000000 --- a/files/debs/tempest +++ /dev/null @@ -1,2 +0,0 @@ -libxml2-dev -libxslt1-dev diff --git a/files/debs/trove 
b/files/debs/trove deleted file mode 100644 index 96f8f29277..0000000000 --- a/files/debs/trove +++ /dev/null @@ -1 +0,0 @@ -libxslt1-dev diff --git a/files/rpms/general b/files/rpms/general index 2a45227e0f..eb479d2050 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -32,3 +32,4 @@ iptables-services # NOPRIME f21,f22 mariadb-devel # MySQL-python postgresql-devel # psycopg2 redhat-rpm-config # MySQL-python rhbz-1195207 f21 +zlib-devel diff --git a/files/rpms/glance b/files/rpms/glance deleted file mode 100644 index 479194f918..0000000000 --- a/files/rpms/glance +++ /dev/null @@ -1,6 +0,0 @@ -libxml2-devel -libxslt-devel -mysql-devel -openssl-devel -postgresql-devel -zlib-devel diff --git a/files/rpms/keystone b/files/rpms/keystone index 8074119fdb..7384150a4a 100644 --- a/files/rpms/keystone +++ b/files/rpms/keystone @@ -1,4 +1,3 @@ MySQL-python -libxslt-devel sqlite mod_ssl diff --git a/files/rpms/tempest b/files/rpms/tempest deleted file mode 100644 index e7bbd43cd6..0000000000 --- a/files/rpms/tempest +++ /dev/null @@ -1 +0,0 @@ -libxslt-devel diff --git a/files/rpms/trove b/files/rpms/trove deleted file mode 100644 index e7bbd43cd6..0000000000 --- a/files/rpms/trove +++ /dev/null @@ -1 +0,0 @@ -libxslt-devel From c00df207eed1d4dec808824d8e9dcd238e7d4e08 Mon Sep 17 00:00:00 2001 From: Sirushti Murugesan Date: Wed, 7 Oct 2015 15:06:51 +0530 Subject: [PATCH 0601/2941] Add a more accurate expression for obtaining (IPV6_)ROUTER_GW_IP neutron port-list returns a dictionary that's of random order in python 3. This expression sometimes returns a NULL value thus failing devstack. Add an expression that always returns a consistent ROUTER_GW_IP. 
Change-Id: Id23d9afda275051ca68bcba2dfd1b6e30f02c628 --- lib/neutron-legacy | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index c244e5470a..a74da2ccb7 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -1318,7 +1318,7 @@ function _neutron_configure_router_v4 { sudo ip addr add $ext_gw_ip/$cidr_len dev $ext_gw_interface sudo ip link set $ext_gw_interface up fi - ROUTER_GW_IP=`neutron port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F '"' -v subnet_id=$PUB_SUBNET_ID '$4 == subnet_id { print $8; }'` + ROUTER_GW_IP=`neutron port-list -c fixed_ips -c device_owner | grep router_gateway | awk -F'ip_address' '{ print $2 }' | cut -f3 -d\" | tr '\n' ' '` die_if_not_set $LINENO ROUTER_GW_IP "Failure retrieving ROUTER_GW_IP" sudo ip route replace $FIXED_RANGE via $ROUTER_GW_IP fi @@ -1349,7 +1349,7 @@ function _neutron_configure_router_v6 { sudo sysctl -w net.ipv6.conf.all.forwarding=1 # Configure and enable public bridge # Override global IPV6_ROUTER_GW_IP with the true value from neutron - IPV6_ROUTER_GW_IP=`neutron port-list -c fixed_ips | grep $ipv6_pub_subnet_id | awk -F '"' -v subnet_id=$ipv6_pub_subnet_id '$4 == subnet_id { print $8; }'` + IPV6_ROUTER_GW_IP=`neutron port-list -c fixed_ips | grep $ipv6_pub_subnet_id | awk -F'ip_address' '{ print $2 }' | cut -f3 -d\" | tr '\n' ' '` die_if_not_set $LINENO IPV6_ROUTER_GW_IP "Failure retrieving IPV6_ROUTER_GW_IP" if is_neutron_ovs_base_plugin; then From 7d515b5db8e98a54ff9c8c5211383f9f2c33b2db Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Mon, 9 Nov 2015 15:04:32 +1100 Subject: [PATCH 0602/2941] Add check for get_packages argument Add a quick check so we don't reintroduce bad arguments as in Ie1b8d09369281059d21da61b2725a457f708ae9e Change-Id: Ibebc71791f2743eef64d6f7c2596d54a73ea92aa --- functions-common | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/functions-common b/functions-common index 497bed26e6..e252139e78 100644 
--- a/functions-common +++ b/functions-common @@ -1075,6 +1075,10 @@ function get_packages { local file_to_parse="" local service="" + if [ $# -ne 1 ]; then + die $LINENO "get_packages takes a single, comma-separated argument" + fi + if [[ -z "$package_dir" ]]; then echo "No package directory supplied" return 1 From c416d8b94f473908a82f4e842c768927b62fc20a Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Mon, 9 Nov 2015 15:20:22 +1100 Subject: [PATCH 0603/2941] Sort rpm/deb files alphabetically This is rather trivial, but it makes comparing the files much easier Change-Id: I01e42defbf778626afd8dd457f93f0b02dd1a19d --- files/debs/ceilometer-collector | 4 +-- files/debs/cinder | 6 ++--- files/debs/general | 40 ++++++++++++++-------------- files/debs/ironic | 2 +- files/debs/keystone | 8 +++--- files/debs/ldap | 2 +- files/debs/n-cpu | 10 +++---- files/debs/neutron | 14 +++++----- files/debs/nova | 34 +++++++++++------------ files/debs/zookeeper | 2 +- files/rpms-suse/ceilometer-collector | 2 +- files/rpms-suse/ceph | 2 +- files/rpms-suse/cinder | 8 +++--- files/rpms-suse/general | 12 ++++----- files/rpms-suse/horizon | 2 +- files/rpms-suse/n-api | 2 +- files/rpms-suse/n-cpu | 6 ++--- files/rpms-suse/neutron | 2 +- files/rpms-suse/nova | 6 ++--- files/rpms-suse/openvswitch | 2 +- files/rpms/ceilometer-collector | 4 +-- files/rpms/ceph | 2 +- files/rpms/cinder | 6 ++--- files/rpms/dstat | 2 +- files/rpms/general | 28 +++++++++---------- files/rpms/horizon | 2 +- files/rpms/keystone | 2 +- files/rpms/ldap | 2 +- files/rpms/n-cpu | 8 +++--- files/rpms/neutron | 4 +-- files/rpms/nova | 8 +++--- files/rpms/swift | 2 +- files/rpms/zookeeper | 2 +- 33 files changed, 119 insertions(+), 119 deletions(-) diff --git a/files/debs/ceilometer-collector b/files/debs/ceilometer-collector index 94c82e0843..d1e9eef3bb 100644 --- a/files/debs/ceilometer-collector +++ b/files/debs/ceilometer-collector @@ -1,3 +1,3 @@ -python-pymongo #NOPRIME -mongodb-server #NOPRIME libnspr4-dev 
+mongodb-server #NOPRIME +python-pymongo #NOPRIME diff --git a/files/debs/cinder b/files/debs/cinder index 51908eb27b..48b8d0f7d3 100644 --- a/files/debs/cinder +++ b/files/debs/cinder @@ -1,6 +1,6 @@ -tgt # NOPRIME -lvm2 -qemu-utils libpq-dev +lvm2 open-iscsi open-iscsi-utils # Deprecated since quantal dist:precise +qemu-utils +tgt # NOPRIME diff --git a/files/debs/general b/files/debs/general index 58d1e8b3f5..9b2715685e 100644 --- a/files/debs/general +++ b/files/debs/general @@ -1,32 +1,32 @@ +bc bridge-utils -screen -unzip -wget -psmisc -gcc +curl g++ +gcc +gettext # used for compiling message catalogs git graphviz # needed for docs -lsof # useful when debugging -openssh-server -openssl iputils-ping -wget -curl -tcpdump -tar -python-dev -python2.7 -python-gdbm # needed for testr -bc -libyaml-dev libffi-dev # for pyOpenSSL +libmysqlclient-dev # MySQL-python +libpq-dev # psycopg2 libssl-dev # for pyOpenSSL libxml2-dev # lxml libxslt1-dev # lxml -gettext # used for compiling message catalogs +libyaml-dev +lsof # useful when debugging openjdk-7-jre-headless # NOPRIME +openssh-server +openssl pkg-config -libmysqlclient-dev # MySQL-python -libpq-dev # psycopg2 +psmisc +python2.7 +python-dev +python-gdbm # needed for testr +screen +tar +tcpdump +unzip +wget +wget zlib1g-dev diff --git a/files/debs/ironic b/files/debs/ironic index 0a906dbffc..4d5a6aa6b7 100644 --- a/files/debs/ironic +++ b/files/debs/ironic @@ -6,8 +6,8 @@ libguestfs0 libvirt-bin open-iscsi openssh-client -openvswitch-switch openvswitch-datapath-dkms +openvswitch-switch python-libguestfs python-libvirt qemu diff --git a/files/debs/keystone b/files/debs/keystone index f5816b59de..0795167047 100644 --- a/files/debs/keystone +++ b/files/debs/keystone @@ -1,6 +1,6 @@ -sqlite3 -python-mysqldb -python-mysql.connector +libkrb5-dev libldap2-dev libsasl2-dev -libkrb5-dev +python-mysql.connector +python-mysqldb +sqlite3 diff --git a/files/debs/ldap b/files/debs/ldap index 26f7aeffe3..aa3a934d95 100644 --- 
a/files/debs/ldap +++ b/files/debs/ldap @@ -1,3 +1,3 @@ ldap-utils -slapd python-ldap +slapd diff --git a/files/debs/n-cpu b/files/debs/n-cpu index ffc947a36d..0da57ee047 100644 --- a/files/debs/n-cpu +++ b/files/debs/n-cpu @@ -1,8 +1,8 @@ -qemu-utils +cryptsetup +genisoimage lvm2 # NOPRIME open-iscsi -genisoimage -sysfsutils -sg3-utils python-guestfs # NOPRIME -cryptsetup +qemu-utils +sg3-utils +sysfsutils diff --git a/files/debs/neutron b/files/debs/neutron index b5a457e482..85145d3654 100644 --- a/files/debs/neutron +++ b/files/debs/neutron @@ -1,18 +1,18 @@ acl +dnsmasq-base +dnsmasq-utils # for dhcp_release only available in dist:precise ebtables iptables -iputils-ping iputils-arping +iputils-ping libmysqlclient-dev mysql-server #NOPRIME -sudo postgresql-server-dev-all -python-mysqldb python-mysql.connector -dnsmasq-base -dnsmasq-utils # for dhcp_release only available in dist:precise +python-mysqldb rabbitmq-server # NOPRIME -sqlite3 -vlan radvd # NOPRIME +sqlite3 +sudo uuid-runtime +vlan diff --git a/files/debs/nova b/files/debs/nova index fa394e734b..fe57fc4b2a 100644 --- a/files/debs/nova +++ b/files/debs/nova @@ -1,26 +1,26 @@ +conntrack +curl dnsmasq-base dnsmasq-utils # for dhcp_release -conntrack -kpartx -parted -iputils-arping -libmysqlclient-dev -mysql-server # NOPRIME -python-mysqldb -python-mysql.connector +ebtables gawk +genisoimage # required for config_drive iptables -ebtables -sqlite3 -sudo -qemu-kvm # NOPRIME -qemu # dist:wheezy,jessie NOPRIME +iputils-arping +kpartx +libjs-jquery-tablesorter # Needed for coverage html reports +libmysqlclient-dev libvirt-bin # NOPRIME libvirt-dev # NOPRIME +mysql-server # NOPRIME +parted pm-utils -libjs-jquery-tablesorter # Needed for coverage html reports -vlan -curl -genisoimage # required for config_drive +python-mysql.connector +python-mysqldb +qemu # dist:wheezy,jessie NOPRIME +qemu-kvm # NOPRIME rabbitmq-server # NOPRIME socat # used by ajaxterm +sqlite3 +sudo +vlan diff --git a/files/debs/zookeeper 
b/files/debs/zookeeper index 66227f7e31..f41b559007 100644 --- a/files/debs/zookeeper +++ b/files/debs/zookeeper @@ -1 +1 @@ -zookeeperd \ No newline at end of file +zookeeperd diff --git a/files/rpms-suse/ceilometer-collector b/files/rpms-suse/ceilometer-collector index 5e4dfcc35e..fc75ffad63 100644 --- a/files/rpms-suse/ceilometer-collector +++ b/files/rpms-suse/ceilometer-collector @@ -1,3 +1,3 @@ -# Not available in openSUSE main repositories, but can be fetched from OBS # (devel:languages:python and server:database projects) mongodb +# Not available in openSUSE main repositories, but can be fetched from OBS diff --git a/files/rpms-suse/ceph b/files/rpms-suse/ceph index 8d465000e1..8c4955df90 100644 --- a/files/rpms-suse/ceph +++ b/files/rpms-suse/ceph @@ -1,3 +1,3 @@ ceph # NOPRIME -xfsprogs lsb +xfsprogs diff --git a/files/rpms-suse/cinder b/files/rpms-suse/cinder index 3fd03cc9be..56b1bb536a 100644 --- a/files/rpms-suse/cinder +++ b/files/rpms-suse/cinder @@ -1,6 +1,6 @@ lvm2 -tgt # NOPRIME -qemu-tools -python-devel -postgresql-devel open-iscsi +postgresql-devel +python-devel +qemu-tools +tgt # NOPRIME diff --git a/files/rpms-suse/general b/files/rpms-suse/general index 3a7c4b56fd..651243d274 100644 --- a/files/rpms-suse/general +++ b/files/rpms-suse/general @@ -8,21 +8,21 @@ gcc-c++ git-core graphviz # docs iputils +libffi-devel # pyOpenSSL +libmysqlclient-devel # MySQL-python libopenssl-devel # to rebuild pyOpenSSL if needed +libxslt-devel # lxml lsof # useful when debugging make +net-tools openssh openssl +postgresql-devel # psycopg2 psmisc python-cmd2 # dist:opensuse-12.3 +python-devel # pyOpenSSL screen tar tcpdump unzip wget -net-tools -libffi-devel # pyOpenSSL -libxslt-devel # lxml -postgresql-devel # psycopg2 -libmysqlclient-devel # MySQL-python -python-devel # pyOpenSSL diff --git a/files/rpms-suse/horizon b/files/rpms-suse/horizon index 77f7c34b31..753ea76e04 100644 --- a/files/rpms-suse/horizon +++ b/files/rpms-suse/horizon @@ -1,2 +1,2 @@ 
-apache2 # NOPRIME apache2-mod_wsgi # NOPRIME +apache2 # NOPRIME diff --git a/files/rpms-suse/n-api b/files/rpms-suse/n-api index 6f59e603b2..af5ac2fc54 100644 --- a/files/rpms-suse/n-api +++ b/files/rpms-suse/n-api @@ -1,2 +1,2 @@ -python-dateutil fping +python-dateutil diff --git a/files/rpms-suse/n-cpu b/files/rpms-suse/n-cpu index b3a468d2d8..29bd31b365 100644 --- a/files/rpms-suse/n-cpu +++ b/files/rpms-suse/n-cpu @@ -1,7 +1,7 @@ -# Stuff for diablo volumes +cryptsetup genisoimage lvm2 open-iscsi -sysfsutils sg3_utils -cryptsetup +# Stuff for diablo volumes +sysfsutils diff --git a/files/rpms-suse/neutron b/files/rpms-suse/neutron index 133979911e..4b0eefaaa5 100644 --- a/files/rpms-suse/neutron +++ b/files/rpms-suse/neutron @@ -7,7 +7,7 @@ iputils mariadb # NOPRIME postgresql-devel rabbitmq-server # NOPRIME +radvd # NOPRIME sqlite3 sudo vlan -radvd # NOPRIME diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova index 039456fc1b..2f3ad21a63 100644 --- a/files/rpms-suse/nova +++ b/files/rpms-suse/nova @@ -1,7 +1,7 @@ +conntrack-tools curl dnsmasq dnsmasq-utils # dist:opensuse-12.3,opensuse-13.1 -conntrack-tools ebtables gawk genisoimage # required for config_drive @@ -9,14 +9,14 @@ iptables iputils kpartx kvm # NOPRIME -# qemu as fallback if kvm cannot be used -qemu # NOPRIME libvirt # NOPRIME libvirt-python # NOPRIME mariadb # NOPRIME parted polkit python-devel +# qemu as fallback if kvm cannot be used +qemu # NOPRIME rabbitmq-server # NOPRIME socat sqlite3 diff --git a/files/rpms-suse/openvswitch b/files/rpms-suse/openvswitch index edfb4d21aa..53f8bb22cf 100644 --- a/files/rpms-suse/openvswitch +++ b/files/rpms-suse/openvswitch @@ -1,3 +1,3 @@ + openvswitch openvswitch-switch - diff --git a/files/rpms/ceilometer-collector b/files/rpms/ceilometer-collector index b139ed2b6b..a8b81187ed 100644 --- a/files/rpms/ceilometer-collector +++ b/files/rpms/ceilometer-collector @@ -1,3 +1,3 @@ -selinux-policy-targeted -mongodb-server #NOPRIME mongodb # NOPRIME 
+mongodb-server #NOPRIME +selinux-policy-targeted diff --git a/files/rpms/ceph b/files/rpms/ceph index 5483735741..64befc5f00 100644 --- a/files/rpms/ceph +++ b/files/rpms/ceph @@ -1,3 +1,3 @@ ceph # NOPRIME -xfsprogs redhat-lsb-core +xfsprogs diff --git a/files/rpms/cinder b/files/rpms/cinder index a88503b8bc..f28f04d8ba 100644 --- a/files/rpms/cinder +++ b/files/rpms/cinder @@ -1,5 +1,5 @@ +iscsi-initiator-utils lvm2 -scsi-target-utils # NOPRIME -qemu-img postgresql-devel -iscsi-initiator-utils +qemu-img +scsi-target-utils # NOPRIME diff --git a/files/rpms/dstat b/files/rpms/dstat index 8a8f8fe737..2b643b8b1b 100644 --- a/files/rpms/dstat +++ b/files/rpms/dstat @@ -1 +1 @@ -dstat \ No newline at end of file +dstat diff --git a/files/rpms/general b/files/rpms/general index eb479d2050..cfd9479600 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -1,35 +1,35 @@ +bc bridge-utils curl dbus euca2ools # only for testing client gcc gcc-c++ +gettext # used for compiling message catalogs git-core graphviz # needed only for docs -openssh-server -openssl -openssl-devel # to rebuild pyOpenSSL if needed +iptables-services # NOPRIME f21,f22 +java-1.7.0-openjdk-headless # NOPRIME rhel7 +java-1.8.0-openjdk-headless # NOPRIME f21,f22 libffi-devel libxml2-devel # lxml libxslt-devel # lxml +libyaml-devel +mariadb-devel # MySQL-python +net-tools +openssh-server +openssl +openssl-devel # to rebuild pyOpenSSL if needed pkgconfig +postgresql-devel # psycopg2 psmisc +pyOpenSSL # version in pip uses too much memory python-devel +redhat-rpm-config # MySQL-python rhbz-1195207 f21 screen tar tcpdump unzip wget which -bc -libyaml-devel -gettext # used for compiling message catalogs -net-tools -java-1.7.0-openjdk-headless # NOPRIME rhel7 -java-1.8.0-openjdk-headless # NOPRIME f21,f22 -pyOpenSSL # version in pip uses too much memory -iptables-services # NOPRIME f21,f22 -mariadb-devel # MySQL-python -postgresql-devel # psycopg2 -redhat-rpm-config # MySQL-python rhbz-1195207 f21 
zlib-devel diff --git a/files/rpms/horizon b/files/rpms/horizon index b2cf0ded6f..aeb2cb5c96 100644 --- a/files/rpms/horizon +++ b/files/rpms/horizon @@ -1,5 +1,5 @@ Django httpd # NOPRIME mod_wsgi # NOPRIME -pyxattr pcre-devel # pyScss +pyxattr diff --git a/files/rpms/keystone b/files/rpms/keystone index 7384150a4a..c01c261e19 100644 --- a/files/rpms/keystone +++ b/files/rpms/keystone @@ -1,3 +1,3 @@ +mod_ssl MySQL-python sqlite -mod_ssl diff --git a/files/rpms/ldap b/files/rpms/ldap index d89c4cf8c1..d5b8fa4374 100644 --- a/files/rpms/ldap +++ b/files/rpms/ldap @@ -1,2 +1,2 @@ -openldap-servers openldap-clients +openldap-servers diff --git a/files/rpms/n-cpu b/files/rpms/n-cpu index 81278b30bb..7773b0402d 100644 --- a/files/rpms/n-cpu +++ b/files/rpms/n-cpu @@ -1,7 +1,7 @@ -# Stuff for diablo volumes +cryptsetup +genisoimage iscsi-initiator-utils lvm2 -genisoimage -sysfsutils sg3_utils -cryptsetup +# Stuff for diablo volumes +sysfsutils diff --git a/files/rpms/neutron b/files/rpms/neutron index 29851bea9b..b3f79ed146 100644 --- a/files/rpms/neutron +++ b/files/rpms/neutron @@ -1,4 +1,3 @@ -MySQL-python acl dnsmasq # for q-dhcp dnsmasq-utils # for dhcp_release @@ -7,10 +6,11 @@ iptables iputils mysql-connector-python mysql-devel +MySQL-python mysql-server # NOPRIME openvswitch # NOPRIME postgresql-devel rabbitmq-server # NOPRIME +radvd # NOPRIME sqlite sudo -radvd # NOPRIME diff --git a/files/rpms/nova b/files/rpms/nova index 6eeb6230a9..e70f138a3c 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -1,8 +1,7 @@ -MySQL-python +conntrack-tools curl dnsmasq # for nova-network dnsmasq-utils # for dhcp_release -conntrack-tools ebtables gawk genisoimage # required for config_drive @@ -10,18 +9,19 @@ iptables iputils kpartx kvm # NOPRIME -qemu-kvm # NOPRIME libvirt-bin # NOPRIME libvirt-devel # NOPRIME libvirt-python # NOPRIME libxml2-python -numpy # needed by websockify for spice console m2crypto mysql-connector-python mysql-devel +MySQL-python mysql-server # NOPRIME 
+numpy # needed by websockify for spice console parted polkit +qemu-kvm # NOPRIME rabbitmq-server # NOPRIME sqlite sudo diff --git a/files/rpms/swift b/files/rpms/swift index 1bf57cc3a2..f56a81b0d1 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -1,7 +1,7 @@ curl memcached pyxattr +rsync-daemon # dist:f22,f23 sqlite xfsprogs xinetd -rsync-daemon # dist:f22,f23 diff --git a/files/rpms/zookeeper b/files/rpms/zookeeper index c0d1c3066d..1bfac538a2 100644 --- a/files/rpms/zookeeper +++ b/files/rpms/zookeeper @@ -1 +1 @@ -zookeeper \ No newline at end of file +zookeeper From 3dac869f80e90795efc2127b713cc5282f4eabb5 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Mon, 9 Nov 2015 17:26:24 +1100 Subject: [PATCH 0604/2941] Cleanup some of the deb/rpm installs python-devel and the mysql/postgresql client dev-libs should all be installed globally via the "general" installs; no need to installs them separately Change-Id: I91a9ace2e62a891634dbb4635ab2ad8c8dc59f91 --- files/debs/cinder | 1 - files/rpms-suse/cinder | 2 -- files/rpms-suse/glance | 1 - files/rpms-suse/keystone | 1 - files/rpms-suse/neutron | 1 - files/rpms-suse/nova | 1 - files/rpms-suse/swift | 1 - files/rpms/cinder | 1 - files/rpms/neutron | 1 - 9 files changed, 10 deletions(-) delete mode 100644 files/rpms-suse/glance diff --git a/files/debs/cinder b/files/debs/cinder index 48b8d0f7d3..3595e011da 100644 --- a/files/debs/cinder +++ b/files/debs/cinder @@ -1,4 +1,3 @@ -libpq-dev lvm2 open-iscsi open-iscsi-utils # Deprecated since quantal dist:precise diff --git a/files/rpms-suse/cinder b/files/rpms-suse/cinder index 56b1bb536a..189a232fa7 100644 --- a/files/rpms-suse/cinder +++ b/files/rpms-suse/cinder @@ -1,6 +1,4 @@ lvm2 open-iscsi -postgresql-devel -python-devel qemu-tools tgt # NOPRIME diff --git a/files/rpms-suse/glance b/files/rpms-suse/glance deleted file mode 100644 index bf512de575..0000000000 --- a/files/rpms-suse/glance +++ /dev/null @@ -1 +0,0 @@ -python-devel diff --git 
a/files/rpms-suse/keystone b/files/rpms-suse/keystone index c838b413c3..46832c786d 100644 --- a/files/rpms-suse/keystone +++ b/files/rpms-suse/keystone @@ -1,4 +1,3 @@ cyrus-sasl-devel openldap2-devel -python-devel sqlite3 diff --git a/files/rpms-suse/neutron b/files/rpms-suse/neutron index 4b0eefaaa5..e9abc6eca6 100644 --- a/files/rpms-suse/neutron +++ b/files/rpms-suse/neutron @@ -5,7 +5,6 @@ ebtables iptables iputils mariadb # NOPRIME -postgresql-devel rabbitmq-server # NOPRIME radvd # NOPRIME sqlite3 diff --git a/files/rpms-suse/nova b/files/rpms-suse/nova index 2f3ad21a63..ae115d2138 100644 --- a/files/rpms-suse/nova +++ b/files/rpms-suse/nova @@ -14,7 +14,6 @@ libvirt-python # NOPRIME mariadb # NOPRIME parted polkit -python-devel # qemu as fallback if kvm cannot be used qemu # NOPRIME rabbitmq-server # NOPRIME diff --git a/files/rpms-suse/swift b/files/rpms-suse/swift index 6a824f944f..52e0a990e7 100644 --- a/files/rpms-suse/swift +++ b/files/rpms-suse/swift @@ -1,6 +1,5 @@ curl memcached -python-devel sqlite3 xfsprogs xinetd diff --git a/files/rpms/cinder b/files/rpms/cinder index f28f04d8ba..0274642fd6 100644 --- a/files/rpms/cinder +++ b/files/rpms/cinder @@ -1,5 +1,4 @@ iscsi-initiator-utils lvm2 -postgresql-devel qemu-img scsi-target-utils # NOPRIME diff --git a/files/rpms/neutron b/files/rpms/neutron index b3f79ed146..9683475d29 100644 --- a/files/rpms/neutron +++ b/files/rpms/neutron @@ -9,7 +9,6 @@ mysql-devel MySQL-python mysql-server # NOPRIME openvswitch # NOPRIME -postgresql-devel rabbitmq-server # NOPRIME radvd # NOPRIME sqlite From 7ddf6741d24485aa40de122f4bfdf4cd55e7cfad Mon Sep 17 00:00:00 2001 From: "Sean M. Collins" Date: Mon, 9 Nov 2015 08:08:53 -0500 Subject: [PATCH 0605/2941] Neutron-legacy: Remove LINUXNET_VIF_DRIVER option A value is never assigned, and it ends up in the nova.conf file as: linuxnet_interface_driver = So, let's delete it. 
Change-Id: Ibc270ce6ee622eee871df1f8c11f21e8be8280ee --- lib/neutron-legacy | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 4e51425ffc..096869929b 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -488,7 +488,6 @@ function create_nova_conf_neutron { # optionally set options in nova_conf neutron_plugin_create_nova_conf - iniset $NOVA_CONF DEFAULT linuxnet_interface_driver "$LINUXNET_VIF_DRIVER" if is_service_enabled q-meta; then iniset $NOVA_CONF neutron service_metadata_proxy "True" fi From 5c5e08669cb7539886cb3477fc7c4c7deb701f50 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Mon, 9 Nov 2015 14:08:15 -0500 Subject: [PATCH 0606/2941] loop all ebtables tables ebtables has 3 built in tables, if we don't call them out we only get 'filter' (per man page). Change-Id: I52360cbb3b910cb492b61e2314848cc29dcd8266 --- tools/worlddump.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/worlddump.py b/tools/worlddump.py index 1b337a9a83..97e4d949c7 100755 --- a/tools/worlddump.py +++ b/tools/worlddump.py @@ -86,8 +86,10 @@ def disk_space(): def ebtables_dump(): + tables = ['filter', 'nat', 'broute'] _header("EB Tables Dump") - _dump_cmd("sudo ebtables -L") + for table in tables: + _dump_cmd("sudo ebtables -t %s -L" % table) def iptables_dump(): From 8a3b7d424d8edf53d0560db48247e6bca11176ee Mon Sep 17 00:00:00 2001 From: John Davidge Date: Tue, 7 Jul 2015 11:10:54 +0100 Subject: [PATCH 0607/2941] Fix stack failure when default subnetpool is set Currently stack.sh will fail if a value is set for default_ipv4_subnet_pool and/or default_ipv6_subnet_pool in neutron.conf. This is because setting either of these values overrides the default behaviour of using the implicit (none) subnetpool for subnet creation, and the subnetpools specified in neutron.conf have not been created at the time of the devstack calls to subnet-create. 
This patch fixes the failure by specifying subnetpool = None in calls to subnet-create, so that neutron will behave as devstack expects. This parameter will no longer be required once these configuration options are removed in the OpenStack N release, but will be required for compatibility with Kilo, Liberty, and Mitaka. Change-Id: I29b2d62a022b43f6623b127af2ca303f9de847b0 Closes-Bug: #1472200 --- exercises/neutron-adv-test.sh | 2 +- lib/neutron-legacy | 8 ++++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/exercises/neutron-adv-test.sh b/exercises/neutron-adv-test.sh index a8fbd86473..9bcb7669b7 100755 --- a/exercises/neutron-adv-test.sh +++ b/exercises/neutron-adv-test.sh @@ -235,7 +235,7 @@ function create_network { local NET_ID NET_ID=$(neutron net-create --tenant-id $TENANT_ID $NET_NAME $EXTRA| grep ' id ' | awk '{print $4}' ) die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $TENANT_ID $NET_NAME $EXTRA" - neutron subnet-create --ip-version 4 --tenant-id $TENANT_ID --gateway $GATEWAY $NET_ID $CIDR + neutron subnet-create --ip-version 4 --tenant-id $TENANT_ID --gateway $GATEWAY --subnetpool None $NET_ID $CIDR neutron_debug_admin probe-create --device-owner compute $NET_ID source $TOP_DIR/openrc demo demo } diff --git a/lib/neutron-legacy b/lib/neutron-legacy index c244e5470a..0e7ce26674 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -542,12 +542,12 @@ function create_neutron_initial_network { die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $TENANT_ID" if [[ "$IP_VERSION" =~ 4.* ]]; then - SUBNET_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --name $PROVIDER_SUBNET_NAME --gateway $NETWORK_GATEWAY $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) + SUBNET_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 4 ${ALLOCATION_POOL:+--allocation-pool $ALLOCATION_POOL} --name $PROVIDER_SUBNET_NAME --gateway 
$NETWORK_GATEWAY --subnetpool None $NET_ID $FIXED_RANGE | grep ' id ' | get_field 2) die_if_not_set $LINENO SUBNET_ID "Failure creating SUBNET_ID for $PROVIDER_SUBNET_NAME $TENANT_ID" fi if [[ "$IP_VERSION" =~ .*6 ]]; then - SUBNET_V6_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 6 --ipv6-address-mode $IPV6_ADDRESS_MODE --gateway $V6_NETWORK_GATEWAY --name $PROVIDER_SUBNET_NAME_V6 $NET_ID $FIXED_RANGE_V6 | grep 'id' | get_field 2) + SUBNET_V6_ID=$(neutron subnet-create --tenant_id $TENANT_ID --ip_version 6 --ipv6-address-mode $IPV6_ADDRESS_MODE --gateway $V6_NETWORK_GATEWAY --name $PROVIDER_SUBNET_NAME_V6 --subnetpool_id None $NET_ID $FIXED_RANGE_V6 | grep 'id' | get_field 2) die_if_not_set $LINENO SUBNET_V6_ID "Failure creating SUBNET_V6_ID for $PROVIDER_SUBNET_NAME_V6 $TENANT_ID" fi @@ -1236,6 +1236,7 @@ function _neutron_create_private_subnet_v4 { subnet_params+="--ip_version 4 " subnet_params+="--gateway $NETWORK_GATEWAY " subnet_params+="--name $PRIVATE_SUBNET_NAME " + subnet_params+="--subnetpool None " subnet_params+="$NET_ID $FIXED_RANGE" local subnet_id subnet_id=$(neutron subnet-create $subnet_params | grep ' id ' | get_field 2) @@ -1252,6 +1253,7 @@ function _neutron_create_private_subnet_v6 { subnet_params+="--ip_version 6 " subnet_params+="--gateway $IPV6_PRIVATE_NETWORK_GATEWAY " subnet_params+="--name $IPV6_PRIVATE_SUBNET_NAME " + subnet_params+="--subnetpool None " subnet_params+="$NET_ID $FIXED_RANGE_V6 $ipv6_modes" local ipv6_subnet_id ipv6_subnet_id=$(neutron subnet-create $subnet_params | grep ' id ' | get_field 2) @@ -1265,6 +1267,7 @@ function _neutron_create_public_subnet_v4 { subnet_params+="${Q_FLOATING_ALLOCATION_POOL:+--allocation-pool $Q_FLOATING_ALLOCATION_POOL} " subnet_params+="--gateway $PUBLIC_NETWORK_GATEWAY " subnet_params+="--name $PUBLIC_SUBNET_NAME " + subnet_params+="--subnetpool None " subnet_params+="$EXT_NET_ID $FLOATING_RANGE " subnet_params+="-- --enable_dhcp=False" local id_and_ext_gw_ip @@ -1278,6 
+1281,7 @@ function _neutron_create_public_subnet_v6 { local subnet_params="--ip_version 6 " subnet_params+="--gateway $IPV6_PUBLIC_NETWORK_GATEWAY " subnet_params+="--name $IPV6_PUBLIC_SUBNET_NAME " + subnet_params+="--subnetpool None " subnet_params+="$EXT_NET_ID $IPV6_PUBLIC_RANGE " subnet_params+="-- --enable_dhcp=False" local ipv6_id_and_ext_gw_ip From 04e73e17e8c536abb1ea86e7b0d037aef543da37 Mon Sep 17 00:00:00 2001 From: Jordan Pittier Date: Tue, 10 Nov 2015 18:58:11 +0100 Subject: [PATCH 0608/2941] On Ubuntu: don't start Zookeeper twice On Ubuntu, if the Zookeeper service is already running, attempting to start it again fails with non-zero exit code. This patch detects whether ZK is already started before trying to start it. Change-Id: If1257152de01fe5fe0351fdbb538bce083edbec0 Closes-Bug: #1513741 --- lib/zookeeper | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/lib/zookeeper b/lib/zookeeper index e62ba8ada8..6637d52bca 100644 --- a/lib/zookeeper +++ b/lib/zookeeper @@ -69,7 +69,12 @@ function install_zookeeper { # start_zookeeper() - Start running processes, including screen function start_zookeeper { - start_service zookeeper + # Starting twice Zookeeper on Ubuntu exits with error code 1. See LP#1513741 + # Match both systemd and sysvinit output + local running="(active \(running\)|start/running)" + if ! is_ubuntu || ! sudo /usr/sbin/service zookeeper status | egrep -q "$running"; then + start_service zookeeper + fi } # stop_zookeeper() - Stop running processes (non-screen) From 536b8c1d2cf9d2523dc60f74190ef566a8c3fc4b Mon Sep 17 00:00:00 2001 From: Tony Breeds Date: Mon, 9 Nov 2015 10:05:37 +1100 Subject: [PATCH 0609/2941] Allow screen without logging to disk In some niche setups it is desirable to run OpenStack services under screen, but undesirable to automatically keep a persistent log from each service. Add a new variable SCREEN_IS_LOGGING that controls if screen logs each window to disk automatically. 
Ideally screen itself would be configured to log but just not activate. This isn't possible with the screerc syntax. Temporary logging can still be used by a developer with: C-a : logfile foo C-a : log on Change-Id: I2a3abf15dea95ae99ddbdfe1309382df601b7d93 --- functions-common | 14 +++++++++----- stackrc | 10 ++++++++++ 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/functions-common b/functions-common index ca0543da7d..ba3c79126d 100644 --- a/functions-common +++ b/functions-common @@ -1366,7 +1366,7 @@ function run_process { # Helper to launch a process in a named screen # Uses globals ``CURRENT_LOG_TIME``, ```LOGDIR``, ``SCREEN_LOGDIR``, `SCREEN_NAME``, -# ``SERVICE_DIR``, ``USE_SCREEN`` +# ``SERVICE_DIR``, ``USE_SCREEN``, ``SCREEN_IS_LOGGING`` # screen_process name "command-line" [group] # Run a command in a shell in a screen window, if an optional group # is provided, use sg to set the group of the command. @@ -1386,8 +1386,12 @@ function screen_process { echo "SCREEN_LOGDIR: $SCREEN_LOGDIR" echo "log: $real_logfile" if [[ -n ${LOGDIR} ]]; then - screen -S $SCREEN_NAME -p $name -X logfile "$real_logfile" - screen -S $SCREEN_NAME -p $name -X log on + if [[ "$SCREEN_IS_LOGGING" == "True" ]]; then + screen -S $SCREEN_NAME -p $name -X logfile "$real_logfile" + screen -S $SCREEN_NAME -p $name -X log on + fi + # If logging isn't active then avoid a broken symlink + touch "$real_logfile" ln -sf "$real_logfile" ${LOGDIR}/${name}.log if [[ -n ${SCREEN_LOGDIR} ]]; then # Drop the backward-compat symlink @@ -1426,7 +1430,7 @@ function screen_process { } # Screen rc file builder -# Uses globals ``SCREEN_NAME``, ``SCREENRC`` +# Uses globals ``SCREEN_NAME``, ``SCREENRC``, ``SCREEN_IS_LOGGING`` # screen_rc service "command-line" function screen_rc { SCREEN_NAME=${SCREEN_NAME:-stack} @@ -1446,7 +1450,7 @@ function screen_rc { echo "screen -t $1 bash" >> $SCREENRC echo "stuff \"$2$NL\"" >> $SCREENRC - if [[ -n ${LOGDIR} ]]; then + if [[ -n ${LOGDIR} ]] && [[ 
"$SCREEN_IS_LOGGING" == "True" ]]; then echo "logfile ${LOGDIR}/${1}.log.${CURRENT_LOG_TIME}" >>$SCREENRC echo "log on" >>$SCREENRC fi diff --git a/stackrc b/stackrc index 3033b27580..76a5756dde 100644 --- a/stackrc +++ b/stackrc @@ -103,6 +103,16 @@ HORIZON_APACHE_ROOT="/dashboard" # be disabled for automated testing by setting this value to False. USE_SCREEN=True +# When using screen, should we keep a log file on disk? You might +# want this False if you have a long-running setup where verbose logs +# can fill-up the host. +# XXX: Ideally screen itself would be configured to log but just not +# activate. This isn't possible with the screerc syntax. Temporary +# logging can still be used by a developer with: +# C-a : logfile foo +# C-a : log on +SCREEN_IS_LOGGING=$(trueorfalse True SCREEN_IS_LOGGING) + # Passwords generated by interactive devstack runs if [[ -r $RC_DIR/.localrc.password ]]; then source $RC_DIR/.localrc.password From f6cee0fa2041a9188ad5e3e24e87ebd513729da8 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Fri, 6 Nov 2015 18:18:57 +0100 Subject: [PATCH 0610/2941] Install kernel-modules package on fedora Since f21 the kernel modules are split to multiple packages and by default just the core modules gets installed. nova requires iscsi_tcp module for attaching a volume from any iscsi source (default cinder lvm setup). On el7 it is not required. Change-Id: I31705720ade5defd1b6d4b95bc51c2a11a5f0364 Related-Bug: #1429504 --- files/rpms/nova | 1 + 1 file changed, 1 insertion(+) diff --git a/files/rpms/nova b/files/rpms/nova index e70f138a3c..00e759636e 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -7,6 +7,7 @@ gawk genisoimage # required for config_drive iptables iputils +kernel-modules # dist:f21,f22,f23 kpartx kvm # NOPRIME libvirt-bin # NOPRIME From 59e86a3aae3ca49ae3a400c1fcc49c53c9328fd2 Mon Sep 17 00:00:00 2001 From: "Sean M. 
Collins" Date: Mon, 9 Nov 2015 11:06:39 -0500 Subject: [PATCH 0611/2941] Replace default route for inet6 When taking the IPv6 addresses from an interface, also update any routing table entries. Change-Id: I0424de6c5c1b0fcb7a9bc3fc1475036668cab09d Closes-Bug: 1514494 --- lib/neutron-legacy | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index c244e5470a..2c0c9cfb3b 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -799,7 +799,7 @@ function _move_neutron_addresses_route { local IP_ADD="" local IP_DEL="" local DEFAULT_ROUTE_GW - DEFAULT_ROUTE_GW=$(ip r | awk "/default.+$from_intf/ { print \$3; exit }") + DEFAULT_ROUTE_GW=$(ip -f $af r | awk "/default.+$from_intf/ { print \$3; exit }") local ADD_OVS_PORT="" if [[ $af == "inet" ]]; then @@ -811,7 +811,7 @@ function _move_neutron_addresses_route { fi if [ "$DEFAULT_ROUTE_GW" != "" ]; then - ADD_DEFAULT_ROUTE="sudo ip r replace default via $DEFAULT_ROUTE_GW dev $to_intf" + ADD_DEFAULT_ROUTE="sudo ip -f $af r replace default via $DEFAULT_ROUTE_GW dev $to_intf" fi if [[ "$add_ovs_port" == "True" ]]; then From 9af81997b543f7634c180b73a036c59456118b50 Mon Sep 17 00:00:00 2001 From: "Sean M. Collins" Date: Tue, 10 Nov 2015 13:30:20 -0500 Subject: [PATCH 0612/2941] Only take the first global, non temporary ipv6 address Taking a temporary IPv6 address created through the OS' support for Privacy Extensions (RFC 4941) is not very useful. It would occur because it happened to be the first in the list returned from ip(8). Instead, grab the first IPv6 address that is not a temporary address. 
Related-Bug: #1488691 Change-Id: I7f455572241e7d0c7406f173239a2270a4d8926a --- lib/neutron-legacy | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 2c0c9cfb3b..eed477a550 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -807,7 +807,7 @@ function _move_neutron_addresses_route { fi if [[ $af == "inet6" ]]; then - IP_BRD=$(ip -f $af a s dev $from_intf | grep inet6 | awk '{ print $2, $3, $4; exit }') + IP_BRD=$(ip -f $af a s dev $from_intf | grep 'scope global' | sed '/temporary/d' | awk '{ print $2, $3, $4; exit }') fi if [ "$DEFAULT_ROUTE_GW" != "" ]; then From 1650166c2594905ca16c02b58430f4f5bd9ed24c Mon Sep 17 00:00:00 2001 From: "Sean M. Collins" Date: Mon, 12 Oct 2015 11:01:44 -0400 Subject: [PATCH 0613/2941] docs: Add more networking details to single interface section Add complete localrcs, and also add a section for additional compute nodes, to help demonstrate the OVS layout and how traffic flows over VXLAN tunnels from compute nodes, to the L3 node, and out onto the wire. Closes-Bug: #1506733 Change-Id: Ibb5fd454bdcb8c13400c1e11f640c2aafc0f73ca --- doc/source/guides/neutron.rst | 172 +++++++++++++++++++++++++++++++++- 1 file changed, 171 insertions(+), 1 deletion(-) diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst index 5891f68033..ee29087267 100644 --- a/doc/source/guides/neutron.rst +++ b/doc/source/guides/neutron.rst @@ -35,7 +35,7 @@ network and is on a shared subnet with other machines. network hardware_network { address = "172.18.161.0/24" router [ address = "172.18.161.1" ]; - devstack_laptop [ address = "172.18.161.6" ]; + devstack-1 [ address = "172.18.161.6" ]; } } @@ -43,9 +43,13 @@ network and is on a shared subnet with other machines. DevStack Configuration ---------------------- +The following is a complete `local.conf` for the host named +`devstack-1`. It will run all the API and services, as well as +serving as a hypervisor for guest instances. 
:: + [[local|localrc]] HOST_IP=172.18.161.6 SERVICE_HOST=172.18.161.6 MYSQL_HOST=172.18.161.6 @@ -57,6 +61,12 @@ DevStack Configuration SERVICE_PASSWORD=secrete SERVICE_TOKEN=secrete + # Do not use Nova-Network + disable_service n-net + # Enable Neutron + ENABLED_SERVICES+=,q-svc,q-dhcp,q-meta,q-agt,q-l3 + + ## Neutron options Q_USE_SECGROUP=True FLOATING_RANGE="172.18.161.0/24" @@ -71,6 +81,166 @@ DevStack Configuration OVS_BRIDGE_MAPPINGS=public:br-ex +Adding Additional Compute Nodes +------------------------------- + +Let's suppose that after installing DevStack on the first host, you +also want to do multinode testing and networking. + +Physical Network Setup +~~~~~~~~~~~~~~~~~~~~~~ + +.. nwdiag:: + + nwdiag { + inet [ shape = cloud ]; + router; + inet -- router; + + network hardware_network { + address = "172.18.161.0/24" + router [ address = "172.18.161.1" ]; + devstack-1 [ address = "172.18.161.6" ]; + devstack-2 [ address = "172.18.161.7" ]; + } + } + + +After DevStack installs and configures Neutron, traffic from guest VMs +flows out of `devstack-2` (the compute node) and is encapsulated in a +VXLAN tunnel back to `devstack-1` (the control node) where the L3 +agent is running. 
+ +:: + + stack@devstack-2:~/devstack$ sudo ovs-vsctl show + 8992d965-0ba0-42fd-90e9-20ecc528bc29 + Bridge br-int + fail_mode: secure + Port br-int + Interface br-int + type: internal + Port patch-tun + Interface patch-tun + type: patch + options: {peer=patch-int} + Bridge br-tun + fail_mode: secure + Port "vxlan-c0a801f6" + Interface "vxlan-c0a801f6" + type: vxlan + options: {df_default="true", in_key=flow, local_ip="172.18.161.7", out_key=flow, remote_ip="172.18.161.6"} + Port patch-int + Interface patch-int + type: patch + options: {peer=patch-tun} + Port br-tun + Interface br-tun + type: internal + ovs_version: "2.0.2" + +Open vSwitch on the control node, where the L3 agent runs, is +configured to de-encapsulate traffic from compute nodes, then forward +it over the `br-ex` bridge, where `eth0` is attached. + +:: + + stack@devstack-1:~/devstack$ sudo ovs-vsctl show + 422adeea-48d1-4a1f-98b1-8e7239077964 + Bridge br-tun + fail_mode: secure + Port br-tun + Interface br-tun + type: internal + Port patch-int + Interface patch-int + type: patch + options: {peer=patch-tun} + Port "vxlan-c0a801d8" + Interface "vxlan-c0a801d8" + type: vxlan + options: {df_default="true", in_key=flow, local_ip="172.18.161.6", out_key=flow, remote_ip="172.18.161.7"} + Bridge br-ex + Port phy-br-ex + Interface phy-br-ex + type: patch + options: {peer=int-br-ex} + Port "eth0" + Interface "eth0" + Port br-ex + Interface br-ex + type: internal + Bridge br-int + fail_mode: secure + Port "tapce66332d-ea" + tag: 1 + Interface "tapce66332d-ea" + type: internal + Port "qg-65e5a4b9-15" + tag: 2 + Interface "qg-65e5a4b9-15" + type: internal + Port "qr-33e5e471-88" + tag: 1 + Interface "qr-33e5e471-88" + type: internal + Port "qr-acbe9951-70" + tag: 1 + Interface "qr-acbe9951-70" + type: internal + Port br-int + Interface br-int + type: internal + Port patch-tun + Interface patch-tun + type: patch + options: {peer=patch-int} + Port int-br-ex + Interface int-br-ex + type: patch + options: 
{peer=phy-br-ex} + ovs_version: "2.0.2" + +`br-int` is a bridge that the Open vSwitch mechanism driver creates, +which is used as the "integration bridge" where ports are created, and +plugged into the virtual switching fabric. `br-ex` is an OVS bridge +that is used to connect physical ports (like `eth0`), so that floating +IP traffic for tenants can be received from the physical network +infrastructure (and the internet), and routed to tenant network ports. +`br-tun` is a tunnel bridge that is used to connect OpenStack nodes +(like `devstack-2`) together. This bridge is used so that tenant +network traffic, using the VXLAN tunneling protocol, flows between +each compute node where tenant instances run. + + + +DevStack Compute Configuration +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The host `devstack-2` has a very minimal `local.conf`. + +:: + + [[local|localrc]] + HOST_IP=172.18.161.7 + SERVICE_HOST=172.18.161.6 + MYSQL_HOST=172.18.161.6 + RABBIT_HOST=172.18.161.6 + GLANCE_HOSTPORT=172.18.161.6:9292 + ADMIN_PASSWORD=secrete + MYSQL_PASSWORD=secrete + RABBIT_PASSWORD=secrete + SERVICE_PASSWORD=secrete + SERVICE_TOKEN=secrete + + ## Neutron options + PUBLIC_INTERFACE=eth0 + ENABLED_SERVICES=n-cpu,rabbit,q-agt + +Network traffic from `eth0` on the compute nodes is then NAT'd by the +controller node that runs Neutron's `neutron-l3-agent` and provides L3 +connectivity. + Neutron Networking with Open vSwitch and Provider Networks ========================================================== From 90dd262c19d7387ef6b438aea5e6eb13f3fd609d Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 10 Nov 2015 12:22:03 -0500 Subject: [PATCH 0614/2941] fail if devstack attempts to be run under virtualenv This has come up on the mailing list recently, we should just fail early and explicitly so that people don't get way down this path and not realize it's never going to work. 
Change-Id: I8a7f001adf3a5244b8655858ebd5fc7014a4af55 --- stack.sh | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/stack.sh b/stack.sh index 68b932e4b1..825ed968f3 100755 --- a/stack.sh +++ b/stack.sh @@ -93,6 +93,20 @@ if [[ $EUID -eq 0 ]]; then exit 1 fi +# OpenStack is designed to run at a system level, with system level +# installation of python packages. It does not support running under a +# virtual env, and will fail in really odd ways if you do this. Make +# this explicit as it has come up on the mailing list. +if [[ -n "$VIRTUAL_ENV" ]]; then + echo "You appear to be running under a python virtualenv." + echo "DevStack does not support this, as we my break the" + echo "virtualenv you are currently in by modifying " + echo "external system-level components the virtualenv relies on." + echo "We reccommend you use a separate virtual-machine if " + echo "you are worried about DevStack taking over your system." + exit 1 +fi + # Provide a safety switch for devstack. If you do a lot of devstack, # on a lot of different environments, you sometimes run it on the # wrong box. This makes there be a way to prevent that. From 7e550682977b0c3a6a667af6691760d8a7506e9b Mon Sep 17 00:00:00 2001 From: Richard Theis Date: Tue, 13 Oct 2015 07:51:05 -0500 Subject: [PATCH 0615/2941] doc: Update LBaaS v2 setup for Liberty - Updated LBaaS v2 setup to use Octavia. - Removed the old cirros image URL, the default should be sufficient. - Fixed nova boot commands based on Liberty DevStack. - Added sleeps to LBaaS v2 commands since most commands can take a few seconds to complete. - Added wait to load balancer creation since it can take a few minutes to complete. - Wrapped long lines in the descriptions. 
Change-Id: Ib4a3f02ebc2606e3e16591ae3a23676cb0a6cd64 --- doc/source/guides/devstack-with-lbaas-v2.rst | 30 ++++++++++++++------ 1 file changed, 21 insertions(+), 9 deletions(-) diff --git a/doc/source/guides/devstack-with-lbaas-v2.rst b/doc/source/guides/devstack-with-lbaas-v2.rst index f67978310d..747a9384d0 100644 --- a/doc/source/guides/devstack-with-lbaas-v2.rst +++ b/doc/source/guides/devstack-with-lbaas-v2.rst @@ -1,13 +1,17 @@ -Configure Load-Balancer in Kilo +Configure Load-Balancer Version 2 ================================= -The Kilo release of OpenStack will support Version 2 of the neutron load balancer. Until now, using OpenStack `LBaaS V2 `_ has required a good understanding of neutron and LBaaS architecture and several manual steps. +Starting in the OpenStack Liberty release, the +`neutron LBaaS v2 API `_ +is now stable while the LBaaS v1 API has been deprecated. The LBaaS v2 reference +driver is based on Octavia. Phase 1: Create DevStack + 2 nova instances -------------------------------------------- -First, set up a vm of your choice with at least 8 GB RAM and 16 GB disk space, make sure it is updated. Install git and any other developer tools you find useful. +First, set up a vm of your choice with at least 8 GB RAM and 16 GB disk space, +make sure it is updated. Install git and any other developer tools you find useful. Install devstack @@ -24,6 +28,7 @@ Edit your `local.conf` to look like [[local|localrc]] # Load the external LBaaS plugin. 
enable_plugin neutron-lbaas https://git.openstack.org/openstack/neutron-lbaas + enable_plugin octavia https://git.openstack.org/openstack/octavia # ===== BEGIN localrc ===== DATABASE_PASSWORD=password @@ -42,13 +47,13 @@ Edit your `local.conf` to look like ENABLED_SERVICES+=,horizon # Nova ENABLED_SERVICES+=,n-api,n-crt,n-obj,n-cpu,n-cond,n-sch - IMAGE_URLS+=",https://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img" # Glance ENABLED_SERVICES+=,g-api,g-reg # Neutron ENABLED_SERVICES+=,q-svc,q-agt,q-dhcp,q-l3,q-meta - # Enable LBaaS V2 + # Enable LBaaS v2 ENABLED_SERVICES+=,q-lbaasv2 + ENABLED_SERVICES+=,octavia,o-cw,o-hk,o-hm,o-api # Cinder ENABLED_SERVICES+=,c-api,c-vol,c-sch # Tempest @@ -69,11 +74,11 @@ Create two nova instances that we can use as test http servers: :: #create nova instances on private network - nova boot --image $(nova image-list | awk '/ cirros-0.3.0-x86_64-disk / {print $2}') --flavor 1 --nic net-id=$(neutron net-list | awk '/ private / {print $2}') node1 - nova boot --image $(nova image-list | awk '/ cirros-0.3.0-x86_64-disk / {print $2}') --flavor 1 --nic net-id=$(neutron net-list | awk '/ private / {print $2}') node2 + nova boot --image $(nova image-list | awk '/ cirros-.*-x86_64-uec / {print $2}') --flavor 1 --nic net-id=$(neutron net-list | awk '/ private / {print $2}') node1 + nova boot --image $(nova image-list | awk '/ cirros-.*-x86_64-uec / {print $2}') --flavor 1 --nic net-id=$(neutron net-list | awk '/ private / {print $2}') node2 nova list # should show the nova instances just created - #add secgroup rule to allow ssh etc.. + #add secgroup rules to allow ssh etc.. 
neutron security-group-rule-create default --protocol icmp neutron security-group-rule-create default --protocol tcp --port-range-min 22 --port-range-max 22 neutron security-group-rule-create default --protocol tcp --port-range-min 80 --port-range-max 80 @@ -91,9 +96,16 @@ Phase 2: Create your load balancers :: neutron lbaas-loadbalancer-create --name lb1 private-subnet + neutron lbaas-loadbalancer-show lb1 # Wait for the provisioning_status to be ACTIVE. neutron lbaas-listener-create --loadbalancer lb1 --protocol HTTP --protocol-port 80 --name listener1 + sleep 10 # Sleep since LBaaS actions can take a few seconds depending on the environment. neutron lbaas-pool-create --lb-algorithm ROUND_ROBIN --listener listener1 --protocol HTTP --name pool1 + sleep 10 neutron lbaas-member-create --subnet private-subnet --address 10.0.0.3 --protocol-port 80 pool1 + sleep 10 neutron lbaas-member-create --subnet private-subnet --address 10.0.0.5 --protocol-port 80 pool1 -Please note here that the "10.0.0.3" and "10.0.0.5" in the above commands are the IPs of the nodes (in my test run-thru, they were actually 10.2 and 10.4), and the address of the created LB will be reported as "vip_address" from the lbaas-loadbalancer-create, and a quick test of that LB is "curl that-lb-ip", which should alternate between showing the IPs of the two nodes. +Please note here that the "10.0.0.3" and "10.0.0.5" in the above commands are the IPs of the nodes +(in my test run-thru, they were actually 10.2 and 10.4), and the address of the created LB will be +reported as "vip_address" from the lbaas-loadbalancer-create, and a quick test of that LB is +"curl that-lb-ip", which should alternate between showing the IPs of the two nodes. 
From 2e8695b0756969d89cd4152e7496df9bf540eaa3 Mon Sep 17 00:00:00 2001 From: Tony Breeds Date: Tue, 20 Oct 2015 11:21:57 +1100 Subject: [PATCH 0616/2941] Add development libraries needed by Pillow Change I8da7dd95ae24cf06dc7bdc300fcf39947a6df093 added Pillow build deps to nodepool thick slaves. This means that Pillow 3 will work in unit tests. Make the matching change to allow Pillow 3.0.0 to work under devstack. The longer term aim is to remove temporary upper cap. Change-Id: I2bec8cf1bfeaaa6ae329704229fdeb86d26e55c7 --- files/debs/general | 1 + files/rpms-suse/general | 2 ++ files/rpms/general | 1 + 3 files changed, 4 insertions(+) diff --git a/files/debs/general b/files/debs/general index 9b2715685e..1215147a16 100644 --- a/files/debs/general +++ b/files/debs/general @@ -8,6 +8,7 @@ git graphviz # needed for docs iputils-ping libffi-dev # for pyOpenSSL +libjpeg-dev # Pillow 3.0.0 libmysqlclient-dev # MySQL-python libpq-dev # psycopg2 libssl-dev # for pyOpenSSL diff --git a/files/rpms-suse/general b/files/rpms-suse/general index 651243d274..34a29554f7 100644 --- a/files/rpms-suse/general +++ b/files/rpms-suse/general @@ -9,6 +9,7 @@ git-core graphviz # docs iputils libffi-devel # pyOpenSSL +libjpeg8-devel # Pillow 3.0.0 libmysqlclient-devel # MySQL-python libopenssl-devel # to rebuild pyOpenSSL if needed libxslt-devel # lxml @@ -26,3 +27,4 @@ tar tcpdump unzip wget +zlib-devel diff --git a/files/rpms/general b/files/rpms/general index cfd9479600..40b06f489b 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -12,6 +12,7 @@ iptables-services # NOPRIME f21,f22 java-1.7.0-openjdk-headless # NOPRIME rhel7 java-1.8.0-openjdk-headless # NOPRIME f21,f22 libffi-devel +libjpeg-turbo-devel # Pillow 3.0.0 libxml2-devel # lxml libxslt-devel # lxml libyaml-devel From 5cad4d3fe94f2e3823a8d9a2588b3a580d69605c Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 10 Nov 2015 14:39:07 -0500 Subject: [PATCH 0617/2941] refactor zookeeper into a slightly more generic dlm 
module This attempts to make the zookeeper installation a bit more modular (assuming that other folks will want to add other dlms as plugins), and addresses the service start issues with zookeeper under ubuntu/upstart. Zookeeper is not going to be installed by default. Services need to ask for it with use_dlm. Change-Id: I33525e2b83a4497a57ec95f62880e0308c88b34f --- lib/dlm | 108 ++++++++++++++++++++++++++++++++++++++++++++++++++ lib/zookeeper | 91 ------------------------------------------ stack.sh | 20 +++------- stackrc | 2 +- unstack.sh | 2 +- 5 files changed, 115 insertions(+), 108 deletions(-) create mode 100644 lib/dlm delete mode 100644 lib/zookeeper diff --git a/lib/dlm b/lib/dlm new file mode 100644 index 0000000000..f68ee26b4b --- /dev/null +++ b/lib/dlm @@ -0,0 +1,108 @@ +#!/bin/bash +# +# lib/dlm +# +# Functions to control the installation and configuration of software +# that provides a dlm (and possibly other functions). The default is +# **zookeeper**, and is going to be the only backend supported in the +# devstack tree. + +# Dependencies: +# +# - ``functions`` file + +# ``stack.sh`` calls the entry points in this order: +# +# - is_dlm_enabled +# - install_dlm +# - configure_dlm +# - cleanup_dlm + +# Save trace setting +XTRACE=$(set +o | grep xtrace) +set +o xtrace + + +# Defaults +# -------- + +# + +# Set up default directories +ZOOKEEPER_DATA_DIR=$DEST/data/zookeeper +ZOOKEEPER_CONF_DIR=/etc/zookeeper + + +# Entry Points +# ------------ +# +# NOTE(sdague): it is expected that when someone wants to implement +# another one of these out of tree, they'll implement the following +# functions: +# +# - dlm_backend +# - install_dlm +# - configure_dlm +# - cleanup_dlm + +# This should be declared in the settings file of any plugin or +# service that needs to have a dlm in their enviroment. +function use_dlm { + enable_service $(dlm_backend) +} + +# A function to return the name of the backend in question, some users +# are going to need to know this. 
+function dlm_backend { + echo "zookeeper" +} + +# Test if a dlm is enabled (defaults to a zookeeper specific check) +function is_dlm_enabled { + [[ ,${ENABLED_SERVICES}, =~ ,"$(dlm_backend)", ]] && return 0 + return 1 +} + +# cleanup_dlm() - Remove residual data files, anything left over from previous +# runs that a clean run would need to clean up +function cleanup_dlm { + # NOTE(sdague): we don't check for is_enabled here because we + # should just delete this regardless. Some times users updated + # their service list before they run cleanup. + sudo rm -rf $ZOOKEEPER_DATA_DIR +} + +# configure_dlm() - Set config files, create data dirs, etc +function configure_dlm { + if is_dlm_enabled; then + sudo cp $FILES/zookeeper/* $ZOOKEEPER_CONF_DIR + sudo sed -i -e 's|.*dataDir.*|dataDir='$ZOOKEEPER_DATA_DIR'|' $ZOOKEEPER_CONF_DIR/zoo.cfg + # clean up from previous (possibly aborted) runs + # create required data files + sudo rm -rf $ZOOKEEPER_DATA_DIR + sudo mkdir -p $ZOOKEEPER_DATA_DIR + # restart after configuration, there is no reason to make this + # another step, because having data files that don't match the + # zookeeper running is just going to cause tears. 
+ restart_service zookeeper + fi +} + +# install_dlm() - Collect source and prepare +function install_dlm { + if is_dlm_enabled; then + if is_ubuntu; then + install_package zookeeperd + else + die $LINENO "Don't know how to install zookeeper on this platform" + fi + fi +} + +# Restore xtrace +$XTRACE + +# Tell emacs to use shell-script-mode +## Local variables: +## mode: shell-script +## End: diff --git a/lib/zookeeper b/lib/zookeeper deleted file mode 100644 index 6637d52bca..0000000000 --- a/lib/zookeeper +++ /dev/null @@ -1,91 +0,0 @@ -#!/bin/bash -# -# lib/zookeeper -# Functions to control the installation and configuration of **zookeeper** - -# Dependencies: -# -# - ``functions`` file - -# ``stack.sh`` calls the entry points in this order: -# -# - is_zookeeper_enabled -# - install_zookeeper -# - configure_zookeeper -# - init_zookeeper -# - start_zookeeper -# - stop_zookeeper -# - cleanup_zookeeper - -# Save trace setting -XTRACE=$(set +o | grep xtrace) -set +o xtrace - - -# Defaults -# -------- - -# - -# Set up default directories -ZOOKEEPER_DATA_DIR=$DEST/data/zookeeper -ZOOKEEPER_CONF_DIR=/etc/zookeeper - - -# Entry Points -# ------------ - -# Test if any zookeeper service us enabled -# is_zookeeper_enabled -function is_zookeeper_enabled { - [[ ,${ENABLED_SERVICES}, =~ ,"zookeeper", ]] && return 0 - return 1 -} - -# cleanup_zookeeper() - Remove residual data files, anything left over from previous -# runs that a clean run would need to clean up -function cleanup_zookeeper { - sudo rm -rf $ZOOKEEPER_DATA_DIR -} - -# configure_zookeeper() - Set config files, create data dirs, etc -function configure_zookeeper { - sudo cp $FILES/zookeeper/* $ZOOKEEPER_CONF_DIR - sudo sed -i -e 's|.*dataDir.*|dataDir='$ZOOKEEPER_DATA_DIR'|' $ZOOKEEPER_CONF_DIR/zoo.cfg -} - -# init_zookeeper() - Initialize databases, etc. 
-function init_zookeeper { - # clean up from previous (possibly aborted) runs - # create required data files - sudo rm -rf $ZOOKEEPER_DATA_DIR - sudo mkdir -p $ZOOKEEPER_DATA_DIR -} - -# install_zookeeper() - Collect source and prepare -function install_zookeeper { - install_package zookeeperd -} - -# start_zookeeper() - Start running processes, including screen -function start_zookeeper { - # Starting twice Zookeeper on Ubuntu exits with error code 1. See LP#1513741 - # Match both systemd and sysvinit output - local running="(active \(running\)|start/running)" - if ! is_ubuntu || ! sudo /usr/sbin/service zookeeper status | egrep -q "$running"; then - start_service zookeeper - fi -} - -# stop_zookeeper() - Stop running processes (non-screen) -function stop_zookeeper { - stop_service zookeeper -} - -# Restore xtrace -$XTRACE - -# Tell emacs to use shell-script-mode -## Local variables: -## mode: shell-script -## End: diff --git a/stack.sh b/stack.sh index 68b932e4b1..3cc21586d7 100755 --- a/stack.sh +++ b/stack.sh @@ -539,7 +539,7 @@ source $TOP_DIR/lib/heat source $TOP_DIR/lib/neutron-legacy source $TOP_DIR/lib/ldap source $TOP_DIR/lib/dstat -source $TOP_DIR/lib/zookeeper +source $TOP_DIR/lib/dlm # Extras Source # -------------- @@ -724,11 +724,10 @@ run_phase stack pre-install install_rpc_backend -if is_service_enabled zookeeper; then - cleanup_zookeeper - configure_zookeeper - init_zookeeper -fi +# NOTE(sdague): dlm install is conditional on one being enabled by configuration +install_dlm +configure_dlm + if is_service_enabled $DATABASE_BACKENDS; then install_database fi @@ -968,15 +967,6 @@ save_stackenv $LINENO start_dstat -# Zookeeper -# ----- - -# zookeeper for use with tooz for Distributed Lock Management capabilities etc., -if is_service_enabled zookeeper; then - start_zookeeper -fi - - # Keystone # -------- diff --git a/stackrc b/stackrc index 76a5756dde..f4a162b50c 100644 --- a/stackrc +++ b/stackrc @@ -69,7 +69,7 @@ if ! 
isset ENABLED_SERVICES ; then # Dashboard ENABLED_SERVICES+=,horizon # Additional services - ENABLED_SERVICES+=,rabbit,tempest,mysql,dstat,zookeeper + ENABLED_SERVICES+=,rabbit,tempest,mysql,dstat fi # SQLAlchemy supports multiple database drivers for each database server diff --git a/unstack.sh b/unstack.sh index 0cace3254a..8eded837fd 100755 --- a/unstack.sh +++ b/unstack.sh @@ -69,7 +69,7 @@ source $TOP_DIR/lib/heat source $TOP_DIR/lib/neutron-legacy source $TOP_DIR/lib/ldap source $TOP_DIR/lib/dstat -source $TOP_DIR/lib/zookeeper +source $TOP_DIR/lib/dlm # Extras Source # -------------- From 9329290183c96be45363325a244861065413562d Mon Sep 17 00:00:00 2001 From: Tony Breeds Date: Mon, 9 Nov 2015 15:45:04 +1100 Subject: [PATCH 0618/2941] Update comments to reflect current USE_SCREEN usage In a couple of places the tracking of USE_SCREEN has drifted from the comments. Correct that. Change-Id: I63bdd5ca4de49bf653f5bc8f8e0e5efe67ef605c --- functions-common | 10 ++++------ stackrc | 2 +- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/functions-common b/functions-common index 5c97aee9a3..3eeab09889 100644 --- a/functions-common +++ b/functions-common @@ -1352,6 +1352,7 @@ function is_running { # If the command includes shell metachatacters (;<>*) it must be run using a shell # If an optional group is provided sg will be used to run the # command as that group. +# Uses globals ``USE_SCREEN`` # run_process service "command-line" [group] function run_process { local service=$1 @@ -1370,7 +1371,7 @@ function run_process { # Helper to launch a process in a named screen # Uses globals ``CURRENT_LOG_TIME``, ```LOGDIR``, ``SCREEN_LOGDIR``, `SCREEN_NAME``, -# ``SERVICE_DIR``, ``USE_SCREEN``, ``SCREEN_IS_LOGGING`` +# ``SERVICE_DIR``, ``SCREEN_IS_LOGGING`` # screen_process name "command-line" [group] # Run a command in a shell in a screen window, if an optional group # is provided, use sg to set the group of the command. 
@@ -1381,7 +1382,6 @@ function screen_process { SCREEN_NAME=${SCREEN_NAME:-stack} SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} - USE_SCREEN=$(trueorfalse True USE_SCREEN) screen -S $SCREEN_NAME -X screen -t $name @@ -1465,14 +1465,13 @@ function screen_rc { # If a PID is available use it, kill the whole process group via TERM # If screen is being used kill the screen window; this will catch processes # that did not leave a PID behind -# Uses globals ``SCREEN_NAME``, ``SERVICE_DIR``, ``USE_SCREEN`` +# Uses globals ``SCREEN_NAME``, ``SERVICE_DIR`` # screen_stop_service service function screen_stop_service { local service=$1 SCREEN_NAME=${SCREEN_NAME:-stack} SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} - USE_SCREEN=$(trueorfalse True USE_SCREEN) if is_service_enabled $service; then # Clean up the screen window @@ -1490,7 +1489,6 @@ function stop_process { local service=$1 SERVICE_DIR=${SERVICE_DIR:-${DEST}/status} - USE_SCREEN=$(trueorfalse True USE_SCREEN) if is_service_enabled $service; then # Kill via pid if we have one available @@ -1552,11 +1550,11 @@ function service_check { } # Tail a log file in a screen if USE_SCREEN is true. +# Uses globals ``USE_SCREEN`` function tail_log { local name=$1 local logfile=$2 - USE_SCREEN=$(trueorfalse True USE_SCREEN) if [[ "$USE_SCREEN" = "True" ]]; then screen_process "$name" "sudo tail -f $logfile" fi diff --git a/stackrc b/stackrc index 76a5756dde..4f459397a0 100644 --- a/stackrc +++ b/stackrc @@ -101,7 +101,7 @@ HORIZON_APACHE_ROOT="/dashboard" # ctrl-c, up-arrow, enter to restart the service. Starting services # this way is slightly unreliable, and a bit slower, so this can # be disabled for automated testing by setting this value to False. -USE_SCREEN=True +USE_SCREEN=$(trueorfalse True USE_SCREEN) # When using screen, should we keep a log file on disk? 
You might # want this False if you have a long-running setup where verbose logs From 34a5aa5110c9651e2cf33d694d71b1e450495495 Mon Sep 17 00:00:00 2001 From: Tony Breeds Date: Tue, 10 Nov 2015 15:23:30 +1100 Subject: [PATCH 0619/2941] Remove unused compat variable SCREEN_DEV Currently we set USE_SCREEN to SCREEN_DEV if it's set. There is a comment to remove it once it's eradicated from CI. AFAICT this pre-condition has been met. Change-Id: I1423c8b9c18d1b3e34dbfe1c03be735c646a12b4 --- stackrc | 3 --- 1 file changed, 3 deletions(-) diff --git a/stackrc b/stackrc index 4f459397a0..075236890b 100644 --- a/stackrc +++ b/stackrc @@ -651,9 +651,6 @@ S3_SERVICE_PORT=${S3_SERVICE_PORT:-3333} PRIVATE_NETWORK_NAME=${PRIVATE_NETWORK_NAME:-"private"} PUBLIC_NETWORK_NAME=${PUBLIC_NETWORK_NAME:-"public"} -# Compatibility until it's eradicated from CI -USE_SCREEN=${SCREEN_DEV:-$USE_SCREEN} - # Set default screen name SCREEN_NAME=${SCREEN_NAME:-stack} From 201e3c133e28acb6dcdeb017389718db0775a748 Mon Sep 17 00:00:00 2001 From: Huan Xie Date: Thu, 27 Aug 2015 12:34:24 +0100 Subject: [PATCH 0620/2941] XenAPI:Fix problems to support xenserver+neutron The lack of a CI for XenAPI + Neutron has meant this support has been broken over time. 
This is a set of one-off fixes that are needed to reintroduce support while we work towards getting a CI functional Related-Bug: #1495423 Change-Id: Id41fdc77c155756bda9e2e9ac0446a49f06f0603 --- lib/neutron-legacy | 2 +- lib/neutron_plugins/openvswitch_agent | 18 ++++++++++++++++-- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index c244e5470a..ac99678e66 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -639,7 +639,7 @@ function install_neutron { plugin_dir=$($ssh_dom0 "$xen_functions; set -eux; xapi_plugin_location") # install neutron plugins to dom0 - tar -czf - -C $NEUTRON_DIR/neutron/plugins/openvswitch/agent/xenapi/etc/xapi.d/plugins/ ./ | + tar -czf - -C $NEUTRON_DIR/neutron/plugins/ml2/drivers/openvswitch/agent/xenapi/etc/xapi.d/plugins/ ./ | $ssh_dom0 "tar -xzf - -C $plugin_dir && chmod a+x $plugin_dir/*" fi } diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent index 5a843ffba7..6a333939d0 100644 --- a/lib/neutron_plugins/openvswitch_agent +++ b/lib/neutron_plugins/openvswitch_agent @@ -71,6 +71,9 @@ function neutron_plugin_configure_plugin_agent { # Make a copy of our config for domU sudo cp /$Q_PLUGIN_CONF_FILE "/$Q_PLUGIN_CONF_FILE.domU" + # change domU's config file to STACK_USER + sudo chown $STACK_USER:$STACK_USER /$Q_PLUGIN_CONF_FILE.domU + # Deal with Dom0's L2 Agent: Q_RR_DOM0_COMMAND="$NEUTRON_BIN_DIR/neutron-rootwrap-xen-dom0 $Q_RR_CONF_FILE" @@ -82,7 +85,14 @@ function neutron_plugin_configure_plugin_agent { # Under XS/XCP, the ovs agent needs to target the dom0 # integration bridge. This is enabled by using a root wrapper # that executes commands on dom0 via a XenAPI plugin. 
+ # XenAPI does not support daemon rootwrap now, so set root_helper_daemon empty iniset /$Q_PLUGIN_CONF_FILE agent root_helper "$Q_RR_DOM0_COMMAND" + iniset /$Q_PLUGIN_CONF_FILE agent root_helper_daemon "" + + # Disable minimize polling, so that it can always detect OVS and Port changes + # This is a problem of xenserver + neutron, bug has been reported + # https://bugs.launchpad.net/neutron/+bug/1495423 + iniset /$Q_PLUGIN_CONF_FILE agent minimize_polling False # Set "physical" mapping iniset /$Q_PLUGIN_CONF_FILE ovs bridge_mappings "physnet1:$FLAT_NETWORK_BRIDGE" @@ -95,10 +105,14 @@ function neutron_plugin_configure_plugin_agent { # Create a bridge "br-$GUEST_INTERFACE_DEFAULT" _neutron_ovs_base_add_bridge "br-$GUEST_INTERFACE_DEFAULT" # Add $GUEST_INTERFACE_DEFAULT to that bridge - sudo ovs-vsctl add-port "br-$GUEST_INTERFACE_DEFAULT" $GUEST_INTERFACE_DEFAULT + sudo ovs-vsctl -- --may-exist add-port "br-$GUEST_INTERFACE_DEFAULT" $GUEST_INTERFACE_DEFAULT + + # Create external bridge and add port + _neutron_ovs_base_add_bridge $PUBLIC_BRIDGE + sudo ovs-vsctl -- --may-exist add-port $PUBLIC_BRIDGE $PUBLIC_INTERFACE_DEFAULT # Set bridge mappings to "physnet1:br-$GUEST_INTERFACE_DEFAULT" - iniset "/$Q_PLUGIN_CONF_FILE.domU" ovs bridge_mappings "physnet1:br-$GUEST_INTERFACE_DEFAULT" + iniset "/$Q_PLUGIN_CONF_FILE.domU" ovs bridge_mappings "physnet1:br-$GUEST_INTERFACE_DEFAULT,physnet-ex:$PUBLIC_BRIDGE" # Set integration bridge to domU's iniset "/$Q_PLUGIN_CONF_FILE.domU" ovs integration_bridge $OVS_BRIDGE # Set root wrap From c175040103b6a903c286a253f0df0ddc468feae3 Mon Sep 17 00:00:00 2001 From: Jordan Pittier Date: Thu, 12 Nov 2015 11:03:20 +0100 Subject: [PATCH 0621/2941] Fix typo in error message printed if ran under virtualenv Commit title says it all. I don't know how you feel about these kind of commits, I feel like it's a waste of resources but I also feel bad when I see big/obvious typo. 
Change-Id: If048bb2dbad1a0b5a13e56b5fa1e6ea7c01eb05e --- stack.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/stack.sh b/stack.sh index 825ed968f3..36bf2afd48 100755 --- a/stack.sh +++ b/stack.sh @@ -99,10 +99,10 @@ fi # this explicit as it has come up on the mailing list. if [[ -n "$VIRTUAL_ENV" ]]; then echo "You appear to be running under a python virtualenv." - echo "DevStack does not support this, as we my break the" + echo "DevStack does not support this, as we may break the" echo "virtualenv you are currently in by modifying " echo "external system-level components the virtualenv relies on." - echo "We reccommend you use a separate virtual-machine if " + echo "We recommend you use a separate virtual-machine if " echo "you are worried about DevStack taking over your system." exit 1 fi From 33c9a67ead4b61a9eb423f71ca4f8e062c3b5ebd Mon Sep 17 00:00:00 2001 From: Atsushi SAKAI Date: Thu, 12 Nov 2015 19:50:00 +0900 Subject: [PATCH 0622/2941] Fix typos on three comments and one message Fixes typos on three comments and one message in functions/functions-common Change-Id: I2c926ca29b284afd4534b92860fa46f248676a83 --- functions | 2 +- functions-common | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/functions b/functions index ca5955e974..34da1ba733 100644 --- a/functions +++ b/functions @@ -410,7 +410,7 @@ function get_instance_ip { ip=$(echo "$nova_result" | grep "$network_name" | get_field 2) if [[ $ip = "" ]];then echo "$nova_result" - die $LINENO "[Fail] Coudn't get ipaddress of VM" + die $LINENO "[Fail] Couldn't get ipaddress of VM" fi echo $ip } diff --git a/functions-common b/functions-common index 5c97aee9a3..98ecfb26ae 100644 --- a/functions-common +++ b/functions-common @@ -1036,7 +1036,7 @@ function _parse_package_files { # We are using BASH regexp matching feature. 
package=${BASH_REMATCH[1]} distros=${BASH_REMATCH[2]} - # In bash ${VAR,,} will lowecase VAR + # In bash ${VAR,,} will lowercase VAR # Look for a match in the distro list if [[ ! ${distros,,} =~ ${DISTRO,,} ]]; then # If no match then skip this package @@ -1509,7 +1509,7 @@ function stop_process { # this fixed in all services: # https://bugs.launchpad.net/oslo-incubator/+bug/1446583 sleep 1 - # /bin/true becakse pkill on a non existant process returns an error + # /bin/true because pkill on a non existent process returns an error pkill -g $(cat $SERVICE_DIR/$SCREEN_NAME/$service.pid) || /bin/true fi rm $SERVICE_DIR/$SCREEN_NAME/$service.pid @@ -1717,7 +1717,7 @@ function plugin_override_defaults { if [[ -f $dir/devstack/override-defaults ]]; then # be really verbose that an override is happening, as it # may not be obvious if things fail later. - echo "$plugin has overriden the following defaults" + echo "$plugin has overridden the following defaults" cat $dir/devstack/override-defaults source $dir/devstack/override-defaults fi From adcf40d5f8ec0509fe9230e04bf0bd3f269a3f53 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Thu, 5 Nov 2015 09:47:38 +0100 Subject: [PATCH 0623/2941] Ensure python is installed devstack can call python before parsing the package requirements, so the python installation needs to be done earlier. Closes-Bug: #1488625 Change-Id: I85cca899aeedd741cf7dc695435d61390e260f22 --- stack.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/stack.sh b/stack.sh index 825ed968f3..afb695836b 100755 --- a/stack.sh +++ b/stack.sh @@ -335,6 +335,10 @@ if [ -z "`grep ^127.0.0.1 /etc/hosts | grep $LOCAL_HOSTNAME`" ]; then sudo sed -i "s/\(^127.0.0.1.*\)/\1 $LOCAL_HOSTNAME/" /etc/hosts fi +# Ensure python is installed +# -------------------------- +is_package_installed python || install_package python + # Configure Logging # ----------------- From 790266f0d2b752627a8ac641c8f1c9ba1e8e85e8 Mon Sep 17 00:00:00 2001 From: "Sean M. 
Collins" Date: Wed, 11 Nov 2015 13:36:35 -0500 Subject: [PATCH 0624/2941] Delete $IPV6_PUBLIC_NETWORK_GATEWAY IP during cleanup So that it does not end up being the IP address that is picked to move back to $PUBLIC_INTERFACE when we call _move_neutron_address_route Change-Id: I3d29d4f11feff308f6ad5d950ef004b48ec11b67 Closes-Bug: 1514984 --- lib/neutron-legacy | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index eed477a550..35029a2031 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -835,6 +835,10 @@ function cleanup_neutron { _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False "inet" if [[ $(ip -f inet6 a s dev "$OVS_PHYSICAL_BRIDGE" | grep -c 'global') != 0 ]]; then + # ip(8) wants the prefix length when deleting + local v6_gateway + v6_gateway=$(ip -6 a s dev $OVS_PHYSICAL_BRIDGE | grep $IPV6_PUBLIC_NETWORK_GATEWAY | awk '{ print $2 }') + sudo ip -6 addr del $v6_gateway dev $OVS_PHYSICAL_BRIDGE _move_neutron_addresses_route "$OVS_PHYSICAL_BRIDGE" "$PUBLIC_INTERFACE" False "inet6" fi From fe7b56cdefa4d5cb99b868e5659128601edf3600 Mon Sep 17 00:00:00 2001 From: Atsushi SAKAI Date: Fri, 13 Nov 2015 17:06:16 +0900 Subject: [PATCH 0625/2941] Fix typos for stack.sh and lib of comments and message Fix 10 comments and 1 message stack.sh Certicate => Certificate (comment) lib/stack Sentinal => Sentinel (comment) lib/neutron-legacy overriden => overridden (comment) necesssary => necessary (comment) notifiy => notify (message) notifations => notifications (comment) lib/rpc_backend orginal => original (comment) cofiguration => configuration (comment) lib/stack confgured => configured (comment) lib/swift additinal => additional (comment) calclution => calculation (comment) maximun => maximum (comment) Change-Id: I3637388b67decb007cd49af9addecc654009559b --- lib/neutron-legacy | 6 +++--- lib/rpc_backend | 4 ++-- lib/stack | 2 +- lib/swift | 6 +++--- stack.sh | 4 ++-- 5 files changed, 11 
insertions(+), 11 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index c244e5470a..a38b1c1d7e 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -256,7 +256,7 @@ ENABLE_TENANT_TUNNELS=${ENABLE_TENANT_TUNNELS:-True} # If using GRE tunnels for tenant networks, specify the range of # tunnel IDs from which tenant networks are allocated. Can be -# overriden in ``localrc`` in necesssary. +# overridden in ``localrc`` in necessary. TENANT_TUNNEL_RANGES=${TENANT_TUNNEL_RANGES:-1:1000} # To use VLANs for tenant networks, set to True in localrc. VLANs @@ -537,7 +537,7 @@ function create_neutron_initial_network { if is_provider_network; then die_if_not_set $LINENO PHYSICAL_NETWORK "You must specify the PHYSICAL_NETWORK" - die_if_not_set $LINENO PROVIDER_NETWORK_TYPE "You must specifiy the PROVIDER_NETWORK_TYPE" + die_if_not_set $LINENO PROVIDER_NETWORK_TYPE "You must specify the PROVIDER_NETWORK_TYPE" NET_ID=$(neutron net-create $PHYSICAL_NETWORK --tenant_id $TENANT_ID --provider:network_type $PROVIDER_NETWORK_TYPE --provider:physical_network "$PHYSICAL_NETWORK" ${SEGMENTATION_ID:+--provider:segmentation_id $SEGMENTATION_ID} --shared | grep ' id ' | get_field 2) die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PHYSICAL_NETWORK $TENANT_ID" @@ -1123,7 +1123,7 @@ function _configure_neutron_service { iniset $NEUTRON_CONF DEFAULT auth_strategy $Q_AUTH_STRATEGY _neutron_setup_keystone $NEUTRON_CONF keystone_authtoken - # Configuration for neutron notifations to nova. + # Configuration for neutron notifications to nova. 
iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_status_changes $Q_NOTIFY_NOVA_PORT_STATUS_CHANGES iniset $NEUTRON_CONF DEFAULT notify_nova_on_port_data_changes $Q_NOTIFY_NOVA_PORT_DATA_CHANGES diff --git a/lib/rpc_backend b/lib/rpc_backend index 03eacd8674..298dcb6e5f 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -58,7 +58,7 @@ function restart_rpc_backend { # NOTE(bnemec): Retry initial rabbitmq configuration to deal with # the fact that sometimes it fails to start properly. # Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1144100 - # NOTE(tonyb): Extend the orginal retry logic to only restart rabbitmq + # NOTE(tonyb): Extend the original retry logic to only restart rabbitmq # every second time around the loop. # See: https://bugs.launchpad.net/devstack/+bug/1449056 for details on # why this is needed. This can bee seen on vivid and Debian unstable @@ -106,7 +106,7 @@ function get_transport_url { fi } -# iniset cofiguration +# iniset configuration function iniset_rpc_backend { local package=$1 local file=$2 diff --git a/lib/stack b/lib/stack index 47e8ce2a22..7d98604b82 100644 --- a/lib/stack +++ b/lib/stack @@ -14,7 +14,7 @@ # Functions # --------- -# Generic service install handles venv creation if confgured for service +# Generic service install handles venv creation if configured for service # stack_install_service service function stack_install_service { local service=$1 diff --git a/lib/swift b/lib/swift index 3a8e80dd44..634d6ccb35 100644 --- a/lib/swift +++ b/lib/swift @@ -123,13 +123,13 @@ SWIFT_REPLICAS_SEQ=$(seq ${SWIFT_REPLICAS}) # trace through the logs when looking for its use. 
SWIFT_LOG_TOKEN_LENGTH=${SWIFT_LOG_TOKEN_LENGTH:-12} -# Set ``SWIFT_MAX_HEADER_SIZE`` to configure the maximun length of headers in +# Set ``SWIFT_MAX_HEADER_SIZE`` to configure the maximum length of headers in # Swift API SWIFT_MAX_HEADER_SIZE=${SWIFT_MAX_HEADER_SIZE:-16384} # Set ``OBJECT_PORT_BASE``, ``CONTAINER_PORT_BASE``, ``ACCOUNT_PORT_BASE`` -# Port bases used in port number calclution for the service "nodes" -# The specified port number will be used, the additinal ports calculated by +# Port bases used in port number calculation for the service "nodes" +# The specified port number will be used, the additional ports calculated by # base_port + node_num * 10 OBJECT_PORT_BASE=${OBJECT_PORT_BASE:-6613} CONTAINER_PORT_BASE=${CONTAINER_PORT_BASE:-6611} diff --git a/stack.sh b/stack.sh index 25c49c5709..d3d3fb2f60 100755 --- a/stack.sh +++ b/stack.sh @@ -926,8 +926,8 @@ fi restart_rpc_backend -# Export Certicate Authority Bundle -# --------------------------------- +# Export Certificate Authority Bundle +# ----------------------------------- # If certificates were used and written to the SSL bundle file then these # should be exported so clients can validate their connections. From 65a028bf1c3685e16fafbc7c44adaeeeb6e9cf58 Mon Sep 17 00:00:00 2001 From: "Chung Chih, Hung" Date: Fri, 13 Nov 2015 11:10:48 +0000 Subject: [PATCH 0626/2941] [ceph] Deploy ceph failed at controller node for multiple node Controller node wouldn't install libvirt package. The package will only been installed at nodes which had enable nova-compute. We only need to configure libvirt secret if it had enable nova-compute. 
Change-Id: I9cd6baf1820ce9f71c276d7e8b670307833578a5 Closes-Bug: 1515960 --- extras.d/60-ceph.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/extras.d/60-ceph.sh b/extras.d/60-ceph.sh index 38b901b767..cc90128176 100644 --- a/extras.d/60-ceph.sh +++ b/extras.d/60-ceph.sh @@ -32,7 +32,7 @@ if is_service_enabled ceph; then echo_summary "Configuring Cinder for Ceph" configure_ceph_cinder fi - if is_service_enabled cinder || is_service_enabled nova; then + if is_service_enabled n-cpu; then # NOTE (leseb): the part below is a requirement to attach Ceph block devices echo_summary "Configuring libvirt secret" import_libvirt_secret_ceph From bbe59edb6f4a5828362c59a200f6ede00f97a4c3 Mon Sep 17 00:00:00 2001 From: Steve Martinelli Date: Wed, 21 Oct 2015 00:47:43 -0400 Subject: [PATCH 0627/2941] Use openstackclient in swift exercises With the release of osc 1.8.0, swift support has been expanded and we can now remove references to the swift CLI from this exercise file. Also made minor improvements to comments. 
Change-Id: I04069eb6251f8cbf8266183441b2cfdb64defd7d --- exercises/swift.sh | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/exercises/swift.sh b/exercises/swift.sh index afcede81cd..4a41e0f1ed 100755 --- a/exercises/swift.sh +++ b/exercises/swift.sh @@ -2,7 +2,7 @@ # **swift.sh** -# Test swift via the ``swift`` command line from ``python-swiftclient`` +# Test swift via the ``python-openstackclient`` command line echo "*********************************************************************" echo "Begin DevStack Exercise: $0" @@ -39,26 +39,29 @@ is_service_enabled s-proxy || exit 55 # Container name CONTAINER=ex-swift +OBJECT=/etc/issue # Testing Swift # ============= # Check if we have to swift via keystone -swift stat || die $LINENO "Failure getting status" +openstack object store account show || die $LINENO "Failure getting account status" # We start by creating a test container openstack container create $CONTAINER || die $LINENO "Failure creating container $CONTAINER" -# add some files into it. -openstack object create $CONTAINER /etc/issue || die $LINENO "Failure uploading file to container $CONTAINER" +# add a file into it. +openstack object create $CONTAINER $OBJECT || die $LINENO "Failure uploading file to container $CONTAINER" -# list them +# list the objects openstack object list $CONTAINER || die $LINENO "Failure listing contents of container $CONTAINER" -# And we may want to delete them now that we have tested that -# everything works. 
-swift delete $CONTAINER || die $LINENO "Failure deleting container $CONTAINER" +# delete the object first +openstack object delete $CONTAINER $OBJECT || die $LINENO "Failure deleting object $OBJECT in container $CONTAINER" + +# delete the container +openstack container delete $CONTAINER || die $LINENO "Failure deleting container $CONTAINER" set +o xtrace echo "*********************************************************************" From 01cf55a69259a52a9e5e8614347f238826c6a7ca Mon Sep 17 00:00:00 2001 From: yangyapeng Date: Thu, 29 Oct 2015 13:21:29 -0400 Subject: [PATCH 0628/2941] Fix RST in configuration.rst Fix minor RST issue from before file was converted. Change-Id: Ie16ceace9c17e98010e068641ce60ba9a365ede0 --- doc/source/configuration.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst index e9921f049c..22841f6c13 100644 --- a/doc/source/configuration.rst +++ b/doc/source/configuration.rst @@ -211,7 +211,7 @@ Enabling Syslog Logging all services to a single syslog can be convenient. Enable syslogging by setting ``SYSLOG`` to ``True``. If the destination log host is not localhost ``SYSLOG_HOST`` and ``SYSLOG_PORT`` can be used -to direct the message stream to the log host. | +to direct the message stream to the log host. :: From ca7e4f285cfb68bae13e8df770dc2b5856559ecd Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 13 Nov 2015 11:15:15 +1100 Subject: [PATCH 0629/2941] Fix error detection & exit in report_results We wish to fail if we have >0 zero errors, not >1 errors (i.e. exactly one error did not trigger a failure!) This change also brings consistency to the pass & failure paths by ensuring report_results exits in both cases, since report_results is supposed to be the last thing run in a test file. 
Change-Id: Id4721dffe13721e6c3cd71bca40c3395627e98bf --- tests/unittest.sh | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tests/unittest.sh b/tests/unittest.sh index df7a8b4534..2570319fbf 100644 --- a/tests/unittest.sh +++ b/tests/unittest.sh @@ -92,16 +92,17 @@ function assert_empty { fi } -# print a summary of passing and failing tests, exiting -# with an error if we have failed tests +# Print a summary of passing and failing tests and exit +# (with an error if we have failed tests) # usage: report_results function report_results { echo "$PASS Tests PASSED" - if [[ $ERROR -gt 1 ]]; then + if [[ $ERROR -gt 0 ]]; then echo echo "The following $ERROR tests FAILED" echo -e "$FAILED_FUNCS" echo "---" exit 1 fi + exit 0 } From ecf06dbadb7c4cafb7a2fab13e58c1b05dd8a3f2 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Mon, 9 Nov 2015 17:42:23 +1100 Subject: [PATCH 0630/2941] Add test for package file ordering Add a simple test to ensure package install files remain sorted alphabetically (follow-on from the sorting of the files done in I01e42defbf778626afd8dd457f93f0b02dd1a19d) Change-Id: I75568871e92afcd81dac2c3ce18b84aa34cdd289 --- tests/test_package_ordering.sh | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100755 tests/test_package_ordering.sh diff --git a/tests/test_package_ordering.sh b/tests/test_package_ordering.sh new file mode 100755 index 0000000000..a568abf928 --- /dev/null +++ b/tests/test_package_ordering.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +# basic test to ensure that package-install files remain sorted +# alphabetically. + +TOP=$(cd $(dirname "$0")/.. 
&& pwd) + +source $TOP/tests/unittest.sh + +PKG_FILES=$(find $TOP/files/debs $TOP/files/rpms $TOP/files/rpms-suse -type f) + +TMPDIR=$(mktemp -d) + +SORTED=${TMPDIR}/sorted +UNSORTED=${TMPDIR}/unsorted + +for p in $PKG_FILES; do + grep -v '^#' $p > ${UNSORTED} + sort ${UNSORTED} > ${SORTED} + + if [ -n "$(diff -c ${UNSORTED} ${SORTED})" ]; then + failed "$p is unsorted" + # output this, it's helpful to see what exactly is unsorted + diff -c ${UNSORTED} ${SORTED} + else + passed "$p is sorted" + fi +done + +rm -rf ${TMPDIR} + +report_results From 199d857442108326959d391c337e3b02b98a1b1e Mon Sep 17 00:00:00 2001 From: Johan Pas Date: Tue, 17 Nov 2015 00:56:25 +0100 Subject: [PATCH 0631/2941] Remove brackets from IPv6 address in mysql cfgfile stack.sh creates a user-specific configuration file ~/.my.cnf for mysql. If devstack is installed with SERVICE_IP_VERSION=6 option in local.conf, the IPv6 host address was stored in the ~/.my.cnf file with square brackets. However mysql does not use bracketing for IPv6 addresses, resulting in 'Unknown MySQL server host' error when 'mysql' command is run. With this patch IPv6 host address is written to ~/.my.cnf without brackets. Closes-Bug: #1516776 Change-Id: I27a7be8c75cf6b09b4a75dc4c9d09cd36bc5ac81 --- lib/databases/mysql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/databases/mysql b/lib/databases/mysql index c2ab32e5b2..cc74b33327 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -143,7 +143,7 @@ MYSQL_PRESEED [client] user=$DATABASE_USER password=$DATABASE_PASSWORD -host=$DATABASE_HOST +host=$MYSQL_HOST EOF chmod 0600 $HOME/.my.cnf fi From 95a9ff0587adece32817b4f432588b1ab76a5972 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 12 Nov 2015 14:49:20 +1100 Subject: [PATCH 0632/2941] Add option to skip EPEL & other repo installs Add an option to skip the EPEL & other repo installs for rhel7 based platforms. 
This option can serve two purposes; firstly as described in I834f20e9ceae151788cec3649385da1274d7ba46 during platform bringup, a publically available EPEL might not be available. This will allow you to pre-configure a hand-built repo, etc. so you can continue testing. The other thing is that in a CI system you might be frequently building images and pre-installing EPEL/RDO etc. In that case this is just extra work. Change-Id: I9809449f4a43fa9b547c6e3ca92722c7f6e66d6a --- stack.sh | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/stack.sh b/stack.sh index 9b811b7b67..e31cd604c5 100755 --- a/stack.sh +++ b/stack.sh @@ -263,9 +263,7 @@ fi # Some distros need to add repos beyond the defaults provided by the vendor # to pick up required packages. -if is_fedora && [[ $DISTRO == "rhel7" ]]; then - # RHEL requires EPEL for many Open Stack dependencies - +function _install_epel_and_rdo { # NOTE: We always remove and install latest -- some environments # use snapshot images, and if EPEL version updates they break # unless we update them to latest version. @@ -295,18 +293,27 @@ EOF sudo yum-config-manager --enable epel-bootstrap yum_install epel-release || \ die $LINENO "Error installing EPEL repo, cannot continue" - # EPEL rpm has installed it's version sudo rm -f /etc/yum.repos.d/epel-bootstrap.repo # ... and also optional to be enabled sudo yum-config-manager --enable rhel-7-server-optional-rpms + # install the lastest RDO sudo yum install -y https://rdoproject.org/repos/rdo-release.rpm if is_oraclelinux; then sudo yum-config-manager --enable ol7_optional_latest ol7_addons ol7_MySQL56 fi +} + +# If you have all the repos installed above already setup (e.g. 
a CI +# situation where they are on your image) you may choose to skip this +# to speed things up +SKIP_EPEL_INSTALL=$(trueorfalse False SKIP_EPEL_INSTALL) +if is_fedora && [[ $DISTRO == "rhel7" ]] && \ + [[ ${SKIP_EPEL_INSTALL} != True ]]; then + _install_epel_and_rdo fi From cdba7b0e533b07d9ea896ced5085c5ce98ee2aaa Mon Sep 17 00:00:00 2001 From: Rob Crittenden Date: Tue, 26 May 2015 15:33:45 -0400 Subject: [PATCH 0633/2941] Specify HTTPS URLs to fix tls-proxy mode A number of new settings are required for glance, cinder and keystone to be installable when the tls-proxy service is enabled. For cinder a new public_endpoint option was added and this needs to be set to the secure port. Keystone needs the admin_endpoint and public_endpoints defined otherwise during discovery the default, non-secure versions, will be returned. The keystone authtoken identity_uri was set at its default value in the glance registry and API configuration files. Change-Id: Ibb944ad7eb000edc6bccfcded765d1976d4d46d0 Closes-Bug: #1460807 --- lib/cinder | 2 ++ lib/glance | 3 +++ lib/keystone | 3 +++ 3 files changed, 8 insertions(+) diff --git a/lib/cinder b/lib/cinder index 1307c11f7a..cc203ad414 100644 --- a/lib/cinder +++ b/lib/cinder @@ -309,6 +309,8 @@ function configure_cinder { if is_service_enabled tls-proxy; then # Set the service port for a proxy to take the original iniset $CINDER_CONF DEFAULT osapi_volume_listen_port $CINDER_SERVICE_PORT_INT + + iniset $CINDER_CONF DEFAULT public_endpoint $CINDER_SERVICE_PROTOCOL://$CINDER_SERVICE_HOST:$CINDER_SERVICE_PORT fi if [ "$SYSLOG" != "False" ]; then diff --git a/lib/glance b/lib/glance index 2eb93a46e6..5712943bca 100644 --- a/lib/glance +++ b/lib/glance @@ -167,6 +167,9 @@ function configure_glance { iniset $GLANCE_API_CONF DEFAULT bind_port $GLANCE_SERVICE_PORT_INT iniset $GLANCE_API_CONF DEFAULT public_endpoint $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT iniset $GLANCE_REGISTRY_CONF DEFAULT bind_port $GLANCE_REGISTRY_PORT_INT + + iniset 
$GLANCE_API_CONF keystone_authtoken identity_uri $KEYSTONE_AUTH_URI + iniset $GLANCE_REGISTRY_CONF keystone_authtoken identity_uri $KEYSTONE_AUTH_URI fi # Register SSL certificates if provided diff --git a/lib/keystone b/lib/keystone index 5a2afbfe02..c484795c7e 100644 --- a/lib/keystone +++ b/lib/keystone @@ -233,6 +233,9 @@ function configure_keystone { # Set the service ports for a proxy to take the originals iniset $KEYSTONE_CONF eventlet_server public_port $KEYSTONE_SERVICE_PORT_INT iniset $KEYSTONE_CONF eventlet_server admin_port $KEYSTONE_AUTH_PORT_INT + + iniset $KEYSTONE_CONF DEFAULT public_endpoint $KEYSTONE_SERVICE_URI + iniset $KEYSTONE_CONF DEFAULT admin_endpoint $KEYSTONE_AUTH_URI fi iniset $KEYSTONE_CONF DEFAULT admin_token "$SERVICE_TOKEN" From bd4048a3c50e3cd215785e187e9e40b78bd064ae Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Wed, 18 Nov 2015 10:55:22 +1300 Subject: [PATCH 0634/2941] Never uninstall python-pip on fedora Python in f23 and f22 depends on the python-pip package so removing it results in a nonfunctional system. pip on fedora installs to /usr so pip can safely override the system pip for all versions of Fedora. Change-Id: I336c7ffdf00784ca8deba7d6612a08b96a0ad098 Closes-Bug: #1467569 --- tools/install_pip.sh | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tools/install_pip.sh b/tools/install_pip.sh index ab5efb2e77..1728816890 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -111,8 +111,10 @@ get_versions # Eradicate any and all system packages -# python in f23 depends on the python-pip package -if ! { is_fedora && [[ $DISTRO == "f23" ]]; }; then +# Python in f23 and f22 depends on the python-pip package so removing it +# results in a nonfunctional system. pip on fedora installs to /usr so pip +# can safely override the system pip for all versions of fedora +if ! 
is_fedora ; then uninstall_package python-pip fi From bb9caeae00bb9e4654838f782d2e07331f4ecae4 Mon Sep 17 00:00:00 2001 From: Brian Haley Date: Mon, 16 Nov 2015 17:18:42 -0500 Subject: [PATCH 0635/2941] Consolidate two /sbin/ip commands into a single one When determining the primary IP/IPv6 addresses on a system, we can use the /sbin/ip command to filter them for us. The resulting address is parsed the same way for both address families, so we can use just a single command. Change-Id: I0471ff5a258a16a23061941ac38063dbf3d7c233 --- lib/neutron-legacy | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 85f7fc0a7c..978943dae2 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -801,13 +801,7 @@ function _move_neutron_addresses_route { DEFAULT_ROUTE_GW=$(ip -f $af r | awk "/default.+$from_intf/ { print \$3; exit }") local ADD_OVS_PORT="" - if [[ $af == "inet" ]]; then - IP_BRD=$(ip -f $af a s dev $from_intf | grep inet | awk '{ print $2, $3, $4; exit }') - fi - - if [[ $af == "inet6" ]]; then - IP_BRD=$(ip -f $af a s dev $from_intf | grep 'scope global' | sed '/temporary/d' | awk '{ print $2, $3, $4; exit }') - fi + IP_BRD=$(ip -f $af a s dev $from_intf scope global primary | grep inet | awk '{ print $2, $3, $4; exit }') if [ "$DEFAULT_ROUTE_GW" != "" ]; then ADD_DEFAULT_ROUTE="sudo ip -f $af r replace default via $DEFAULT_ROUTE_GW dev $to_intf" From 2960bfa7f63c26d94c8aed2c3e1a6ff039b530f0 Mon Sep 17 00:00:00 2001 From: Tushar Gohad Date: Tue, 17 Nov 2015 14:06:28 -0700 Subject: [PATCH 0636/2941] Add liberasurecode-dev as a swift dependency Swift requirement PyECLib won't bundle liberasurecode going forward given the package is available in common distros now. This patch adds liberasurecode-dev(el) package to the devstack debs/rpms list for Swift as a PyECLib build/install dependency. 
Change-Id: Idbc2ca3f677f1b8153ebf3a5912f4354525a55c7 --- files/debs/swift | 1 + files/rpms-suse/swift | 1 + files/rpms/swift | 1 + 3 files changed, 3 insertions(+) diff --git a/files/debs/swift b/files/debs/swift index 726786ee18..4b8ac3d793 100644 --- a/files/debs/swift +++ b/files/debs/swift @@ -1,4 +1,5 @@ curl +liberasurecode-dev make memcached sqlite3 diff --git a/files/rpms-suse/swift b/files/rpms-suse/swift index 52e0a990e7..3663b98545 100644 --- a/files/rpms-suse/swift +++ b/files/rpms-suse/swift @@ -1,4 +1,5 @@ curl +liberasurecode-devel memcached sqlite3 xfsprogs diff --git a/files/rpms/swift b/files/rpms/swift index f56a81b0d1..46dc59d74d 100644 --- a/files/rpms/swift +++ b/files/rpms/swift @@ -1,4 +1,5 @@ curl +liberasurecode-devel memcached pyxattr rsync-daemon # dist:f22,f23 From 93e2499ee1abfd2f9ed1ebda7a7d4d0deab04f80 Mon Sep 17 00:00:00 2001 From: Deepak C Shetty Date: Wed, 18 Nov 2015 12:29:33 +0530 Subject: [PATCH 0637/2941] doc: document override_defaults phase override_defaults phase was added to devstack in [1] but documentation was pending. This patch adds the same. For history around override_defaults, one can refer to the mail thread [2] Also fixes a small typo [1]: https://review.openstack.org/#/c/167933/ [2]: http://lists.openstack.org/pipermail/openstack-dev/2015-March/059621.html Change-Id: I1b58ca0ce0e4b85a1dbd710b4c426606fd4dcf45 --- doc/source/plugins.rst | 13 +++++++++++-- extras.d/README.md | 9 ++++++--- 2 files changed, 17 insertions(+), 5 deletions(-) diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst index 8bd3797cd2..b8da7e1237 100644 --- a/doc/source/plugins.rst +++ b/doc/source/plugins.rst @@ -19,7 +19,16 @@ DevStack supports a standard mechanism for including plugins from external repositories. The plugin interface assumes the following: An external git repository that includes a ``devstack/`` top level -directory. Inside this directory there can be 2 files. +directory. Inside this directory there can be 3 files. 
+ +- ``override_defaults`` - a file containing global variables that + will be sourced before the lib/* files. This allows the plugin + to override the defaults that are otherwise set in the lib/* + files. + + For example, override_defaults may export CINDER_ENABLED_BACKENDS + to include the plugin-specific storage backend and thus be able + to override the default lvm only storage backend for Cinder. - ``settings`` - a file containing global variables that will be sourced very early in the process. This is helpful if other plugins @@ -38,7 +47,7 @@ directory. Inside this directory there can be 2 files. - ``plugin.sh`` - the actual plugin. It is executed by devstack at well defined points during a ``stack.sh`` run. The plugin.sh - internal structure is discussed bellow. + internal structure is discussed below. Plugins are registered by adding the following to the localrc section diff --git a/extras.d/README.md b/extras.d/README.md index 7c2e4fe970..4cec14b4e7 100644 --- a/extras.d/README.md +++ b/extras.d/README.md @@ -14,10 +14,13 @@ The scripts are sourced at the beginning of each script that calls them. The entire `stack.sh` variable space is available. The scripts are sourced with one or more arguments, the first of which defines the hook phase: - source | stack | unstack | clean + override_defaults | source | stack | unstack | clean - source: always called first in any of the scripts, used to set the - initial defaults in a lib/* script or similar + override_defaults: always called first in any of the scripts, used to + override defaults (if need be) that are otherwise set in lib/* scripts + + source: called by stack.sh. Used to set the initial defaults in a lib/* + script or similar stack: called by stack.sh. 
There are four possible values for the second arg to distinguish the phase stack.sh is in: From 7860f2ba3189b0361693c8ee9c65d8d03fb115d6 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 17 Nov 2015 11:59:07 -0500 Subject: [PATCH 0638/2941] install ebtables locking workaround ebtables is racing with itself when nova and libvirt attempt to create rules at the same time in the nat table. ebtables now has an explicit --concurrent flag, that all tools must opt into to prevent ebtables from inherently being unsafe to run. libvirt gained this support in 1.2.11, which is too new for our ubuntu primary testing environment. Nova still hasn't added this support, though even if it did, we'd run into the issue with libvirt. We can do the most ghetto thing possible and create a wrapper for ebtables that does explicit locking on it's own. It's pretty terrible, but it should work. And it is the kind of work around that people unable to upgrade libvirt will probably need to do. This is an opt in value which we should set in the gate to True. Related-Bug: #1501558 Change-Id: Ic6fa847eba34c21593b9df86a1c2c179534d0ba5 --- files/ebtables.workaround | 23 +++++++++++++++++++++ lib/nova_plugins/functions-libvirt | 5 +++++ stackrc | 10 +++++++++ tools/install_ebtables_workaround.sh | 31 ++++++++++++++++++++++++++++ 4 files changed, 69 insertions(+) create mode 100644 files/ebtables.workaround create mode 100755 tools/install_ebtables_workaround.sh diff --git a/files/ebtables.workaround b/files/ebtables.workaround new file mode 100644 index 0000000000..c8af51fad5 --- /dev/null +++ b/files/ebtables.workaround @@ -0,0 +1,23 @@ +#!/bin/bash +# +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# +# This is a terrible, terrible, truly terrible work around for +# environments that have libvirt < 1.2.11. ebtables requires that you +# specifically tell it you would like to not race and get punched in +# the face when 2 run at the same time with a --concurrent flag. + +flock -w 300 /var/lock/ebtables.nova /sbin/ebtables.real $@ diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index 78c59786d8..045fc8b919 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -31,6 +31,11 @@ function install_libvirt { fi install_package libvirt-bin libvirt-dev pip_install_gr libvirt-python + if [[ "$EBTABLES_RACE_FIX" == "True" ]]; then + # Work around for bug #1501558. We can remove this once we + # get to a version of Ubuntu that has new enough libvirt. + TOP_DIR=$TOP_DIR $TOP_DIR/tools/install_ebtables_workaround.sh + fi #pip_install_gr elif is_fedora || is_suse; then install_package kvm diff --git a/stackrc b/stackrc index 76a5756dde..53ed25d83e 100644 --- a/stackrc +++ b/stackrc @@ -769,6 +769,16 @@ GIT_DEPTH=${GIT_DEPTH:-0} # Use native SSL for servers in ``SSL_ENABLED_SERVICES`` USE_SSL=$(trueorfalse False USE_SSL) +# ebtables is inherently racey. If you run it by two or more processes +# simultaneously it will collide, badly, in the kernel and produce +# failures or corruption of ebtables. The only way around it is for +# all tools running ebtables to only ever do so with the --concurrent +# flag. This requires libvirt >= 1.2.11. 
+# +# If you don't have this then the following work around will replace +# ebtables with a wrapper script so that it is safe to run without +# that flag. +EBTABLES_RACE_FIX=$(trueorfalse False EBTABLES_RACE_FIX) # Following entries need to be last items in file diff --git a/tools/install_ebtables_workaround.sh b/tools/install_ebtables_workaround.sh new file mode 100755 index 0000000000..45ced87f13 --- /dev/null +++ b/tools/install_ebtables_workaround.sh @@ -0,0 +1,31 @@ +#!/bin/bash -eu +# +# Copyright 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# +# +# This replaces the ebtables on your system with a wrapper script that +# does implicit locking. This is needed if libvirt < 1.2.11 on your platform. + +EBTABLES=/sbin/ebtables +EBTABLESREAL=/sbin/ebtables.real +FILES=$TOP_DIR/files + +if [[ -f "$EBTABLES" ]]; then + if file $EBTABLES | grep ELF; then + sudo mv $EBTABLES $EBTABLESREAL + sudo install -m 0755 $FILES/ebtables.workaround $EBTABLES + echo "Replaced ebtables with locking workaround" + fi +fi From 2ba36cda7940d630514a7864132837191d8c561f Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Thu, 12 Nov 2015 13:52:36 +1100 Subject: [PATCH 0639/2941] Add vercmp function The existing vercmp_numbers function only handles, as the name says, numbers. I noticed that "sort" has had a version sort for a long time [1] and, rather than re-implement it badly, use this as a version of vercmp that works a bit more naturally. 
This is intended to be used in an "if" statement as in prog_ver=$(prog_ver --version | grep ...) if vercmp $prog_ver "<" 2.0; then ... fi A test-case is added to test the basic features and some edge-cases. [1] http://git.savannah.gnu.org/gitweb/?p=coreutils.git;a=commitdiff;h=4c9fae4e97d95a9f89d1399a8aeb03051f0fec96 Change-Id: Ie55283acdc40a095b80b2631a55310072883ad0d --- functions | 46 +++++++++++++++++++++++++++++++++++++++++++ tests/test_vercmp.sh | 47 ++++++++++++++++++++++++++++++++++++++++++++ tests/unittest.sh | 45 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 138 insertions(+) create mode 100755 tests/test_vercmp.sh diff --git a/functions b/functions index 34da1ba733..e5e3400ff8 100644 --- a/functions +++ b/functions @@ -527,12 +527,58 @@ function vercmp_numbers { typeset v1=$1 v2=$2 sep typeset -a ver1 ver2 + deprecated "vercmp_numbers is deprecated for more generic vercmp" + IFS=. read -ra ver1 <<< "$v1" IFS=. read -ra ver2 <<< "$v2" _vercmp_r "${#ver1[@]}" "${ver1[@]}" "${ver2[@]}" } +# vercmp ver1 op ver2 +# Compare VER1 to VER2 +# - op is one of < <= == >= > +# - returns true if satisified +# e.g. +# if vercmp 1.0 "<" 2.0; then +# ... +# fi +function vercmp { + local v1=$1 + local op=$2 + local v2=$3 + local result + + # sort the two numbers with sort's "-V" argument. Based on if v2 + # swapped places with v1, we can determine ordering. + result=$(echo -e "$v1\n$v2" | sort -V | head -1) + + case $op in + "==") + [ "$v1" = "$v2" ] + return + ;; + ">") + [ "$v1" != "$v2" ] && [ "$result" = "$v2" ] + return + ;; + "<") + [ "$v1" != "$v2" ] && [ "$result" = "$v1" ] + return + ;; + ">=") + [ "$result" = "$v2" ] + return + ;; + "<=") + [ "$result" = "$v1" ] + return + ;; + *) + die $LINENO "unrecognised op: $op" + ;; + esac +} # This function sets log formatting options for colorizing log # output to stdout. It is meant to be called by lib modules. 
diff --git a/tests/test_vercmp.sh b/tests/test_vercmp.sh new file mode 100755 index 0000000000..c88bf86d7e --- /dev/null +++ b/tests/test_vercmp.sh @@ -0,0 +1,47 @@ +#!/usr/bin/env bash + +# Tests for DevStack vercmp functionality + +TOP=$(cd $(dirname "$0")/.. && pwd) + +# Import common functions +source $TOP/functions +source $TOP/tests/unittest.sh + +assert_true "numeric gt" vercmp 2.0 ">" 1.0 +assert_true "numeric gte" vercmp 2.0 ">=" 1.0 +assert_true "numeric gt" vercmp 1.0.1 ">" 1.0 +assert_true "numeric gte" vercmp 1.0.1 ">=" 1.0 +assert_true "alpha gt" vercmp 1.0.1b ">" 1.0.1a +assert_true "alpha gte" vercmp 1.0.1b ">=" 1.0.1a +assert_true "alpha gt" vercmp b ">" a +assert_true "alpha gte" vercmp b ">=" a +assert_true "alpha gt" vercmp 2.0-rc3 ">" 2.0-rc1 +assert_true "alpha gte" vercmp 2.0-rc3 ">=" 2.0-rc1 + +assert_false "numeric gt fail" vercmp 1.0 ">" 1.0 +assert_true "numeric gte" vercmp 1.0 ">=" 1.0 +assert_false "numeric gt fail" vercmp 0.9 ">" 1.0 +assert_false "numeric gte fail" vercmp 0.9 ">=" 1.0 +assert_false "numeric gt fail" vercmp 0.9.9 ">" 1.0 +assert_false "numeric gte fail" vercmp 0.9.9 ">=" 1.0 +assert_false "numeric gt fail" vercmp 0.9a.9 ">" 1.0.1 +assert_false "numeric gte fail" vercmp 0.9a.9 ">=" 1.0.1 + +assert_false "numeric lt" vercmp 1.0 "<" 1.0 +assert_true "numeric lte" vercmp 1.0 "<=" 1.0 +assert_true "numeric lt" vercmp 1.0 "<" 1.0.1 +assert_true "numeric lte" vercmp 1.0 "<=" 1.0.1 +assert_true "alpha lt" vercmp 1.0.1a "<" 1.0.1b +assert_true "alpha lte" vercmp 1.0.1a "<=" 1.0.1b +assert_true "alpha lt" vercmp a "<" b +assert_true "alpha lte" vercmp a "<=" b +assert_true "alpha lt" vercmp 2.0-rc1 "<" 2.0-rc3 +assert_true "alpha lte" vercmp 2.0-rc1 "<=" 2.0-rc3 + +assert_true "eq" vercmp 1.0 "==" 1.0 +assert_true "eq" vercmp 1.0.1 "==" 1.0.1 +assert_false "eq fail" vercmp 1.0.1 "==" 1.0.2 +assert_false "eq fail" vercmp 2.0-rc1 "==" 2.0-rc2 + +report_results diff --git a/tests/unittest.sh b/tests/unittest.sh index 
2570319fbf..6c697d7925 100644 --- a/tests/unittest.sh +++ b/tests/unittest.sh @@ -92,6 +92,51 @@ function assert_empty { fi } +# assert the arguments evaluate to true +# assert_true "message" arg1 arg2 +function assert_true { + local lineno + lineno=`caller 0 | awk '{print $1}'` + local function + function=`caller 0 | awk '{print $2}'` + local msg=$1 + shift + + $@ + if [ $? -eq 0 ]; then + PASS=$((PASS+1)) + echo "PASS: $function:L$lineno - $msg" + else + FAILED_FUNCS+="$function:L$lineno\n" + echo "ERROR: test failed in $function:L$lineno!" + echo " $msg" + ERROR=$((ERROR+1)) + fi +} + +# assert the arguments evaluate to false +# assert_false "message" arg1 arg2 +function assert_false { + local lineno + lineno=`caller 0 | awk '{print $1}'` + local function + function=`caller 0 | awk '{print $2}'` + local msg=$1 + shift + + $@ + if [ $? -eq 0 ]; then + FAILED_FUNCS+="$function:L$lineno\n" + echo "ERROR: test failed in $function:L$lineno!" + echo " $msg" + ERROR=$((ERROR+1)) + else + PASS=$((PASS+1)) + echo "PASS: $function:L$lineno - $msg" + fi +} + + # Print a summary of passing and failing tests and exit # (with an error if we have failed tests) # usage: report_results From e0129f3c248d8f246b470e21982cfefb919482b6 Mon Sep 17 00:00:00 2001 From: Attila Fazekas Date: Thu, 19 Nov 2015 10:47:58 +0100 Subject: [PATCH 0640/2941] Fedora 23 supported This change allows to use f23 without the FORCE=yes option. Make sure you have latest kernel, or you have kernel-modules installed for the running kernel. f21 support will be removed when the gate jobs are upgraded to use newer fedora version. 
Change-Id: I6e3e64088187a7f6da745e3cfb07524fd31782ab --- files/rpms/general | 4 ++-- lib/ceph | 2 +- stack.sh | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/files/rpms/general b/files/rpms/general index 40b06f489b..280468283b 100644 --- a/files/rpms/general +++ b/files/rpms/general @@ -8,9 +8,9 @@ gcc-c++ gettext # used for compiling message catalogs git-core graphviz # needed only for docs -iptables-services # NOPRIME f21,f22 +iptables-services # NOPRIME f21,f22,f23 java-1.7.0-openjdk-headless # NOPRIME rhel7 -java-1.8.0-openjdk-headless # NOPRIME f21,f22 +java-1.8.0-openjdk-headless # NOPRIME f21,f22,f23 libffi-devel libjpeg-turbo-devel # Pillow 3.0.0 libxml2-devel # lxml diff --git a/lib/ceph b/lib/ceph index 29d2aca54f..f573136a4e 100644 --- a/lib/ceph +++ b/lib/ceph @@ -116,7 +116,7 @@ function undefine_virsh_secret { # check_os_support_ceph() - Check if the operating system provides a decent version of Ceph function check_os_support_ceph { - if [[ ! ${DISTRO} =~ (trusty|f21|f22) ]]; then + if [[ ! ${DISTRO} =~ (trusty|f21|f22|f23) ]]; then echo "WARNING: your distro $DISTRO does not provide (at least) the Firefly release. Please use Ubuntu Trusty or Fedora 20 (and higher)" if [[ "$FORCE_CEPH_INSTALL" != "yes" ]]; then die $LINENO "If you wish to install Ceph on this distribution anyway run with FORCE_CEPH_INSTALL=yes" diff --git a/stack.sh b/stack.sh index a3d943a4df..537f81e253 100755 --- a/stack.sh +++ b/stack.sh @@ -192,7 +192,7 @@ source $TOP_DIR/stackrc # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (precise|trusty|vivid|wily|7.0|wheezy|sid|testing|jessie|f21|f22|rhel7) ]]; then +if [[ ! 
${DISTRO} =~ (precise|trusty|vivid|wily|7.0|wheezy|sid|testing|jessie|f21|f22|f23|rhel7) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" From 7f0be4fc5f5bd8f45087e53c94fae3b9146f486e Mon Sep 17 00:00:00 2001 From: "Swapnil Kulkarni (coolsvap)" Date: Fri, 20 Nov 2015 10:52:59 +0530 Subject: [PATCH 0641/2941] Updated Typos in devstack (1/5) Updated HACKING.rst for typos (2/5) Updated typos in lib/dlm (3/5) Updated typos in lib/ironic (4/5) Updated typos in unittest.sh (5/5) Updated typos in test_meta_config.sh Change-Id: I7aafa3af69df9dc6a5923a8557f380d48b73433a --- HACKING.rst | 2 +- lib/dlm | 2 +- lib/ironic | 4 ++-- tests/test_meta_config.sh | 2 +- tests/unittest.sh | 2 +- 5 files changed, 6 insertions(+), 6 deletions(-) diff --git a/HACKING.rst b/HACKING.rst index d66687e351..d763c75b8b 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -328,7 +328,7 @@ There are some broad criteria that will be followed when reviewing your change * **Is it passing tests** -- your change will not be reviewed - throughly unless the official CI has run successfully against it. + thoroughly unless the official CI has run successfully against it. * **Does this belong in DevStack** -- DevStack reviewers have a default position of "no" but are ready to be convinced by your diff --git a/lib/dlm b/lib/dlm index f68ee26b4b..95e9b0ac3d 100644 --- a/lib/dlm +++ b/lib/dlm @@ -46,7 +46,7 @@ ZOOKEEPER_CONF_DIR=/etc/zookeeper # - cleanup_dlm # This should be declared in the settings file of any plugin or -# service that needs to have a dlm in their enviroment. +# service that needs to have a dlm in their environment. 
function use_dlm { enable_service $(dlm_backend) } diff --git a/lib/ironic b/lib/ironic index 016e639d03..6a32983b24 100644 --- a/lib/ironic +++ b/lib/ironic @@ -92,7 +92,7 @@ IRONIC_VM_LOG_DIR=${IRONIC_VM_LOG_DIR:-$IRONIC_DATA_DIR/logs/} # Use DIB to create deploy ramdisk and kernel. IRONIC_BUILD_DEPLOY_RAMDISK=$(trueorfalse True IRONIC_BUILD_DEPLOY_RAMDISK) # If not use DIB, these files are used as deploy ramdisk/kernel. -# (The value must be a absolute path) +# (The value must be an absolute path) IRONIC_DEPLOY_RAMDISK=${IRONIC_DEPLOY_RAMDISK:-} IRONIC_DEPLOY_KERNEL=${IRONIC_DEPLOY_KERNEL:-} IRONIC_DEPLOY_ELEMENT=${IRONIC_DEPLOY_ELEMENT:-deploy-ironic} @@ -672,7 +672,7 @@ function configure_iptables { # enable tftp natting for allowing connections to HOST_IP's tftp server sudo modprobe nf_conntrack_tftp sudo modprobe nf_nat_tftp - # explicitly allow DHCP - packets are occassionally being dropped here + # explicitly allow DHCP - packets are occasionally being dropped here sudo iptables -I INPUT -p udp --dport 67:68 --sport 67:68 -j ACCEPT || true # nodes boot from TFTP and callback to the API server listening on $HOST_IP sudo iptables -I INPUT -d $HOST_IP -p udp --dport 69 -j ACCEPT || true diff --git a/tests/test_meta_config.sh b/tests/test_meta_config.sh index f3e94af8f8..327fb56185 100755 --- a/tests/test_meta_config.sh +++ b/tests/test_meta_config.sh @@ -24,7 +24,7 @@ function check_result { } # mock function-common:die so that it does not -# interupt our test script +# interrupt our test script function die { exit -1 } diff --git a/tests/unittest.sh b/tests/unittest.sh index 2570319fbf..26b5b8e592 100644 --- a/tests/unittest.sh +++ b/tests/unittest.sh @@ -46,7 +46,7 @@ function failed { ERROR=$((ERROR+1)) } -# assert string comparision of val1 equal val2, printing out msg +# assert string comparison of val1 equal val2, printing out msg # usage: assert_equal val1 val2 msg function assert_equal { local lineno From 255a58fec613b1304c8396cd969c72043073be30 Mon 
Sep 17 00:00:00 2001 From: Komei Shimamura Date: Fri, 20 Nov 2015 18:36:05 +0900 Subject: [PATCH 0642/2941] Add existing devstack plugins to the devstack plugin list Change-Id: I336a4c652a78e778e39652f1f16ff69be10ab065 --- doc/source/plugin-registry.rst | 34 ++++++++++++++++++++++++---------- 1 file changed, 24 insertions(+), 10 deletions(-) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 49b3a7fc02..429f31af2d 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -76,16 +76,30 @@ Alternate Configs Additional Services =================== -+----------------+--------------------------------------------------+------------+ -| Plugin Name | URL | Comments | -| | | | -+----------------+--------------------------------------------------+------------+ -|ec2-api |git://git.openstack.org/openstack/ec2-api |[as1]_ | -+----------------+--------------------------------------------------+------------+ -|ironic-inspector|git://git.openstack.org/openstack/ironic-inspector| | -+----------------+--------------------------------------------------+------------+ -| | | | -+----------------+--------------------------------------------------+------------+ ++-----------------+------------------------------------------------------------+------------+ +| Plugin Name | URL | Comments | +| | | | ++-----------------+------------------------------------------------------------+------------+ +|amqp1 |git://git.openstack.org/openstack/devstack-plugin-amqp1 | | ++-----------------+------------------------------------------------------------+------------+ +|bdd |git://git.openstack.org/openstack/devstack-plugin-bdd | | ++-----------------+------------------------------------------------------------+------------+ +|ec2-api |git://git.openstack.org/openstack/ec2-api |[as1]_ | ++-----------------+------------------------------------------------------------+------------+ +|glusterfs 
|git://git.openstack.org/openstack/devstack-plugin-glusterfs | | ++-----------------+------------------------------------------------------------+------------+ +|hdfs |git://git.openstack.org/openstack/devstack-plugin-hdfs | | ++-----------------+------------------------------------------------------------+------------+ +|ironic-inspector |git://git.openstack.org/openstack/ironic-inspector | | ++-----------------+------------------------------------------------------------+------------+ +|pika |git://git.openstack.org/openstack/devstack-plugin-pika | | ++-----------------+------------------------------------------------------------+------------+ +|sheepdog |git://git.openstack.org/openstack/devstack-plugin-sheepdog | | ++-----------------+------------------------------------------------------------+------------+ +|zmq |git://git.openstack.org/openstack/devstack-plugin-zmq | | ++-----------------+------------------------------------------------------------+------------+ +| | | | ++-----------------+------------------------------------------------------------+------------+ .. [as1] first functional devstack plugin, hence why used in most of the examples. From c18b4a4e5c4ffa25fba1da0ec99c3ff061d1472e Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 18 Nov 2015 10:02:31 -0500 Subject: [PATCH 0643/2941] remove precise from the supported list We haven't been testing master on precise for a long time, and changes are coming that won't work on precise. If people want to keep running on precise they should realize they are on their own with it. And that we won't block any changes that use it. 
Change-Id: I3697f1c2409ad08f49793dabb37011606188e6f6 --- stack.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stack.sh b/stack.sh index 537f81e253..8625b5aaeb 100755 --- a/stack.sh +++ b/stack.sh @@ -192,7 +192,7 @@ source $TOP_DIR/stackrc # Warn users who aren't on an explicitly supported distro, but allow them to # override check and attempt installation with ``FORCE=yes ./stack`` -if [[ ! ${DISTRO} =~ (precise|trusty|vivid|wily|7.0|wheezy|sid|testing|jessie|f21|f22|f23|rhel7) ]]; then +if [[ ! ${DISTRO} =~ (trusty|vivid|wily|7.0|wheezy|sid|testing|jessie|f21|f22|f23|rhel7) ]]; then echo "WARNING: this script has not been tested on $DISTRO" if [[ "$FORCE" != "yes" ]]; then die $LINENO "If you wish to run this script anyway run with FORCE=yes" From f0dd6894af777d53c6d158d0720ea1b189f065a7 Mon Sep 17 00:00:00 2001 From: vsaienko Date: Wed, 18 Nov 2015 10:12:34 +0200 Subject: [PATCH 0644/2941] Use autogenerated flavor id Fix to trove has been merged, and autogenerated flavor ID is available since Kilo. Related-Bug: #1333852 Change-Id: Ie4b3dd11a23fa5f91cf9ff22dd05f1afd0532cb4 --- lib/ironic | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/lib/ironic b/lib/ironic index 016e639d03..294115cafe 100644 --- a/lib/ironic +++ b/lib/ironic @@ -652,13 +652,9 @@ function enroll_nodes { total_cpus=$((total_cpus+$ironic_node_cpu)) done < $ironic_hwinfo_file - # create the nova flavor - # NOTE(adam_g): Attempting to use an autogenerated UUID for flavor id here uncovered - # bug (LP: #1333852) in Trove. This can be changed to use an auto flavor id when the - # bug is fixed in Juno. 
local adjusted_disk adjusted_disk=$(($ironic_node_disk - $ironic_ephemeral_disk)) - nova flavor-create --ephemeral $ironic_ephemeral_disk baremetal 551 $ironic_node_ram $adjusted_disk $ironic_node_cpu + nova flavor-create --ephemeral $ironic_ephemeral_disk baremetal auto $ironic_node_ram $adjusted_disk $ironic_node_cpu nova flavor-key baremetal set "cpu_arch"="x86_64" From a366b97c0a76304bf0ddf7eb78e0efb4493df221 Mon Sep 17 00:00:00 2001 From: obutenko Date: Tue, 20 Oct 2015 19:07:04 +0300 Subject: [PATCH 0645/2941] Add flag for test_incremental_backup Forced creation of incremental backup is not implemented in old release (Juno and Kilo). The test is skipped by default for Juno and Kilo gates. Need to add flag to unskip this test in new release. New test: Idde2c14aba78382b1063ce20269f4832f9fdd583 Change-Id: I565b5941d6067644fc9ca6cb0891d97f4946e031 Partial-Bug: #1506394 --- lib/tempest | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/tempest b/lib/tempest index 76fd6cac74..202f6d93e7 100644 --- a/lib/tempest +++ b/lib/tempest @@ -463,6 +463,8 @@ function configure_tempest { iniset $TEMPEST_CONFIG volume-feature-enabled bootable True # TODO(jordanP): Remove the extend_with_snapshot flag when Juno is end of life. iniset $TEMPEST_CONFIG volume-feature-enabled extend_with_snapshot True + # TODO(obutenko): Remove the incremental_backup_force flag when Kilo and Juno is end of life. + iniset $TEMPEST_CONFIG volume-feature-enabled incremental_backup_force True local volume_api_extensions=${VOLUME_API_EXTENSIONS:-"all"} if [[ ! -z "$DISABLE_VOLUME_API_EXTENSIONS" ]]; then From bdc0fa8ab13ec5c75f1c793ca33f5a773fac1abc Mon Sep 17 00:00:00 2001 From: John Kasperski Date: Mon, 23 Nov 2015 11:56:33 -0600 Subject: [PATCH 0646/2941] Neutron: Clean up documentation typo Remove duplicate SERVICE_HOST and MYSQL_HOST settings in the examples. 
Change-Id: I0e102b671f03ccb183d30ec6a762d00ebcf1e4b5 --- doc/source/guides/neutron.rst | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/doc/source/guides/neutron.rst b/doc/source/guides/neutron.rst index 996c7d1f59..9dcb654a27 100644 --- a/doc/source/guides/neutron.rst +++ b/doc/source/guides/neutron.rst @@ -354,8 +354,6 @@ controller node. HOST_IP=10.0.0.2 SERVICE_HOST=10.0.0.2 MYSQL_HOST=10.0.0.2 - SERVICE_HOST=10.0.0.2 - MYSQL_HOST=10.0.0.2 RABBIT_HOST=10.0.0.2 GLANCE_HOSTPORT=10.0.0.2:9292 PUBLIC_INTERFACE=eth1 @@ -397,7 +395,7 @@ would be a public IP address range that you or your organization has allocated to you, so that you could access your instances from the public internet. -The following is the DevStack configuration on +The following is the DevStack configuration on compute node 1. :: @@ -405,8 +403,6 @@ compute node 1. HOST_IP=10.0.0.3 SERVICE_HOST=10.0.0.2 MYSQL_HOST=10.0.0.2 - SERVICE_HOST=10.0.0.2 - MYSQL_HOST=10.0.0.2 RABBIT_HOST=10.0.0.2 GLANCE_HOSTPORT=10.0.0.2:9292 ADMIN_PASSWORD=secrete From 2ed28132e03066edb52fab6e640a96ee2d3424ac Mon Sep 17 00:00:00 2001 From: Einst Crazy Date: Thu, 5 Nov 2015 16:38:00 +0800 Subject: [PATCH 0647/2941] Add create stack user to quickstart document Add instructions on creating a user to the documentation, and call out that the standard cloud-users are probably ok too Change-Id: I1119a43f1d5ae7c0c208bf0cc16e2f7bee29a69d --- doc/source/index.rst | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/doc/source/index.rst b/doc/source/index.rst index b65730ffe8..ec345c9f64 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -44,6 +44,18 @@ Quick Start We recommend at least a :ref:`minimal-configuration` be set up. +#. Add Stack User + + Devstack should be run as a non-root user with sudo enabled + (standard logins to cloud images such as "ubuntu" or "cloud-user" + are usually fine). 
+ + You can quickly create a separate `stack` user to run DevStack with + + :: + + devstack/tools/create-stack-user.sh; su stack + #. Start the install :: From d663e29d40b04017c515c19891bb846ea984acde Mon Sep 17 00:00:00 2001 From: Kahou Lei Date: Sat, 24 Oct 2015 12:18:57 -0700 Subject: [PATCH 0648/2941] Ensure the Linux Bridge agent can be used with provider networking The root cause is that when provider network is used, devstack is trying to build ovs related interface. We need to make a condition such that if linux bridge is used, don't build any ovs related interface. Change-Id: I7f26ce7893a0ecce55b3467cd5621abf25745b8e Closes-bug: #1509705 --- lib/neutron-legacy | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 4e51425ffc..ecc45edbf7 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -553,9 +553,11 @@ function create_neutron_initial_network { die_if_not_set $LINENO SUBNET_V6_ID "Failure creating SUBNET_V6_ID for $PROVIDER_SUBNET_NAME_V6 $TENANT_ID" fi - sudo ip link set $OVS_PHYSICAL_BRIDGE up - sudo ip link set br-int up - sudo ip link set $PUBLIC_INTERFACE up + if [[ $Q_AGENT == "openvswitch" ]]; then + sudo ip link set $OVS_PHYSICAL_BRIDGE up + sudo ip link set br-int up + sudo ip link set $PUBLIC_INTERFACE up + fi else NET_ID=$(neutron net-create --tenant-id $TENANT_ID "$PRIVATE_NETWORK_NAME" | grep ' id ' | get_field 2) die_if_not_set $LINENO NET_ID "Failure creating NET_ID for $PRIVATE_NETWORK_NAME $TENANT_ID" @@ -701,7 +703,7 @@ function start_neutron_service_and_check { function start_neutron_l2_agent { run_process q-agt "$AGENT_BINARY --config-file $NEUTRON_CONF --config-file /$Q_PLUGIN_CONF_FILE" - if is_provider_network; then + if is_provider_network && [[ $Q_AGENT == "openvswitch" ]]; then sudo ovs-vsctl --no-wait -- --may-exist add-port $OVS_PHYSICAL_BRIDGE $PUBLIC_INTERFACE sudo ip link set $OVS_PHYSICAL_BRIDGE up sudo ip link set br-int up From 
ad69e69e3f278bd28319224035997e11477617c4 Mon Sep 17 00:00:00 2001 From: Arun S A G Date: Fri, 20 Nov 2015 20:01:24 -0800 Subject: [PATCH 0649/2941] Set unprovision and active timeout to match build_timeout The build_timeout for the ironic baremetal build is at 340s. Modify the unprovision_timeout and active_timeout to match BUILD_TIMEOUT to avoid frequent failures during IPA gate jobs. Change-Id: Idfdc54210e33c71719c7fd0c905d0b802809e173 Related-Bug: #1393099 --- lib/tempest | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/tempest b/lib/tempest index 76fd6cac74..0c54f9737a 100644 --- a/lib/tempest +++ b/lib/tempest @@ -508,7 +508,8 @@ function configure_tempest { # Baremetal if [ "$VIRT_DRIVER" = "ironic" ] ; then iniset $TEMPEST_CONFIG baremetal driver_enabled True - iniset $TEMPEST_CONFIG baremetal unprovision_timeout 300 + iniset $TEMPEST_CONFIG baremetal unprovision_timeout $BUILD_TIMEOUT + iniset $TEMPEST_CONFIG baremetal active_timeout $BUILD_TIMEOUT iniset $TEMPEST_CONFIG baremetal deploy_img_dir $FILES iniset $TEMPEST_CONFIG baremetal node_uuid $IRONIC_NODE_UUID iniset $TEMPEST_CONFIG compute-feature-enabled change_password False From 0b4c83a07e09eead9dad88f5b8349574e53b1c45 Mon Sep 17 00:00:00 2001 From: Rafael Folco Date: Thu, 26 Nov 2015 10:08:36 -0600 Subject: [PATCH 0650/2941] Single call for 'nova flavor-list' Code simplification only, no functional changes. Change-Id: I0b836bc77c0553528e3bc9e0cea98d59856ccdf5 --- lib/tempest | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/lib/tempest b/lib/tempest index 76fd6cac74..c3276d8a51 100644 --- a/lib/tempest +++ b/lib/tempest @@ -196,8 +196,8 @@ function configure_tempest { if is_service_enabled nova; then # If ``DEFAULT_INSTANCE_TYPE`` is not declared, use the new behavior # Tempest creates its own instance types + available_flavors=$(nova flavor-list) if [[ -z "$DEFAULT_INSTANCE_TYPE" ]]; then - available_flavors=$(nova flavor-list) if [[ ! 
( $available_flavors =~ 'm1.nano' ) ]]; then nova flavor-create m1.nano 42 64 0 1 fi @@ -210,15 +210,14 @@ function configure_tempest { else # Check Nova for existing flavors, if ``DEFAULT_INSTANCE_TYPE`` is set use it. boto_instance_type=$DEFAULT_INSTANCE_TYPE - flavor_lines=`nova flavor-list` IFS=$'\r\n' flavors="" - for line in $flavor_lines; do + for line in $available_flavors; do f=$(echo $line | awk "/ $DEFAULT_INSTANCE_TYPE / { print \$2 }") flavors="$flavors $f" done - for line in $flavor_lines; do + for line in $available_flavors; do flavors="$flavors `echo $line | grep -v "^\(|\s*ID\|+--\)" | cut -d' ' -f2`" done From be3e553556a1a89f5046db79d3bc88fcad1d982a Mon Sep 17 00:00:00 2001 From: vsaienko Date: Mon, 23 Nov 2015 16:07:21 +0200 Subject: [PATCH 0651/2941] Add discussion of LIBS_FROM_GIT Add a pointer to installing clients via LIBS_FROM_GIT to local.conf sample. Mention in the git tree setup that the projects within are usually installed via released pip versions. Change-Id: I245094e51ea4a8ce983f6a1e48b6ab7ca5d309d0 --- samples/local.conf | 10 +++++++++- stackrc | 2 ++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/samples/local.conf b/samples/local.conf index b92097dd8d..34c9e8b48e 100644 --- a/samples/local.conf +++ b/samples/local.conf @@ -63,7 +63,8 @@ LOGDAYS=2 # Using milestone-proposed branches # --------------------------------- -# Uncomment these to grab the milestone-proposed branches from the repos: +# Uncomment these to grab the milestone-proposed branches from the +# repos: #CINDER_BRANCH=milestone-proposed #GLANCE_BRANCH=milestone-proposed #HORIZON_BRANCH=milestone-proposed @@ -74,6 +75,13 @@ LOGDAYS=2 #NEUTRON_BRANCH=milestone-proposed #SWIFT_BRANCH=milestone-proposed +# Using git versions of clients +# ----------------------------- +# By default clients are installed from pip. See LIBS_FROM_GIT in +# stackrc for details on getting clients from specific branches or +# revisions. e.g. 
+ LIBS_FROM_GIT="python-ironicclient" + IRONICCLIENT_BRANCH=refs/changes/44/2.../1 # Swift # ----- diff --git a/stackrc b/stackrc index 23a4a7c4c4..cfdcd4d6b2 100644 --- a/stackrc +++ b/stackrc @@ -268,6 +268,7 @@ GITBRANCH["tempest-lib"]=${TEMPEST_LIB_BRANCH:-master} ############## # # OpenStack Client Library Components +# Note default install is from pip, see LIBS_FROM_GIT # ############## @@ -317,6 +318,7 @@ GITDIR["python-openstackclient"]=$DEST/python-openstackclient ################### # # Oslo Libraries +# Note default install is from pip, see LIBS_FROM_GIT # ################### From 523f48803609b35350b624244fa73b1030c1d5fa Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Tue, 13 Oct 2015 11:03:03 +1100 Subject: [PATCH 0652/2941] Namespace XTRACE commands I noticed this when debugging some grenade issues failures. An include of grenade/functions stores the current value of XTRACE (on) and disables xtrace for the rest of the import. We then include devstack's "functions" library, which now overwrites the stored value of XTRACE with the current state; i.e. disabled. When it finishes it restores the prior state (disabled), and then grenade restores the same value of XTRACE (disabled). The result is that xtrace is incorrectly disabled until the next time it just happens to be turned on. The solution is to name-space the store of the current-value of xtrace so when we finish sourcing a file, we always restore the tracing value to what it was when we entered. Some files had already discovered this. In general there is inconsistency around the setting of the variable, and a lot of obvious copy-paste. This brings consistency across all files by using _XTRACE_* prefixes for the store/restore of tracing values. 
Change-Id: Iba7739eada5711d9c269cb4127fa712e9f961695 --- functions | 4 ++-- functions-common | 4 ++-- inc/meta-config | 4 ++-- lib/apache | 4 ++-- lib/ceph | 4 ++-- lib/cinder | 4 ++-- lib/cinder_backends/ceph | 4 ++-- lib/cinder_backends/glusterfs | 4 ++-- lib/cinder_backends/lvm | 4 ++-- lib/cinder_backends/netapp_iscsi | 4 ++-- lib/cinder_backends/netapp_nfs | 4 ++-- lib/cinder_backends/nfs | 4 ++-- lib/cinder_backends/solidfire | 4 ++-- lib/cinder_backends/vmdk | 4 ++-- lib/cinder_backends/xiv | 4 ++-- lib/cinder_plugins/XenAPINFS | 4 ++-- lib/cinder_plugins/glusterfs | 4 ++-- lib/cinder_plugins/nfs | 4 ++-- lib/cinder_plugins/sheepdog | 4 ++-- lib/cinder_plugins/vsphere | 4 ++-- lib/database | 4 ++-- lib/databases/mysql | 4 ++-- lib/databases/postgresql | 4 ++-- lib/dlm | 4 ++-- lib/dstat | 4 ++-- lib/glance | 4 ++-- lib/heat | 4 ++-- lib/horizon | 4 ++-- lib/infra | 4 ++-- lib/ironic | 8 ++++---- lib/keystone | 4 ++-- lib/ldap | 4 ++-- lib/lvm | 4 ++-- lib/neutron-legacy | 4 ++-- lib/neutron_plugins/bigswitch_floodlight | 4 ++-- lib/neutron_plugins/brocade | 4 ++-- lib/neutron_plugins/cisco | 4 ++-- lib/neutron_plugins/embrane | 5 +++-- lib/neutron_plugins/linuxbridge_agent | 4 ++-- lib/neutron_plugins/ml2 | 4 ++-- lib/neutron_plugins/nuage | 4 ++-- lib/neutron_plugins/openvswitch | 5 +++-- lib/neutron_plugins/openvswitch_agent | 4 ++-- lib/neutron_plugins/ovs_base | 4 ++-- lib/neutron_plugins/services/firewall | 4 ++-- lib/neutron_plugins/services/loadbalancer | 4 ++-- lib/neutron_plugins/services/metering | 5 +++-- lib/neutron_plugins/services/vpn | 4 ++-- lib/neutron_thirdparty/bigswitch_floodlight | 4 ++-- lib/nova | 4 ++-- lib/nova_plugins/functions-libvirt | 4 ++-- lib/nova_plugins/hypervisor-fake | 4 ++-- lib/nova_plugins/hypervisor-ironic | 4 ++-- lib/nova_plugins/hypervisor-libvirt | 4 ++-- lib/nova_plugins/hypervisor-openvz | 4 ++-- lib/nova_plugins/hypervisor-vsphere | 4 ++-- lib/nova_plugins/hypervisor-xenserver | 4 ++-- lib/oslo | 4 ++-- 
lib/rpc_backend | 4 ++-- lib/swift | 4 ++-- lib/tempest | 4 ++-- lib/template | 4 ++-- stack.sh | 7 +++++-- 63 files changed, 134 insertions(+), 128 deletions(-) diff --git a/functions b/functions index 34da1ba733..762fc472c2 100644 --- a/functions +++ b/functions @@ -22,7 +22,7 @@ source ${FUNC_DIR}/inc/python source ${FUNC_DIR}/inc/rootwrap # Save trace setting -XTRACE=$(set +o | grep xtrace) +_XTRACE_FUNCTIONS=$(set +o | grep xtrace) set +o xtrace # Check if a function already exists @@ -603,7 +603,7 @@ function create_disk { } # Restore xtrace -$XTRACE +$_XTRACE_FUNCTIONS # Local variables: # mode: shell-script diff --git a/functions-common b/functions-common index 6a065ba83c..d68ae77971 100644 --- a/functions-common +++ b/functions-common @@ -32,7 +32,7 @@ # # Save trace setting -XTRACE=$(set +o | grep xtrace) +_XTRACE_FUNCTIONS_COMMON=$(set +o | grep xtrace) set +o xtrace # ensure we don't re-source this in the same environment @@ -2254,7 +2254,7 @@ function time_totals { } # Restore xtrace -$XTRACE +$_XTRACE_FUNCTIONS_COMMON # Local variables: # mode: shell-script diff --git a/inc/meta-config b/inc/meta-config index b9ab6b207f..b6fe437802 100644 --- a/inc/meta-config +++ b/inc/meta-config @@ -20,7 +20,7 @@ # file-name is the destination of the config file # Save trace setting -INC_META_XTRACE=$(set +o | grep xtrace) +_XTRACE_INC_META=$(set +o | grep xtrace) set +o xtrace @@ -197,7 +197,7 @@ function merge_config_group { # Restore xtrace -$INC_META_XTRACE +$_XTRACE_INC_META # Local variables: # mode: shell-script diff --git a/lib/apache b/lib/apache index 17526c74d0..c9e02a2b58 100644 --- a/lib/apache +++ b/lib/apache @@ -19,7 +19,7 @@ # - restart_apache_server # Save trace setting -XTRACE=$(set +o | grep xtrace) +_XTRACE_LIB_APACHE=$(set +o | grep xtrace) set +o xtrace # Allow overriding the default Apache user and group, default to @@ -191,7 +191,7 @@ function restart_apache_server { } # Restore xtrace -$XTRACE +$_XTRACE_LIB_APACHE # Tell emacs to use 
shell-script-mode ## Local variables: diff --git a/lib/ceph b/lib/ceph index f573136a4e..4ac498ab97 100644 --- a/lib/ceph +++ b/lib/ceph @@ -18,7 +18,7 @@ # - cleanup_ceph # Save trace setting -XTRACE=$(set +o | grep xtrace) +_XTRACE_LIB_CEPH=$(set +o | grep xtrace) set +o xtrace @@ -375,7 +375,7 @@ function stop_ceph { # Restore xtrace -$XTRACE +$_XTRACE_LIB_CEPH ## Local variables: ## mode: shell-script diff --git a/lib/cinder b/lib/cinder index 1307c11f7a..70b198c2e9 100644 --- a/lib/cinder +++ b/lib/cinder @@ -20,7 +20,7 @@ # - cleanup_cinder # Save trace setting -XTRACE=$(set +o | grep xtrace) +_XTRACE_CINDER=$(set +o | grep xtrace) set +o xtrace @@ -567,7 +567,7 @@ function create_cinder_volume_group { # Restore xtrace -$XTRACE +$_XTRACE_CINDER # Tell emacs to use shell-script-mode ## Local variables: diff --git a/lib/cinder_backends/ceph b/lib/cinder_backends/ceph index 7e9d2d334e..c21350ba01 100644 --- a/lib/cinder_backends/ceph +++ b/lib/cinder_backends/ceph @@ -22,7 +22,7 @@ # Save trace setting -MY_XTRACE=$(set +o | grep xtrace) +_XTRACE_CINDER_CEPH=$(set +o | grep xtrace) set +o xtrace @@ -76,7 +76,7 @@ function configure_cinder_backend_ceph { } # Restore xtrace -$MY_XTRACE +$_XTRACE_CINDER_CEPH # Local variables: # mode: shell-script diff --git a/lib/cinder_backends/glusterfs b/lib/cinder_backends/glusterfs index 00c62e04cd..4e34f8ef6c 100644 --- a/lib/cinder_backends/glusterfs +++ b/lib/cinder_backends/glusterfs @@ -19,7 +19,7 @@ # configure_cinder_backend_glusterfs - Configure Cinder for GlusterFS backends # Save trace setting -GLUSTERFS_XTRACE=$(set +o | grep xtrace) +_XTRACE_CINDER_GLUSTERFS=$(set +o | grep xtrace) set +o xtrace @@ -41,7 +41,7 @@ function configure_cinder_backend_glusterfs { # Restore xtrace -$GLUSTERFS_XTRACE +$_XTRACE_CINDER_GLUSTERFS # Local variables: # mode: shell-script diff --git a/lib/cinder_backends/lvm b/lib/cinder_backends/lvm index 411b82c190..d927f9cd6b 100644 --- a/lib/cinder_backends/lvm +++ b/lib/cinder_backends/lvm 
@@ -22,7 +22,7 @@ # Save trace setting -MY_XTRACE=$(set +o | grep xtrace) +_XTRACE_CINDER_LVM=$(set +o | grep xtrace) set +o xtrace @@ -68,7 +68,7 @@ function init_cinder_backend_lvm { } # Restore xtrace -$MY_XTRACE +$_XTRACE_CINDER_LVM # mode: shell-script # End: diff --git a/lib/cinder_backends/netapp_iscsi b/lib/cinder_backends/netapp_iscsi index be9442eb83..5cce30a6d3 100644 --- a/lib/cinder_backends/netapp_iscsi +++ b/lib/cinder_backends/netapp_iscsi @@ -20,7 +20,7 @@ # configure_cinder_backend_netapp_iscsi - configure iSCSI # Save trace setting -MY_XTRACE=$(set +o | grep xtrace) +_XTRACE_CINDER_NETAPP=$(set +o | grep xtrace) set +o xtrace @@ -59,7 +59,7 @@ function configure_cinder_backend_netapp_iscsi { # Restore xtrace -$MY_XTRACE +$_XTRACE_CINDER_NETAPP # Local variables: # mode: shell-script diff --git a/lib/cinder_backends/netapp_nfs b/lib/cinder_backends/netapp_nfs index dc919ad86b..7ba36d2a3b 100644 --- a/lib/cinder_backends/netapp_nfs +++ b/lib/cinder_backends/netapp_nfs @@ -20,7 +20,7 @@ # configure_cinder_backend_netapp_nfs - configure NFS # Save trace setting -MY_XTRACE=$(set +o | grep xtrace) +_XTRACE_CINDER_NETAPP=$(set +o | grep xtrace) set +o xtrace @@ -70,7 +70,7 @@ function cleanup_cinder_backend_netapp_nfs { # Restore xtrace -$MY_XTRACE +$_XTRACE_CINDER_NETAPP # Local variables: # mode: shell-script diff --git a/lib/cinder_backends/nfs b/lib/cinder_backends/nfs index fc51b2b440..89a37a1f02 100644 --- a/lib/cinder_backends/nfs +++ b/lib/cinder_backends/nfs @@ -19,7 +19,7 @@ # configure_cinder_backend_nfs - Configure Cinder for NFS backends # Save trace setting -NFS_XTRACE=$(set +o | grep xtrace) +_XTRACE_CINDER_NFS=$(set +o | grep xtrace) set +o xtrace @@ -38,7 +38,7 @@ function configure_cinder_backend_nfs { # Restore xtrace -$NFS_XTRACE +$_XTRACE_CINDER_NFS # Local variables: # mode: shell-script diff --git a/lib/cinder_backends/solidfire b/lib/cinder_backends/solidfire index 7cc70fc86d..16bc527863 100644 --- a/lib/cinder_backends/solidfire 
+++ b/lib/cinder_backends/solidfire @@ -17,7 +17,7 @@ # configure_cinder_driver - make configuration changes, including those to other services # Save trace setting -MY_XTRACE=$(set +o | grep xtrace) +_XTRACE_CINDER_SOLIDFIRE=$(set +o | grep xtrace) set +o xtrace @@ -42,7 +42,7 @@ function configure_cinder_backend_solidfire { # Restore xtrace -$MY_XTRACE +$_XTRACE_CINDER_SOLIDFIRE # Local variables: # mode: shell-script diff --git a/lib/cinder_backends/vmdk b/lib/cinder_backends/vmdk index d5b945354b..3a6a5cf2ff 100644 --- a/lib/cinder_backends/vmdk +++ b/lib/cinder_backends/vmdk @@ -15,7 +15,7 @@ # configure_cinder_backend_vmdk - Configure Cinder for VMware vmdk backends # Save trace setting -VMDK_XTRACE=$(set +o | grep xtrace) +_XTRACE_CINDER_VMDK=$(set +o | grep xtrace) set +o xtrace @@ -40,7 +40,7 @@ function configure_cinder_backend_vmdk { # Restore xtrace -$VMDK_XTRACE +$_XTRACE_CINDER_VMDK # Local variables: # mode: shell-script diff --git a/lib/cinder_backends/xiv b/lib/cinder_backends/xiv index 6eadaae93b..e8b5da05d5 100644 --- a/lib/cinder_backends/xiv +++ b/lib/cinder_backends/xiv @@ -42,7 +42,7 @@ # configure_cinder_backend_xiv - Configure Cinder for xiv backends # Save trace setting -XIV_XTRACE=$(set +o | grep xtrace) +_XTRACE_CINDER_XIV=$(set +o | grep xtrace) set +o xtrace # Defaults @@ -79,7 +79,7 @@ function configure_cinder_backend_xiv { } # Restore xtrace -$XIV_XTRACE +$_XTRACE_CINDER_XIV # Local variables: # mode: shell-script diff --git a/lib/cinder_plugins/XenAPINFS b/lib/cinder_plugins/XenAPINFS index f7306955cb..92135e7c4f 100644 --- a/lib/cinder_plugins/XenAPINFS +++ b/lib/cinder_plugins/XenAPINFS @@ -15,7 +15,7 @@ # configure_cinder_driver - make configuration changes, including those to other services # Save trace setting -MY_XTRACE=$(set +o | grep xtrace) +_XTRACE_CINDER_XENAPINFS=$(set +o | grep xtrace) set +o xtrace @@ -39,7 +39,7 @@ function configure_cinder_driver { } # Restore xtrace -$MY_XTRACE +$_XTRACE_CINDER_XENAPINFS # Local 
variables: # mode: shell-script diff --git a/lib/cinder_plugins/glusterfs b/lib/cinder_plugins/glusterfs index 35ceb27ce1..329dd6c649 100644 --- a/lib/cinder_plugins/glusterfs +++ b/lib/cinder_plugins/glusterfs @@ -15,7 +15,7 @@ # configure_cinder_driver - make configuration changes, including those to other services # Save trace setting -MY_XTRACE=$(set +o | grep xtrace) +_XTRACE_CINDER_GLUSTERFS=$(set +o | grep xtrace) set +o xtrace @@ -45,7 +45,7 @@ function configure_cinder_driver { } # Restore xtrace -$MY_XTRACE +$_XTRACE_CINDER_GLUSTERFS # Local variables: # mode: shell-script diff --git a/lib/cinder_plugins/nfs b/lib/cinder_plugins/nfs index 83b39932cf..6e4ffe068e 100644 --- a/lib/cinder_plugins/nfs +++ b/lib/cinder_plugins/nfs @@ -15,7 +15,7 @@ # configure_cinder_driver - make configuration changes, including those to other services # Save trace setting -MY_XTRACE=$(set +o | grep xtrace) +_XTRACE_CINDER_NFS=$(set +o | grep xtrace) set +o xtrace @@ -36,7 +36,7 @@ function configure_cinder_driver { } # Restore xtrace -$MY_XTRACE +$_XTRACE_CINDER_NFS # Local variables: # mode: shell-script diff --git a/lib/cinder_plugins/sheepdog b/lib/cinder_plugins/sheepdog index ca343f708b..558de46c6d 100644 --- a/lib/cinder_plugins/sheepdog +++ b/lib/cinder_plugins/sheepdog @@ -15,7 +15,7 @@ # configure_cinder_driver - make configuration changes, including those to other services # Save trace setting -MY_XTRACE=$(set +o | grep xtrace) +_XTRACE_CINDER_SHEEPDOG=$(set +o | grep xtrace) set +o xtrace @@ -34,7 +34,7 @@ function configure_cinder_driver { } # Restore xtrace -$MY_XTRACE +$_XTRACE_CINDER_SHEEPDOG # Local variables: # mode: shell-script diff --git a/lib/cinder_plugins/vsphere b/lib/cinder_plugins/vsphere index f14ddf0998..1b28ffe602 100644 --- a/lib/cinder_plugins/vsphere +++ b/lib/cinder_plugins/vsphere @@ -15,7 +15,7 @@ # configure_cinder_driver - make configuration changes, including those to other services # Save trace setting -MY_XTRACE=$(set +o | grep xtrace) 
+_XTRACE_CINDER_VSPHERE=$(set +o | grep xtrace) set +o xtrace @@ -37,7 +37,7 @@ function configure_cinder_driver { } # Restore xtrace -$MY_XTRACE +$_XTRACE_CINDER_VSPHERE # Local variables: # mode: shell-script diff --git a/lib/database b/lib/database index 13740b90e6..0d720527df 100644 --- a/lib/database +++ b/lib/database @@ -20,7 +20,7 @@ # and call register_database $DATABASE_TYPE # Save trace setting -XTRACE=$(set +o | grep xtrace) +_XTRACE_LIB_DB=$(set +o | grep xtrace) set +o xtrace DATABASE_BACKENDS="" @@ -137,7 +137,7 @@ function database_connection_url { # Restore xtrace -$XTRACE +$_XTRACE_LIB_DB # Tell emacs to use shell-script-mode ## Local variables: diff --git a/lib/databases/mysql b/lib/databases/mysql index cc74b33327..1bbbd62cc3 100644 --- a/lib/databases/mysql +++ b/lib/databases/mysql @@ -8,7 +8,7 @@ # - DATABASE_{HOST,USER,PASSWORD} must be defined # Save trace setting -MY_XTRACE=$(set +o | grep xtrace) +_XTRACE_DB_MYSQL=$(set +o | grep xtrace) set +o xtrace MYSQL_DRIVER=${MYSQL_DRIVER:-PyMySQL} @@ -178,7 +178,7 @@ function database_connection_url_mysql { # Restore xtrace -$MY_XTRACE +$_XTRACE_DB_MYSQL # Local variables: # mode: shell-script diff --git a/lib/databases/postgresql b/lib/databases/postgresql index 78c7bedc90..913e8ffacd 100644 --- a/lib/databases/postgresql +++ b/lib/databases/postgresql @@ -8,7 +8,7 @@ # - DATABASE_{HOST,USER,PASSWORD} must be defined # Save trace setting -PG_XTRACE=$(set +o | grep xtrace) +_XTRACE_PG=$(set +o | grep xtrace) set +o xtrace @@ -119,7 +119,7 @@ function database_connection_url_postgresql { # Restore xtrace -$PG_XTRACE +$_XTRACE_PG # Local variables: # mode: shell-script diff --git a/lib/dlm b/lib/dlm index 95e9b0ac3d..74eb67ee8f 100644 --- a/lib/dlm +++ b/lib/dlm @@ -19,7 +19,7 @@ # - cleanup_dlm # Save trace setting -XTRACE=$(set +o | grep xtrace) +_XTRACE_DLM=$(set +o | grep xtrace) set +o xtrace @@ -100,7 +100,7 @@ function install_dlm { } # Restore xtrace -$XTRACE +$_XTRACE_DLM # Tell emacs to 
use shell-script-mode ## Local variables: diff --git a/lib/dstat b/lib/dstat index fe4790b12d..b705948094 100644 --- a/lib/dstat +++ b/lib/dstat @@ -13,7 +13,7 @@ # - stop_dstat # Save trace setting -XTRACE=$(set +o | grep xtrace) +_XTRACE_DSTAT=$(set +o | grep xtrace) set +o xtrace # start_dstat() - Start running processes, including screen @@ -34,4 +34,4 @@ function stop_dstat { } # Restore xtrace -$XTRACE +$_XTRACE_DSTAT diff --git a/lib/glance b/lib/glance index 2eb93a46e6..eb5832e910 100644 --- a/lib/glance +++ b/lib/glance @@ -21,7 +21,7 @@ # - cleanup_glance # Save trace setting -XTRACE=$(set +o | grep xtrace) +_XTRACE_GLANCE=$(set +o | grep xtrace) set +o xtrace @@ -403,7 +403,7 @@ function stop_glance { } # Restore xtrace -$XTRACE +$_XTRACE_GLANCE # Tell emacs to use shell-script-mode ## Local variables: diff --git a/lib/heat b/lib/heat index e42bdf0b9e..54666a5212 100644 --- a/lib/heat +++ b/lib/heat @@ -23,7 +23,7 @@ # - cleanup_heat # Save trace setting -XTRACE=$(set +o | grep xtrace) +_XTRACE_HEAT=$(set +o | grep xtrace) set +o xtrace @@ -464,7 +464,7 @@ function build_heat_pip_mirror { } # Restore xtrace -$XTRACE +$_XTRACE_HEAT # Tell emacs to use shell-script-mode ## Local variables: diff --git a/lib/horizon b/lib/horizon index ff63b06ab2..67181fcf29 100644 --- a/lib/horizon +++ b/lib/horizon @@ -19,7 +19,7 @@ # - cleanup_horizon # Save trace setting -XTRACE=$(set +o | grep xtrace) +_XTRACE_HORIZON=$(set +o | grep xtrace) set +o xtrace @@ -193,7 +193,7 @@ function _prepare_message_catalog_compilation { # Restore xtrace -$XTRACE +$_XTRACE_HORIZON # Tell emacs to use shell-script-mode ## Local variables: diff --git a/lib/infra b/lib/infra index ab32efecd9..cf003cce01 100644 --- a/lib/infra +++ b/lib/infra @@ -15,7 +15,7 @@ # - install_infra # Save trace setting -XTRACE=$(set +o | grep xtrace) +_XTRACE_INFRA=$(set +o | grep xtrace) set +o xtrace @@ -50,7 +50,7 @@ function install_infra { } # Restore xtrace -$XTRACE +$_XTRACE_INFRA # Tell emacs to use 
shell-script-mode ## Local variables: diff --git a/lib/ironic b/lib/ironic index 6a32983b24..23d8dda3ff 100644 --- a/lib/ironic +++ b/lib/ironic @@ -21,8 +21,8 @@ # - cleanup_ironic # Save trace and pipefail settings -XTRACE=$(set +o | grep xtrace) -PIPEFAIL=$(set +o | grep pipefail) +_XTRACE_IRONIC=$(set +o | grep xtrace) +_PIPEFAIL_IRONIC=$(set +o | grep pipefail) set +o xtrace set +o pipefail @@ -855,8 +855,8 @@ function cleanup_baremetal_basic_ops { } # Restore xtrace + pipefail -$XTRACE -$PIPEFAIL +$_XTRACE_IRONIC +$_PIPEFAIL_IRONIC # Tell emacs to use shell-script-mode ## Local variables: diff --git a/lib/keystone b/lib/keystone index 5a2afbfe02..b19202b0f7 100644 --- a/lib/keystone +++ b/lib/keystone @@ -28,7 +28,7 @@ # - _cleanup_keystone_apache_wsgi # Save trace setting -XTRACE=$(set +o | grep xtrace) +_XTRACE_KEYSTONE=$(set +o | grep xtrace) set +o xtrace # Defaults @@ -592,7 +592,7 @@ function stop_keystone { # Restore xtrace -$XTRACE +$_XTRACE_KEYSTONE # Tell emacs to use shell-script-mode ## Local variables: diff --git a/lib/ldap b/lib/ldap index 0414fea639..65056aea2f 100644 --- a/lib/ldap +++ b/lib/ldap @@ -8,7 +8,7 @@ # - install_ldap() # Save trace setting -XTRACE=$(set +o | grep xtrace) +_XTRACE_LDAP=$(set +o | grep xtrace) set +o xtrace @@ -166,7 +166,7 @@ function clear_ldap_state { } # Restore xtrace -$XTRACE +$_XTRACE_LDAP # Tell emacs to use shell-script-mode ## Local variables: diff --git a/lib/lvm b/lib/lvm index 468a99aecc..ae6023a836 100644 --- a/lib/lvm +++ b/lib/lvm @@ -16,7 +16,7 @@ # Save trace setting -MY_XTRACE=$(set +o | grep xtrace) +_XTRACE_LVM=$(set +o | grep xtrace) set +o xtrace @@ -182,7 +182,7 @@ function set_lvm_filter { } # Restore xtrace -$MY_XTRACE +$_XTRACE_LVM # mode: shell-script # End: diff --git a/lib/neutron-legacy b/lib/neutron-legacy index 978943dae2..d00630af70 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -357,7 +357,7 @@ else fi # Save trace setting -XTRACE=$(set +o | grep xtrace) 
+_XTRACE_NEUTRON=$(set +o | grep xtrace) set +o xtrace @@ -1492,7 +1492,7 @@ function is_provider_network { # Restore xtrace -$XTRACE +$_XTRACE_NEUTRON # Tell emacs to use shell-script-mode ## Local variables: diff --git a/lib/neutron_plugins/bigswitch_floodlight b/lib/neutron_plugins/bigswitch_floodlight index f52105e658..586ded79b4 100644 --- a/lib/neutron_plugins/bigswitch_floodlight +++ b/lib/neutron_plugins/bigswitch_floodlight @@ -4,7 +4,7 @@ # ------------------------------------ # Save trace setting -BS_XTRACE=$(set +o | grep xtrace) +_XTRACE_NEUTRON_BIGSWITCH=$(set +o | grep xtrace) set +o xtrace source $TOP_DIR/lib/neutron_plugins/ovs_base @@ -75,4 +75,4 @@ function neutron_plugin_check_adv_test_requirements { } # Restore xtrace -$BS_XTRACE +$_XTRACE_NEUTRON_BIGSWITCH diff --git a/lib/neutron_plugins/brocade b/lib/neutron_plugins/brocade index 953360e070..6ba0a66c3f 100644 --- a/lib/neutron_plugins/brocade +++ b/lib/neutron_plugins/brocade @@ -4,7 +4,7 @@ # ---------------------- # Save trace setting -BRCD_XTRACE=$(set +o | grep xtrace) +_XTRACE_NEUTRON_BROCADE=$(set +o | grep xtrace) set +o xtrace function is_neutron_ovs_base_plugin { @@ -81,4 +81,4 @@ function neutron_plugin_check_adv_test_requirements { } # Restore xtrace -$BRCD_XTRACE +$_XTRACE_NEUTRON_BROCADE diff --git a/lib/neutron_plugins/cisco b/lib/neutron_plugins/cisco index 7d0cf1af39..fc2cb8ad17 100644 --- a/lib/neutron_plugins/cisco +++ b/lib/neutron_plugins/cisco @@ -4,7 +4,7 @@ # --------------------------- # Save trace setting -CISCO_XTRACE=$(set +o | grep xtrace) +_XTRACE_NEUTRON_CISCO=$(set +o | grep xtrace) set +o xtrace # Scecify the VSM parameters @@ -154,4 +154,4 @@ function neutron_plugin_setup_interface_driver { } # Restore xtrace -$CISCO_XTRACE +$_XTRACE_NEUTRON_CISCO diff --git a/lib/neutron_plugins/embrane b/lib/neutron_plugins/embrane index 2028496ca1..385dab8354 100644 --- a/lib/neutron_plugins/embrane +++ b/lib/neutron_plugins/embrane @@ -4,7 +4,7 @@ # 
--------------------------- # Save trace setting -EMBR_XTRACE=$(set +o | grep xtrace) +_XTRACE_NEUTRON_EMBR=$(set +o | grep xtrace) set +o xtrace source $TOP_DIR/lib/neutron_plugins/openvswitch @@ -39,4 +39,5 @@ function neutron_plugin_configure_service { } # Restore xtrace -$EMBR_XTRACE +$_XTRACE_NEUTRON_EMBR + diff --git a/lib/neutron_plugins/linuxbridge_agent b/lib/neutron_plugins/linuxbridge_agent index f28bcfeadd..096722b096 100644 --- a/lib/neutron_plugins/linuxbridge_agent +++ b/lib/neutron_plugins/linuxbridge_agent @@ -4,7 +4,7 @@ # ----------------------------- # Save trace setting -PLUGIN_XTRACE=$(set +o | grep xtrace) +_XTRACE_NEUTRON_LB=$(set +o | grep xtrace) set +o xtrace function neutron_lb_cleanup { @@ -93,4 +93,4 @@ function neutron_plugin_check_adv_test_requirements { } # Restore xtrace -$PLUGIN_XTRACE +$_XTRACE_NEUTRON_LB diff --git a/lib/neutron_plugins/ml2 b/lib/neutron_plugins/ml2 index ace5335a78..30e1b036f3 100644 --- a/lib/neutron_plugins/ml2 +++ b/lib/neutron_plugins/ml2 @@ -4,7 +4,7 @@ # ------------------------------ # Save trace setting -ML2_XTRACE=$(set +o | grep xtrace) +_XTRACE_NEUTRON_ML2=$(set +o | grep xtrace) set +o xtrace # Enable this to simply and quickly enable tunneling with ML2. 
@@ -137,4 +137,4 @@ function has_neutron_plugin_security_group { } # Restore xtrace -$ML2_XTRACE +$_XTRACE_NEUTRON_ML2 diff --git a/lib/neutron_plugins/nuage b/lib/neutron_plugins/nuage index 9e5307ba53..61e634e453 100644 --- a/lib/neutron_plugins/nuage +++ b/lib/neutron_plugins/nuage @@ -4,7 +4,7 @@ # ---------------------- # Save trace setting -NU_XTRACE=$(set +o | grep xtrace) +_XTRACE_NEUTRON_NU=$(set +o | grep xtrace) set +o xtrace function neutron_plugin_create_nova_conf { @@ -66,4 +66,4 @@ function has_neutron_plugin_security_group { } # Restore xtrace -$NU_XTRACE +$_XTRACE_NEUTRON_NU diff --git a/lib/neutron_plugins/openvswitch b/lib/neutron_plugins/openvswitch index 891ab4982b..130eaacab3 100644 --- a/lib/neutron_plugins/openvswitch +++ b/lib/neutron_plugins/openvswitch @@ -7,7 +7,7 @@ # which has been removed in Juno. # Save trace setting -OVS_XTRACE=$(set +o | grep xtrace) +_XTRACE_NEUTRON_OVS=$(set +o | grep xtrace) set +o xtrace source $TOP_DIR/lib/neutron_plugins/openvswitch_agent @@ -56,4 +56,5 @@ function has_neutron_plugin_security_group { } # Restore xtrace -$OVS_XTRACE +$_XTRACE_NEUTRON_OVS + diff --git a/lib/neutron_plugins/openvswitch_agent b/lib/neutron_plugins/openvswitch_agent index 6a333939d0..b1acacd4f3 100644 --- a/lib/neutron_plugins/openvswitch_agent +++ b/lib/neutron_plugins/openvswitch_agent @@ -4,7 +4,7 @@ # ----------------------------- # Save trace setting -OVSA_XTRACE=$(set +o | grep xtrace) +_XTRACE_NEUTRON_OVSL2=$(set +o | grep xtrace) set +o xtrace source $TOP_DIR/lib/neutron_plugins/ovs_base @@ -132,4 +132,4 @@ function neutron_plugin_check_adv_test_requirements { } # Restore xtrace -$OVSA_XTRACE +$_XTRACE_NEUTRON_OVSL2 diff --git a/lib/neutron_plugins/ovs_base b/lib/neutron_plugins/ovs_base index d3fd198b08..91aff336fe 100644 --- a/lib/neutron_plugins/ovs_base +++ b/lib/neutron_plugins/ovs_base @@ -4,7 +4,7 @@ # ------------------------------------- # Save trace setting -OVSB_XTRACE=$(set +o | grep xtrace) 
+_XTRACE_NEUTRON_OVS_BASE=$(set +o | grep xtrace) set +o xtrace OVS_BRIDGE=${OVS_BRIDGE:-br-int} @@ -114,4 +114,4 @@ function _neutron_ovs_base_configure_nova_vif_driver { } # Restore xtrace -$OVSB_XTRACE +$_XTRACE_NEUTRON_OVS_BASE diff --git a/lib/neutron_plugins/services/firewall b/lib/neutron_plugins/services/firewall index 3496da82f8..1d81a21825 100644 --- a/lib/neutron_plugins/services/firewall +++ b/lib/neutron_plugins/services/firewall @@ -4,7 +4,7 @@ # --------------------------- # Save trace setting -FW_XTRACE=$(set +o | grep xtrace) +_XTRACE_NEUTRON_FIREWALL=$(set +o | grep xtrace) set +o xtrace FWAAS_PLUGIN=neutron_fwaas.services.firewall.fwaas_plugin.FirewallPlugin @@ -26,4 +26,4 @@ function neutron_fwaas_stop { } # Restore xtrace -$FW_XTRACE +$_XTRACE_NEUTRON_FIREWALL diff --git a/lib/neutron_plugins/services/loadbalancer b/lib/neutron_plugins/services/loadbalancer index 7865f6fd6e..b07d06c32b 100644 --- a/lib/neutron_plugins/services/loadbalancer +++ b/lib/neutron_plugins/services/loadbalancer @@ -4,7 +4,7 @@ # --------------------------- # Save trace setting -LB_XTRACE=$(set +o | grep xtrace) +_XTRACE_NEUTRON_LB=$(set +o | grep xtrace) set +o xtrace @@ -48,4 +48,4 @@ function neutron_lbaas_stop { } # Restore xtrace -$LB_XTRACE +$_XTRACE_NEUTRON_LB diff --git a/lib/neutron_plugins/services/metering b/lib/neutron_plugins/services/metering index c75ab19d4e..5fd2fdce44 100644 --- a/lib/neutron_plugins/services/metering +++ b/lib/neutron_plugins/services/metering @@ -4,7 +4,7 @@ # --------------------------- # Save trace setting -METER_XTRACE=$(set +o | grep xtrace) +_XTRACE_NETURON_METER=$(set +o | grep xtrace) set +o xtrace @@ -29,4 +29,5 @@ function neutron_metering_stop { } # Restore xtrace -$METER_XTRACE +$_XTRACE_NETURON_METER + diff --git a/lib/neutron_plugins/services/vpn b/lib/neutron_plugins/services/vpn index c0e7457413..8a379f588c 100644 --- a/lib/neutron_plugins/services/vpn +++ b/lib/neutron_plugins/services/vpn @@ -4,7 +4,7 @@ # 
--------------------------- # Save trace setting -VPN_XTRACE=$(set +o | grep xtrace) +_XTRACE_NEUTRON_VPN=$(set +o | grep xtrace) set +o xtrace @@ -53,4 +53,4 @@ function neutron_vpn_stop { } # Restore xtrace -$VPN_XTRACE +$_XTRACE_NEUTRON_VPN diff --git a/lib/neutron_thirdparty/bigswitch_floodlight b/lib/neutron_thirdparty/bigswitch_floodlight index e3f4689fd7..45a4f2e263 100644 --- a/lib/neutron_thirdparty/bigswitch_floodlight +++ b/lib/neutron_thirdparty/bigswitch_floodlight @@ -4,7 +4,7 @@ # ------------------------------------------ # Save trace setting -BS3_XTRACE=$(set +o | grep xtrace) +_XTRACE_NEUTRON_BIGSWITCH=$(set +o | grep xtrace) set +o xtrace BS_FL_CONTROLLERS_PORT=${BS_FL_CONTROLLERS_PORT:-localhost:80} @@ -51,4 +51,4 @@ function check_bigswitch_floodlight { } # Restore xtrace -$BS3_XTRACE +$_XTRACE_NEUTRON_BIGSWITCH diff --git a/lib/nova b/lib/nova index ba05f53b87..e5712939fe 100644 --- a/lib/nova +++ b/lib/nova @@ -25,7 +25,7 @@ # - cleanup_nova # Save trace setting -XTRACE=$(set +o | grep xtrace) +_XTRACE_LIB_NOVA=$(set +o | grep xtrace) set +o xtrace @@ -958,7 +958,7 @@ function stop_nova { # Restore xtrace -$XTRACE +$_XTRACE_LIB_NOVA # Tell emacs to use shell-script-mode ## Local variables: diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index 045fc8b919..dae55c6eba 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -8,7 +8,7 @@ # ``STACK_USER`` has to be defined # Save trace setting -LV_XTRACE=$(set +o | grep xtrace) +_XTRACE_NOVA_FN_LIBVIRT=$(set +o | grep xtrace) set +o xtrace # Defaults @@ -134,7 +134,7 @@ EOF # Restore xtrace -$LV_XTRACE +$_XTRACE_NOVA_FN_LIBVIRT # Local variables: # mode: shell-script diff --git a/lib/nova_plugins/hypervisor-fake b/lib/nova_plugins/hypervisor-fake index 3180d91f0a..2434dce884 100644 --- a/lib/nova_plugins/hypervisor-fake +++ b/lib/nova_plugins/hypervisor-fake @@ -17,7 +17,7 @@ # cleanup_nova_hypervisor - remove transient data 
and cache # Save trace setting -MY_XTRACE=$(set +o | grep xtrace) +_XTRACE_VIRTFAKE=$(set +o | grep xtrace) set +o xtrace @@ -72,7 +72,7 @@ function stop_nova_hypervisor { # Restore xtrace -$MY_XTRACE +$_XTRACE_VIRTFAKE # Local variables: # mode: shell-script diff --git a/lib/nova_plugins/hypervisor-ironic b/lib/nova_plugins/hypervisor-ironic index b9e286d5b6..c6ed85d63e 100644 --- a/lib/nova_plugins/hypervisor-ironic +++ b/lib/nova_plugins/hypervisor-ironic @@ -17,7 +17,7 @@ # cleanup_nova_hypervisor - remove transient data and cache # Save trace setting -MY_XTRACE=$(set +o | grep xtrace) +_XTRACE_HYP_IRONIC=$(set +o | grep xtrace) set +o xtrace source $TOP_DIR/lib/nova_plugins/functions-libvirt @@ -81,7 +81,7 @@ function stop_nova_hypervisor { # Restore xtrace -$MY_XTRACE +$_XTRACE_HYP_IRONIC # Local variables: # mode: shell-script diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index c54a7166a0..8bbaa2133d 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -17,7 +17,7 @@ # cleanup_nova_hypervisor - remove transient data and cache # Save trace setting -MY_XTRACE=$(set +o | grep xtrace) +_XTRACE_NOVA_LIBVIRT=$(set +o | grep xtrace) set +o xtrace source $TOP_DIR/lib/nova_plugins/functions-libvirt @@ -105,7 +105,7 @@ function stop_nova_hypervisor { # Restore xtrace -$MY_XTRACE +$_XTRACE_NOVA_LIBVIRT # Local variables: # mode: shell-script diff --git a/lib/nova_plugins/hypervisor-openvz b/lib/nova_plugins/hypervisor-openvz index cce36b8d3f..58ab5c11ac 100644 --- a/lib/nova_plugins/hypervisor-openvz +++ b/lib/nova_plugins/hypervisor-openvz @@ -17,7 +17,7 @@ # cleanup_nova_hypervisor - remove transient data and cache # Save trace setting -MY_XTRACE=$(set +o | grep xtrace) +_XTRACE_OPENVZ=$(set +o | grep xtrace) set +o xtrace @@ -62,7 +62,7 @@ function stop_nova_hypervisor { # Restore xtrace -$MY_XTRACE +$_XTRACE_OPENVZ # Local variables: # mode: shell-script diff --git 
a/lib/nova_plugins/hypervisor-vsphere b/lib/nova_plugins/hypervisor-vsphere index 698f836bad..7c08bc945b 100644 --- a/lib/nova_plugins/hypervisor-vsphere +++ b/lib/nova_plugins/hypervisor-vsphere @@ -17,7 +17,7 @@ # cleanup_nova_hypervisor - remove transient data and cache # Save trace setting -MY_XTRACE=$(set +o | grep xtrace) +_XTRACE_NOVA_VSPHERE=$(set +o | grep xtrace) set +o xtrace @@ -64,7 +64,7 @@ function stop_nova_hypervisor { # Restore xtrace -$MY_XTRACE +$_XTRACE_NOVA_VSPHERE # Local variables: # mode: shell-script diff --git a/lib/nova_plugins/hypervisor-xenserver b/lib/nova_plugins/hypervisor-xenserver index e097990bd3..3eb9149bb4 100644 --- a/lib/nova_plugins/hypervisor-xenserver +++ b/lib/nova_plugins/hypervisor-xenserver @@ -17,7 +17,7 @@ # cleanup_nova_hypervisor - remove transient data and cache # Save trace setting -MY_XTRACE=$(set +o | grep xtrace) +_XTRACE_XENSERVER=$(set +o | grep xtrace) set +o xtrace @@ -111,7 +111,7 @@ function stop_nova_hypervisor { # Restore xtrace -$MY_XTRACE +$_XTRACE_XENSERVER # Local variables: # mode: shell-script diff --git a/lib/oslo b/lib/oslo index 56615faaa3..3d6fbb38c6 100644 --- a/lib/oslo +++ b/lib/oslo @@ -16,7 +16,7 @@ # - install_oslo # Save trace setting -XTRACE=$(set +o | grep xtrace) +_XTRACE_LIB_OSLO=$(set +o | grep xtrace) set +o xtrace @@ -95,7 +95,7 @@ function install_oslo { } # Restore xtrace -$XTRACE +$_XTRACE_LIB_OSLO # Tell emacs to use shell-script-mode ## Local variables: diff --git a/lib/rpc_backend b/lib/rpc_backend index 298dcb6e5f..3864adec32 100644 --- a/lib/rpc_backend +++ b/lib/rpc_backend @@ -21,7 +21,7 @@ # of this file which is a standard interface. 
# Save trace setting -XTRACE=$(set +o | grep xtrace) +_XTRACE_RPC_BACKEND=$(set +o | grep xtrace) set +o xtrace # Functions @@ -141,7 +141,7 @@ function rabbit_setuser { } # Restore xtrace -$XTRACE +$_XTRACE_RPC_BACKEND # Tell emacs to use shell-script-mode ## Local variables: diff --git a/lib/swift b/lib/swift index d7ccc24111..b596142ad2 100644 --- a/lib/swift +++ b/lib/swift @@ -24,7 +24,7 @@ # - _cleanup_swift_apache_wsgi # Save trace setting -XTRACE=$(set +o | grep xtrace) +_XTRACE_LIB_SWIFT=$(set +o | grep xtrace) set +o xtrace @@ -823,7 +823,7 @@ function swift_configure_tempurls { } # Restore xtrace -$XTRACE +$_XTRACE_LIB_SWIFT # Tell emacs to use shell-script-mode ## Local variables: diff --git a/lib/tempest b/lib/tempest index 76fd6cac74..85e0e7fa35 100644 --- a/lib/tempest +++ b/lib/tempest @@ -38,7 +38,7 @@ # - init_tempest # Save trace setting -XTRACE=$(set +o | grep xtrace) +_XTRACE_TEMPEST=$(set +o | grep xtrace) set +o xtrace @@ -649,7 +649,7 @@ function init_tempest { } # Restore xtrace -$XTRACE +$_XTRACE_TEMPEST # Tell emacs to use shell-script-mode ## Local variables: diff --git a/lib/template b/lib/template index 2703788af4..08d10bbf71 100644 --- a/lib/template +++ b/lib/template @@ -21,7 +21,7 @@ # - cleanup_XXXX # Save trace setting -XTRACE=$(set +o | grep xtrace) +_XTRACE_TEMPLATE=$(set +o | grep xtrace) set +o xtrace @@ -92,7 +92,7 @@ function stop_XXXX { } # Restore xtrace -$XTRACE +$_XTRACE_TEMPLATE # Tell emacs to use shell-script-mode ## Local variables: diff --git a/stack.sh b/stack.sh index 8625b5aaeb..19d05c904a 100755 --- a/stack.sh +++ b/stack.sh @@ -573,7 +573,8 @@ run_phase source # Generic helper to configure passwords function read_password { - XTRACE=$(set +o | grep xtrace) + local xtrace + xtrace=$(set +o | grep xtrace) set +o xtrace var=$1; msg=$2 pw=${!var} @@ -616,7 +617,9 @@ function read_password { eval "$var=$pw" echo "$var=$pw" >> $localrc fi - $XTRACE + + # restore previous xtrace value + $xtrace } From 
f95315b6ea56b3f2cb18caeac734dd15e6704b93 Mon Sep 17 00:00:00 2001 From: Shinobu KINJO Date: Sat, 7 Nov 2015 10:21:08 +0900 Subject: [PATCH 0653/2941] Ensure link is set to up, when moving IP addresses across interfaces. - To add, initialize and set up a valiable named IP_UP - To bring up interface after moving IP to OVS bridge Change-Id: I70f5974c115be6f7e7422a9a325f36cf3b71455a Closes-Bug: #1469596 --- lib/neutron-legacy | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index c244e5470a..ecff3e5774 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -798,6 +798,7 @@ function _move_neutron_addresses_route { local IP_ADD="" local IP_DEL="" + local IP_UP="" local DEFAULT_ROUTE_GW DEFAULT_ROUTE_GW=$(ip r | awk "/default.+$from_intf/ { print \$3; exit }") local ADD_OVS_PORT="" @@ -821,9 +822,10 @@ function _move_neutron_addresses_route { if [[ "$IP_BRD" != "" ]]; then IP_DEL="sudo ip addr del $IP_BRD dev $from_intf" IP_ADD="sudo ip addr add $IP_BRD dev $to_intf" + IP_UP="sudo ip link set $to_intf up" fi - $IP_DEL; $IP_ADD; $ADD_OVS_PORT; $ADD_DEFAULT_ROUTE + $IP_DEL; $IP_ADD; $IP_UP; $ADD_OVS_PORT; $ADD_DEFAULT_ROUTE fi } From 5509ed579773bf1a69bc5fb406a206a6da010c56 Mon Sep 17 00:00:00 2001 From: Atsushi SAKAI Date: Mon, 30 Nov 2015 20:20:21 +0900 Subject: [PATCH 0654/2941] Fix comment typos in inc/ and tests/ directories valu3 => value3 enviromnet => environment direcotry => directory virualenv => virtualenv editiable => editable envirnment => environment Change-Id: I97fb2d44a37b16d02d4fbdb08bfa33414349f651 --- inc/ini-config | 2 +- inc/python | 8 ++++---- tests/run-process.sh | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/inc/ini-config b/inc/ini-config index 42a66c63b6..d2830d79cd 100644 --- a/inc/ini-config +++ b/inc/ini-config @@ -205,7 +205,7 @@ $option = $value } # Set a multiple line option in an INI file -# iniset_multiline [-sudo] config-file section option value1 value2 
valu3 ... +# iniset_multiline [-sudo] config-file section option value1 value2 value3 ... function iniset_multiline { local xtrace xtrace=$(set +o | grep xtrace) diff --git a/inc/python b/inc/python index 91ceb44499..59668a2c6a 100644 --- a/inc/python +++ b/inc/python @@ -17,7 +17,7 @@ set +o xtrace # Global Config Variables -# PROJECT_VENV contains the name of the virtual enviromnet for each +# PROJECT_VENV contains the name of the virtual environment for each # project. A null value installs to the system Python directories. declare -A PROJECT_VENV @@ -35,7 +35,7 @@ function get_pip_command { fi } -# Get the path to the direcotry where python executables are installed. +# Get the path to the directory where python executables are installed. # get_python_exec_prefix function get_python_exec_prefix { local xtrace @@ -93,7 +93,7 @@ function pip_install { fi if [[ $TRACK_DEPENDS = True && ! "$@" =~ virtualenv ]]; then # TRACK_DEPENDS=True installation creates a circular dependency when - # we attempt to install virtualenv into a virualenv, so we must global + # we attempt to install virtualenv into a virtualenv, so we must global # that installation. source $DEST/.venv/bin/activate local cmd_pip=$DEST/.venv/bin/pip @@ -199,7 +199,7 @@ function setup_lib { setup_install $dir } -# setup a library by name in editiable mode. If we are trying to use +# setup a library by name in editable mode. If we are trying to use # the library from git, we'll do a git based install, otherwise we'll # punt and the library should be installed by a requirements pull from # another project. diff --git a/tests/run-process.sh b/tests/run-process.sh index bdf1395d07..301b9a032b 100755 --- a/tests/run-process.sh +++ b/tests/run-process.sh @@ -5,7 +5,7 @@ # # Set USE_SCREEN True|False to change use of screen. 
# -# This script emulates the basic exec envirnment in ``stack.sh`` to test +# This script emulates the basic exec environment in ``stack.sh`` to test # the process spawn and kill operations. if [[ -z $1 ]]; then From 779d8670287952b8865a1d506ba2d68406139430 Mon Sep 17 00:00:00 2001 From: Joe D'Andrea Date: Mon, 30 Nov 2015 15:35:13 +0000 Subject: [PATCH 0655/2941] Force heat deferred_auth_method to password in standalone mode Heat does not support Keystone Trusts when deployed in standalone mode. This change forces an error when HEAT_DEFERRED_AUTH is set to anything other than "password" if HEAT_STANDALONE is True and advises of the acceptable setting. Change-Id: Ib4ee9d9af396093137a2a0f99f1b18ae153ccdb3 Closes-Bug: #1463837 --- lib/heat | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/heat b/lib/heat index 54666a5212..fdcf5bcaad 100644 --- a/lib/heat +++ b/lib/heat @@ -65,6 +65,12 @@ if [[ "$HEAT_STANDALONE" = "True" ]]; then # for standalone, use defaults which require no service user HEAT_STACK_DOMAIN=$(trueorfalse False HEAT_STACK_DOMAIN) HEAT_DEFERRED_AUTH=${HEAT_DEFERRED_AUTH:-password} + if [[ ${HEAT_DEFERRED_AUTH} != "password" ]]; then + # Heat does not support keystone trusts when deployed in + # standalone mode + die $LINENO \ + 'HEAT_DEFERRED_AUTH can only be set to "password" when HEAT_STANDALONE is True.' + fi else HEAT_STACK_DOMAIN=$(trueorfalse True HEAT_STACK_DOMAIN) HEAT_DEFERRED_AUTH=${HEAT_DEFERRED_AUTH:-trusts} From 30d5fae31548ed998e4c053ba957f95e068eaebc Mon Sep 17 00:00:00 2001 From: Martin Hickey Date: Tue, 10 Nov 2015 13:44:15 +0000 Subject: [PATCH 0656/2941] Neutron: Use generated configuration files if available Generate the neutron core sample config files by using the oslo generator. The files are generated with a .sample extension and replace the static example configuration files. Once the generation code is delivered, the static config files will be removed. 
Change-Id: Ic37a16b6cf8eb92030649f1fc8b198738a8cc104 Related-blueprint: autogen-neutron-conf-file Partial-bug: #1199963 Depends-On: I1c6dc4e7d479f1b7c755597caded24a0f018c712 Co-Authored-By: Louis Taylor --- lib/neutron-legacy | 19 +++++++++++++------ lib/neutron_plugins/services/metering | 2 +- 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index d00630af70..a60714f43b 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -870,7 +870,10 @@ function _create_neutron_conf_dir { function _configure_neutron_common { _create_neutron_conf_dir - cp $NEUTRON_DIR/etc/neutron.conf $NEUTRON_CONF + # Uses oslo config generator to generate core sample configuration files + (cd $NEUTRON_DIR && exec ./tools/generate_config_file_samples.sh) + + cp $NEUTRON_DIR/etc/neutron.conf.sample $NEUTRON_CONF Q_POLICY_FILE=$NEUTRON_CONF_DIR/policy.json cp $NEUTRON_DIR/etc/policy.json $Q_POLICY_FILE @@ -895,7 +898,9 @@ function _configure_neutron_common { Q_PLUGIN_CONF_FILE=$Q_PLUGIN_CONF_PATH/$Q_PLUGIN_CONF_FILENAME # NOTE(hichihara): Some neutron vendor plugins were already decomposed and # there is no config file in Neutron tree. They should prepare the file in each plugin. 
- if [ -f $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE ]; then + if [ -f "$NEUTRON_DIR/$Q_PLUGIN_CONF_FILE.sample" ]; then + cp "$NEUTRON_DIR/$Q_PLUGIN_CONF_FILE.sample" /$Q_PLUGIN_CONF_FILE + elif [ -f $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE ]; then cp $NEUTRON_DIR/$Q_PLUGIN_CONF_FILE /$Q_PLUGIN_CONF_FILE fi @@ -903,6 +908,8 @@ function _configure_neutron_common { iniset $NEUTRON_CONF DEFAULT state_path $DATA_DIR/neutron iniset $NEUTRON_CONF DEFAULT use_syslog $SYSLOG iniset $NEUTRON_CONF DEFAULT bind_host $Q_LISTEN_ADDRESS + iniset $NEUTRON_CONF oslo_concurrency lock_path $DATA_DIR/neutron/lock + # If addition config files are set, make sure their path name is set as well if [[ ${#Q_PLUGIN_EXTRA_CONF_FILES[@]} > 0 && $Q_PLUGIN_EXTRA_CONF_PATH == '' ]]; then die $LINENO "Neutron additional plugin config not set.. exiting" @@ -959,7 +966,7 @@ function _configure_neutron_debug_command { return fi - cp $NEUTRON_DIR/etc/l3_agent.ini $NEUTRON_TEST_CONFIG_FILE + cp $NEUTRON_DIR/etc/l3_agent.ini.sample $NEUTRON_TEST_CONFIG_FILE iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT verbose False iniset $NEUTRON_TEST_CONFIG_FILE DEFAULT debug False @@ -975,7 +982,7 @@ function _configure_neutron_debug_command { function _configure_neutron_dhcp_agent { - cp $NEUTRON_DIR/etc/dhcp_agent.ini $Q_DHCP_CONF_FILE + cp $NEUTRON_DIR/etc/dhcp_agent.ini.sample $Q_DHCP_CONF_FILE iniset $Q_DHCP_CONF_FILE DEFAULT verbose True iniset $Q_DHCP_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL @@ -1007,7 +1014,7 @@ function _configure_neutron_l3_agent { neutron_vpn_configure_agent fi - cp $NEUTRON_DIR/etc/l3_agent.ini $Q_L3_CONF_FILE + cp $NEUTRON_DIR/etc/l3_agent.ini.sample $Q_L3_CONF_FILE iniset $Q_L3_CONF_FILE DEFAULT verbose True iniset $Q_L3_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL @@ -1028,7 +1035,7 @@ function _configure_neutron_l3_agent { } function _configure_neutron_metadata_agent { - cp $NEUTRON_DIR/etc/metadata_agent.ini $Q_META_CONF_FILE + cp $NEUTRON_DIR/etc/metadata_agent.ini.sample $Q_META_CONF_FILE 
iniset $Q_META_CONF_FILE DEFAULT verbose True iniset $Q_META_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL diff --git a/lib/neutron_plugins/services/metering b/lib/neutron_plugins/services/metering index 5fd2fdce44..5b32468d21 100644 --- a/lib/neutron_plugins/services/metering +++ b/lib/neutron_plugins/services/metering @@ -21,7 +21,7 @@ function neutron_agent_metering_configure_agent { METERING_AGENT_CONF_FILENAME="$METERING_AGENT_CONF_PATH/metering_agent.ini" - cp $NEUTRON_DIR/etc/metering_agent.ini $METERING_AGENT_CONF_FILENAME + cp $NEUTRON_DIR/etc/metering_agent.ini.sample $METERING_AGENT_CONF_FILENAME } function neutron_metering_stop { From e0ac37c257bf08db8d220d13773859d9202305d2 Mon Sep 17 00:00:00 2001 From: Dmitry Guryanov Date: Mon, 30 Nov 2015 18:48:23 +0300 Subject: [PATCH 0657/2941] Fix path setup in add_sudo_secure_path There are two bugs in add_sudo_secure_path. Firstly we don't properly check if the file exists, so always append the new line. This will overwrite any existing changes. Secondly the logic for checking if the path exists is inverted, so we miss adding paths when we should. This particularly causes failures when installing with virtualenv's since the paths are inside the virtualenv, rather than the standard system locations. Change-Id: I646fe0c68958470d464fe4f3d81d5c17dd6f2ab6 Closes-bug: #1521241 --- inc/rootwrap | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/inc/rootwrap b/inc/rootwrap index 63ab59adc7..2a6e4b648f 100644 --- a/inc/rootwrap +++ b/inc/rootwrap @@ -22,14 +22,14 @@ function add_sudo_secure_path { local line # This is pretty simplistic for now - assume only the first line is used - if [[ -r SUDO_SECURE_PATH_FILE ]]; then + if [[ -r $SUDO_SECURE_PATH_FILE ]]; then line=$(head -1 $SUDO_SECURE_PATH_FILE) else line="Defaults:$STACK_USER secure_path=/usr/local/sbin:/usr/local/bin:/usr/sbin:/sbin:/usr/bin:/bin" fi # Only add ``dir`` if it is not already present - if [[ $line =~ $dir ]]; then + if [[ ! 
$line =~ $dir ]]; then echo "${line}:$dir" | sudo tee $SUDO_SECURE_PATH_FILE sudo chmod 400 $SUDO_SECURE_PATH_FILE sudo chown root:root $SUDO_SECURE_PATH_FILE From 1afc28bf6c33c792eb9d2fd1992534e82af29291 Mon Sep 17 00:00:00 2001 From: Ian Wienand Date: Fri, 27 Nov 2015 14:15:56 +1100 Subject: [PATCH 0658/2941] Turn off tracing when outputting errors When outputting these error strings, turn off the tracing so the user can actually read it. Also reword the "not root" user message so it fits into a standard terminal window length. Change-Id: I466c60865bc1128f4edd219f831a9c6cffa67829 Parital-Bug: #1517199 --- stack.sh | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/stack.sh b/stack.sh index 8625b5aaeb..083c488fc1 100755 --- a/stack.sh +++ b/stack.sh @@ -75,6 +75,7 @@ fi # Check if run in POSIX shell if [[ "${POSIXLY_CORRECT}" == "y" ]]; then + set +o xtrace echo "You are running POSIX compatibility mode, DevStack requires bash 4.2 or newer." exit 1 fi @@ -85,11 +86,11 @@ fi # action to create a suitable user account. if [[ $EUID -eq 0 ]]; then - echo "You are running this script as root." - echo "Cut it out." - echo "Really." - echo "If you need an account to run DevStack, do this (as root, heh) to create a non-root account:" - echo "$TOP_DIR/tools/create-stack-user.sh" + set +o xtrace + echo "DevStack should be run as a user with sudo permissions, " + echo "not root." + echo "A \"stack\" user configured correctly can be created with:" + echo " $TOP_DIR/tools/create-stack-user.sh" exit 1 fi @@ -98,6 +99,7 @@ fi # virtual env, and will fail in really odd ways if you do this. Make # this explicit as it has come up on the mailing list. if [[ -n "$VIRTUAL_ENV" ]]; then + set +o xtrace echo "You appear to be running under a python virtualenv." 
echo "DevStack does not support this, as we may break the" echo "virtualenv you are currently in by modifying " @@ -111,6 +113,7 @@ fi # on a lot of different environments, you sometimes run it on the # wrong box. This makes there be a way to prevent that. if [[ -e $HOME/.no-devstack ]]; then + set +o xtrace echo "You've marked this host as a no-devstack host, to save yourself from" echo "running devstack accidentally. If this is in error, please remove the" echo "~/.no-devstack file" From ddc3839bdc7ff73f6224273605db10fd88cd60df Mon Sep 17 00:00:00 2001 From: Doug Hellmann Date: Thu, 7 May 2015 21:06:24 +0000 Subject: [PATCH 0659/2941] Enable optional Python 3 support Add USE_PYTHON3 and PYTHON3_VERSION variables to allow services to use python 3 if they indicate support in their python package metadata. Tested in Heat here -> I837c2fba682ab430d50e9f43913f2fed20325a7a. Project config change to add a dedicated job to Heat is here -> I0837e62d6ccc66397a5e409f0961edd4be31f467 Change-Id: I079e18b58b214bf8362945c253d6d894ca8b1a6b --- inc/python | 47 +++++++++++++++++++++++++++++++++++++--- lib/stack | 1 + stackrc | 11 ++++++++++ tools/install_pip.sh | 8 +++++++ tools/install_prereqs.sh | 3 +++ 5 files changed, 67 insertions(+), 3 deletions(-) diff --git a/inc/python b/inc/python index 59668a2c6a..c157604699 100644 --- a/inc/python +++ b/inc/python @@ -28,10 +28,13 @@ declare -A PROJECT_VENV # Get the path to the pip command. # get_pip_command function get_pip_command { - which pip || which pip-python + local version="$1" + # NOTE(dhellmann): I don't know if we actually get a pip3.4-python + # under any circumstances. + which pip${version} || which pip${version}-python if [ $? 
-ne 0 ]; then - die $LINENO "Unable to find pip; cannot continue" + die $LINENO "Unable to find pip${version}; cannot continue" fi } @@ -66,6 +69,13 @@ function pip_install_gr { pip_install $clean_name } +# Determine the python versions supported by a package +function get_python_versions_for_package { + local name=$1 + cd $name && python setup.py --classifiers \ + | grep 'Language' | cut -f5 -d: | grep '\.' | tr '\n' ' ' +} + # Wrapper for ``pip install`` to set cache and proxy environment variables # Uses globals ``OFFLINE``, ``PIP_VIRTUAL_ENV``, # ``PIP_UPGRADE``, ``TRACK_DEPENDS``, ``*_proxy``, @@ -104,8 +114,22 @@ function pip_install { local sudo_pip="env" else local cmd_pip - cmd_pip=$(get_pip_command) + cmd_pip=$(get_pip_command $PYTHON2_VERSION) local sudo_pip="sudo -H" + if python3_enabled; then + # Look at the package classifiers to find the python + # versions supported, and if we find the version of + # python3 we've been told to use, use that instead of the + # default pip + local package_dir=${!#} + local python_versions + if [[ -d "$package_dir" ]]; then + python_versions=$(get_python_versions_for_package $package_dir) + if [[ $python_versions =~ $PYTHON3_VERSION ]]; then + cmd_pip=$(get_pip_command $PYTHON3_VERSION) + fi + fi + fi fi fi @@ -113,6 +137,8 @@ function pip_install { # Always apply constraints cmd_pip="$cmd_pip -c $REQUIREMENTS_DIR/upper-constraints.txt" + # FIXME(dhellmann): Need to force multiple versions of pip for + # packages like setuptools? 
local pip_version pip_version=$(python -c "import pip; \ print(pip.__version__.strip('.')[0])") @@ -276,6 +302,21 @@ function setup_package { fi } +# Report whether python 3 should be used +function python3_enabled { + if [[ $USE_PYTHON3 == "True" ]]; then + return 0 + else + return 1 + fi +} + +# Install python3 packages +function install_python3 { + if is_ubuntu; then + apt_get install python3.4 python3.4-dev + fi +} # Restore xtrace $INC_PY_TRACE diff --git a/lib/stack b/lib/stack index 7d98604b82..f09ddcee85 100644 --- a/lib/stack +++ b/lib/stack @@ -19,6 +19,7 @@ function stack_install_service { local service=$1 if type install_${service} >/dev/null 2>&1; then + # FIXME(dhellmann): Needs to be python3-aware at some point. if [[ ${USE_VENV} = True && -n ${PROJECT_VENV[$service]:-} ]]; then rm -rf ${PROJECT_VENV[$service]} source $TOP_DIR/tools/build_venv.sh ${PROJECT_VENV[$service]} ${ADDITIONAL_VENV_PACKAGES//,/ } diff --git a/stackrc b/stackrc index 5dd109c0a4..f949ccbad9 100644 --- a/stackrc +++ b/stackrc @@ -118,6 +118,17 @@ if [[ -r $RC_DIR/.localrc.password ]]; then source $RC_DIR/.localrc.password fi +# Control whether Python 3 should be used. +export USE_PYTHON3=${USE_PYTHON3:-False} + +# When Python 3 is supported by an application, adding the specific +# version of Python 3 to this variable will install the app using that +# version of the interpreter instead of 2.7. +export PYTHON3_VERSION=${PYTHON3_VERSION:-3.4} + +# Just to be more explicit on the Python 2 version to use. 
+export PYTHON2_VERSION=${PYTHON2_VERSION:-2.7} + # allow local overrides of env variables, including repo config if [[ -f $RC_DIR/localrc ]]; then # Old-style user-supplied config diff --git a/tools/install_pip.sh b/tools/install_pip.sh index ab5efb2e77..f239c7bb16 100755 --- a/tools/install_pip.sh +++ b/tools/install_pip.sh @@ -8,6 +8,7 @@ # Assumptions: # - update pip to $INSTALL_PIP_VERSION +# - if USE_PYTHON3=True, PYTHON3_VERSION refers to a version already installed set -o errexit set -o xtrace @@ -31,6 +32,8 @@ GetDistro echo "Distro: $DISTRO" function get_versions { + # FIXME(dhellmann): Deal with multiple python versions here? This + # is just used for reporting, so maybe not? PIP=$(which pip 2>/dev/null || which pip-python 2>/dev/null || true) if [[ -n $PIP ]]; then PIP_VERSION=$($PIP --version | awk '{ print $2}') @@ -75,6 +78,9 @@ function install_get_pip { touch $LOCAL_PIP.downloaded fi sudo -H -E python $LOCAL_PIP + if python3_enabled; then + sudo -H -E python${PYTHON3_VERSION} $LOCAL_PIP + fi } @@ -114,6 +120,7 @@ get_versions # python in f23 depends on the python-pip package if ! { is_fedora && [[ $DISTRO == "f23" ]]; }; then uninstall_package python-pip + uninstall_package python3-pip fi install_get_pip @@ -122,6 +129,7 @@ if [[ -n $PYPI_ALTERNATIVE_URL ]]; then configure_pypi_alternative_url fi +set -x pip_install -U setuptools get_versions diff --git a/tools/install_prereqs.sh b/tools/install_prereqs.sh index 38452cd90f..031f8a8eca 100755 --- a/tools/install_prereqs.sh +++ b/tools/install_prereqs.sh @@ -81,6 +81,9 @@ if [[ -n "$SYSLOG" && "$SYSLOG" != "False" ]]; then fi fi +if python3_enabled; then + install_python3 +fi # Mark end of run # --------------- From 88ee8ce4684e13865123636dd5d2baa5d6a44ef7 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Wed, 2 Dec 2015 07:47:31 -0500 Subject: [PATCH 0660/2941] create apt_get_update to try to work around broken mirrors Ubuntu's apt mirroring mechanism produces inconsistent mirrors pretty regularly. 
The devstack-gate apt-get update model seems to have been more effective getting past this than what we did in devstack. Adopt that method for our updates. Change-Id: I97c7896ef38b275aacb4f933fc849acee1bab858 --- functions-common | 39 +++++++++++++++++++++++++++++---------- 1 file changed, 29 insertions(+), 10 deletions(-) diff --git a/functions-common b/functions-common index d68ae77971..d4099ffcfa 100644 --- a/functions-common +++ b/functions-common @@ -978,6 +978,34 @@ function _get_package_dir { echo "$pkg_dir" } +# Wrapper for ``apt-get update`` to try multiple times on the update +# to address bad package mirrors (which happen all the time). +function apt_get_update { + # only do this once per run + if [[ "$REPOS_UPDATED" == "True" && "$RETRY_UPDATE" != "True" ]]; then + return + fi + + # bail if we are offline + [[ "$OFFLINE" = "True" ]] && return + + local sudo="sudo" + [[ "$(id -u)" = "0" ]] && sudo="env" + + # time all the apt operations + time_start "apt-get-update" + + local proxies="http_proxy=${http_proxy:-} https_proxy=${https_proxy:-} no_proxy=${no_proxy:-} " + local update_cmd="$sudo $proxies apt-get update" + if ! timeout 300 sh -c "while ! $update_cmd; do sleep 30; done"; then + die $LINENO "Failed to update apt repos, we're dead now" + fi + + REPOS_UPDATED=True + # stop the clock + time_stop "apt-get-update" +} + # Wrapper for ``apt-get`` to set cache and proxy environment variables # Uses globals ``OFFLINE``, ``*_proxy`` # apt_get operation package [package ...] @@ -1158,16 +1186,7 @@ function update_package_repo { fi if is_ubuntu; then - local xtrace - xtrace=$(set +o | grep xtrace) - set +o xtrace - if [[ "$REPOS_UPDATED" != "True" || "$RETRY_UPDATE" = "True" ]]; then - # if there are transient errors pulling the updates, that's fine. - # It may be secondary repositories that we don't really care about. 
- apt_get update || /bin/true - REPOS_UPDATED=True - fi - $xtrace + apt_get_update fi } From 41d01104b16bf5d0d6d4d7d2a1e5883d34bff810 Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Thu, 3 Dec 2015 08:12:23 -0500 Subject: [PATCH 0661/2941] remove generic extras.d support This removes the generic extras.d support, which we said we'd do at Mitaka-1. In tree extras.d continues to function as before, though we need stories to get ceph and ironic into plugins, and a better solution for Tempest. Change-Id: I8b134446dc08a2c3852423ca71af2f469f85496e --- functions-common | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/functions-common b/functions-common index d68ae77971..91a148603b 100644 --- a/functions-common +++ b/functions-common @@ -1741,17 +1741,18 @@ function run_phase { if [[ -d $TOP_DIR/extras.d ]]; then local extra_plugin_file_name for extra_plugin_file_name in $TOP_DIR/extras.d/*.sh; do - [[ -r $extra_plugin_file_name ]] && source $extra_plugin_file_name $mode $phase - # NOTE(sdague): generate a big warning about using - # extras.d in an unsupported way which will let us track - # unsupported usage in the gate. + # NOTE(sdague): only process extras.d for the 3 explicitly + # white listed elements in tree. We want these to move out + # over time as well, but they are in tree, so we need to + # manage that. local exceptions="50-ironic.sh 60-ceph.sh 80-tempest.sh" local extra extra=$(basename $extra_plugin_file_name) if [[ ! 
( $exceptions =~ "$extra" ) ]]; then - deprecated "extras.d support is being removed in Mitaka-1" - deprecated "jobs for project $extra will break after that point" - deprecated "please move project to a supported devstack plugin model" + warn "use of extras.d is no longer supported" + warn "processing of project $extra is skipped" + else + [[ -r $extra_plugin_file_name ]] && source $extra_plugin_file_name $mode $phase fi done fi From 2391d4321ffe3a0a482e01d133038dfd38c76bac Mon Sep 17 00:00:00 2001 From: Devananda van der Veen Date: Thu, 3 Dec 2015 09:16:18 -0800 Subject: [PATCH 0662/2941] Update lib/ironic to always skip cleaning Previously, devstack would disable ironic's cleaning phase if a driver with "agent" in the name was used. However, we have begun using the IPA ramdisk for all tests in the gate, which caused cleaning to be run for the "pxe_ssh" job which therefore fails due to timeouts. As a result, for now, we need to always disable cleaning. As a point of record, we should actually be testing cleaning in the gate. However, running 'shred' on the disks of a nested VM is too slow and causes the gate to timeout // take too long. Some options have been discussed for ways to test the callback mechanism but avoid actually running 'shred' on the disks. This needs to be revisited. Change-Id: Id15cf6cc49122b08e557e44871b31a8c0d20b55d Related-to-Bug: #1517277 --- lib/ironic | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/lib/ironic b/lib/ironic index 27b0c8d4ff..2fb2004d71 100644 --- a/lib/ironic +++ b/lib/ironic @@ -365,6 +365,9 @@ function configure_ironic_conductor { iniset $IRONIC_CONF_FILE pxe pxe_append_params "$pxe_params" fi + # Set these options for scenarios in which the agent fetches the image + # directly from glance, and don't set them where the image is pushed + # over iSCSI. 
if is_deployed_by_agent; then if [[ "$SWIFT_ENABLE_TEMPURLS" == "True" ]] ; then iniset $IRONIC_CONF_FILE glance swift_temp_url_key $SWIFT_TEMPURL_KEY @@ -379,9 +382,13 @@ function configure_ironic_conductor { iniset $IRONIC_CONF_FILE glance swift_container glance iniset $IRONIC_CONF_FILE glance swift_temp_url_duration 3600 iniset $IRONIC_CONF_FILE agent heartbeat_timeout 30 - iniset $IRONIC_CONF_FILE agent agent_erase_devices_priority 0 fi + # FIXME: this really needs to be tested in the gate. + # For now, any test using the agent ramdisk should skip cleaning + # because it is too slow to run in the gate. + iniset $IRONIC_CONF_FILE agent agent_erase_devices_priority 0 + if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then local pxebin pxebin=`basename $IRONIC_PXE_BOOT_IMAGE` From 7b7101f1c5ccd4d0722245613df8c8f7a67e79b9 Mon Sep 17 00:00:00 2001 From: Martin Hickey Date: Tue, 1 Dec 2015 22:17:42 +0000 Subject: [PATCH 0663/2941] Neutron FWaaS: Use generated configuration files if available Generate the Neutron FWaaS sample config files by using the oslo generator. The files are generated with a .sample extension and replace the static example configuration files. Once the generation code is delivered, the static config files will be removed. 
Change-Id: Ic8208850a27408c8fbeed80ecdb43345aa7dfaa4 Related-blueprint: autogen-neutron-conf-file Partial-bug: #1199963 Depends-On: I8e9113dfb88e5290f6eedd012d1a52fc35c3c88c --- lib/neutron_plugins/services/firewall | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/lib/neutron_plugins/services/firewall b/lib/neutron_plugins/services/firewall index 1d81a21825..2b7f32d233 100644 --- a/lib/neutron_plugins/services/firewall +++ b/lib/neutron_plugins/services/firewall @@ -14,8 +14,11 @@ function neutron_fwaas_configure_common { } function neutron_fwaas_configure_driver { + # Uses oslo config generator to generate FWaaS sample configuration files + (cd $NEUTRON_FWAAS_DIR && exec ./tools/generate_config_file_samples.sh) + FWAAS_DRIVER_CONF_FILENAME=/etc/neutron/fwaas_driver.ini - cp $NEUTRON_FWAAS_DIR/etc/fwaas_driver.ini $FWAAS_DRIVER_CONF_FILENAME + cp $NEUTRON_FWAAS_DIR/etc/fwaas_driver.ini.sample $FWAAS_DRIVER_CONF_FILENAME iniset_multiline $FWAAS_DRIVER_CONF_FILENAME fwaas enabled True iniset_multiline $FWAAS_DRIVER_CONF_FILENAME fwaas driver "neutron_fwaas.services.firewall.drivers.linux.iptables_fwaas.IptablesFwaasDriver" From 2a688440132173d493dff7c7c3760681d41e11be Mon Sep 17 00:00:00 2001 From: Steven Hardy Date: Tue, 8 Dec 2015 13:26:29 +0000 Subject: [PATCH 0664/2941] Heat - revise keystone/trusts config to avoid deprecated options There are some inter-related changes required to avoid using legacy fallback/deprecated paths in heat, which result in warnings in the log, e.g because we fall-back to reusing keystone auth_token configuration instead of heat specific sections. 
To fix this: - Don't explicitly set deferred_auth_method=trusts, as this is now the default (since kilo) - Create a new "trustee" section containing configuration used for the password auth-plugin associated with deferred authentication via trusts (support for this was added during liberty to enable us to stop incorrectly using the keystone auth_token config) - Create a "clients_keystone" section to avoid falling back to the legacy behavior of stealing the uri from auth_token. This also means we can remove the FIXME and auth_token auth_uri mentioned by jamielennox. Change-Id: Ie34332a7aec3b9b271df0759dd6ab66b45302832 Related-Bug: #1300246 --- lib/heat | 31 ++++++++++++++++++++++--------- 1 file changed, 22 insertions(+), 9 deletions(-) diff --git a/lib/heat b/lib/heat index fdcf5bcaad..3666776317 100644 --- a/lib/heat +++ b/lib/heat @@ -56,6 +56,10 @@ HEAT_ENV_DIR=$HEAT_CONF_DIR/environment.d HEAT_TEMPLATES_DIR=$HEAT_CONF_DIR/templates HEAT_API_HOST=${HEAT_API_HOST:-$HOST_IP} HEAT_API_PORT=${HEAT_API_PORT:-8004} +HEAT_SERVICE_USER=${HEAT_SERVICE_USER:-heat} +HEAT_TRUSTEE_USER=${HEAT_TRUSTEE_USER:-$HEAT_SERVICE_USER} +HEAT_TRUSTEE_PASSWORD=${HEAT_TRUSTEE_PASSWORD:-$SERVICE_PASSWORD} +HEAT_TRUSTEE_DOMAIN=${HEAT_TRUSTEE_DOMAIN:-default} # Support entry points installation of console scripts HEAT_BIN_DIR=$(get_python_exec_prefix) @@ -73,7 +77,7 @@ if [[ "$HEAT_STANDALONE" = "True" ]]; then fi else HEAT_STACK_DOMAIN=$(trueorfalse True HEAT_STACK_DOMAIN) - HEAT_DEFERRED_AUTH=${HEAT_DEFERRED_AUTH:-trusts} + HEAT_DEFERRED_AUTH=${HEAT_DEFERRED_AUTH:-} fi HEAT_PLUGIN_DIR=${HEAT_PLUGIN_DIR:-$DATA_DIR/heat/plugins} ENABLE_HEAT_PLUGINS=${ENABLE_HEAT_PLUGINS:-} @@ -134,30 +138,39 @@ function configure_heat { setup_colorized_logging $HEAT_CONF DEFAULT tenant user fi - iniset $HEAT_CONF DEFAULT deferred_auth_method $HEAT_DEFERRED_AUTH + if [ ! 
-z "$HEAT_DEFERRED_AUTH" ]; then + iniset $HEAT_CONF DEFAULT deferred_auth_method $HEAT_DEFERRED_AUTH + fi if [ "$HEAT_USE_MOD_WSGI" == "True" ]; then _config_heat_apache_wsgi fi - # NOTE(jamielennox): heat re-uses specific values from the - # keystone_authtoken middleware group and so currently fails when using the - # auth plugin setup. This should be fixed in heat. Heat is also the only - # service that requires the auth_uri to include a /v2.0. Remove this custom - # setup when bug #1300246 is resolved. - iniset $HEAT_CONF keystone_authtoken auth_uri $KEYSTONE_SERVICE_URI/v2.0 if [[ "$HEAT_STANDALONE" = "True" ]]; then iniset $HEAT_CONF paste_deploy flavor standalone iniset $HEAT_CONF clients_heat url "http://$HEAT_API_HOST:$HEAT_API_PORT/v1/%(tenant_id)s" else iniset $HEAT_CONF keystone_authtoken identity_uri $KEYSTONE_AUTH_URI - iniset $HEAT_CONF keystone_authtoken admin_user heat + iniset $HEAT_CONF keystone_authtoken admin_user $HEAT_SERVICE_USER iniset $HEAT_CONF keystone_authtoken admin_password $SERVICE_PASSWORD iniset $HEAT_CONF keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME iniset $HEAT_CONF keystone_authtoken cafile $SSL_BUNDLE_FILE iniset $HEAT_CONF keystone_authtoken signing_dir $HEAT_AUTH_CACHE_DIR fi + # If HEAT_DEFERRED_AUTH is unset or explicitly set to trusts, configure + # the section for the client plugin associated with the trustee + if [ -z "$HEAT_DEFERRED_AUTH" -o "trusts" == "$HEAT_DEFERRED_AUTH" ]; then + iniset $HEAT_CONF trustee auth_plugin password + iniset $HEAT_CONF trustee auth_url $KEYSTONE_AUTH_URI + iniset $HEAT_CONF trustee username $HEAT_TRUSTEE_USER + iniset $HEAT_CONF trustee password $HEAT_TRUSTEE_PASSWORD + iniset $HEAT_CONF trustee user_domain_id $HEAT_TRUSTEE_DOMAIN + fi + + # clients_keystone + iniset $HEAT_CONF clients_keystone auth_uri $KEYSTONE_AUTH_URI + # ec2authtoken iniset $HEAT_CONF ec2authtoken auth_uri $KEYSTONE_SERVICE_URI/v2.0 From 642b07b930cf5f49d1ed9aa220021d45ca631b1a Mon Sep 17 00:00:00 2001 
From: ghanshyam Date: Thu, 19 Nov 2015 10:01:14 +0900 Subject: [PATCH 0665/2941] Add compute microversions configuration on tempest Many projects like Nova, Ironic etc have implemented the microversions for versioning their APIs. Tempest is going to tests those microversions - I57b78b4c0543b6fb0533b556886a19a03297555e. For testing microversion in Tempest on gate, we need to set a valid range of microversion in Tempest config and based on that Tempest will run appropriate tests. This commit adds the below range options for compute microversion testing- - [None, 'latest'] - for master branch as default - [None, None] - for tests running on v2.0 - option to set the range. Depends-On: I81e86faca6f8c0ffb7da22154a62236ac25cf0c0 Partially implements blueprint api-microversions-testing-support Change-Id: I171b862d1bba1af467f5b9a76288216c39e2adda --- lib/tempest | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/lib/tempest b/lib/tempest index 6adc449ad0..61351c0628 100644 --- a/lib/tempest +++ b/lib/tempest @@ -357,6 +357,30 @@ function configure_tempest { compute_api_extensions=$(remove_disabled_extensions $compute_api_extensions $DISABLE_COMPUTE_API_EXTENSIONS) fi + # Set the microversion range for compute tests. + # This is used to run the Nova microversions tests. + # Setting [None, latest] range of microversion which allow Tempest to run all microversions tests. + # NOTE- To avoid microversion tests failure on stable branch, we need to change "tempest_compute_max_microversion" + # for stable branch on each release which should be changed from "latest" to max supported version of that release. + local tempest_compute_min_microversion=${TEMPEST_COMPUTE_MIN_MICROVERSION:-None} + local tempest_compute_max_microversion=${TEMPEST_COMPUTE_MAX_MICROVERSION:-"latest"} + # Reset microversions to None where v2.0 is running which does not support microversion. + # Both "None" means no microversion testing. 
+ if [[ "$TEMPEST_COMPUTE_TYPE" == "compute_legacy" ]]; then + tempest_compute_min_microversion=None + tempest_compute_max_microversion=None + fi + if [ "$tempest_compute_min_microversion" == "None" ]; then + inicomment $TEMPEST_CONFIG compute-feature-enabled min_microversion + else + iniset $TEMPEST_CONFIG compute-feature-enabled min_microversion $tempest_compute_min_microversion + fi + if [ "$tempest_compute_max_microversion" == "None" ]; then + inicomment $TEMPEST_CONFIG compute-feature-enabled max_microversion + else + iniset $TEMPEST_CONFIG compute-feature-enabled max_microversion $tempest_compute_max_microversion + fi + iniset $TEMPEST_CONFIG compute-feature-enabled resize True iniset $TEMPEST_CONFIG compute-feature-enabled live_migration ${LIVE_MIGRATION_AVAILABLE:-False} iniset $TEMPEST_CONFIG compute-feature-enabled change_password False From af0801de3c1b1c51cf1a995c2939e182d2ef4926 Mon Sep 17 00:00:00 2001 From: Philipp Marek Date: Wed, 9 Dec 2015 13:51:56 +0100 Subject: [PATCH 0666/2941] Make logfile symlinks with relative names. Using absolute names for the symlink breaks in quite a few ways; * when creating a tar file of the logs, * when serving via NFS, or any other case where the directory gets transferred to a different machine. So just create the symlink with relative names, then they'll work in any location. 
Change-Id: I432a69754985fc71feb0068b7adca01066d7bc1b --- functions-common | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/functions-common b/functions-common index d4099ffcfa..0c5e48cadf 100644 --- a/functions-common +++ b/functions-common @@ -1309,10 +1309,11 @@ function _run_process { exec 3>&- exec 6>&- - local real_logfile="${LOGDIR}/${service}.log.${CURRENT_LOG_TIME}" + local logfile="${service}.log.${CURRENT_LOG_TIME}" + local real_logfile="${LOGDIR}/${logfile}" if [[ -n ${LOGDIR} ]]; then exec 1>&"$real_logfile" 2>&1 - ln -sf "$real_logfile" ${LOGDIR}/${service}.log + bash -c "cd '$LOGDIR' && ln -sf '$logfile' ${service}.log" if [[ -n ${SCREEN_LOGDIR} ]]; then # Drop the backward-compat symlink ln -sf "$real_logfile" ${SCREEN_LOGDIR}/screen-${service}.log @@ -1399,7 +1400,8 @@ function screen_process { screen -S $SCREEN_NAME -X screen -t $name - local real_logfile="${LOGDIR}/${name}.log.${CURRENT_LOG_TIME}" + local logfile="${name}.log.${CURRENT_LOG_TIME}" + local real_logfile="${LOGDIR}/${logfile}" echo "LOGDIR: $LOGDIR" echo "SCREEN_LOGDIR: $SCREEN_LOGDIR" echo "log: $real_logfile" @@ -1410,7 +1412,7 @@ function screen_process { fi # If logging isn't active then avoid a broken symlink touch "$real_logfile" - ln -sf "$real_logfile" ${LOGDIR}/${name}.log + bash -c "cd '$LOGDIR' && ln -sf '$logfile' ${name}.log" if [[ -n ${SCREEN_LOGDIR} ]]; then # Drop the backward-compat symlink ln -sf "$real_logfile" ${SCREEN_LOGDIR}/screen-${1}.log From 56632fc75ddd4af3239c44e27673854dd65f4628 Mon Sep 17 00:00:00 2001 From: Jim Rollenhagen Date: Thu, 10 Dec 2015 05:57:19 -0800 Subject: [PATCH 0667/2941] Fix override-defaults in plugin docs Docs specify that this file should be override_defaults, when really devstack looks for override-defaults. 
Change-Id: I3900ec4d16ffb48c6969dac5081ea2817536c246 --- doc/source/plugins.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/source/plugins.rst b/doc/source/plugins.rst index b8da7e1237..83e5609efa 100644 --- a/doc/source/plugins.rst +++ b/doc/source/plugins.rst @@ -21,12 +21,12 @@ external repositories. The plugin interface assumes the following: An external git repository that includes a ``devstack/`` top level directory. Inside this directory there can be 3 files. -- ``override_defaults`` - a file containing global variables that +- ``override-defaults`` - a file containing global variables that will be sourced before the lib/* files. This allows the plugin to override the defaults that are otherwise set in the lib/* files. - For example, override_defaults may export CINDER_ENABLED_BACKENDS + For example, override-defaults may export CINDER_ENABLED_BACKENDS to include the plugin-specific storage backend and thus be able to override the default lvm only storage backend for Cinder. From 9fc3ba408a97d0dd39ce26dd8dbcdb3b110cde71 Mon Sep 17 00:00:00 2001 From: Jim Rollenhagen Date: Thu, 10 Dec 2015 13:33:28 +0000 Subject: [PATCH 0668/2941] Ironic: add flag for using plugin This adds a flag to skip ironic code if the ironic devstack plugin is in use. This flag will be set to true in ironic's devstack plugin to indicate that the plugin should be in control, rather than devstack. This is for the transition period only, and will be removed with the rest of the ironic code in the devstack tree, once the gate is configured to use the ironic plugin. 
Change-Id: Id01d97fd13fa9f866d645ec5077834ddb78b2b89 --- extras.d/50-ironic.sh | 7 +++++++ lib/ironic | 7 +++++++ 2 files changed, 14 insertions(+) diff --git a/extras.d/50-ironic.sh b/extras.d/50-ironic.sh index 3b8e3d5045..0ee6a94758 100644 --- a/extras.d/50-ironic.sh +++ b/extras.d/50-ironic.sh @@ -1,5 +1,12 @@ # ironic.sh - Devstack extras script to install ironic +# NOTE(jroll) this is used for the transition to a devstack plugin in +# the ironic tree. +IRONIC_USING_PLUGIN=$(trueorfalse False IRONIC_USING_PLUGIN) +if [[ "$IRONIC_USING_PLUGIN" == "True" ]] ; then + return 0 +fi + if is_service_enabled ir-api ir-cond; then if [[ "$1" == "source" ]]; then # Initial source diff --git a/lib/ironic b/lib/ironic index 2fb2004d71..dd4f8bf65f 100644 --- a/lib/ironic +++ b/lib/ironic @@ -26,6 +26,13 @@ _PIPEFAIL_IRONIC=$(set +o | grep pipefail) set +o xtrace set +o pipefail +# NOTE(jroll) this is used for the transition to a devstack plugin in +# the ironic tree. +IRONIC_USING_PLUGIN=$(trueorfalse False IRONIC_USING_PLUGIN) +if [[ "$IRONIC_USING_PLUGIN" == "True" ]] ; then + return 0 +fi + # Defaults # -------- From 10bff0e9968d7a9c59ea7f09f49775973f8b5008 Mon Sep 17 00:00:00 2001 From: Martin Hickey Date: Thu, 3 Dec 2015 15:18:10 +0000 Subject: [PATCH 0669/2941] Neutron LBaaS: Use generated configuration files if available Generate the Neutron LBaaS sample config files by using the oslo generator. The files are generated with a .sample extension and replace the static example configuration files. Once the generation code is delivered, the static config files will be removed. 
Change-Id: Iae1e581ec2bea9c0ced700229effcc716d53fe4e Related-blueprint: autogen-neutron-conf-file Partial-bug: #1199963 Depends-On: I25507f3bc6e995580aa91a912c2cf4110757df15 --- lib/neutron-legacy | 11 +++++++++-- lib/neutron_plugins/services/loadbalancer | 2 +- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index caf89e3d8c..628f6463d1 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -112,6 +112,9 @@ NEUTRON_CONF_DIR=/etc/neutron NEUTRON_CONF=$NEUTRON_CONF_DIR/neutron.conf export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/debug.ini"} +# Default provider for load balancer service +DEFAULT_LB_PROVIDER=LOADBALANCER:Haproxy:neutron_lbaas.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default + # Agent binaries. Note, binary paths for other agents are set in per-service # scripts in lib/neutron_plugins/services/ AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent" @@ -1058,8 +1061,12 @@ function _configure_neutron_ceilometer_notifications { } function _configure_neutron_lbaas { - if [ -f $NEUTRON_LBAAS_DIR/etc/neutron_lbaas.conf ]; then - cp $NEUTRON_LBAAS_DIR/etc/neutron_lbaas.conf $NEUTRON_CONF_DIR + # Uses oslo config generator to generate LBaaS sample configuration files + (cd $NEUTRON_LBAAS_DIR && exec ./tools/generate_config_file_samples.sh) + + if [ -f $NEUTRON_LBAAS_DIR/etc/neutron_lbaas.conf.sample ]; then + cp $NEUTRON_LBAAS_DIR/etc/neutron_lbaas.conf.sample $NEUTRON_CONF_DIR/neutron_lbaas.conf + iniset $NEUTRON_CONF_DIR/neutron_lbaas.conf service_providers service_provider $DEFAULT_LB_PROVIDER fi neutron_agent_lbaas_configure_common neutron_agent_lbaas_configure_agent diff --git a/lib/neutron_plugins/services/loadbalancer b/lib/neutron_plugins/services/loadbalancer index b07d06c32b..30e9480f2e 100644 --- a/lib/neutron_plugins/services/loadbalancer +++ b/lib/neutron_plugins/services/loadbalancer @@ -28,7 +28,7 @@ function 
neutron_agent_lbaas_configure_agent { LBAAS_AGENT_CONF_FILENAME="$LBAAS_AGENT_CONF_PATH/lbaas_agent.ini" - cp $NEUTRON_LBAAS_DIR/etc/lbaas_agent.ini $LBAAS_AGENT_CONF_FILENAME + cp $NEUTRON_LBAAS_DIR/etc/lbaas_agent.ini.sample $LBAAS_AGENT_CONF_FILENAME # ovs_use_veth needs to be set before the plugin configuration # occurs to allow plugins to override the setting. From e42306d9db86a6cbb7cf1c062d8a5bdcd8479654 Mon Sep 17 00:00:00 2001 From: gordon chung Date: Thu, 10 Dec 2015 14:54:01 -0500 Subject: [PATCH 0670/2941] only set admin_* options for eventlet keystone+apache don't need these values set. Change-Id: Iebdb31b5f0888613e0454f09a426933d6fcd71b3 see: http://lists.openstack.org/pipermail/openstack-dev/2015-December/081984.html --- lib/keystone | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/lib/keystone b/lib/keystone index b19202b0f7..6b4118de0b 100644 --- a/lib/keystone +++ b/lib/keystone @@ -218,8 +218,6 @@ function configure_keystone { iniset_rpc_backend keystone $KEYSTONE_CONF - iniset $KEYSTONE_CONF eventlet_server admin_bind_host "$KEYSTONE_ADMIN_BIND_HOST" - # Register SSL certificates if provided if is_ssl_enabled_service key; then ensure_certificates KEYSTONE @@ -296,13 +294,14 @@ function configure_keystone { iniset $KEYSTONE_CONF DEFAULT logging_debug_format_suffix "%(funcName)s %(pathname)s:%(lineno)d" iniset $KEYSTONE_CONF DEFAULT logging_exception_prefix "%(process)d TRACE %(name)s %(instance)s" _config_keystone_apache_wsgi + else + iniset $KEYSTONE_CONF eventlet_server admin_bind_host "$KEYSTONE_ADMIN_BIND_HOST" + iniset $KEYSTONE_CONF eventlet_server admin_workers "$API_WORKERS" + # Public workers will use the server default, typically number of CPU. fi iniset $KEYSTONE_CONF DEFAULT max_token_size 16384 - iniset $KEYSTONE_CONF eventlet_server admin_workers "$API_WORKERS" - # Public workers will use the server default, typically number of CPU. 
- iniset $KEYSTONE_CONF fernet_tokens key_repository "$KEYSTONE_CONF_DIR/fernet-keys/" } From 69431725eb526d9daf320d7a05cccf22d10eaafd Mon Sep 17 00:00:00 2001 From: Anusha Ramineni Date: Tue, 8 Dec 2015 12:04:27 +0530 Subject: [PATCH 0671/2941] Add congress to plugin registry Now congress supports devstack plugin model, hence including the same in plugin registry Change-Id: I1ac83c529a466e6a75a9b46ed9b56085140a63ed --- doc/source/plugin-registry.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index 49b3a7fc02..c68d926822 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -26,6 +26,8 @@ The following are plugins that exist for official OpenStack projects. +------------------+---------------------------------------------+--------------------+ |ceilometer |git://git.openstack.org/openstack/ceilometer | metering | +------------------+---------------------------------------------+--------------------+ +|congress |git://git.openstack.org/openstack/congress | governance | ++------------------+---------------------------------------------+--------------------+ |gnocchi |git://git.openstack.org/openstack/gnocchi | metric | +------------------+---------------------------------------------+--------------------+ |magnum |git://git.openstack.org/openstack/magnum | | From 357dff588ef63935a2a36e89b1aa96419ef09ece Mon Sep 17 00:00:00 2001 From: Sean McGinnis Date: Fri, 11 Dec 2015 13:51:24 -0600 Subject: [PATCH 0672/2941] Remove Cinder API version cap Cinder API was pinned to v1 due to openstackclient missing some of the v2 commands, as reported in osc bug 1475060. That bug has since been marked invalid, but its intent was covered by the blueprint: https://blueprints.launchpad.net/python-openstackclient/+spec/volume-v2 This removes the pinning to the v1 API now that osc supports v2. 
Also removing the enablement of v1 as it was deprecated three releases ago and we would like to get more coverage on v2. Change-Id: Ia4d97734738d026c8721791669110778ff5eb6e5 --- lib/cinder | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/lib/cinder b/lib/cinder index 2119858253..569f3ab0a3 100644 --- a/lib/cinder +++ b/lib/cinder @@ -270,10 +270,6 @@ function configure_cinder { iniset $CINDER_CONF DEFAULT state_path $CINDER_STATE_PATH iniset $CINDER_CONF oslo_concurrency lock_path $CINDER_STATE_PATH iniset $CINDER_CONF DEFAULT periodic_interval $CINDER_PERIODIC_INTERVAL - # NOTE(thingee): Cinder V1 API is deprecated and defaults to off as of - # Juno. Keep it enabled so we can continue testing while it's still - # supported. - iniset $CINDER_CONF DEFAULT enable_v1_api true iniset $CINDER_CONF DEFAULT os_region_name "$REGION_NAME" @@ -550,9 +546,7 @@ function create_volume_types { local be be_name for be in ${CINDER_ENABLED_BACKENDS//,/ }; do be_name=${be##*:} - # FIXME(jamielennox): Remove --os-volume-api-version pinning when - # osc supports volume type create on v2 api. bug #1475060 - openstack volume type create --os-volume-api-version 1 --property volume_backend_name="${be_name}" ${be_name} + openstack volume type create --property volume_backend_name="${be_name}" ${be_name} done fi } From cf4f76299f84135a03fe29e5eddc97490eba806a Mon Sep 17 00:00:00 2001 From: Mahito OGURA Date: Wed, 12 Aug 2015 10:21:27 +0900 Subject: [PATCH 0673/2941] Update the vnc config options group 'DEFAULT' to 'vnc' n-api log output WARNINGs that vnc config options group 'DEFAULT' is deprecated. New vnc config options group is 'vnc'. This is change of Nova.[1] This patch changes the vnc config options group 'DEFAULT' to 'vnc'. 
[1] https://bugs.launchpad.net/nova/+bug/1447528 Change-Id: If54f750bac83298e90bdca27b5992fe2e5fbb712 Closes-Bug: 1483583 --- lib/nova | 12 ++++++------ lib/nova_plugins/hypervisor-libvirt | 10 +++++----- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/lib/nova b/lib/nova index 6337f875ef..3e3f0f49ac 100644 --- a/lib/nova +++ b/lib/nova @@ -594,9 +594,9 @@ function create_nova_conf { # These settings don't hurt anything if n-xvnc and n-novnc are disabled if is_service_enabled n-cpu; then NOVNCPROXY_URL=${NOVNCPROXY_URL:-"http://$SERVICE_HOST:6080/vnc_auto.html"} - iniset $NOVA_CONF DEFAULT novncproxy_base_url "$NOVNCPROXY_URL" + iniset $NOVA_CONF vnc novncproxy_base_url "$NOVNCPROXY_URL" XVPVNCPROXY_URL=${XVPVNCPROXY_URL:-"http://$SERVICE_HOST:6081/console"} - iniset $NOVA_CONF DEFAULT xvpvncproxy_base_url "$XVPVNCPROXY_URL" + iniset $NOVA_CONF vnc xvpvncproxy_base_url "$XVPVNCPROXY_URL" SPICEHTML5PROXY_URL=${SPICEHTML5PROXY_URL:-"http://$SERVICE_HOST:6082/spice_auto.html"} iniset $NOVA_CONF spice html5proxy_base_url "$SPICEHTML5PROXY_URL" fi @@ -606,13 +606,13 @@ function create_nova_conf { # For multi-host, this should be the management ip of the compute host. 
VNCSERVER_LISTEN=${VNCSERVER_LISTEN=$NOVA_SERVICE_LOCAL_HOST} VNCSERVER_PROXYCLIENT_ADDRESS=${VNCSERVER_PROXYCLIENT_ADDRESS=$NOVA_SERVICE_LOCAL_HOST} - iniset $NOVA_CONF DEFAULT vnc_enabled true - iniset $NOVA_CONF DEFAULT vncserver_listen "$VNCSERVER_LISTEN" - iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS" + iniset $NOVA_CONF vnc enabled true + iniset $NOVA_CONF vnc vncserver_listen "$VNCSERVER_LISTEN" + iniset $NOVA_CONF vnc vncserver_proxyclient_address "$VNCSERVER_PROXYCLIENT_ADDRESS" iniset $NOVA_CONF DEFAULT novncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" iniset $NOVA_CONF DEFAULT xvpvncproxy_host "$NOVA_SERVICE_LISTEN_ADDRESS" else - iniset $NOVA_CONF DEFAULT vnc_enabled false + iniset $NOVA_CONF vnc enabled false fi if is_service_enabled n-spice; then diff --git a/lib/nova_plugins/hypervisor-libvirt b/lib/nova_plugins/hypervisor-libvirt index 8bbaa2133d..1b4f7ae80a 100644 --- a/lib/nova_plugins/hypervisor-libvirt +++ b/lib/nova_plugins/hypervisor-libvirt @@ -48,13 +48,13 @@ function configure_nova_hypervisor { iniset $NOVA_CONF DEFAULT firewall_driver "$LIBVIRT_FIREWALL_DRIVER" # Power architecture currently does not support graphical consoles. if is_arch "ppc64"; then - iniset $NOVA_CONF DEFAULT vnc_enabled "false" + iniset $NOVA_CONF vnc enabled "false" fi # arm64-specific configuration if is_arch "aarch64"; then # arm64 architecture currently does not support graphical consoles. 
- iniset $NOVA_CONF DEFAULT vnc_enabled "false" + iniset $NOVA_CONF vnc enabled "false" fi # File injection is being disabled by default in the near future - @@ -65,9 +65,9 @@ function configure_nova_hypervisor { iniset $NOVA_CONF libvirt connection_uri "parallels+unix:///system" iniset $NOVA_CONF libvirt images_type "ploop" iniset $NOVA_CONF DEFAULT force_raw_images "False" - iniset $NOVA_CONF DEFAULT vncserver_proxyclient_address $HOST_IP - iniset $NOVA_CONF DEFAULT vncserver_listen $HOST_IP - iniset $NOVA_CONF DEFAULT vnc_keymap + iniset $NOVA_CONF vnc vncserver_proxyclient_address $HOST_IP + iniset $NOVA_CONF vnc vncserver_listen $HOST_IP + iniset $NOVA_CONF vnc keymap fi } From 76cbbe37aae5d54542d62a5c6deec428a8cdc75e Mon Sep 17 00:00:00 2001 From: Mark McLoughlin Date: Mon, 7 Dec 2015 05:05:04 -0500 Subject: [PATCH 0674/2941] libvirt: don't repeatedly configure libvirtd logging /etc/libvirt is not world-readable (at least on Fedora and RHEL) so use sudo with the grep that checks whether we have already configured libvirtd logging. Also, change the regex so we don't count commented out logging config. Change-Id: I67484b28aafd0fa828385321fa96d9141cb4cb59 Signed-off-by: Mark McLoughlin --- lib/nova_plugins/functions-libvirt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/nova_plugins/functions-libvirt b/lib/nova_plugins/functions-libvirt index dae55c6eba..4f9b239a1e 100644 --- a/lib/nova_plugins/functions-libvirt +++ b/lib/nova_plugins/functions-libvirt @@ -110,10 +110,10 @@ EOF local log_filters="1:libvirt 1:qemu 1:conf 1:security 3:object 3:event 3:json 3:file 1:util 1:qemu_monitor" fi local log_outputs="1:file:/var/log/libvirt/libvirtd.log" - if ! grep -q "log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then + if ! sudo grep -q "^log_filters=\"$log_filters\"" /etc/libvirt/libvirtd.conf; then echo "log_filters=\"$log_filters\"" | sudo tee -a /etc/libvirt/libvirtd.conf fi - if ! 
grep -q "log_outputs=\"$log_outputs\"" /etc/libvirt/libvirtd.conf; then + if ! sudo grep -q "^log_outputs=\"$log_outputs\"" /etc/libvirt/libvirtd.conf; then echo "log_outputs=\"$log_outputs\"" | sudo tee -a /etc/libvirt/libvirtd.conf fi fi From 239a9788b3f73495efbdf586425a83d714be4412 Mon Sep 17 00:00:00 2001 From: YAMAMOTO Takashi Date: Mon, 7 Dec 2015 17:09:59 +0900 Subject: [PATCH 0675/2941] Remove a stale comment about SQLALCHEMY_DATABASE_DRIVER SQLALCHEMY_DATABASE_DRIVER is no longer used after If6d8d08e5b7b7c48ca012677b536d71058def6fd . Also, remove mysql connector packages from the install list. Closes-Bug: #1523377 Related-Bug: #1493304 Change-Id: I5ecbc3b0bac989faa5c46d3c2866558a505414d8 --- files/debs/keystone | 1 - files/debs/neutron | 1 - files/debs/nova | 1 - files/rpms/neutron | 1 - files/rpms/nova | 1 - stackrc | 12 ------------ 6 files changed, 17 deletions(-) diff --git a/files/debs/keystone b/files/debs/keystone index 0795167047..370e4aac51 100644 --- a/files/debs/keystone +++ b/files/debs/keystone @@ -1,6 +1,5 @@ libkrb5-dev libldap2-dev libsasl2-dev -python-mysql.connector python-mysqldb sqlite3 diff --git a/files/debs/neutron b/files/debs/neutron index 85145d3654..e53cc68ccf 100644 --- a/files/debs/neutron +++ b/files/debs/neutron @@ -8,7 +8,6 @@ iputils-ping libmysqlclient-dev mysql-server #NOPRIME postgresql-server-dev-all -python-mysql.connector python-mysqldb rabbitmq-server # NOPRIME radvd # NOPRIME diff --git a/files/debs/nova b/files/debs/nova index fe57fc4b2a..58dad411a8 100644 --- a/files/debs/nova +++ b/files/debs/nova @@ -15,7 +15,6 @@ libvirt-dev # NOPRIME mysql-server # NOPRIME parted pm-utils -python-mysql.connector python-mysqldb qemu # dist:wheezy,jessie NOPRIME qemu-kvm # NOPRIME diff --git a/files/rpms/neutron b/files/rpms/neutron index 9683475d29..2e49a0cf93 100644 --- a/files/rpms/neutron +++ b/files/rpms/neutron @@ -4,7 +4,6 @@ dnsmasq-utils # for dhcp_release ebtables iptables iputils -mysql-connector-python mysql-devel 
MySQL-python mysql-server # NOPRIME diff --git a/files/rpms/nova b/files/rpms/nova index 00e759636e..4db9a06d95 100644 --- a/files/rpms/nova +++ b/files/rpms/nova @@ -15,7 +15,6 @@ libvirt-devel # NOPRIME libvirt-python # NOPRIME libxml2-python m2crypto -mysql-connector-python mysql-devel MySQL-python mysql-server # NOPRIME diff --git a/stackrc b/stackrc index 5dd109c0a4..0c311ad65b 100644 --- a/stackrc +++ b/stackrc @@ -72,18 +72,6 @@ if ! isset ENABLED_SERVICES ; then ENABLED_SERVICES+=,rabbit,tempest,mysql,dstat fi -# SQLAlchemy supports multiple database drivers for each database server -# type. For example, deployer may use MySQLdb, MySQLConnector, or oursql -# to access MySQL database. -# -# When defined, the variable controls which database driver is used to -# connect to database server. Otherwise using default driver defined for -# each database type. -# -# You can find the list of currently supported drivers for each database -# type at: http://docs.sqlalchemy.org/en/rel_0_9/core/engines.html -# SQLALCHEMY_DATABASE_DRIVER="mysqldb" - # Global toggle for enabling services under mod_wsgi. If this is set to # ``True`` all services that use HTTPD + mod_wsgi as the preferred method of # deployment, will be deployed under Apache. If this is set to ``False`` all From 2b4d6d16211cc55794b7d96594394f5e8c40fa4b Mon Sep 17 00:00:00 2001 From: Martin Hickey Date: Fri, 4 Dec 2015 14:40:03 +0000 Subject: [PATCH 0676/2941] Neutron VPNaaS: Use generated configuration files if available Generate the Neutron VPNaaS sample config files by using the oslo generator. The files are generated with a .sample extension and replace the static example configuration files. Once the generation code is delivered, the static config files will be removed. 
Change-Id: Icef8f7e8f0e8e78bfffa7a5af3f9f2300376b115 Related-blueprint: autogen-neutron-conf-file Partial-bug: #1199963 Depends-On: I4a6094b8218dfd320d05bfb1e3bc121e8930c551 --- lib/neutron-legacy | 6 ++++-- lib/neutron_plugins/services/vpn | 4 +++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index caf89e3d8c..0089a0d4cf 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -1079,8 +1079,10 @@ function _configure_neutron_fwaas { } function _configure_neutron_vpn { - if [ -f $NEUTRON_VPNAAS_DIR/etc/neutron_vpnaas.conf ]; then - cp $NEUTRON_VPNAAS_DIR/etc/neutron_vpnaas.conf $NEUTRON_CONF_DIR + # Uses oslo config generator to generate VPNaaS sample configuration files + (cd $NEUTRON_VPNAAS_DIR && exec ./tools/generate_config_file_samples.sh) + if [ -f $NEUTRON_VPNAAS_DIR/etc/neutron_vpnaas.conf.sample ]; then + cp $NEUTRON_VPNAAS_DIR/etc/neutron_vpnaas.conf.sample $NEUTRON_CONF_DIR/neutron_vpnaas.conf fi neutron_vpn_install_agent_packages neutron_vpn_configure_common diff --git a/lib/neutron_plugins/services/vpn b/lib/neutron_plugins/services/vpn index 8a379f588c..e790913847 100644 --- a/lib/neutron_plugins/services/vpn +++ b/lib/neutron_plugins/services/vpn @@ -29,7 +29,9 @@ function neutron_vpn_configure_common { } function neutron_vpn_configure_agent { - cp $NEUTRON_VPNAAS_DIR/etc/vpn_agent.ini $Q_VPN_CONF_FILE + # Uses oslo config generator to generate LBaaS sample configuration files + (cd $NEUTRON_VPNAAS_DIR && exec ./tools/generate_config_file_samples.sh) + cp $NEUTRON_VPNAAS_DIR/etc/vpn_agent.ini.sample $Q_VPN_CONF_FILE if [[ "$IPSEC_PACKAGE" == "strongswan" ]]; then iniset_multiline $Q_VPN_CONF_FILE vpnagent vpn_device_driver neutron_vpnaas.services.vpn.device_drivers.strongswan_ipsec.StrongSwanDriver if is_fedora; then From 97b9e970b27f2303b8404aaa31a9f2e7d51ee787 Mon Sep 17 00:00:00 2001 From: Dave Chen Date: Tue, 15 Dec 2015 03:33:48 +0800 Subject: [PATCH 0677/2941] Remove the support for 
keystone extensions All keystone extensions have been moved into cores and are enabled by default, there is no need to configure the extension in devstack but configure it in devstack will block the install process. Change-Id: I7d21b122c641f601295ee7ece3583404b3874dbd Closes-Bug: #1526033 --- lib/keystone | 33 --------------------------------- 1 file changed, 33 deletions(-) diff --git a/lib/keystone b/lib/keystone index 6b4118de0b..336ad12cbb 100644 --- a/lib/keystone +++ b/lib/keystone @@ -52,10 +52,6 @@ KEYSTONE_CONF_DIR=${KEYSTONE_CONF_DIR:-/etc/keystone} KEYSTONE_CONF=$KEYSTONE_CONF_DIR/keystone.conf KEYSTONE_PASTE_INI=${KEYSTONE_PASTE_INI:-$KEYSTONE_CONF_DIR/keystone-paste.ini} -# Set up additional extensions, such as oauth1, federation -# Example of KEYSTONE_EXTENSIONS=oauth1,federation -KEYSTONE_EXTENSIONS=${KEYSTONE_EXTENSIONS:-} - # Toggle for deploying Keystone under HTTPD + mod_wsgi KEYSTONE_USE_MOD_WSGI=${KEYSTONE_USE_MOD_WSGI:-${ENABLE_HTTPD_MOD_WSGI_SERVICES}} @@ -192,8 +188,6 @@ function configure_keystone { inidelete $KEYSTONE_PASTE_INI composite:admin \\/v2.0 fi - configure_keystone_extensions - # Rewrite stock ``keystone.conf`` if is_service_enabled ldap; then @@ -305,25 +299,6 @@ function configure_keystone { iniset $KEYSTONE_CONF fernet_tokens key_repository "$KEYSTONE_CONF_DIR/fernet-keys/" } -function configure_keystone_extensions { - # Add keystone extension into keystone v3 application pipeline - local extension_value - local api_v3 - local extension - local api_v3_extension - for extension_value in ${KEYSTONE_EXTENSIONS//,/ }; do - if [[ -z "${extension_value}" ]]; then - continue - fi - api_v3=$(iniget $KEYSTONE_PASTE_INI pipeline:api_v3 pipeline) - extension=$(echo $api_v3 | sed -ne "/${extension_value}/ p;" ) - if [[ -z $extension ]]; then - api_v3_extension=$(echo $api_v3 | sed -ne "s/service_v3/${extension_value}_extension service_v3/p;" ) - iniset $KEYSTONE_PASTE_INI pipeline:api_v3 pipeline "$api_v3_extension" - fi - done -} - # 
create_keystone_accounts() - Sets up common required keystone accounts # Tenant User Roles @@ -468,14 +443,6 @@ function init_keystone { # Initialize keystone database $KEYSTONE_BIN_DIR/keystone-manage db_sync - local extension_value - for extension_value in ${KEYSTONE_EXTENSIONS//,/ }; do - if [[ -z "${extension_value}" ]]; then - continue - fi - $KEYSTONE_BIN_DIR/keystone-manage db_sync --extension "${extension_value}" - done - if [[ "$KEYSTONE_TOKEN_FORMAT" == "pki" || "$KEYSTONE_TOKEN_FORMAT" == "pkiz" ]]; then # Set up certificates rm -rf $KEYSTONE_CONF_DIR/ssl From 43f62c08499de004a964c3a2f90ce400a0f932ad Mon Sep 17 00:00:00 2001 From: Akihiro Motoki Date: Tue, 15 Dec 2015 16:44:41 +0900 Subject: [PATCH 0678/2941] Move horizon config and start to appropriate phase in stack.sh Previously horizon configuration and start are done too early and as a result horizon init and start need to be run twice after horizon plugins are enabled. - horizon config was done before "run_phase stack install" - horizon init and start were done before "run_phase stack post-config" This commit rearrange horizon setup to the appropriate phases defined in the devstack plugin interface. - Configuration of horizon settings is moved to configure_horizon. - horizon config is now called between run_phase stack install and post-config. - horizon init and start are now called between run_phase stack post-config and extra. Change-Id: I8bf2ceaf7734c4f7cec68bc05d7cdbae81ef311e --- lib/horizon | 8 +++++--- stack.sh | 14 ++++++++------ 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/lib/horizon b/lib/horizon index 67181fcf29..dca31116e2 100644 --- a/lib/horizon +++ b/lib/horizon @@ -83,10 +83,7 @@ function configure_horizon { # Message catalog compilation is handled by Django admin script, # so compiling them after the installation avoids Django installation twice. (cd $HORIZON_DIR; ./run_tests.sh -N --compilemessages) -} -# init_horizon() - Initialize databases, etc. 
-function init_horizon { # ``local_settings.py`` is used to override horizon default settings. local local_settings=$HORIZON_DIR/openstack_dashboard/local/local_settings.py cp $HORIZON_SETTINGS $local_settings @@ -113,6 +110,7 @@ function init_horizon { horizon_conf=$(apache_site_config_for horizon) # Configure apache to run horizon + # Set up the django horizon application to serve via apache/wsgi sudo sh -c "sed -e \" s,%USER%,$APACHE_USER,g; s,%GROUP%,$APACHE_GROUP,g; @@ -133,7 +131,10 @@ function init_horizon { exit_distro_not_supported "horizon apache configuration" fi enable_apache_site horizon +} +# init_horizon() - Initialize databases, etc. +function init_horizon { # Remove old log files that could mess with how DevStack detects whether Horizon # has been successfully started (see start_horizon() and functions::screen_it()) # and run_process @@ -147,6 +148,7 @@ function init_horizon { django_admin=django-admin.py fi + # These need to be run after horizon plugins are configured. 
DJANGO_SETTINGS_MODULE=openstack_dashboard.settings $django_admin collectstatic --noinput DJANGO_SETTINGS_MODULE=openstack_dashboard.settings $django_admin compress --force diff --git a/stack.sh b/stack.sh index 19d05c904a..e65d22f114 100755 --- a/stack.sh +++ b/stack.sh @@ -840,7 +840,6 @@ if is_service_enabled horizon; then install_django_openstack_auth # dashboard stack_install_service horizon - configure_horizon fi if is_service_enabled heat; then @@ -1060,12 +1059,9 @@ write_clouds_yaml # Horizon # ------- -# Set up the django horizon application to serve via apache/wsgi - if is_service_enabled horizon; then - echo_summary "Configuring and starting Horizon" - init_horizon - start_horizon + echo_summary "Configuring Horizon" + configure_horizon fi @@ -1290,6 +1286,12 @@ if is_service_enabled heat; then fi fi +if is_service_enabled horizon; then + echo_summary "Starting Horizon" + init_horizon + start_horizon +fi + # Create account rc files # ======================= From 563a7e75b7d26275a7416eb4d6641fcfe867b45a Mon Sep 17 00:00:00 2001 From: Sean Dague Date: Tue, 15 Dec 2015 17:16:19 -0500 Subject: [PATCH 0679/2941] set the validation path to fixed for n-net See if using fixed IPs for connectivity to hosts is more reliable than floating ips, which really were not intended for these purposes (at least in nova-net). Change-Id: I251710ee9186a68bb3ddc58ca803c33b81c8ac49 --- lib/tempest | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/tempest b/lib/tempest index 6adc449ad0..e651dc30ed 100644 --- a/lib/tempest +++ b/lib/tempest @@ -329,6 +329,9 @@ function configure_tempest { iniset $TEMPEST_CONFIG compute flavor_ref $flavor_ref iniset $TEMPEST_CONFIG compute flavor_ref_alt $flavor_ref_alt iniset $TEMPEST_CONFIG compute ssh_connect_method $ssh_connect_method + # set the equiv validation option here as well to ensure they are + # in sync. They shouldn't be separate options. + iniset $TEMPEST_CONFIG validation connect_method $ssh_connect_method if [[ ! 
$(is_service_enabled n-cell) && ! $(is_service_enabled neutron) ]]; then iniset $TEMPEST_CONFIG compute fixed_network_name $PRIVATE_NETWORK_NAME fi From 2ca8af45a78226f29c3251cbef6449a0a51a4c1f Mon Sep 17 00:00:00 2001 From: Atsushi SAKAI Date: Tue, 8 Dec 2015 15:36:13 +0900 Subject: [PATCH 0680/2941] Add 5 time measurement points run_process process starting time test_with_retry process starting time (for neutron legacy) restart_apache_server Apache HTTPD restart time wait_for_service HTTP server waiting time git_timed git command execution time example ======================== DevStack Components Timed ======================== run_process - 52 secs test_with_retry - 3 secs apt-get-update - 8 secs pip_install - 76 secs restart_apache_server - 9 secs wait_for_service - 11 secs git_timed - 127 secs apt-get - 15 secs Change-Id: I66140726617450cd9fe9b702092cacf053a20065 --- functions | 2 ++ functions-common | 6 ++++++ lib/apache | 2 ++ 3 files changed, 10 insertions(+) diff --git a/functions b/functions index 762fc472c2..9495710e92 100644 --- a/functions +++ b/functions @@ -357,7 +357,9 @@ CURL_GET="${CURL_GET:-curl -g}" function wait_for_service { local timeout=$1 local url=$2 + time_start "wait_for_service" timeout $timeout sh -c "while ! $CURL_GET -k --noproxy '*' -s $url >/dev/null; do sleep 1; done" + time_stop "wait_for_service" } diff --git a/functions-common b/functions-common index 1b01eefaf9..023203d1fc 100644 --- a/functions-common +++ b/functions-common @@ -597,6 +597,7 @@ function git_timed { timeout=${GIT_TIMEOUT} fi + time_start "git_timed" until timeout -s SIGINT ${timeout} git "$@"; do # 124 is timeout(1)'s special return code when it reached the # timeout; otherwise assume fatal failure @@ -611,6 +612,7 @@ function git_timed { fi sleep 5 done + time_stop "git_timed" } # git update using reference as a branch. 
@@ -1373,6 +1375,7 @@ function run_process { local command="$2" local group=$3 + time_start "run_process" if is_service_enabled $service; then if [[ "$USE_SCREEN" = "True" ]]; then screen_process "$service" "$command" "$group" @@ -1381,6 +1384,7 @@ function run_process { _run_process "$service" "$command" "$group" & fi fi + time_stop "run_process" } # Helper to launch a process in a named screen @@ -2196,9 +2200,11 @@ function test_with_retry { local until=${3:-10} local sleep=${4:-0.5} + time_start "test_with_retry" if ! timeout $until sh -c "while ! $testcmd; do sleep $sleep; done"; then die $LINENO "$failmsg" fi + time_stop "test_with_retry" } # Timing infrastructure - figure out where large blocks of time are diff --git a/lib/apache b/lib/apache index c9e02a2b58..2c84c7a481 100644 --- a/lib/apache +++ b/lib/apache @@ -185,9 +185,11 @@ function restart_apache_server { # Apache can be slow to stop, doing an explicit stop, sleep, start helps # to mitigate issues where apache will claim a port it's listening on is # still in use and fail to start. + time_start "restart_apache_server" stop_service $APACHE_NAME sleep 3 start_service $APACHE_NAME + time_stop "restart_apache_server" } # Restore xtrace From 00b5f4af92a640a2507046cf76ee57caa166310b Mon Sep 17 00:00:00 2001 From: Mike Turek Date: Tue, 15 Dec 2015 18:16:35 +0000 Subject: [PATCH 0681/2941] Replace deprecated baremetal timeouts with new deploy_timeout See review 258670 for more information. The preceding patch consolidates Ironic timeouts into one blanket timeout. This patch sets the new timeout via the BUILD_TIMEOUT variable and removes the deprecated timeouts. 
Change-Id: I320461b2b40aa2b68afc38a901a5933e39aac1b6 Related-Bug: #1526863 --- lib/tempest | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/tempest b/lib/tempest index 61351c0628..b9d9e80fc2 100644 --- a/lib/tempest +++ b/lib/tempest @@ -533,8 +533,7 @@ function configure_tempest { # Baremetal if [ "$VIRT_DRIVER" = "ironic" ] ; then iniset $TEMPEST_CONFIG baremetal driver_enabled True - iniset $TEMPEST_CONFIG baremetal unprovision_timeout $BUILD_TIMEOUT - iniset $TEMPEST_CONFIG baremetal active_timeout $BUILD_TIMEOUT + iniset $TEMPEST_CONFIG baremetal deploy_timeout $BUILD_TIMEOUT iniset $TEMPEST_CONFIG baremetal deploy_img_dir $FILES iniset $TEMPEST_CONFIG baremetal node_uuid $IRONIC_NODE_UUID iniset $TEMPEST_CONFIG compute-feature-enabled change_password False From 168be83597dcfc38b6d552ecbf20b5093580e2cb Mon Sep 17 00:00:00 2001 From: Davanum Srinivas Date: Thu, 8 Oct 2015 07:57:44 -0700 Subject: [PATCH 0682/2941] Nuke EC2 API service in defaults Tempest does not test EC2 by default anymore: Ib5e24e19bcba9808a9f49fe7f328668df77fe4f9 So we don't need to run nova ec2 API service by default. Change-Id: Ieec0ca1361baf0978d96e69e1134f699c1af3bb9 --- stackrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stackrc b/stackrc index f949ccbad9..edf23ce997 100644 --- a/stackrc +++ b/stackrc @@ -91,7 +91,7 @@ fi ENABLE_HTTPD_MOD_WSGI_SERVICES=True # Set the default Nova APIs to enable -NOVA_ENABLED_APIS=ec2,osapi_compute,metadata +NOVA_ENABLED_APIS=osapi_compute,metadata # Set the root URL for Horizon HORIZON_APACHE_ROOT="/dashboard" From 47115b0314bcb9006d674fb0c7da6ac6eb94de29 Mon Sep 17 00:00:00 2001 From: Martin Hickey Date: Fri, 18 Dec 2015 11:08:38 +0000 Subject: [PATCH 0683/2941] Neutron VPNaaS: Set default service provider Default value needs to be set for service_provider config item in neutron_vpnaas.conf. This is to support backward compatibility for using the enable_service q-vpn. 
It should be noted that the recommended way to use VPN is the devstack plugin. Change-Id: I0d5960c81c47a138087d480527eff2a8eef59445 Closes-bug: #1527483 --- lib/neutron-legacy | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/lib/neutron-legacy b/lib/neutron-legacy index e0c4676297..6af44e6ab8 100644 --- a/lib/neutron-legacy +++ b/lib/neutron-legacy @@ -115,6 +115,9 @@ export NEUTRON_TEST_CONFIG_FILE=${NEUTRON_TEST_CONFIG_FILE:-"$NEUTRON_CONF_DIR/d # Default provider for load balancer service DEFAULT_LB_PROVIDER=LOADBALANCER:Haproxy:neutron_lbaas.services.loadbalancer.drivers.haproxy.plugin_driver.HaproxyOnHostPluginDriver:default +# Default provider for VPN service +DEFAULT_VPN_PROVIDER=VPN:openswan:neutron_vpnaas.services.vpn.service_drivers.ipsec.IPsecVPNDriver:default + # Agent binaries. Note, binary paths for other agents are set in per-service # scripts in lib/neutron_plugins/services/ AGENT_DHCP_BINARY="$NEUTRON_BIN_DIR/neutron-dhcp-agent" @@ -1092,6 +1095,7 @@ function _configure_neutron_vpn { (cd $NEUTRON_VPNAAS_DIR && exec ./tools/generate_config_file_samples.sh) if [ -f $NEUTRON_VPNAAS_DIR/etc/neutron_vpnaas.conf.sample ]; then cp $NEUTRON_VPNAAS_DIR/etc/neutron_vpnaas.conf.sample $NEUTRON_CONF_DIR/neutron_vpnaas.conf + iniset $NEUTRON_CONF_DIR/neutron_vpnaas.conf service_providers service_provider $DEFAULT_VPN_PROVIDER fi neutron_vpn_install_agent_packages neutron_vpn_configure_common From 47367071cdc110c40ec5c92d12f4dbe50cc553d4 Mon Sep 17 00:00:00 2001 From: Jim Rollenhagen Date: Thu, 10 Dec 2015 14:24:00 +0000 Subject: [PATCH 0684/2941] Remove ironic code from tree This removes all of the ironic code from the devstack tree, in favor of the devstack plugin in Ironic's tree. 
Depends-On: I659e3de5c64df798441798ff48ba5c9c0506585a Depends-On: I2c52bc014f1b0dbc6b0ae22a4deb4132b4c28621 Change-Id: I5125fce295c79600781469c2f48bea80e7600081 --- clean.sh | 1 - doc/source/index.rst | 2 - doc/source/plugin-registry.rst | 2 + extras.d/50-ironic.sh | 50 -- files/apache-ironic.template | 12 - files/debs/ironic | 19 - files/rpms/ironic | 14 - functions-common | 12 +- lib/ironic | 874 ------------------- stackrc | 8 +- tools/ironic/scripts/cleanup-node | 25 - tools/ironic/scripts/configure-vm | 93 -- tools/ironic/scripts/create-node | 79 -- tools/ironic/scripts/setup-network | 28 - tools/ironic/templates/brbm.xml | 6 - tools/ironic/templates/tftpd-xinetd.template | 14 - tools/ironic/templates/vm.xml | 49 -- 17 files changed, 16 insertions(+), 1272 deletions(-) delete mode 100644 extras.d/50-ironic.sh delete mode 100644 files/apache-ironic.template delete mode 100644 files/debs/ironic delete mode 100644 files/rpms/ironic delete mode 100644 lib/ironic delete mode 100755 tools/ironic/scripts/cleanup-node delete mode 100755 tools/ironic/scripts/configure-vm delete mode 100755 tools/ironic/scripts/create-node delete mode 100755 tools/ironic/scripts/setup-network delete mode 100644 tools/ironic/templates/brbm.xml delete mode 100644 tools/ironic/templates/tftpd-xinetd.template delete mode 100644 tools/ironic/templates/vm.xml diff --git a/clean.sh b/clean.sh index ae28aa9ab7..fc6f80dad4 100755 --- a/clean.sh +++ b/clean.sh @@ -50,7 +50,6 @@ source $TOP_DIR/lib/cinder source $TOP_DIR/lib/swift source $TOP_DIR/lib/heat source $TOP_DIR/lib/neutron-legacy -source $TOP_DIR/lib/ironic # Extras Source diff --git a/doc/source/index.rst b/doc/source/index.rst index ec345c9f64..2622436820 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -174,7 +174,6 @@ Scripts * `lib/heat `__ * `lib/horizon `__ * `lib/infra `__ -* `lib/ironic `__ * `lib/keystone `__ * `lib/ldap `__ * `lib/neutron-legacy `__ @@ -189,7 +188,6 @@ Scripts * `clean.sh `__ * `run\_tests.sh 
`__ -* `extras.d/50-ironic.sh `__ * `extras.d/60-ceph.sh `__ * `extras.d/70-tuskar.sh `__ * `extras.d/80-tempest.sh `__ diff --git a/doc/source/plugin-registry.rst b/doc/source/plugin-registry.rst index c68d926822..7682defbed 100644 --- a/doc/source/plugin-registry.rst +++ b/doc/source/plugin-registry.rst @@ -30,6 +30,8 @@ The following are plugins that exist for official OpenStack projects. +------------------+---------------------------------------------+--------------------+ |gnocchi |git://git.openstack.org/openstack/gnocchi | metric | +------------------+---------------------------------------------+--------------------+ +|ironic |git://git.openstack.org/openstack/ironic | baremetal | ++------------------+---------------------------------------------+--------------------+ |magnum |git://git.openstack.org/openstack/magnum | | +------------------+---------------------------------------------+--------------------+ |manila |git://git.openstack.org/openstack/manila | file shares | diff --git a/extras.d/50-ironic.sh b/extras.d/50-ironic.sh deleted file mode 100644 index 0ee6a94758..0000000000 --- a/extras.d/50-ironic.sh +++ /dev/null @@ -1,50 +0,0 @@ -# ironic.sh - Devstack extras script to install ironic - -# NOTE(jroll) this is used for the transition to a devstack plugin in -# the ironic tree. 
-IRONIC_USING_PLUGIN=$(trueorfalse False IRONIC_USING_PLUGIN) -if [[ "$IRONIC_USING_PLUGIN" == "True" ]] ; then - return 0 -fi - -if is_service_enabled ir-api ir-cond; then - if [[ "$1" == "source" ]]; then - # Initial source - source $TOP_DIR/lib/ironic - elif [[ "$1" == "stack" && "$2" == "install" ]]; then - echo_summary "Installing Ironic" - install_ironic - install_ironicclient - cleanup_ironic - elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then - echo_summary "Configuring Ironic" - configure_ironic - - if is_service_enabled key; then - create_ironic_accounts - fi - - elif [[ "$1" == "stack" && "$2" == "extra" ]]; then - # Initialize ironic - init_ironic - - # Start the ironic API and ironic taskmgr components - echo_summary "Starting Ironic" - start_ironic - - if [[ "$IRONIC_BAREMETAL_BASIC_OPS" = "True" ]]; then - prepare_baremetal_basic_ops - fi - fi - - if [[ "$1" == "unstack" ]]; then - stop_ironic - if [[ "$IRONIC_BAREMETAL_BASIC_OPS" = "True" ]]; then - cleanup_baremetal_basic_ops - fi - fi - - if [[ "$1" == "clean" ]]; then - cleanup_ironic - fi -fi diff --git a/files/apache-ironic.template b/files/apache-ironic.template deleted file mode 100644 index 88641946f6..0000000000 --- a/files/apache-ironic.template +++ /dev/null @@ -1,12 +0,0 @@ -Listen %PUBLICPORT% - - - DocumentRoot "%HTTPROOT%" - - Options Indexes FollowSymLinks - AllowOverride None - Order allow,deny - Allow from all - Require all granted - - diff --git a/files/debs/ironic b/files/debs/ironic deleted file mode 100644 index 4d5a6aa6b7..0000000000 --- a/files/debs/ironic +++ /dev/null @@ -1,19 +0,0 @@ -docker.io -ipmitool -iptables -ipxe -libguestfs0 -libvirt-bin -open-iscsi -openssh-client -openvswitch-datapath-dkms -openvswitch-switch -python-libguestfs -python-libvirt -qemu -qemu-kvm -qemu-utils -sgabios -syslinux -tftpd-hpa -xinetd diff --git a/files/rpms/ironic b/files/rpms/ironic deleted file mode 100644 index 2bf8bb370e..0000000000 --- a/files/rpms/ironic +++ /dev/null @@ 
-1,14 +0,0 @@ -docker-io -ipmitool -iptables -ipxe-bootimgs -libguestfs -libvirt -libvirt-python -net-tools -openssh-clients -openvswitch -sgabios -syslinux -tftp-server -xinetd diff --git a/functions-common b/functions-common index 1b01eefaf9..c3eef469d3 100644 --- a/functions-common +++ b/functions-common @@ -954,6 +954,15 @@ function get_endpoint_url { -c URL -f value) } +# check if we are using ironic with hardware +# TODO(jroll) this is a kludge left behind when ripping ironic code +# out of tree, as it is used by nova and neutron. +# figure out a way to refactor nova/neutron code to eliminate this +function is_ironic_hardware { + is_service_enabled ironic && [[ -n "${IRONIC_DEPLOY_DRIVER##*_ssh}" ]] && return 0 + return 1 +} + # Package Functions # ================= @@ -1764,7 +1773,7 @@ function run_phase { # white listed elements in tree. We want these to move out # over time as well, but they are in tree, so we need to # manage that. - local exceptions="50-ironic.sh 60-ceph.sh 80-tempest.sh" + local exceptions="60-ceph.sh 80-tempest.sh" local extra extra=$(basename $extra_plugin_file_name) if [[ ! 
( $exceptions =~ "$extra" ) ]]; then @@ -1952,7 +1961,6 @@ function is_service_enabled { [[ ${service} == n-cpu-* && ${ENABLED_SERVICES} =~ "n-cpu" ]] && enabled=0 [[ ${service} == "nova" && ${ENABLED_SERVICES} =~ "n-" ]] && enabled=0 [[ ${service} == "glance" && ${ENABLED_SERVICES} =~ "g-" ]] && enabled=0 - [[ ${service} == "ironic" && ${ENABLED_SERVICES} =~ "ir-" ]] && enabled=0 [[ ${service} == "neutron" && ${ENABLED_SERVICES} =~ "q-" ]] && enabled=0 [[ ${service} == "trove" && ${ENABLED_SERVICES} =~ "tr-" ]] && enabled=0 [[ ${service} == "swift" && ${ENABLED_SERVICES} =~ "s-" ]] && enabled=0 diff --git a/lib/ironic b/lib/ironic deleted file mode 100644 index dd4f8bf65f..0000000000 --- a/lib/ironic +++ /dev/null @@ -1,874 +0,0 @@ -#!/bin/bash -# -# lib/ironic -# Functions to control the configuration and operation of the **Ironic** service - -# Dependencies: -# -# - ``functions`` file -# - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined -# - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined -# - ``SERVICE_HOST`` -# - ``KEYSTONE_TOKEN_FORMAT`` must be defined - -# ``stack.sh`` calls the entry points in this order: -# -# - install_ironic -# - install_ironicclient -# - init_ironic -# - start_ironic -# - stop_ironic -# - cleanup_ironic - -# Save trace and pipefail settings -_XTRACE_IRONIC=$(set +o | grep xtrace) -_PIPEFAIL_IRONIC=$(set +o | grep pipefail) -set +o xtrace -set +o pipefail - -# NOTE(jroll) this is used for the transition to a devstack plugin in -# the ironic tree. 
-IRONIC_USING_PLUGIN=$(trueorfalse False IRONIC_USING_PLUGIN) -if [[ "$IRONIC_USING_PLUGIN" == "True" ]] ; then - return 0 -fi - -# Defaults -# -------- - -# Set up default directories -GITDIR["python-ironicclient"]=$DEST/python-ironicclient -GITDIR["ironic-lib"]=$DEST/ironic-lib - -IRONIC_DIR=$DEST/ironic -IRONIC_PYTHON_AGENT_DIR=$DEST/ironic-python-agent -IRONIC_DATA_DIR=$DATA_DIR/ironic -IRONIC_STATE_PATH=/var/lib/ironic -IRONIC_AUTH_CACHE_DIR=${IRONIC_AUTH_CACHE_DIR:-/var/cache/ironic} -IRONIC_CONF_DIR=${IRONIC_CONF_DIR:-/etc/ironic} -IRONIC_CONF_FILE=$IRONIC_CONF_DIR/ironic.conf -IRONIC_ROOTWRAP_CONF=$IRONIC_CONF_DIR/rootwrap.conf -IRONIC_POLICY_JSON=$IRONIC_CONF_DIR/policy.json - -# Deploy callback timeout can be changed from its default (1800), if required. -IRONIC_CALLBACK_TIMEOUT=${IRONIC_CALLBACK_TIMEOUT:-} - -# Deploy to hardware platform -IRONIC_HW_NODE_CPU=${IRONIC_HW_NODE_CPU:-1} -IRONIC_HW_NODE_RAM=${IRONIC_HW_NODE_RAM:-512} -IRONIC_HW_NODE_DISK=${IRONIC_HW_NODE_DISK:-10} -IRONIC_HW_EPHEMERAL_DISK=${IRONIC_HW_EPHEMERAL_DISK:-0} -# The file is composed of multiple lines, each line includes four field -# separated by white space: IPMI address, MAC address, IPMI username -# and IPMI password. 
-# -# 192.168.110.107 00:1e:67:57:50:4c root otc123 -IRONIC_IPMIINFO_FILE=${IRONIC_IPMIINFO_FILE:-$IRONIC_DATA_DIR/hardware_info} - -# Set up defaults for functional / integration testing -IRONIC_NODE_UUID=${IRONIC_NODE_UUID:-`uuidgen`} -IRONIC_SCRIPTS_DIR=${IRONIC_SCRIPTS_DIR:-$TOP_DIR/tools/ironic/scripts} -IRONIC_TEMPLATES_DIR=${IRONIC_TEMPLATES_DIR:-$TOP_DIR/tools/ironic/templates} -IRONIC_BAREMETAL_BASIC_OPS=$(trueorfalse False IRONIC_BAREMETAL_BASIC_OPS) -IRONIC_ENABLED_DRIVERS=${IRONIC_ENABLED_DRIVERS:-fake,pxe_ssh,pxe_ipmitool} -IRONIC_SSH_USERNAME=${IRONIC_SSH_USERNAME:-`whoami`} -IRONIC_SSH_TIMEOUT=${IRONIC_SSH_TIMEOUT:-15} -IRONIC_SSH_KEY_DIR=${IRONIC_SSH_KEY_DIR:-$IRONIC_DATA_DIR/ssh_keys} -IRONIC_SSH_KEY_FILENAME=${IRONIC_SSH_KEY_FILENAME:-ironic_key} -IRONIC_KEY_FILE=${IRONIC_KEY_FILE:-$IRONIC_SSH_KEY_DIR/$IRONIC_SSH_KEY_FILENAME} -IRONIC_SSH_VIRT_TYPE=${IRONIC_SSH_VIRT_TYPE:-virsh} -IRONIC_TFTPBOOT_DIR=${IRONIC_TFTPBOOT_DIR:-$IRONIC_DATA_DIR/tftpboot} -IRONIC_TFTPSERVER_IP=${IRONIC_TFTPSERVER_IP:-$HOST_IP} -IRONIC_VM_SSH_PORT=${IRONIC_VM_SSH_PORT:-22} -IRONIC_VM_SSH_ADDRESS=${IRONIC_VM_SSH_ADDRESS:-$HOST_IP} -IRONIC_VM_COUNT=${IRONIC_VM_COUNT:-1} -IRONIC_VM_SPECS_CPU=${IRONIC_VM_SPECS_CPU:-1} -IRONIC_VM_SPECS_RAM=${IRONIC_VM_SPECS_RAM:-512} -IRONIC_VM_SPECS_DISK=${IRONIC_VM_SPECS_DISK:-10} -IRONIC_VM_EPHEMERAL_DISK=${IRONIC_VM_EPHEMERAL_DISK:-0} -IRONIC_VM_EMULATOR=${IRONIC_VM_EMULATOR:-/usr/bin/qemu-system-x86_64} -IRONIC_VM_NETWORK_BRIDGE=${IRONIC_VM_NETWORK_BRIDGE:-brbm} -IRONIC_VM_NETWORK_RANGE=${IRONIC_VM_NETWORK_RANGE:-192.0.2.0/24} -IRONIC_VM_MACS_CSV_FILE=${IRONIC_VM_MACS_CSV_FILE:-$IRONIC_DATA_DIR/ironic_macs.csv} -IRONIC_AUTHORIZED_KEYS_FILE=${IRONIC_AUTHORIZED_KEYS_FILE:-$HOME/.ssh/authorized_keys} - -# By default, baremetal VMs will console output to file. -IRONIC_VM_LOG_CONSOLE=${IRONIC_VM_LOG_CONSOLE:-True} -IRONIC_VM_LOG_DIR=${IRONIC_VM_LOG_DIR:-$IRONIC_DATA_DIR/logs/} - -# Use DIB to create deploy ramdisk and kernel. 
-IRONIC_BUILD_DEPLOY_RAMDISK=$(trueorfalse True IRONIC_BUILD_DEPLOY_RAMDISK) -# If not use DIB, these files are used as deploy ramdisk/kernel. -# (The value must be an absolute path) -IRONIC_DEPLOY_RAMDISK=${IRONIC_DEPLOY_RAMDISK:-} -IRONIC_DEPLOY_KERNEL=${IRONIC_DEPLOY_KERNEL:-} -IRONIC_DEPLOY_ELEMENT=${IRONIC_DEPLOY_ELEMENT:-deploy-ironic} - -IRONIC_AGENT_KERNEL_URL=${IRONIC_AGENT_KERNEL_URL:-http://tarballs.openstack.org/ironic-python-agent/coreos/files/coreos_production_pxe.vmlinuz} -IRONIC_AGENT_RAMDISK_URL=${IRONIC_AGENT_RAMDISK_URL:-http://tarballs.openstack.org/ironic-python-agent/coreos/files/coreos_production_pxe_image-oem.cpio.gz} - -# Which deploy driver to use - valid choices right now -# are ``pxe_ssh``, ``pxe_ipmitool``, ``agent_ssh`` and ``agent_ipmitool``. -IRONIC_DEPLOY_DRIVER=${IRONIC_DEPLOY_DRIVER:-pxe_ssh} - -# TODO(agordeev): replace 'ubuntu' with host distro name getting -IRONIC_DEPLOY_FLAVOR=${IRONIC_DEPLOY_FLAVOR:-ubuntu $IRONIC_DEPLOY_ELEMENT} - -# Support entry points installation of console scripts -IRONIC_BIN_DIR=$(get_python_exec_prefix) - -# Ironic connection info. Note the port must be specified. -IRONIC_SERVICE_PROTOCOL=http -IRONIC_SERVICE_PORT=${IRONIC_SERVICE_PORT:-6385} -IRONIC_HOSTPORT=${IRONIC_HOSTPORT:-$SERVICE_HOST:$IRONIC_SERVICE_PORT} - -# Enable iPXE -IRONIC_IPXE_ENABLED=$(trueorfalse False IRONIC_IPXE_ENABLED) -IRONIC_HTTP_DIR=${IRONIC_HTTP_DIR:-$IRONIC_DATA_DIR/httpboot} -IRONIC_HTTP_SERVER=${IRONIC_HTTP_SERVER:-$HOST_IP} -IRONIC_HTTP_PORT=${IRONIC_HTTP_PORT:-8088} - -# NOTE(lucasagomes): This flag is used to differentiate the nodes that -# uses IPA as their deploy ramdisk from nodes that uses the agent_* drivers -# (which also uses IPA but depends on Swift Temp URLs to work). At present, -# all drivers that uses the iSCSI approach for their deployment supports -# using both, IPA or bash ramdisks for the deployment. 
In the future we -# want to remove the support for the bash ramdisk in favor of IPA, once -# we get there this flag can be removed, and all conditionals that uses -# it should just run by default. -IRONIC_DEPLOY_DRIVER_ISCSI_WITH_IPA=$(trueorfalse False IRONIC_DEPLOY_DRIVER_ISCSI_WITH_IPA) - -# get_pxe_boot_file() - Get the PXE/iPXE boot file path -function get_pxe_boot_file { - local relpath=syslinux/pxelinux.0 - if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then - relpath=ipxe/undionly.kpxe - fi - - local pxe_boot_file - if is_ubuntu; then - pxe_boot_file=/usr/lib/$relpath - elif is_fedora || is_suse; then - pxe_boot_file=/usr/share/$relpath - fi - - echo $pxe_boot_file -} - -# PXE boot image -IRONIC_PXE_BOOT_IMAGE=${IRONIC_PXE_BOOT_IMAGE:-$(get_pxe_boot_file)} - - -# Functions -# --------- - -# Test if any Ironic services are enabled -# is_ironic_enabled -function is_ironic_enabled { - [[ ,${ENABLED_SERVICES} =~ ,"ir-" ]] && return 0 - return 1 -} - -function is_ironic_hardware { - is_ironic_enabled && [[ -n "${IRONIC_DEPLOY_DRIVER##*_ssh}" ]] && return 0 - return 1 -} - -function is_deployed_by_agent { - [[ -z "${IRONIC_DEPLOY_DRIVER%%agent*}" ]] && return 0 - return 1 -} - -function is_deployed_with_ipa_ramdisk { - is_deployed_by_agent || [[ "$IRONIC_DEPLOY_DRIVER_ISCSI_WITH_IPA" == "True" ]] && return 0 - return 1 -} - -# install_ironic() - Collect source and prepare -function install_ironic { - # make sure all needed service were enabled - local req_services="key" - if [[ "$VIRT_DRIVER" == "ironic" ]]; then - req_services+=" nova glance neutron" - fi - for srv in $req_services; do - if ! is_service_enabled "$srv"; then - die $LINENO "$srv should be enabled for Ironic." 
- fi - done - - if use_library_from_git "ironic-lib"; then - git_clone_by_name "ironic-lib" - setup_dev_lib "ironic-lib" - fi - - git_clone $IRONIC_REPO $IRONIC_DIR $IRONIC_BRANCH - setup_develop $IRONIC_DIR - - if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then - install_apache_wsgi - fi -} - -# install_ironicclient() - Collect sources and prepare -function install_ironicclient { - if use_library_from_git "python-ironicclient"; then - git_clone_by_name "python-ironicclient" - setup_dev_lib "python-ironicclient" - sudo install -D -m 0644 -o $STACK_USER {${GITDIR["python-ironicclient"]}/tools/,/etc/bash_completion.d/}ironic.bash_completion - else - # nothing actually "requires" ironicclient, so force instally from pypi - pip_install_gr python-ironicclient - fi -} - -# _cleanup_ironic_apache_wsgi() - Remove wsgi files, disable and remove apache vhost file -function _cleanup_ironic_apache_wsgi { - sudo rm -rf $IRONIC_HTTP_DIR - disable_apache_site ironic - sudo rm -f $(apache_site_config_for ironic) - restart_apache_server -} - -# _config_ironic_apache_wsgi() - Set WSGI config files of Ironic -function _config_ironic_apache_wsgi { - local ironic_apache_conf - ironic_apache_conf=$(apache_site_config_for ironic) - sudo cp $FILES/apache-ironic.template $ironic_apache_conf - sudo sed -e " - s|%PUBLICPORT%|$IRONIC_HTTP_PORT|g; - s|%HTTPROOT%|$IRONIC_HTTP_DIR|g; - " -i $ironic_apache_conf - enable_apache_site ironic -} - -# cleanup_ironic() - Remove residual data files, anything left over from previous -# runs that would need to clean up. -function cleanup_ironic { - sudo rm -rf $IRONIC_AUTH_CACHE_DIR $IRONIC_CONF_DIR -} - -# configure_ironic_dirs() - Create all directories required by Ironic and -# associated services. 
-function configure_ironic_dirs { - sudo install -d -o $STACK_USER $IRONIC_CONF_DIR $STACK_USER $IRONIC_DATA_DIR \ - $IRONIC_STATE_PATH $IRONIC_TFTPBOOT_DIR $IRONIC_TFTPBOOT_DIR/pxelinux.cfg - sudo chown -R $STACK_USER:$LIBVIRT_GROUP $IRONIC_TFTPBOOT_DIR - - if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then - sudo install -d -o $STACK_USER -g $LIBVIRT_GROUP $IRONIC_HTTP_DIR - fi - - if [ ! -f $IRONIC_PXE_BOOT_IMAGE ]; then - die $LINENO "PXE boot file $IRONIC_PXE_BOOT_IMAGE not found." - fi - - # Copy PXE binary - if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then - cp $IRONIC_PXE_BOOT_IMAGE $IRONIC_TFTPBOOT_DIR - else - # Syslinux >= 5.00 pxelinux.0 binary is not "stand-alone" anymore, - # it depends on some c32 modules to work correctly. - # More info: http://www.syslinux.org/wiki/index.php/Library_modules - cp -aR $(dirname $IRONIC_PXE_BOOT_IMAGE)/*.{c32,0} $IRONIC_TFTPBOOT_DIR - fi -} - -# configure_ironic() - Set config files, create data dirs, etc -function configure_ironic { - configure_ironic_dirs - - # Copy over ironic configuration file and configure common parameters. - cp $IRONIC_DIR/etc/ironic/ironic.conf.sample $IRONIC_CONF_FILE - iniset $IRONIC_CONF_FILE DEFAULT debug True - inicomment $IRONIC_CONF_FILE DEFAULT log_file - iniset $IRONIC_CONF_FILE database connection `database_connection_url ironic` - iniset $IRONIC_CONF_FILE DEFAULT state_path $IRONIC_STATE_PATH - iniset $IRONIC_CONF_FILE DEFAULT use_syslog $SYSLOG - # Configure Ironic conductor, if it was enabled. - if is_service_enabled ir-cond; then - configure_ironic_conductor - fi - - # Configure Ironic API, if it was enabled. - if is_service_enabled ir-api; then - configure_ironic_api - fi - - # Format logging - if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then - setup_colorized_logging $IRONIC_CONF_FILE DEFAULT tenant user - fi - - if [[ "$IRONIC_IPXE_ENABLED" == "True" ]]; then - _config_ironic_apache_wsgi - fi -} - -# configure_ironic_api() - Is used by configure_ironic(). 
Performs -# API specific configuration. -function configure_ironic_api { - iniset $IRONIC_CONF_FILE DEFAULT auth_strategy keystone - iniset $IRONIC_CONF_FILE oslo_policy policy_file $IRONIC_POLICY_JSON - - # TODO(Yuki Nishiwaki): This is a temporary work-around until Ironic is fixed(bug#1422632). - # These codes need to be changed to use the function of configure_auth_token_middleware - # after Ironic conforms to the new auth plugin. - iniset $IRONIC_CONF_FILE keystone_authtoken identity_uri $KEYSTONE_AUTH_URI - iniset $IRONIC_CONF_FILE keystone_authtoken auth_uri $KEYSTONE_SERVICE_URI/v2.0 - iniset $IRONIC_CONF_FILE keystone_authtoken admin_user ironic - iniset $IRONIC_CONF_FILE keystone_authtoken admin_password $SERVICE_PASSWORD - iniset $IRONIC_CONF_FILE keystone_authtoken admin_tenant_name $SERVICE_TENANT_NAME - iniset $IRONIC_CONF_FILE keystone_authtoken cafile $SSL_BUNDLE_FILE - iniset $IRONIC_CONF_FILE keystone_authtoken signing_dir $IRONIC_AUTH_CACHE_DIR/api - - iniset_rpc_backend ironic $IRONIC_CONF_FILE - iniset $IRONIC_CONF_FILE api port $IRONIC_SERVICE_PORT - - cp -p $IRONIC_DIR/etc/ironic/policy.json $IRONIC_POLICY_JSON -} - -# configure_ironic_conductor() - Is used by configure_ironic(). -# Sets conductor specific settings. 
-function configure_ironic_conductor { - cp $IRONIC_DIR/etc/ironic/rootwrap.conf $IRONIC_ROOTWRAP_CONF - cp -r $IRONIC_DIR/etc/ironic/rootwrap.d $IRONIC_CONF_DIR - local ironic_rootwrap - ironic_rootwrap=$(get_rootwrap_location ironic) - local rootwrap_isudoer_cmd="$ironic_rootwrap $IRONIC_CONF_DIR/rootwrap.conf *" - - # Set up the rootwrap sudoers for ironic - local tempfile - tempfile=`mktemp` - echo "$STACK_USER ALL=(root) NOPASSWD: $rootwrap_isudoer_cmd" >$tempfile - chmod 0440 $tempfile - sudo chown root:root $tempfile - sudo mv $tempfile /etc/sudoers.d/ironic-rootwrap - - iniset $IRONIC_CONF_FILE DEFAULT rootwrap_config $IRONIC_ROOTWRAP_CONF - iniset $IRONIC_CONF_FILE DEFAULT enabled_drivers $IRONIC_ENABLED_DRIVERS - iniset $IRONIC_CONF_FILE conductor api_url $IRONIC_SERVICE_PROTOCOL://$HOST_IP:$IRONIC_SERVICE_PORT - if [[ -n "$IRONIC_CALLBACK_TIMEOUT" ]]; then - iniset $IRONIC_CONF_FILE conductor deploy_callback_timeout $IRONIC_CALLBACK_TIMEOUT - fi - iniset $IRONIC_CONF_FILE pxe tftp_server $IRONIC_TFTPSERVER_IP - iniset $IRONIC_CONF_FILE pxe tftp_root $IRONIC_TFTPBOOT_DIR - iniset $IRONIC_CONF_FILE pxe tftp_master_path $IRONIC_TFTPBOOT_DIR/master_images - - local pxe_params="" - if [[ "$IRONIC_VM_LOG_CONSOLE" == "True" ]] ; then - pxe_params+="nofb nomodeset vga=normal console=ttyS0" - if is_deployed_with_ipa_ramdisk; then - pxe_params+=" systemd.journald.forward_to_console=yes" - fi - fi - # When booting with less than 1GB, we need to switch from default tmpfs - # to ramfs for ramdisks to decompress successfully. - if (is_ironic_hardware && [[ "$IRONIC_HW_NODE_RAM" -lt 1024 ]]) || - (! is_ironic_hardware && [[ "$IRONIC_VM_SPECS_RAM" -lt 1024 ]]); then - pxe_params+=" rootfstype=ramfs" - fi - if [[ -n "$pxe_params" ]]; then - iniset $IRONIC_CONF_FILE pxe pxe_append_params "$pxe_params" - fi - - # Set these options for scenarios in which the agent fetches the image - # directly from glance, and don't set them where the image is pushed - # over iSCSI. 
- if is_deployed_by_agent; then - if [[ "$SWIFT_ENABLE_TEMPURLS" == "True" ]] ; then - iniset $IRONIC_CONF_FILE glance swift_temp_url_key $SWIFT_TEMPURL_KEY - else - die $LINENO "SWIFT_ENABLE_TEMPURLS must be True to use agent_ssh driver in Ironic." - fi - iniset $IRONIC_CONF_FILE glance swift_endpoint_url http://${HOST_IP}:${SWIFT_DEFAULT_BIND_PORT:-8080} - iniset $IRONIC_CONF_FILE glance swift_api_version v1 - local tenant_id - tenant_id=$(get_or_create_project $SERVICE_TENANT_NAME default) - iniset $IRONIC_CONF_FILE glance swift_account AUTH_${tenant_id} - iniset $IRONIC_CONF_FILE glance swift_container glance - iniset $IRONIC_CONF_FILE glance swift_temp_url_duration 3600 - iniset $IRONIC_CONF_FILE agent heartbeat_timeout 30 - fi - - # FIXME: this really needs to be tested in the gate. - # For now, any test using the agent ramdisk should skip cleaning - # because it is too slow to run in the gate. - iniset $IRONIC_CONF_FILE agent agent_erase_devices_priority 0 - - if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then - local pxebin - pxebin=`basename $IRONIC_PXE_BOOT_IMAGE` - iniset $IRONIC_CONF_FILE pxe ipxe_enabled True - iniset $IRONIC_CONF_FILE pxe pxe_config_template '\$pybasedir/drivers/modules/ipxe_config.template' - iniset $IRONIC_CONF_FILE pxe pxe_bootfile_name $pxebin - iniset $IRONIC_CONF_FILE pxe http_root $IRONIC_HTTP_DIR - iniset $IRONIC_CONF_FILE pxe http_url "http://$IRONIC_HTTP_SERVER:$IRONIC_HTTP_PORT" - fi -} - -# create_ironic_cache_dir() - Part of the init_ironic() process -function create_ironic_cache_dir { - # Create cache dir - sudo mkdir -p $IRONIC_AUTH_CACHE_DIR/api - sudo chown $STACK_USER $IRONIC_AUTH_CACHE_DIR/api - rm -f $IRONIC_AUTH_CACHE_DIR/api/* - sudo mkdir -p $IRONIC_AUTH_CACHE_DIR/registry - sudo chown $STACK_USER $IRONIC_AUTH_CACHE_DIR/registry - rm -f $IRONIC_AUTH_CACHE_DIR/registry/* -} - -# create_ironic_accounts() - Set up common required ironic accounts - -# Tenant User Roles -# 
------------------------------------------------------------------ -# service ironic admin # if enabled -function create_ironic_accounts { - - # Ironic - if [[ "$ENABLED_SERVICES" =~ "ir-api" ]]; then - # Get ironic user if exists - - # NOTE(Shrews): This user MUST have admin level privileges! - create_service_user "ironic" "admin" - - if [[ "$KEYSTONE_CATALOG_BACKEND" = 'sql' ]]; then - - get_or_create_service "ironic" "baremetal" "Ironic baremetal provisioning service" - get_or_create_endpoint "baremetal" \ - "$REGION_NAME" \ - "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT" \ - "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT" \ - "$IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT" - fi - fi -} - - -# init_ironic() - Initialize databases, etc. -function init_ironic { - # Save private network as cleaning network - local cleaning_network_uuid - cleaning_network_uuid=$(neutron net-list | grep private | get_field 1) - iniset $IRONIC_CONF_FILE neutron cleaning_network_uuid ${cleaning_network_uuid} - - # (Re)create ironic database - recreate_database ironic - - # Migrate ironic database - $IRONIC_BIN_DIR/ironic-dbsync --config-file=$IRONIC_CONF_FILE - - create_ironic_cache_dir -} - -# _ironic_bm_vm_names() - Generates list of names for baremetal VMs. -function _ironic_bm_vm_names { - local idx - local num_vms - num_vms=$(($IRONIC_VM_COUNT - 1)) - for idx in $(seq 0 $num_vms); do - echo "baremetal${IRONIC_VM_NETWORK_BRIDGE}_${idx}" - done -} - -# start_ironic() - Start running processes, including screen -function start_ironic { - # Start Ironic API server, if enabled. - if is_service_enabled ir-api; then - start_ironic_api - fi - - # Start Ironic conductor, if enabled. - if is_service_enabled ir-cond; then - start_ironic_conductor - fi - - # Start Apache if iPXE is enabled - if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then - restart_apache_server - fi -} - -# start_ironic_api() - Used by start_ironic(). -# Starts Ironic API server. 
-function start_ironic_api { - run_process ir-api "$IRONIC_BIN_DIR/ironic-api --config-file=$IRONIC_CONF_FILE" - echo "Waiting for ir-api ($IRONIC_HOSTPORT) to start..." - if ! timeout $SERVICE_TIMEOUT sh -c "while ! wget --no-proxy -q -O- $IRONIC_SERVICE_PROTOCOL://$IRONIC_HOSTPORT; do sleep 1; done"; then - die $LINENO "ir-api did not start" - fi -} - -# start_ironic_conductor() - Used by start_ironic(). -# Starts Ironic conductor. -function start_ironic_conductor { - run_process ir-cond "$IRONIC_BIN_DIR/ironic-conductor --config-file=$IRONIC_CONF_FILE" - # TODO(romcheg): Find a way to check whether the conductor has started. -} - -# stop_ironic() - Stop running processes -function stop_ironic { - stop_process ir-api - stop_process ir-cond - - # Cleanup the WSGI files - if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then - _cleanup_ironic_apache_wsgi - fi -} - -function create_ovs_taps { - local ironic_net_id - ironic_net_id=$(neutron net-list | grep private | get_field 1) - - # Work around: No netns exists on host until a Neutron port is created. We - # need to create one in Neutron to know what netns to tap into prior to the - # first node booting. 
- local port_id - port_id=$(neutron port-create private | grep " id " | get_field 2) - - # intentional sleep to make sure the tag has been set to port - sleep 10 - - local tapdev - tapdev=$(sudo ip netns exec qdhcp-${ironic_net_id} ip link list | grep " tap" | cut -d':' -f2 | cut -d'@' -f1 | cut -b2-) - local tag_id - tag_id=$(sudo ovs-vsctl show |grep ${tapdev} -A1 -m1 | grep tag | cut -d':' -f2 | cut -b2-) - - # make sure veth pair is not existing, otherwise delete its links - sudo ip link show ovs-tap1 && sudo ip link delete ovs-tap1 - sudo ip link show brbm-tap1 && sudo ip link delete brbm-tap1 - # create veth pair for future interconnection between br-int and brbm - sudo ip link add brbm-tap1 type veth peer name ovs-tap1 - sudo ip link set dev brbm-tap1 up - sudo ip link set dev ovs-tap1 up - - sudo ovs-vsctl -- --if-exists del-port ovs-tap1 -- add-port br-int ovs-tap1 tag=$tag_id - sudo ovs-vsctl -- --if-exists del-port brbm-tap1 -- add-port $IRONIC_VM_NETWORK_BRIDGE brbm-tap1 - - # Remove the port needed only for workaround. - neutron port-delete $port_id - - # Finally, share the fixed tenant network across all tenants. This allows the host - # to serve TFTP to a single network namespace via the tap device created above. 
- neutron net-update $ironic_net_id --shared true -} - -function create_bridge_and_vms { - # Call libvirt setup scripts in a new shell to ensure any new group membership - sudo su $STACK_USER -c "$IRONIC_SCRIPTS_DIR/setup-network" - if [[ "$IRONIC_VM_LOG_CONSOLE" == "True" ]] ; then - local log_arg="$IRONIC_VM_LOG_DIR" - else - local log_arg="" - fi - local vm_name - for vm_name in $(_ironic_bm_vm_names); do - sudo su $STACK_USER -c "$IRONIC_SCRIPTS_DIR/create-node $vm_name \ - $IRONIC_VM_SPECS_CPU $IRONIC_VM_SPECS_RAM $IRONIC_VM_SPECS_DISK \ - amd64 $IRONIC_VM_NETWORK_BRIDGE $IRONIC_VM_EMULATOR \ - $log_arg" >> $IRONIC_VM_MACS_CSV_FILE - done - create_ovs_taps -} - -function wait_for_nova_resources { - # After nodes have been enrolled, we need to wait for both ironic and - # nova's periodic tasks to populate the resource tracker with available - # nodes and resources. Wait up to 2 minutes for a given resource before - # timing out. - local resource=$1 - local expected_count=$2 - local i - echo_summary "Waiting 2 minutes for Nova resource tracker to pick up $resource >= $expected_count" - for i in $(seq 1 120); do - if [ $(nova hypervisor-stats | grep " $resource " | get_field 2) -ge $expected_count ]; then - return 0 - fi - sleep 1 - done - die $LINENO "Timed out waiting for Nova hypervisor-stats $resource >= $expected_count" -} - -function enroll_nodes { - local chassis_id - chassis_id=$(ironic chassis-create -d "ironic test chassis" | grep " uuid " | get_field 2) - - if ! 
is_ironic_hardware; then - local ironic_node_cpu=$IRONIC_VM_SPECS_CPU - local ironic_node_ram=$IRONIC_VM_SPECS_RAM - local ironic_node_disk=$IRONIC_VM_SPECS_DISK - local ironic_ephemeral_disk=$IRONIC_VM_EPHEMERAL_DISK - local ironic_hwinfo_file=$IRONIC_VM_MACS_CSV_FILE - local node_options="\ - -i deploy_kernel=$IRONIC_DEPLOY_KERNEL_ID \ - -i deploy_ramdisk=$IRONIC_DEPLOY_RAMDISK_ID \ - -i ssh_virt_type=$IRONIC_SSH_VIRT_TYPE \ - -i ssh_address=$IRONIC_VM_SSH_ADDRESS \ - -i ssh_port=$IRONIC_VM_SSH_PORT \ - -i ssh_username=$IRONIC_SSH_USERNAME \ - -i ssh_key_filename=$IRONIC_KEY_FILE" - else - local ironic_node_cpu=$IRONIC_HW_NODE_CPU - local ironic_node_ram=$IRONIC_HW_NODE_RAM - local ironic_node_disk=$IRONIC_HW_NODE_DISK - local ironic_ephemeral_disk=$IRONIC_HW_EPHEMERAL_DISK - if [[ -z "${IRONIC_DEPLOY_DRIVER##*_ipmitool}" ]]; then - local ironic_hwinfo_file=$IRONIC_IPMIINFO_FILE - fi - fi - - local total_nodes=0 - local total_cpus=0 - while read hardware_info; do - if ! is_ironic_hardware; then - local mac_address=$hardware_info - elif [[ -z "${IRONIC_DEPLOY_DRIVER##*_ipmitool}" ]]; then - local ipmi_address - ipmi_address=$(echo $hardware_info |awk '{print $1}') - local mac_address - mac_address=$(echo $hardware_info |awk '{print $2}') - local ironic_ipmi_username - ironic_ipmi_username=$(echo $hardware_info |awk '{print $3}') - local ironic_ipmi_passwd - ironic_ipmi_passwd=$(echo $hardware_info |awk '{print $4}') - # Currently we require all hardware platform have same CPU/RAM/DISK info - # in future, this can be enhanced to support different type, and then - # we create the bare metal flavor with minimum value - local node_options="-i ipmi_address=$ipmi_address -i ipmi_password=$ironic_ipmi_passwd\ - -i ipmi_username=$ironic_ipmi_username" - node_options+=" -i deploy_kernel=$IRONIC_DEPLOY_KERNEL_ID" - node_options+=" -i deploy_ramdisk=$IRONIC_DEPLOY_RAMDISK_ID" - fi - - # First node created will be used for testing in ironic w/o glance - # scenario, so we need 
to know its UUID. - local standalone_node_uuid="" - if [ $total_nodes -eq 0 ]; then - standalone_node_uuid="--uuid $IRONIC_NODE_UUID" - fi - - local node_id - node_id=$(ironic node-create $standalone_node_uuid\ - --chassis_uuid $chassis_id \ - --driver $IRONIC_DEPLOY_DRIVER \ - --name node-$total_nodes \ - -p cpus=$ironic_node_cpu\ - -p memory_mb=$ironic_node_ram\ - -p local_gb=$ironic_node_disk\ - -p cpu_arch=x86_64 \ - $node_options \ - | grep " uuid " | get_field 2) - - ironic port-create --address $mac_address --node $node_id - - total_nodes=$((total_nodes+1)) - total_cpus=$((total_cpus+$ironic_node_cpu)) - done < $ironic_hwinfo_file - - local adjusted_disk - adjusted_disk=$(($ironic_node_disk - $ironic_ephemeral_disk)) - nova flavor-create --ephemeral $ironic_ephemeral_disk baremetal auto $ironic_node_ram $adjusted_disk $ironic_node_cpu - - nova flavor-key baremetal set "cpu_arch"="x86_64" - - if [ "$VIRT_DRIVER" == "ironic" ]; then - wait_for_nova_resources "count" $total_nodes - wait_for_nova_resources "vcpus" $total_cpus - fi -} - -function configure_iptables { - # enable tftp natting for allowing connections to HOST_IP's tftp server - sudo modprobe nf_conntrack_tftp - sudo modprobe nf_nat_tftp - # explicitly allow DHCP - packets are occasionally being dropped here - sudo iptables -I INPUT -p udp --dport 67:68 --sport 67:68 -j ACCEPT || true - # nodes boot from TFTP and callback to the API server listening on $HOST_IP - sudo iptables -I INPUT -d $HOST_IP -p udp --dport 69 -j ACCEPT || true - sudo iptables -I INPUT -d $HOST_IP -p tcp --dport $IRONIC_SERVICE_PORT -j ACCEPT || true - if is_deployed_by_agent; then - # agent ramdisk gets instance image from swift - sudo iptables -I INPUT -d $HOST_IP -p tcp --dport ${SWIFT_DEFAULT_BIND_PORT:-8080} -j ACCEPT || true - fi - - if [[ "$IRONIC_IPXE_ENABLED" == "True" ]] ; then - sudo iptables -I INPUT -d $HOST_IP -p tcp --dport $IRONIC_HTTP_PORT -j ACCEPT || true - fi -} - -function configure_tftpd { - # stop tftpd 
and setup serving via xinetd - stop_service tftpd-hpa || true - [ -f /etc/init/tftpd-hpa.conf ] && echo "manual" | sudo tee /etc/init/tftpd-hpa.override - sudo cp $IRONIC_TEMPLATES_DIR/tftpd-xinetd.template /etc/xinetd.d/tftp - sudo sed -e "s|%TFTPBOOT_DIR%|$IRONIC_TFTPBOOT_DIR|g" -i /etc/xinetd.d/tftp - - # setup tftp file mapping to satisfy requests at the root (booting) and - # /tftpboot/ sub-dir (as per deploy-ironic elements) - echo "r ^([^/]) $IRONIC_TFTPBOOT_DIR/\1" >$IRONIC_TFTPBOOT_DIR/map-file - echo "r ^(/tftpboot/) $IRONIC_TFTPBOOT_DIR/\2" >>$IRONIC_TFTPBOOT_DIR/map-file - - chmod -R 0755 $IRONIC_TFTPBOOT_DIR - restart_service xinetd -} - -function configure_ironic_ssh_keypair { - if [[ ! -d $HOME/.ssh ]]; then - mkdir -p $HOME/.ssh - chmod 700 $HOME/.ssh - fi - if [[ ! -e $IRONIC_KEY_FILE ]]; then - if [[ ! -d $(dirname $IRONIC_KEY_FILE) ]]; then - mkdir -p $(dirname $IRONIC_KEY_FILE) - fi - echo -e 'n\n' | ssh-keygen -q -t rsa -P '' -f $IRONIC_KEY_FILE - fi - cat $IRONIC_KEY_FILE.pub | tee -a $IRONIC_AUTHORIZED_KEYS_FILE -} - -function ironic_ssh_check { - local key_file=$1 - local floating_ip=$2 - local port=$3 - local default_instance_user=$4 - local active_timeout=$5 - if ! timeout $active_timeout sh -c "while ! ssh -p $port -o StrictHostKeyChecking=no -i $key_file ${default_instance_user}@$floating_ip echo success; do sleep 1; done"; then - die $LINENO "server didn't become ssh-able!" 
- fi -} - -function configure_ironic_auxiliary { - configure_ironic_ssh_keypair - ironic_ssh_check $IRONIC_KEY_FILE $IRONIC_VM_SSH_ADDRESS $IRONIC_VM_SSH_PORT $IRONIC_SSH_USERNAME $IRONIC_SSH_TIMEOUT -} - -function build_ipa_coreos_ramdisk { - echo "Building ironic-python-agent deploy ramdisk" - local kernel_path=$1 - local ramdisk_path=$2 - git_clone $IRONIC_PYTHON_AGENT_REPO $IRONIC_PYTHON_AGENT_DIR $IRONIC_PYTHON_AGENT_BRANCH - cd $IRONIC_PYTHON_AGENT_DIR - imagebuild/coreos/build_coreos_image.sh - cp imagebuild/coreos/UPLOAD/coreos_production_pxe_image-oem.cpio.gz $ramdisk_path - cp imagebuild/coreos/UPLOAD/coreos_production_pxe.vmlinuz $kernel_path - sudo rm -rf UPLOAD - cd - -} - -# build deploy kernel+ramdisk, then upload them to glance -# this function sets ``IRONIC_DEPLOY_KERNEL_ID``, ``IRONIC_DEPLOY_RAMDISK_ID`` -function upload_baremetal_ironic_deploy { - declare -g IRONIC_DEPLOY_KERNEL_ID IRONIC_DEPLOY_RAMDISK_ID - echo_summary "Creating and uploading baremetal images for ironic" - - # install diskimage-builder - if [[ $(type -P ramdisk-image-create) == "" ]]; then - pip_install_gr "diskimage-builder" - fi - - if [ -z "$IRONIC_DEPLOY_KERNEL" -o -z "$IRONIC_DEPLOY_RAMDISK" ]; then - local IRONIC_DEPLOY_KERNEL_PATH=$TOP_DIR/files/ir-deploy-$IRONIC_DEPLOY_DRIVER.kernel - local IRONIC_DEPLOY_RAMDISK_PATH=$TOP_DIR/files/ir-deploy-$IRONIC_DEPLOY_DRIVER.initramfs - else - local IRONIC_DEPLOY_KERNEL_PATH=$IRONIC_DEPLOY_KERNEL - local IRONIC_DEPLOY_RAMDISK_PATH=$IRONIC_DEPLOY_RAMDISK - fi - - if [ ! -e "$IRONIC_DEPLOY_RAMDISK_PATH" -o ! 
-e "$IRONIC_DEPLOY_KERNEL_PATH" ]; then - # files don't exist, need to build them - if [ "$IRONIC_BUILD_DEPLOY_RAMDISK" = "True" ]; then - # we can build them only if we're not offline - if [ "$OFFLINE" != "True" ]; then - if is_deployed_with_ipa_ramdisk; then - build_ipa_coreos_ramdisk $IRONIC_DEPLOY_KERNEL_PATH $IRONIC_DEPLOY_RAMDISK_PATH - else - ramdisk-image-create $IRONIC_DEPLOY_FLAVOR \ - -o $TOP_DIR/files/ir-deploy-$IRONIC_DEPLOY_DRIVER - fi - else - die $LINENO "Deploy kernel+ramdisk files don't exist and cannot be build in OFFLINE mode" - fi - else - if is_deployed_with_ipa_ramdisk; then - # download the agent image tarball - wget "$IRONIC_AGENT_KERNEL_URL" -O $IRONIC_DEPLOY_KERNEL_PATH - wget "$IRONIC_AGENT_RAMDISK_URL" -O $IRONIC_DEPLOY_RAMDISK_PATH - else - die $LINENO "Deploy kernel+ramdisk files don't exist and their building was disabled explicitly by IRONIC_BUILD_DEPLOY_RAMDISK" - fi - fi - fi - - local token - token=$(openstack token issue -c id -f value) - die_if_not_set $LINENO token "Keystone fail to get token" - - # load them into glance - IRONIC_DEPLOY_KERNEL_ID=$(openstack \ - --os-token $token \ - --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT \ - image create \ - $(basename $IRONIC_DEPLOY_KERNEL_PATH) \ - --public --disk-format=aki \ - --container-format=aki \ - < $IRONIC_DEPLOY_KERNEL_PATH | grep ' id ' | get_field 2) - IRONIC_DEPLOY_RAMDISK_ID=$(openstack \ - --os-token $token \ - --os-url $GLANCE_SERVICE_PROTOCOL://$GLANCE_HOSTPORT \ - image create \ - $(basename $IRONIC_DEPLOY_RAMDISK_PATH) \ - --public --disk-format=ari \ - --container-format=ari \ - < $IRONIC_DEPLOY_RAMDISK_PATH | grep ' id ' | get_field 2) -} - -function prepare_baremetal_basic_ops { - if ! is_ironic_hardware; then - configure_ironic_auxiliary - fi - upload_baremetal_ironic_deploy - if ! 
is_ironic_hardware; then - create_bridge_and_vms - fi - enroll_nodes - configure_tftpd - configure_iptables -} - -function cleanup_baremetal_basic_ops { - rm -f $IRONIC_VM_MACS_CSV_FILE - if [ -f $IRONIC_KEY_FILE ]; then - local key - key=$(cat $IRONIC_KEY_FILE.pub) - # remove public key from authorized_keys - grep -v "$key" $IRONIC_AUTHORIZED_KEYS_FILE > temp && mv temp $IRONIC_AUTHORIZED_KEYS_FILE - chmod 0600 $IRONIC_AUTHORIZED_KEYS_FILE - fi - sudo rm -rf $IRONIC_DATA_DIR $IRONIC_STATE_PATH - - local vm_name - for vm_name in $(_ironic_bm_vm_names); do - sudo su $STACK_USER -c "$IRONIC_SCRIPTS_DIR/cleanup-node $vm_name $IRONIC_VM_NETWORK_BRIDGE" - done - - sudo rm -rf /etc/xinetd.d/tftp /etc/init/tftpd-hpa.override - restart_service xinetd - sudo iptables -D INPUT -d $HOST_IP -p udp --dport 69 -j ACCEPT || true - sudo iptables -D INPUT -d $HOST_IP -p tcp --dport $IRONIC_SERVICE_PORT -j ACCEPT || true - if is_deployed_by_agent; then - # agent ramdisk gets instance image from swift - sudo iptables -D INPUT -d $HOST_IP -p tcp --dport ${SWIFT_DEFAULT_BIND_PORT:-8080} -j ACCEPT || true - fi - sudo rmmod nf_conntrack_tftp || true - sudo rmmod nf_nat_tftp || true -} - -# Restore xtrace + pipefail -$_XTRACE_IRONIC -$_PIPEFAIL_IRONIC - -# Tell emacs to use shell-script-mode -## Local variables: -## mode: shell-script -## End: diff --git a/stackrc b/stackrc index f949ccbad9..8e6ea42a00 100644 --- a/stackrc +++ b/stackrc @@ -225,10 +225,6 @@ HEAT_BRANCH=${HEAT_BRANCH:-master} HORIZON_REPO=${HORIZON_REPO:-${GIT_BASE}/openstack/horizon.git} HORIZON_BRANCH=${HORIZON_BRANCH:-master} -# baremetal provisioning service -IRONIC_REPO=${IRONIC_REPO:-${GIT_BASE}/openstack/ironic.git} -IRONIC_BRANCH=${IRONIC_BRANCH:-master} - # unified auth system (manages accounts/tokens) KEYSTONE_REPO=${KEYSTONE_REPO:-${GIT_BASE}/openstack/keystone.git} KEYSTONE_BRANCH=${KEYSTONE_BRANCH:-master} @@ -298,6 +294,8 @@ GITBRANCH["python-heatclient"]=${HEATCLIENT_BRANCH:-master} # ironic client 
GITREPO["python-ironicclient"]=${IRONICCLIENT_REPO:-${GIT_BASE}/openstack/python-ironicclient.git} GITBRANCH["python-ironicclient"]=${IRONICCLIENT_BRANCH:-master} +# ironic plugin is out of tree, but nova uses it. set GITDIR here. +GITDIR["python-ironicclient"]=$DEST/python-ironicclient # the base authentication plugins that clients use to authenticate GITREPO["keystoneauth"]=${KEYSTONEAUTH_REPO:-${GIT_BASE}/openstack/keystoneauth.git} @@ -484,6 +482,8 @@ GITBRANCH["os-brick"]=${OS_BRICK_BRANCH:-master} # ironic common lib GITREPO["ironic-lib"]=${IRONIC_LIB_REPO:-${GIT_BASE}/openstack/ironic-lib.git} GITBRANCH["ironic-lib"]=${IRONIC_LIB_BRANCH:-master} +# this doesn't exist in a lib file, so set it here +GITDIR["ironic-lib"]=$DEST/ironic-lib ################## diff --git a/tools/ironic/scripts/cleanup-node b/tools/ironic/scripts/cleanup-node deleted file mode 100755 index c4e4e706f4..0000000000 --- a/tools/ironic/scripts/cleanup-node +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env bash - -# **cleanup-nodes** - -# Cleans up baremetal poseur nodes and volumes created during ironic setup -# Assumes calling user has proper libvirt group membership and access. 
- -set -exu - -LIBVIRT_STORAGE_POOL=${LIBVIRT_STORAGE_POOL:-"default"} -LIBVIRT_CONNECT_URI=${LIBVIRT_CONNECT_URI:-"qemu:///system"} - -NAME=$1 -NETWORK_BRIDGE=$2 - -export VIRSH_DEFAULT_CONNECT_URI=$LIBVIRT_CONNECT_URI - -VOL_NAME="$NAME.qcow2" -virsh list | grep -q $NAME && virsh destroy $NAME -virsh list --inactive | grep -q $NAME && virsh undefine $NAME - -if virsh pool-list | grep -q $LIBVIRT_STORAGE_POOL ; then - virsh vol-list $LIBVIRT_STORAGE_POOL | grep -q $VOL_NAME && - virsh vol-delete $VOL_NAME --pool $LIBVIRT_STORAGE_POOL -fi diff --git a/tools/ironic/scripts/configure-vm b/tools/ironic/scripts/configure-vm deleted file mode 100755 index 378fcb85ad..0000000000 --- a/tools/ironic/scripts/configure-vm +++ /dev/null @@ -1,93 +0,0 @@ -#!/usr/bin/env python - -import argparse -import os.path - -import libvirt - -templatedir = os.path.join(os.path.dirname(os.path.dirname(__file__)), - 'templates') - - -CONSOLE_LOG = """ - - - - - - - - - - - - - - - -""" - - -def main(): - parser = argparse.ArgumentParser( - description="Configure a kvm virtual machine for the seed image.") - parser.add_argument('--name', default='seed', - help='the name to give the machine in libvirt.') - parser.add_argument('--image', - help='Use a custom image file (must be qcow2).') - parser.add_argument('--engine', default='qemu', - help='The virtualization engine to use') - parser.add_argument('--arch', default='i686', - help='The architecture to use') - parser.add_argument('--memory', default='2097152', - help="Maximum memory for the VM in KB.") - parser.add_argument('--cpus', default='1', - help="CPU count for the VM.") - parser.add_argument('--bootdev', default='hd', - help="What boot device to use (hd/network).") - parser.add_argument('--network', default="brbm", - help='The libvirt network name to use') - parser.add_argument('--libvirt-nic-driver', default='e1000', - help='The libvirt network driver to use') - parser.add_argument('--console-log', - help='File to log console') - 
parser.add_argument('--emulator', default=None, - help='Path to emulator bin for vm template') - args = parser.parse_args() - with file(templatedir + '/vm.xml', 'rb') as f: - source_template = f.read() - params = { - 'name': args.name, - 'imagefile': args.image, - 'engine': args.engine, - 'arch': args.arch, - 'memory': args.memory, - 'cpus': args.cpus, - 'bootdev': args.bootdev, - 'network': args.network, - 'nicdriver': args.libvirt_nic_driver, - 'emulator': args.emulator, - } - - if args.emulator: - params['emulator'] = args.emulator - else: - if os.path.exists("/usr/bin/kvm"): # Debian - params['emulator'] = "/usr/bin/kvm" - elif os.path.exists("/usr/bin/qemu-kvm"): # Redhat - params['emulator'] = "/usr/bin/qemu-kvm" - - if args.console_log: - params['bios_serial'] = "" - params['console_log'] = CONSOLE_LOG % {'console_log': args.console_log} - else: - params['bios_serial'] = '' - params['console_log'] = '' - libvirt_template = source_template % params - conn = libvirt.open("qemu:///system") - - a = conn.defineXML(libvirt_template) - print ("Created machine %s with UUID %s" % (args.name, a.UUIDString())) - -if __name__ == '__main__': - main() diff --git a/tools/ironic/scripts/create-node b/tools/ironic/scripts/create-node deleted file mode 100755 index b018acddc9..0000000000 --- a/tools/ironic/scripts/create-node +++ /dev/null @@ -1,79 +0,0 @@ -#!/usr/bin/env bash - -# **create-nodes** - -# Creates baremetal poseur nodes for ironic testing purposes - -set -ex - -# Keep track of the DevStack directory -TOP_DIR=$(cd $(dirname "$0")/.. && pwd) - -NAME=$1 -CPU=$2 -MEM=$(( 1024 * $3 )) -# Extra G to allow fuzz for partition table : flavor size and registered size -# need to be different to actual size. -DISK=$(( $4 + 1)) - -case $5 in - i386) ARCH='i686' ;; - amd64) ARCH='x86_64' ;; - *) echo "Unsupported arch $4!" 
; exit 1 ;; -esac - -BRIDGE=$6 -EMULATOR=$7 -LOGDIR=$8 - -LIBVIRT_NIC_DRIVER=${LIBVIRT_NIC_DRIVER:-"e1000"} -LIBVIRT_STORAGE_POOL=${LIBVIRT_STORAGE_POOL:-"default"} -LIBVIRT_CONNECT_URI=${LIBVIRT_CONNECT_URI:-"qemu:///system"} - -export VIRSH_DEFAULT_CONNECT_URI=$LIBVIRT_CONNECT_URI - -if ! virsh pool-list --all | grep -q $LIBVIRT_STORAGE_POOL; then - virsh pool-define-as --name $LIBVIRT_STORAGE_POOL dir --target /var/lib/libvirt/images >&2 - virsh pool-autostart $LIBVIRT_STORAGE_POOL >&2 - virsh pool-start $LIBVIRT_STORAGE_POOL >&2 -fi - -pool_state=$(virsh pool-info $LIBVIRT_STORAGE_POOL | grep State | awk '{ print $2 }') -if [ "$pool_state" != "running" ] ; then - [ ! -d /var/lib/libvirt/images ] && sudo mkdir /var/lib/libvirt/images - virsh pool-start $LIBVIRT_STORAGE_POOL >&2 -fi - -if [ -n "$LOGDIR" ] ; then - mkdir -p "$LOGDIR" -fi - -PREALLOC= -if [ -f /etc/debian_version ]; then - PREALLOC="--prealloc-metadata" -fi - -if [ -n "$LOGDIR" ] ; then - VM_LOGGING="--console-log $LOGDIR/${NAME}_console.log" -else - VM_LOGGING="" -fi -VOL_NAME="${NAME}.qcow2" - -if ! virsh list --all | grep -q $NAME; then - virsh vol-list --pool $LIBVIRT_STORAGE_POOL | grep -q $VOL_NAME && - virsh vol-delete $VOL_NAME --pool $LIBVIRT_STORAGE_POOL >&2 - virsh vol-create-as $LIBVIRT_STORAGE_POOL ${VOL_NAME} ${DISK}G --format qcow2 $PREALLOC >&2 - volume_path=$(virsh vol-path --pool $LIBVIRT_STORAGE_POOL $VOL_NAME) - # Pre-touch the VM to set +C, as it can only be set on empty files. 
- sudo touch "$volume_path" - sudo chattr +C "$volume_path" || true - $TOP_DIR/scripts/configure-vm \ - --bootdev network --name $NAME --image "$volume_path" \ - --arch $ARCH --cpus $CPU --memory $MEM --libvirt-nic-driver $LIBVIRT_NIC_DRIVER \ - --emulator $EMULATOR --network $BRIDGE $VM_LOGGING >&2 - -fi - -# echo mac -virsh dumpxml $NAME | grep "mac address" | head -1 | cut -d\' -f2 diff --git a/tools/ironic/scripts/setup-network b/tools/ironic/scripts/setup-network deleted file mode 100755 index 83308ed416..0000000000 --- a/tools/ironic/scripts/setup-network +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env bash - -# **setup-network** - -# Setups openvswitch libvirt network suitable for -# running baremetal poseur nodes for ironic testing purposes - -set -exu - -LIBVIRT_CONNECT_URI=${LIBVIRT_CONNECT_URI:-"qemu:///system"} - -# Keep track of the DevStack directory -TOP_DIR=$(cd $(dirname "$0")/.. && pwd) -BRIDGE_SUFFIX=${1:-''} -BRIDGE_NAME=brbm$BRIDGE_SUFFIX - -export VIRSH_DEFAULT_CONNECT_URI="$LIBVIRT_CONNECT_URI" - -# Only add bridge if missing -(sudo ovs-vsctl list-br | grep ${BRIDGE_NAME}$) || sudo ovs-vsctl add-br ${BRIDGE_NAME} - -# Remove bridge before replacing it. 
-(virsh net-list | grep "${BRIDGE_NAME} ") && virsh net-destroy ${BRIDGE_NAME} -(virsh net-list --inactive | grep "${BRIDGE_NAME} ") && virsh net-undefine ${BRIDGE_NAME} - -virsh net-define <(sed s/brbm/$BRIDGE_NAME/ $TOP_DIR/templates/brbm.xml) -virsh net-autostart ${BRIDGE_NAME} -virsh net-start ${BRIDGE_NAME} diff --git a/tools/ironic/templates/brbm.xml b/tools/ironic/templates/brbm.xml deleted file mode 100644 index 0769d3f1d0..0000000000 --- a/tools/ironic/templates/brbm.xml +++ /dev/null @@ -1,6 +0,0 @@ - - brbm - - - - diff --git a/tools/ironic/templates/tftpd-xinetd.template b/tools/ironic/templates/tftpd-xinetd.template deleted file mode 100644 index 5f3d03f3bb..0000000000 --- a/tools/ironic/templates/tftpd-xinetd.template +++ /dev/null @@ -1,14 +0,0 @@ -service tftp -{ - protocol = udp - port = 69 - socket_type = dgram - wait = yes - user = root - server = /usr/sbin/in.tftpd - server_args = -v -v -v -v -v --map-file %TFTPBOOT_DIR%/map-file %TFTPBOOT_DIR% - disable = no - # This is a workaround for Fedora, where TFTP will listen only on - # IPv6 endpoint, if IPv4 flag is not used. - flags = IPv4 -} diff --git a/tools/ironic/templates/vm.xml b/tools/ironic/templates/vm.xml deleted file mode 100644 index ae7d685256..0000000000 --- a/tools/ironic/templates/vm.xml +++ /dev/null @@ -1,49 +0,0 @@ - - %(name)s - %(memory)s - %(cpus)s - - hvm - - - %(bios_serial)s - - - - - - - - destroy - restart - restart - - %(emulator)s - - - - -
- - -
- - - - - -
- - - -